Backed out 7 changesets (bug 1454998) for build bustages at MediaTrackGraph.h on a CLOSED TREE.
authorGurzau Raul <rgurzau@mozilla.com>
Wed, 02 Oct 2019 11:46:23 +0300
changeset 495879 5f5e153eb14b8ecd44b5453e7a4a9e53e95193dd
parent 495878 80417bdfa72112c6f9472c29ce49e8ff81e8c688
child 495880 c4157510d5a908864b988212a5b21ca92127bb0c
push id 36639
push user rgurzau@mozilla.com
push date Wed, 02 Oct 2019 16:35:54 +0000
treeherder mozilla-central@314a0fee08fd [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
bugs 1454998
milestone 71.0a1
backs out 80417bdfa72112c6f9472c29ce49e8ff81e8c688
8ff03f2f4ca2da0761fb285f8c403b51765a19cf
ae6056b748d1eb640cfe5d457f39054d66346a8e
ab721cb2066b9c2635734723121d433735f5317c
d0e8d413cd1c6aa414cf167b085f635ae857e408
3ce4dc7e9ae24adc463c4106cc5d3ab6c13dedb6
6105a4176729d7f05462aa6a32eaf0c1617752ce
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Backed out 7 changesets (bug 1454998) for build bustages at MediaTrackGraph.h on a CLOSED TREE. Backed out changeset 80417bdfa721 (bug 1454998) Backed out changeset 8ff03f2f4ca2 (bug 1454998) Backed out changeset ae6056b748d1 (bug 1454998) Backed out changeset ab721cb2066b (bug 1454998) Backed out changeset d0e8d413cd1c (bug 1454998) Backed out changeset 3ce4dc7e9ae2 (bug 1454998) Backed out changeset 6105a4176729 (bug 1454998)
dom/html/HTMLCanvasElement.cpp
dom/html/HTMLMediaElement.cpp
dom/html/HTMLMediaElement.h
dom/media/AudioBufferUtils.h
dom/media/AudioCaptureStream.cpp
dom/media/AudioCaptureStream.h
dom/media/AudioCaptureTrack.cpp
dom/media/AudioCaptureTrack.h
dom/media/AudioSampleFormat.h
dom/media/AudioSegment.h
dom/media/AudioStreamTrack.cpp
dom/media/AudioStreamTrack.h
dom/media/CanvasCaptureMediaStream.cpp
dom/media/CanvasCaptureMediaStream.h
dom/media/CubebUtils.cpp
dom/media/CubebUtils.h
dom/media/DOMMediaStream.cpp
dom/media/DOMMediaStream.h
dom/media/DriftCompensation.h
dom/media/ForwardedInputTrack.cpp
dom/media/ForwardedInputTrack.h
dom/media/GraphDriver.cpp
dom/media/GraphDriver.h
dom/media/GraphRunner.cpp
dom/media/GraphRunner.h
dom/media/ImageToI420.h
dom/media/MediaDecoder.cpp
dom/media/MediaDecoder.h
dom/media/MediaDecoderOwner.h
dom/media/MediaDecoderStateMachine.cpp
dom/media/MediaDecoderStateMachine.h
dom/media/MediaInfo.h
dom/media/MediaManager.cpp
dom/media/MediaManager.h
dom/media/MediaPlaybackDelayPolicy.cpp
dom/media/MediaRecorder.cpp
dom/media/MediaRecorder.h
dom/media/MediaSegment.h
dom/media/MediaStreamGraph.cpp
dom/media/MediaStreamGraph.h
dom/media/MediaStreamGraphImpl.h
dom/media/MediaStreamListener.cpp
dom/media/MediaStreamListener.h
dom/media/MediaStreamTrack.cpp
dom/media/MediaStreamTrack.h
dom/media/MediaStreamTypes.h
dom/media/MediaStreamWindowCapturer.cpp
dom/media/MediaStreamWindowCapturer.h
dom/media/MediaTrack.cpp
dom/media/MediaTrack.h
dom/media/MediaTrackGraph.cpp
dom/media/MediaTrackGraph.h
dom/media/MediaTrackGraphImpl.h
dom/media/MediaTrackListener.cpp
dom/media/MediaTrackListener.h
dom/media/PrincipalHandle.h
dom/media/StreamTracks.cpp
dom/media/StreamTracks.h
dom/media/Tracing.h
dom/media/TrackID.h
dom/media/TrackUnionStream.cpp
dom/media/TrackUnionStream.h
dom/media/VideoFrameConverter.h
dom/media/VideoOutput.h
dom/media/VideoSegment.h
dom/media/VideoStreamTrack.cpp
dom/media/VideoStreamTrack.h
dom/media/VideoUtils.cpp
dom/media/VideoUtils.h
dom/media/encoder/MediaEncoder.cpp
dom/media/encoder/MediaEncoder.h
dom/media/encoder/OpusTrackEncoder.cpp
dom/media/encoder/TrackEncoder.cpp
dom/media/encoder/TrackEncoder.h
dom/media/encoder/VP8TrackEncoder.cpp
dom/media/encoder/VP8TrackEncoder.h
dom/media/gtest/TestAudioCallbackDriver.cpp
dom/media/gtest/TestGroupId.cpp
dom/media/gtest/TestVideoTrackEncoder.cpp
dom/media/imagecapture/CaptureTask.cpp
dom/media/imagecapture/CaptureTask.h
dom/media/imagecapture/ImageCapture.cpp
dom/media/imagecapture/ImageCapture.h
dom/media/mediasink/DecodedStream.cpp
dom/media/mediasink/DecodedStream.h
dom/media/mediasink/OutputStreamManager.cpp
dom/media/mediasink/OutputStreamManager.h
dom/media/moz.build
dom/media/mp4/MP4Metadata.cpp
dom/media/mp4/MP4Metadata.h
dom/media/test/mochitest.ini
dom/media/test/test_autoplay_policy_web_audio_AudioParamStream.html
dom/media/test/test_imagecapture.html
dom/media/tests/mochitest/identity/mochitest.ini
dom/media/tests/mochitest/mochitest.ini
dom/media/tests/mochitest/peerconnection_audio_forced_sample_rate.js
dom/media/tests/mochitest/test_peerConnection_basicAudio_forced_higher_rate.html
dom/media/tests/mochitest/test_peerConnection_basicAudio_forced_lower_rate.html
dom/media/webaudio/AnalyserNode.cpp
dom/media/webaudio/AudioBlock.h
dom/media/webaudio/AudioBufferSourceNode.cpp
dom/media/webaudio/AudioBufferSourceNode.h
dom/media/webaudio/AudioContext.cpp
dom/media/webaudio/AudioContext.h
dom/media/webaudio/AudioDestinationNode.cpp
dom/media/webaudio/AudioDestinationNode.h
dom/media/webaudio/AudioEventTimeline.cpp
dom/media/webaudio/AudioEventTimeline.h
dom/media/webaudio/AudioListener.cpp
dom/media/webaudio/AudioNode.cpp
dom/media/webaudio/AudioNode.h
dom/media/webaudio/AudioNodeEngine.cpp
dom/media/webaudio/AudioNodeEngine.h
dom/media/webaudio/AudioNodeExternalInputStream.cpp
dom/media/webaudio/AudioNodeExternalInputStream.h
dom/media/webaudio/AudioNodeExternalInputTrack.cpp
dom/media/webaudio/AudioNodeExternalInputTrack.h
dom/media/webaudio/AudioNodeStream.cpp
dom/media/webaudio/AudioNodeStream.h
dom/media/webaudio/AudioNodeTrack.cpp
dom/media/webaudio/AudioNodeTrack.h
dom/media/webaudio/AudioParam.cpp
dom/media/webaudio/AudioParam.h
dom/media/webaudio/AudioParamTimeline.h
dom/media/webaudio/AudioWorkletGlobalScope.cpp
dom/media/webaudio/AudioWorkletImpl.cpp
dom/media/webaudio/AudioWorkletImpl.h
dom/media/webaudio/AudioWorkletNode.cpp
dom/media/webaudio/BiquadFilterNode.cpp
dom/media/webaudio/ChannelMergerNode.cpp
dom/media/webaudio/ChannelSplitterNode.cpp
dom/media/webaudio/ConstantSourceNode.cpp
dom/media/webaudio/ConstantSourceNode.h
dom/media/webaudio/ConvolverNode.cpp
dom/media/webaudio/DelayNode.cpp
dom/media/webaudio/DynamicsCompressorNode.cpp
dom/media/webaudio/GainNode.cpp
dom/media/webaudio/IIRFilterNode.cpp
dom/media/webaudio/MediaElementAudioSourceNode.cpp
dom/media/webaudio/MediaStreamAudioDestinationNode.cpp
dom/media/webaudio/MediaStreamAudioDestinationNode.h
dom/media/webaudio/MediaStreamAudioSourceNode.cpp
dom/media/webaudio/MediaStreamAudioSourceNode.h
dom/media/webaudio/MediaStreamTrackAudioSourceNode.cpp
dom/media/webaudio/MediaStreamTrackAudioSourceNode.h
dom/media/webaudio/OscillatorNode.cpp
dom/media/webaudio/OscillatorNode.h
dom/media/webaudio/PannerNode.cpp
dom/media/webaudio/PannerNode.h
dom/media/webaudio/PlayingRefChangeHandler.h
dom/media/webaudio/ScriptProcessorNode.cpp
dom/media/webaudio/StereoPannerNode.cpp
dom/media/webaudio/WaveShaperNode.cpp
dom/media/webaudio/WaveShaperNode.h
dom/media/webaudio/WebAudioUtils.cpp
dom/media/webaudio/WebAudioUtils.h
dom/media/webaudio/moz.build
dom/media/webaudio/test/blink/mochitest.ini
dom/media/webaudio/test/mochitest.ini
dom/media/webaudio/test/test_WebAudioMemoryReporting.html
dom/media/webaudio/test/test_audioContextParams_sampleRate.html
dom/media/webrtc/MediaEngine.h
dom/media/webrtc/MediaEngineDefault.cpp
dom/media/webrtc/MediaEngineDefault.h
dom/media/webrtc/MediaEngineRemoteVideoSource.cpp
dom/media/webrtc/MediaEngineRemoteVideoSource.h
dom/media/webrtc/MediaEngineSource.h
dom/media/webrtc/MediaEngineTabVideoSource.cpp
dom/media/webrtc/MediaEngineTabVideoSource.h
dom/media/webrtc/MediaEngineWebRTC.h
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
dom/media/webrtc/MediaEngineWebRTCAudio.h
dom/media/webrtc/SineWaveGenerator.h
dom/media/webspeech/recognition/SpeechRecognition.cpp
dom/media/webspeech/recognition/SpeechRecognition.h
dom/media/webspeech/recognition/SpeechTrackListener.cpp
dom/media/webspeech/recognition/SpeechTrackListener.h
dom/media/webspeech/recognition/test/mochitest.ini
dom/media/webspeech/synth/test/mochitest.ini
dom/media/webspeech/synth/test/startup/mochitest.ini
dom/webidl/AudioParam.webidl
media/webrtc/signaling/gtest/mediapipeline_unittest.cpp
media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
media/webrtc/signaling/src/media-conduit/VideoConduit.cpp
media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.cpp
media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.h
media/webrtc/signaling/src/peerconnection/RemoteTrackSource.h
media/webrtc/signaling/src/peerconnection/TransceiverImpl.cpp
media/webrtc/signaling/src/peerconnection/TransceiverImpl.h
modules/libpref/init/all.js
--- a/dom/html/HTMLCanvasElement.cpp
+++ b/dom/html/HTMLCanvasElement.cpp
@@ -5,17 +5,17 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "mozilla/dom/HTMLCanvasElement.h"
 
 #include "ImageEncoder.h"
 #include "jsapi.h"
 #include "jsfriendapi.h"
 #include "Layers.h"
-#include "MediaTrackGraph.h"
+#include "MediaStreamGraph.h"
 #include "mozilla/Assertions.h"
 #include "mozilla/Base64.h"
 #include "mozilla/CheckedInt.h"
 #include "mozilla/dom/CanvasCaptureMediaStream.h"
 #include "mozilla/dom/CanvasRenderingContext2D.h"
 #include "mozilla/dom/Event.h"
 #include "mozilla/dom/File.h"
 #include "mozilla/dom/HTMLCanvasElementBinding.h"
@@ -677,25 +677,26 @@ already_AddRefed<CanvasCaptureMediaStrea
 
   if (!mCurrentContext) {
     aRv.Throw(NS_ERROR_NOT_INITIALIZED);
     return nullptr;
   }
 
   auto stream = MakeRefPtr<CanvasCaptureMediaStream>(window, this);
 
+  const TrackID videoTrackId = 1;
   nsCOMPtr<nsIPrincipal> principal = NodePrincipal();
-  nsresult rv = stream->Init(aFrameRate, principal);
+  nsresult rv = stream->Init(aFrameRate, videoTrackId, principal);
   if (NS_FAILED(rv)) {
     aRv.Throw(rv);
     return nullptr;
   }
 
   RefPtr<MediaStreamTrack> track =
-      new VideoStreamTrack(window, stream->GetSourceStream(),
+      new VideoStreamTrack(window, stream->GetSourceStream(), videoTrackId,
                            new CanvasCaptureTrackSource(principal, stream));
   stream->AddTrackInternal(track);
 
   // Check site-specific permission and display prompt if appropriate.
   // If no permission, arrange for the frame capture listener to return
   // all-white, opaque image data.
   bool usePlaceholder = !CanvasUtils::IsImageExtractionAllowed(
       OwnerDoc(), nsContentUtils::GetCurrentJSContext(), aSubjectPrincipal);
--- a/dom/html/HTMLMediaElement.cpp
+++ b/dom/html/HTMLMediaElement.cpp
@@ -29,20 +29,19 @@
 #include "MediaContainerType.h"
 #include "MediaError.h"
 #include "MediaManager.h"
 #include "MediaMetadataManager.h"
 #include "MediaResource.h"
 #include "MediaShutdownManager.h"
 #include "MediaSourceDecoder.h"
 #include "MediaStreamError.h"
-#include "MediaTrackGraphImpl.h"
-#include "MediaTrackListener.h"
+#include "MediaStreamGraphImpl.h"
+#include "MediaStreamListener.h"
 #include "MediaStreamWindowCapturer.h"
-#include "MediaTrack.h"
 #include "MediaTrackList.h"
 #include "SVGObserverUtils.h"
 #include "TimeRanges.h"
 #include "VideoFrameContainer.h"
 #include "VideoOutput.h"
 #include "VideoStreamTrack.h"
 #include "base/basictypes.h"
 #include "jsapi.h"
@@ -385,17 +384,18 @@ class HTMLMediaElement::FirstFrameListen
                      AbstractThread* aMainThread)
       : VideoOutput(aContainer, aMainThread) {
     MOZ_ASSERT(NS_IsMainThread());
   }
 
   // NB that this overrides VideoOutput::NotifyRealtimeTrackData, so we can
   // filter out all frames but the first one with a real size. This allows us to
   // later re-use the logic in VideoOutput for rendering that frame.
-  void NotifyRealtimeTrackData(MediaTrackGraph* aGraph, TrackTime aTrackOffset,
+  void NotifyRealtimeTrackData(MediaStreamGraph* aGraph,
+                               StreamTime aTrackOffset,
                                const MediaSegment& aMedia) override {
     MOZ_ASSERT(aMedia.GetType() == MediaSegment::VIDEO);
 
     if (mInitialSizeFound) {
       return;
     }
 
     const VideoSegment& video = static_cast<const VideoSegment&>(aMedia);
@@ -412,17 +412,17 @@ class HTMLMediaElement::FirstFrameListen
         VideoOutput::NotifyRealtimeTrackData(aGraph, aTrackOffset, segment);
         return;
       }
     }
   }
 
  private:
   // Whether a frame with a concrete size has been received. May only be
-  // accessed on the MTG's appending thread. (this is a direct listener so we
+  // accessed on the MSG's appending thread. (this is a direct listener so we
   // get called by whoever is producing this track's data)
   bool mInitialSizeFound = false;
 };
 
 /**
  * Helper class that manages audio and video outputs for all enabled tracks in a
  * media element. It also manages calculating the current time when playing a
  * MediaStream.
@@ -436,31 +436,31 @@ class HTMLMediaElement::MediaStreamRende
                       VideoFrameContainer* aVideoContainer,
                       void* aAudioOutputKey)
       : mVideoContainer(aVideoContainer),
         mAudioOutputKey(aAudioOutputKey),
         mWatchManager(this, aMainThread) {}
 
   void UpdateGraphTime() {
     mGraphTime =
-        mGraphTimeDummy->mTrack->Graph()->CurrentTime() - *mGraphTimeOffset;
+        mGraphTimeDummy->mStream->Graph()->CurrentTime() - *mGraphTimeOffset;
   }
 
   void Start() {
     if (mRendering) {
       return;
     }
 
     mRendering = true;
 
     if (!mGraphTimeDummy) {
       return;
     }
 
-    MediaTrackGraph* graph = mGraphTimeDummy->mTrack->Graph();
+    MediaStreamGraph* graph = mGraphTimeDummy->mStream->Graph();
     mGraphTimeOffset = Some(graph->CurrentTime().Ref() - mGraphTime);
     mWatchManager.Watch(graph->CurrentTime(),
                         &MediaStreamRenderer::UpdateGraphTime);
 
     for (const auto& t : mAudioTracks) {
       if (t) {
         t->AsAudioStreamTrack()->AddAudioOutput(mAudioOutputKey);
         t->AsAudioStreamTrack()->SetAudioOutputVolume(mAudioOutputKey,
@@ -479,17 +479,17 @@ class HTMLMediaElement::MediaStreamRende
     }
 
     mRendering = false;
 
     if (!mGraphTimeDummy) {
       return;
     }
 
-    mWatchManager.Unwatch(mGraphTimeDummy->mTrack->Graph()->CurrentTime(),
+    mWatchManager.Unwatch(mGraphTimeDummy->mStream->Graph()->CurrentTime(),
                           &MediaStreamRenderer::UpdateGraphTime);
 
     for (const auto& t : mAudioTracks) {
       if (t) {
         t->AsAudioStreamTrack()->RemoveAudioOutput(mAudioOutputKey);
       }
     }
 
@@ -553,17 +553,18 @@ class HTMLMediaElement::MediaStreamRende
     mVideoTrack = nullptr;
   }
 
   double CurrentTime() const {
     if (!mGraphTimeDummy) {
       return 0.0;
     }
 
-    return mGraphTimeDummy->mTrack->GraphImpl()->MediaTimeToSeconds(mGraphTime);
+    return mGraphTimeDummy->mStream->GraphImpl()->MediaTimeToSeconds(
+        mGraphTime);
   }
 
   Watchable<GraphTime>& CurrentGraphTime() { return mGraphTime; }
 
   // Set if we're rendering video.
   const RefPtr<VideoFrameContainer> mVideoContainer;
 
   // Set if we're rendering audio, nullptr otherwise.
@@ -584,35 +585,35 @@ class HTMLMediaElement::MediaStreamRende
     MOZ_DIAGNOSTIC_ASSERT(!mVideoTrack);
   }
 
   void EnsureGraphTimeDummy() {
     if (mGraphTimeDummy) {
       return;
     }
 
-    MediaTrackGraph* graph = nullptr;
+    MediaStreamGraph* graph = nullptr;
     for (const auto& t : mAudioTracks) {
       if (t && !t->Ended()) {
         graph = t->Graph();
         break;
       }
     }
 
     if (!graph && mVideoTrack && !mVideoTrack->Ended()) {
       graph = mVideoTrack->Graph();
     }
 
     if (!graph) {
       return;
     }
 
     // This dummy keeps `graph` alive and ensures access to it.
-    mGraphTimeDummy = MakeRefPtr<SharedDummyTrack>(
-        graph->CreateSourceTrack(MediaSegment::AUDIO));
+    mGraphTimeDummy =
+        MakeRefPtr<SharedDummyStream>(graph->CreateSourceStream());
 
     if (mRendering) {
       mGraphTimeOffset = Some(graph->CurrentTime() - mGraphTime);
       mWatchManager.Watch(graph->CurrentTime(),
                           &MediaStreamRenderer::UpdateGraphTime);
     }
   }
 
@@ -621,20 +622,20 @@ class HTMLMediaElement::MediaStreamRende
   bool mRendering = false;
 
   // The audio output volume for all audio tracks.
   float mAudioOutputVolume = 1.0f;
 
   // WatchManager for mGraphTime.
   WatchManager<MediaStreamRenderer> mWatchManager;
 
-  // A dummy MediaTrack to guarantee a MediaTrackGraph is kept alive while
+  // A dummy MediaStream to guarantee a MediaStreamGraph is kept alive while
   // we're actively rendering, so we can track the graph's current time. Set
   // when the first track is added, never unset.
-  RefPtr<SharedDummyTrack> mGraphTimeDummy;
+  RefPtr<SharedDummyStream> mGraphTimeDummy;
 
   // Watchable that relays the graph's currentTime updates to the media element
   // only while we're rendering. This is the current time of the rendering in
   // GraphTime units.
   Watchable<GraphTime> mGraphTime = {0, "MediaStreamRenderer::mGraphTime"};
 
   // Nothing until a track has been added. Then, the current GraphTime at the
   // time when we were last Start()ed.
@@ -651,45 +652,46 @@ class HTMLMediaElement::StreamCaptureTra
     : public MediaStreamTrackSource,
       public MediaStreamTrackSource::Sink {
  public:
   NS_DECL_ISUPPORTS_INHERITED
   NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(StreamCaptureTrackSource,
                                            MediaStreamTrackSource)
 
   StreamCaptureTrackSource(MediaStreamTrackSource* aCapturedTrackSource,
-                           ProcessedMediaTrack* aStream, MediaInputPort* aPort)
+                           ProcessedMediaStream* aStream, MediaInputPort* aPort)
       : MediaStreamTrackSource(aCapturedTrackSource->GetPrincipal(),
                                nsString()),
         mCapturedTrackSource(aCapturedTrackSource),
-        mTrack(aStream),
+        mStream(aStream),
         mPort(aPort) {
     MOZ_ASSERT(mCapturedTrackSource);
-    MOZ_ASSERT(mTrack);
+    MOZ_ASSERT(mStream);
     MOZ_ASSERT(mPort);
 
     mCapturedTrackSource->RegisterSink(this);
   }
 
   void SetEnabled(bool aEnabled) {
-    if (!mTrack) {
+    if (!mStream) {
       return;
     }
-    mTrack->SetEnabled(aEnabled ? DisabledTrackMode::ENABLED
-                                : DisabledTrackMode::SILENCE_FREEZE);
+    mStream->SetTrackEnabled(mPort->GetDestinationTrackId(),
+                             aEnabled ? DisabledTrackMode::ENABLED
+                                      : DisabledTrackMode::SILENCE_FREEZE);
   }
 
   void Destroy() override {
     if (mCapturedTrackSource) {
       mCapturedTrackSource->UnregisterSink(this);
       mCapturedTrackSource = nullptr;
     }
-    if (mTrack) {
-      mTrack->Destroy();
-      mTrack = nullptr;
+    if (mStream) {
+      mStream->Destroy();
+      mStream = nullptr;
     }
     if (mPort) {
       mPort->Destroy();
       mPort = nullptr;
     }
   }
 
   MediaSourceEnum GetMediaSource() const override {
@@ -740,22 +742,22 @@ class HTMLMediaElement::StreamCaptureTra
 
     Destroy();
     MediaStreamTrackSource::OverrideEnded();
   }
 
  private:
   virtual ~StreamCaptureTrackSource() {
     MOZ_ASSERT(!mCapturedTrackSource);
-    MOZ_ASSERT(!mTrack);
+    MOZ_ASSERT(!mStream);
     MOZ_ASSERT(!mPort);
   };
 
   RefPtr<MediaStreamTrackSource> mCapturedTrackSource;
-  RefPtr<ProcessedMediaTrack> mTrack;
+  RefPtr<ProcessedMediaStream> mStream;
   RefPtr<MediaInputPort> mPort;
 };
 
 NS_IMPL_ADDREF_INHERITED(HTMLMediaElement::StreamCaptureTrackSource,
                          MediaStreamTrackSource)
 NS_IMPL_RELEASE_INHERITED(HTMLMediaElement::StreamCaptureTrackSource,
                           MediaStreamTrackSource)
 NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(
@@ -1075,29 +1077,29 @@ class HTMLMediaElement::AudioChannelAgen
                  "this = %p, Error : unknown suspended type!\n",
                  this));
     }
     return NS_OK;
   }
 
   NS_IMETHODIMP WindowAudioCaptureChanged(bool aCapture) override {
     MOZ_ASSERT(mAudioChannelAgent);
-    AudioCaptureTrackChangeIfNeeded();
+    AudioCaptureStreamChangeIfNeeded();
     return NS_OK;
   }
 
-  void AudioCaptureTrackChangeIfNeeded() {
+  void AudioCaptureStreamChangeIfNeeded() {
     MOZ_ASSERT(!mIsShutDown);
     if (!IsPlayingStarted()) {
       return;
     }
 
     MOZ_ASSERT(mAudioChannelAgent);
     bool isCapturing = mAudioChannelAgent->IsWindowAudioCapturingEnabled();
-    mOwner->AudioCaptureTrackChange(isCapturing);
+    mOwner->AudioCaptureStreamChange(isCapturing);
   }
 
   void NotifyAudioPlaybackChanged(AudibleChangedReasons aReason) {
     MOZ_ASSERT(!mIsShutDown);
     if (!IsPlayingStarted()) {
       return;
     }
 
@@ -1169,18 +1171,18 @@ class HTMLMediaElement::AudioChannelAgen
   }
 
   void StopAudioChanelAgent() {
     MOZ_ASSERT(mAudioChannelAgent);
     MOZ_ASSERT(mAudioChannelAgent->IsPlayingStarted());
     mAudioChannelAgent->NotifyStoppedPlaying();
     NotifyMediaStopped(mAudioChannelAgent->WindowID());
     // If we have started audio capturing before, we have to tell media element
-    // to clear the output capturing track.
-    mOwner->AudioCaptureTrackChange(false);
+    // to clear the output capturing stream.
+    mOwner->AudioCaptureStreamChange(false);
   }
 
   void SetSuspended(SuspendTypes aSuspend) {
     if (mSuspended == aSuspend) {
       return;
     }
 
     MaybeNotifyMediaResumed(aSuspend);
@@ -1898,17 +1900,17 @@ void HTMLMediaElement::AbortExistingLoad
   // We need to remove FirstFrameListener before VideoTracks get emptied.
   if (mFirstFrameListener) {
     mSelectedVideoStreamTrack->RemoveVideoOutput(mFirstFrameListener);
     mFirstFrameListener = nullptr;
   }
 
   // When aborting the existing loads, empty the objects in audio track list and
   // video track list, no events (in particular, no removetrack events) are
-  // fired as part of this. Ending MediaTrack sends track ended notifications,
+  // fired as part of this. Ending MediaStream sends track ended notifications,
   // so we empty the track lists prior.
   if (AudioTracks()) {
     AudioTracks()->EmptyTracks();
   }
   if (VideoTracks()) {
     VideoTracks()->EmptyTracks();
   }
 
@@ -2259,17 +2261,17 @@ void HTMLMediaElement::NotifyLoadError(c
   } else if (mSourceLoadCandidate) {
     DispatchAsyncSourceError(mSourceLoadCandidate);
     QueueLoadFromSourceTask();
   } else {
     NS_WARNING("Should know the source we were loading from!");
   }
 }
 
-void HTMLMediaElement::NotifyMediaTrackEnabled(dom::MediaTrack* aTrack) {
+void HTMLMediaElement::NotifyMediaTrackEnabled(MediaTrack* aTrack) {
   MOZ_ASSERT(aTrack);
   if (!aTrack) {
     return;
   }
 #ifdef DEBUG
   nsString id;
   aTrack->GetId(id);
 
@@ -2331,17 +2333,17 @@ void HTMLMediaElement::NotifyMediaTrackE
         // If the output stream is for audio only we ignore video tracks.
         continue;
       }
       AddCaptureMediaTrackToOutputStream(aTrack, ms);
     }
   }
 }
 
-void HTMLMediaElement::NotifyMediaTrackDisabled(dom::MediaTrack* aTrack) {
+void HTMLMediaElement::NotifyMediaTrackDisabled(MediaTrack* aTrack) {
   MOZ_ASSERT(aTrack);
   if (!aTrack) {
     return;
   }
 #ifdef DEBUG
   nsString id;
   aTrack->GetId(id);
 
@@ -2401,17 +2403,17 @@ void HTMLMediaElement::NotifyMediaTrackD
       continue;
     }
     MOZ_ASSERT(ms.mCapturingMediaStream);
     for (int32_t i = ms.mTracks.Length() - 1; i >= 0; --i) {
       if (ms.mTracks[i].first() != aTrack->GetId()) {
         continue;
       }
       // The source of this track just ended. Force-notify that it ended.
-      // If we bounce it to the MediaTrackGraph it might not be picked up,
+      // If we bounce it to the MediaStreamGraph it might not be picked up,
       // for instance if the MediaInputPort was destroyed in the same
       // iteration as it was added.
       mMainThreadEventTarget->Dispatch(NewRunnableMethod(
           "StreamCaptureTrackSource::OverrideEnded",
           static_cast<StreamCaptureTrackSource*>(ms.mTracks[i].second().get()),
           &StreamCaptureTrackSource::OverrideEnded));
 
       ms.mTracks.RemoveElementAt(i);
@@ -3155,18 +3157,17 @@ void HTMLMediaElement::SetCapturedOutput
       LOG(LogLevel::Debug, ("%s track %p for captured MediaStream %p",
                             aEnabled ? "Enabled" : "Disabled",
                             pair.second().get(), ms.mStream.get()));
     }
   }
 }
 
 void HTMLMediaElement::AddCaptureMediaTrackToOutputStream(
-    dom::MediaTrack* aTrack, OutputMediaStream& aOutputStream,
-    bool aAsyncAddtrack) {
+    MediaTrack* aTrack, OutputMediaStream& aOutputStream, bool aAsyncAddtrack) {
   if (aOutputStream.mCapturingDecoder) {
     MOZ_ASSERT(!aOutputStream.mCapturingMediaStream);
     return;
   }
   aOutputStream.mCapturingMediaStream = true;
 
   if (aOutputStream.mStream == mSrcStream) {
     // Cycle detected. This can happen since tracks are added async.
@@ -3187,52 +3188,50 @@ void HTMLMediaElement::AddCaptureMediaTr
   }
   MOZ_DIAGNOSTIC_ASSERT(!inputTrack->Ended());
 
   nsPIDOMWindowInner* window = OwnerDoc()->GetInnerWindow();
   if (!window) {
     return;
   }
 
-  MediaSegment::Type type = inputTrack->AsAudioStreamTrack()
-                                ? MediaSegment::AUDIO
-                                : MediaSegment::VIDEO;
-  ProcessedMediaTrack* track =
-      inputTrack->Graph()->CreateForwardedInputTrack(type);
-  RefPtr<MediaInputPort> port = inputTrack->ForwardTrackContentsTo(track);
+  ProcessedMediaStream* stream = inputTrack->Graph()->CreateTrackUnionStream();
+  RefPtr<MediaInputPort> port = inputTrack->ForwardTrackContentsTo(stream);
   auto source = MakeRefPtr<StreamCaptureTrackSource>(&inputTrack->GetSource(),
-                                                     track, port);
+                                                     stream, port);
 
   // Track is muted initially, so we don't leak data if it's added while paused
-  // and an MTG iteration passes before the mute comes into effect.
+  // and an MSG iteration passes before the mute comes into effect.
   source->SetEnabled(mSrcStreamIsPlaying);
 
-  RefPtr<MediaStreamTrack> domTrack;
+  RefPtr<MediaStreamTrack> track;
   if (inputTrack->AsAudioStreamTrack()) {
-    domTrack = new AudioStreamTrack(window, track, source);
+    track =
+        new AudioStreamTrack(window, stream, inputTrack->GetTrackID(), source);
   } else {
-    domTrack = new VideoStreamTrack(window, track, source);
+    track =
+        new VideoStreamTrack(window, stream, inputTrack->GetTrackID(), source);
   }
 
   aOutputStream.mTracks.AppendElement(
       Pair<nsString, RefPtr<MediaStreamTrackSource>>(aTrack->GetId(),
                                                      source.get()));
 
   if (aAsyncAddtrack) {
     mMainThreadEventTarget->Dispatch(
         NewRunnableMethod<StoreRefPtrPassByPtr<MediaStreamTrack>>(
             "DOMMediaStream::AddTrackInternal", aOutputStream.mStream,
-            &DOMMediaStream::AddTrackInternal, domTrack));
+            &DOMMediaStream::AddTrackInternal, track));
   } else {
-    aOutputStream.mStream->AddTrackInternal(domTrack);
+    aOutputStream.mStream->AddTrackInternal(track);
   }
 
   LOG(LogLevel::Debug,
       ("Created %s track %p from track %p through MediaInputPort %p",
-       inputTrack->AsAudioStreamTrack() ? "audio" : "video", domTrack.get(),
+       inputTrack->AsAudioStreamTrack() ? "audio" : "video", track.get(),
        inputTrack, port.get()));
 }
 
 void HTMLMediaElement::DiscardFinishWhenEndedOutputStreams() {
   // Discard all output streams that have finished now.
   for (int32_t i = mOutputStreams.Length() - 1; i >= 0; --i) {
     if (!mOutputStreams[i].mFinishWhenEnded) {
       continue;
@@ -3260,35 +3259,35 @@ bool HTMLMediaElement::CanBeCaptured(Str
       ContainsRestrictedContent()) {
     return false;
   }
   return true;
 }
 
 already_AddRefed<DOMMediaStream> HTMLMediaElement::CaptureStreamInternal(
     StreamCaptureBehavior aFinishBehavior, StreamCaptureType aStreamCaptureType,
-    MediaTrackGraph* aGraph) {
+    MediaStreamGraph* aGraph) {
   MOZ_RELEASE_ASSERT(aGraph);
   MOZ_ASSERT(CanBeCaptured(aStreamCaptureType));
 
   MarkAsContentSource(CallerAPI::CAPTURE_STREAM);
   MarkAsTainted();
 
   // We don't support routing to a different graph.
   if (!mOutputStreams.IsEmpty() &&
-      aGraph != mOutputStreams[0].mGraphKeepAliveDummyStream->mTrack->Graph()) {
+      aGraph !=
+          mOutputStreams[0].mGraphKeepAliveDummyStream->mStream->Graph()) {
     return nullptr;
   }
 
   OutputMediaStream* out = mOutputStreams.AppendElement();
   nsPIDOMWindowInner* window = OwnerDoc()->GetInnerWindow();
   out->mGraphKeepAliveDummyStream =
       mOutputStreams.Length() == 1
-          ? MakeRefPtr<SharedDummyTrack>(
-                aGraph->CreateSourceTrack(MediaSegment::AUDIO))
+          ? MakeRefPtr<SharedDummyStream>(aGraph->CreateSourceStream())
           : mOutputStreams[0].mGraphKeepAliveDummyStream;
   out->mStream = MakeAndAddRef<DOMMediaStream>(window);
   out->mStream->SetFinishedOnInactive(false);
   out->mFinishWhenEnded =
       aFinishBehavior == StreamCaptureBehavior::FINISH_WHEN_ENDED;
   out->mCapturingAudioOnly =
       aStreamCaptureType == StreamCaptureType::CAPTURE_AUDIO;
 
@@ -3339,17 +3338,17 @@ already_AddRefed<DOMMediaStream> HTMLMed
       }
     }
   }
   RefPtr<DOMMediaStream> result = out->mStream;
   return result.forget();
 }
 
 already_AddRefed<DOMMediaStream> HTMLMediaElement::CaptureAudio(
-    ErrorResult& aRv, MediaTrackGraph* aGraph) {
+    ErrorResult& aRv, MediaStreamGraph* aGraph) {
   MOZ_RELEASE_ASSERT(aGraph);
 
   if (!CanBeCaptured(StreamCaptureType::CAPTURE_AUDIO)) {
     aRv.Throw(NS_ERROR_FAILURE);
     return nullptr;
   }
 
   RefPtr<DOMMediaStream> stream =
@@ -3372,64 +3371,64 @@ RefPtr<GenericNonExclusivePromise> HTMLM
     return GenericNonExclusivePromise::CreateAndResolve(true, __func__);
   }
   AUTOPLAY_LOG("create allow-to-play promise for MediaElement %p", this);
   return mAllowedToPlayPromise.Ensure(__func__);
 }
 
 already_AddRefed<DOMMediaStream> HTMLMediaElement::MozCaptureStream(
     ErrorResult& aRv) {
-  MediaTrackGraph::GraphDriverType graphDriverType =
-      HasAudio() ? MediaTrackGraph::AUDIO_THREAD_DRIVER
-                 : MediaTrackGraph::SYSTEM_THREAD_DRIVER;
+  MediaStreamGraph::GraphDriverType graphDriverType =
+      HasAudio() ? MediaStreamGraph::AUDIO_THREAD_DRIVER
+                 : MediaStreamGraph::SYSTEM_THREAD_DRIVER;
 
   nsPIDOMWindowInner* window = OwnerDoc()->GetInnerWindow();
   if (!window) {
     aRv.Throw(NS_ERROR_FAILURE);
     return nullptr;
   }
 
   if (!CanBeCaptured(StreamCaptureType::CAPTURE_ALL_TRACKS)) {
     aRv.Throw(NS_ERROR_FAILURE);
     return nullptr;
   }
 
-  MediaTrackGraph* graph = MediaTrackGraph::GetInstance(
-      graphDriverType, window, MediaTrackGraph::REQUEST_DEFAULT_SAMPLE_RATE);
+  MediaStreamGraph* graph = MediaStreamGraph::GetInstance(
+      graphDriverType, window, MediaStreamGraph::REQUEST_DEFAULT_SAMPLE_RATE);
 
   RefPtr<DOMMediaStream> stream =
       CaptureStreamInternal(StreamCaptureBehavior::CONTINUE_WHEN_ENDED,
                             StreamCaptureType::CAPTURE_ALL_TRACKS, graph);
   if (!stream) {
     aRv.Throw(NS_ERROR_FAILURE);
     return nullptr;
   }
 
   return stream.forget();
 }
 
 already_AddRefed<DOMMediaStream> HTMLMediaElement::MozCaptureStreamUntilEnded(
     ErrorResult& aRv) {
-  MediaTrackGraph::GraphDriverType graphDriverType =
-      HasAudio() ? MediaTrackGraph::AUDIO_THREAD_DRIVER
-                 : MediaTrackGraph::SYSTEM_THREAD_DRIVER;
+  MediaStreamGraph::GraphDriverType graphDriverType =
+      HasAudio() ? MediaStreamGraph::AUDIO_THREAD_DRIVER
+                 : MediaStreamGraph::SYSTEM_THREAD_DRIVER;
 
   nsPIDOMWindowInner* window = OwnerDoc()->GetInnerWindow();
   if (!window) {
     aRv.Throw(NS_ERROR_FAILURE);
     return nullptr;
   }
 
   if (!CanBeCaptured(StreamCaptureType::CAPTURE_ALL_TRACKS)) {
     aRv.Throw(NS_ERROR_FAILURE);
     return nullptr;
   }
 
-  MediaTrackGraph* graph = MediaTrackGraph::GetInstance(
-      graphDriverType, window, MediaTrackGraph::REQUEST_DEFAULT_SAMPLE_RATE);
+  MediaStreamGraph* graph = MediaStreamGraph::GetInstance(
+      graphDriverType, window, MediaStreamGraph::REQUEST_DEFAULT_SAMPLE_RATE);
 
   RefPtr<DOMMediaStream> stream =
       CaptureStreamInternal(StreamCaptureBehavior::FINISH_WHEN_ENDED,
                             StreamCaptureType::CAPTURE_ALL_TRACKS, graph);
   if (!stream) {
     aRv.Throw(NS_ERROR_FAILURE);
     return nullptr;
   }
@@ -4846,17 +4845,17 @@ void HTMLMediaElement::UpdateSrcMediaStr
         mSelectedVideoStreamTrack->AddVideoOutput(mFirstFrameListener);
       }
     }
 
     SetCapturedOutputStreamsEnabled(false);  // Mute
   }
 }
 
-void HTMLMediaElement::UpdateSrcTrackTime() {
+void HTMLMediaElement::UpdateSrcStreamTime() {
   MOZ_ASSERT(NS_IsMainThread());
 
   if (mSrcStreamPlaybackEnded) {
     // We do a separate FireTimeUpdate() when this is set.
     return;
   }
 
   FireTimeUpdate(true);
@@ -4871,17 +4870,17 @@ void HTMLMediaElement::SetupSrcMediaStre
   nsPIDOMWindowInner* window = OwnerDoc()->GetInnerWindow();
   if (!window) {
     return;
   }
 
   mMediaStreamRenderer = MakeAndAddRef<MediaStreamRenderer>(
       mAbstractMainThread, GetVideoFrameContainer(), this);
   mWatchManager.Watch(mMediaStreamRenderer->CurrentGraphTime(),
-                      &HTMLMediaElement::UpdateSrcTrackTime);
+                      &HTMLMediaElement::UpdateSrcStreamTime);
   SetVolumeInternal();
 
   UpdateSrcMediaStreamPlaying();
   mSrcStreamVideoPrincipal = NodePrincipal();
 
   // If we pause this media element, track changes in the underlying stream
   // will continue to fire events at this element and alter its track list.
   // That's simpler than delaying the events, but probably confusing...
@@ -4911,17 +4910,17 @@ void HTMLMediaElement::EndSrcMediaStream
   if (mFirstFrameListener) {
     mSelectedVideoStreamTrack->RemoveVideoOutput(mFirstFrameListener);
   }
   mSelectedVideoStreamTrack = nullptr;
   mFirstFrameListener = nullptr;
 
   if (mMediaStreamRenderer) {
     mWatchManager.Unwatch(mMediaStreamRenderer->CurrentGraphTime(),
-                          &HTMLMediaElement::UpdateSrcTrackTime);
+                          &HTMLMediaElement::UpdateSrcStreamTime);
     mMediaStreamRenderer = nullptr;
   }
 
   mSrcStream->UnregisterTrackListener(mMediaStreamTrackListener.get());
   mMediaStreamTrackListener = nullptr;
   mSrcStreamTracksAvailable = false;
   mSrcStreamPlaybackEnded = false;
   mSrcStreamVideoPrincipal = nullptr;
@@ -4990,17 +4989,17 @@ void HTMLMediaElement::NotifyMediaStream
     MOZ_DIAGNOSTIC_ASSERT(VideoTracks(), "Element can't have been unlinked");
     RefPtr<VideoTrack> videoTrack =
         CreateVideoTrack(t, VideoTracks()->GetOwnerGlobal());
     VideoTracks()->AddTrack(videoTrack);
     // New MediaStreamTrack added, set the new added video track as selected
     // video track when there is no selected track.
     if (VideoTracks()->SelectedIndex() == -1) {
       MOZ_ASSERT(!mSelectedVideoStreamTrack);
-      videoTrack->SetEnabledInternal(true, dom::MediaTrack::FIRE_NO_EVENTS);
+      videoTrack->SetEnabledInternal(true, MediaTrack::FIRE_NO_EVENTS);
     }
   }
 
   UpdateReadyStateInternal();
 
   if (!mSrcStreamTracksAvailable) {
     mAbstractMainThread->Dispatch(NS_NewRunnableFunction(
         "HTMLMediaElement::NotifyMediaStreamTrackAdded->FirstFrameLoaded",
@@ -5028,19 +5027,19 @@ void HTMLMediaElement::NotifyMediaStream
   aTrack->GetId(id);
 
   LOG(LogLevel::Debug, ("%p, Removing %sTrack with id %s", this,
                         aTrack->AsAudioStreamTrack() ? "Audio" : "Video",
                         NS_ConvertUTF16toUTF8(id).get()));
 
   MOZ_DIAGNOSTIC_ASSERT(AudioTracks() && VideoTracks(),
                         "Element can't have been unlinked");
-  if (dom::MediaTrack* t = AudioTracks()->GetTrackById(id)) {
+  if (MediaTrack* t = AudioTracks()->GetTrackById(id)) {
     AudioTracks()->RemoveTrack(t);
-  } else if (dom::MediaTrack* t = VideoTracks()->GetTrackById(id)) {
+  } else if (MediaTrack* t = VideoTracks()->GetTrackById(id)) {
     VideoTracks()->RemoveTrack(t);
   } else {
     NS_ASSERTION(aTrack->AsVideoStreamTrack() && !IsVideo(),
                  "MediaStreamTrack ended but did not exist in track lists. "
                  "This is only allowed if a video element ends and we are an "
                  "audio element.");
     return;
   }
@@ -7022,39 +7021,39 @@ bool HTMLMediaElement::ShouldElementBePa
 
 void HTMLMediaElement::SetMediaInfo(const MediaInfo& aInfo) {
   const bool oldHasAudio = mMediaInfo.HasAudio();
   mMediaInfo = aInfo;
   if ((aInfo.HasAudio() != oldHasAudio) && mResumeDelayedPlaybackAgent) {
     mResumeDelayedPlaybackAgent->UpdateAudibleState(this, IsAudible());
   }
   if (mAudioChannelWrapper) {
-    mAudioChannelWrapper->AudioCaptureTrackChangeIfNeeded();
+    mAudioChannelWrapper->AudioCaptureStreamChangeIfNeeded();
   }
   UpdateWakeLock();
 }
 
-void HTMLMediaElement::AudioCaptureTrackChange(bool aCapture) {
+void HTMLMediaElement::AudioCaptureStreamChange(bool aCapture) {
   // No need to capture a silent media element.
   if (!HasAudio()) {
     return;
   }
 
   if (aCapture && !mStreamWindowCapturer) {
     nsPIDOMWindowInner* window = OwnerDoc()->GetInnerWindow();
     if (!window) {
       return;
     }
 
-    MediaTrackGraph* mtg = MediaTrackGraph::GetInstance(
-        MediaTrackGraph::AUDIO_THREAD_DRIVER, window,
-        MediaTrackGraph::REQUEST_DEFAULT_SAMPLE_RATE);
+    MediaStreamGraph* msg = MediaStreamGraph::GetInstance(
+        MediaStreamGraph::AUDIO_THREAD_DRIVER, window,
+        MediaStreamGraph::REQUEST_DEFAULT_SAMPLE_RATE);
     RefPtr<DOMMediaStream> stream =
         CaptureStreamInternal(StreamCaptureBehavior::CONTINUE_WHEN_ENDED,
-                              StreamCaptureType::CAPTURE_AUDIO, mtg);
+                              StreamCaptureType::CAPTURE_AUDIO, msg);
     mStreamWindowCapturer =
         MakeUnique<MediaStreamWindowCapturer>(stream, window->WindowID());
   } else if (!aCapture && mStreamWindowCapturer) {
     for (size_t i = 0; i < mOutputStreams.Length(); i++) {
       if (mOutputStreams[i].mStream == mStreamWindowCapturer->mStream) {
         if (mOutputStreams[i].mCapturingDecoder && mDecoder) {
           mDecoder->RemoveOutputStream(mOutputStreams[i].mStream);
         }
@@ -7349,17 +7348,17 @@ already_AddRefed<Promise> HTMLMediaEleme
                       return SinkInfoPromise::CreateAndResolve(aInfo, __func__);
                     }
                     return SinkInfoPromise::CreateAndReject(
                         aValue.RejectValue(), __func__);
                   });
               return p;
             }
             if (self->mSrcAttrStream) {
-              // Set Sink Id through MTG is not supported yet.
+              // Set Sink Id through MSG is not supported yet.
               return SinkInfoPromise::CreateAndReject(NS_ERROR_ABORT, __func__);
             }
             // No media attached to the element save it for later.
             return SinkInfoPromise::CreateAndResolve(aInfo, __func__);
           },
           [](nsresult res) {
             // Promise is rejected, sink not found.
             return SinkInfoPromise::CreateAndReject(res, __func__);
--- a/dom/html/HTMLMediaElement.h
+++ b/dom/html/HTMLMediaElement.h
@@ -48,20 +48,20 @@ namespace mozilla {
 class AbstractThread;
 class ChannelMediaDecoder;
 class DecoderDoctorDiagnostics;
 class DOMMediaStream;
 class ErrorResult;
 class MediaResource;
 class MediaDecoder;
 class MediaInputPort;
-class MediaTrack;
-class MediaTrackGraph;
+class MediaStream;
+class MediaStreamGraph;
 class MediaStreamWindowCapturer;
-struct SharedDummyTrack;
+struct SharedDummyStream;
 class VideoFrameContainer;
 namespace dom {
 class MediaKeys;
 class TextTrack;
 class TimeRanges;
 class WakeLock;
 class MediaStreamTrack;
 class MediaStreamTrackSource;
@@ -104,16 +104,17 @@ class HTMLMediaElement : public nsGeneri
                          public MediaDecoderOwner,
                          public PrincipalChangeObserver<MediaStreamTrack>,
                          public SupportsWeakPtr<HTMLMediaElement>,
                          public nsStubMutationObserver {
  public:
   typedef mozilla::TimeStamp TimeStamp;
   typedef mozilla::layers::ImageContainer ImageContainer;
   typedef mozilla::VideoFrameContainer VideoFrameContainer;
+  typedef mozilla::MediaStream MediaStream;
   typedef mozilla::MediaResource MediaResource;
   typedef mozilla::MediaDecoderOwner MediaDecoderOwner;
   typedef mozilla::MetadataTags MetadataTags;
 
   MOZ_DECLARE_WEAKREFERENCE_TYPENAME(HTMLMediaElement)
   NS_DECL_NSIMUTATIONOBSERVER_CONTENTREMOVED
 
   CORSMode GetCORSMode() { return mCORSMode; }
@@ -329,23 +330,23 @@ class HTMLMediaElement : public nsGeneri
    * whether it's appropriate to fire an error event.
    */
   void NotifyLoadError(const nsACString& aErrorDetails = nsCString());
 
   /**
    * Called by one of our associated MediaTrackLists (audio/video) when an
    * AudioTrack is enabled or a VideoTrack is selected.
    */
-  void NotifyMediaTrackEnabled(dom::MediaTrack* aTrack);
+  void NotifyMediaTrackEnabled(MediaTrack* aTrack);
 
   /**
    * Called by one of our associated MediaTrackLists (audio/video) when an
    * AudioTrack is disabled or a VideoTrack is unselected.
    */
-  void NotifyMediaTrackDisabled(dom::MediaTrack* aTrack);
+  void NotifyMediaTrackDisabled(MediaTrack* aTrack);
 
   /**
    * Returns the current load ID. Asynchronous events store the ID that was
    * current when they were enqueued, and if it has changed when they come to
    * fire, they consider themselves cancelled, and don't fire.
    */
   uint32_t GetCurrentLoadID() { return mCurrentLoadID; }
 
@@ -607,17 +608,17 @@ class HTMLMediaElement : public nsGeneri
   // in the URL bar of the browser window.
   already_AddRefed<nsIPrincipal> GetTopLevelPrincipal();
 
   bool ContainsRestrictedContent();
 
   void NotifyWaitingForKey() override;
 
   already_AddRefed<DOMMediaStream> CaptureAudio(ErrorResult& aRv,
-                                                MediaTrackGraph* aGraph);
+                                                MediaStreamGraph* aGraph);
 
   already_AddRefed<DOMMediaStream> MozCaptureStream(ErrorResult& aRv);
 
   already_AddRefed<DOMMediaStream> MozCaptureStreamUntilEnded(ErrorResult& aRv);
 
   bool MozAudioCaptured() const { return mAudioCaptured; }
 
   void MozGetMetadata(JSContext* aCx, JS::MutableHandle<JSObject*> aResult,
@@ -749,17 +750,17 @@ class HTMLMediaElement : public nsGeneri
   struct OutputMediaStream {
     OutputMediaStream();
     ~OutputMediaStream();
 
     RefPtr<DOMMediaStream> mStream;
     // Dummy stream to keep mGraph from shutting down when MediaDecoder shuts
     // down. Shared across all OutputMediaStreams as one stream is enough to
     // keep the graph alive.
-    RefPtr<SharedDummyTrack> mGraphKeepAliveDummyStream;
+    RefPtr<SharedDummyStream> mGraphKeepAliveDummyStream;
     bool mFinishWhenEnded;
     bool mCapturingAudioOnly;
     bool mCapturingDecoder;
     bool mCapturingMediaStream;
 
     // The following members are keeping state for a captured MediaStream.
     nsTArray<Pair<nsString, RefPtr<MediaStreamTrackSource>>> mTracks;
   };
@@ -824,17 +825,17 @@ class HTMLMediaElement : public nsGeneri
    */
   enum { REMOVING_SRC_STREAM = 0x1 };
   void UpdateSrcMediaStreamPlaying(uint32_t aFlags = 0);
 
   /**
    * mSrcStream's graph's CurrentTime() has been updated. It might be time to
    * fire "timeupdate".
    */
-  void UpdateSrcTrackTime();
+  void UpdateSrcStreamTime();
 
   /**
    * Called by our DOMMediaStream::TrackListener when a new MediaStreamTrack has
    * been added to the playback stream of |mSrcStream|.
    */
   void NotifyMediaStreamTrackAdded(const RefPtr<MediaStreamTrack>& aTrack);
 
   /**
@@ -852,17 +853,17 @@ class HTMLMediaElement : public nsGeneri
    */
   void SetCapturedOutputStreamsEnabled(bool aEnabled);
 
   /**
    * Create a new MediaStreamTrack for aTrack and add it to the DOMMediaStream
    * in aOutputStream. This automatically sets the output track to enabled or
    * disabled depending on our current playing state.
    */
-  void AddCaptureMediaTrackToOutputStream(dom::MediaTrack* aTrack,
+  void AddCaptureMediaTrackToOutputStream(MediaTrack* aTrack,
                                           OutputMediaStream& aOutputStream,
                                           bool aAsyncAddtrack = true);
 
   /**
    * Discard all output streams that are flagged to finish when playback ends.
    */
   void DiscardFinishWhenEndedOutputStreams();
 
@@ -874,17 +875,17 @@ class HTMLMediaElement : public nsGeneri
    * The stream will never finish.
    *
    * When aType is CAPTURE_AUDIO, we stop playout of audio and instead route it
    * to the DOMMediaStream. Volume and mute state will be applied to the audio
    * reaching the stream. No video tracks will be captured in this case.
    */
   already_AddRefed<DOMMediaStream> CaptureStreamInternal(
       StreamCaptureBehavior aBehavior, StreamCaptureType aType,
-      MediaTrackGraph* aGraph);
+      MediaStreamGraph* aGraph);
 
   /**
    * Initialize a decoder as a clone of an existing decoder in another
    * element.
    * mLoadingSrc must already be set.
    */
   nsresult InitializeDecoderAsClone(ChannelMediaDecoder* aOriginal);
 
@@ -1187,17 +1188,17 @@ class HTMLMediaElement : public nsGeneri
   // Recomputes ready state and fires events as necessary based on current
   // state.
   void UpdateReadyStateInternal();
 
   // Determine if the element should be paused because of suspend conditions.
   bool ShouldElementBePaused();
 
   // Create or destroy the captured stream.
-  void AudioCaptureTrackChange(bool aCapture);
+  void AudioCaptureStreamChange(bool aCapture);
 
   // A method to check whether the media element is allowed to start playback.
   bool AudioChannelAgentBlockedPlay();
 
   // If the network state is empty and then we would trigger DoLoad().
   void MaybeDoLoad();
 
   // Anything we need to check after played success and not related with spec.
@@ -1299,24 +1300,25 @@ class HTMLMediaElement : public nsGeneri
 
   // The DocGroup-specific AbstractThread::MainThread() of this HTML element.
   RefPtr<AbstractThread> mAbstractMainThread;
 
   // A reference to the VideoFrameContainer which contains the current frame
   // of video to display.
   RefPtr<VideoFrameContainer> mVideoFrameContainer;
 
-  // Holds a reference to the MediaStream that has been set in the src
-  // attribute.
+  // Holds a reference to the DOM wrapper for the MediaStream that has been
+  // set in the src attribute.
   RefPtr<DOMMediaStream> mSrcAttrStream;
 
   // Holds the triggering principal for the src attribute.
   nsCOMPtr<nsIPrincipal> mSrcAttrTriggeringPrincipal;
 
-  // Holds a reference to the MediaStream that we're actually playing.
+  // Holds a reference to the DOM wrapper for the MediaStream that we're
+  // actually playing.
   // At most one of mDecoder and mSrcStream can be non-null.
   RefPtr<DOMMediaStream> mSrcStream;
 
   // The MediaStreamRenderer handles rendering of our selected video track, and
   // enabled audio tracks, while mSrcStream is set.
   RefPtr<MediaStreamRenderer> mMediaStreamRenderer;
 
   // True once mSrcStream's initial set of tracks are known.
--- a/dom/media/AudioBufferUtils.h
+++ b/dom/media/AudioBufferUtils.h
@@ -95,20 +95,20 @@ class AudioCallbackBufferWrapper {
   }
 
   /**
    * Check that the buffer is completly filled, and reset internal state so this
    * instance can be reused.
    */
   void BufferFilled() {
     // It's okay to have exactly zero samples here, it can happen we have an
-    // audio callback driver because of a hint on MTG creation, but the
-    // AudioOutputStream has not been created yet, or if all the tracks have
+    // audio callback driver because of a hint on MSG creation, but the
+    // AudioOutputStream has not been created yet, or if all the streams have
     // finished but we're still running. Note: it's also ok if we had data in
-    // the scratch buffer - and we usually do - and all the tracks were ended
+    // the scratch buffer - and we usually do - and all the streams were ended
     // (no mixer callback occured).
     // XXX Remove this warning, or find a way to avoid it if the mixer callback
     // isn't called.
     NS_WARNING_ASSERTION(
         Available() == 0 || mSampleWriteOffset == 0,
         "Audio Buffer is not full by the end of the callback.");
     // Make sure the data returned is always set and not random!
     if (Available()) {
@@ -130,17 +130,17 @@ class AudioCallbackBufferWrapper {
   /* The position at which new samples should be written. We want to return to
    * the audio callback iff this is equal to mSamples. */
   uint32_t mSampleWriteOffset;
   uint32_t mChannels;
 };
 
 /**
  * This is a class that interfaces with the AudioCallbackBufferWrapper, and is
- * responsible for storing the excess of data produced by the MediaTrackGraph
+ * responsible for storing the excess of data produced by the MediaStreamGraph
  * because of different rounding constraints, to be used the next time the audio
  * backend calls back.
  */
 template <typename T, uint32_t BLOCK_SIZE>
 class SpillBuffer {
  public:
   SpillBuffer() : mBuffer(nullptr), mPosition(0), mChannels(0) {}
 
rename from dom/media/AudioCaptureTrack.cpp
rename to dom/media/AudioCaptureStream.cpp
--- a/dom/media/AudioCaptureTrack.cpp
+++ b/dom/media/AudioCaptureStream.cpp
@@ -1,115 +1,124 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-#include "MediaTrackGraphImpl.h"
-#include "MediaTrackListener.h"
+#include "MediaStreamGraphImpl.h"
+#include "MediaStreamListener.h"
 #include "mozilla/MathAlgorithms.h"
 #include "mozilla/Unused.h"
 
 #include "AudioSegment.h"
 #include "mozilla/Logging.h"
 #include "mozilla/Attributes.h"
-#include "AudioCaptureTrack.h"
+#include "AudioCaptureStream.h"
 #include "ImageContainer.h"
 #include "AudioNodeEngine.h"
-#include "AudioNodeTrack.h"
-#include "AudioNodeExternalInputTrack.h"
+#include "AudioNodeStream.h"
+#include "AudioNodeExternalInputStream.h"
 #include "webaudio/MediaStreamAudioDestinationNode.h"
 #include <algorithm>
 #include "DOMMediaStream.h"
 
 using namespace mozilla::layers;
 using namespace mozilla::dom;
 using namespace mozilla::gfx;
 
 namespace mozilla {
 
 // We are mixing to mono until PeerConnection can accept stereo
 static const uint32_t MONO = 1;
 
-AudioCaptureTrack::AudioCaptureTrack(TrackRate aRate)
-    : ProcessedMediaTrack(aRate, MediaSegment::AUDIO, new AudioSegment()),
-      mStarted(false) {
+AudioCaptureStream::AudioCaptureStream(TrackID aTrackId)
+    : ProcessedMediaStream(), mTrackId(aTrackId), mStarted(false) {
   MOZ_ASSERT(NS_IsMainThread());
-  MOZ_COUNT_CTOR(AudioCaptureTrack);
+  MOZ_COUNT_CTOR(AudioCaptureStream);
   mMixer.AddCallback(this);
 }
 
-AudioCaptureTrack::~AudioCaptureTrack() {
-  MOZ_COUNT_DTOR(AudioCaptureTrack);
+AudioCaptureStream::~AudioCaptureStream() {
+  MOZ_COUNT_DTOR(AudioCaptureStream);
   mMixer.RemoveCallback(this);
 }
 
-void AudioCaptureTrack::Start() {
+void AudioCaptureStream::Start() {
   class Message : public ControlMessage {
    public:
-    explicit Message(AudioCaptureTrack* aTrack)
-        : ControlMessage(aTrack), mTrack(aTrack) {}
+    explicit Message(AudioCaptureStream* aStream)
+        : ControlMessage(aStream), mStream(aStream) {}
 
-    virtual void Run() { mTrack->mStarted = true; }
+    virtual void Run() { mStream->mStarted = true; }
 
    protected:
-    AudioCaptureTrack* mTrack;
+    AudioCaptureStream* mStream;
   };
   GraphImpl()->AppendMessage(MakeUnique<Message>(this));
 }
 
-void AudioCaptureTrack::ProcessInput(GraphTime aFrom, GraphTime aTo,
-                                     uint32_t aFlags) {
+void AudioCaptureStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
+                                      uint32_t aFlags) {
   if (!mStarted) {
     return;
   }
 
   uint32_t inputCount = mInputs.Length();
+  StreamTracks::Track* track = EnsureTrack(mTrackId);
 
-  if (mEnded) {
+  if (IsFinishedOnGraphThread()) {
     return;
   }
 
-  // If the captured track is connected back to a object on the page (be it an
-  // HTMLMediaElement with a track as source, or an AudioContext), a cycle
+  // If the captured stream is connected back to a object on the page (be it an
+  // HTMLMediaElement with a stream as source, or an AudioContext), a cycle
   // situation occur. This can work if it's an AudioContext with at least one
-  // DelayNode, but the MTG will mute the whole cycle otherwise.
+  // DelayNode, but the MSG will mute the whole cycle otherwise.
   if (InMutedCycle() || inputCount == 0) {
-    GetData<AudioSegment>()->AppendNullData(aTo - aFrom);
+    track->Get<AudioSegment>()->AppendNullData(aTo - aFrom);
   } else {
     // We mix down all the tracks of all inputs, to a stereo track. Everything
     // is {up,down}-mixed to stereo.
     mMixer.StartMixing();
     AudioSegment output;
     for (uint32_t i = 0; i < inputCount; i++) {
-      MediaTrack* s = mInputs[i]->GetSource();
-      AudioSegment* inputSegment = s->GetData<AudioSegment>();
-      TrackTime inputStart = s->GraphTimeToTrackTimeWithBlocking(aFrom);
-      TrackTime inputEnd = s->GraphTimeToTrackTimeWithBlocking(aTo);
-      AudioSegment toMix;
-      if (s->Ended() && inputSegment->GetDuration() <= inputStart) {
+      MediaStream* s = mInputs[i]->GetSource();
+      StreamTracks::TrackIter track(s->GetStreamTracks(), MediaSegment::AUDIO);
+      if (track.IsEnded()) {
+        // No tracks for this input. Still we append data to trigger the mixer.
+        AudioSegment toMix;
         toMix.AppendNullData(aTo - aFrom);
-      } else {
-        toMix.AppendSlice(*inputSegment, inputStart, inputEnd);
-        // Care for tracks blocked in the [aTo, aFrom] range.
-        if (inputEnd - inputStart < aTo - aFrom) {
-          toMix.AppendNullData((aTo - aFrom) - (inputEnd - inputStart));
+        toMix.Mix(mMixer, MONO, Graph()->GraphRate());
+      }
+      for (; !track.IsEnded(); track.Next()) {
+        AudioSegment* inputSegment = track->Get<AudioSegment>();
+        StreamTime inputStart = s->GraphTimeToStreamTimeWithBlocking(aFrom);
+        StreamTime inputEnd = s->GraphTimeToStreamTimeWithBlocking(aTo);
+        AudioSegment toMix;
+        if (track->IsEnded() && inputSegment->GetDuration() <= inputStart) {
+          toMix.AppendNullData(aTo - aFrom);
+        } else {
+          toMix.AppendSlice(*inputSegment, inputStart, inputEnd);
+          // Care for streams blocked in the [aTo, aFrom] range.
+          if (inputEnd - inputStart < aTo - aFrom) {
+            toMix.AppendNullData((aTo - aFrom) - (inputEnd - inputStart));
+          }
         }
+        toMix.Mix(mMixer, MONO, Graph()->GraphRate());
       }
-      toMix.Mix(mMixer, MONO, Graph()->GraphRate());
     }
     // This calls MixerCallback below
     mMixer.FinishMixing();
   }
 }
 
-void AudioCaptureTrack::MixerCallback(AudioDataValue* aMixedBuffer,
-                                      AudioSampleFormat aFormat,
-                                      uint32_t aChannels, uint32_t aFrames,
-                                      uint32_t aSampleRate) {
+void AudioCaptureStream::MixerCallback(AudioDataValue* aMixedBuffer,
+                                       AudioSampleFormat aFormat,
+                                       uint32_t aChannels, uint32_t aFrames,
+                                       uint32_t aSampleRate) {
   AutoTArray<nsTArray<AudioDataValue>, MONO> output;
   AutoTArray<const AudioDataValue*, MONO> bufferPtrs;
   output.SetLength(MONO);
   bufferPtrs.SetLength(MONO);
 
   uint32_t written = 0;
   // We need to copy here, because the mixer will reuse the storage, we should
   // not hold onto it. Buffers are in planar format.
@@ -124,12 +133,12 @@ void AudioCaptureTrack::MixerCallback(Au
       new mozilla::SharedChannelArrayBuffer<AudioDataValue>(&output);
   chunk.mDuration = aFrames;
   chunk.mBufferFormat = aFormat;
   chunk.mChannelData.SetLength(MONO);
   for (uint32_t channel = 0; channel < aChannels; channel++) {
     chunk.mChannelData[channel] = bufferPtrs[channel];
   }
 
-  // Now we have mixed data, simply append it.
-  GetData<AudioSegment>()->AppendAndConsumeChunk(&chunk);
+  // Now we have mixed data, simply append it to out track.
+  EnsureTrack(mTrackId)->Get<AudioSegment>()->AppendAndConsumeChunk(&chunk);
 }
 }  // namespace mozilla
rename from dom/media/AudioCaptureTrack.h
rename to dom/media/AudioCaptureStream.h
--- a/dom/media/AudioCaptureTrack.h
+++ b/dom/media/AudioCaptureStream.h
@@ -1,41 +1,43 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-#ifndef MOZILLA_AUDIOCAPTURETRACK_H_
-#define MOZILLA_AUDIOCAPTURETRACK_H_
+#ifndef MOZILLA_AUDIOCAPTURESTREAM_H_
+#define MOZILLA_AUDIOCAPTURESTREAM_H_
 
-#include "MediaTrackGraph.h"
+#include "MediaStreamGraph.h"
 #include "AudioMixer.h"
+#include "StreamTracks.h"
 #include <algorithm>
 
 namespace mozilla {
 
 class AbstractThread;
 class DOMMediaStream;
 
 /**
- * See MediaTrackGraph::CreateAudioCaptureTrack.
+ * See MediaStreamGraph::CreateAudioCaptureStream.
  */
-class AudioCaptureTrack : public ProcessedMediaTrack,
-                          public MixerCallbackReceiver {
+class AudioCaptureStream : public ProcessedMediaStream,
+                           public MixerCallbackReceiver {
  public:
-  explicit AudioCaptureTrack(TrackRate aRate);
-  virtual ~AudioCaptureTrack();
+  explicit AudioCaptureStream(TrackID aTrackId);
+  virtual ~AudioCaptureStream();
 
   void Start();
 
   void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) override;
 
  protected:
   void MixerCallback(AudioDataValue* aMixedBuffer, AudioSampleFormat aFormat,
                      uint32_t aChannels, uint32_t aFrames,
                      uint32_t aSampleRate) override;
   AudioMixer mMixer;
+  TrackID mTrackId;
   bool mStarted;
   bool mTrackCreated;
 };
 }  // namespace mozilla
 
-#endif /* MOZILLA_AUDIOCAPTURETRACK_H_ */
+#endif /* MOZILLA_AUDIOCAPTURESTREAM_H_ */
--- a/dom/media/AudioSampleFormat.h
+++ b/dom/media/AudioSampleFormat.h
@@ -7,17 +7,17 @@
 #define MOZILLA_AUDIOSAMPLEFORMAT_H_
 
 #include "mozilla/Assertions.h"
 #include <algorithm>
 
 namespace mozilla {
 
 /**
- * Audio formats supported in MediaTracks and media elements.
+ * Audio formats supported in MediaStreams and media elements.
  *
  * Only one of these is supported by AudioStream, and that is determined
  * at compile time (roughly, FLOAT32 on desktops, S16 on mobile). Media decoders
  * produce that format only; queued AudioData always uses that format.
  */
 enum AudioSampleFormat {
   // Silence: format will be chosen later
   AUDIO_FORMAT_SILENCE,
--- a/dom/media/AudioSegment.h
+++ b/dom/media/AudioSegment.h
@@ -144,30 +144,30 @@ void DownmixAndInterleave(const nsTArray
  * pointers so it can represent a subinterval of a buffer without copying.
  * An AudioChunk can store its individual channels anywhere; it maintains
  * separate pointers to each channel's buffer.
  */
 struct AudioChunk {
   typedef mozilla::AudioSampleFormat SampleFormat;
 
   // Generic methods
-  void SliceTo(TrackTime aStart, TrackTime aEnd) {
+  void SliceTo(StreamTime aStart, StreamTime aEnd) {
     MOZ_ASSERT(aStart >= 0 && aStart < aEnd && aEnd <= mDuration,
                "Slice out of bounds");
     if (mBuffer) {
       MOZ_ASSERT(aStart < INT32_MAX,
                  "Can't slice beyond 32-bit sample lengths");
       for (uint32_t channel = 0; channel < mChannelData.Length(); ++channel) {
         mChannelData[channel] = AddAudioSampleOffset(
             mChannelData[channel], mBufferFormat, int32_t(aStart));
       }
     }
     mDuration = aEnd - aStart;
   }
-  TrackTime GetDuration() const { return mDuration; }
+  StreamTime GetDuration() const { return mDuration; }
   bool CanCombineWithFollowing(const AudioChunk& aOther) const {
     if (aOther.mBuffer != mBuffer) {
       return false;
     }
     if (!mBuffer) {
       return true;
     }
     if (aOther.mVolume != mVolume) {
@@ -188,17 +188,17 @@ struct AudioChunk {
           AddAudioSampleOffset(mChannelData[channel], mBufferFormat,
                                int32_t(mDuration))) {
         return false;
       }
     }
     return true;
   }
   bool IsNull() const { return mBuffer == nullptr; }
-  void SetNull(TrackTime aDuration) {
+  void SetNull(StreamTime aDuration) {
     mBuffer = nullptr;
     mChannelData.Clear();
     mDuration = aDuration;
     mVolume = 1.0f;
     mBufferFormat = AUDIO_FORMAT_SILENCE;
     mPrincipalHandle = PRINCIPAL_HANDLE_NONE;
   }
 
@@ -257,17 +257,17 @@ struct AudioChunk {
   T* ChannelDataForWrite(size_t aChannel) {
     MOZ_ASSERT(AudioSampleTypeToFormat<T>::Format == mBufferFormat);
     MOZ_ASSERT(!mBuffer->IsShared());
     return static_cast<T*>(const_cast<void*>(mChannelData[aChannel]));
   }
 
   const PrincipalHandle& GetPrincipalHandle() const { return mPrincipalHandle; }
 
-  TrackTime mDuration = 0;             // in frames within the buffer
+  StreamTime mDuration = 0;            // in frames within the buffer
   RefPtr<ThreadSharedObject> mBuffer;  // the buffer object whose lifetime is
                                        // managed; null means data is all zeroes
   // one pointer per channel; empty if and only if mBuffer is null
   AutoTArray<const void*, GUESS_AUDIO_CHANNELS> mChannelData;
   float mVolume = 1.0f;  // volume multiplier to apply
   // format of frames in mBuffer (or silence if mBuffer is null)
   SampleFormat mBufferFormat = AUDIO_FORMAT_SILENCE;
   // principalHandle for the data in this chunk.
--- a/dom/media/AudioStreamTrack.cpp
+++ b/dom/media/AudioStreamTrack.cpp
@@ -1,49 +1,49 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "AudioStreamTrack.h"
 
-#include "MediaTrackGraph.h"
+#include "MediaStreamGraph.h"
 #include "nsContentUtils.h"
 
 namespace mozilla {
 namespace dom {
 
 void AudioStreamTrack::AddAudioOutput(void* aKey) {
   if (Ended()) {
     return;
   }
-  mTrack->AddAudioOutput(aKey);
+  mStream->AddAudioOutput(aKey);
 }
 
 void AudioStreamTrack::RemoveAudioOutput(void* aKey) {
   if (Ended()) {
     return;
   }
-  mTrack->RemoveAudioOutput(aKey);
+  mStream->RemoveAudioOutput(aKey);
 }
 
 void AudioStreamTrack::SetAudioOutputVolume(void* aKey, float aVolume) {
   if (Ended()) {
     return;
   }
-  mTrack->SetAudioOutputVolume(aKey, aVolume);
+  mStream->SetAudioOutputVolume(aKey, aVolume);
 }
 
 void AudioStreamTrack::GetLabel(nsAString& aLabel, CallerType aCallerType) {
   if (nsContentUtils::ResistFingerprinting(aCallerType)) {
     aLabel.AssignLiteral("Internal Microphone");
     return;
   }
   MediaStreamTrack::GetLabel(aLabel, aCallerType);
 }
 
 already_AddRefed<MediaStreamTrack> AudioStreamTrack::CloneInternal() {
-  return do_AddRef(new AudioStreamTrack(mWindow, mInputTrack, mSource,
-                                        ReadyState(), mConstraints));
+  return do_AddRef(new AudioStreamTrack(mWindow, mInputStream, mTrackID,
+                                        mSource, ReadyState(), mConstraints));
 }
 
 }  // namespace dom
 }  // namespace mozilla
--- a/dom/media/AudioStreamTrack.h
+++ b/dom/media/AudioStreamTrack.h
@@ -10,21 +10,21 @@
 #include "DOMMediaStream.h"
 
 namespace mozilla {
 namespace dom {
 
 class AudioStreamTrack : public MediaStreamTrack {
  public:
   AudioStreamTrack(
-      nsPIDOMWindowInner* aWindow, mozilla::MediaTrack* aInputTrack,
+      nsPIDOMWindowInner* aWindow, MediaStream* aInputStream, TrackID aTrackID,
       MediaStreamTrackSource* aSource,
       MediaStreamTrackState aReadyState = MediaStreamTrackState::Live,
       const MediaTrackConstraints& aConstraints = MediaTrackConstraints())
-      : MediaStreamTrack(aWindow, aInputTrack, aSource, aReadyState,
+      : MediaStreamTrack(aWindow, aInputStream, aTrackID, aSource, aReadyState,
                          aConstraints) {}
 
   AudioStreamTrack* AsAudioStreamTrack() override { return this; }
   const AudioStreamTrack* AsAudioStreamTrack() const override { return this; }
 
   void AddAudioOutput(void* aKey);
   void RemoveAudioOutput(void* aKey);
   void SetAudioOutputVolume(void* aKey, float aVolume);
--- a/dom/media/CanvasCaptureMediaStream.cpp
+++ b/dom/media/CanvasCaptureMediaStream.cpp
@@ -2,38 +2,41 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "CanvasCaptureMediaStream.h"
 
 #include "DOMMediaStream.h"
 #include "ImageContainer.h"
-#include "MediaTrackGraph.h"
-#include "Tracing.h"
-#include "VideoSegment.h"
+#include "MediaStreamGraph.h"
 #include "gfxPlatform.h"
 #include "mozilla/Atomics.h"
 #include "mozilla/dom/CanvasCaptureMediaStreamBinding.h"
 #include "mozilla/gfx/2D.h"
 #include "nsContentUtils.h"
+#include "Tracing.h"
 
 using namespace mozilla::layers;
 using namespace mozilla::gfx;
 
 namespace mozilla {
 namespace dom {
 
-OutputStreamDriver::OutputStreamDriver(SourceMediaTrack* aSourceStream,
+OutputStreamDriver::OutputStreamDriver(SourceMediaStream* aSourceStream,
+                                       const TrackID& aTrackId,
                                        const PrincipalHandle& aPrincipalHandle)
     : FrameCaptureListener(),
+      mTrackId(aTrackId),
       mSourceStream(aSourceStream),
       mPrincipalHandle(aPrincipalHandle) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(mSourceStream);
+  MOZ_ASSERT(IsTrackIDExplicit(mTrackId));
+  mSourceStream->AddTrack(aTrackId, new VideoSegment());
 
   // All CanvasCaptureMediaStreams shall at least get one frame.
   mFrameCaptureRequested = true;
 }
 
 OutputStreamDriver::~OutputStreamDriver() {
   MOZ_ASSERT(NS_IsMainThread());
   EndTrack();
@@ -45,31 +48,32 @@ void OutputStreamDriver::EndTrack() {
     mSourceStream->Destroy();
   }
 }
 
 void OutputStreamDriver::SetImage(const RefPtr<layers::Image>& aImage,
                                   const TimeStamp& aTime) {
   MOZ_ASSERT(NS_IsMainThread());
 
-  TRACE_COMMENT("SourceMediaTrack %p", mSourceStream.get());
+  TRACE_COMMENT("SourceMediaStream %p track %i", mSourceStream.get(), mTrackId);
 
   VideoSegment segment;
   segment.AppendFrame(do_AddRef(aImage), aImage->GetSize(), mPrincipalHandle,
                       false, aTime);
-  mSourceStream->AppendData(&segment);
+  mSourceStream->AppendToTrack(mTrackId, &segment);
 }
 
 // ----------------------------------------------------------------------
 
 class TimerDriver : public OutputStreamDriver {
  public:
-  explicit TimerDriver(SourceMediaTrack* aSourceStream, const double& aFPS,
+  explicit TimerDriver(SourceMediaStream* aSourceStream, const double& aFPS,
+                       const TrackID& aTrackId,
                        const PrincipalHandle& aPrincipalHandle)
-      : OutputStreamDriver(aSourceStream, aPrincipalHandle),
+      : OutputStreamDriver(aSourceStream, aTrackId, aPrincipalHandle),
         mFPS(aFPS),
         mTimer(nullptr) {
     if (mFPS == 0.0) {
       return;
     }
 
     NS_NewTimerWithFuncCallback(
         getter_AddRefs(mTimer), &TimerTick, this, int(1000 / mFPS),
@@ -109,19 +113,19 @@ class TimerDriver : public OutputStreamD
   const double mFPS;
   nsCOMPtr<nsITimer> mTimer;
 };
 
 // ----------------------------------------------------------------------
 
 class AutoDriver : public OutputStreamDriver {
  public:
-  explicit AutoDriver(SourceMediaTrack* aSourceStream,
+  explicit AutoDriver(SourceMediaStream* aSourceStream, const TrackID& aTrackId,
                       const PrincipalHandle& aPrincipalHandle)
-      : OutputStreamDriver(aSourceStream, aPrincipalHandle) {}
+      : OutputStreamDriver(aSourceStream, aTrackId, aPrincipalHandle) {}
 
   void NewFrame(already_AddRefed<Image> aImage,
                 const TimeStamp& aTime) override {
     // Don't reset `mFrameCaptureRequested` since AutoDriver shall always have
     // `mFrameCaptureRequested` set to true.
     // This also means we should accept every frame as NewFrame is called only
     // after something changed.
 
@@ -161,30 +165,32 @@ JSObject* CanvasCaptureMediaStream::Wrap
 
 void CanvasCaptureMediaStream::RequestFrame() {
   if (mOutputStreamDriver) {
     mOutputStreamDriver->RequestFrameCapture();
   }
 }
 
 nsresult CanvasCaptureMediaStream::Init(const dom::Optional<double>& aFPS,
+                                        const TrackID aTrackId,
                                         nsIPrincipal* aPrincipal) {
-  MediaTrackGraph* graph = MediaTrackGraph::GetInstance(
-      MediaTrackGraph::SYSTEM_THREAD_DRIVER, mWindow,
-      MediaTrackGraph::REQUEST_DEFAULT_SAMPLE_RATE);
-  SourceMediaTrack* source = graph->CreateSourceTrack(MediaSegment::VIDEO);
+  MediaStreamGraph* graph = MediaStreamGraph::GetInstance(
+      MediaStreamGraph::SYSTEM_THREAD_DRIVER, mWindow,
+      MediaStreamGraph::REQUEST_DEFAULT_SAMPLE_RATE);
+  SourceMediaStream* source = graph->CreateSourceStream();
   PrincipalHandle principalHandle = MakePrincipalHandle(aPrincipal);
   if (!aFPS.WasPassed()) {
-    mOutputStreamDriver = new AutoDriver(source, principalHandle);
+    mOutputStreamDriver = new AutoDriver(source, aTrackId, principalHandle);
   } else if (aFPS.Value() < 0) {
     return NS_ERROR_ILLEGAL_VALUE;
   } else {
     // Cap frame rate to 60 FPS for sanity
     double fps = std::min(60.0, aFPS.Value());
-    mOutputStreamDriver = new TimerDriver(source, fps, principalHandle);
+    mOutputStreamDriver =
+        new TimerDriver(source, fps, aTrackId, principalHandle);
   }
   return NS_OK;
 }
 
 FrameCaptureListener* CanvasCaptureMediaStream::FrameCaptureListener() {
   return mOutputStreamDriver;
 }
 
@@ -193,17 +199,17 @@ void CanvasCaptureMediaStream::StopCaptu
     return;
   }
 
   mOutputStreamDriver->EndTrack();
   mOutputStreamDriver->Forget();
   mOutputStreamDriver = nullptr;
 }
 
-SourceMediaTrack* CanvasCaptureMediaStream::GetSourceStream() const {
+SourceMediaStream* CanvasCaptureMediaStream::GetSourceStream() const {
   if (!mOutputStreamDriver) {
     return nullptr;
   }
   return mOutputStreamDriver->mSourceStream;
 }
 
 }  // namespace dom
 }  // namespace mozilla
--- a/dom/media/CanvasCaptureMediaStream.h
+++ b/dom/media/CanvasCaptureMediaStream.h
@@ -3,23 +3,23 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef mozilla_dom_CanvasCaptureMediaStream_h_
 #define mozilla_dom_CanvasCaptureMediaStream_h_
 
 #include "DOMMediaStream.h"
 #include "mozilla/dom/HTMLCanvasElement.h"
-#include "PrincipalHandle.h"
+#include "StreamTracks.h"
 
 class nsIPrincipal;
 
 namespace mozilla {
 class DOMMediaStream;
-class SourceMediaTrack;
+class SourceMediaStream;
 
 namespace layers {
 class Image;
 }  // namespace layers
 
 namespace dom {
 class CanvasCaptureMediaStream;
 class HTMLCanvasElement;
@@ -42,86 +42,88 @@ class OutputStreamFrameListener;
  * |        | ------------------------> |   OutputStreamDriver   |
  * | Canvas |  SetFrameCapture()        | (FrameCaptureListener) |
  * |________| ------------------------> |________________________|
  *                                                  |
  *                                                  | SetImage() -
  *                                                  | AppendToTrack()
  *                                                  |
  *                                                  v
- *                                      __________________________
- *                                     |                          |
- *                                     |  MTG / SourceMediaTrack  |
- *                                     |__________________________|
+ *                                      ___________________________
+ *                                     |                           |
+ *                                     |  MSG / SourceMediaStream  |
+ *                                     |___________________________|
  * ----------------------------------------------------------------------------
  */
 
 /*
  * Base class for drivers of the output stream.
  * It is up to each sub class to implement the NewFrame() callback of
  * FrameCaptureListener.
  */
 class OutputStreamDriver : public FrameCaptureListener {
  public:
-  OutputStreamDriver(SourceMediaTrack* aSourceStream,
+  OutputStreamDriver(SourceMediaStream* aSourceStream, const TrackID& aTrackId,
                      const PrincipalHandle& aPrincipalHandle);
 
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(OutputStreamDriver);
 
   /*
    * Sub classes can SetImage() to update the image being appended to the
-   * output stream. It will be appended on the next NotifyPull from MTG.
+   * output stream. It will be appended on the next NotifyPull from MSG.
    */
   void SetImage(const RefPtr<layers::Image>& aImage, const TimeStamp& aTime);
 
   /*
    * Ends the track in mSourceStream when we know there won't be any more images
    * requested for it.
    */
   void EndTrack();
 
   /*
    * Makes sure any internal resources this driver is holding that may create
    * reference cycles are released.
    */
   virtual void Forget() {}
 
-  const RefPtr<SourceMediaTrack> mSourceStream;
+  const TrackID mTrackId;
+  const RefPtr<SourceMediaStream> mSourceStream;
   const PrincipalHandle mPrincipalHandle;
 
  protected:
   virtual ~OutputStreamDriver();
 };
 
 class CanvasCaptureMediaStream : public DOMMediaStream {
  public:
   CanvasCaptureMediaStream(nsPIDOMWindowInner* aWindow,
                            HTMLCanvasElement* aCanvas);
 
   NS_DECL_ISUPPORTS_INHERITED
   NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(CanvasCaptureMediaStream,
                                            DOMMediaStream)
 
-  nsresult Init(const dom::Optional<double>& aFPS, nsIPrincipal* aPrincipal);
+  nsresult Init(const dom::Optional<double>& aFPS, const TrackID aTrackId,
+                nsIPrincipal* aPrincipal);
 
   JSObject* WrapObject(JSContext* aCx,
                        JS::Handle<JSObject*> aGivenProto) override;
 
   // WebIDL
   HTMLCanvasElement* Canvas() const { return mCanvas; }
   void RequestFrame();
 
   dom::FrameCaptureListener* FrameCaptureListener();
 
   /**
    * Stops capturing for this stream at mCanvas.
    */
   void StopCapture();
 
-  SourceMediaTrack* GetSourceStream() const;
+  SourceMediaStream* GetSourceStream() const;
 
  protected:
   ~CanvasCaptureMediaStream();
 
  private:
   RefPtr<HTMLCanvasElement> mCanvas;
   RefPtr<OutputStreamDriver> mOutputStreamDriver;
 };
--- a/dom/media/CubebUtils.cpp
+++ b/dom/media/CubebUtils.cpp
@@ -38,17 +38,17 @@
 
 #define AUDIOIPC_POOL_SIZE_DEFAULT 2
 #define AUDIOIPC_STACK_SIZE_DEFAULT (64 * 4096)
 
 #define PREF_VOLUME_SCALE "media.volume_scale"
 #define PREF_CUBEB_BACKEND "media.cubeb.backend"
 #define PREF_CUBEB_OUTPUT_DEVICE "media.cubeb.output_device"
 #define PREF_CUBEB_LATENCY_PLAYBACK "media.cubeb_latency_playback_ms"
-#define PREF_CUBEB_LATENCY_MTG "media.cubeb_latency_mtg_frames"
+#define PREF_CUBEB_LATENCY_MSG "media.cubeb_latency_msg_frames"
 // Allows to get something non-default for the preferred sample-rate, to allow
 // troubleshooting in the field and testing.
 #define PREF_CUBEB_FORCE_SAMPLE_RATE "media.cubeb.force_sample_rate"
 #define PREF_CUBEB_LOGGING_LEVEL "media.cubeb.logging_level"
 // Hidden pref used by tests to force failure to obtain cubeb context
 #define PREF_CUBEB_FORCE_NULL_CONTEXT "media.cubeb.force_null_context"
 // Hidden pref to disable BMO 1427011 experiment; can be removed once proven.
 #define PREF_CUBEB_DISABLE_DEVICE_SWITCHING \
@@ -107,23 +107,23 @@ StaticMutex sMutex;
 enum class CubebState {
   Uninitialized = 0,
   Initialized,
   Shutdown
 } sCubebState = CubebState::Uninitialized;
 cubeb* sCubebContext;
 double sVolumeScale = 1.0;
 uint32_t sCubebPlaybackLatencyInMilliseconds = 100;
-uint32_t sCubebMTGLatencyInFrames = 512;
+uint32_t sCubebMSGLatencyInFrames = 512;
 // If sCubebForcedSampleRate is zero, PreferredSampleRate will return the
 // preferred sample-rate for the audio backend in use. Otherwise, it will be
 // used as the preferred sample-rate.
 uint32_t sCubebForcedSampleRate = 0;
 bool sCubebPlaybackLatencyPrefSet = false;
-bool sCubebMTGLatencyPrefSet = false;
+bool sCubebMSGLatencyPrefSet = false;
 bool sAudioStreamInitEverSucceeded = false;
 bool sCubebForceNullContext = false;
 bool sCubebDisableDeviceSwitching = true;
 #ifdef MOZ_CUBEB_REMOTING
 bool sCubebSandbox = false;
 size_t sAudioIPCPoolSize;
 size_t sAudioIPCStackSize;
 #endif
@@ -221,25 +221,25 @@ void PrefChanged(const char* aPref, void
     StaticMutexAutoLock lock(sMutex);
     // Arbitrary default stream latency of 100ms.  The higher this
     // value, the longer stream volume changes will take to become
     // audible.
     sCubebPlaybackLatencyPrefSet = Preferences::HasUserValue(aPref);
     uint32_t value = Preferences::GetUint(aPref, CUBEB_NORMAL_LATENCY_MS);
     sCubebPlaybackLatencyInMilliseconds =
         std::min<uint32_t>(std::max<uint32_t>(value, 1), 1000);
-  } else if (strcmp(aPref, PREF_CUBEB_LATENCY_MTG) == 0) {
+  } else if (strcmp(aPref, PREF_CUBEB_LATENCY_MSG) == 0) {
     StaticMutexAutoLock lock(sMutex);
-    sCubebMTGLatencyPrefSet = Preferences::HasUserValue(aPref);
+    sCubebMSGLatencyPrefSet = Preferences::HasUserValue(aPref);
     uint32_t value = Preferences::GetUint(aPref, CUBEB_NORMAL_LATENCY_FRAMES);
     // 128 is the block size for the Web Audio API, which limits how low the
     // latency can be here.
     // We don't want to limit the upper limit too much, so that people can
     // experiment.
-    sCubebMTGLatencyInFrames =
+    sCubebMSGLatencyInFrames =
         std::min<uint32_t>(std::max<uint32_t>(value, 128), 1e6);
   } else if (strcmp(aPref, PREF_CUBEB_FORCE_SAMPLE_RATE) == 0) {
     StaticMutexAutoLock lock(sMutex);
     sCubebForcedSampleRate = Preferences::GetUint(aPref);
   } else if (strcmp(aPref, PREF_CUBEB_LOGGING_LEVEL) == 0) {
     nsAutoCString value;
     Preferences::GetCString(aPref, value);
     LogModule* cubebLog = LogModule::Get("cubeb");
@@ -575,47 +575,47 @@ uint32_t GetCubebPlaybackLatencyInMillis
   return sCubebPlaybackLatencyInMilliseconds;
 }
 
 bool CubebPlaybackLatencyPrefSet() {
   StaticMutexAutoLock lock(sMutex);
   return sCubebPlaybackLatencyPrefSet;
 }
 
-bool CubebMTGLatencyPrefSet() {
+bool CubebMSGLatencyPrefSet() {
   StaticMutexAutoLock lock(sMutex);
-  return sCubebMTGLatencyPrefSet;
+  return sCubebMSGLatencyPrefSet;
 }
 
-uint32_t GetCubebMTGLatencyInFrames(cubeb_stream_params* params) {
+uint32_t GetCubebMSGLatencyInFrames(cubeb_stream_params* params) {
   StaticMutexAutoLock lock(sMutex);
-  if (sCubebMTGLatencyPrefSet) {
-    MOZ_ASSERT(sCubebMTGLatencyInFrames > 0);
-    return sCubebMTGLatencyInFrames;
+  if (sCubebMSGLatencyPrefSet) {
+    MOZ_ASSERT(sCubebMSGLatencyInFrames > 0);
+    return sCubebMSGLatencyInFrames;
   }
 
 #ifdef MOZ_WIDGET_ANDROID
   return AndroidGetAudioOutputFramesPerBuffer();
 #else
   cubeb* context = GetCubebContextUnlocked();
   if (!context) {
-    return sCubebMTGLatencyInFrames;  // default 512
+    return sCubebMSGLatencyInFrames;  // default 512
   }
   uint32_t latency_frames = 0;
   if (cubeb_get_min_latency(context, params, &latency_frames) != CUBEB_OK) {
     NS_WARNING("Could not get minimal latency from cubeb.");
-    return sCubebMTGLatencyInFrames;  // default 512
+    return sCubebMSGLatencyInFrames;  // default 512
   }
   return latency_frames;
 #endif
 }
 
 static const char* gInitCallbackPrefs[] = {
     PREF_VOLUME_SCALE,           PREF_CUBEB_OUTPUT_DEVICE,
-    PREF_CUBEB_LATENCY_PLAYBACK, PREF_CUBEB_LATENCY_MTG,
+    PREF_CUBEB_LATENCY_PLAYBACK, PREF_CUBEB_LATENCY_MSG,
     PREF_CUBEB_BACKEND,          PREF_CUBEB_FORCE_NULL_CONTEXT,
     PREF_CUBEB_SANDBOX,          PREF_AUDIOIPC_POOL_SIZE,
     PREF_AUDIOIPC_STACK_SIZE,    nullptr,
 };
 static const char* gCallbackPrefs[] = {
     PREF_CUBEB_FORCE_SAMPLE_RATE,
     // We don't want to call the callback on startup, because the pref is the
     // empty string by default ("", which means "logging disabled"). Because the
--- a/dom/media/CubebUtils.h
+++ b/dom/media/CubebUtils.h
@@ -40,17 +40,17 @@ uint32_t PreferredSampleRate();
 enum Side { Input, Output };
 
 double GetVolumeScale();
 bool GetFirstStream();
 cubeb* GetCubebContext();
 void ReportCubebStreamInitFailure(bool aIsFirstStream);
 void ReportCubebBackendUsed();
 uint32_t GetCubebPlaybackLatencyInMilliseconds();
-uint32_t GetCubebMTGLatencyInFrames(cubeb_stream_params* params);
+uint32_t GetCubebMSGLatencyInFrames(cubeb_stream_params* params);
 bool CubebLatencyPrefSet();
 void GetCurrentBackend(nsAString& aBackend);
 cubeb_stream_prefs GetDefaultStreamPrefs();
 char* GetForcedOutputDevice();
 // No-op on all platforms but Android, where it tells the device's AudioManager
 // to switch to "communication mode", which might change audio routing,
 // bluetooth communication type, etc.
 void SetInCommunication(bool aInCommunication);
--- a/dom/media/DOMMediaStream.cpp
+++ b/dom/media/DOMMediaStream.cpp
@@ -1,22 +1,22 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "DOMMediaStream.h"
 
-#include "AudioCaptureTrack.h"
+#include "AudioCaptureStream.h"
 #include "AudioChannelAgent.h"
 #include "AudioStreamTrack.h"
 #include "Layers.h"
-#include "MediaTrackGraph.h"
-#include "MediaTrackGraphImpl.h"
-#include "MediaTrackListener.h"
+#include "MediaStreamGraph.h"
+#include "MediaStreamGraphImpl.h"
+#include "MediaStreamListener.h"
 #include "VideoStreamTrack.h"
 #include "mozilla/dom/AudioTrack.h"
 #include "mozilla/dom/AudioTrackList.h"
 #include "mozilla/dom/DocGroup.h"
 #include "mozilla/dom/HTMLCanvasElement.h"
 #include "mozilla/dom/MediaStreamBinding.h"
 #include "mozilla/dom/MediaStreamTrackEvent.h"
 #include "mozilla/dom/Promise.h"
@@ -39,16 +39,18 @@
 using namespace mozilla;
 using namespace mozilla::dom;
 using namespace mozilla::layers;
 using namespace mozilla::media;
 
 static LazyLogModule gMediaStreamLog("MediaStream");
 #define LOG(type, msg) MOZ_LOG(gMediaStreamLog, type, msg)
 
+const TrackID TRACK_VIDEO_PRIMARY = 1;
+
 static bool ContainsLiveTracks(
     const nsTArray<RefPtr<MediaStreamTrack>>& aTracks) {
   for (const auto& track : aTracks) {
     if (track->ReadyState() == MediaStreamTrackState::Live) {
       return true;
     }
   }
 
@@ -227,35 +229,35 @@ already_AddRefed<Promise> DOMMediaStream
     return nullptr;
   }
 
   RefPtr<Promise> p = Promise::Create(go, aRv);
   if (aRv.Failed()) {
     return nullptr;
   }
 
-  MediaTrackGraph* graph = MediaTrackGraph::GetInstanceIfExists(
-      window, MediaTrackGraph::REQUEST_DEFAULT_SAMPLE_RATE);
+  MediaStreamGraph* graph = MediaStreamGraph::GetInstanceIfExists(
+      window, MediaStreamGraph::REQUEST_DEFAULT_SAMPLE_RATE);
   if (!graph) {
     p->MaybeResolve(0);
     return p.forget();
   }
 
-  auto* graphImpl = static_cast<MediaTrackGraphImpl*>(graph);
+  auto* graphImpl = static_cast<MediaStreamGraphImpl*>(graph);
 
   class Counter : public ControlMessage {
    public:
-    Counter(MediaTrackGraphImpl* aGraph, const RefPtr<Promise>& aPromise)
+    Counter(MediaStreamGraphImpl* aGraph, const RefPtr<Promise>& aPromise)
         : ControlMessage(nullptr), mGraph(aGraph), mPromise(aPromise) {
       MOZ_ASSERT(NS_IsMainThread());
     }
 
     void Run() override {
       uint32_t streams =
-          mGraph->mTracks.Length() + mGraph->mSuspendedTracks.Length();
+          mGraph->mStreams.Length() + mGraph->mSuspendedStreams.Length();
       mGraph->DispatchToMainThreadStableState(NS_NewRunnableFunction(
           "DOMMediaStream::CountUnderlyingStreams (stable state)",
           [promise = std::move(mPromise), streams]() mutable {
             NS_DispatchToMainThread(NS_NewRunnableFunction(
                 "DOMMediaStream::CountUnderlyingStreams",
                 [promise = std::move(promise), streams]() {
                   promise->MaybeResolve(streams);
                 }));
@@ -268,17 +270,17 @@ already_AddRefed<Promise> DOMMediaStream
     void RunDuringShutdown() override {
       NS_ReleaseOnMainThreadSystemGroup(
           "DOMMediaStream::CountUnderlyingStreams::Counter::RunDuringShutdown",
           mPromise.forget());
     }
 
    private:
     // mGraph owns this Counter instance and decides its lifetime.
-    MediaTrackGraphImpl* mGraph;
+    MediaStreamGraphImpl* mGraph;
     RefPtr<Promise> mPromise;
   };
   graphImpl->AppendMessage(MakeUnique<Counter>(graphImpl, p));
 
   return p.forget();
 }
 
 void DOMMediaStream::GetId(nsAString& aID) const { aID = mID; }
@@ -322,32 +324,34 @@ void DOMMediaStream::GetVideoTracks(
 void DOMMediaStream::GetTracks(
     nsTArray<RefPtr<MediaStreamTrack>>& aTracks) const {
   for (const auto& track : mTracks) {
     aTracks.AppendElement(track);
   }
 }
 
 void DOMMediaStream::AddTrack(MediaStreamTrack& aTrack) {
-  LOG(LogLevel::Info, ("DOMMediaStream %p Adding track %p (from track %p)",
-                       this, &aTrack, aTrack.GetTrack()));
+  LOG(LogLevel::Info,
+      ("DOMMediaStream %p Adding track %p (from stream %p with ID %d)", this,
+       &aTrack, aTrack.GetStream(), aTrack.GetTrackID()));
 
   if (HasTrack(aTrack)) {
     LOG(LogLevel::Debug,
         ("DOMMediaStream %p already contains track %p", this, &aTrack));
     return;
   }
 
   mTracks.AppendElement(&aTrack);
   NotifyTrackAdded(&aTrack);
 }
 
 void DOMMediaStream::RemoveTrack(MediaStreamTrack& aTrack) {
-  LOG(LogLevel::Info, ("DOMMediaStream %p Removing track %p (from track %p)",
-                       this, &aTrack, aTrack.GetTrack()));
+  LOG(LogLevel::Info,
+      ("DOMMediaStream %p Removing track %p (from stream %p with ID %d)", this,
+       &aTrack, aTrack.GetStream(), aTrack.GetTrackID()));
 
   if (!mTracks.RemoveElement(&aTrack)) {
     LOG(LogLevel::Debug,
         ("DOMMediaStream %p does not contain track %p", this, &aTrack));
     return;
   }
 
   if (!aTrack.Ended()) {
--- a/dom/media/DOMMediaStream.h
+++ b/dom/media/DOMMediaStream.h
@@ -6,25 +6,29 @@
 #ifndef NSDOMMEDIASTREAM_H_
 #define NSDOMMEDIASTREAM_H_
 
 #include "ImageContainer.h"
 
 #include "nsAutoPtr.h"
 #include "nsCycleCollectionParticipant.h"
 #include "nsWrapperCache.h"
+#include "StreamTracks.h"
 #include "nsIPrincipal.h"
 #include "MediaTrackConstraints.h"
 #include "mozilla/DOMEventTargetHelper.h"
 #include "mozilla/RelativeTimeline.h"
 
 namespace mozilla {
 
 class AbstractThread;
 class DOMMediaStream;
+class MediaStream;
+class MediaInputPort;
+class ProcessedMediaStream;
 
 enum class BlockingMode;
 
 namespace dom {
 class HTMLCanvasElement;
 class MediaStreamTrack;
 class MediaStreamTrackSource;
 class AudioStreamTrack;
--- a/dom/media/DriftCompensation.h
+++ b/dom/media/DriftCompensation.h
@@ -14,19 +14,19 @@
 
 namespace mozilla {
 
 static LazyLogModule gDriftCompensatorLog("DriftCompensator");
 #define LOG(type, ...) MOZ_LOG(gDriftCompensatorLog, type, (__VA_ARGS__))
 
 /**
  * DriftCompensator can be used to handle drift between audio and video tracks
- * from the MediaTrackGraph.
+ * from the MediaStreamGraph.
  *
- * Drift can occur because audio is driven by a MediaTrackGraph running off an
+ * Drift can occur because audio is driven by a MediaStreamGraph running off an
  * audio callback, thus it's progressed by the clock of one the audio output
  * devices on the user's machine. Video on the other hand is always expressed in
  * wall-clock TimeStamps, i.e., it's progressed by the system clock. These
  * clocks will, over time, drift apart.
  *
  * Do not use the DriftCompensator across multiple audio tracks, as it will
  * automatically record the start time of the first audio samples, and all
  * samples for the same audio track on the same audio clock will have to be
@@ -37,17 +37,17 @@ static LazyLogModule gDriftCompensatorLo
  * - The video thread for compensating drift of video frames to match the audio
  *   clock.
  */
 class DriftCompensator {
   const RefPtr<nsIEventTarget> mVideoThread;
   const TrackRate mAudioRate;
 
   // Number of audio samples produced. Any thread.
-  Atomic<TrackTime> mAudioSamples{0};
+  Atomic<StreamTime> mAudioSamples{0};
 
   // Time the first audio samples were added. mVideoThread only.
   TimeStamp mAudioStartTime;
 
   void SetAudioStartTime(TimeStamp aTime) {
     MOZ_ASSERT(mVideoThread->IsOnCurrentThread());
     MOZ_ASSERT(mAudioStartTime.IsNull());
     mAudioStartTime = aTime;
@@ -73,32 +73,32 @@ class DriftCompensator {
         &DriftCompensator::SetAudioStartTime, aStart));
     MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
     Unused << rv;
   }
 
   /**
    * aSamples is the number of samples fed by an AudioStream.
    */
-  void NotifyAudio(TrackTime aSamples) {
+  void NotifyAudio(StreamTime aSamples) {
     MOZ_ASSERT(aSamples > 0);
     mAudioSamples += aSamples;
 
     LOG(LogLevel::Verbose,
         "DriftCompensator %p Processed another %" PRId64
         " samples; now %.3fs audio",
         this, aSamples, static_cast<double>(mAudioSamples) / mAudioRate);
   }
 
   /**
    * Drift compensates a video TimeStamp based on historical audio data.
    */
   virtual TimeStamp GetVideoTime(TimeStamp aNow, TimeStamp aTime) {
     MOZ_ASSERT(mVideoThread->IsOnCurrentThread());
-    TrackTime samples = mAudioSamples;
+    StreamTime samples = mAudioSamples;
 
     if (samples / mAudioRate < 10) {
       // We don't apply compensation for the first 10 seconds because of the
       // higher inaccuracy during this time.
       LOG(LogLevel::Debug, "DriftCompensator %p %" PRId64 "ms so far; ignoring",
           this, samples * 1000 / mAudioRate);
       return aTime;
     }
--- a/dom/media/GraphDriver.cpp
+++ b/dom/media/GraphDriver.cpp
@@ -1,15 +1,15 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-#include <MediaTrackGraphImpl.h>
+#include <MediaStreamGraphImpl.h>
 #include "mozilla/dom/AudioContext.h"
 #include "mozilla/dom/AudioDeviceInfo.h"
 #include "mozilla/dom/WorkletThread.h"
 #include "mozilla/SharedThreadPool.h"
 #include "mozilla/ClearOnShutdown.h"
 #include "mozilla/Unused.h"
 #include "mozilla/MathAlgorithms.h"
 #include "CubebDeviceEnumerator.h"
@@ -18,25 +18,25 @@
 #ifdef MOZ_WEBRTC
 #  include "webrtc/MediaEngineWebRTC.h"
 #endif
 
 #ifdef XP_MACOSX
 #  include <sys/sysctl.h>
 #endif
 
-extern mozilla::LazyLogModule gMediaTrackGraphLog;
+extern mozilla::LazyLogModule gMediaStreamGraphLog;
 #ifdef LOG
 #  undef LOG
 #endif  // LOG
-#define LOG(type, msg) MOZ_LOG(gMediaTrackGraphLog, type, msg)
+#define LOG(type, msg) MOZ_LOG(gMediaStreamGraphLog, type, msg)
 
 namespace mozilla {
 
-GraphDriver::GraphDriver(MediaTrackGraphImpl* aGraphImpl)
+GraphDriver::GraphDriver(MediaStreamGraphImpl* aGraphImpl)
     : mIterationStart(0),
       mIterationEnd(0),
       mGraphImpl(aGraphImpl),
       mPreviousDriver(nullptr),
       mNextDriver(nullptr) {}
 
 void GraphDriver::SetGraphTime(GraphDriver* aPreviousDriver,
                                GraphTime aLastSwitchNextIterationStart,
@@ -139,49 +139,49 @@ void GraphDriver::SetNextDriver(GraphDri
 }
 
 void GraphDriver::SetPreviousDriver(GraphDriver* aPreviousDriver) {
   MOZ_ASSERT(OnGraphThread() || !ThreadRunning());
   GraphImpl()->GetMonitor().AssertCurrentThreadOwns();
   mPreviousDriver = aPreviousDriver;
 }
 
-ThreadedDriver::ThreadedDriver(MediaTrackGraphImpl* aGraphImpl)
+ThreadedDriver::ThreadedDriver(MediaStreamGraphImpl* aGraphImpl)
     : GraphDriver(aGraphImpl), mThreadRunning(false) {}
 
-class MediaTrackGraphShutdownThreadRunnable : public Runnable {
+class MediaStreamGraphShutdownThreadRunnable : public Runnable {
  public:
-  explicit MediaTrackGraphShutdownThreadRunnable(
+  explicit MediaStreamGraphShutdownThreadRunnable(
       already_AddRefed<nsIThread> aThread)
-      : Runnable("MediaTrackGraphShutdownThreadRunnable"), mThread(aThread) {}
+      : Runnable("MediaStreamGraphShutdownThreadRunnable"), mThread(aThread) {}
   NS_IMETHOD Run() override {
     MOZ_ASSERT(NS_IsMainThread());
     MOZ_ASSERT(mThread);
 
     mThread->Shutdown();
     mThread = nullptr;
     return NS_OK;
   }
 
  private:
   nsCOMPtr<nsIThread> mThread;
 };
 
 ThreadedDriver::~ThreadedDriver() {
   if (mThread) {
     nsCOMPtr<nsIRunnable> event =
-        new MediaTrackGraphShutdownThreadRunnable(mThread.forget());
+        new MediaStreamGraphShutdownThreadRunnable(mThread.forget());
     SystemGroup::Dispatch(TaskCategory::Other, event.forget());
   }
 }
 
-class MediaTrackGraphInitThreadRunnable : public Runnable {
+class MediaStreamGraphInitThreadRunnable : public Runnable {
  public:
-  explicit MediaTrackGraphInitThreadRunnable(ThreadedDriver* aDriver)
-      : Runnable("MediaTrackGraphInitThreadRunnable"), mDriver(aDriver) {}
+  explicit MediaStreamGraphInitThreadRunnable(ThreadedDriver* aDriver)
+      : Runnable("MediaStreamGraphInitThreadRunnable"), mDriver(aDriver) {}
   NS_IMETHOD Run() override {
     MOZ_ASSERT(!mDriver->ThreadRunning());
     LOG(LogLevel::Debug, ("Starting a new system driver for graph %p",
                           mDriver->mGraphImpl.get()));
 
     RefPtr<GraphDriver> previousDriver;
     {
       MonitorAutoLock mon(mDriver->mGraphImpl->GetMonitor());
@@ -216,39 +216,39 @@ class MediaTrackGraphInitThreadRunnable 
 
 void ThreadedDriver::Start() {
   MOZ_ASSERT(!ThreadRunning());
   LOG(LogLevel::Debug,
       ("Starting thread for a SystemClockDriver  %p", mGraphImpl.get()));
   Unused << NS_WARN_IF(mThread);
   MOZ_ASSERT(!mThread);  // Ensure we haven't already started it
 
-  nsCOMPtr<nsIRunnable> event = new MediaTrackGraphInitThreadRunnable(this);
+  nsCOMPtr<nsIRunnable> event = new MediaStreamGraphInitThreadRunnable(this);
   // Note: mThread may be null during event->Run() if we pass to NewNamedThread!
   // See AudioInitTask
-  nsresult rv = NS_NewNamedThread("MediaTrackGrph", getter_AddRefs(mThread));
+  nsresult rv = NS_NewNamedThread("MediaStreamGrph", getter_AddRefs(mThread));
   if (NS_SUCCEEDED(rv)) {
     mThread->EventTarget()->Dispatch(event.forget(), NS_DISPATCH_NORMAL);
   }
 }
 
 void ThreadedDriver::Shutdown() {
   NS_ASSERTION(NS_IsMainThread(), "Must be called on main thread");
   // mGraph's thread is not running so it's OK to do whatever here
-  LOG(LogLevel::Debug, ("Stopping threads for MediaTrackGraph %p", this));
+  LOG(LogLevel::Debug, ("Stopping threads for MediaStreamGraph %p", this));
 
   if (mThread) {
     LOG(LogLevel::Debug,
         ("%p: Stopping ThreadedDriver's %p thread", GraphImpl(), this));
     mThread->Shutdown();
     mThread = nullptr;
   }
 }
 
-SystemClockDriver::SystemClockDriver(MediaTrackGraphImpl* aGraphImpl)
+SystemClockDriver::SystemClockDriver(MediaStreamGraphImpl* aGraphImpl)
     : ThreadedDriver(aGraphImpl),
       mInitialTimeStamp(TimeStamp::Now()),
       mCurrentTimeStamp(TimeStamp::Now()),
       mLastTimeStamp(TimeStamp::Now()),
       mIsFallback(false) {}
 
 SystemClockDriver::~SystemClockDriver() {}
 
@@ -315,17 +315,17 @@ void ThreadedDriver::RunThread() {
 
 MediaTime SystemClockDriver::GetIntervalForIteration() {
   TimeStamp now = TimeStamp::Now();
   MediaTime interval =
       GraphImpl()->SecondsToMediaTime((now - mCurrentTimeStamp).ToSeconds());
   mCurrentTimeStamp = now;
 
   MOZ_LOG(
-      gMediaTrackGraphLog, LogLevel::Verbose,
+      gMediaStreamGraphLog, LogLevel::Verbose,
       ("%p: Updating current time to %f (real %f, StateComputedTime() %f)",
        GraphImpl(), GraphImpl()->MediaTimeToSeconds(IterationEnd() + interval),
        (now - mInitialTimeStamp).ToSeconds(),
        GraphImpl()->MediaTimeToSeconds(StateComputedTime())));
 
   return interval;
 }
 
@@ -378,17 +378,17 @@ TimeDuration SystemClockDriver::WaitInte
   timeoutMS = std::max<int64_t>(0, std::min<int64_t>(timeoutMS, 60 * 1000));
   LOG(LogLevel::Verbose,
       ("%p: Waiting for next iteration; at %f, timeout=%f", GraphImpl(),
        (now - mInitialTimeStamp).ToSeconds(), timeoutMS / 1000.0));
 
   return TimeDuration::FromMilliseconds(timeoutMS);
 }
 
-OfflineClockDriver::OfflineClockDriver(MediaTrackGraphImpl* aGraphImpl,
+OfflineClockDriver::OfflineClockDriver(MediaStreamGraphImpl* aGraphImpl,
                                        GraphTime aSlice)
     : ThreadedDriver(aGraphImpl), mSlice(aSlice) {}
 
 OfflineClockDriver::~OfflineClockDriver() {}
 
 MediaTime OfflineClockDriver::GetIntervalForIteration() {
   return GraphImpl()->MillisecondsToMediaTime(mSlice);
 }
@@ -441,25 +441,25 @@ AsyncCubebTask::Run() {
     default:
       MOZ_CRASH("Operation not implemented.");
   }
 
   // The thread will kill itself after a bit
   return NS_OK;
 }
 
-TrackAndPromiseForOperation::TrackAndPromiseForOperation(
-    MediaTrack* aTrack, void* aPromise, dom::AudioContextOperation aOperation,
+StreamAndPromiseForOperation::StreamAndPromiseForOperation(
+    MediaStream* aStream, void* aPromise, dom::AudioContextOperation aOperation,
     dom::AudioContextOperationFlags aFlags)
-    : mTrack(aTrack),
+    : mStream(aStream),
       mPromise(aPromise),
       mOperation(aOperation),
       mFlags(aFlags) {}
 
-AudioCallbackDriver::AudioCallbackDriver(MediaTrackGraphImpl* aGraphImpl,
+AudioCallbackDriver::AudioCallbackDriver(MediaStreamGraphImpl* aGraphImpl,
                                          uint32_t aInputChannelCount,
                                          AudioInputType aAudioInputType)
     : GraphDriver(aGraphImpl),
       mOutputChannels(0),
       mSampleRate(0),
       mInputChannelCount(aInputChannelCount),
       mIterationDurationMS(MEDIA_GRAPH_TARGET_PERIOD_MS),
       mStarted(false),
@@ -584,20 +584,20 @@ bool AudioCallbackDriver::Init() {
   output.layout = CUBEB_LAYOUT_UNDEFINED;
   output.prefs = CubebUtils::GetDefaultStreamPrefs();
 #if !defined(XP_WIN)
   if (mInputDevicePreference == CUBEB_DEVICE_PREF_VOICE) {
     output.prefs |= static_cast<cubeb_stream_prefs>(CUBEB_STREAM_PREF_VOICE);
   }
 #endif
 
-  uint32_t latencyFrames = CubebUtils::GetCubebMTGLatencyInFrames(&output);
+  uint32_t latencyFrames = CubebUtils::GetCubebMSGLatencyInFrames(&output);
 
   // Macbook and MacBook air don't have enough CPU to run very low latency
-  // MediaTrackGraphs, cap the minimal latency to 512 frames int this case.
+  // MediaStreamGraphs, cap the minimal latency to 512 frames int this case.
   if (IsMacbookOrMacbookAir()) {
     latencyFrames = std::max((uint32_t)512, latencyFrames);
   }
 
   // On OSX, having a latency that is lower than 10ms is very common. It's
   // not very useful when doing voice, because all the WebRTC code deal in 10ms
   // chunks of audio.  Take the first power of two above 10ms at the current
   // rate in this case. It's probably 512, for common rates.
@@ -630,17 +630,17 @@ bool AudioCallbackDriver::Init() {
     DebugOnly<int> rv =
         cubeb_stream_set_volume(mAudioStream, CubebUtils::GetVolumeScale());
     NS_WARNING_ASSERTION(
         rv == CUBEB_OK,
         "Could not set the audio stream volume in GraphDriver.cpp");
     CubebUtils::ReportCubebBackendUsed();
   } else {
     NS_WARNING(
-        "Could not create a cubeb stream for MediaTrackGraph, falling "
+        "Could not create a cubeb stream for MediaStreamGraph, falling "
         "back to a SystemClockDriver");
     // Only report failures when we're not coming from a driver that was
     // created itself as a fallback driver because of a previous audio driver
     // failure.
     if (!mFromFallback) {
       CubebUtils::ReportCubebStreamInitFailure(firstStream);
     }
     MonitorAutoLock lock(GraphImpl()->GetMonitor());
@@ -693,28 +693,28 @@ void AudioCallbackDriver::Start() {
       new AsyncCubebTask(AsAudioCallbackDriver(), AsyncCubebOperation::INIT);
   initEvent->Dispatch();
 }
 
 bool AudioCallbackDriver::StartStream() {
   MOZ_ASSERT(!IsStarted() && OnCubebOperationThread());
   mShouldFallbackIfError = true;
   if (cubeb_stream_start(mAudioStream) != CUBEB_OK) {
-    NS_WARNING("Could not start cubeb stream for MTG.");
+    NS_WARNING("Could not start cubeb stream for MSG.");
     return false;
   }
 
   mStarted = true;
   return true;
 }
 
 void AudioCallbackDriver::Stop() {
   MOZ_ASSERT(OnCubebOperationThread());
   if (cubeb_stream_stop(mAudioStream) != CUBEB_OK) {
-    NS_WARNING("Could not stop cubeb stream for MTG.");
+    NS_WARNING("Could not stop cubeb stream for MSG.");
   }
   mStarted = false;
 }
 
 void AudioCallbackDriver::RemoveMixerCallback() {
   MOZ_ASSERT(OnGraphThread() || !ThreadRunning());
 
   if (mAddedMixer) {
@@ -1048,49 +1048,49 @@ uint32_t AudioCallbackDriver::IterationD
   MOZ_ASSERT(OnGraphThread());
   // The real fix would be to have an API in cubeb to give us the number. Short
   // of that, we approximate it here. bug 1019507
   return mIterationDurationMS;
 }
 
 bool AudioCallbackDriver::IsStarted() { return mStarted; }
 
-void AudioCallbackDriver::EnqueueTrackAndPromiseForOperation(
-    MediaTrack* aTrack, void* aPromise, dom::AudioContextOperation aOperation,
+void AudioCallbackDriver::EnqueueStreamAndPromiseForOperation(
+    MediaStream* aStream, void* aPromise, dom::AudioContextOperation aOperation,
     dom::AudioContextOperationFlags aFlags) {
   MOZ_ASSERT(OnGraphThread() || !ThreadRunning());
   MonitorAutoLock mon(mGraphImpl->GetMonitor());
   MOZ_ASSERT((aFlags | dom::AudioContextOperationFlags::SendStateChange) ||
              !aPromise);
   if (aFlags == dom::AudioContextOperationFlags::SendStateChange) {
     mPromisesForOperation.AppendElement(
-        TrackAndPromiseForOperation(aTrack, aPromise, aOperation, aFlags));
+        StreamAndPromiseForOperation(aStream, aPromise, aOperation, aFlags));
   }
 }
 
 void AudioCallbackDriver::CompleteAudioContextOperations(
     AsyncCubebOperation aOperation) {
   MOZ_ASSERT(OnCubebOperationThread());
-  AutoTArray<TrackAndPromiseForOperation, 1> array;
+  AutoTArray<StreamAndPromiseForOperation, 1> array;
 
   // We can't lock for the whole function because AudioContextOperationCompleted
   // will grab the monitor
   {
     MonitorAutoLock mon(GraphImpl()->GetMonitor());
     array.SwapElements(mPromisesForOperation);
   }
 
   for (uint32_t i = 0; i < array.Length(); i++) {
-    TrackAndPromiseForOperation& s = array[i];
+    StreamAndPromiseForOperation& s = array[i];
     if ((aOperation == AsyncCubebOperation::INIT &&
          s.mOperation == dom::AudioContextOperation::Resume) ||
         (aOperation == AsyncCubebOperation::SHUTDOWN &&
          s.mOperation != dom::AudioContextOperation::Resume)) {
       MOZ_ASSERT(s.mFlags == dom::AudioContextOperationFlags::SendStateChange);
-      GraphImpl()->AudioContextOperationCompleted(s.mTrack, s.mPromise,
+      GraphImpl()->AudioContextOperationCompleted(s.mStream, s.mPromise,
                                                   s.mOperation, s.mFlags);
       array.RemoveElementAt(i);
       i--;
     }
   }
 
   if (!array.IsEmpty()) {
     MonitorAutoLock mon(GraphImpl()->GetMonitor());
--- a/dom/media/GraphDriver.h
+++ b/dom/media/GraphDriver.h
@@ -28,101 +28,101 @@ template <>
 class nsAutoRefTraits<cubeb_stream> : public nsPointerRefTraits<cubeb_stream> {
  public:
   static void Release(cubeb_stream* aStream) { cubeb_stream_destroy(aStream); }
 };
 
 namespace mozilla {
 
 /**
- * Assume we can run an iteration of the MediaTrackGraph loop in this much time
+ * Assume we can run an iteration of the MediaStreamGraph loop in this much time
  * or less.
  * We try to run the control loop at this rate.
  */
 static const int MEDIA_GRAPH_TARGET_PERIOD_MS = 10;
 
 /**
- * Assume that we might miss our scheduled wakeup of the MediaTrackGraph by
+ * Assume that we might miss our scheduled wakeup of the MediaStreamGraph by
  * this much.
  */
 static const int SCHEDULE_SAFETY_MARGIN_MS = 10;
 
 /**
  * Try have this much audio buffered in streams and queued to the hardware.
  * The maximum delay to the end of the next control loop
  * is 2*MEDIA_GRAPH_TARGET_PERIOD_MS + SCHEDULE_SAFETY_MARGIN_MS.
  * There is no point in buffering more audio than this in a stream at any
  * given time (until we add processing).
  * This is not optimal yet.
  */
 static const int AUDIO_TARGET_MS =
     2 * MEDIA_GRAPH_TARGET_PERIOD_MS + SCHEDULE_SAFETY_MARGIN_MS;
 
-class MediaTrack;
-class MediaTrackGraphImpl;
+class MediaStream;
+class MediaStreamGraphImpl;
 
 class AudioCallbackDriver;
 class OfflineClockDriver;
 class SystemClockDriver;
 
 namespace dom {
 enum class AudioContextOperation;
 }
 
 /**
  * A driver is responsible for the scheduling of the processing, the thread
- * management, and give the different clocks to a MediaTrackGraph. This is an
- * abstract base class. A MediaTrackGraph can be driven by an
+ * management, and give the different clocks to a MediaStreamGraph. This is an
+ * abstract base class. A MediaStreamGraph can be driven by an
  * OfflineClockDriver, if the graph is offline, or a SystemClockDriver, if the
  * graph is real time.
- * A MediaTrackGraph holds an owning reference to its driver.
+ * A MediaStreamGraph holds an owning reference to its driver.
  *
  * The lifetime of drivers is a complicated affair. Here are the different
  * scenarii that can happen:
  *
- * Starting a MediaTrackGraph with an AudioCallbackDriver
+ * Starting a MediaStreamGraph with an AudioCallbackDriver
  * - A new thread T is created, from the main thread.
  * - On this thread T, cubeb is initialized if needed, and a cubeb_stream is
  *   created and started
  * - The thread T posts a message to the main thread to terminate itself.
  * - The graph runs off the audio thread
  *
- * Starting a MediaTrackGraph with a SystemClockDriver:
+ * Starting a MediaStreamGraph with a SystemClockDriver:
  * - A new thread T is created from the main thread.
  * - The graph runs off this thread.
  *
  * Switching from a SystemClockDriver to an AudioCallbackDriver:
  * - A new AudioCallabackDriver is created and initialized on the graph thread
- * - At the end of the MTG iteration, the SystemClockDriver transfers its timing
+ * - At the end of the MSG iteration, the SystemClockDriver transfers its timing
  *   info and a reference to itself to the AudioCallbackDriver. It then starts
  *   the AudioCallbackDriver.
  * - When the AudioCallbackDriver starts, it checks if it has been switched from
  *   a SystemClockDriver, and if that is the case, sends a message to the main
  *   thread to shut the SystemClockDriver thread down.
  * - The graph now runs off an audio callback
  *
  * Switching from an AudioCallbackDriver to a SystemClockDriver:
  * - A new SystemClockDriver is created, and set as mNextDriver.
- * - At the end of the MTG iteration, the AudioCallbackDriver transfers its
+ * - At the end of the MSG iteration, the AudioCallbackDriver transfers its
  *   timing info and a reference to itself to the SystemClockDriver. A new
  *   SystemClockDriver is started from the current audio thread.
  * - When starting, the SystemClockDriver checks if it has been switched from an
  *   AudioCallbackDriver. If yes, it creates a new temporary thread to release
  *   the cubeb_streams. This temporary thread closes the cubeb_stream, and
  *   then dispatches a message to the main thread to be terminated.
  * - The graph now runs off a normal thread.
  *
  * Two drivers cannot run at the same time for the same graph. The thread safety
  * of the different attributes of drivers, and they access pattern is documented
  * next to the members themselves.
  *
  */
 class GraphDriver {
  public:
-  explicit GraphDriver(MediaTrackGraphImpl* aGraphImpl);
+  explicit GraphDriver(MediaStreamGraphImpl* aGraphImpl);
 
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(GraphDriver);
   /* For {System,Offline}ClockDriver, this waits until it's time to process
    * more data.  For AudioCallbackDriver, this is a no-op. */
   virtual void WaitForNextIteration() = 0;
   /* Wakes up the graph if it is waiting. */
   virtual void WakeUp() = 0;
   /* Start the graph, init the driver, start the thread.
@@ -173,20 +173,20 @@ class GraphDriver {
                     GraphTime aLastSwitchNextIterationEnd);
   /**
    * Call this to indicate that another iteration of the control loop is
    * required on its regular schedule. The monitor must not be held.
    * This function has to be idempotent.
    */
   void EnsureNextIteration();
 
-  MediaTrackGraphImpl* GraphImpl() const { return mGraphImpl; }
+  MediaStreamGraphImpl* GraphImpl() const { return mGraphImpl; }
 
 #ifdef DEBUG
-  // True if the current thread is driving the MTG.
+  // True if the current thread is driving the MSG.
   bool OnGraphThread();
 #endif
   // True if the current thread is the GraphDriver's thread.
   virtual bool OnThread() = 0;
   // GraphDriver's thread has started and the thread is running.
   virtual bool ThreadRunning() = 0;
 
  protected:
@@ -195,18 +195,18 @@ class GraphDriver {
   void SetNextDriver(GraphDriver* aNextDriver);
 
   // Time of the start of this graph iteration. This must be accessed while
   // having the monitor.
   GraphTime mIterationStart;
   // Time of the end of this graph iteration. This must be accessed while having
   // the monitor.
   GraphTime mIterationEnd;
-  // The MediaTrackGraphImpl associated with this driver.
-  const RefPtr<MediaTrackGraphImpl> mGraphImpl;
+  // The MediaStreamGraphImpl associated with this driver.
+  const RefPtr<MediaStreamGraphImpl> mGraphImpl;
 
   // This is non-null only when this driver has recently switched from an other
   // driver, and has not cleaned it up yet (for example because the audio stream
   // is currently calling the callback during initialization).
   //
   // This is written to when changing driver, from the previous driver's thread,
   // or a thread created for the occasion. This is read each time we need to
   // check whether we're changing driver (in Switching()), from the graph
@@ -215,35 +215,35 @@ class GraphDriver {
   RefPtr<GraphDriver> mPreviousDriver;
   // This is non-null only when this driver is going to switch to an other
   // driver at the end of this iteration.
   // This must be accessed using the {Set,Get}NextDriver methods.
   RefPtr<GraphDriver> mNextDriver;
   virtual ~GraphDriver() {}
 };
 
-class MediaTrackGraphInitThreadRunnable;
+class MediaStreamGraphInitThreadRunnable;
 
 /**
  * This class is a driver that manages its own thread.
  */
 class ThreadedDriver : public GraphDriver {
  public:
-  explicit ThreadedDriver(MediaTrackGraphImpl* aGraphImpl);
+  explicit ThreadedDriver(MediaStreamGraphImpl* aGraphImpl);
   virtual ~ThreadedDriver();
   void WaitForNextIteration() override;
   void WakeUp() override;
   void Start() override;
   void Shutdown() override;
   /**
    * Runs main control loop on the graph thread. Normally a single invocation
    * of this runs for the entire lifetime of the graph thread.
    */
   void RunThread();
-  friend class MediaTrackGraphInitThreadRunnable;
+  friend class MediaStreamGraphInitThreadRunnable;
   uint32_t IterationDuration() override { return MEDIA_GRAPH_TARGET_PERIOD_MS; }
 
   nsIThread* Thread() { return mThread; }
 
   bool OnThread() override {
     return !mThread || mThread->EventTarget()->IsOnCurrentThread();
   }
 
@@ -263,22 +263,22 @@ class ThreadedDriver : public GraphDrive
 
  private:
   // This is true if the thread is running. It is false
   // before starting the thread and after stopping it.
   Atomic<bool> mThreadRunning;
 };
 
 /**
- * A SystemClockDriver drives a MediaTrackGraph using a system clock, and waits
+ * A SystemClockDriver drives a MediaStreamGraph using a system clock, and waits
  * using a monitor, between each iteration.
  */
 class SystemClockDriver : public ThreadedDriver {
  public:
-  explicit SystemClockDriver(MediaTrackGraphImpl* aGraphImpl);
+  explicit SystemClockDriver(MediaStreamGraphImpl* aGraphImpl);
   virtual ~SystemClockDriver();
   TimeDuration WaitInterval() override;
   MediaTime GetIntervalForIteration() override;
   void MarkAsFallback();
   bool IsFallback();
   SystemClockDriver* AsSystemClockDriver() override { return this; }
 
  private:
@@ -294,32 +294,32 @@ class SystemClockDriver : public Threade
 };
 
 /**
  * An OfflineClockDriver runs the graph as fast as possible, without waiting
  * between iteration.
  */
 class OfflineClockDriver : public ThreadedDriver {
  public:
-  OfflineClockDriver(MediaTrackGraphImpl* aGraphImpl, GraphTime aSlice);
+  OfflineClockDriver(MediaStreamGraphImpl* aGraphImpl, GraphTime aSlice);
   virtual ~OfflineClockDriver();
   TimeDuration WaitInterval() override;
   MediaTime GetIntervalForIteration() override;
   OfflineClockDriver* AsOfflineClockDriver() override { return this; }
 
  private:
   // Time, in GraphTime, for each iteration
   GraphTime mSlice;
 };
 
-struct TrackAndPromiseForOperation {
-  TrackAndPromiseForOperation(MediaTrack* aTrack, void* aPromise,
-                              dom::AudioContextOperation aOperation,
-                              dom::AudioContextOperationFlags aFlags);
-  RefPtr<MediaTrack> mTrack;
+struct StreamAndPromiseForOperation {
+  StreamAndPromiseForOperation(MediaStream* aStream, void* aPromise,
+                               dom::AudioContextOperation aOperation,
+                               dom::AudioContextOperationFlags aFlags);
+  RefPtr<MediaStream> mStream;
   void* mPromise;
   dom::AudioContextOperation mOperation;
   dom::AudioContextOperationFlags mFlags;
 };
 
 enum class AsyncCubebOperation { INIT, SHUTDOWN };
 enum class AudioInputType { Unknown, Voice };
 
@@ -347,17 +347,17 @@ class AudioCallbackDriver : public Graph
                             public MixerCallbackReceiver
 #if defined(XP_WIN)
     ,
                             public audio::DeviceChangeListener
 #endif
 {
  public:
   /** If aInputChannelCount is zero, then this driver is output-only. */
-  AudioCallbackDriver(MediaTrackGraphImpl* aGraphImpl,
+  AudioCallbackDriver(MediaStreamGraphImpl* aGraphImpl,
                       uint32_t aInputChannelCount,
                       AudioInputType aAudioInputType);
   virtual ~AudioCallbackDriver();
 
   void Start() override;
   void WaitForNextIteration() override;
   void WakeUp() override;
   void Shutdown() override;
@@ -405,18 +405,19 @@ class AudioCallbackDriver : public Graph
     if (mInputDevicePreference == CUBEB_DEVICE_PREF_VOICE) {
       return AudioInputType::Voice;
     }
     return AudioInputType::Unknown;
   }
 
   /* Enqueue a promise that is going to be resolved when a specific operation
    * occurs on the cubeb stream. */
-  void EnqueueTrackAndPromiseForOperation(
-      MediaTrack* aTrack, void* aPromise, dom::AudioContextOperation aOperation,
+  void EnqueueStreamAndPromiseForOperation(
+      MediaStream* aStream, void* aPromise,
+      dom::AudioContextOperation aOperation,
       dom::AudioContextOperationFlags aFlags);
 
   std::thread::id ThreadId() { return mAudioThreadId.load(); }
 
   bool OnThread() override {
     return mAudioThreadId.load() == std::this_thread::get_id();
   }
 
@@ -454,17 +455,17 @@ class AudioCallbackDriver : public Graph
    *  the graph will try to re-open an audio stream later. */
   void FallbackToSystemClockDriver();
 
   /* This is true when the method is executed on CubebOperation thread pool. */
   bool OnCubebOperationThread() {
     return mInitShutdownThread->IsOnCurrentThreadInfallible();
   }
 
-  /* MediaTrackGraphs are always down/up mixed to output channels. */
+  /* MediaStreamGraphs are always down/up mixed to output channels. */
   uint32_t mOutputChannels;
   /* The size of this buffer comes from the fact that some audio backends can
    * call back with a number of frames lower than one block (128 frames), so we
    * need to keep at most two block in the SpillBuffer, because we always round
    * up to block boundaries during an iteration.
    * This is only ever accessed on the audio callback thread. */
   SpillBuffer<AudioDataValue, WEBAUDIO_BLOCK_SIZE * 2> mScratchBuffer;
   /* Wrapper to ensure we write exactly the number of frames we need in the
@@ -505,17 +506,17 @@ class AudioCallbackDriver : public Graph
     ~AutoInCallback();
     AudioCallbackDriver* mDriver;
   };
 
   /* Shared thread pool with up to one thread for off-main-thread
    * initialization and shutdown of the audio stream via AsyncCubebTask. */
   const RefPtr<SharedThreadPool> mInitShutdownThread;
   /* This must be accessed with the graph monitor held. */
-  AutoTArray<TrackAndPromiseForOperation, 1> mPromisesForOperation;
+  AutoTArray<StreamAndPromiseForOperation, 1> mPromisesForOperation;
   cubeb_device_pref mInputDevicePreference;
   /* This is used to signal adding the mixer callback on first run
    * of audio callback. This is atomic because it is touched from different
    * threads, the audio callback thread and the state change thread. However,
    * the order of the threads does not allow concurent access. */
   Atomic<bool> mAddedMixer;
   /* Contains the id of the audio thread for as long as the callback
    * is taking place, after that it is reseted to an invalid value. */
@@ -547,14 +548,14 @@ class AsyncCubebTask : public Runnable {
  protected:
   virtual ~AsyncCubebTask();
 
  private:
   NS_IMETHOD Run() final;
 
   RefPtr<AudioCallbackDriver> mDriver;
   AsyncCubebOperation mOperation;
-  RefPtr<MediaTrackGraphImpl> mShutdownGrip;
+  RefPtr<MediaStreamGraphImpl> mShutdownGrip;
 };
 
 }  // namespace mozilla
 
 #endif  // GRAPHDRIVER_H_
--- a/dom/media/GraphRunner.cpp
+++ b/dom/media/GraphRunner.cpp
@@ -2,33 +2,33 @@
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
 
 #include "GraphRunner.h"
 
 #include "GraphDriver.h"
-#include "MediaTrackGraph.h"
-#include "MediaTrackGraphImpl.h"
+#include "MediaStreamGraph.h"
+#include "MediaStreamGraphImpl.h"
 #include "mozilla/dom/WorkletThread.h"
 #include "nsISupportsImpl.h"
 #include "prthread.h"
 #include "Tracing.h"
 #include "audio_thread_priority.h"
 
 namespace mozilla {
 
 static void Start(void* aArg) {
   NS_SetCurrentThreadName("GraphRunner");
   GraphRunner* th = static_cast<GraphRunner*>(aArg);
   th->Run();
 }
 
-GraphRunner::GraphRunner(MediaTrackGraphImpl* aGraph)
+GraphRunner::GraphRunner(MediaStreamGraphImpl* aGraph)
     : mMonitor("GraphRunner::mMonitor"),
       mGraph(aGraph),
       mStateEnd(0),
       mStillProcessing(true),
       mThreadState(ThreadState::Wait),
       // Note that mThread needs to be initialized last, as it may pre-empt the
       // thread running this ctor and enter Run() with uninitialized members.
       mThread(PR_CreateThread(PR_SYSTEM_THREAD, &Start, this,
--- a/dom/media/GraphRunner.h
+++ b/dom/media/GraphRunner.h
@@ -12,21 +12,21 @@
 
 #include <thread>
 
 struct PRThread;
 
 namespace mozilla {
 
 class GraphDriver;
-class MediaTrackGraphImpl;
+class MediaStreamGraphImpl;
 
 class GraphRunner {
  public:
-  explicit GraphRunner(MediaTrackGraphImpl* aGraph);
+  explicit GraphRunner(MediaStreamGraphImpl* aGraph);
   ~GraphRunner();
 
   /**
    * Marks us as shut down and signals mThread, so that it runs until the end.
    */
   void Shutdown();
 
   /**
@@ -52,19 +52,19 @@ class GraphRunner {
    */
   bool RunByGraphDriver(GraphDriver* aDriver);
 #endif
 
  private:
   // Monitor used for yielding mThread through Wait(), and scheduling mThread
   // through Signal() from a GraphDriver.
   Monitor mMonitor;
-  // The MediaTrackGraph we're running. Weakptr beecause this graph owns us and
+  // The MediaStreamGraph we're running. Weakptr beecause this graph owns us and
   // guarantees that our lifetime will not go beyond that of itself.
-  MediaTrackGraphImpl* const mGraph;
+  MediaStreamGraphImpl* const mGraph;
   // GraphTime being handed over to the graph through OneIteration. Protected by
   // mMonitor.
   GraphTime mStateEnd;
   // Reply from mGraph's OneIteration. Protected by mMonitor.
   bool mStillProcessing;
 
   enum class ThreadState {
     Wait,      // Waiting for a message.  This is the initial state.
--- a/dom/media/ImageToI420.h
+++ b/dom/media/ImageToI420.h
@@ -1,18 +1,16 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef ImageToI420Converter_h
 #define ImageToI420Converter_h
 
-#include "nsError.h"
-
 namespace mozilla {
 
 namespace layers {
 class Image;
 }  // namespace layers
 
 /**
  * Converts aImage to an I420 image and writes it to the given buffers.
--- a/dom/media/MediaDecoder.cpp
+++ b/dom/media/MediaDecoder.cpp
@@ -227,17 +227,17 @@ void MediaDecoder::SetVolume(double aVol
 
 RefPtr<GenericPromise> MediaDecoder::SetSink(AudioDeviceInfo* aSink) {
   MOZ_ASSERT(NS_IsMainThread());
   AbstractThread::AutoEnter context(AbstractMainThread());
   return GetStateMachine()->InvokeSetSink(aSink);
 }
 
 void MediaDecoder::AddOutputStream(DOMMediaStream* aStream,
-                                   SharedDummyTrack* aDummyStream) {
+                                   SharedDummyStream* aDummyStream) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load().");
   AbstractThread::AutoEnter context(AbstractMainThread());
   mDecoderStateMachine->EnsureOutputStreamManager(aDummyStream);
   if (mInfo) {
     mDecoderStateMachine->EnsureOutputStreamManagerHasTracks(*mInfo);
   }
   mDecoderStateMachine->AddOutputStream(aStream);
--- a/dom/media/MediaDecoder.h
+++ b/dom/media/MediaDecoder.h
@@ -43,17 +43,17 @@ class MediaMemoryInfo;
 class AbstractThread;
 class DOMMediaStream;
 class DecoderBenchmark;
 class FrameStatistics;
 class VideoFrameContainer;
 class MediaFormatReader;
 class MediaDecoderStateMachine;
 struct MediaPlaybackEvent;
-struct SharedDummyTrack;
+struct SharedDummyStream;
 
 enum class Visibility : uint8_t;
 
 struct MOZ_STACK_CLASS MediaDecoderInit {
   MediaDecoderOwner* const mOwner;
   const double mVolume;
   const bool mPreservesPitch;
   const double mPlaybackRate;
@@ -156,25 +156,26 @@ class MediaDecoder : public DecoderDocto
 
   // Set the given device as the output device.
   RefPtr<GenericPromise> SetSink(AudioDeviceInfo* aSink);
 
   bool GetMinimizePreroll() const { return mMinimizePreroll; }
 
   // All MediaStream-related data is protected by mReentrantMonitor.
   // We have at most one DecodedStreamData per MediaDecoder. Its stream
-  // is used as the input for each ProcessedMediaTrack created by calls to
+  // is used as the input for each ProcessedMediaStream created by calls to
   // captureStream(UntilEnded). Seeking creates a new source stream, as does
   // replaying after the input as ended. In the latter case, the new source is
   // not connected to streams created by captureStreamUntilEnded.
 
   // Add an output stream. All decoder output will be sent to the stream.
   // The stream is initially blocked. The decoder is responsible for unblocking
   // it while it is playing back.
-  void AddOutputStream(DOMMediaStream* aStream, SharedDummyTrack* aDummyStream);
+  void AddOutputStream(DOMMediaStream* aStream,
+                       SharedDummyStream* aDummyStream);
   // Remove an output stream added with AddOutputStream.
   void RemoveOutputStream(DOMMediaStream* aStream);
 
   // Update the principal for any output streams and their tracks.
   void SetOutputStreamPrincipal(nsIPrincipal* aPrincipal);
 
   // Return the duration of the video in seconds.
   virtual double GetDuration();
--- a/dom/media/MediaDecoderOwner.h
+++ b/dom/media/MediaDecoderOwner.h
@@ -5,16 +5,17 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 #ifndef MediaDecoderOwner_h_
 #define MediaDecoderOwner_h_
 
 #include "mozilla/UniquePtr.h"
 #include "MediaInfo.h"
 #include "MediaSegment.h"
 #include "nsSize.h"
+#include "TrackID.h"
 
 namespace mozilla {
 
 class AbstractThread;
 class GMPCrashHelper;
 class VideoFrameContainer;
 class MediaInfo;
 class MediaResult;
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -20,27 +20,24 @@
 #include "mozilla/Sprintf.h"
 #include "mozilla/StaticPrefs_media.h"
 #include "mozilla/Telemetry.h"
 #include "mozilla/TaskQueue.h"
 #include "mozilla/Tuple.h"
 #include "nsIMemoryReporter.h"
 #include "nsPrintfCString.h"
 #include "nsTArray.h"
-#include "AudioSegment.h"
 #include "DOMMediaStream.h"
 #include "ImageContainer.h"
 #include "MediaDecoder.h"
 #include "MediaDecoderStateMachine.h"
 #include "MediaShutdownManager.h"
-#include "MediaTrackGraph.h"
 #include "MediaTimer.h"
 #include "ReaderProxy.h"
 #include "TimeUnits.h"
-#include "VideoSegment.h"
 #include "VideoUtils.h"
 
 namespace mozilla {
 
 using namespace mozilla::media;
 
 #define NS_DispatchToMainThread(...) \
   CompileError_UseAbstractThreadDispatchInstead
@@ -3774,17 +3771,17 @@ void MediaDecoderStateMachine::RemoveOut
         });
     nsresult rv = OwnerThread()->Dispatch(r.forget());
     MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
     Unused << rv;
   }
 }
 
 void MediaDecoderStateMachine::EnsureOutputStreamManager(
-    SharedDummyTrack* aDummyStream) {
+    SharedDummyStream* aDummyStream) {
   MOZ_ASSERT(NS_IsMainThread());
   if (mOutputStreamManager) {
     return;
   }
   mOutputStreamManager = new OutputStreamManager(
       aDummyStream, mOutputStreamPrincipal, mAbstractMainThread);
 }
 
@@ -3797,26 +3794,26 @@ void MediaDecoderStateMachine::EnsureOut
   if ((!aLoadedInfo.HasAudio() ||
        mOutputStreamManager->HasTrackType(MediaSegment::AUDIO)) &&
       (!aLoadedInfo.HasVideo() ||
        mOutputStreamManager->HasTrackType(MediaSegment::VIDEO))) {
     return;
   }
   if (aLoadedInfo.HasAudio()) {
     MOZ_ASSERT(!mOutputStreamManager->HasTrackType(MediaSegment::AUDIO));
-    RefPtr<SourceMediaTrack> dummy =
+    RefPtr<SourceMediaStream> dummy =
         mOutputStreamManager->AddTrack(MediaSegment::AUDIO);
-    LOG("Pre-created audio track with underlying track %p", dummy.get());
+    LOG("Pre-created audio track with underlying stream %p", dummy.get());
     Unused << dummy;
   }
   if (aLoadedInfo.HasVideo()) {
     MOZ_ASSERT(!mOutputStreamManager->HasTrackType(MediaSegment::VIDEO));
-    RefPtr<SourceMediaTrack> dummy =
+    RefPtr<SourceMediaStream> dummy =
         mOutputStreamManager->AddTrack(MediaSegment::VIDEO);
-    LOG("Pre-created video track with underlying track %p", dummy.get());
+    LOG("Pre-created video track with underlying stream %p", dummy.get());
     Unused << dummy;
   }
 }
 
 class VideoQueueMemoryFunctor : public nsDequeFunctor {
  public:
   VideoQueueMemoryFunctor() : mSize(0) {}
 
--- a/dom/media/MediaDecoderStateMachine.h
+++ b/dom/media/MediaDecoderStateMachine.h
@@ -183,17 +183,17 @@ class MediaDecoderStateMachine
   // Returns the state machine task queue.
   TaskQueue* OwnerThread() const { return mTaskQueue; }
 
   RefPtr<GenericPromise> RequestDebugInfo(
       dom::MediaDecoderStateMachineDebugInfo& aInfo);
 
   void SetOutputStreamPrincipal(nsIPrincipal* aPrincipal);
   // If an OutputStreamManager does not exist, one will be created.
-  void EnsureOutputStreamManager(SharedDummyTrack* aDummyStream);
+  void EnsureOutputStreamManager(SharedDummyStream* aDummyStream);
   // If an OutputStreamManager exists, tracks matching aLoadedInfo will be
   // created unless they already exist in the manager.
   void EnsureOutputStreamManagerHasTracks(const MediaInfo& aLoadedInfo);
   // Add an output stream to the output stream manager. The manager must have
   // been created through EnsureOutputStreamManager() before this.
   void AddOutputStream(DOMMediaStream* aStream);
   // Remove an output stream added with AddOutputStream. If the last output
   // stream was removed, we will also tear down the OutputStreamManager.
--- a/dom/media/MediaInfo.h
+++ b/dom/media/MediaInfo.h
@@ -9,16 +9,17 @@
 #  include "mozilla/UniquePtr.h"
 #  include "mozilla/RefPtr.h"
 #  include "nsDataHashtable.h"
 #  include "nsString.h"
 #  include "nsTArray.h"
 #  include "AudioConfig.h"
 #  include "ImageTypes.h"
 #  include "MediaData.h"
+#  include "TrackID.h"  // for TrackID
 #  include "TimeUnits.h"
 #  include "mozilla/gfx/Point.h"  // for gfx::IntSize
 #  include "mozilla/gfx/Rect.h"   // for gfx::IntRect
 #  include "mozilla/gfx/Types.h"  // for gfx::ColorDepth
 
 namespace mozilla {
 
 class AudioInfo;
@@ -38,17 +39,17 @@ class MetadataTag {
 
 typedef nsDataHashtable<nsCStringHashKey, nsCString> MetadataTags;
 
 class TrackInfo {
  public:
   enum TrackType { kUndefinedTrack, kAudioTrack, kVideoTrack, kTextTrack };
   TrackInfo(TrackType aType, const nsAString& aId, const nsAString& aKind,
             const nsAString& aLabel, const nsAString& aLanguage, bool aEnabled,
-            uint32_t aTrackId)
+            TrackID aTrackId)
       : mId(aId),
         mKind(aKind),
         mLabel(aLabel),
         mLanguage(aLanguage),
         mEnabled(aEnabled),
         mTrackId(aTrackId),
         mIsRenderedExternally(false),
         mType(aType) {
@@ -68,17 +69,17 @@ class TrackInfo {
 
   // Fields common with MediaTrack object.
   nsString mId;
   nsString mKind;
   nsString mLabel;
   nsString mLanguage;
   bool mEnabled;
 
-  uint32_t mTrackId;
+  TrackID mTrackId;
 
   nsCString mMimeType;
   media::TimeUnit mDuration;
   media::TimeUnit mMediaTime;
   CryptoTrack mCrypto;
 
   nsTArray<MetadataTag> mTags;
 
@@ -394,16 +395,26 @@ class MediaInfo {
 
   bool IsEncrypted() const {
     return (HasAudio() && mAudio.mCrypto.IsEncrypted()) ||
            (HasVideo() && mVideo.mCrypto.IsEncrypted());
   }
 
   bool HasValidMedia() const { return HasVideo() || HasAudio(); }
 
+  void AssertValid() const {
+    NS_ASSERTION(!HasAudio() || mAudio.mTrackId != TRACK_INVALID,
+                 "Audio track ID must be valid");
+    NS_ASSERTION(!HasVideo() || mVideo.mTrackId != TRACK_INVALID,
+                 "Audio track ID must be valid");
+    NS_ASSERTION(
+        !HasAudio() || !HasVideo() || mAudio.mTrackId != mVideo.mTrackId,
+        "Duplicate track IDs");
+  }
+
   // TODO: Store VideoInfo and AudioIndo in arrays to support multi-tracks.
   VideoInfo mVideo;
   AudioInfo mAudio;
 
   // If the metadata includes a duration, we store it here.
   media::NullableTimeUnit mMetadataDuration;
 
   // The Ogg reader tries to kinda-sorta compute the duration by seeking to the
--- a/dom/media/MediaManager.cpp
+++ b/dom/media/MediaManager.cpp
@@ -1,23 +1,23 @@
 /* -*- Mode: c++; c-basic-offset: 2; indent-tabs-mode: nil; tab-width: 40 -*- */
 /* vim: set ts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "MediaManager.h"
 
-#include "AudioCaptureTrack.h"
+#include "AudioCaptureStream.h"
 #include "AudioDeviceInfo.h"
 #include "AudioStreamTrack.h"
-#include "MediaTrackGraphImpl.h"
+#include "MediaStreamGraphImpl.h"
 #include "MediaTimer.h"
 #include "mozilla/dom/MediaDeviceInfo.h"
-#include "MediaTrackListener.h"
+#include "MediaStreamListener.h"
 #include "nsArray.h"
 #include "nsContentUtils.h"
 #include "nsGlobalWindow.h"
 #include "nsHashPropertyBag.h"
 #include "nsIEventTarget.h"
 #include "nsIUUIDGenerator.h"
 #include "nsIScriptGlobalObject.h"
 #include "nsIPermissionManager.h"
@@ -290,17 +290,17 @@ void MediaManager::CallOnError(GetUserMe
 
 void MediaManager::CallOnSuccess(GetUserMediaSuccessCallback& aCallback,
                                  DOMMediaStream& aStream) {
   aCallback.Call(aStream);
 }
 
 /**
  * SourceListener has threadsafe refcounting for use across the main, media and
- * MTG threads. But it has a non-threadsafe SupportsWeakPtr for WeakPtr usage
+ * MSG threads. But it has a non-threadsafe SupportsWeakPtr for WeakPtr usage
  * only from main thread, to ensure that garbage- and cycle-collected objects
  * don't hold a reference to it during late shutdown.
  */
 class SourceListener : public SupportsWeakPtr<SourceListener> {
  public:
   typedef MozPromise<bool /* aIgnored */, RefPtr<MediaMgrError>, true>
       SourceListenerPromise;
 
@@ -311,65 +311,54 @@ class SourceListener : public SupportsWe
   SourceListener();
 
   /**
    * Registers this source listener as belonging to the given window listener.
    */
   void Register(GetUserMediaWindowListener* aListener);
 
   /**
-   * Marks this listener as active and creates internal device states.
+   * Marks this listener as active and adds itself as a listener to aStream.
    */
   void Activate(RefPtr<MediaDevice> aAudioDevice,
                 RefPtr<LocalTrackSource> aAudioTrackSource,
                 RefPtr<MediaDevice> aVideoDevice,
                 RefPtr<LocalTrackSource> aVideoTrackSource);
 
   /**
    * Posts a task to initialize and start all associated devices.
    */
   RefPtr<SourceListenerPromise> InitializeAsync();
 
   /**
-   * Stops all live tracks, ends the associated MediaTrack and cleans up the
-   * weak reference to the associated window listener.
+   * Stops all live tracks, finishes the associated MediaStream and cleans up
+   * the weak reference to the associated window listener.
    * This will also tell the window listener to remove its hard reference to
    * this SourceListener, so any caller will need to keep its own hard ref.
    */
   void Stop();
 
   /**
-   * Posts a task to stop the device associated with aTrack and notifies the
+   * Posts a task to stop the device associated with aTrackID and notifies the
    * associated window listener that a track was stopped.
    * Should this track be the last live one to be stopped, we'll also call Stop.
    * This might tell the window listener to remove its hard reference to this
    * SourceListener, so any caller will need to keep its own hard ref.
    */
-  void StopTrack(MediaTrack* aTrack);
-
-  /**
-   * Like StopTrack with the audio device's track.
-   */
-  void StopAudioTrack();
-
-  /**
-   * Like StopTrack with the video device's track.
-   */
-  void StopVideoTrack();
+  void StopTrack(TrackID aTrackID);
 
   /**
    * Gets the main thread MediaTrackSettings from the MediaEngineSource
-   * associated with aTrack.
+   * associated with aTrackID.
    */
-  void GetSettingsFor(MediaTrack* aTrack,
-                      MediaTrackSettings& aOutSettings) const;
+  void GetSettingsFor(TrackID aTrackID, MediaTrackSettings& aOutSettings) const;
 
   /**
    * Posts a task to set the enabled state of the device associated with
-   * aTrack to aEnabled and notifies the associated window listener that a
+   * aTrackID to aEnabled and notifies the associated window listener that a
    * track's state has changed.
    *
    * Turning the hardware off while the device is disabled is supported for:
    * - Camera (enabled by default, controlled by pref
    *   "media.getusermedia.camera.off_while_disabled.enabled")
    * - Microphone (disabled by default, controlled by pref
    *   "media.getusermedia.microphone.off_while_disabled.enabled")
    * Screen-, app-, or windowsharing is not supported at this time.
@@ -379,17 +368,17 @@ class SourceListener : public SupportsWe
    * This is now defaulting to 3 seconds but can be overriden by prefs:
    * - "media.getusermedia.camera.off_while_disabled.delay_ms" and
    * - "media.getusermedia.microphone.off_while_disabled.delay_ms".
    *
    * The delay is in place to prevent misuse by malicious sites. If a track is
    * re-enabled before the delay has passed, the device will not be touched
    * until another disable followed by the full delay happens.
    */
-  void SetEnabledFor(MediaTrack* aTrack, bool aEnabled);
+  void SetEnabledFor(TrackID aTrackID, bool aEnabled);
 
   /**
    * Stops all screen/app/window/audioCapture sharing, but not camera or
    * microphone.
    */
   void StopSharing();
 
   MediaDevice* GetAudioDevice() const {
@@ -406,56 +395,56 @@ class SourceListener : public SupportsWe
 
   bool CapturingVideo() const;
 
   bool CapturingAudio() const;
 
   CaptureState CapturingSource(MediaSourceEnum aSource) const;
 
   RefPtr<SourceListenerPromise> ApplyConstraintsToTrack(
-      MediaTrack* aTrack, const MediaTrackConstraints& aConstraints,
+      TrackID aTrackID, const MediaTrackConstraints& aConstraints,
       CallerType aCallerType);
 
   PrincipalHandle GetPrincipalHandle() const;
 
  private:
   virtual ~SourceListener() = default;
 
   /**
-   * Returns a pointer to the device state for aTrack.
+   * Returns a pointer to the device state for aTrackID.
    *
    * This is intended for internal use where we need to figure out which state
-   * corresponds to aTrack, not for availability checks. As such, we assert
+   * corresponds to aTrackID, not for availability checks. As such, we assert
    * that the device does indeed exist.
    *
    * Since this is a raw pointer and the state lifetime depends on the
    * SourceListener's lifetime, it's internal use only.
    */
-  DeviceState& GetDeviceStateFor(MediaTrack* aTrack) const;
+  DeviceState& GetDeviceStateFor(TrackID aTrackID) const;
 
   // true after this listener has had all devices stopped. MainThread only.
   bool mStopped;
 
   // never ever indirect off this; just for assertions
   PRThread* mMainThreadCheck;
 
   // Set in Register() on main thread, then read from any thread.
   PrincipalHandle mPrincipalHandle;
 
   // Weak pointer to the window listener that owns us. MainThread only.
   GetUserMediaWindowListener* mWindowListener;
 
-  // Accessed from MediaTrackGraph thread, MediaManager thread, and MainThread
+  // Accessed from MediaStreamGraph thread, MediaManager thread, and MainThread
   // No locking needed as they're set on Activate() and never assigned to again.
   UniquePtr<DeviceState> mAudioDeviceState;
   UniquePtr<DeviceState> mVideoDeviceState;
 };
 
 /**
- * This class represents a WindowID and handles all MediaTrackListeners
+ * This class represents a WindowID and handles all MediaStreamTrackListeners
  * (here subclassed as SourceListeners) used to feed GetUserMedia source
  * streams. It proxies feedback from them into messages for browser chrome.
  * The SourceListeners are used to Start() and Stop() the underlying
  * MediaEngineSource when MediaStreams are assigned and deassigned in content.
  */
 class GetUserMediaWindowListener {
   friend MediaManager;
 
@@ -694,16 +683,18 @@ class GetUserMediaWindowListener {
 
  private:
   ~GetUserMediaWindowListener() {
     MOZ_ASSERT(mInactiveListeners.Length() == 0,
                "Inactive listeners should already be removed");
     MOZ_ASSERT(mActiveListeners.Length() == 0,
                "Active listeners should already be removed");
     Unused << mMediaThread;
+    // It's OK to release mStream on any thread; they have thread-safe
+    // refcounts.
   }
 
   // Set at construction
   base::Thread* mMediaThread;
 
   uint64_t mWindowID;
   const PrincipalHandle mPrincipalHandle;
 
@@ -714,21 +705,22 @@ class GetUserMediaWindowListener {
   nsTArray<RefPtr<SourceListener>> mInactiveListeners;
   nsTArray<RefPtr<SourceListener>> mActiveListeners;
 };
 
 class LocalTrackSource : public MediaStreamTrackSource {
  public:
   LocalTrackSource(nsIPrincipal* aPrincipal, const nsString& aLabel,
                    const RefPtr<SourceListener>& aListener,
-                   MediaSourceEnum aSource, MediaTrack* aTrack,
-                   RefPtr<PeerIdentity> aPeerIdentity)
+                   MediaSourceEnum aSource, MediaStream* aStream,
+                   TrackID aTrackID, RefPtr<PeerIdentity> aPeerIdentity)
       : MediaStreamTrackSource(aPrincipal, aLabel),
         mSource(aSource),
-        mTrack(aTrack),
+        mStream(aStream),
+        mTrackID(aTrackID),
         mPeerIdentity(std::move(aPeerIdentity)),
         mListener(aListener.get()) {}
 
   MediaSourceEnum GetMediaSource() const override { return mSource; }
 
   const PeerIdentity* GetPeerIdentity() const override { return mPeerIdentity; }
 
   RefPtr<MediaStreamTrackSource::ApplyConstraintsPromise> ApplyConstraints(
@@ -736,107 +728,110 @@ class LocalTrackSource : public MediaStr
       CallerType aCallerType) override {
     MOZ_ASSERT(NS_IsMainThread());
     if (sHasShutdown || !mListener) {
       // Track has been stopped, or we are in shutdown. In either case
       // there's no observable outcome, so pretend we succeeded.
       return MediaStreamTrackSource::ApplyConstraintsPromise::CreateAndResolve(
           false, __func__);
     }
-    return mListener->ApplyConstraintsToTrack(mTrack, aConstraints,
+    return mListener->ApplyConstraintsToTrack(mTrackID, aConstraints,
                                               aCallerType);
   }
 
   void GetSettings(MediaTrackSettings& aOutSettings) override {
     if (mListener) {
-      mListener->GetSettingsFor(mTrack, aOutSettings);
+      mListener->GetSettingsFor(mTrackID, aOutSettings);
     }
   }
 
   void Stop() override {
     if (mListener) {
-      mListener->StopTrack(mTrack);
+      mListener->StopTrack(mTrackID);
       mListener = nullptr;
     }
-    if (!mTrack->IsDestroyed()) {
-      mTrack->Destroy();
+    if (!mStream->IsDestroyed()) {
+      mStream->Destroy();
     }
   }
 
   void Disable() override {
     if (mListener) {
-      mListener->SetEnabledFor(mTrack, false);
+      mListener->SetEnabledFor(mTrackID, false);
     }
   }
 
   void Enable() override {
     if (mListener) {
-      mListener->SetEnabledFor(mTrack, true);
+      mListener->SetEnabledFor(mTrackID, true);
     }
   }
 
   const MediaSourceEnum mSource;
-  const RefPtr<MediaTrack> mTrack;
+  const RefPtr<MediaStream> mStream;
+  const TrackID mTrackID;
   const RefPtr<const PeerIdentity> mPeerIdentity;
 
  protected:
   ~LocalTrackSource() {
     MOZ_ASSERT(NS_IsMainThread());
-    MOZ_ASSERT(mTrack->IsDestroyed());
+    MOZ_ASSERT(mStream->IsDestroyed());
   }
 
   // This is a weak pointer to avoid having the SourceListener (which may
   // have references to threads and threadpools) kept alive by DOM-objects
   // that may have ref-cycles and thus are released very late during
   // shutdown, even after xpcom-shutdown-threads. See bug 1351655 for what
   // can happen.
   WeakPtr<SourceListener> mListener;
 };
 
 class AudioCaptureTrackSource : public LocalTrackSource {
  public:
   AudioCaptureTrackSource(nsIPrincipal* aPrincipal, nsPIDOMWindowInner* aWindow,
                           const nsString& aLabel,
-                          AudioCaptureTrack* aAudioCaptureTrack,
+                          AudioCaptureStream* aAudioCaptureStream,
                           RefPtr<PeerIdentity> aPeerIdentity)
       : LocalTrackSource(aPrincipal, aLabel, nullptr,
-                         MediaSourceEnum::AudioCapture, aAudioCaptureTrack,
-                         std::move(aPeerIdentity)),
+                         MediaSourceEnum::AudioCapture, aAudioCaptureStream,
+                         kAudioTrack, std::move(aPeerIdentity)),
         mWindow(aWindow),
-        mAudioCaptureTrack(aAudioCaptureTrack) {
-    mAudioCaptureTrack->Start();
-    mAudioCaptureTrack->Graph()->RegisterCaptureTrackForWindow(
-        mWindow->WindowID(), mAudioCaptureTrack);
+        mAudioCaptureStream(aAudioCaptureStream) {
+    mAudioCaptureStream->Start();
+    mAudioCaptureStream->Graph()->RegisterCaptureStreamForWindow(
+        mWindow->WindowID(), mAudioCaptureStream);
     mWindow->SetAudioCapture(true);
   }
 
   void Stop() override {
     MOZ_ASSERT(NS_IsMainThread());
-    if (!mAudioCaptureTrack->IsDestroyed()) {
+    if (!mAudioCaptureStream->IsDestroyed()) {
       MOZ_ASSERT(mWindow);
       mWindow->SetAudioCapture(false);
-      mAudioCaptureTrack->Graph()->UnregisterCaptureTrackForWindow(
+      mAudioCaptureStream->Graph()->UnregisterCaptureStreamForWindow(
           mWindow->WindowID());
       mWindow = nullptr;
     }
-    // LocalTrackSource destroys the track.
+    // LocalTrackSource destroys the stream.
     LocalTrackSource::Stop();
-    MOZ_ASSERT(mAudioCaptureTrack->IsDestroyed());
+    MOZ_ASSERT(mAudioCaptureStream->IsDestroyed());
   }
 
-  ProcessedMediaTrack* InputTrack() const { return mAudioCaptureTrack.get(); }
+  ProcessedMediaStream* InputStream() const {
+    return mAudioCaptureStream.get();
+  }
 
  protected:
   ~AudioCaptureTrackSource() {
     MOZ_ASSERT(NS_IsMainThread());
-    MOZ_ASSERT(mAudioCaptureTrack->IsDestroyed());
+    MOZ_ASSERT(mAudioCaptureStream->IsDestroyed());
   }
 
   RefPtr<nsPIDOMWindowInner> mWindow;
-  const RefPtr<AudioCaptureTrack> mAudioCaptureTrack;
+  const RefPtr<AudioCaptureStream> mAudioCaptureStream;
 };
 
 /**
  * nsIMediaDevice implementation.
  */
 NS_IMPL_ISUPPORTS(MediaDevice, nsIMediaDevice)
 
 MediaDevice::MediaDevice(const RefPtr<MediaEngineSource>& aSource,
@@ -1041,21 +1036,22 @@ nsresult MediaDevice::Allocate(const Med
       aConstraints.mDeviceId.Value().GetAsString().EqualsASCII("bad device")) {
     return NS_ERROR_FAILURE;
   }
 
   return mSource->Allocate(aConstraints, aPrefs, aPrincipalInfo,
                            aOutBadConstraint);
 }
 
-void MediaDevice::SetTrack(const RefPtr<SourceMediaTrack>& aTrack,
+void MediaDevice::SetTrack(const RefPtr<SourceMediaStream>& aStream,
+                           TrackID aTrackID,
                            const PrincipalHandle& aPrincipalHandle) {
   MOZ_ASSERT(MediaManager::IsInMediaThread());
   MOZ_ASSERT(mSource);
-  mSource->SetTrack(aTrack, aPrincipalHandle);
+  mSource->SetTrack(aStream, aTrackID, aPrincipalHandle);
 }
 
 nsresult MediaDevice::Start() {
   MOZ_ASSERT(MediaManager::IsInMediaThread());
   MOZ_ASSERT(mSource);
   return mSource->Start();
 }
 
@@ -1113,27 +1109,27 @@ static bool IsOn(const OwningBooleanOrMe
 static const MediaTrackConstraints& GetInvariant(
     const OwningBooleanOrMediaTrackConstraints& aUnion) {
   static const MediaTrackConstraints empty;
   return aUnion.IsMediaTrackConstraints() ? aUnion.GetAsMediaTrackConstraints()
                                           : empty;
 }
 
 /**
- * Creates a MediaTrack, attaches a listener and fires off a success callback
+ * Creates a MediaStream, attaches a listener and fires off a success callback
  * to the DOM with the stream. We also pass in the error callback so it can
  * be released correctly.
  *
  * All of this must be done on the main thread!
  *
  * Note that the various GetUserMedia Runnable classes currently allow for
- * two tracks.  If we ever need to support getting more than two tracks
+ * two streams.  If we ever need to support getting more than two streams
  * at once, we could convert everything to nsTArray<RefPtr<blah> >'s,
  * though that would complicate the constructors some.  Currently the
- * GetUserMedia spec does not allow for more than 2 tracks to be obtained in
+ * GetUserMedia spec does not allow for more than 2 streams to be obtained in
  * one call, to simplify handling of constraints.
  */
 class GetUserMediaStreamRunnable : public Runnable {
  public:
   GetUserMediaStreamRunnable(
       MozPromiseHolder<MediaManager::StreamPromise>&& aHolder,
       uint64_t aWindowID, RefPtr<GetUserMediaWindowListener> aWindowListener,
       RefPtr<SourceListener> aSourceListener,
@@ -1167,76 +1163,78 @@ class GetUserMediaStreamRunnable : publi
 
     // We're on main-thread, and the windowlist can only
     // be invalidated from the main-thread (see OnNavigation)
     if (!mManager->IsWindowListenerStillActive(mWindowListener)) {
       // This window is no longer live. mListener has already been removed.
       return NS_OK;
     }
 
-    MediaTrackGraph::GraphDriverType graphDriverType =
-        mAudioDevice ? MediaTrackGraph::AUDIO_THREAD_DRIVER
-                     : MediaTrackGraph::SYSTEM_THREAD_DRIVER;
-    MediaTrackGraph* mtg = MediaTrackGraph::GetInstance(
-        graphDriverType, window, MediaTrackGraph::REQUEST_DEFAULT_SAMPLE_RATE);
+    MediaStreamGraph::GraphDriverType graphDriverType =
+        mAudioDevice ? MediaStreamGraph::AUDIO_THREAD_DRIVER
+                     : MediaStreamGraph::SYSTEM_THREAD_DRIVER;
+    MediaStreamGraph* msg = MediaStreamGraph::GetInstance(
+        graphDriverType, window, MediaStreamGraph::REQUEST_DEFAULT_SAMPLE_RATE);
 
     auto domStream = MakeRefPtr<DOMMediaStream>(window);
     RefPtr<LocalTrackSource> audioTrackSource;
     RefPtr<LocalTrackSource> videoTrackSource;
     nsCOMPtr<nsIPrincipal> principal;
     if (mPeerIdentity) {
       principal = NullPrincipal::CreateWithInheritedAttributes(
           window->GetExtantDoc()->NodePrincipal());
     } else {
       principal = window->GetExtantDoc()->NodePrincipal();
     }
     RefPtr<GenericNonExclusivePromise> firstFramePromise;
     if (mAudioDevice) {
       if (mAudioDevice->GetMediaSource() == MediaSourceEnum::AudioCapture) {
         // AudioCapture is a special case, here, in the sense that we're not
-        // really using the audio source and the SourceMediaTrack, which acts
-        // as placeholders. We re-route a number of tracks internally in the
-        // MTG and mix them down instead.
+        // really using the audio source and the SourceMediaStream, which acts
+        // as placeholders. We re-route a number of streams internally in the
+        // MSG and mix them down instead.
         NS_WARNING(
             "MediaCaptureWindowState doesn't handle "
             "MediaSourceEnum::AudioCapture. This must be fixed with UX "
             "before shipping.");
         auto audioCaptureSource = MakeRefPtr<AudioCaptureTrackSource>(
             principal, window, NS_LITERAL_STRING("Window audio capture"),
-            mtg->CreateAudioCaptureTrack(), mPeerIdentity);
+            msg->CreateAudioCaptureStream(kAudioTrack), mPeerIdentity);
         audioTrackSource = audioCaptureSource;
-        RefPtr<MediaStreamTrack> track = new dom::AudioStreamTrack(
-            window, audioCaptureSource->InputTrack(), audioCaptureSource);
+        RefPtr<MediaStreamTrack> track =
+            new dom::AudioStreamTrack(window, audioCaptureSource->InputStream(),
+                                      kAudioTrack, audioCaptureSource);
         domStream->AddTrackInternal(track);
       } else {
         nsString audioDeviceName;
         mAudioDevice->GetName(audioDeviceName);
-        RefPtr<MediaTrack> track = mtg->CreateSourceTrack(MediaSegment::AUDIO);
+        RefPtr<MediaStream> stream = msg->CreateSourceStream();
         audioTrackSource = new LocalTrackSource(
             principal, audioDeviceName, mSourceListener,
-            mAudioDevice->GetMediaSource(), track, mPeerIdentity);
+            mAudioDevice->GetMediaSource(), stream, kAudioTrack, mPeerIdentity);
         MOZ_ASSERT(IsOn(mConstraints.mAudio));
-        RefPtr<MediaStreamTrack> domTrack = new dom::AudioStreamTrack(
-            window, track, audioTrackSource, dom::MediaStreamTrackState::Live,
+        RefPtr<MediaStreamTrack> track = new dom::AudioStreamTrack(
+            window, stream, kAudioTrack, audioTrackSource,
+            dom::MediaStreamTrackState::Live,
             GetInvariant(mConstraints.mAudio));
-        domStream->AddTrackInternal(domTrack);
+        domStream->AddTrackInternal(track);
       }
     }
     if (mVideoDevice) {
       nsString videoDeviceName;
       mVideoDevice->GetName(videoDeviceName);
-      RefPtr<MediaTrack> track = mtg->CreateSourceTrack(MediaSegment::VIDEO);
+      RefPtr<MediaStream> stream = msg->CreateSourceStream();
       videoTrackSource = new LocalTrackSource(
           principal, videoDeviceName, mSourceListener,
-          mVideoDevice->GetMediaSource(), track, mPeerIdentity);
+          mVideoDevice->GetMediaSource(), stream, kVideoTrack, mPeerIdentity);
       MOZ_ASSERT(IsOn(mConstraints.mVideo));
-      RefPtr<MediaStreamTrack> domTrack = new dom::VideoStreamTrack(
-          window, track, videoTrackSource, dom::MediaStreamTrackState::Live,
-          GetInvariant(mConstraints.mVideo));
-      domStream->AddTrackInternal(domTrack);
+      RefPtr<MediaStreamTrack> track = new dom::VideoStreamTrack(
+          window, stream, kVideoTrack, videoTrackSource,
+          dom::MediaStreamTrackState::Live, GetInvariant(mConstraints.mVideo));
+      domStream->AddTrackInternal(track);
       switch (mVideoDevice->GetMediaSource()) {
         case MediaSourceEnum::Browser:
         case MediaSourceEnum::Screen:
         case MediaSourceEnum::Window:
           // Wait for first frame for screen-sharing devices, to ensure
           // with and height settings are available immediately, to pass wpt.
           firstFramePromise = mVideoDevice->mSource->GetFirstFramePromise();
           break;
@@ -4102,30 +4100,32 @@ SourceListener::InitializeAsync() {
   MOZ_DIAGNOSTIC_ASSERT(!mStopped);
 
   return MediaManager::PostTask<SourceListenerPromise>(
              __func__,
              [principal = GetPrincipalHandle(),
               audioDevice =
                   mAudioDeviceState ? mAudioDeviceState->mDevice : nullptr,
               audioStream = mAudioDeviceState
-                                ? mAudioDeviceState->mTrackSource->mTrack
+                                ? mAudioDeviceState->mTrackSource->mStream
                                 : nullptr,
               videoDevice =
                   mVideoDeviceState ? mVideoDeviceState->mDevice : nullptr,
               videoStream = mVideoDeviceState
-                                ? mVideoDeviceState->mTrackSource->mTrack
+                                ? mVideoDeviceState->mTrackSource->mStream
                                 : nullptr](
                  MozPromiseHolder<SourceListenerPromise>& aHolder) {
                if (audioDevice) {
-                 audioDevice->SetTrack(audioStream->AsSourceTrack(), principal);
+                 audioDevice->SetTrack(audioStream->AsSourceStream(),
+                                       kAudioTrack, principal);
                }
 
                if (videoDevice) {
-                 videoDevice->SetTrack(videoStream->AsSourceTrack(), principal);
+                 videoDevice->SetTrack(videoStream->AsSourceStream(),
+                                       kVideoTrack, principal);
                }
 
                if (audioDevice) {
                  nsresult rv = audioDevice->Start();
                  if (rv == NS_ERROR_NOT_AVAILABLE) {
                    PR_Sleep(200);
                    rv = audioDevice->Start();
                  }
@@ -4225,37 +4225,39 @@ void SourceListener::Stop() {
   }
   mStopped = true;
 
   LOG("SourceListener %p stopping", this);
 
   if (mAudioDeviceState) {
     mAudioDeviceState->mDisableTimer->Cancel();
     if (!mAudioDeviceState->mStopped) {
-      StopAudioTrack();
+      StopTrack(kAudioTrack);
     }
   }
   if (mVideoDeviceState) {
     mVideoDeviceState->mDisableTimer->Cancel();
     if (!mVideoDeviceState->mStopped) {
-      StopVideoTrack();
+      StopTrack(kVideoTrack);
     }
   }
 
   mWindowListener->Remove(this);
   mWindowListener = nullptr;
 }
 
-void SourceListener::StopTrack(MediaTrack* aTrack) {
+void SourceListener::StopTrack(TrackID aTrackID) {
   MOZ_ASSERT(NS_IsMainThread(), "Only call on main thread");
   MOZ_ASSERT(Activated(), "No device to stop");
-  DeviceState& state = GetDeviceStateFor(aTrack);
-
-  LOG("SourceListener %p stopping %s track for track %p", this,
-      &state == mAudioDeviceState.get() ? "audio" : "video", aTrack);
+  MOZ_ASSERT(aTrackID == kAudioTrack || aTrackID == kVideoTrack,
+             "Unknown track id");
+  DeviceState& state = GetDeviceStateFor(aTrackID);
+
+  LOG("SourceListener %p stopping %s track %d", this,
+      aTrackID == kAudioTrack ? "audio" : "video", aTrackID);
 
   if (state.mStopped) {
     // device already stopped.
     return;
   }
   state.mStopped = true;
 
   state.mDisableTimer->Cancel();
@@ -4270,47 +4272,41 @@ void SourceListener::StopTrack(MediaTrac
 
   if ((!mAudioDeviceState || mAudioDeviceState->mStopped) &&
       (!mVideoDeviceState || mVideoDeviceState->mStopped)) {
     LOG("SourceListener %p this was the last track stopped", this);
     Stop();
   }
 }
 
-void SourceListener::StopAudioTrack() {
-  StopTrack(mAudioDeviceState->mTrackSource->mTrack);
-}
-
-void SourceListener::StopVideoTrack() {
-  StopTrack(mVideoDeviceState->mTrackSource->mTrack);
-}
-
-void SourceListener::GetSettingsFor(MediaTrack* aTrack,
+void SourceListener::GetSettingsFor(TrackID aTrackID,
                                     MediaTrackSettings& aOutSettings) const {
   MOZ_ASSERT(NS_IsMainThread(), "Only call on main thread");
-  DeviceState& state = GetDeviceStateFor(aTrack);
+  DeviceState& state = GetDeviceStateFor(aTrackID);
   state.mDevice->GetSettings(aOutSettings);
 
   MediaSourceEnum mediaSource = state.mDevice->GetMediaSource();
   if (mediaSource == MediaSourceEnum::Camera ||
       mediaSource == MediaSourceEnum::Microphone) {
     aOutSettings.mDeviceId.Construct(state.mDevice->mID);
     aOutSettings.mGroupId.Construct(state.mDevice->mGroupID);
   }
 }
 
-void SourceListener::SetEnabledFor(MediaTrack* aTrack, bool aEnable) {
+void SourceListener::SetEnabledFor(TrackID aTrackID, bool aEnable) {
   MOZ_ASSERT(NS_IsMainThread(), "Only call on main thread");
   MOZ_ASSERT(Activated(), "No device to set enabled state for");
-
-  DeviceState& state = GetDeviceStateFor(aTrack);
-
-  LOG("SourceListener %p %s %s track for track %p", this,
+  MOZ_ASSERT(aTrackID == kAudioTrack || aTrackID == kVideoTrack,
+             "Unknown track id");
+
+  LOG("SourceListener %p %s %s track %d", this,
       aEnable ? "enabling" : "disabling",
-      &state == mAudioDeviceState.get() ? "audio" : "video", aTrack);
+      aTrackID == kAudioTrack ? "audio" : "video", aTrackID);
+
+  DeviceState& state = GetDeviceStateFor(aTrackID);
 
   state.mTrackEnabled = aEnable;
 
   if (state.mStopped) {
     // Device terminally stopped. Updating device state is pointless.
     return;
   }
 
@@ -4333,45 +4329,42 @@ void SourceListener::SetEnabledFor(Media
 
   RefPtr<MediaTimerPromise> timerPromise;
   if (aEnable) {
     timerPromise = MediaTimerPromise::CreateAndResolve(true, __func__);
     state.mTrackEnabledTime = TimeStamp::Now();
   } else {
     const TimeDuration maxDelay =
         TimeDuration::FromMilliseconds(Preferences::GetUint(
-            &state == mAudioDeviceState.get()
+            aTrackID == kAudioTrack
                 ? "media.getusermedia.microphone.off_while_disabled.delay_ms"
                 : "media.getusermedia.camera.off_while_disabled.delay_ms",
             3000));
     const TimeDuration durationEnabled =
         TimeStamp::Now() - state.mTrackEnabledTime;
     const TimeDuration delay = TimeDuration::Max(
         TimeDuration::FromMilliseconds(0), maxDelay - durationEnabled);
     timerPromise = state.mDisableTimer->WaitFor(delay, __func__);
   }
 
   typedef MozPromise<nsresult, bool, /* IsExclusive = */ true>
       DeviceOperationPromise;
   RefPtr<SourceListener> self = this;
   timerPromise
       ->Then(
           GetMainThreadSerialEventTarget(), __func__,
-          [self, this, &state, track = RefPtr<MediaTrack>(aTrack),
-           aEnable]() mutable {
+          [self, this, &state, aTrackID, aEnable]() mutable {
             MOZ_ASSERT(state.mDeviceEnabled != aEnable,
                        "Device operation hasn't started");
             MOZ_ASSERT(state.mOperationInProgress,
                        "It's our responsibility to reset the inProgress state");
 
-            LOG("SourceListener %p %s %s track for track %p - starting device "
-                "operation",
+            LOG("SourceListener %p %s %s track %d - starting device operation",
                 this, aEnable ? "enabling" : "disabling",
-                &state == mAudioDeviceState.get() ? "audio" : "video",
-                track.get());
+                aTrackID == kAudioTrack ? "audio" : "video", aTrackID);
 
             if (state.mStopped) {
               // Source was stopped between timer resolving and this runnable.
               return DeviceOperationPromise::CreateAndResolve(NS_ERROR_ABORT,
                                                               __func__);
             }
 
             state.mDeviceEnabled = aEnable;
@@ -4396,43 +4389,42 @@ void SourceListener::SetEnabledFor(Media
           },
           []() {
             // Timer was canceled by us. We signal this with NS_ERROR_ABORT.
             return DeviceOperationPromise::CreateAndResolve(NS_ERROR_ABORT,
                                                             __func__);
           })
       ->Then(
           GetMainThreadSerialEventTarget(), __func__,
-          [self, this, &state, track = RefPtr<MediaTrack>(aTrack),
-           aEnable](nsresult aResult) mutable {
+          [self, this, &state, aTrackID, aEnable](nsresult aResult) mutable {
             MOZ_ASSERT_IF(aResult != NS_ERROR_ABORT,
                           state.mDeviceEnabled == aEnable);
             MOZ_ASSERT(state.mOperationInProgress);
             state.mOperationInProgress = false;
 
             if (state.mStopped) {
               // Device was stopped on main thread during the operation. Nothing
               // to do.
               return;
             }
 
-            LOG("SourceListener %p %s %s track for track %p %s", this,
+            LOG("SourceListener %p %s %s track %d %s", this,
                 aEnable ? "enabling" : "disabling",
-                &state == mAudioDeviceState.get() ? "audio" : "video",
-                track.get(), NS_SUCCEEDED(aResult) ? "succeeded" : "failed");
+                aTrackID == kAudioTrack ? "audio" : "video", aTrackID,
+                NS_SUCCEEDED(aResult) ? "succeeded" : "failed");
 
             if (NS_FAILED(aResult) && aResult != NS_ERROR_ABORT) {
               // This path handles errors from starting or stopping the device.
               // NS_ERROR_ABORT are for cases where *we* aborted. They need
               // graceful handling.
               if (aEnable) {
                 // Starting the device failed. Stopping the track here will make
                 // the MediaStreamTrack end after a pass through the
-                // MediaTrackGraph.
-                StopTrack(track);
+                // MediaStreamGraph.
+                StopTrack(aTrackID);
               } else {
                 // Stopping the device failed. This is odd, but not fatal.
                 MOZ_ASSERT_UNREACHABLE("The device should be stoppable");
 
                 // To keep our internal state sane in this case, we disallow
                 // future stops due to disable.
                 state.mOffWhileDisabled = false;
               }
@@ -4448,19 +4440,19 @@ void SourceListener::SetEnabledFor(Media
             if (state.mTrackEnabled == state.mDeviceEnabled) {
               // Intended state is same as device's current state.
               // Nothing more to do.
               return;
             }
 
             // Track state changed during this operation. We'll start over.
             if (state.mTrackEnabled) {
-              SetEnabledFor(track, true);
+              SetEnabledFor(aTrackID, true);
             } else {
-              SetEnabledFor(track, false);
+              SetEnabledFor(aTrackID, false);
             }
           },
           []() { MOZ_ASSERT_UNREACHABLE("Unexpected and unhandled reject"); });
 }
 
 void SourceListener::StopSharing() {
   MOZ_ASSERT(NS_IsMainThread());
 
@@ -4471,20 +4463,20 @@ void SourceListener::StopSharing() {
   MOZ_RELEASE_ASSERT(mWindowListener);
   LOG("SourceListener %p StopSharing", this);
 
   RefPtr<SourceListener> self(this);
   if (mVideoDeviceState && (mVideoDeviceState->mDevice->GetMediaSource() ==
                                 MediaSourceEnum::Screen ||
                             mVideoDeviceState->mDevice->GetMediaSource() ==
                                 MediaSourceEnum::Window)) {
-    // We want to stop the whole track if there's no audio;
+    // We want to stop the whole stream if there's no audio;
     // just the video track if we have both.
     // StopTrack figures this out for us.
-    StopTrack(mVideoDeviceState->mTrackSource->mTrack);
+    StopTrack(kVideoTrack);
   }
   if (mAudioDeviceState && mAudioDeviceState->mDevice->GetMediaSource() ==
                                MediaSourceEnum::AudioCapture) {
     static_cast<AudioCaptureTrackSource*>(mAudioDeviceState->mTrackSource.get())
         ->Stop();
   }
 }
 
@@ -4535,24 +4527,24 @@ CaptureState SourceListener::CapturingSo
     return CaptureState::Enabled;
   }
 
   return CaptureState::Disabled;
 }
 
 RefPtr<SourceListener::SourceListenerPromise>
 SourceListener::ApplyConstraintsToTrack(
-    MediaTrack* aTrack, const MediaTrackConstraints& aConstraints,
+    TrackID aTrackID, const MediaTrackConstraints& aConstraints,
     CallerType aCallerType) {
   MOZ_ASSERT(NS_IsMainThread());
-  DeviceState& state = GetDeviceStateFor(aTrack);
+  DeviceState& state = GetDeviceStateFor(aTrackID);
 
   if (mStopped || state.mStopped) {
-    LOG("gUM %s track for track %p applyConstraints, but source is stopped",
-        &state == mAudioDeviceState.get() ? "audio" : "video", aTrack);
+    LOG("gUM %s track %d applyConstraints, but source is stopped",
+        aTrackID == kAudioTrack ? "audio" : "video", aTrackID);
     return SourceListenerPromise::CreateAndResolve(false, __func__);
   }
 
   MediaManager* mgr = MediaManager::GetIfExists();
   if (!mgr) {
     return SourceListenerPromise::CreateAndResolve(false, __func__);
   }
 
@@ -4593,24 +4585,29 @@ SourceListener::ApplyConstraintsToTrack(
         aHolder.Resolve(false, __func__);
       });
 }
 
 PrincipalHandle SourceListener::GetPrincipalHandle() const {
   return mPrincipalHandle;
 }
 
-DeviceState& SourceListener::GetDeviceStateFor(MediaTrack* aTrack) const {
-  if (mAudioDeviceState && mAudioDeviceState->mTrackSource->mTrack == aTrack) {
-    return *mAudioDeviceState;
+DeviceState& SourceListener::GetDeviceStateFor(TrackID aTrackID) const {
+  // XXX to support multiple tracks of a type in a stream, this should key off
+  // the TrackID and not just the type
+  switch (aTrackID) {
+    case kAudioTrack:
+      MOZ_ASSERT(mAudioDeviceState, "No audio device");
+      return *mAudioDeviceState;
+    case kVideoTrack:
+      MOZ_ASSERT(mVideoDeviceState, "No video device");
+      return *mVideoDeviceState;
+    default:
+      MOZ_CRASH("Unknown track id");
   }
-  if (mVideoDeviceState && mVideoDeviceState->mTrackSource->mTrack == aTrack) {
-    return *mVideoDeviceState;
-  }
-  MOZ_CRASH("Unknown track");
 }
 
 // Doesn't kill audio
 void GetUserMediaWindowListener::StopSharing() {
   MOZ_ASSERT(NS_IsMainThread(), "Only call on main thread");
 
   for (auto& l : nsTArray<RefPtr<SourceListener>>(mActiveListeners)) {
     l->StopSharing();
@@ -4620,24 +4617,24 @@ void GetUserMediaWindowListener::StopSha
 void GetUserMediaWindowListener::StopRawID(const nsString& removedDeviceID) {
   MOZ_ASSERT(NS_IsMainThread(), "Only call on main thread");
 
   for (auto& source : nsTArray<RefPtr<SourceListener>>(mActiveListeners)) {
     if (source->GetAudioDevice()) {
       nsString id;
       source->GetAudioDevice()->GetRawId(id);
       if (removedDeviceID.Equals(id)) {
-        source->StopAudioTrack();
+        source->StopTrack(kAudioTrack);
       }
     }
     if (source->GetVideoDevice()) {
       nsString id;
       source->GetVideoDevice()->GetRawId(id);
       if (removedDeviceID.Equals(id)) {
-        source->StopVideoTrack();
+        source->StopTrack(kVideoTrack);
       }
     }
   }
 }
 
 void GetUserMediaWindowListener::ChromeAffectingStateChanged() {
   MOZ_ASSERT(NS_IsMainThread());
 
--- a/dom/media/MediaManager.h
+++ b/dom/media/MediaManager.h
@@ -80,17 +80,17 @@ class MediaDevice : public nsIMediaDevic
   uint32_t GetBestFitnessDistance(
       const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
       bool aIsChrome);
 
   nsresult Allocate(const dom::MediaTrackConstraints& aConstraints,
                     const MediaEnginePrefs& aPrefs,
                     const mozilla::ipc::PrincipalInfo& aPrincipalInfo,
                     const char** aOutBadConstraint);
-  void SetTrack(const RefPtr<SourceMediaTrack>& aTrack,
+  void SetTrack(const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
                 const PrincipalHandle& aPrincipal);
   nsresult Start();
   nsresult Reconfigure(const dom::MediaTrackConstraints& aConstraints,
                        const MediaEnginePrefs& aPrefs,
                        const char** aOutBadConstraint);
   nsresult FocusOnSelectedSource();
   nsresult Stop();
   nsresult Deallocate();
@@ -194,17 +194,17 @@ class MediaManager final : public nsIMed
   typedef dom::NavigatorUserMediaSuccessCallback GetUserMediaSuccessCallback;
   typedef dom::NavigatorUserMediaErrorCallback GetUserMediaErrorCallback;
 
   MOZ_CAN_RUN_SCRIPT
   static void CallOnError(GetUserMediaErrorCallback& aCallback,
                           dom::MediaStreamError& aError);
   MOZ_CAN_RUN_SCRIPT
   static void CallOnSuccess(GetUserMediaSuccessCallback& aCallback,
-                            DOMMediaStream& aTrack);
+                            DOMMediaStream& aStream);
 
   typedef nsTArray<RefPtr<MediaDevice>> MediaDeviceSet;
   typedef media::Refcountable<MediaDeviceSet> MediaDeviceSetRefCnt;
 
   typedef MozPromise<RefPtr<DOMMediaStream>, RefPtr<MediaMgrError>, true>
       StreamPromise;
   typedef MozPromise<RefPtr<MediaDeviceSetRefCnt>, RefPtr<MediaMgrError>, true>
       DevicesPromise;
--- a/dom/media/MediaPlaybackDelayPolicy.cpp
+++ b/dom/media/MediaPlaybackDelayPolicy.cpp
@@ -2,17 +2,16 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "MediaPlaybackDelayPolicy.h"
 
 #include "nsPIDOMWindow.h"
 #include "mozilla/dom/HTMLMediaElement.h"
-#include "mozilla/StaticPrefs_media.h"
 
 namespace mozilla {
 namespace dom {
 
 using AudibleState = AudioChannelService::AudibleState;
 
 static AudibleState DetermineMediaAudibleState(const HTMLMediaElement* aElement,
                                                bool aIsAudible) {
--- a/dom/media/MediaRecorder.cpp
+++ b/dom/media/MediaRecorder.cpp
@@ -2,22 +2,22 @@
 /* vim:set ts=2 sw=2 sts=2 et cindent: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "MediaRecorder.h"
 
 #include "AudioNodeEngine.h"
-#include "AudioNodeTrack.h"
+#include "AudioNodeStream.h"
 #include "DOMMediaStream.h"
 #include "GeckoProfiler.h"
 #include "MediaDecoder.h"
 #include "MediaEncoder.h"
-#include "MediaTrackGraphImpl.h"
+#include "MediaStreamGraphImpl.h"
 #include "VideoUtils.h"
 #include "mozilla/DOMEventTargetHelper.h"
 #include "mozilla/dom/AudioStreamTrack.h"
 #include "mozilla/dom/BlobEvent.h"
 #include "mozilla/dom/File.h"
 #include "mozilla/dom/MediaRecorderErrorEvent.h"
 #include "mozilla/dom/MutableBlobStorage.h"
 #include "mozilla/dom/VideoStreamTrack.h"
@@ -175,25 +175,25 @@ NS_IMPL_RELEASE_INHERITED(MediaRecorder,
  * MediaRecorder::Start before Read Thread shutdown, the same recording context
  * in MediaRecorder might be access by two Reading Threads, which cause a
  * problem. In the new design, we put recording context into Session object,
  * including Read Thread.  Each Session has its own recording context and Read
  * Thread, problem is been resolved.
  *
  * Life cycle of a Session object.
  * 1) Initialization Stage (in main thread)
- *    Setup media tracks in MTG, and bind MediaEncoder with Source Stream when
+ *    Setup media streams in MSG, and bind MediaEncoder with Source Stream when
  * mStream is available. Resource allocation, such as encoded data cache buffer
  * and MediaEncoder. Create read thread. Automatically switch to Extract stage
  * in the end of this stage. 2) Extract Stage (in Read Thread) Pull encoded A/V
  * frames from MediaEncoder, dispatch to OnDataAvailable handler. Unless a
  * client calls Session::Stop, Session object keeps stay in this stage. 3)
  * Destroy Stage (in main thread) Switch from Extract stage to Destroy stage by
- * calling Session::Stop. Release session resource and remove associated tracks
- * from MTG.
+ * calling Session::Stop. Release session resource and remove associated streams
+ * from MSG.
  *
  * Lifetime of MediaRecorder and Session objects.
  * 1) MediaRecorder creates a Session in MediaRecorder::Start function and holds
  *    a reference to Session. Then the Session registers itself to a
  *    ShutdownBlocker and also holds a reference to MediaRecorder.
  *    Therefore, the reference dependency in gecko is:
  *    ShutdownBlocker -> Session <-> MediaRecorder, note that there is a cycle
  *    reference between Session and MediaRecorder.
@@ -1183,26 +1183,26 @@ class MediaRecorder::Session : public Pr
   Result<RunningState, nsresult> mRunningState;
 };
 
 MediaRecorder::~MediaRecorder() {
   LOG(LogLevel::Debug, ("~MediaRecorder (%p)", this));
   UnRegisterActivityObserver();
 }
 
-MediaRecorder::MediaRecorder(DOMMediaStream& aSourceMediaTrack,
+MediaRecorder::MediaRecorder(DOMMediaStream& aSourceMediaStream,
                              nsPIDOMWindowInner* aOwnerWindow)
     : DOMEventTargetHelper(aOwnerWindow),
       mAudioNodeOutput(0),
       mState(RecordingState::Inactive),
       mAudioBitsPerSecond(0),
       mVideoBitsPerSecond(0),
       mBitsPerSecond(0) {
   MOZ_ASSERT(aOwnerWindow);
-  mDOMStream = &aSourceMediaTrack;
+  mDOMStream = &aSourceMediaStream;
 
   RegisterActivityObserver();
 }
 
 MediaRecorder::MediaRecorder(AudioNode& aSrcAudioNode, uint32_t aSrcOutput,
                              nsPIDOMWindowInner* aOwnerWindow)
     : DOMEventTargetHelper(aOwnerWindow),
       mAudioNodeOutput(aSrcOutput),
--- a/dom/media/MediaRecorder.h
+++ b/dom/media/MediaRecorder.h
@@ -11,20 +11,21 @@
 #include "mozilla/DOMEventTargetHelper.h"
 #include "mozilla/MozPromise.h"
 #include "nsIDocumentActivity.h"
 
 // Max size for allowing queue encoded data in memory
 #define MAX_ALLOW_MEMORY_BUFFER 1024000
 namespace mozilla {
 
-class AudioNodeTrack;
+class AudioNodeStream;
 class DOMMediaStream;
 class ErrorResult;
 struct MediaRecorderOptions;
+class MediaStream;
 class GlobalObject;
 
 namespace dom {
 
 class AudioNode;
 class Blob;
 class Document;
 class DOMException;
@@ -44,17 +45,17 @@ class DOMException;
  * or RequestData function called by UA.
  */
 
 class MediaRecorder final : public DOMEventTargetHelper,
                             public nsIDocumentActivity {
  public:
   class Session;
 
-  MediaRecorder(DOMMediaStream& aSourceMediaTrack,
+  MediaRecorder(DOMMediaStream& aSourceMediaStream,
                 nsPIDOMWindowInner* aOwnerWindow);
   MediaRecorder(AudioNode& aSrcAudioNode, uint32_t aSrcOutput,
                 nsPIDOMWindowInner* aOwnerWindow);
 
   static nsTArray<RefPtr<Session>> GetSessions();
 
   // nsWrapperCache
   JSObject* WrapObject(JSContext* aCx,
--- a/dom/media/MediaSegment.h
+++ b/dom/media/MediaSegment.h
@@ -1,18 +1,19 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef MOZILLA_MEDIASEGMENT_H_
 #define MOZILLA_MEDIASEGMENT_H_
 
-#include "PrincipalHandle.h"
 #include "nsTArray.h"
+#include "nsIPrincipal.h"
+#include "nsProxyRelease.h"
 #ifdef MOZILLA_INTERNAL_API
 #  include "mozilla/TimeStamp.h"
 #endif
 #include <algorithm>
 
 namespace mozilla {
 
 /**
@@ -20,60 +21,103 @@ namespace mozilla {
  * maximum avoids overflow in conversions between track rates and conversions
  * from seconds.
  */
 typedef int32_t TrackRate;
 const int64_t TRACK_RATE_MAX_BITS = 20;
 const TrackRate TRACK_RATE_MAX = 1 << TRACK_RATE_MAX_BITS;
 
 /**
- * A number of ticks at a rate determined by some underlying track (e.g., audio
- * sample rate). We want to make sure that multiplying TrackTicks by a TrackRate
- * doesn't overflow, so we set its max accordingly.
- * TrackTime should be used instead when we're working with MediaTrackGraph's
- * rate, but TrackTicks can be used outside MediaTracks when we have data at a
- * different rate.
+ * A number of ticks at a rate determined by some underlying track (e.g.
+ * audio sample rate). We want to make sure that multiplying TrackTicks by
+ * a TrackRate doesn't overflow, so we set its max accordingly.
+ * StreamTime should be used instead when we're working with MediaStreamGraph's
+ * rate, but TrackTicks can be used outside MediaStreams when we have data
+ * at a different rate.
  */
 typedef int64_t TrackTicks;
 const int64_t TRACK_TICKS_MAX = INT64_MAX >> TRACK_RATE_MAX_BITS;
 
 /**
  * We represent media times in 64-bit audio frame counts or ticks.
- * All tracks in a MediaTrackGraph have the same rate.
+ * All tracks in a MediaStreamGraph have the same rate.
  */
 typedef int64_t MediaTime;
 const int64_t MEDIA_TIME_MAX = TRACK_TICKS_MAX;
 
 /**
- * Media time relative to the start of a MediaTrack.
+ * Media time relative to the start of a StreamTracks.
  */
-typedef MediaTime TrackTime;
-const TrackTime TRACK_TIME_MAX = MEDIA_TIME_MAX;
+typedef MediaTime StreamTime;
+const StreamTime STREAM_TIME_MAX = MEDIA_TIME_MAX;
 
 /**
  * Media time relative to the start of the graph timeline.
  */
 typedef MediaTime GraphTime;
 const GraphTime GRAPH_TIME_MAX = MEDIA_TIME_MAX;
 
 /**
  * The number of chunks allocated by default for a MediaSegment.
  * Appending more chunks than this will cause further allocations.
  *
  * 16 is an arbitrary number intended to cover the most common cases in the
- * MediaTrackGraph (1 with silence and 1-2 with data for a realtime track)
+ * MediaStreamGraph (1 with silence and 1-2 with data for a realtime track)
  * with some margin.
  */
 const size_t DEFAULT_SEGMENT_CAPACITY = 16;
 
 /**
+ * We pass the principal through the MediaStreamGraph by wrapping it in a thread
+ * safe nsMainThreadPtrHandle, since it cannot be used directly off the main
+ * thread. We can compare two PrincipalHandles to each other on any thread, but
+ * they can only be created and converted back to nsIPrincipal* on main thread.
+ */
+typedef nsMainThreadPtrHandle<nsIPrincipal> PrincipalHandle;
+
+inline PrincipalHandle MakePrincipalHandle(nsIPrincipal* aPrincipal) {
+  RefPtr<nsMainThreadPtrHolder<nsIPrincipal>> holder =
+      new nsMainThreadPtrHolder<nsIPrincipal>(
+          "MakePrincipalHandle::nsIPrincipal", aPrincipal);
+  return PrincipalHandle(holder);
+}
+
+#define PRINCIPAL_HANDLE_NONE nullptr
+
+inline nsIPrincipal* GetPrincipalFromHandle(
+    const PrincipalHandle& aPrincipalHandle) {
+  MOZ_ASSERT(NS_IsMainThread());
+  return aPrincipalHandle.get();
+}
+
+inline bool PrincipalHandleMatches(const PrincipalHandle& aPrincipalHandle,
+                                   nsIPrincipal* aOther) {
+  if (!aOther) {
+    return false;
+  }
+
+  nsIPrincipal* principal = GetPrincipalFromHandle(aPrincipalHandle);
+  if (!principal) {
+    return false;
+  }
+
+  bool result;
+  if (NS_FAILED(principal->Equals(aOther, &result))) {
+    NS_ERROR("Principal check failed");
+    return false;
+  }
+
+  return result;
+}
+
+/**
  * A MediaSegment is a chunk of media data sequential in time. Different
  * types of data have different subclasses of MediaSegment, all inheriting
  * from MediaSegmentBase.
- * All MediaSegment data is timed using TrackTime. The actual tick rate
+ * All MediaSegment data is timed using StreamTime. The actual tick rate
  * is defined on a per-track basis. For some track types, this can be
  * a fixed constant for all tracks of that type (e.g. 1MHz for video).
  *
  * Each media segment defines a concept of "null media data" (e.g. silence
  * for audio or "no video frame" for video), which can be efficiently
  * represented. This is used for padding.
  */
 class MediaSegment {
@@ -83,72 +127,67 @@ class MediaSegment {
 
   virtual ~MediaSegment() { MOZ_COUNT_DTOR(MediaSegment); }
 
   enum Type { AUDIO, VIDEO, TYPE_COUNT };
 
   /**
    * Gets the total duration of the segment.
    */
-  TrackTime GetDuration() const { return mDuration; }
+  StreamTime GetDuration() const { return mDuration; }
   Type GetType() const { return mType; }
 
   /**
    * Gets the last principal id that was appended to this segment.
    */
   const PrincipalHandle& GetLastPrincipalHandle() const {
     return mLastPrincipalHandle;
   }
   /**
-   * Called by the MediaTrackGraph as it appends a chunk with a different
+   * Called by the MediaStreamGraph as it appends a chunk with a different
    * principal id than the current one.
    */
   void SetLastPrincipalHandle(PrincipalHandle aLastPrincipalHandle) {
     mLastPrincipalHandle = std::forward<PrincipalHandle>(aLastPrincipalHandle);
   }
 
   /**
    * Returns true if all chunks in this segment are null.
    */
   virtual bool IsNull() const = 0;
 
   /**
-   * Returns true if this segment contains no chunks.
-   */
-  virtual bool IsEmpty() const = 0;
-
-  /**
    * Create a MediaSegment of the same type.
    */
   virtual MediaSegment* CreateEmptyClone() const = 0;
   /**
    * Moves contents of aSource to the end of this segment.
    */
   virtual void AppendFrom(MediaSegment* aSource) = 0;
   /**
    * Append a slice of aSource to this segment.
    */
-  virtual void AppendSlice(const MediaSegment& aSource, TrackTime aStart,
-                           TrackTime aEnd) = 0;
+  virtual void AppendSlice(const MediaSegment& aSource, StreamTime aStart,
+                           StreamTime aEnd) = 0;
   /**
    * Replace all contents up to aDuration with null data.
    */
-  virtual void ForgetUpTo(TrackTime aDuration) = 0;
+  virtual void ForgetUpTo(StreamTime aDuration) = 0;
   /**
    * Forget all data buffered after a given point
    */
-  virtual void FlushAfter(TrackTime aNewEnd) = 0;
+  virtual void FlushAfter(StreamTime aNewEnd) = 0;
   /**
    * Insert aDuration of null data at the start of the segment.
    */
-  virtual void InsertNullDataAtStart(TrackTime aDuration) = 0;
+  virtual void InsertNullDataAtStart(StreamTime aDuration) = 0;
   /**
    * Insert aDuration of null data at the end of the segment.
    */
-  virtual void AppendNullData(TrackTime aDuration) = 0;
+  virtual void AppendNullData(StreamTime aDuration) = 0;
   /**
    * Replace contents with disabled (silence/black) data of the same duration
    */
   virtual void ReplaceWithDisabled() = 0;
   /**
    * Replace contents with null data of the same duration
    */
   virtual void ReplaceWithNull() = 0;
@@ -175,20 +214,20 @@ class MediaSegment {
 
   MediaSegment(MediaSegment&& aSegment)
       : mDuration(std::move(aSegment.mDuration)),
         mType(std::move(aSegment.mType)),
         mLastPrincipalHandle(std::move(aSegment.mLastPrincipalHandle)) {
     MOZ_COUNT_CTOR(MediaSegment);
   }
 
-  TrackTime mDuration;  // total of mDurations of all chunks
+  StreamTime mDuration;  // total of mDurations of all chunks
   Type mType;
 
-  // The latest principal handle that the MediaTrackGraph has processed for
+  // The latest principal handle that the MediaStreamGraph has processed for
   // this segment.
   PrincipalHandle mLastPrincipalHandle;
 };
 
 /**
  * C is the implementation class subclassed from MediaSegmentBase.
  * C must contain a Chunk class.
  */
@@ -199,87 +238,86 @@ class MediaSegmentBase : public MediaSeg
     for (typename C::ConstChunkIterator iter(*this); !iter.IsEnded();
          iter.Next()) {
       if (!iter->IsNull()) {
         return false;
       }
     }
     return true;
   }
-  bool IsEmpty() const override { return mChunks.IsEmpty(); }
   MediaSegment* CreateEmptyClone() const override { return new C(); }
   void AppendFrom(MediaSegment* aSource) override {
     NS_ASSERTION(aSource->GetType() == C::StaticType(), "Wrong type");
     AppendFromInternal(static_cast<C*>(aSource));
   }
   void AppendFrom(C* aSource) { AppendFromInternal(aSource); }
-  void AppendSlice(const MediaSegment& aSource, TrackTime aStart,
-                   TrackTime aEnd) override {
+  void AppendSlice(const MediaSegment& aSource, StreamTime aStart,
+                   StreamTime aEnd) override {
     NS_ASSERTION(aSource.GetType() == C::StaticType(), "Wrong type");
     AppendSliceInternal(static_cast<const C&>(aSource), aStart, aEnd);
   }
-  void AppendSlice(const C& aOther, TrackTime aStart, TrackTime aEnd) {
+  void AppendSlice(const C& aOther, StreamTime aStart, StreamTime aEnd) {
     AppendSliceInternal(aOther, aStart, aEnd);
   }
   /**
    * Replace the first aDuration ticks with null media data, because the data
    * will not be required again.
    */
-  void ForgetUpTo(TrackTime aDuration) override {
+  void ForgetUpTo(StreamTime aDuration) override {
     if (mChunks.IsEmpty() || aDuration <= 0) {
       return;
     }
     if (mChunks[0].IsNull()) {
-      TrackTime extraToForget =
+      StreamTime extraToForget =
           std::min(aDuration, mDuration) - mChunks[0].GetDuration();
       if (extraToForget > 0) {
         RemoveLeading(extraToForget, 1);
         mChunks[0].mDuration += extraToForget;
         mDuration += extraToForget;
       }
       return;
     }
     RemoveLeading(aDuration, 0);
     mChunks.InsertElementAt(0)->SetNull(aDuration);
     mDuration += aDuration;
   }
-  void FlushAfter(TrackTime aNewEnd) override {
+  void FlushAfter(StreamTime aNewEnd) override {
     if (mChunks.IsEmpty()) {
       return;
     }
 
     if (mChunks[0].IsNull()) {
-      TrackTime extraToKeep = aNewEnd - mChunks[0].GetDuration();
+      StreamTime extraToKeep = aNewEnd - mChunks[0].GetDuration();
       if (extraToKeep < 0) {
         // reduce the size of the Null, get rid of everthing else
         mChunks[0].SetNull(aNewEnd);
         extraToKeep = 0;
       }
       RemoveTrailing(extraToKeep, 1);
     } else {
       if (aNewEnd > mDuration) {
         NS_ASSERTION(aNewEnd <= mDuration, "can't add data in FlushAfter");
         return;
       }
       RemoveTrailing(aNewEnd, 0);
     }
     mDuration = aNewEnd;
   }
-  void InsertNullDataAtStart(TrackTime aDuration) override {
+  void InsertNullDataAtStart(StreamTime aDuration) override {
     if (aDuration <= 0) {
       return;
     }
     if (!mChunks.IsEmpty() && mChunks[0].IsNull()) {
       mChunks[0].mDuration += aDuration;
     } else {
       mChunks.InsertElementAt(0)->SetNull(aDuration);
     }
     mDuration += aDuration;
   }
-  void AppendNullData(TrackTime aDuration) override {
+  void AppendNullData(StreamTime aDuration) override {
     if (aDuration <= 0) {
       return;
     }
     if (!mChunks.IsEmpty() && mChunks[mChunks.Length() - 1].IsNull()) {
       mChunks[mChunks.Length() - 1].mDuration += aDuration;
     } else {
       mChunks.AppendElement()->SetNull(aDuration);
     }
@@ -287,17 +325,17 @@ class MediaSegmentBase : public MediaSeg
   }
   void ReplaceWithDisabled() override {
     if (GetType() != AUDIO) {
       MOZ_CRASH("Disabling unknown segment type");
     }
     ReplaceWithNull();
   }
   void ReplaceWithNull() override {
-    TrackTime duration = GetDuration();
+    StreamTime duration = GetDuration();
     Clear();
     AppendNullData(duration);
   }
   void Clear() override {
     mDuration = 0;
     mChunks.ClearAndRetainStorage();
     mChunks.SetCapacity(DEFAULT_SEGMENT_CAPACITY);
   }
@@ -324,17 +362,17 @@ class MediaSegmentBase : public MediaSeg
     const Chunk& operator*() { return mSegment.mChunks[mIndex]; }
     const Chunk* operator->() { return &mSegment.mChunks[mIndex]; }
 
    private:
     const MediaSegmentBase<C, Chunk>& mSegment;
     uint32_t mIndex;
   };
 
-  void RemoveLeading(TrackTime aDuration) { RemoveLeading(aDuration, 0); }
+  void RemoveLeading(StreamTime aDuration) { RemoveLeading(aDuration, 0); }
 
   size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override {
     size_t amount = mChunks.ShallowSizeOfExcludingThis(aMallocSizeOf);
     for (size_t i = 0; i < mChunks.Length(); i++) {
       amount += mChunks[i].SizeOfExcludingThisIfUnshared(aMallocSizeOf);
     }
     return amount;
   }
@@ -382,52 +420,52 @@ class MediaSegmentBase : public MediaSeg
     }
 
     aSource->mChunks.ClearAndRetainStorage();
     MOZ_ASSERT(aSource->mChunks.Capacity() >= DEFAULT_SEGMENT_CAPACITY,
                "Capacity must be retained after appending from aSource");
   }
 
   void AppendSliceInternal(const MediaSegmentBase<C, Chunk>& aSource,
-                           TrackTime aStart, TrackTime aEnd) {
+                           StreamTime aStart, StreamTime aEnd) {
     MOZ_ASSERT(aStart <= aEnd, "Endpoints inverted");
     NS_ASSERTION(aStart >= 0 && aEnd <= aSource.mDuration,
                  "Slice out of range");
     mDuration += aEnd - aStart;
-    TrackTime offset = 0;
+    StreamTime offset = 0;
     for (uint32_t i = 0; i < aSource.mChunks.Length() && offset < aEnd; ++i) {
       const Chunk& c = aSource.mChunks[i];
-      TrackTime start = std::max(aStart, offset);
-      TrackTime nextOffset = offset + c.GetDuration();
-      TrackTime end = std::min(aEnd, nextOffset);
+      StreamTime start = std::max(aStart, offset);
+      StreamTime nextOffset = offset + c.GetDuration();
+      StreamTime end = std::min(aEnd, nextOffset);
       if (start < end) {
         if (!mChunks.IsEmpty() &&
             mChunks[mChunks.Length() - 1].CanCombineWithFollowing(c)) {
           MOZ_ASSERT(start - offset >= 0 && end - offset <= aSource.mDuration,
                      "Slice out of bounds");
           mChunks[mChunks.Length() - 1].mDuration += end - start;
         } else {
           mChunks.AppendElement(c)->SliceTo(start - offset, end - offset);
         }
       }
       offset = nextOffset;
     }
   }
 
-  Chunk* AppendChunk(TrackTime aDuration) {
+  Chunk* AppendChunk(StreamTime aDuration) {
     MOZ_ASSERT(aDuration >= 0);
     Chunk* c = mChunks.AppendElement();
     c->mDuration = aDuration;
     mDuration += aDuration;
     return c;
   }
 
-  void RemoveLeading(TrackTime aDuration, uint32_t aStartIndex) {
+  void RemoveLeading(StreamTime aDuration, uint32_t aStartIndex) {
     NS_ASSERTION(aDuration >= 0, "Can't remove negative duration");
-    TrackTime t = aDuration;
+    StreamTime t = aDuration;
     uint32_t chunksToRemove = 0;
     for (uint32_t i = aStartIndex; i < mChunks.Length() && t > 0; ++i) {
       Chunk* c = &mChunks[i];
       if (c->GetDuration() > t) {
         c->SliceTo(t, c->GetDuration());
         t = 0;
         break;
       }
@@ -440,19 +478,19 @@ class MediaSegmentBase : public MediaSeg
       mChunks.RemoveElementsAt(aStartIndex, chunksToRemove);
     }
     mDuration -= aDuration - t;
 
     MOZ_ASSERT(mChunks.Capacity() >= DEFAULT_SEGMENT_CAPACITY,
                "Capacity must be retained after removing chunks");
   }
 
-  void RemoveTrailing(TrackTime aKeep, uint32_t aStartIndex) {
+  void RemoveTrailing(StreamTime aKeep, uint32_t aStartIndex) {
     NS_ASSERTION(aKeep >= 0, "Can't keep negative duration");
-    TrackTime t = aKeep;
+    StreamTime t = aKeep;
     uint32_t i;
     for (i = aStartIndex; i < mChunks.Length(); ++i) {
       Chunk* c = &mChunks[i];
       if (c->GetDuration() > t) {
         c->SliceTo(0, t);
         break;
       }
       t -= c->GetDuration();
rename from dom/media/MediaTrackGraph.cpp
rename to dom/media/MediaStreamGraph.cpp
--- a/dom/media/MediaTrackGraph.cpp
+++ b/dom/media/MediaStreamGraph.cpp
@@ -1,32 +1,32 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-#include "MediaTrackGraphImpl.h"
+#include "MediaStreamGraphImpl.h"
 #include "mozilla/MathAlgorithms.h"
 #include "mozilla/Unused.h"
 
 #include "AudioSegment.h"
 #include "VideoSegment.h"
 #include "nsContentUtils.h"
 #include "nsIObserver.h"
 #include "nsPrintfCString.h"
 #include "nsServiceManagerUtils.h"
 #include "prerror.h"
 #include "mozilla/Logging.h"
 #include "mozilla/Attributes.h"
-#include "ForwardedInputTrack.h"
+#include "TrackUnionStream.h"
 #include "ImageContainer.h"
-#include "AudioCaptureTrack.h"
-#include "AudioNodeTrack.h"
-#include "AudioNodeExternalInputTrack.h"
-#include "MediaTrackListener.h"
+#include "AudioCaptureStream.h"
+#include "AudioNodeStream.h"
+#include "AudioNodeExternalInputStream.h"
+#include "MediaStreamListener.h"
 #include "mozilla/dom/BaseAudioContextBinding.h"
 #include "mozilla/media/MediaUtils.h"
 #include <algorithm>
 #include "GeckoProfiler.h"
 #include "VideoFrameContainer.h"
 #include "mozilla/AbstractThread.h"
 #include "mozilla/StaticPrefs_dom.h"
 #include "mozilla/Unused.h"
@@ -38,265 +38,318 @@
 #include "webaudio/blink/DenormalDisabler.h"
 #include "webaudio/blink/HRTFDatabaseLoader.h"
 
 using namespace mozilla::layers;
 using namespace mozilla::dom;
 using namespace mozilla::gfx;
 using namespace mozilla::media;
 
-mozilla::AsyncLogger gMTGTraceLogger("MTGTracing");
+mozilla::AsyncLogger gMSGTraceLogger("MSGTracing");
 
 namespace mozilla {
 
-LazyLogModule gMediaTrackGraphLog("MediaTrackGraph");
+LazyLogModule gMediaStreamGraphLog("MediaStreamGraph");
 #ifdef LOG
 #  undef LOG
 #endif  // LOG
-#define LOG(type, msg) MOZ_LOG(gMediaTrackGraphLog, type, msg)
+#define LOG(type, msg) MOZ_LOG(gMediaStreamGraphLog, type, msg)
+
+enum SourceMediaStream::TrackCommands : uint32_t {
+  TRACK_CREATE = TrackEventCommand::TRACK_EVENT_CREATED,
+  TRACK_END = TrackEventCommand::TRACK_EVENT_ENDED,
+};
 
 /**
  * A hash table containing the graph instances, one per document.
  *
  * The key is a hash of nsPIDOMWindowInner, see `WindowToHash`.
  */
-static nsDataHashtable<nsUint32HashKey, MediaTrackGraphImpl*> gGraphs;
-
-MediaTrackGraphImpl::~MediaTrackGraphImpl() {
-  MOZ_ASSERT(mTracks.IsEmpty() && mSuspendedTracks.IsEmpty(),
-             "All tracks should have been destroyed by messages from the main "
+static nsDataHashtable<nsUint32HashKey, MediaStreamGraphImpl*> gGraphs;
+
+MediaStreamGraphImpl::~MediaStreamGraphImpl() {
+  MOZ_ASSERT(mStreams.IsEmpty() && mSuspendedStreams.IsEmpty(),
+             "All streams should have been destroyed by messages from the main "
              "thread");
-  LOG(LogLevel::Debug, ("MediaTrackGraph %p destroyed", this));
-  LOG(LogLevel::Debug, ("MediaTrackGraphImpl::~MediaTrackGraphImpl"));
+  LOG(LogLevel::Debug, ("MediaStreamGraph %p destroyed", this));
+  LOG(LogLevel::Debug, ("MediaStreamGraphImpl::~MediaStreamGraphImpl"));
 
 #ifdef TRACING
-  gMTGTraceLogger.Stop();
+  gMSGTraceLogger.Stop();
 #endif
 }
 
-void MediaTrackGraphImpl::AddTrackGraphThread(MediaTrack* aTrack) {
+void MediaStreamGraphImpl::AddStreamGraphThread(MediaStream* aStream) {
   MOZ_ASSERT(OnGraphThreadOrNotRunning());
-  aTrack->mStartTime = mProcessedTime;
-
-  if (aTrack->IsSuspended()) {
-    mSuspendedTracks.AppendElement(aTrack);
+  aStream->mTracksStartTime = mProcessedTime;
+
+  if (aStream->IsSuspended()) {
+    mSuspendedStreams.AppendElement(aStream);
     LOG(LogLevel::Debug,
-        ("%p: Adding media track %p, in the suspended track array", this,
-         aTrack));
+        ("%p: Adding media stream %p, in the suspended stream array", this,
+         aStream));
   } else {
-    mTracks.AppendElement(aTrack);
-    LOG(LogLevel::Debug, ("%p:  Adding media track %p, count %zu", this, aTrack,
-                          mTracks.Length()));
+    mStreams.AppendElement(aStream);
+    LOG(LogLevel::Debug, ("%p:  Adding media stream %p, count %zu", this,
+                          aStream, mStreams.Length()));
   }
 
-  SetTrackOrderDirty();
+  SetStreamOrderDirty();
 }
 
-void MediaTrackGraphImpl::RemoveTrackGraphThread(MediaTrack* aTrack) {
+void MediaStreamGraphImpl::RemoveStreamGraphThread(MediaStream* aStream) {
   MOZ_ASSERT(OnGraphThreadOrNotRunning());
-  // Remove references in mTrackUpdates before we allow aTrack to die.
+  // Remove references in mStreamUpdates before we allow aStream to die.
   // Pending updates are not needed (since the main thread has already given
-  // up the track) so we will just drop them.
+  // up the stream) so we will just drop them.
   {
     MonitorAutoLock lock(mMonitor);
-    for (uint32_t i = 0; i < mTrackUpdates.Length(); ++i) {
-      if (mTrackUpdates[i].mTrack == aTrack) {
-        mTrackUpdates[i].mTrack = nullptr;
+    for (uint32_t i = 0; i < mStreamUpdates.Length(); ++i) {
+      if (mStreamUpdates[i].mStream == aStream) {
+        mStreamUpdates[i].mStream = nullptr;
       }
     }
   }
 
   // Ensure that mFirstCycleBreaker and mMixer are updated when necessary.
-  SetTrackOrderDirty();
-
-  if (aTrack->IsSuspended()) {
-    mSuspendedTracks.RemoveElement(aTrack);
+  SetStreamOrderDirty();
+
+  if (aStream->IsSuspended()) {
+    mSuspendedStreams.RemoveElement(aStream);
   } else {
-    mTracks.RemoveElement(aTrack);
+    mStreams.RemoveElement(aStream);
   }
 
-  LOG(LogLevel::Debug, ("%p: Removed media track %p, count %zu", this, aTrack,
-                        mTracks.Length()));
-
-  NS_RELEASE(aTrack);  // probably destroying it
+  LOG(LogLevel::Debug, ("%p: Removed media stream %p, count %zu", this, aStream,
+                        mStreams.Length()));
+
+  NS_RELEASE(aStream);  // probably destroying it
 }
 
-TrackTime MediaTrackGraphImpl::GraphTimeToTrackTimeWithBlocking(
-    const MediaTrack* aTrack, GraphTime aTime) const {
+StreamTime MediaStreamGraphImpl::GraphTimeToStreamTimeWithBlocking(
+    const MediaStream* aStream, GraphTime aTime) const {
   MOZ_ASSERT(
       aTime <= mStateComputedTime,
       "Don't ask about times where we haven't made blocking decisions yet");
-  return std::max<TrackTime>(
-      0, std::min(aTime, aTrack->mStartBlocking) - aTrack->mStartTime);
+  return std::max<StreamTime>(
+      0, std::min(aTime, aStream->mStartBlocking) - aStream->mTracksStartTime);
 }
 
-GraphTime MediaTrackGraphImpl::IterationEnd() const {
+GraphTime MediaStreamGraphImpl::IterationEnd() const {
   MOZ_ASSERT(OnGraphThreadOrNotRunning());
   return CurrentDriver()->IterationEnd();
 }
 
-void MediaTrackGraphImpl::UpdateCurrentTimeForTracks(
+void MediaStreamGraphImpl::UpdateCurrentTimeForStreams(
     GraphTime aPrevCurrentTime) {
   MOZ_ASSERT(OnGraphThread());
-  for (MediaTrack* track : AllTracks()) {
-    // Shouldn't have already notified of ended *and* have output!
-    MOZ_ASSERT_IF(track->mStartBlocking > aPrevCurrentTime,
-                  !track->mNotifiedEnded);
+  for (MediaStream* stream : AllStreams()) {
+    // Shouldn't have already notified of finish *and* have output!
+    MOZ_ASSERT_IF(stream->mStartBlocking > aPrevCurrentTime,
+                  !stream->mNotifiedFinished);
 
     // Calculate blocked time and fire Blocked/Unblocked events
-    GraphTime blockedTime = mStateComputedTime - track->mStartBlocking;
+    GraphTime blockedTime = mStateComputedTime - stream->mStartBlocking;
     NS_ASSERTION(blockedTime >= 0, "Error in blocking time");
-    track->AdvanceTimeVaryingValuesToCurrentTime(mStateComputedTime,
-                                                 blockedTime);
+    stream->AdvanceTimeVaryingValuesToCurrentTime(mStateComputedTime,
+                                                  blockedTime);
     LOG(LogLevel::Verbose,
-        ("%p: MediaTrack %p bufferStartTime=%f blockedTime=%f", this, track,
-         MediaTimeToSeconds(track->mStartTime),
+        ("%p: MediaStream %p bufferStartTime=%f blockedTime=%f", this, stream,
+         MediaTimeToSeconds(stream->mTracksStartTime),
          MediaTimeToSeconds(blockedTime)));
-    track->mStartBlocking = mStateComputedTime;
-
-    TrackTime trackCurrentTime =
-        track->GraphTimeToTrackTime(mStateComputedTime);
-    if (track->mEnded) {
-      MOZ_ASSERT(track->GetEnd() <= trackCurrentTime);
-      if (!track->mNotifiedEnded) {
-        // Playout of this track ended and listeners have not been notified.
-        track->mNotifiedEnded = true;
-        SetTrackOrderDirty();
-        for (const auto& listener : track->mTrackListeners) {
-          listener->NotifyOutput(this, track->GetEnd());
-          listener->NotifyEnded(this);
+    stream->mStartBlocking = mStateComputedTime;
+
+    for (StreamTracks::TrackIter track(stream->mTracks); !track.IsEnded();
+         track.Next()) {
+      StreamTime streamCurrentTime =
+          stream->GraphTimeToStreamTime(mStateComputedTime);
+      if (track->IsEnded() && track->GetEnd() <= streamCurrentTime) {
+        if (!track->NotifiedEnded()) {
+          // Playout of this track ended and listeners have not been notified.
+          track->NotifyEnded();
+          for (const TrackBound<MediaStreamTrackListener>& listener :
+               stream->mTrackListeners) {
+            if (listener.mTrackID == track->GetID()) {
+              listener.mListener->NotifyOutput(
+                  this, track->GetEnd() - track->GetStart());
+              listener.mListener->NotifyEnded(this);
+            }
+          }
+        }
+      } else {
+        for (const TrackBound<MediaStreamTrackListener>& listener :
+             stream->mTrackListeners) {
+          if (listener.mTrackID == track->GetID()) {
+            listener.mListener->NotifyOutput(
+                this, streamCurrentTime - track->GetStart());
+          }
         }
       }
-    } else {
-      for (const auto& listener : track->mTrackListeners) {
-        listener->NotifyOutput(this, trackCurrentTime);
-      }
+    }
+
+    // The stream is fully finished when all of its track data has been played
+    // out.
+    if (stream->mFinished && !stream->mNotifiedFinished &&
+        mProcessedTime >= stream->StreamTimeToGraphTime(
+                              stream->GetStreamTracks().GetLatestTrackEnd())) {
+      stream->mNotifiedFinished = true;
+      SetStreamOrderDirty();
     }
   }
 }
 
 template <typename C, typename Chunk>
-void MediaTrackGraphImpl::ProcessChunkMetadataForInterval(MediaTrack* aTrack,
-                                                          C& aSegment,
-                                                          TrackTime aStart,
-                                                          TrackTime aEnd) {
+void MediaStreamGraphImpl::ProcessChunkMetadataForInterval(MediaStream* aStream,
+                                                           TrackID aTrackID,
+                                                           C& aSegment,
+                                                           StreamTime aStart,
+                                                           StreamTime aEnd) {
   MOZ_ASSERT(OnGraphThreadOrNotRunning());
-  MOZ_ASSERT(aTrack);
-
-  TrackTime offset = 0;
+  MOZ_ASSERT(aStream);
+  MOZ_ASSERT(IsTrackIDExplicit(aTrackID));
+
+  StreamTime offset = 0;
   for (typename C::ConstChunkIterator chunk(aSegment); !chunk.IsEnded();
        chunk.Next()) {
     if (offset >= aEnd) {
       break;
     }
     offset += chunk->GetDuration();
     if (chunk->IsNull() || offset < aStart) {
       continue;
     }
     const PrincipalHandle& principalHandle = chunk->GetPrincipalHandle();
     if (principalHandle != aSegment.GetLastPrincipalHandle()) {
       aSegment.SetLastPrincipalHandle(principalHandle);
       LOG(LogLevel::Debug,
-          ("%p: MediaTrack %p, principalHandle "
+          ("%p: MediaStream %p track %d, principalHandle "
            "changed in %sChunk with duration %lld",
-           this, aTrack,
+           this, aStream, aTrackID,
            aSegment.GetType() == MediaSegment::AUDIO ? "Audio" : "Video",
            (long long)chunk->GetDuration()));
-      for (const auto& listener : aTrack->mTrackListeners) {
-        listener->NotifyPrincipalHandleChanged(this, principalHandle);
+      for (const TrackBound<MediaStreamTrackListener>& listener :
+           aStream->mTrackListeners) {
+        if (listener.mTrackID == aTrackID) {
+          listener.mListener->NotifyPrincipalHandleChanged(this,
+                                                           principalHandle);
+        }
       }
     }
   }
 }
 
-void MediaTrackGraphImpl::ProcessChunkMetadata(GraphTime aPrevCurrentTime) {
+void MediaStreamGraphImpl::ProcessChunkMetadata(GraphTime aPrevCurrentTime) {
   MOZ_ASSERT(OnGraphThreadOrNotRunning());
-  for (MediaTrack* track : AllTracks()) {
-    TrackTime iterationStart = track->GraphTimeToTrackTime(aPrevCurrentTime);
-    TrackTime iterationEnd = track->GraphTimeToTrackTime(mProcessedTime);
-    if (!track->mSegment) {
-      continue;
-    }
-    if (track->mType == MediaSegment::AUDIO) {
-      ProcessChunkMetadataForInterval<AudioSegment, AudioChunk>(
-          track, *track->GetData<AudioSegment>(), iterationStart, iterationEnd);
-    } else if (track->mType == MediaSegment::VIDEO) {
-      ProcessChunkMetadataForInterval<VideoSegment, VideoChunk>(
-          track, *track->GetData<VideoSegment>(), iterationStart, iterationEnd);
-    } else {
-      MOZ_CRASH("Unknown track type");
+  for (MediaStream* stream : AllStreams()) {
+    StreamTime iterationStart = stream->GraphTimeToStreamTime(aPrevCurrentTime);
+    StreamTime iterationEnd = stream->GraphTimeToStreamTime(mProcessedTime);
+    for (StreamTracks::TrackIter tracks(stream->mTracks); !tracks.IsEnded();
+         tracks.Next()) {
+      MediaSegment* segment = tracks->GetSegment();
+      if (!segment) {
+        continue;
+      }
+      if (tracks->GetType() == MediaSegment::AUDIO) {
+        AudioSegment* audio = static_cast<AudioSegment*>(segment);
+        ProcessChunkMetadataForInterval<AudioSegment, AudioChunk>(
+            stream, tracks->GetID(), *audio, iterationStart, iterationEnd);
+      } else if (tracks->GetType() == MediaSegment::VIDEO) {
+        VideoSegment* video = static_cast<VideoSegment*>(segment);
+        ProcessChunkMetadataForInterval<VideoSegment, VideoChunk>(
+            stream, tracks->GetID(), *video, iterationStart, iterationEnd);
+      } else {
+        MOZ_CRASH("Unknown track type");
+      }
     }
   }
 }
 
-GraphTime MediaTrackGraphImpl::WillUnderrun(MediaTrack* aTrack,
-                                            GraphTime aEndBlockingDecisions) {
-  // Ended tracks can't underrun. ProcessedMediaTracks also can't cause
+GraphTime MediaStreamGraphImpl::WillUnderrun(MediaStream* aStream,
+                                             GraphTime aEndBlockingDecisions) {
+  // Finished streams can't underrun. ProcessedMediaStreams also can't cause
   // underrun currently, since we'll always be able to produce data for them
-  // unless they block on some other track.
-  if (aTrack->mEnded || aTrack->AsProcessedTrack()) {
+  // unless they block on some other stream.
+  if (aStream->mFinished || aStream->AsProcessedStream()) {
     return aEndBlockingDecisions;
   }
-  // This track isn't ended or suspended. We don't need to call
-  // TrackTimeToGraphTime since an underrun is the only thing that can block
+  // This stream isn't finished or suspended. We don't need to call
+  // StreamTimeToGraphTime since an underrun is the only thing that can block
   // it.
-  GraphTime bufferEnd = aTrack->GetEnd() + aTrack->mStartTime;
+  GraphTime bufferEnd = aStream->GetTracksEnd() + aStream->mTracksStartTime;
 #ifdef DEBUG
   if (bufferEnd < mProcessedTime) {
-    LOG(LogLevel::Error, ("%p: MediaTrack %p underrun, "
+    LOG(LogLevel::Error, ("%p: MediaStream %p underrun, "
                           "bufferEnd %f < mProcessedTime %f (%" PRId64
-                          " < %" PRId64 "), TrackTime %" PRId64,
-                          this, aTrack, MediaTimeToSeconds(bufferEnd),
+                          " < %" PRId64 "), Streamtime %" PRId64,
+                          this, aStream, MediaTimeToSeconds(bufferEnd),
                           MediaTimeToSeconds(mProcessedTime), bufferEnd,
-                          mProcessedTime, aTrack->GetEnd()));
+                          mProcessedTime, aStream->GetTracksEnd()));
+    aStream->DumpTrackInfo();
     NS_ASSERTION(bufferEnd >= mProcessedTime, "Buffer underran");
   }
 #endif
   return std::min(bufferEnd, aEndBlockingDecisions);
 }
 
 namespace {
-// Value of mCycleMarker for unvisited tracks in cycle detection.
+// Value of mCycleMarker for unvisited streams in cycle detection.
 const uint32_t NOT_VISITED = UINT32_MAX;
-// Value of mCycleMarker for ordered tracks in muted cycles.
+// Value of mCycleMarker for ordered streams in muted cycles.
 const uint32_t IN_MUTED_CYCLE = 1;
 }  // namespace
 
-bool MediaTrackGraphImpl::AudioTrackPresent() {
+bool MediaStreamGraphImpl::AudioTrackPresent() {
   MOZ_ASSERT(OnGraphThreadOrNotRunning());
 
   bool audioTrackPresent = false;
-  for (MediaTrack* track : mTracks) {
-    if (track->AsAudioNodeTrack()) {
+  for (MediaStream* stream : mStreams) {
+    if (stream->AsAudioNodeStream()) {
       audioTrackPresent = true;
       break;
     }
 
-    if (track->mType == MediaSegment::AUDIO && !track->mNotifiedEnded) {
-      audioTrackPresent = true;
+    for (StreamTracks::TrackIter it(stream->GetStreamTracks()); !it.IsEnded();
+         it.Next()) {
+      if (it->GetType() == MediaSegment::AUDIO && !it->NotifiedEnded()) {
+        audioTrackPresent = true;
+        break;
+      }
+    }
+
+    if (audioTrackPresent) {
+      break;
+    }
+
+    if (SourceMediaStream* source = stream->AsSourceStream()) {
+      if (source->HasPendingAudioTrack()) {
+        audioTrackPresent = true;
+      }
+    }
+
+    if (audioTrackPresent) {
       break;
     }
   }
 
   // XXX For some reason, there are race conditions when starting an audio input
   // where we find no active audio tracks.  In any case, if we have an active
   // audio input we should not allow a switch back to a SystemClockDriver
   if (!audioTrackPresent && mInputDeviceUsers.Count() != 0) {
     NS_WARNING("No audio tracks, but full-duplex audio is enabled!!!!!");
     audioTrackPresent = true;
   }
 
   return audioTrackPresent;
 }
 
-void MediaTrackGraphImpl::UpdateTrackOrder() {
+void MediaStreamGraphImpl::UpdateStreamOrder() {
   MOZ_ASSERT(OnGraphThread());
   bool audioTrackPresent = AudioTrackPresent();
 
-  // Note that this looks for any audio tracks, input or output, and switches
+  // Note that this looks for any audio streams, input or output, and switches
   // to a SystemClockDriver if there are none.  However, if another is already
   // pending, let that switch happen.
 
   if (!audioTrackPresent && mRealtime &&
       CurrentDriver()->AsAudioCallbackDriver()) {
     MonitorAutoLock mon(mMonitor);
     if (CurrentDriver()->AsAudioCallbackDriver()->IsStarted() &&
         !(CurrentDriver()->Switching())) {
@@ -318,338 +371,370 @@ void MediaTrackGraphImpl::UpdateTrackOrd
     MonitorAutoLock mon(mMonitor);
     if (LifecycleStateRef() == LIFECYCLE_RUNNING) {
       AudioCallbackDriver* driver = new AudioCallbackDriver(
           this, AudioInputChannelCount(), AudioInputDevicePreference());
       CurrentDriver()->SwitchAtNextIteration(driver);
     }
   }
 
-  if (!mTrackOrderDirty) {
+  if (!mStreamOrderDirty) {
     return;
   }
 
-  mTrackOrderDirty = false;
+  mStreamOrderDirty = false;
 
   // The algorithm for finding cycles is based on Tim Leslie's iterative
   // implementation [1][2] of Pearce's variant [3] of Tarjan's strongly
   // connected components (SCC) algorithm.  There are variations (a) to
-  // distinguish whether tracks in SCCs of size 1 are in a cycle and (b) to
+  // distinguish whether streams in SCCs of size 1 are in a cycle and (b) to
   // re-run the algorithm over SCCs with breaks at DelayNodes.
   //
   // [1] http://www.timl.id.au/?p=327
   // [2]
   // https://github.com/scipy/scipy/blob/e2c502fca/scipy/sparse/csgraph/_traversal.pyx#L582
   // [3] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.102.1707
   //
   // There are two stacks.  One for the depth-first search (DFS),
-  mozilla::LinkedList<MediaTrack> dfsStack;
-  // and another for tracks popped from the DFS stack, but still being
-  // considered as part of SCCs involving tracks on the stack.
-  mozilla::LinkedList<MediaTrack> sccStack;
-
-  // An index into mTracks for the next track found with no unsatisfied
+  mozilla::LinkedList<MediaStream> dfsStack;
+  // and another for streams popped from the DFS stack, but still being
+  // considered as part of SCCs involving streams on the stack.
+  mozilla::LinkedList<MediaStream> sccStack;
+
+  // An index into mStreams for the next stream found with no unsatisfied
   // upstream dependencies.
-  uint32_t orderedTrackCount = 0;
-
-  for (uint32_t i = 0; i < mTracks.Length(); ++i) {
-    MediaTrack* t = mTracks[i];
-    ProcessedMediaTrack* pt = t->AsProcessedTrack();
-    if (pt) {
-      // The dfsStack initially contains a list of all processed tracks in
+  uint32_t orderedStreamCount = 0;
+
+  for (uint32_t i = 0; i < mStreams.Length(); ++i) {
+    MediaStream* s = mStreams[i];
+    ProcessedMediaStream* ps = s->AsProcessedStream();
+    if (ps) {
+      // The dfsStack initially contains a list of all processed streams in
       // unchanged order.
-      dfsStack.insertBack(t);
-      pt->mCycleMarker = NOT_VISITED;
+      dfsStack.insertBack(s);
+      ps->mCycleMarker = NOT_VISITED;
     } else {
-      // SourceMediaTracks have no inputs and so can be ordered now.
-      mTracks[orderedTrackCount] = t;
-      ++orderedTrackCount;
+      // SourceMediaStreams have no inputs and so can be ordered now.
+      mStreams[orderedStreamCount] = s;
+      ++orderedStreamCount;
     }
   }
 
   // mNextStackMarker corresponds to "index" in Tarjan's algorithm.  It is a
-  // counter to label mCycleMarker on the next visited track in the DFS
-  // uniquely in the set of visited tracks that are still being considered.
+  // counter to label mCycleMarker on the next visited stream in the DFS
+  // uniquely in the set of visited streams that are still being considered.
   //
   // In this implementation, the counter descends so that the values are
-  // strictly greater than the values that mCycleMarker takes when the track
+  // strictly greater than the values that mCycleMarker takes when the stream
   // has been ordered (0 or IN_MUTED_CYCLE).
   //
-  // Each new track labelled, as the DFS searches upstream, receives a value
-  // less than those used for all other tracks being considered.
+  // Each new stream labelled, as the DFS searches upstream, receives a value
+  // less than those used for all other streams being considered.
   uint32_t nextStackMarker = NOT_VISITED - 1;
-  // Reset list of DelayNodes in cycles stored at the tail of mTracks.
-  mFirstCycleBreaker = mTracks.Length();
-
-  // Rearrange dfsStack order as required to DFS upstream and pop tracks
-  // in processing order to place in mTracks.
-  while (auto pt = static_cast<ProcessedMediaTrack*>(dfsStack.getFirst())) {
-    const auto& inputs = pt->mInputs;
-    MOZ_ASSERT(pt->AsProcessedTrack());
-    if (pt->mCycleMarker == NOT_VISITED) {
+  // Reset list of DelayNodes in cycles stored at the tail of mStreams.
+  mFirstCycleBreaker = mStreams.Length();
+
+  // Rearrange dfsStack order as required to DFS upstream and pop streams
+  // in processing order to place in mStreams.
+  while (auto ps = static_cast<ProcessedMediaStream*>(dfsStack.getFirst())) {
+    const auto& inputs = ps->mInputs;
+    MOZ_ASSERT(ps->AsProcessedStream());
+    if (ps->mCycleMarker == NOT_VISITED) {
       // Record the position on the visited stack, so that any searches
-      // finding this track again know how much of the stack is in the cycle.
-      pt->mCycleMarker = nextStackMarker;
+      // finding this stream again know how much of the stack is in the cycle.
+      ps->mCycleMarker = nextStackMarker;
       --nextStackMarker;
-      // Not-visited input tracks should be processed first.
-      // SourceMediaTracks have already been ordered.
+      // Not-visited input streams should be processed first.
+      // SourceMediaStreams have already been ordered.
       for (uint32_t i = inputs.Length(); i--;) {
         if (inputs[i]->mSource->IsSuspended()) {
           continue;
         }
-        auto input = inputs[i]->mSource->AsProcessedTrack();
+        auto input = inputs[i]->mSource->AsProcessedStream();
         if (input && input->mCycleMarker == NOT_VISITED) {
-          // It can be that this track has an input which is from a suspended
+          // It can be that this stream has an input which is from a suspended
           // AudioContext.
           if (input->isInList()) {
             input->remove();
             dfsStack.insertFront(input);
           }
         }
       }
       continue;
     }
 
     // Returning from DFS.  Pop from dfsStack.
-    pt->remove();
+    ps->remove();
 
     // cycleStackMarker keeps track of the highest marker value on any
-    // upstream track, if any, found receiving input, directly or indirectly,
+    // upstream stream, if any, found receiving input, directly or indirectly,
     // from the visited stack (and so from |ps|, making a cycle).  In a
     // variation from Tarjan's SCC algorithm, this does not include |ps|
     // unless it is part of the cycle.
     uint32_t cycleStackMarker = 0;
     for (uint32_t i = inputs.Length(); i--;) {
       if (inputs[i]->mSource->IsSuspended()) {
         continue;
       }
-      auto input = inputs[i]->mSource->AsProcessedTrack();
+      auto input = inputs[i]->mSource->AsProcessedStream();
       if (input) {
         cycleStackMarker = std::max(cycleStackMarker, input->mCycleMarker);
       }
     }
 
     if (cycleStackMarker <= IN_MUTED_CYCLE) {
       // All inputs have been ordered and their stack markers have been removed.
-      // This track is not part of a cycle.  It can be processed next.
-      pt->mCycleMarker = 0;
-      mTracks[orderedTrackCount] = pt;
-      ++orderedTrackCount;
+      // This stream is not part of a cycle.  It can be processed next.
+      ps->mCycleMarker = 0;
+      mStreams[orderedStreamCount] = ps;
+      ++orderedStreamCount;
       continue;
     }
 
-    // A cycle has been found.  Record this track for ordering when all
-    // tracks in this SCC have been popped from the DFS stack.
-    sccStack.insertFront(pt);
-
-    if (cycleStackMarker > pt->mCycleMarker) {
-      // Cycles have been found that involve tracks that remain on the stack.
-      // Leave mCycleMarker indicating the most downstream (last) track on
+    // A cycle has been found.  Record this stream for ordering when all
+    // streams in this SCC have been popped from the DFS stack.
+    sccStack.insertFront(ps);
+
+    if (cycleStackMarker > ps->mCycleMarker) {
+      // Cycles have been found that involve streams that remain on the stack.
+      // Leave mCycleMarker indicating the most downstream (last) stream on
       // the stack known to be part of this SCC.  In this way, any searches on
       // other paths that find |ps| will know (without having to traverse from
-      // this track again) that they are part of this SCC (i.e. part of an
+      // this stream again) that they are part of this SCC (i.e. part of an
       // intersecting cycle).
-      pt->mCycleMarker = cycleStackMarker;
+      ps->mCycleMarker = cycleStackMarker;
       continue;
     }
 
-    // |pit| is the root of an SCC involving no other tracks on dfsStack, the
-    // complete SCC has been recorded, and tracks in this SCC are part of at
+    // |ps| is the root of an SCC involving no other streams on dfsStack, the
+    // complete SCC has been recorded, and streams in this SCC are part of at
     // least one cycle.
-    MOZ_ASSERT(cycleStackMarker == pt->mCycleMarker);
+    MOZ_ASSERT(cycleStackMarker == ps->mCycleMarker);
     // If there are DelayNodes in this SCC, then they may break the cycles.
     bool haveDelayNode = false;
     auto next = sccStack.getFirst();
-    // Tracks in this SCC are identified by mCycleMarker <= cycleStackMarker.
-    // (There may be other tracks later in sccStack from other incompletely
-    // searched SCCs, involving tracks still on dfsStack.)
+    // Streams in this SCC are identified by mCycleMarker <= cycleStackMarker.
+    // (There may be other streams later in sccStack from other incompletely
+    // searched SCCs, involving streams still on dfsStack.)
     //
     // DelayNodes in cycles must behave differently from those not in cycles,
     // so all DelayNodes in the SCC must be identified.
-    while (next && static_cast<ProcessedMediaTrack*>(next)->mCycleMarker <=
+    while (next && static_cast<ProcessedMediaStream*>(next)->mCycleMarker <=
                        cycleStackMarker) {
-      auto nt = next->AsAudioNodeTrack();
+      auto ns = next->AsAudioNodeStream();
       // Get next before perhaps removing from list below.
       next = next->getNext();
-      if (nt && nt->Engine()->AsDelayNodeEngine()) {
+      if (ns && ns->Engine()->AsDelayNodeEngine()) {
         haveDelayNode = true;
         // DelayNodes break cycles by producing their output in a
         // preprocessing phase; they do not need to be ordered before their
-        // consumers.  Order them at the tail of mTracks so that they can be
+        // consumers.  Order them at the tail of mStreams so that they can be
         // handled specially.  Do so now, so that DFS ignores them.
-        nt->remove();
-        nt->mCycleMarker = 0;
+        ns->remove();
+        ns->mCycleMarker = 0;
         --mFirstCycleBreaker;
-        mTracks[mFirstCycleBreaker] = nt;
+        mStreams[mFirstCycleBreaker] = ns;
       }
     }
     auto after_scc = next;
     while ((next = sccStack.getFirst()) != after_scc) {
       next->remove();
-      auto removed = static_cast<ProcessedMediaTrack*>(next);
+      auto removed = static_cast<ProcessedMediaStream*>(next);
       if (haveDelayNode) {
-        // Return tracks to the DFS stack again (to order and detect cycles
-        // without delayNodes).  Any of these tracks that are still inputs
-        // for tracks on the visited stack must be returned to the front of
+        // Return streams to the DFS stack again (to order and detect cycles
+        // without delayNodes).  Any of these streams that are still inputs
+        // for streams on the visited stack must be returned to the front of
         // the stack to be ordered before their dependents.  We know that none
-        // of these tracks need input from tracks on the visited stack, so
+        // of these streams need input from streams on the visited stack, so
         // they can all be searched and ordered before the current stack head
         // is popped.
         removed->mCycleMarker = NOT_VISITED;
         dfsStack.insertFront(removed);
       } else {
-        // Tracks in cycles without any DelayNodes must be muted, and so do
+        // Streams in cycles without any DelayNodes must be muted, and so do
         // not need input and can be ordered now.  They must be ordered before
         // their consumers so that their muted output is available.
         removed->mCycleMarker = IN_MUTED_CYCLE;
-        mTracks[orderedTrackCount] = removed;
-        ++orderedTrackCount;
+        mStreams[orderedStreamCount] = removed;
+        ++orderedStreamCount;
       }
     }
   }
 
-  MOZ_ASSERT(orderedTrackCount == mFirstCycleBreaker);
+  MOZ_ASSERT(orderedStreamCount == mFirstCycleBreaker);
 }
 
-void MediaTrackGraphImpl::CreateOrDestroyAudioTracks(MediaTrack* aTrack) {
+void MediaStreamGraphImpl::CreateOrDestroyAudioStreams(MediaStream* aStream) {
   MOZ_ASSERT(OnGraphThread());
   MOZ_ASSERT(mRealtime,
-             "Should only attempt to create audio tracks in real-time mode");
-
-  if (aTrack->mAudioOutputs.IsEmpty()) {
-    aTrack->mAudioOutputStream = nullptr;
+             "Should only attempt to create audio streams in real-time mode");
+
+  if (aStream->mAudioOutputs.IsEmpty()) {
+    aStream->mAudioOutputStreams.Clear();
     return;
   }
 
-  if (aTrack->mAudioOutputStream) {
+  if (!aStream->GetStreamTracks().GetAndResetTracksDirty() &&
+      !aStream->mAudioOutputStreams.IsEmpty()) {
     return;
   }
 
   LOG(LogLevel::Debug,
-      ("%p: Updating AudioOutputStream for MediaTrack %p", this, aTrack));
-
-  aTrack->mAudioOutputStream = MakeUnique<MediaTrack::AudioOutputStream>();
-  aTrack->mAudioOutputStream->mAudioPlaybackStartTime = mProcessedTime;
-  aTrack->mAudioOutputStream->mBlockedAudioTime = 0;
-  aTrack->mAudioOutputStream->mLastTickWritten = 0;
-
-  bool switching = false;
-  {
-    MonitorAutoLock lock(mMonitor);
-    switching = CurrentDriver()->Switching();
+      ("%p: Updating AudioOutputStreams for MediaStream %p", this, aStream));
+
+  AutoTArray<bool, 2> audioOutputStreamsFound;
+  for (uint32_t i = 0; i < aStream->mAudioOutputStreams.Length(); ++i) {
+    audioOutputStreamsFound.AppendElement(false);
   }
 
-  if (!CurrentDriver()->AsAudioCallbackDriver() && !switching) {
-    MonitorAutoLock mon(mMonitor);
-    if (LifecycleStateRef() == LIFECYCLE_RUNNING) {
-      AudioCallbackDriver* driver = new AudioCallbackDriver(
-          this, AudioInputChannelCount(), AudioInputDevicePreference());
-      CurrentDriver()->SwitchAtNextIteration(driver);
+  for (StreamTracks::TrackIter tracks(aStream->GetStreamTracks(),
+                                      MediaSegment::AUDIO);
+       !tracks.IsEnded(); tracks.Next()) {
+    uint32_t i;
+    for (i = 0; i < audioOutputStreamsFound.Length(); ++i) {
+      if (aStream->mAudioOutputStreams[i].mTrackID == tracks->GetID()) {
+        break;
+      }
+    }
+    if (i < audioOutputStreamsFound.Length()) {
+      audioOutputStreamsFound[i] = true;
+    } else {
+      MediaStream::AudioOutputStream* audioOutputStream =
+          aStream->mAudioOutputStreams.AppendElement();
+      audioOutputStream->mAudioPlaybackStartTime = mProcessedTime;
+      audioOutputStream->mBlockedAudioTime = 0;
+      audioOutputStream->mLastTickWritten = 0;
+      audioOutputStream->mTrackID = tracks->GetID();
+
+      bool switching = false;
+
+      {
+        MonitorAutoLock lock(mMonitor);
+        switching = CurrentDriver()->Switching();
+      }
+
+      if (!CurrentDriver()->AsAudioCallbackDriver() && !switching) {
+        MonitorAutoLock mon(mMonitor);
+        if (LifecycleStateRef() == LIFECYCLE_RUNNING) {
+          AudioCallbackDriver* driver = new AudioCallbackDriver(
+              this, AudioInputChannelCount(), AudioInputDevicePreference());
+          CurrentDriver()->SwitchAtNextIteration(driver);
+        }
+      }
+    }
+  }
+
+  for (int32_t i = audioOutputStreamsFound.Length() - 1; i >= 0; --i) {
+    if (!audioOutputStreamsFound[i]) {
+      aStream->mAudioOutputStreams.RemoveElementAt(i);
     }
   }
 }
 
-TrackTime MediaTrackGraphImpl::PlayAudio(MediaTrack* aTrack) {
+StreamTime MediaStreamGraphImpl::PlayAudio(MediaStream* aStream) {
   MOZ_ASSERT(OnGraphThread());
   MOZ_ASSERT(mRealtime, "Should only attempt to play audio in realtime mode");
 
   float volume = 0.0f;
-  for (uint32_t i = 0; i < aTrack->mAudioOutputs.Length(); ++i) {
-    volume += aTrack->mAudioOutputs[i].mVolume * mGlobalVolume;
+  for (uint32_t i = 0; i < aStream->mAudioOutputs.Length(); ++i) {
+    volume += aStream->mAudioOutputs[i].mVolume * mGlobalVolume;
   }
 
-  TrackTime ticksWritten = 0;
-
-  if (aTrack->mAudioOutputStream) {
+  StreamTime ticksWritten = 0;
+
+  for (uint32_t i = 0; i < aStream->mAudioOutputStreams.Length(); ++i) {
     ticksWritten = 0;
 
-    MediaTrack::AudioOutputStream& audioOutput = *aTrack->mAudioOutputStream;
-    AudioSegment* audio = aTrack->GetData<AudioSegment>();
+    MediaStream::AudioOutputStream& audioOutput =
+        aStream->mAudioOutputStreams[i];
+    StreamTracks::Track* track =
+        aStream->mTracks.FindTrack(audioOutput.mTrackID);
+    AudioSegment* audio = track->Get<AudioSegment>();
     AudioSegment output;
 
-    TrackTime offset = aTrack->GraphTimeToTrackTime(mProcessedTime);
-
-    // We don't update aTrack->mTracksStartTime here to account for time spent
-    // blocked. Instead, we'll update it in UpdateCurrentTimeForTracks after
+    StreamTime offset = aStream->GraphTimeToStreamTime(mProcessedTime);
+
+    // We don't update aStream->mTracksStartTime here to account for time spent
+    // blocked. Instead, we'll update it in UpdateCurrentTimeForStreams after
     // the blocked period has completed. But we do need to make sure we play
-    // from the right offsets in the track buffer, even if we've already
+    // from the right offsets in the stream buffer, even if we've already
     // written silence for some amount of blocked time after the current time.
     GraphTime t = mProcessedTime;
     while (t < mStateComputedTime) {
-      bool blocked = t >= aTrack->mStartBlocking;
-      GraphTime end = blocked ? mStateComputedTime : aTrack->mStartBlocking;
+      bool blocked = t >= aStream->mStartBlocking;
+      GraphTime end = blocked ? mStateComputedTime : aStream->mStartBlocking;
       NS_ASSERTION(end <= mStateComputedTime, "mStartBlocking is wrong!");
 
       // Check how many ticks of sound we can provide if we are blocked some
       // time in the middle of this cycle.
-      TrackTime toWrite = end - t;
+      StreamTime toWrite = end - t;
 
       if (blocked) {
         output.InsertNullDataAtStart(toWrite);
         ticksWritten += toWrite;
         LOG(LogLevel::Verbose,
-            ("%p: MediaTrack %p writing %" PRId64
+            ("%p: MediaStream %p writing %" PRId64
              " blocking-silence samples for "
              "%f to %f (%" PRId64 " to %" PRId64 ")",
-             this, aTrack, toWrite, MediaTimeToSeconds(t),
+             this, aStream, toWrite, MediaTimeToSeconds(t),
              MediaTimeToSeconds(end), offset, offset + toWrite));
       } else {
-        TrackTime endTicksNeeded = offset + toWrite;
-        TrackTime endTicksAvailable = audio->GetDuration();
+        StreamTime endTicksNeeded = offset + toWrite;
+        StreamTime endTicksAvailable = audio->GetDuration();
 
         if (endTicksNeeded <= endTicksAvailable) {
           LOG(LogLevel::Verbose,
-              ("%p: MediaTrack %p writing %" PRId64 " samples for %f to %f "
+              ("%p: MediaStream %p writing %" PRId64 " samples for %f to %f "
                "(samples %" PRId64 " to %" PRId64 ")",
-               this, aTrack, toWrite, MediaTimeToSeconds(t),
+               this, aStream, toWrite, MediaTimeToSeconds(t),
                MediaTimeToSeconds(end), offset, endTicksNeeded));
           output.AppendSlice(*audio, offset, endTicksNeeded);
           ticksWritten += toWrite;
           offset = endTicksNeeded;
         } else {
           // MOZ_ASSERT(track->IsEnded(), "Not enough data, and track not
           // ended."); If we are at the end of the track, maybe write the
           // remaining samples, and pad with/output silence.
           if (endTicksNeeded > endTicksAvailable &&
               offset < endTicksAvailable) {
             output.AppendSlice(*audio, offset, endTicksAvailable);
             LOG(LogLevel::Verbose,
-                ("%p: MediaTrack %p writing %" PRId64 " samples for %f to %f "
+                ("%p: MediaStream %p writing %" PRId64 " samples for %f to %f "
                  "(samples %" PRId64 " to %" PRId64 ")",
-                 this, aTrack, toWrite, MediaTimeToSeconds(t),
+                 this, aStream, toWrite, MediaTimeToSeconds(t),
                  MediaTimeToSeconds(end), offset, endTicksNeeded));
             uint32_t available = endTicksAvailable - offset;
             ticksWritten += available;
             toWrite -= available;
             offset = endTicksAvailable;
           }
           output.AppendNullData(toWrite);
           LOG(LogLevel::Verbose,
-              ("%p MediaTrack %p writing %" PRId64
+              ("%p MediaStream %p writing %" PRId64
                " padding slsamples for %f to "
                "%f (samples %" PRId64 " to %" PRId64 ")",
-               this, aTrack, toWrite, MediaTimeToSeconds(t),
+               this, aStream, toWrite, MediaTimeToSeconds(t),
                MediaTimeToSeconds(end), offset, endTicksNeeded));
           ticksWritten += toWrite;
         }
         output.ApplyVolume(volume);
       }
       t = end;
     }
     audioOutput.mLastTickWritten = offset;
 
     output.WriteTo(mMixer, AudioOutputChannelCount(), mSampleRate);
   }
   return ticksWritten;
 }
 
-void MediaTrackGraphImpl::OpenAudioInputImpl(CubebUtils::AudioDeviceID aID,
-                                             AudioDataListener* aListener) {
+void MediaStreamGraphImpl::OpenAudioInputImpl(CubebUtils::AudioDeviceID aID,
+                                              AudioDataListener* aListener) {
   MOZ_ASSERT(OnGraphThread());
-  // Only allow one device per MTG (hence, per document), but allow opening a
+  // Only allow one device per MSG (hence, per document), but allow opening a
   // device multiple times
   nsTArray<RefPtr<AudioDataListener>>& listeners =
       mInputDeviceUsers.GetOrInsert(aID);
   if (listeners.IsEmpty() && mInputDeviceUsers.Count() > 1) {
     // We don't support opening multiple input device in a graph for now.
     listeners.RemoveElement(aID);
     return;
   }
@@ -671,38 +756,38 @@ void MediaTrackGraphImpl::OpenAudioInput
       CurrentDriver()->SwitchAtNextIteration(driver);
     } else {
       LOG(LogLevel::Error, ("OpenAudioInput in shutdown!"));
       MOZ_ASSERT_UNREACHABLE("Can't open cubeb inputs in shutdown");
     }
   }
 }
 
-nsresult MediaTrackGraphImpl::OpenAudioInput(CubebUtils::AudioDeviceID aID,
-                                             AudioDataListener* aListener) {
+nsresult MediaStreamGraphImpl::OpenAudioInput(CubebUtils::AudioDeviceID aID,
+                                              AudioDataListener* aListener) {
   MOZ_ASSERT(NS_IsMainThread());
   class Message : public ControlMessage {
    public:
-    Message(MediaTrackGraphImpl* aGraph, CubebUtils::AudioDeviceID aID,
+    Message(MediaStreamGraphImpl* aGraph, CubebUtils::AudioDeviceID aID,
             AudioDataListener* aListener)
         : ControlMessage(nullptr),
           mGraph(aGraph),
           mID(aID),
           mListener(aListener) {}
     void Run() override { mGraph->OpenAudioInputImpl(mID, mListener); }
-    MediaTrackGraphImpl* mGraph;
+    MediaStreamGraphImpl* mGraph;
     CubebUtils::AudioDeviceID mID;
     RefPtr<AudioDataListener> mListener;
   };
   // XXX Check not destroyed!
   this->AppendMessage(MakeUnique<Message>(this, aID, aListener));
   return NS_OK;
 }
 
-void MediaTrackGraphImpl::CloseAudioInputImpl(
+void MediaStreamGraphImpl::CloseAudioInputImpl(
     Maybe<CubebUtils::AudioDeviceID>& aID, AudioDataListener* aListener) {
   MOZ_ASSERT(OnGraphThreadOrNotRunning());
   // It is possible to not know the ID here, find it first.
   if (aID.isNothing()) {
     for (auto iter = mInputDeviceUsers.Iter(); !iter.Done(); iter.Next()) {
       if (iter.Data().Contains(aListener)) {
         aID = Some(iter.Key());
       }
@@ -712,17 +797,17 @@ void MediaTrackGraphImpl::CloseAudioInpu
 
   nsTArray<RefPtr<AudioDataListener>>* listeners =
       mInputDeviceUsers.GetValue(aID.value());
 
   MOZ_ASSERT(listeners);
   DebugOnly<bool> wasPresent = listeners->RemoveElement(aListener);
   MOZ_ASSERT(wasPresent);
 
-  // Breaks the cycle between the MTG and the listener.
+  // Breaks the cycle between the MSG and the listener.
   aListener->Disconnect(this);
 
   if (!listeners->IsEmpty()) {
     // There is still a consumer for this audio input device
     return;
   }
 
   mInputDeviceID = nullptr;  // reset to default
@@ -748,39 +833,39 @@ void MediaTrackGraphImpl::CloseAudioInpu
           ("%p: CloseInput: no output present (SystemClockCallback)", this));
 
       driver = new SystemClockDriver(this);
       CurrentDriver()->SwitchAtNextIteration(driver);
     }  // else SystemClockDriver->SystemClockDriver, no switch
   }
 }
 
-void MediaTrackGraphImpl::CloseAudioInput(Maybe<CubebUtils::AudioDeviceID>& aID,
-                                          AudioDataListener* aListener) {
+void MediaStreamGraphImpl::CloseAudioInput(
+    Maybe<CubebUtils::AudioDeviceID>& aID, AudioDataListener* aListener) {
   MOZ_ASSERT(NS_IsMainThread());
   class Message : public ControlMessage {
    public:
-    Message(MediaTrackGraphImpl* aGraph, Maybe<CubebUtils::AudioDeviceID>& aID,
+    Message(MediaStreamGraphImpl* aGraph, Maybe<CubebUtils::AudioDeviceID>& aID,
             AudioDataListener* aListener)
         : ControlMessage(nullptr),
           mGraph(aGraph),
           mID(aID),
           mListener(aListener) {}
     void Run() override { mGraph->CloseAudioInputImpl(mID, mListener); }
-    MediaTrackGraphImpl* mGraph;
+    MediaStreamGraphImpl* mGraph;
     Maybe<CubebUtils::AudioDeviceID> mID;
     RefPtr<AudioDataListener> mListener;
   };
   this->AppendMessage(MakeUnique<Message>(this, aID, aListener));
 }
 
 // All AudioInput listeners get the same speaker data (at least for now).
-void MediaTrackGraphImpl::NotifyOutputData(AudioDataValue* aBuffer,
-                                           size_t aFrames, TrackRate aRate,
-                                           uint32_t aChannels) {
+void MediaStreamGraphImpl::NotifyOutputData(AudioDataValue* aBuffer,
+                                            size_t aFrames, TrackRate aRate,
+                                            uint32_t aChannels) {
 #ifdef ANDROID
   // On Android, mInputDeviceID is always null and represents the default
   // device.
   // The absence of an input consumer is enough to know we need to bail out
   // here.
   if (!mInputDeviceUsers.GetValue(mInputDeviceID)) {
     return;
   }
@@ -794,19 +879,19 @@ void MediaTrackGraphImpl::NotifyOutputDa
   nsTArray<RefPtr<AudioDataListener>>* listeners =
       mInputDeviceUsers.GetValue(mInputDeviceID);
   MOZ_ASSERT(listeners);
   for (auto& listener : *listeners) {
     listener->NotifyOutputData(this, aBuffer, aFrames, aRate, aChannels);
   }
 }
 
-void MediaTrackGraphImpl::NotifyInputData(const AudioDataValue* aBuffer,
-                                          size_t aFrames, TrackRate aRate,
-                                          uint32_t aChannels) {
+void MediaStreamGraphImpl::NotifyInputData(const AudioDataValue* aBuffer,
+                                           size_t aFrames, TrackRate aRate,
+                                           uint32_t aChannels) {
 #ifdef ANDROID
   if (!mInputDeviceUsers.GetValue(mInputDeviceID)) {
     return;
   }
 #else
 #  ifdef DEBUG
   {
     MonitorAutoLock lock(mMonitor);
@@ -823,17 +908,17 @@ void MediaTrackGraphImpl::NotifyInputDat
   nsTArray<RefPtr<AudioDataListener>>* listeners =
       mInputDeviceUsers.GetValue(mInputDeviceID);
   MOZ_ASSERT(listeners);
   for (auto& listener : *listeners) {
     listener->NotifyInputData(this, aBuffer, aFrames, aRate, aChannels);
   }
 }
 
-void MediaTrackGraphImpl::DeviceChangedImpl() {
+void MediaStreamGraphImpl::DeviceChangedImpl() {
   MOZ_ASSERT(OnGraphThread());
 
 #ifdef ANDROID
   if (!mInputDeviceUsers.GetValue(mInputDeviceID)) {
     return;
   }
 #else
   if (!mInputDeviceID) {
@@ -843,48 +928,49 @@ void MediaTrackGraphImpl::DeviceChangedI
 
   nsTArray<RefPtr<AudioDataListener>>* listeners =
       mInputDeviceUsers.GetValue(mInputDeviceID);
   for (auto& listener : *listeners) {
     listener->DeviceChanged(this);
   }
 }
 
-void MediaTrackGraphImpl::DeviceChanged() {
+void MediaStreamGraphImpl::DeviceChanged() {
   // This is safe to be called from any thread: this message comes from an
   // underlying platform API, and we don't have much guarantees. If it is not
   // called from the main thread (and it probably will rarely be), it will post
   // itself to the main thread, and the actual device change message will be ran
   // and acted upon on the graph thread.
   if (!NS_IsMainThread()) {
-    RefPtr<nsIRunnable> runnable = WrapRunnable(
-        RefPtr<MediaTrackGraphImpl>(this), &MediaTrackGraphImpl::DeviceChanged);
+    RefPtr<nsIRunnable> runnable =
+        WrapRunnable(RefPtr<MediaStreamGraphImpl>(this),
+                     &MediaStreamGraphImpl::DeviceChanged);
     mAbstractMainThread->Dispatch(runnable.forget());
     return;
   }
 
   class Message : public ControlMessage {
    public:
-    explicit Message(MediaTrackGraph* aGraph)
+    explicit Message(MediaStreamGraph* aGraph)
         : ControlMessage(nullptr),
-          mGraphImpl(static_cast<MediaTrackGraphImpl*>(aGraph)) {}
+          mGraphImpl(static_cast<MediaStreamGraphImpl*>(aGraph)) {}
     void Run() override { mGraphImpl->DeviceChangedImpl(); }
     // We know that this is valid, because the graph can't shutdown if it has
     // messages.
-    MediaTrackGraphImpl* mGraphImpl;
+    MediaStreamGraphImpl* mGraphImpl;
   };
 
   // Reset the latency, it will get fetched again next time it's queried.
   MOZ_ASSERT(NS_IsMainThread());
   mAudioOutputLatency = 0.0;
 
   AppendMessage(MakeUnique<Message>(this));
 }
 
-void MediaTrackGraphImpl::ReevaluateInputDevice() {
+void MediaStreamGraphImpl::ReevaluateInputDevice() {
   MOZ_ASSERT(OnGraphThread());
   bool needToSwitch = false;
 
   if (CurrentDriver()->AsAudioCallbackDriver()) {
     AudioCallbackDriver* audioCallbackDriver =
         CurrentDriver()->AsAudioCallbackDriver();
     if (audioCallbackDriver->InputChannelCount() != AudioInputChannelCount()) {
       needToSwitch = true;
@@ -909,38 +995,38 @@ void MediaTrackGraphImpl::ReevaluateInpu
         this, AudioInputChannelCount(), AudioInputDevicePreference());
     {
       MonitorAutoLock lock(mMonitor);
       CurrentDriver()->SwitchAtNextIteration(newDriver);
     }
   }
 }
 
-bool MediaTrackGraphImpl::OnGraphThreadOrNotRunning() const {
+bool MediaStreamGraphImpl::OnGraphThreadOrNotRunning() const {
   // either we're on the right thread (and calling CurrentDriver() is safe),
   // or we're going to fail the assert anyway, so don't cross-check
   // via CurrentDriver().
   return mDetectedNotRunning ? NS_IsMainThread() : OnGraphThread();
 }
 
-bool MediaTrackGraphImpl::OnGraphThread() const {
+bool MediaStreamGraphImpl::OnGraphThread() const {
   // we're on the right thread (and calling mDriver is safe),
   MOZ_ASSERT(mDriver);
   if (mGraphRunner && mGraphRunner->OnThread()) {
     return true;
   }
   return mDriver->OnThread();
 }
 
-bool MediaTrackGraphImpl::Destroyed() const {
+bool MediaStreamGraphImpl::Destroyed() const {
   MOZ_ASSERT(NS_IsMainThread());
   return !mSelfRef;
 }
 
-bool MediaTrackGraphImpl::ShouldUpdateMainThread() {
+bool MediaStreamGraphImpl::ShouldUpdateMainThread() {
   MOZ_ASSERT(OnGraphThreadOrNotRunning());
   if (mRealtime) {
     return true;
   }
 
   TimeStamp now = TimeStamp::Now();
   // For offline graphs, update now if there is no pending iteration or if it
   // has been long enough since the last update.
@@ -948,300 +1034,307 @@ bool MediaTrackGraphImpl::ShouldUpdateMa
       ((now - mLastMainThreadUpdate).ToMilliseconds() >
        CurrentDriver()->IterationDuration())) {
     mLastMainThreadUpdate = now;
     return true;
   }
   return false;
 }
 
-void MediaTrackGraphImpl::PrepareUpdatesToMainThreadState(bool aFinalUpdate) {
+void MediaStreamGraphImpl::PrepareUpdatesToMainThreadState(bool aFinalUpdate) {
   MOZ_ASSERT(OnGraphThreadOrNotRunning());
   mMonitor.AssertCurrentThreadOwns();
 
   // We don't want to frequently update the main thread about timing update
   // when we are not running in realtime.
   if (aFinalUpdate || ShouldUpdateMainThread()) {
     // Strip updates that will be obsoleted below, so as to keep the length of
-    // mTrackUpdates sane.
+    // mStreamUpdates sane.
     size_t keptUpdateCount = 0;
-    for (size_t i = 0; i < mTrackUpdates.Length(); ++i) {
-      MediaTrack* track = mTrackUpdates[i].mTrack;
-      // RemoveTrackGraphThread() clears mTrack in updates for
-      // tracks that are removed from the graph.
-      MOZ_ASSERT(!track || track->GraphImpl() == this);
-      if (!track || track->MainThreadNeedsUpdates()) {
-        // Discard this update as it has either been cleared when the track
+    for (size_t i = 0; i < mStreamUpdates.Length(); ++i) {
+      MediaStream* stream = mStreamUpdates[i].mStream;
+      // RemoveStreamGraphThread() clears mStream in updates for
+      // streams that are removed from the graph.
+      MOZ_ASSERT(!stream || stream->GraphImpl() == this);
+      if (!stream || stream->MainThreadNeedsUpdates()) {
+        // Discard this update as it has either been cleared when the stream
         // was destroyed or there will be a newer update below.
         continue;
       }
       if (keptUpdateCount != i) {
-        mTrackUpdates[keptUpdateCount] = std::move(mTrackUpdates[i]);
-        MOZ_ASSERT(!mTrackUpdates[i].mTrack);
+        mStreamUpdates[keptUpdateCount] = std::move(mStreamUpdates[i]);
+        MOZ_ASSERT(!mStreamUpdates[i].mStream);
       }
       ++keptUpdateCount;
     }
-    mTrackUpdates.TruncateLength(keptUpdateCount);
-
-    mTrackUpdates.SetCapacity(mTrackUpdates.Length() + mTracks.Length() +
-                              mSuspendedTracks.Length());
-    for (MediaTrack* track : AllTracks()) {
-      if (!track->MainThreadNeedsUpdates()) {
+    mStreamUpdates.TruncateLength(keptUpdateCount);
+
+    mStreamUpdates.SetCapacity(mStreamUpdates.Length() + mStreams.Length() +
+                               mSuspendedStreams.Length());
+    for (MediaStream* stream : AllStreams()) {
+      if (!stream->MainThreadNeedsUpdates()) {
         continue;
       }
-      TrackUpdate* update = mTrackUpdates.AppendElement();
-      update->mTrack = track;
+      StreamUpdate* update = mStreamUpdates.AppendElement();
+      update->mStream = stream;
       // No blocking to worry about here, since we've passed
-      // UpdateCurrentTimeForTracks.
+      // UpdateCurrentTimeForStreams.
       update->mNextMainThreadCurrentTime =
-          track->GraphTimeToTrackTime(mProcessedTime);
-      update->mNextMainThreadEnded = track->mNotifiedEnded;
+          stream->GraphTimeToStreamTime(mProcessedTime);
+      update->mNextMainThreadFinished = stream->mNotifiedFinished;
     }
     mNextMainThreadGraphTime = mProcessedTime;
     if (!mPendingUpdateRunnables.IsEmpty()) {
       mUpdateRunnables.AppendElements(std::move(mPendingUpdateRunnables));
     }
   }
 
   // If this is the final update, then a stable state event will soon be
   // posted just before this thread finishes, and so there is no need to also
   // post here.
   if (!aFinalUpdate &&
       // Don't send the message to the main thread if it's not going to have
       // any work to do.
-      !(mUpdateRunnables.IsEmpty() && mTrackUpdates.IsEmpty())) {
+      !(mUpdateRunnables.IsEmpty() && mStreamUpdates.IsEmpty())) {
     EnsureStableStateEventPosted();
   }
 }
 
-GraphTime MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(GraphTime aTime) {
+GraphTime MediaStreamGraphImpl::RoundUpToEndOfAudioBlock(GraphTime aTime) {
   if (aTime % WEBAUDIO_BLOCK_SIZE == 0) {
     return aTime;
   }
   return RoundUpToNextAudioBlock(aTime);
 }
 
-GraphTime MediaTrackGraphImpl::RoundUpToNextAudioBlock(GraphTime aTime) {
+GraphTime MediaStreamGraphImpl::RoundUpToNextAudioBlock(GraphTime aTime) {
   uint64_t block = aTime >> WEBAUDIO_BLOCK_SIZE_BITS;
   uint64_t nextBlock = block + 1;
   GraphTime nextTime = nextBlock << WEBAUDIO_BLOCK_SIZE_BITS;
   return nextTime;
 }
 
-void MediaTrackGraphImpl::ProduceDataForTracksBlockByBlock(
-    uint32_t aTrackIndex, TrackRate aSampleRate) {
+void MediaStreamGraphImpl::ProduceDataForStreamsBlockByBlock(
+    uint32_t aStreamIndex, TrackRate aSampleRate) {
   MOZ_ASSERT(OnGraphThread());
-  MOZ_ASSERT(aTrackIndex <= mFirstCycleBreaker,
-             "Cycle breaker is not AudioNodeTrack?");
+  MOZ_ASSERT(aStreamIndex <= mFirstCycleBreaker,
+             "Cycle breaker is not AudioNodeStream?");
   GraphTime t = mProcessedTime;
   while (t < mStateComputedTime) {
     GraphTime next = RoundUpToNextAudioBlock(t);
-    for (uint32_t i = mFirstCycleBreaker; i < mTracks.Length(); ++i) {
-      auto nt = static_cast<AudioNodeTrack*>(mTracks[i]);
-      MOZ_ASSERT(nt->AsAudioNodeTrack());
-      nt->ProduceOutputBeforeInput(t);
+    for (uint32_t i = mFirstCycleBreaker; i < mStreams.Length(); ++i) {
+      auto ns = static_cast<AudioNodeStream*>(mStreams[i]);
+      MOZ_ASSERT(ns->AsAudioNodeStream());
+      ns->ProduceOutputBeforeInput(t);
     }
-    for (uint32_t i = aTrackIndex; i < mTracks.Length(); ++i) {
-      ProcessedMediaTrack* pt = mTracks[i]->AsProcessedTrack();
-      if (pt) {
-        pt->ProcessInput(
-            t, next,
-            (next == mStateComputedTime) ? ProcessedMediaTrack::ALLOW_END : 0);
+    for (uint32_t i = aStreamIndex; i < mStreams.Length(); ++i) {
+      ProcessedMediaStream* ps = mStreams[i]->AsProcessedStream();
+      if (ps) {
+        ps->ProcessInput(t, next,
+                         (next == mStateComputedTime)
+                             ? ProcessedMediaStream::ALLOW_FINISH
+                             : 0);
       }
     }
     t = next;
   }
   NS_ASSERTION(t == mStateComputedTime,
                "Something went wrong with rounding to block boundaries");
 }
 
-void MediaTrackGraphImpl::RunMessageAfterProcessing(
+void MediaStreamGraphImpl::RunMessageAfterProcessing(
     UniquePtr<ControlMessage> aMessage) {
   MOZ_ASSERT(OnGraphThread());
 
   if (mFrontMessageQueue.IsEmpty()) {
     mFrontMessageQueue.AppendElement();
   }
 
   // Only one block is used for messages from the graph thread.
   MOZ_ASSERT(mFrontMessageQueue.Length() == 1);
   mFrontMessageQueue[0].mMessages.AppendElement(std::move(aMessage));
 }
 
-void MediaTrackGraphImpl::RunMessagesInQueue() {
+void MediaStreamGraphImpl::RunMessagesInQueue() {
   TRACE_AUDIO_CALLBACK();
   MOZ_ASSERT(OnGraphThread());
   // Calculate independent action times for each batch of messages (each
   // batch corresponding to an event loop task). This isolates the performance
   // of different scripts to some extent.
   for (uint32_t i = 0; i < mFrontMessageQueue.Length(); ++i) {
     nsTArray<UniquePtr<ControlMessage>>& messages =
         mFrontMessageQueue[i].mMessages;
 
     for (uint32_t j = 0; j < messages.Length(); ++j) {
       messages[j]->Run();
     }
   }
   mFrontMessageQueue.Clear();
 }
 
-void MediaTrackGraphImpl::UpdateGraph(GraphTime aEndBlockingDecisions) {
+void MediaStreamGraphImpl::UpdateGraph(GraphTime aEndBlockingDecisions) {
   TRACE_AUDIO_CALLBACK();
   MOZ_ASSERT(OnGraphThread());
   MOZ_ASSERT(aEndBlockingDecisions >= mProcessedTime);
   // The next state computed time can be the same as the previous: it
   // means the driver would have been blocking indefinitly, but the graph has
   // been woken up right after having been to sleep.
   MOZ_ASSERT(aEndBlockingDecisions >= mStateComputedTime);
 
-  UpdateTrackOrder();
+  UpdateStreamOrder();
 
   bool ensureNextIteration = false;
 
-  for (MediaTrack* track : mTracks) {
-    if (SourceMediaTrack* is = track->AsSourceTrack()) {
+  for (MediaStream* stream : mStreams) {
+    if (SourceMediaStream* is = stream->AsSourceStream()) {
       ensureNextIteration |= is->PullNewData(aEndBlockingDecisions);
       is->ExtractPendingInput(mStateComputedTime, aEndBlockingDecisions);
     }
-    if (track->mEnded) {
-      // The track's not suspended, and since it's ended, underruns won't
+    if (stream->mFinished) {
+      // The stream's not suspended, and since it's finished, underruns won't
       // stop it playing out. So there's no blocking other than what we impose
       // here.
-      GraphTime endTime = track->GetEnd() + track->mStartTime;
+      GraphTime endTime = stream->GetStreamTracks().GetLatestTrackEnd() +
+                          stream->mTracksStartTime;
       if (endTime <= mStateComputedTime) {
         LOG(LogLevel::Verbose,
-            ("%p: MediaTrack %p is blocked due to being ended", this, track));
-        track->mStartBlocking = mStateComputedTime;
+            ("%p: MediaStream %p is blocked due to being finished", this,
+             stream));
+        stream->mStartBlocking = mStateComputedTime;
       } else {
         LOG(LogLevel::Verbose,
-            ("%p: MediaTrack %p has ended, but is not blocked yet (current "
-             "time %f, end at %f)",
-             this, track, MediaTimeToSeconds(mStateComputedTime),
+            ("%p: MediaStream %p is finished, but not blocked yet (end at %f, "
+             "with "
+             "blocking at %f)",
+             this, stream, MediaTimeToSeconds(stream->GetTracksEnd()),
              MediaTimeToSeconds(endTime)));
-        // Data can't be added to a ended track, so underruns are irrelevant.
-        MOZ_ASSERT(endTime <= aEndBlockingDecisions);
-        track->mStartBlocking = endTime;
+        // Data can't be added to a finished stream, so underruns are
+        // irrelevant.
+        stream->mStartBlocking = std::min(endTime, aEndBlockingDecisions);
       }
     } else {
-      track->mStartBlocking = WillUnderrun(track, aEndBlockingDecisions);
+      stream->mStartBlocking = WillUnderrun(stream, aEndBlockingDecisions);
 
 #ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
-      if (SourceMediaTrack* s = track->AsSourceTrack()) {
-        if (s->Ended()) {
-          continue;
-        }
-        {
-          MutexAutoLock lock(s->mMutex);
-          if (!s->mUpdateTrack->mPullingEnabled) {
-            // The invariant that data must be provided is only enforced when
-            // pulling.
+      if (SourceMediaStream* s = stream->AsSourceStream()) {
+        for (StreamTracks::TrackIter i(s->mTracks); !i.IsEnded(); i.Next()) {
+          if (i->IsEnded()) {
+            continue;
+          }
+          SourceMediaStream::TrackData* data;
+          {
+            MutexAutoLock lock(s->mMutex);
+            data = s->FindDataForTrack(i->GetID());
+          }
+          MOZ_ASSERT(data);
+          if (!data->mPullingEnabled) {
             continue;
           }
-        }
-        if (track->GetEnd() <
-            track->GraphTimeToTrackTime(aEndBlockingDecisions)) {
-          LOG(LogLevel::Error,
-              ("%p: SourceMediaTrack %p (%s) is live and pulled, "
-               "but wasn't fed "
-               "enough data. TrackListeners=%zu. Track-end=%f, "
-               "Iteration-end=%f",
-               this, track,
-               (track->mType == MediaSegment::AUDIO ? "audio" : "video"),
-               track->mTrackListeners.Length(),
-               MediaTimeToSeconds(track->GetEnd()),
-               MediaTimeToSeconds(
-                   track->GraphTimeToTrackTime(aEndBlockingDecisions))));
-          MOZ_DIAGNOSTIC_ASSERT(false,
-                                "A non-ended SourceMediaTrack wasn't fed "
-                                "enough data by NotifyPull");
+          if (i->GetEnd() <
+              stream->GraphTimeToStreamTime(aEndBlockingDecisions)) {
+            LOG(LogLevel::Error,
+                ("%p: SourceMediaStream %p track %u (%s) is live and pulled, "
+                 "but wasn't fed "
+                 "enough data. TrackListeners=%zu. Track-end=%f, "
+                 "Iteration-end=%f",
+                 this, stream, i->GetID(),
+                 (i->GetType() == MediaSegment::AUDIO ? "audio" : "video"),
+                 stream->mTrackListeners.Length(),
+                 MediaTimeToSeconds(i->GetEnd()),
+                 MediaTimeToSeconds(
+                     stream->GraphTimeToStreamTime(aEndBlockingDecisions))));
+            MOZ_DIAGNOSTIC_ASSERT(false,
+                                  "A non-finished SourceMediaStream wasn't fed "
+                                  "enough data by NotifyPull");
+          }
         }
       }
 #endif /* MOZ_DIAGNOSTIC_ASSERT_ENABLED */
     }
   }
 
-  for (MediaTrack* track : mSuspendedTracks) {
-    track->mStartBlocking = mStateComputedTime;
+  for (MediaStream* stream : mSuspendedStreams) {
+    stream->mStartBlocking = mStateComputedTime;
   }
 
   // If the loop is woken up so soon that IterationEnd() barely advances or
   // if an offline graph is not currently rendering, we end up having
   // aEndBlockingDecisions == mStateComputedTime.
   // Since the process interval [mStateComputedTime, aEndBlockingDecision) is
-  // empty, Process() will not find any unblocked track and so will not
+  // empty, Process() will not find any unblocked stream and so will not
   // ensure another iteration.  If the graph should be rendering, then ensure
   // another iteration to render.
   if (ensureNextIteration || (aEndBlockingDecisions == mStateComputedTime &&
                               mStateComputedTime < mEndTime)) {
     EnsureNextIteration();
   }
 }
 
-void MediaTrackGraphImpl::Process() {
+void MediaStreamGraphImpl::Process() {
   TRACE_AUDIO_CALLBACK();
   MOZ_ASSERT(OnGraphThread());
-  // Play track contents.
+  // Play stream contents.
   bool allBlockedForever = true;
-  // True when we've done ProcessInput for all processed tracks.
+  // True when we've done ProcessInput for all processed streams.
   bool doneAllProducing = false;
   // This is the number of frame that are written to the AudioStreams, for
   // this cycle.
-  TrackTime ticksPlayed = 0;
+  StreamTime ticksPlayed = 0;
 
   mMixer.StartMixing();
 
-  // Figure out what each track wants to do
-  for (uint32_t i = 0; i < mTracks.Length(); ++i) {
-    MediaTrack* track = mTracks[i];
+  // Figure out what each stream wants to do
+  for (uint32_t i = 0; i < mStreams.Length(); ++i) {
+    MediaStream* stream = mStreams[i];
     if (!doneAllProducing) {
-      ProcessedMediaTrack* pt = track->AsProcessedTrack();
-      if (pt) {
-        AudioNodeTrack* n = track->AsAudioNodeTrack();
+      ProcessedMediaStream* ps = stream->AsProcessedStream();
+      if (ps) {
+        AudioNodeStream* n = stream->AsAudioNodeStream();
         if (n) {
 #ifdef DEBUG
-          // Verify that the sampling rate for all of the following tracks is
+          // Verify that the sampling rate for all of the following streams is
           // the same
-          for (uint32_t j = i + 1; j < mTracks.Length(); ++j) {
-            AudioNodeTrack* nextTrack = mTracks[j]->AsAudioNodeTrack();
-            if (nextTrack) {
-              MOZ_ASSERT(n->mSampleRate == nextTrack->mSampleRate,
-                         "All AudioNodeTracks in the graph must have the same "
+          for (uint32_t j = i + 1; j < mStreams.Length(); ++j) {
+            AudioNodeStream* nextStream = mStreams[j]->AsAudioNodeStream();
+            if (nextStream) {
+              MOZ_ASSERT(n->SampleRate() == nextStream->SampleRate(),
+                         "All AudioNodeStreams in the graph must have the same "
                          "sampling rate");
             }
           }
 #endif
-          // Since an AudioNodeTrack is present, go ahead and
-          // produce audio block by block for all the rest of the tracks.
-          ProduceDataForTracksBlockByBlock(i, n->mSampleRate);
+          // Since an AudioNodeStream is present, go ahead and
+          // produce audio block by block for all the rest of the streams.
+          ProduceDataForStreamsBlockByBlock(i, n->SampleRate());
           doneAllProducing = true;
         } else {
-          pt->ProcessInput(mProcessedTime, mStateComputedTime,
-                           ProcessedMediaTrack::ALLOW_END);
-          // Assert that a live track produced enough data
-          MOZ_ASSERT_IF(!track->mEnded,
-                        track->GetEnd() >= GraphTimeToTrackTimeWithBlocking(
-                                               track, mStateComputedTime));
+          ps->ProcessInput(mProcessedTime, mStateComputedTime,
+                           ProcessedMediaStream::ALLOW_FINISH);
+          NS_ASSERTION(
+              stream->mTracks.GetEarliestTrackEnd() >=
+                  GraphTimeToStreamTimeWithBlocking(stream, mStateComputedTime),
+              "Stream did not produce enough data");
         }
       }
     }
     // Only playback audio and video in real-time mode
     if (mRealtime) {
-      CreateOrDestroyAudioTracks(track);
+      CreateOrDestroyAudioStreams(stream);
       if (CurrentDriver()->AsAudioCallbackDriver()) {
-        TrackTime ticksPlayedForThisTrack = PlayAudio(track);
+        StreamTime ticksPlayedForThisStream = PlayAudio(stream);
         if (!ticksPlayed) {
-          ticksPlayed = ticksPlayedForThisTrack;
+          ticksPlayed = ticksPlayedForThisStream;
         } else {
-          MOZ_ASSERT(!ticksPlayedForThisTrack ||
-                         ticksPlayedForThisTrack == ticksPlayed,
-                     "Each track should have the same number of frame.");
+          MOZ_ASSERT(!ticksPlayedForThisStream ||
+                         ticksPlayedForThisStream == ticksPlayed,
+                     "Each stream should have the same number of frame.");
         }
       }
     }
-    if (track->mStartBlocking > mProcessedTime) {
+    if (stream->mStartBlocking > mProcessedTime) {
       allBlockedForever = false;
     }
   }
 
   if (CurrentDriver()->AsAudioCallbackDriver()) {
     if (!ticksPlayed) {
       // Nothing was played, so the mixer doesn't know how many frames were
       // processed. We still tell it so AudioCallbackDriver knows how much has
@@ -1253,50 +1346,50 @@ void MediaTrackGraphImpl::Process() {
     mMixer.FinishMixing();
   }
 
   if (!allBlockedForever) {
     EnsureNextIteration();
   }
 }
 
-bool MediaTrackGraphImpl::UpdateMainThreadState() {
+bool MediaStreamGraphImpl::UpdateMainThreadState() {
   MOZ_ASSERT(OnGraphThread());
   if (mForceShutDown) {
-    for (MediaTrack* track : AllTracks()) {
-      track->NotifyForcedShutdown();
+    for (MediaStream* stream : AllStreams()) {
+      stream->NotifyForcedShutdown();
     }
   }
 
   MonitorAutoLock lock(mMonitor);
   bool finalUpdate =
       mForceShutDown || (IsEmpty() && mBackMessageQueue.IsEmpty());
   PrepareUpdatesToMainThreadState(finalUpdate);
   if (finalUpdate) {
     // Enter shutdown mode when this iteration is completed.
-    // No need to Destroy tracks here. The main-thread owner of each
-    // track is responsible for calling Destroy on them.
+    // No need to Destroy streams here. The main-thread owner of each
+    // stream is responsible for calling Destroy on them.
     return false;
   }
 
   CurrentDriver()->WaitForNextIteration();
 
   SwapMessageQueues();
   return true;
 }
 
-bool MediaTrackGraphImpl::OneIteration(GraphTime aStateEnd) {
+bool MediaStreamGraphImpl::OneIteration(GraphTime aStateEnd) {
   if (mGraphRunner) {
     return mGraphRunner->OneIteration(aStateEnd);
   }
 
   return OneIterationImpl(aStateEnd);
 }
 
-bool MediaTrackGraphImpl::OneIterationImpl(GraphTime aStateEnd) {
+bool MediaStreamGraphImpl::OneIterationImpl(GraphTime aStateEnd) {
   TRACE_AUDIO_CALLBACK();
 
   // Changes to LIFECYCLE_RUNNING occur before starting or reviving the graph
   // thread, and so the monitor need not be held to check mLifecycleState.
   // LIFECYCLE_THREAD_NOT_STARTED is possible when shutting down offline
   // graphs that have not started.
   MOZ_DIAGNOSTIC_ASSERT(mLifecycleState <= LIFECYCLE_RUNNING);
   MOZ_ASSERT(OnGraphThread());
@@ -1311,130 +1404,130 @@ bool MediaTrackGraphImpl::OneIterationIm
 
   mStateComputedTime = stateEnd;
 
   Process();
 
   GraphTime oldProcessedTime = mProcessedTime;
   mProcessedTime = stateEnd;
 
-  UpdateCurrentTimeForTracks(oldProcessedTime);
+  UpdateCurrentTimeForStreams(oldProcessedTime);
 
   ProcessChunkMetadata(oldProcessedTime);
 
   // Process graph messages queued from RunMessageAfterProcessing() on this
   // thread during the iteration.
   RunMessagesInQueue();
 
   return UpdateMainThreadState();
 }
 
-void MediaTrackGraphImpl::ApplyTrackUpdate(TrackUpdate* aUpdate) {
+void MediaStreamGraphImpl::ApplyStreamUpdate(StreamUpdate* aUpdate) {
   MOZ_ASSERT(NS_IsMainThread());
   mMonitor.AssertCurrentThreadOwns();
 
-  MediaTrack* track = aUpdate->mTrack;
-  if (!track) return;
-  track->mMainThreadCurrentTime = aUpdate->mNextMainThreadCurrentTime;
-  track->mMainThreadEnded = aUpdate->mNextMainThreadEnded;
-
-  if (track->ShouldNotifyTrackEnded()) {
-    track->NotifyMainThreadListeners();
+  MediaStream* stream = aUpdate->mStream;
+  if (!stream) return;
+  stream->mMainThreadCurrentTime = aUpdate->mNextMainThreadCurrentTime;
+  stream->mMainThreadFinished = aUpdate->mNextMainThreadFinished;
+
+  if (stream->ShouldNotifyStreamFinished()) {
+    stream->NotifyMainThreadListeners();
   }
 }
 
-void MediaTrackGraphImpl::ForceShutDown() {
+void MediaStreamGraphImpl::ForceShutDown() {
   MOZ_ASSERT(NS_IsMainThread(), "Must be called on main thread");
-  LOG(LogLevel::Debug, ("%p: MediaTrackGraph::ForceShutdown", this));
+  LOG(LogLevel::Debug, ("%p: MediaStreamGraph::ForceShutdown", this));
 
   if (mShutdownBlocker) {
     // Avoid waiting forever for a graph to shut down
     // synchronously.  Reports are that some 3rd-party audio drivers
     // occasionally hang in shutdown (both for us and Chrome).
     NS_NewTimerWithCallback(
         getter_AddRefs(mShutdownTimer), this,
-        MediaTrackGraph::AUDIO_CALLBACK_DRIVER_SHUTDOWN_TIMEOUT,
+        MediaStreamGraph::AUDIO_CALLBACK_DRIVER_SHUTDOWN_TIMEOUT,
         nsITimer::TYPE_ONE_SHOT);
   }
 
   class Message final : public ControlMessage {
    public:
-    explicit Message(MediaTrackGraphImpl* aGraph)
+    explicit Message(MediaStreamGraphImpl* aGraph)
         : ControlMessage(nullptr), mGraph(aGraph) {}
     void Run() override { mGraph->mForceShutDown = true; }
     // The graph owns this message.
-    MediaTrackGraphImpl* MOZ_NON_OWNING_REF mGraph;
+    MediaStreamGraphImpl* MOZ_NON_OWNING_REF mGraph;
   };
 
-  if (mMainThreadTrackCount > 0 || mMainThreadPortCount > 0) {
-    // If both the track and port counts are zero, the regular shutdown
+  if (mMainThreadStreamCount > 0 || mMainThreadPortCount > 0) {
+    // If both the stream and port counts are zero, the regular shutdown
     // sequence will progress shortly to shutdown threads and destroy the graph.
     AppendMessage(MakeUnique<Message>(this));
   }
 }
 
 NS_IMETHODIMP
-MediaTrackGraphImpl::Notify(nsITimer* aTimer) {
+MediaStreamGraphImpl::Notify(nsITimer* aTimer) {
   MOZ_ASSERT(NS_IsMainThread());
   NS_ASSERTION(!mShutdownBlocker,
-               "MediaTrackGraph took too long to shut down!");
+               "MediaStreamGraph took too long to shut down!");
   // Sigh, graph took too long to shut down.  Stop blocking system
   // shutdown and hope all is well.
   RemoveShutdownBlocker();
   return NS_OK;
 }
 
-void MediaTrackGraphImpl::AddShutdownBlocker() {
+void MediaStreamGraphImpl::AddShutdownBlocker() {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(!mShutdownBlocker);
 
   class Blocker : public media::ShutdownBlocker {
-    const RefPtr<MediaTrackGraphImpl> mGraph;
+    const RefPtr<MediaStreamGraphImpl> mGraph;
 
    public:
-    Blocker(MediaTrackGraphImpl* aGraph, const nsString& aName)
+    Blocker(MediaStreamGraphImpl* aGraph, const nsString& aName)
         : media::ShutdownBlocker(aName), mGraph(aGraph) {}
 
     NS_IMETHOD
     BlockShutdown(nsIAsyncShutdownClient* aProfileBeforeChange) override {
       mGraph->ForceShutDown();
       return NS_OK;
     }
   };
 
   // Blocker names must be distinct.
   nsString blockerName;
-  blockerName.AppendPrintf("MediaTrackGraph %p shutdown", this);
+  blockerName.AppendPrintf("MediaStreamGraph %p shutdown", this);
   mShutdownBlocker = MakeAndAddRef<Blocker>(this, blockerName);
   nsresult rv = media::GetShutdownBarrier()->AddBlocker(
       mShutdownBlocker, NS_LITERAL_STRING(__FILE__), __LINE__,
-      NS_LITERAL_STRING("MediaTrackGraph shutdown"));
+      NS_LITERAL_STRING("MediaStreamGraph shutdown"));
   MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv));
 }
 
-void MediaTrackGraphImpl::RemoveShutdownBlocker() {
+void MediaStreamGraphImpl::RemoveShutdownBlocker() {
   if (!mShutdownBlocker) {
     return;
   }
   media::GetShutdownBarrier()->RemoveBlocker(mShutdownBlocker);
   mShutdownBlocker = nullptr;
 }
 
 NS_IMETHODIMP
-MediaTrackGraphImpl::GetName(nsACString& aName) {
-  aName.AssignLiteral("MediaTrackGraphImpl");
+MediaStreamGraphImpl::GetName(nsACString& aName) {
+  aName.AssignLiteral("MediaStreamGraphImpl");
   return NS_OK;
 }
 
 namespace {
 
-class MediaTrackGraphShutDownRunnable : public Runnable {
+class MediaStreamGraphShutDownRunnable : public Runnable {
  public:
-  explicit MediaTrackGraphShutDownRunnable(MediaTrackGraphImpl* aGraph)
-      : Runnable("MediaTrackGraphShutDownRunnable"), mGraph(aGraph) {}
+  explicit MediaStreamGraphShutDownRunnable(MediaStreamGraphImpl* aGraph)
+      : Runnable("MediaStreamGraphShutDownRunnable"), mGraph(aGraph) {}
   NS_IMETHOD Run() override {
     MOZ_ASSERT(NS_IsMainThread());
     MOZ_ASSERT(mGraph->mDetectedNotRunning && mGraph->mDriver,
                "We should know the graph thread control loop isn't running!");
 
     LOG(LogLevel::Debug, ("%p: Shutting down graph", mGraph.get()));
 
     // We've asserted the graph isn't running.  Use mDriver instead of
@@ -1455,17 +1548,17 @@ class MediaTrackGraphShutDownRunnable : 
     mGraph->mDriver
         ->Shutdown();  // This will wait until it's shutdown since
                        // we'll start tearing down the graph after this
 
     // Release the driver now so that an AudioCallbackDriver will release its
     // SharedThreadPool reference.  Each SharedThreadPool reference must be
     // released before SharedThreadPool::SpinUntilEmpty() runs on
     // xpcom-shutdown-threads.  Don't wait for GC/CC to release references to
-    // objects owning tracks, or for expiration of mGraph->mShutdownTimer,
+    // objects owning streams, or for expiration of mGraph->mShutdownTimer,
     // which won't otherwise release its reference on the graph until
     // nsTimerImpl::Shutdown(), which runs after xpcom-shutdown-threads.
     {
       MonitorAutoLock mon(mGraph->mMonitor);
       mGraph->SetCurrentDriver(nullptr);
     }
 
     // Safe to access these without the monitor since the graph isn't running.
@@ -1478,204 +1571,210 @@ class MediaTrackGraphShutDownRunnable : 
           " continue - freezing and leaking");
 
       // The timer fired, so we may be deeper in shutdown now.  Block any
       // further teardown and just leak, for safety.
       return NS_OK;
     }
 
     // mGraph's thread is not running so it's OK to do whatever here
-    for (MediaTrack* track : mGraph->AllTracks()) {
+    for (MediaStream* stream : mGraph->AllStreams()) {
       // Clean up all MediaSegments since we cannot release Images too
       // late during shutdown. Also notify listeners that they were removed
       // so they can clean up any gfx resources.
-      track->RemoveAllResourcesAndListenersImpl();
+      if (SourceMediaStream* source = stream->AsSourceStream()) {
+        // Finishing a SourceStream prevents new data from being appended.
+        source->FinishOnGraphThread();
+      }
+      stream->GetStreamTracks().Clear();
+      stream->RemoveAllListenersImpl();
     }
 
     MOZ_ASSERT(mGraph->mUpdateRunnables.IsEmpty());
     mGraph->mPendingUpdateRunnables.Clear();
 
     mGraph->RemoveShutdownBlocker();
 
-    // We can't block past the final LIFECYCLE_WAITING_FOR_TRACK_DESTRUCTION
-    // stage, since completion of that stage requires all tracks to be freed,
+    // We can't block past the final LIFECYCLE_WAITING_FOR_STREAM_DESTRUCTION
+    // stage, since completion of that stage requires all streams to be freed,
     // which requires shutdown to proceed.
 
     if (mGraph->IsEmpty()) {
       // mGraph is no longer needed, so delete it.
       mGraph->Destroy();
     } else {
       // The graph is not empty.  We must be in a forced shutdown, either for
       // process shutdown or a non-realtime graph that has finished
       // processing. Some later AppendMessage will detect that the graph has
       // been emptied, and delete it.
       NS_ASSERTION(mGraph->mForceShutDown, "Not in forced shutdown?");
       mGraph->LifecycleStateRef() =
-          MediaTrackGraphImpl::LIFECYCLE_WAITING_FOR_TRACK_DESTRUCTION;
+          MediaStreamGraphImpl::LIFECYCLE_WAITING_FOR_STREAM_DESTRUCTION;
     }
     return NS_OK;
   }
 
  private:
-  RefPtr<MediaTrackGraphImpl> mGraph;
+  RefPtr<MediaStreamGraphImpl> mGraph;
 };
 
-class MediaTrackGraphStableStateRunnable : public Runnable {
+class MediaStreamGraphStableStateRunnable : public Runnable {
  public:
-  explicit MediaTrackGraphStableStateRunnable(MediaTrackGraphImpl* aGraph,
-                                              bool aSourceIsMTG)
-      : Runnable("MediaTrackGraphStableStateRunnable"),
+  explicit MediaStreamGraphStableStateRunnable(MediaStreamGraphImpl* aGraph,
+                                               bool aSourceIsMSG)
+      : Runnable("MediaStreamGraphStableStateRunnable"),
         mGraph(aGraph),
-        mSourceIsMTG(aSourceIsMTG) {}
+        mSourceIsMSG(aSourceIsMSG) {}
   NS_IMETHOD Run() override {
     TRACE();
     if (mGraph) {
-      mGraph->RunInStableState(mSourceIsMTG);
+      mGraph->RunInStableState(mSourceIsMSG);
     }
     return NS_OK;
   }
 
  private:
-  RefPtr<MediaTrackGraphImpl> mGraph;
-  bool mSourceIsMTG;
+  RefPtr<MediaStreamGraphImpl> mGraph;
+  bool mSourceIsMSG;
 };
 
 /*
  * Control messages forwarded from main thread to graph manager thread
  */
 class CreateMessage : public ControlMessage {
  public:
-  explicit CreateMessage(MediaTrack* aTrack) : ControlMessage(aTrack) {}
-  void Run() override { mTrack->GraphImpl()->AddTrackGraphThread(mTrack); }
+  explicit CreateMessage(MediaStream* aStream) : ControlMessage(aStream) {}
+  void Run() override { mStream->GraphImpl()->AddStreamGraphThread(mStream); }
   void RunDuringShutdown() override {
     // Make sure to run this message during shutdown too, to make sure
-    // that we balance the number of tracks registered with the graph
+    // that we balance the number of streams registered with the graph
     // as they're destroyed during shutdown.
     Run();
   }
 };
 
 }  // namespace
 
-void MediaTrackGraphImpl::RunInStableState(bool aSourceIsMTG) {
+void MediaStreamGraphImpl::RunInStableState(bool aSourceIsMSG) {
   MOZ_ASSERT(NS_IsMainThread(), "Must be called on main thread");
 
   nsTArray<nsCOMPtr<nsIRunnable>> runnables;
   // When we're doing a forced shutdown, pending control messages may be
   // run on the main thread via RunDuringShutdown. Those messages must
   // run without the graph monitor being held. So, we collect them here.
   nsTArray<UniquePtr<ControlMessage>> controlMessagesToRunDuringShutdown;
 
   {
     MonitorAutoLock lock(mMonitor);
-    if (aSourceIsMTG) {
+    if (aSourceIsMSG) {
       MOZ_ASSERT(mPostedRunInStableStateEvent);
       mPostedRunInStableStateEvent = false;
     }
 
     // This should be kept in sync with the LifecycleState enum in
-    // MediaTrackGraphImpl.h
+    // MediaStreamGraphImpl.h
     const char* LifecycleState_str[] = {
         "LIFECYCLE_THREAD_NOT_STARTED", "LIFECYCLE_RUNNING",
         "LIFECYCLE_WAITING_FOR_MAIN_THREAD_CLEANUP",
         "LIFECYCLE_WAITING_FOR_THREAD_SHUTDOWN",
-        "LIFECYCLE_WAITING_FOR_TRACK_DESTRUCTION"};
+        "LIFECYCLE_WAITING_FOR_STREAM_DESTRUCTION"};
 
     if (LifecycleStateRef() != LIFECYCLE_RUNNING) {
       LOG(LogLevel::Debug,
           ("%p: Running stable state callback. Current state: %s", this,
            LifecycleState_str[LifecycleStateRef()]));
     }
 
     runnables.SwapElements(mUpdateRunnables);
-    for (uint32_t i = 0; i < mTrackUpdates.Length(); ++i) {
-      TrackUpdate* update = &mTrackUpdates[i];
-      if (update->mTrack) {
-        ApplyTrackUpdate(update);
+    for (uint32_t i = 0; i < mStreamUpdates.Length(); ++i) {
+      StreamUpdate* update = &mStreamUpdates[i];
+      if (update->mStream) {
+        ApplyStreamUpdate(update);
       }
     }
-    mTrackUpdates.Clear();
+    mStreamUpdates.Clear();
 
     mMainThreadGraphTime = mNextMainThreadGraphTime;
 
     if (mCurrentTaskMessageQueue.IsEmpty()) {
       if (LifecycleStateRef() == LIFECYCLE_WAITING_FOR_MAIN_THREAD_CLEANUP &&
           IsEmpty()) {
         // Complete shutdown. First, ensure that this graph is no longer used.
         // A new graph graph will be created if one is needed.
         // Asynchronously clean up old graph. We don't want to do this
         // synchronously because it spins the event loop waiting for threads
         // to shut down, and we don't want to do that in a stable state handler.
         LifecycleStateRef() = LIFECYCLE_WAITING_FOR_THREAD_SHUTDOWN;
         LOG(LogLevel::Debug,
-            ("%p: Sending MediaTrackGraphShutDownRunnable", this));
-        nsCOMPtr<nsIRunnable> event = new MediaTrackGraphShutDownRunnable(this);
+            ("%p: Sending MediaStreamGraphShutDownRunnable", this));
+        nsCOMPtr<nsIRunnable> event =
+            new MediaStreamGraphShutDownRunnable(this);
         mAbstractMainThread->Dispatch(event.forget());
       }
     } else {
       if (LifecycleStateRef() <= LIFECYCLE_WAITING_FOR_MAIN_THREAD_CLEANUP) {
         MessageBlock* block = mBackMessageQueue.AppendElement();
         block->mMessages.SwapElements(mCurrentTaskMessageQueue);
         EnsureNextIterationLocked();
       }
 
-      // If this MediaTrackGraph has entered regular (non-forced) shutdown it
+      // If this MediaStreamGraph has entered regular (non-forced) shutdown it
       // is not able to process any more messages. Those messages being added to
       // the graph in the first place is an error.
       MOZ_DIAGNOSTIC_ASSERT(mForceShutDown ||
                             LifecycleStateRef() <
                                 LIFECYCLE_WAITING_FOR_MAIN_THREAD_CLEANUP);
     }
 
     if (LifecycleStateRef() == LIFECYCLE_THREAD_NOT_STARTED) {
       LifecycleStateRef() = LIFECYCLE_RUNNING;
       // Start the thread now. We couldn't start it earlier because
-      // the graph might exit immediately on finding it has no tracks. The
-      // first message for a new graph must create a track.
+      // the graph might exit immediately on finding it has no streams. The
+      // first message for a new graph must create a stream.
       {
-        // We should exit the monitor for now, because starting a track might
+        // We should exit the monitor for now, because starting a stream might
         // take locks, and we don't want to deadlock.
         LOG(LogLevel::Debug,
             ("%p: Starting a graph with a %s", this,
              CurrentDriver()->AsAudioCallbackDriver() ? "AudioCallbackDriver"
                                                       : "SystemClockDriver"));
         RefPtr<GraphDriver> driver = CurrentDriver();
         MonitorAutoUnlock unlock(mMonitor);
         driver->Start();
         // It's not safe to Shutdown() a thread from StableState, and
         // releasing this may shutdown a SystemClockDriver thread.
         // Proxy the release to outside of StableState.
-        NS_ReleaseOnMainThreadSystemGroup("MediaTrackGraphImpl::CurrentDriver",
+        NS_ReleaseOnMainThreadSystemGroup("MediaStreamGraphImpl::CurrentDriver",
                                           driver.forget(),
                                           true);  // always proxy
       }
     }
 
     if (LifecycleStateRef() == LIFECYCLE_WAITING_FOR_MAIN_THREAD_CLEANUP &&
         mForceShutDown) {
       // Defer calls to RunDuringShutdown() to happen while mMonitor is not
       // held.
       for (uint32_t i = 0; i < mBackMessageQueue.Length(); ++i) {
         MessageBlock& mb = mBackMessageQueue[i];
         controlMessagesToRunDuringShutdown.AppendElements(
             std::move(mb.mMessages));
       }
       mBackMessageQueue.Clear();
       MOZ_ASSERT(mCurrentTaskMessageQueue.IsEmpty());
-      // Stop MediaTrackGraph threads.
+      // Stop MediaStreamGraph threads.
       LifecycleStateRef() = LIFECYCLE_WAITING_FOR_THREAD_SHUTDOWN;
-      nsCOMPtr<nsIRunnable> event = new MediaTrackGraphShutDownRunnable(this);
+      nsCOMPtr<nsIRunnable> event = new MediaStreamGraphShutDownRunnable(this);
       mAbstractMainThread->Dispatch(event.forget());
     }
 
     mDetectedNotRunning = LifecycleStateRef() > LIFECYCLE_RUNNING;
   }
 
   // Make sure we get a new current time in the next event loop task
-  if (!aSourceIsMTG) {
+  if (!aSourceIsMSG) {
     MOZ_ASSERT(mPostedRunInStableState);
     mPostedRunInStableState = false;
   }
 
   for (uint32_t i = 0; i < controlMessagesToRunDuringShutdown.Length(); ++i) {
     controlMessagesToRunDuringShutdown[i]->RunDuringShutdown();
   }
 
@@ -1685,55 +1784,55 @@ void MediaTrackGraphImpl::RunInStableSta
       LifecycleStateRef() >= LIFECYCLE_WAITING_FOR_THREAD_SHUTDOWN;
 #endif
 
   for (uint32_t i = 0; i < runnables.Length(); ++i) {
     runnables[i]->Run();
   }
 }
 
-void MediaTrackGraphImpl::EnsureRunInStableState() {
+void MediaStreamGraphImpl::EnsureRunInStableState() {
   MOZ_ASSERT(NS_IsMainThread(), "main thread only");
 
   if (mPostedRunInStableState) return;
   mPostedRunInStableState = true;
   nsCOMPtr<nsIRunnable> event =
-      new MediaTrackGraphStableStateRunnable(this, false);
+      new MediaStreamGraphStableStateRunnable(this, false);
   nsContentUtils::RunInStableState(event.forget());
 }
 
-void MediaTrackGraphImpl::EnsureStableStateEventPosted() {
+void MediaStreamGraphImpl::EnsureStableStateEventPosted() {
   MOZ_ASSERT(OnGraphThread());
   mMonitor.AssertCurrentThreadOwns();
 
   if (mPostedRunInStableStateEvent) return;
   mPostedRunInStableStateEvent = true;
   nsCOMPtr<nsIRunnable> event =
-      new MediaTrackGraphStableStateRunnable(this, true);
+      new MediaStreamGraphStableStateRunnable(this, true);
   mAbstractMainThread->Dispatch(event.forget());
 }
 
-void MediaTrackGraphImpl::SignalMainThreadCleanup() {
+void MediaStreamGraphImpl::SignalMainThreadCleanup() {
   MOZ_ASSERT(mDriver->OnThread());
 
   MonitorAutoLock lock(mMonitor);
   // LIFECYCLE_THREAD_NOT_STARTED is possible when shutting down offline
   // graphs that have not started.
   MOZ_DIAGNOSTIC_ASSERT(mLifecycleState <= LIFECYCLE_RUNNING);
   LOG(LogLevel::Debug,
-      ("%p: MediaTrackGraph waiting for main thread cleanup", this));
+      ("%p: MediaStreamGraph waiting for main thread cleanup", this));
   LifecycleStateRef() =
-      MediaTrackGraphImpl::LIFECYCLE_WAITING_FOR_MAIN_THREAD_CLEANUP;
+      MediaStreamGraphImpl::LIFECYCLE_WAITING_FOR_MAIN_THREAD_CLEANUP;
   EnsureStableStateEventPosted();
 }
 
-void MediaTrackGraphImpl::AppendMessage(UniquePtr<ControlMessage> aMessage) {
+void MediaStreamGraphImpl::AppendMessage(UniquePtr<ControlMessage> aMessage) {
   MOZ_ASSERT(NS_IsMainThread(), "main thread only");
-  MOZ_ASSERT_IF(aMessage->GetTrack(), !aMessage->GetTrack()->IsDestroyed());
-  MOZ_DIAGNOSTIC_ASSERT(mMainThreadTrackCount > 0 || mMainThreadPortCount > 0);
+  MOZ_ASSERT_IF(aMessage->GetStream(), !aMessage->GetStream()->IsDestroyed());
+  MOZ_DIAGNOSTIC_ASSERT(mMainThreadStreamCount > 0 || mMainThreadPortCount > 0);
 
   if (mDetectedNotRunning &&
       LifecycleStateRef() > LIFECYCLE_WAITING_FOR_MAIN_THREAD_CLEANUP) {
     // The graph control loop is not running and main thread cleanup has
     // happened. From now on we can't append messages to
     // mCurrentTaskMessageQueue, because that will never be processed again, so
     // just RunDuringShutdown this message. This should only happen during
     // forced shutdown, or after a non-realtime graph has finished processing.
@@ -1741,637 +1840,722 @@ void MediaTrackGraphImpl::AppendMessage(
     MOZ_ASSERT(mCanRunMessagesSynchronously);
     mCanRunMessagesSynchronously = false;
 #endif
     aMessage->RunDuringShutdown();
 #ifdef DEBUG
     mCanRunMessagesSynchronously = true;
 #endif
     if (IsEmpty() &&
-        LifecycleStateRef() >= LIFECYCLE_WAITING_FOR_TRACK_DESTRUCTION) {
+        LifecycleStateRef() >= LIFECYCLE_WAITING_FOR_STREAM_DESTRUCTION) {
       Destroy();
     }
     return;
   }
 
   mCurrentTaskMessageQueue.AppendElement(std::move(aMessage));
   EnsureRunInStableState();
 }
 
-void MediaTrackGraphImpl::Dispatch(already_AddRefed<nsIRunnable>&& aRunnable) {
+void MediaStreamGraphImpl::Dispatch(already_AddRefed<nsIRunnable>&& aRunnable) {
   mAbstractMainThread->Dispatch(std::move(aRunnable));
 }
 
-MediaTrack::MediaTrack(TrackRate aSampleRate, MediaSegment::Type aType,
-                       MediaSegment* aSegment)
-    : mSampleRate(aSampleRate),
-      mType(aType),
-      mSegment(aSegment),
-      mStartTime(0),
-      mForgottenTime(0),
-      mEnded(false),
-      mNotifiedEnded(false),
-      mDisabledMode(DisabledTrackMode::ENABLED),
+MediaStream::MediaStream()
+    : mTracksStartTime(0),
       mStartBlocking(GRAPH_TIME_MAX),
       mSuspendedCount(0),
+      mFinished(false),
+      mNotifiedFinished(false),
       mMainThreadCurrentTime(0),
-      mMainThreadEnded(false),
-      mEndedNotificationSent(false),
+      mMainThreadFinished(false),
+      mFinishedNotificationSent(false),
       mMainThreadDestroyed(false),
       mGraph(nullptr) {
-  MOZ_COUNT_CTOR(MediaTrack);
-  MOZ_ASSERT_IF(mSegment, mSegment->GetType() == aType);
+  MOZ_COUNT_CTOR(MediaStream);
 }
 
-MediaTrack::~MediaTrack() {
-  MOZ_COUNT_DTOR(MediaTrack);
+MediaStream::~MediaStream() {
+  MOZ_COUNT_DTOR(MediaStream);
   NS_ASSERTION(mMainThreadDestroyed, "Should have been destroyed already");
   NS_ASSERTION(mMainThreadListeners.IsEmpty(),
                "All main thread listeners should have been removed");
 }
 
-size_t MediaTrack::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
+size_t MediaStream::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
   size_t amount = 0;
 
   // Not owned:
   // - mGraph - Not reported here
   // - mConsumers - elements
   // Future:
   // - mLastPlayedVideoFrame
   // - mTrackListeners - elements
   // - mAudioOutputStream - elements
 
+  amount += mTracks.SizeOfExcludingThis(aMallocSizeOf);
   amount += mAudioOutputs.ShallowSizeOfExcludingThis(aMallocSizeOf);
   amount += mTrackListeners.ShallowSizeOfExcludingThis(aMallocSizeOf);
   amount += mMainThreadListeners.ShallowSizeOfExcludingThis(aMallocSizeOf);
+  amount += mDisabledTracks.ShallowSizeOfExcludingThis(aMallocSizeOf);
   amount += mConsumers.ShallowSizeOfExcludingThis(aMallocSizeOf);
-  amount += aMallocSizeOf(mAudioOutputStream.get());
 
   return amount;
 }
 
-size_t MediaTrack::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
+size_t MediaStream::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
   return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
 }
 
-void MediaTrack::IncrementSuspendCount() {
+void MediaStream::IncrementSuspendCount() {
   ++mSuspendedCount;
   if (mSuspendedCount == 1) {
     for (uint32_t i = 0; i < mConsumers.Length(); ++i) {
       mConsumers[i]->Suspended();
     }
   }
 }
 
-void MediaTrack::DecrementSuspendCount() {
+void MediaStream::DecrementSuspendCount() {
   NS_ASSERTION(mSuspendedCount > 0, "Suspend count underrun");
   --mSuspendedCount;
   if (mSuspendedCount == 0) {
     for (uint32_t i = 0; i < mConsumers.Length(); ++i) {
       mConsumers[i]->Resumed();
     }
   }
 }
 
-MediaTrackGraphImpl* MediaTrack::GraphImpl() { return mGraph; }
-
-const MediaTrackGraphImpl* MediaTrack::GraphImpl() const { return mGraph; }
-
-MediaTrackGraph* MediaTrack::Graph() { return mGraph; }
-
-const MediaTrackGraph* MediaTrack::Graph() const { return mGraph; }
-
-void MediaTrack::SetGraphImpl(MediaTrackGraphImpl* aGraph) {
+MediaStreamGraphImpl* MediaStream::GraphImpl() { return mGraph; }
+
+const MediaStreamGraphImpl* MediaStream::GraphImpl() const { return mGraph; }
+
+MediaStreamGraph* MediaStream::Graph() { return mGraph; }
+
+void MediaStream::SetGraphImpl(MediaStreamGraphImpl* aGraph) {
   MOZ_ASSERT(!mGraph, "Should only be called once");
-  MOZ_ASSERT(mSampleRate == aGraph->GraphRate());
   mGraph = aGraph;
+  mTracks.InitGraphRate(aGraph->GraphRate());
 }
 
-void MediaTrack::SetGraphImpl(MediaTrackGraph* aGraph) {
-  MediaTrackGraphImpl* graph = static_cast<MediaTrackGraphImpl*>(aGraph);
+void MediaStream::SetGraphImpl(MediaStreamGraph* aGraph) {
+  MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(aGraph);
   SetGraphImpl(graph);
 }
 
-TrackTime MediaTrack::GraphTimeToTrackTime(GraphTime aTime) const {
+StreamTime MediaStream::GraphTimeToStreamTime(GraphTime aTime) const {
   NS_ASSERTION(mStartBlocking == GraphImpl()->mStateComputedTime ||
                    aTime <= mStartBlocking,
                "Incorrectly ignoring blocking!");
-  return aTime - mStartTime;
+  return aTime - mTracksStartTime;
 }
 
-GraphTime MediaTrack::TrackTimeToGraphTime(TrackTime aTime) const {
+GraphTime MediaStream::StreamTimeToGraphTime(StreamTime aTime) const {
   NS_ASSERTION(mStartBlocking == GraphImpl()->mStateComputedTime ||
-                   aTime + mStartTime <= mStartBlocking,
+                   aTime + mTracksStartTime <= mStartBlocking,
                "Incorrectly ignoring blocking!");
-  return aTime + mStartTime;
+  return aTime + mTracksStartTime;
+}
+
+StreamTime MediaStream::GraphTimeToStreamTimeWithBlocking(
+    GraphTime aTime) const {
+  return GraphImpl()->GraphTimeToStreamTimeWithBlocking(this, aTime);
 }
 
-TrackTime MediaTrack::GraphTimeToTrackTimeWithBlocking(GraphTime aTime) const {
-  return GraphImpl()->GraphTimeToTrackTimeWithBlocking(this, aTime);
+void MediaStream::FinishOnGraphThread() {
+  if (mFinished) {
+    return;
+  }
+  LOG(LogLevel::Debug, ("MediaStream %p will finish", this));
+#ifdef DEBUG
+  if (!mGraph->mForceShutDown) {
+    // All tracks must be ended by the source before the stream finishes.
+    // The exception is in forced shutdown, where we finish all streams as is.
+    for (StreamTracks::TrackIter track(mTracks); !track.IsEnded();
+         track.Next()) {
+      if (!track->IsEnded()) {
+        LOG(LogLevel::Error,
+            ("MediaStream %p will finish, but track %d has not ended.", this,
+             track->GetID()));
+        NS_ASSERTION(false, "Finished stream cannot contain live track");
+      }
+    }
+  }
+#endif
+  mFinished = true;
+
+  // Let the MSG knows that this stream can be destroyed if necessary to avoid
+  // unnecessarily processing it in the future.
+  GraphImpl()->SetStreamOrderDirty();
 }
 
-void MediaTrack::RemoveAllResourcesAndListenersImpl() {
+StreamTracks::Track* MediaStream::FindTrack(TrackID aID) const {
+  return mTracks.FindTrack(aID);
+}
+
+StreamTracks::Track* MediaStream::EnsureTrack(TrackID aTrackId) {
+  StreamTracks::Track* track = mTracks.FindTrack(aTrackId);
+  if (!track) {
+    track = &mTracks.AddTrack(aTrackId, 0, new AudioSegment());
+  }
+  return track;
+}
+
+void MediaStream::RemoveAllListenersImpl() {
   GraphImpl()->AssertOnGraphThreadOrNotRunning();
 
   auto trackListeners(mTrackListeners);
   for (auto& l : trackListeners) {
-    l->NotifyRemoved(Graph());
+    l.mListener->NotifyRemoved(Graph());
   }
   mTrackListeners.Clear();
 
   RemoveAllDirectListenersImpl();
-
-  if (mSegment) {
-    mSegment->Clear();
-  }
 }
 
-void MediaTrack::DestroyImpl() {
+void MediaStream::DestroyImpl() {
   for (int32_t i = mConsumers.Length() - 1; i >= 0; --i) {
     mConsumers[i]->Disconnect();
   }
-  if (mSegment) {
-    mSegment->Clear();
-  }
+  mTracks.Clear();
   mGraph = nullptr;
 }
 
-void MediaTrack::Destroy() {
-  // Keep this track alive until we leave this method
-  RefPtr<MediaTrack> kungFuDeathGrip = this;
+void MediaStream::Destroy() {
+  // Keep this stream alive until we leave this method
+  RefPtr<MediaStream> kungFuDeathGrip = this;
 
   class Message : public ControlMessage {
    public:
-    explicit Message(MediaTrack* aTrack) : ControlMessage(aTrack) {}
+    explicit Message(MediaStream* aStream) : ControlMessage(aStream) {}
     void Run() override {
-      mTrack->RemoveAllResourcesAndListenersImpl();
-      auto graph = mTrack->GraphImpl();
-      mTrack->DestroyImpl();
-      graph->RemoveTrackGraphThread(mTrack);
+      mStream->RemoveAllListenersImpl();
+      auto graph = mStream->GraphImpl();
+      mStream->DestroyImpl();
+      graph->RemoveStreamGraphThread(mStream);
     }
     void RunDuringShutdown() override { Run(); }
   };
   // Keep a reference to the graph, since Message might RunDuringShutdown()
   // synchronously and make GraphImpl() invalid.
-  RefPtr<MediaTrackGraphImpl> graph = GraphImpl();
+  RefPtr<MediaStreamGraphImpl> graph = GraphImpl();
   graph->AppendMessage(MakeUnique<Message>(this));
-  graph->RemoveTrack(this);
-  // Message::RunDuringShutdown may have removed this track from the graph,
-  // but our kungFuDeathGrip above will have kept this track alive if
+  graph->RemoveStream(this);
+  // Message::RunDuringShutdown may have removed this stream from the graph,
+  // but our kungFuDeathGrip above will have kept this stream alive if
   // necessary.
   mMainThreadDestroyed = true;
 }
 
-TrackTime MediaTrack::GetEnd() const {
-  return mSegment ? mSegment->GetDuration() : 0;
-}
-
-void MediaTrack::AddAudioOutput(void* aKey) {
+void MediaStream::AddAudioOutput(void* aKey) {
   class Message : public ControlMessage {
    public:
-    Message(MediaTrack* aTrack, void* aKey)
-        : ControlMessage(aTrack), mKey(aKey) {}
-    void Run() override { mTrack->AddAudioOutputImpl(mKey); }
+    Message(MediaStream* aStream, void* aKey)
+        : ControlMessage(aStream), mKey(aKey) {}
+    void Run() override { mStream->AddAudioOutputImpl(mKey); }
     void* mKey;
   };
   GraphImpl()->AppendMessage(MakeUnique<Message>(this, aKey));
 }
 
-void MediaTrack::SetAudioOutputVolumeImpl(void* aKey, float aVolume) {
+void MediaStream::SetAudioOutputVolumeImpl(void* aKey, float aVolume) {
   for (uint32_t i = 0; i < mAudioOutputs.Length(); ++i) {
     if (mAudioOutputs[i].mKey == aKey) {
       mAudioOutputs[i].mVolume = aVolume;
       return;
     }
   }
   NS_ERROR("Audio output key not found");
 }
 
-void MediaTrack::SetAudioOutputVolume(void* aKey, float aVolume) {
+void MediaStream::SetAudioOutputVolume(void* aKey, float aVolume) {
   class Message : public ControlMessage {
    public:
-    Message(MediaTrack* aTrack, void* aKey, float aVolume)
-        : ControlMessage(aTrack), mKey(aKey), mVolume(aVolume) {}
-    void Run() override { mTrack->SetAudioOutputVolumeImpl(mKey, mVolume); }
+    Message(MediaStream* aStream, void* aKey, float aVolume)
+        : ControlMessage(aStream), mKey(aKey), mVolume(aVolume) {}
+    void Run() override { mStream->SetAudioOutputVolumeImpl(mKey, mVolume); }
     void* mKey;
     float mVolume;
   };
   GraphImpl()->AppendMessage(MakeUnique<Message>(this, aKey, aVolume));
 }
 
-void MediaTrack::AddAudioOutputImpl(void* aKey) {
+void MediaStream::AddAudioOutputImpl(void* aKey) {
   LOG(LogLevel::Info,
-      ("MediaTrack %p Adding AudioOutput for key %p", this, aKey));
+      ("MediaStream %p Adding AudioOutput for key %p", this, aKey));
   mAudioOutputs.AppendElement(AudioOutput(aKey));
 }
 
-void MediaTrack::RemoveAudioOutputImpl(void* aKey) {
+void MediaStream::RemoveAudioOutputImpl(void* aKey) {
   LOG(LogLevel::Info,
-      ("MediaTrack %p Removing AudioOutput for key %p", this, aKey));
+      ("MediaStream %p Removing AudioOutput for key %p", this, aKey));
   for (uint32_t i = 0; i < mAudioOutputs.Length(); ++i) {
     if (mAudioOutputs[i].mKey == aKey) {
       mAudioOutputs.RemoveElementAt(i);
       return;
     }
   }
   NS_ERROR("Audio output key not found");
 }
 
-void MediaTrack::RemoveAudioOutput(void* aKey) {
+void MediaStream::RemoveAudioOutput(void* aKey) {
   class Message : public ControlMessage {
    public:
-    Message(MediaTrack* aTrack, void* aKey)
-        : ControlMessage(aTrack), mKey(aKey) {}
-    void Run() override { mTrack->RemoveAudioOutputImpl(mKey); }
+    Message(MediaStream* aStream, void* aKey)
+        : ControlMessage(aStream), mKey(aKey) {}
+    void Run() override { mStream->RemoveAudioOutputImpl(mKey); }
     void* mKey;
   };
   GraphImpl()->AppendMessage(MakeUnique<Message>(this, aKey));
 }
 
-void MediaTrack::Suspend() {
+void MediaStream::Suspend() {
   class Message : public ControlMessage {
    public:
-    explicit Message(MediaTrack* aTrack) : ControlMessage(aTrack) {}
-    void Run() override { mTrack->GraphImpl()->IncrementSuspendCount(mTrack); }
+    explicit Message(MediaStream* aStream) : ControlMessage(aStream) {}
+    void Run() override {
+      mStream->GraphImpl()->IncrementSuspendCount(mStream);
+    }
   };
 
   // This can happen if this method has been called asynchronously, and the
-  // track has been destroyed since then.
+  // stream has been destroyed since then.
   if (mMainThreadDestroyed) {
     return;
   }
   GraphImpl()->AppendMessage(MakeUnique<Message>(this));
 }
 
-void MediaTrack::Resume() {
+void MediaStream::Resume() {
   class Message : public ControlMessage {
    public:
-    explicit Message(MediaTrack* aTrack) : ControlMessage(aTrack) {}
-    void Run() override { mTrack->GraphImpl()->DecrementSuspendCount(mTrack); }
+    explicit Message(MediaStream* aStream) : ControlMessage(aStream) {}
+    void Run() override {
+      mStream->GraphImpl()->DecrementSuspendCount(mStream);
+    }
   };
 
   // This can happen if this method has been called asynchronously, and the
-  // track has been destroyed since then.
+  // stream has been destroyed since then.
   if (mMainThreadDestroyed) {
     return;
   }
   GraphImpl()->AppendMessage(MakeUnique<Message>(this));
 }
 
-void MediaTrack::AddListenerImpl(
-    already_AddRefed<MediaTrackListener> aListener) {
-  RefPtr<MediaTrackListener> l(aListener);
-  mTrackListeners.AppendElement(std::move(l));
-
-  PrincipalHandle lastPrincipalHandle = mSegment->GetLastPrincipalHandle();
-  mTrackListeners.LastElement()->NotifyPrincipalHandleChanged(
-      Graph(), lastPrincipalHandle);
-  if (mNotifiedEnded) {
-    mTrackListeners.LastElement()->NotifyEnded(Graph());
+void MediaStream::AddTrackListenerImpl(
+    already_AddRefed<MediaStreamTrackListener> aListener, TrackID aTrackID) {
+  TrackBound<MediaStreamTrackListener>* l = mTrackListeners.AppendElement();
+  l->mListener = aListener;
+  l->mTrackID = aTrackID;
+
+  StreamTracks::Track* track = FindTrack(aTrackID);
+  if (!track) {
+    return;
   }
-  if (mDisabledMode == DisabledTrackMode::SILENCE_BLACK) {
-    mTrackListeners.LastElement()->NotifyEnabledStateChanged(Graph(), false);
+  PrincipalHandle lastPrincipalHandle =
+      track->GetSegment()->GetLastPrincipalHandle();
+  l->mListener->NotifyPrincipalHandleChanged(Graph(), lastPrincipalHandle);
+  if (track->IsEnded() &&
+      track->GetEnd() <=
+          GraphTimeToStreamTime(GraphImpl()->mStateComputedTime)) {
+    l->mListener->NotifyEnded(Graph());
+  }
+  if (GetDisabledTrackMode(aTrackID) == DisabledTrackMode::SILENCE_BLACK) {
+    l->mListener->NotifyEnabledStateChanged(Graph(), false);
   }
 }
 
-void MediaTrack::AddListener(MediaTrackListener* aListener) {
+void MediaStream::AddTrackListener(MediaStreamTrackListener* aListener,
+                                   TrackID aTrackID) {
   class Message : public ControlMessage {
    public:
-    Message(MediaTrack* aTrack, MediaTrackListener* aListener)
-        : ControlMessage(aTrack), mListener(aListener) {}
-    void Run() override { mTrack->AddListenerImpl(mListener.forget()); }
-    RefPtr<MediaTrackListener> mListener;
+    Message(MediaStream* aStream, MediaStreamTrackListener* aListener,
+            TrackID aTrackID)
+        : ControlMessage(aStream), mListener(aListener), mTrackID(aTrackID) {}
+    void Run() override {
+      mStream->AddTrackListenerImpl(mListener.forget(), mTrackID);
+    }
+    RefPtr<MediaStreamTrackListener> mListener;
+    TrackID mTrackID;
   };
-  MOZ_ASSERT(mSegment, "Segment-less tracks do not support listeners");
-  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aListener));
+  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aListener, aTrackID));
 }
 
-void MediaTrack::RemoveListenerImpl(MediaTrackListener* aListener) {
+void MediaStream::RemoveTrackListenerImpl(MediaStreamTrackListener* aListener,
+                                          TrackID aTrackID) {
   for (size_t i = 0; i < mTrackListeners.Length(); ++i) {
-    if (mTrackListeners[i] == aListener) {
-      mTrackListeners[i]->NotifyRemoved(Graph());
+    if (mTrackListeners[i].mListener == aListener &&
+        mTrackListeners[i].mTrackID == aTrackID) {
+      mTrackListeners[i].mListener->NotifyRemoved(Graph());
       mTrackListeners.RemoveElementAt(i);
       return;
     }
   }
 }
 
-void MediaTrack::RemoveListener(MediaTrackListener* aListener) {
+void MediaStream::RemoveTrackListener(MediaStreamTrackListener* aListener,
+                                      TrackID aTrackID) {
   class Message : public ControlMessage {
    public:
-    Message(MediaTrack* aTrack, MediaTrackListener* aListener)
-        : ControlMessage(aTrack), mListener(aListener) {}
-    void Run() override { mTrack->RemoveListenerImpl(mListener); }
+    Message(MediaStream* aStream, MediaStreamTrackListener* aListener,
+            TrackID aTrackID)
+        : ControlMessage(aStream), mListener(aListener), mTrackID(aTrackID) {}
+    void Run() override {
+      mStream->RemoveTrackListenerImpl(mListener, mTrackID);
+    }
     void RunDuringShutdown() override {
       // During shutdown we still want the listener's NotifyRemoved to be
       // called, since not doing that might block shutdown of other modules.
       Run();
     }
-    RefPtr<MediaTrackListener> mListener;
+    RefPtr<MediaStreamTrackListener> mListener;
+    TrackID mTrackID;
   };
-  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aListener));
+  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aListener, aTrackID));
 }
 
-void MediaTrack::AddDirectListenerImpl(
-    already_AddRefed<DirectMediaTrackListener> aListener) {
-  // Base implementation, for tracks that don't support direct track listeners.
-  RefPtr<DirectMediaTrackListener> listener = aListener;
+void MediaStream::AddDirectTrackListenerImpl(
+    already_AddRefed<DirectMediaStreamTrackListener> aListener,
+    TrackID aTrackID) {
+  // Base implementation, for streams that don't support direct track listeners.
+  RefPtr<DirectMediaStreamTrackListener> listener = aListener;
   listener->NotifyDirectListenerInstalled(
-      DirectMediaTrackListener::InstallationResult::TRACK_NOT_SUPPORTED);
+      DirectMediaStreamTrackListener::InstallationResult::STREAM_NOT_SUPPORTED);
 }
 
-void MediaTrack::AddDirectListener(DirectMediaTrackListener* aListener) {
+void MediaStream::AddDirectTrackListener(
+    DirectMediaStreamTrackListener* aListener, TrackID aTrackID) {
   class Message : public ControlMessage {
    public:
-    Message(MediaTrack* aTrack, DirectMediaTrackListener* aListener)
-        : ControlMessage(aTrack), mListener(aListener) {}
-    void Run() override { mTrack->AddDirectListenerImpl(mListener.forget()); }
-    RefPtr<DirectMediaTrackListener> mListener;
+    Message(MediaStream* aStream, DirectMediaStreamTrackListener* aListener,
+            TrackID aTrackID)
+        : ControlMessage(aStream), mListener(aListener), mTrackID(aTrackID) {}
+    void Run() override {
+      mStream->AddDirectTrackListenerImpl(mListener.forget(), mTrackID);
+    }
+    RefPtr<DirectMediaStreamTrackListener> mListener;
+    TrackID mTrackID;
   };
-  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aListener));
+  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aListener, aTrackID));
 }
 
-void MediaTrack::RemoveDirectListenerImpl(DirectMediaTrackListener* aListener) {
+void MediaStream::RemoveDirectTrackListenerImpl(
+    DirectMediaStreamTrackListener* aListener, TrackID aTrackID) {
   // Base implementation, the listener was never added so nothing to do.
+  RefPtr<DirectMediaStreamTrackListener> listener = aListener;
 }
 
-void MediaTrack::RemoveDirectListener(DirectMediaTrackListener* aListener) {
+void MediaStream::RemoveDirectTrackListener(
+    DirectMediaStreamTrackListener* aListener, TrackID aTrackID) {
   class Message : public ControlMessage {
    public:
-    Message(MediaTrack* aTrack, DirectMediaTrackListener* aListener)
-        : ControlMessage(aTrack), mListener(aListener) {}
-    void Run() override { mTrack->RemoveDirectListenerImpl(mListener); }
+    Message(MediaStream* aStream, DirectMediaStreamTrackListener* aListener,
+            TrackID aTrackID)
+        : ControlMessage(aStream), mListener(aListener), mTrackID(aTrackID) {}
+    void Run() override {
+      mStream->RemoveDirectTrackListenerImpl(mListener, mTrackID);
+    }
     void RunDuringShutdown() override {
       // During shutdown we still want the listener's
       // NotifyDirectListenerUninstalled to be called, since not doing that
       // might block shutdown of other modules.
       Run();
     }
-    RefPtr<DirectMediaTrackListener> mListener;
+    RefPtr<DirectMediaStreamTrackListener> mListener;
+    TrackID mTrackID;
   };
-  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aListener));
+  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aListener, aTrackID));
 }
 
-void MediaTrack::RunAfterPendingUpdates(
+void MediaStream::RunAfterPendingUpdates(
     already_AddRefed<nsIRunnable> aRunnable) {
   MOZ_ASSERT(NS_IsMainThread());
-  MediaTrackGraphImpl* graph = GraphImpl();
+  MediaStreamGraphImpl* graph = GraphImpl();
   nsCOMPtr<nsIRunnable> runnable(aRunnable);
 
   class Message : public ControlMessage {
    public:
-    Message(MediaTrack* aTrack, already_AddRefed<nsIRunnable> aRunnable)
-        : ControlMessage(aTrack), mRunnable(aRunnable) {}
+    Message(MediaStream* aStream, already_AddRefed<nsIRunnable> aRunnable)
+        : ControlMessage(aStream), mRunnable(aRunnable) {}
     void Run() override {
-      mTrack->Graph()->DispatchToMainThreadStableState(mRunnable.forget());
+      mStream->Graph()->DispatchToMainThreadStableState(mRunnable.forget());
     }
     void RunDuringShutdown() override {
       // Don't run mRunnable now as it may call AppendMessage() which would
       // assume that there are no remaining controlMessagesToRunDuringShutdown.
       MOZ_ASSERT(NS_IsMainThread());
-      mTrack->GraphImpl()->Dispatch(mRunnable.forget());
+      mStream->GraphImpl()->Dispatch(mRunnable.forget());
     }
 
    private:
     nsCOMPtr<nsIRunnable> mRunnable;
   };
 
   graph->AppendMessage(MakeUnique<Message>(this, runnable.forget()));
 }
 
-void MediaTrack::SetEnabledImpl(DisabledTrackMode aMode) {
+void MediaStream::SetTrackEnabledImpl(TrackID aTrackID,
+                                      DisabledTrackMode aMode) {
   if (aMode == DisabledTrackMode::ENABLED) {
-    mDisabledMode = DisabledTrackMode::ENABLED;
-    for (const auto& l : mTrackListeners) {
-      l->NotifyEnabledStateChanged(Graph(), true);
+    for (int32_t i = mDisabledTracks.Length() - 1; i >= 0; --i) {
+      if (aTrackID == mDisabledTracks[i].mTrackID) {
+        mDisabledTracks.RemoveElementAt(i);
+        for (TrackBound<MediaStreamTrackListener>& l : mTrackListeners) {
+          if (l.mTrackID == aTrackID) {
+            l.mListener->NotifyEnabledStateChanged(Graph(), true);
+          }
+        }
+        return;
+      }
     }
   } else {
-    MOZ_DIAGNOSTIC_ASSERT(
-        mDisabledMode == DisabledTrackMode::ENABLED,
-        "Changing disabled track mode for a track is not allowed");
-    mDisabledMode = aMode;
+    for (const DisabledTrack& t : mDisabledTracks) {
+      if (aTrackID == t.mTrackID) {
+        NS_ERROR("Changing disabled track mode for a track is not allowed");
+        return;
+      }
+    }
+    mDisabledTracks.AppendElement(DisabledTrack(aTrackID, aMode));
     if (aMode == DisabledTrackMode::SILENCE_BLACK) {
-      for (const auto& l : mTrackListeners) {
-        l->NotifyEnabledStateChanged(Graph(), false);
+      for (TrackBound<MediaStreamTrackListener>& l : mTrackListeners) {
+        if (l.mTrackID == aTrackID) {
+          l.mListener->NotifyEnabledStateChanged(Graph(), false);
+        }
       }
     }
   }
 }
 
-void MediaTrack::SetEnabled(DisabledTrackMode aMode) {
+DisabledTrackMode MediaStream::GetDisabledTrackMode(TrackID aTrackID) {
+  for (const DisabledTrack& t : mDisabledTracks) {
+    if (t.mTrackID == aTrackID) {
+      return t.mMode;
+    }
+  }
+  return DisabledTrackMode::ENABLED;
+}
+
+void MediaStream::SetTrackEnabled(TrackID aTrackID, DisabledTrackMode aMode) {
   class Message : public ControlMessage {
    public:
-    Message(MediaTrack* aTrack, DisabledTrackMode aMode)
-        : ControlMessage(aTrack), mMode(aMode) {}
-    void Run() override { mTrack->SetEnabledImpl(mMode); }
+    Message(MediaStream* aStream, TrackID aTrackID, DisabledTrackMode aMode)
+        : ControlMessage(aStream), mTrackID(aTrackID), mMode(aMode) {}
+    void Run() override { mStream->SetTrackEnabledImpl(mTrackID, mMode); }
+    TrackID mTrackID;
     DisabledTrackMode mMode;
   };
-  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aMode));
+  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aTrackID, aMode));
 }
 
-void MediaTrack::ApplyTrackDisabling(MediaSegment* aSegment,
-                                     MediaSegment* aRawSegment) {
-  if (mDisabledMode == DisabledTrackMode::ENABLED) {
+void MediaStream::ApplyTrackDisabling(TrackID aTrackID, MediaSegment* aSegment,
+                                      MediaSegment* aRawSegment) {
+  DisabledTrackMode mode = GetDisabledTrackMode(aTrackID);
+  if (mode == DisabledTrackMode::ENABLED) {
     return;
   }
-  if (mDisabledMode == DisabledTrackMode::SILENCE_BLACK) {
+  if (mode == DisabledTrackMode::SILENCE_BLACK) {
     aSegment->ReplaceWithDisabled();
     if (aRawSegment) {
       aRawSegment->ReplaceWithDisabled();
     }
-  } else if (mDisabledMode == DisabledTrackMode::SILENCE_FREEZE) {
+  } else if (mode == DisabledTrackMode::SILENCE_FREEZE) {
     aSegment->ReplaceWithNull();
     if (aRawSegment) {
       aRawSegment->ReplaceWithNull();
     }
   } else {
     MOZ_CRASH("Unsupported mode");
   }
 }
 
-void MediaTrack::AddMainThreadListener(
-    MainThreadMediaTrackListener* aListener) {
+void MediaStream::AddMainThreadListener(
+    MainThreadMediaStreamListener* aListener) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(aListener);
   MOZ_ASSERT(!mMainThreadListeners.Contains(aListener));
 
   mMainThreadListeners.AppendElement(aListener);
 
-  // If it is not yet time to send the notification, then exit here.
-  if (!mEndedNotificationSent) {
+  // If it is not yet time to send the notification, then finish here.
+  if (!mFinishedNotificationSent) {
     return;
   }
 
   class NotifyRunnable final : public Runnable {
    public:
-    explicit NotifyRunnable(MediaTrack* aTrack)
-        : Runnable("MediaTrack::NotifyRunnable"), mTrack(aTrack) {}
+    explicit NotifyRunnable(MediaStream* aStream)
+        : Runnable("MediaStream::NotifyRunnable"), mStream(aStream) {}
 
     NS_IMETHOD Run() override {
       MOZ_ASSERT(NS_IsMainThread());
-      mTrack->NotifyMainThreadListeners();
+      mStream->NotifyMainThreadListeners();
       return NS_OK;
     }
 
    private:
     ~NotifyRunnable() {}
 
-    RefPtr<MediaTrack> mTrack;
+    RefPtr<MediaStream> mStream;
   };
 
   nsCOMPtr<nsIRunnable> runnable = new NotifyRunnable(this);
   GraphImpl()->Dispatch(runnable.forget());
 }
 
-void MediaTrack::AdvanceTimeVaryingValuesToCurrentTime(GraphTime aCurrentTime,
-                                                       GraphTime aBlockedTime) {
-  mStartTime += aBlockedTime;
-
-  if (!mSegment) {
-    // No data to be forgotten.
-    return;
-  }
-
-  TrackTime time = aCurrentTime - mStartTime;
-  // Only prune if there is a reasonable chunk (50ms @ 48kHz) to forget, so we
-  // don't spend too much time pruning segments.
-  const TrackTime minChunkSize = 2400;
-  if (time < mForgottenTime + minChunkSize) {
-    return;
-  }
-
-  mForgottenTime = std::min(GetEnd() - 1, time);
-  mSegment->ForgetUpTo(mForgottenTime);
-}
-
-SourceMediaTrack::SourceMediaTrack(MediaSegment::Type aType,
-                                   TrackRate aSampleRate)
-    : MediaTrack(aSampleRate, aType,
-                 aType == MediaSegment::AUDIO
-                     ? static_cast<MediaSegment*>(new AudioSegment())
-                     : static_cast<MediaSegment*>(new VideoSegment())),
-      mMutex("mozilla::media::SourceMediaTrack") {
-  mUpdateTrack = MakeUnique<TrackData>();
-  mUpdateTrack->mInputRate = aSampleRate;
-  mUpdateTrack->mResamplerChannelCount = 0;
-  mUpdateTrack->mData = UniquePtr<MediaSegment>(mSegment->CreateEmptyClone());
-  mUpdateTrack->mEnded = false;
-  mUpdateTrack->mPullingEnabled = false;
-}
-
-nsresult SourceMediaTrack::OpenAudioInput(CubebUtils::AudioDeviceID aID,
-                                          AudioDataListener* aListener) {
+SourceMediaStream::SourceMediaStream()
+    : MediaStream(),
+      mMutex("mozilla::media::SourceMediaStream"),
+      mFinishPending(false) {}
+
+nsresult SourceMediaStream::OpenAudioInput(CubebUtils::AudioDeviceID aID,
+                                           AudioDataListener* aListener) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(GraphImpl());
   MOZ_ASSERT(!mInputListener);
   mInputListener = aListener;
   return GraphImpl()->OpenAudioInput(aID, aListener);
 }
 
-void SourceMediaTrack::CloseAudioInput(Maybe<CubebUtils::AudioDeviceID>& aID) {
+void SourceMediaStream::CloseAudioInput(Maybe<CubebUtils::AudioDeviceID>& aID) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(GraphImpl());
   if (!mInputListener) {
     return;
   }
   GraphImpl()->CloseAudioInput(aID, mInputListener);
   mInputListener = nullptr;
 }
 
-void SourceMediaTrack::Destroy() {
+void SourceMediaStream::Destroy() {
   MOZ_ASSERT(NS_IsMainThread());
   Maybe<CubebUtils::AudioDeviceID> id = Nothing();
   CloseAudioInput(id);
 
-  MediaTrack::Destroy();
+  MediaStream::Destroy();
 }
 
-void SourceMediaTrack::DestroyImpl() {
+void SourceMediaStream::DestroyImpl() {
   GraphImpl()->AssertOnGraphThreadOrNotRunning();
   for (int32_t i = mConsumers.Length() - 1; i >= 0; --i) {
     // Disconnect before we come under mMutex's lock since it can call back
-    // through RemoveDirectListenerImpl() and deadlock.
+    // through RemoveDirectTrackListenerImpl() and deadlock.
     mConsumers[i]->Disconnect();
   }
 
   // Hold mMutex while mGraph is reset so that other threads holding mMutex
   // can null-check know that the graph will not destroyed.
   MutexAutoLock lock(mMutex);
-  mUpdateTrack = nullptr;
-  MediaTrack::DestroyImpl();
+  mUpdateTracks.Clear();
+  MediaStream::DestroyImpl();
 }
 
-void SourceMediaTrack::SetPullingEnabled(bool aEnabled) {
+void SourceMediaStream::SetPullingEnabled(TrackID aTrackID, bool aEnabled) {
   class Message : public ControlMessage {
    public:
-    Message(SourceMediaTrack* aTrack, bool aEnabled)
-        : ControlMessage(nullptr), mTrack(aTrack), mEnabled(aEnabled) {}
+    Message(SourceMediaStream* aStream, TrackID aTrackID, bool aEnabled)
+        : ControlMessage(nullptr),
+          mStream(aStream),
+          mTrackID(aTrackID),
+          mEnabled(aEnabled) {}
     void Run() override {
-      MutexAutoLock lock(mTrack->mMutex);
-      if (!mTrack->mUpdateTrack) {
-        // We can't enable pulling for a track that has ended. We ignore
+      MutexAutoLock lock(mStream->mMutex);
+      TrackData* data = mStream->FindDataForTrack(mTrackID);
+      if (!data) {
+        // We can't enable pulling for a track that was never added. We ignore
         // this if we're disabling pulling, since shutdown sequences are
         // complex. If there's truly an issue we'll have issues enabling anyway.
-        MOZ_ASSERT_IF(mEnabled, mTrack->mEnded);
+        MOZ_ASSERT_IF(mEnabled,
+                      mStream->mTracks.FindTrack(mTrackID) &&
+                          mStream->mTracks.FindTrack(mTrackID)->IsEnded());
         return;
       }
-      MOZ_ASSERT(mTrack->mType == MediaSegment::AUDIO,
+      MOZ_ASSERT(data->mData->GetType() == MediaSegment::AUDIO,
                  "Pulling is not allowed for video");
-      mTrack->mUpdateTrack->mPullingEnabled = mEnabled;
+      data->mPullingEnabled = mEnabled;
     }
-    SourceMediaTrack* mTrack;
+    SourceMediaStream* mStream;
+    TrackID mTrackID;
     bool mEnabled;
   };
-  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aEnabled));
+  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aTrackID, aEnabled));
 }
 
-bool SourceMediaTrack::PullNewData(GraphTime aDesiredUpToTime) {
-  TRACE_AUDIO_CALLBACK_COMMENT("SourceMediaTrack %p", this);
-  TrackTime t;
-  TrackTime current;
-  {
-    if (mEnded) {
-      return false;
-    }
-    MutexAutoLock lock(mMutex);
-    if (mUpdateTrack->mEnded) {
-      return false;
-    }
-    if (!mUpdateTrack->mPullingEnabled) {
-      return false;
-    }
-    // Compute how much track time we'll need assuming we don't block
-    // the track at all.
-    t = GraphTimeToTrackTime(aDesiredUpToTime);
-    current = GetEnd() + mUpdateTrack->mData->GetDuration();
-  }
-  if (t <= current) {
+bool SourceMediaStream::PullNewData(GraphTime aDesiredUpToTime) {
+  TRACE_AUDIO_CALLBACK_COMMENT("SourceMediaStream %p", this);
+  MutexAutoLock lock(mMutex);
+  if (mFinished) {
     return false;
   }
-  LOG(LogLevel::Verbose, ("%p: Calling NotifyPull track=%p t=%f current end=%f",
-                          GraphImpl(), this, GraphImpl()->MediaTimeToSeconds(t),
-                          GraphImpl()->MediaTimeToSeconds(current)));
-  for (auto& l : mTrackListeners) {
-    l->NotifyPull(Graph(), current, t);
+  bool streamPullingEnabled = false;
+  for (const TrackData& track : mUpdateTracks) {
+    if (!(track.mCommands & TrackEventCommand::TRACK_EVENT_ENDED) &&
+        track.mPullingEnabled) {
+      // At least one track in this stream is pulled. We want to consume it in
+      // real-time (i.e., not block the stream).
+      streamPullingEnabled = true;
+      break;
+    }
+  }
+  // Compute how much stream time we'll need assuming we don't block
+  // the stream at all.
+  StreamTime t = GraphTimeToStreamTime(aDesiredUpToTime);
+  for (const TrackData& track : mUpdateTracks) {
+    if (track.mCommands & TRACK_END) {
+      continue;
+    }
+    StreamTime current;
+    if (track.mCommands & TRACK_CREATE) {
+      // This track hasn't been created yet. Use the stream's current time
+      // (which the track will get as its start time later).
+      current = GraphTimeToStreamTime(GraphImpl()->mStateComputedTime);
+    } else {
+      current = track.mEndOfFlushedData + track.mData->GetDuration();
+    }
+    if (t <= current) {
+      continue;
+    }
+    if (!track.mPullingEnabled &&
+        track.mData->GetType() == MediaSegment::AUDIO) {
+      if (streamPullingEnabled) {
+        LOG(LogLevel::Verbose,
+            ("%p: Pulling disabled for track but enabled for stream, append "
+             "null data; stream=%p track=%d t=%f current end=%f",
+             GraphImpl(), this, track.mID, GraphImpl()->MediaTimeToSeconds(t),
+             GraphImpl()->MediaTimeToSeconds(current)));
+        track.mData->AppendNullData(t - current);
+      }
+      continue;
+    }
+    LOG(LogLevel::Verbose,
+        ("%p: Calling NotifyPull stream=%p track=%d t=%f current end=%f",
+         GraphImpl(), this, track.mID, GraphImpl()->MediaTimeToSeconds(t),
+         GraphImpl()->MediaTimeToSeconds(current)));
+    MutexAutoUnlock unlock(mMutex);
+    for (TrackBound<MediaStreamTrackListener>& l : mTrackListeners) {
+      if (l.mTrackID == track.mID) {
+        l.mListener->NotifyPull(Graph(), current, t);
+      }
+    }
   }
   return true;
 }
 
 /**
  * This moves chunks from aIn to aOut. For audio this is simple. For video
  * we carry durations over if present, or extend up to aDesiredUpToTime if not.
  *
  * We also handle "resetters" from captured media elements. This type of source
  * pushes future frames into the track, and should it need to remove some, e.g.,
  * because of a seek or pause, it tells us by letting time go backwards. Without
  * this, tracks would be live for too long after a seek or pause.
  */
-static void MoveToSegment(SourceMediaTrack* aTrack, MediaSegment* aIn,
-                          MediaSegment* aOut, TrackTime aCurrentTime,
-                          TrackTime aDesiredUpToTime) {
+static void MoveToSegment(SourceMediaStream* aStream, MediaSegment* aIn,
+                          MediaSegment* aOut, StreamTime aCurrentTime,
+                          StreamTime aDesiredUpToTime) {
   MOZ_ASSERT(aIn->GetType() == aOut->GetType());
   MOZ_ASSERT(aOut->GetDuration() >= aCurrentTime);
   if (aIn->GetType() == MediaSegment::AUDIO) {
     aOut->AppendFrom(aIn);
   } else {
     VideoSegment* in = static_cast<VideoSegment*>(aIn);
     VideoSegment* out = static_cast<VideoSegment*>(aOut);
     for (VideoSegment::ConstChunkIterator c(*in); !c.IsEnded(); c.Next()) {
@@ -2411,260 +2595,376 @@ static void MoveToSegment(SourceMediaTra
     if (out->GetDuration() < aDesiredUpToTime) {
       out->ExtendLastFrameBy(aDesiredUpToTime - out->GetDuration());
     }
     in->Clear();
   }
   MOZ_ASSERT(aIn->GetDuration() == 0, "aIn must be consumed");
 }
 
-void SourceMediaTrack::ExtractPendingInput(GraphTime aCurrentTime,
-                                           GraphTime aDesiredUpToTime) {
+void SourceMediaStream::ExtractPendingInput(GraphTime aCurrentTime,
+                                            GraphTime aDesiredUpToTime) {
   MutexAutoLock lock(mMutex);
 
-  if (!mUpdateTrack) {
-    MOZ_ASSERT(mEnded);
-    return;
-  }
-
-  TrackTime trackCurrentTime = GraphTimeToTrackTime(aCurrentTime);
-
-  ApplyTrackDisabling(mUpdateTrack->mData.get());
-
-  if (!mUpdateTrack->mData->IsEmpty()) {
-    for (const auto& l : mTrackListeners) {
-      l->NotifyQueuedChanges(GraphImpl(), GetEnd(), *mUpdateTrack->mData);
+  bool finished = mFinishPending;
+  StreamTime streamCurrentTime = GraphTimeToStreamTime(aCurrentTime);
+  StreamTime streamDesiredUpToTime = GraphTimeToStreamTime(aDesiredUpToTime);
+
+  for (int32_t i = mUpdateTracks.Length() - 1; i >= 0; --i) {
+    SourceMediaStream::TrackData* data = &mUpdateTracks[i];
+    ApplyTrackDisabling(data->mID, data->mData);
+    // Dealing with NotifyQueuedTrackChanges and NotifyQueuedAudioData part.
+
+    // This logic differs from the handling of aStream->mTracks below, so the
+    // two are kept separate rather than combined.
+    StreamTime offset =
+        (data->mCommands & SourceMediaStream::TRACK_CREATE)
+            ? streamCurrentTime
+            : mTracks.FindTrack(data->mID)->GetSegment()->GetDuration();
+
+    for (TrackBound<MediaStreamTrackListener>& b : mTrackListeners) {
+      if (b.mTrackID != data->mID) {
+        continue;
+      }
+      b.mListener->NotifyQueuedChanges(GraphImpl(), offset, *data->mData);
+    }
+    if (data->mCommands & SourceMediaStream::TRACK_CREATE) {
+      MediaSegment* segment = data->mData->CreateEmptyClone();
+      LOG(LogLevel::Debug,
+          ("%p: SourceMediaStream %p creating track %d, start %" PRId64
+           ", initial end %" PRId64,
+           GraphImpl(), this, data->mID, int64_t(streamCurrentTime),
+           int64_t(segment->GetDuration())));
+
+      segment->AppendNullData(streamCurrentTime);
+      MoveToSegment(this, data->mData, segment, streamCurrentTime,
+                    streamDesiredUpToTime);
+      data->mEndOfFlushedData += segment->GetDuration();
+      mTracks.AddTrack(data->mID, streamCurrentTime, segment);
+      data->mCommands &= ~SourceMediaStream::TRACK_CREATE;
+    } else {
+      StreamTracks::Track* track = mTracks.FindTrack(data->mID);
+      MediaSegment* dest = track->GetSegment();
+      LOG(LogLevel::Verbose,
+          ("%p: SourceMediaStream %p track %d, advancing end from %" PRId64
+           " to %" PRId64,
+           GraphImpl(), this, data->mID, int64_t(dest->GetDuration()),
+           int64_t(dest->GetDuration() + data->mData->GetDuration())));
+      data->mEndOfFlushedData += data->mData->GetDuration();
+      MoveToSegment(this, data->mData, dest, streamCurrentTime,
+                    streamDesiredUpToTime);
+    }
+    if (data->mCommands & SourceMediaStream::TRACK_END) {
+      mTracks.FindTrack(data->mID)->SetEnded();
+      mUpdateTracks.RemoveElementAt(i);
     }
   }
-  TrackTime trackDesiredUpToTime = GraphTimeToTrackTime(aDesiredUpToTime);
-  TrackTime endTime = trackDesiredUpToTime;
-  if (mUpdateTrack->mEnded) {
-    endTime = std::min(trackDesiredUpToTime,
-                       GetEnd() + mUpdateTrack->mData->GetDuration());
-  }
-  LOG(LogLevel::Verbose,
-      ("%p: SourceMediaTrack %p advancing end from %" PRId64 " to %" PRId64,
-       GraphImpl(), this, int64_t(trackCurrentTime), int64_t(endTime)));
-  MoveToSegment(this, mUpdateTrack->mData.get(), mSegment.get(),
-                trackCurrentTime, endTime);
-  if (mUpdateTrack->mEnded && GetEnd() < trackDesiredUpToTime) {
-    mEnded = true;
-    mUpdateTrack = nullptr;
+
+  if (finished) {
+    FinishOnGraphThread();
   }
 }
 
-void SourceMediaTrack::ResampleAudioToGraphSampleRate(MediaSegment* aSegment) {
-  mMutex.AssertCurrentThreadOwns();
+void SourceMediaStream::AddTrackInternal(TrackID aID, TrackRate aRate,
+                                         MediaSegment* aSegment,
+                                         uint32_t aFlags) {
+  MutexAutoLock lock(mMutex);
+  nsTArray<TrackData>* track_data =
+      (aFlags & ADDTRACK_QUEUED) ? &mPendingTracks : &mUpdateTracks;
+  TrackData* data = track_data->AppendElement();
+  LOG(LogLevel::Debug,
+      ("%p: AddTrackInternal: %lu/%lu", GraphImpl(),
+       (long)mPendingTracks.Length(), (long)mUpdateTracks.Length()));
+  data->mID = aID;
+  data->mInputRate = aRate;
+  data->mResamplerChannelCount = 0;
+  data->mEndOfFlushedData = 0;
+  data->mCommands = TRACK_CREATE;
+  data->mData = aSegment;
+  data->mPullingEnabled = false;
+  ResampleAudioToGraphSampleRate(data, aSegment);
+  if (!(aFlags & ADDTRACK_QUEUED) && GraphImpl()) {
+    GraphImpl()->EnsureNextIteration();
+  }
+}
+
+void SourceMediaStream::AddAudioTrack(TrackID aID, TrackRate aRate,
+                                      AudioSegment* aSegment, uint32_t aFlags) {
+  AddTrackInternal(aID, aRate, aSegment, aFlags);
+}
+
+void SourceMediaStream::FinishAddTracks() {
+  MutexAutoLock lock(mMutex);
+  mUpdateTracks.AppendElements(std::move(mPendingTracks));
+  LOG(LogLevel::Debug,
+      ("%p: FinishAddTracks: %lu/%lu", GraphImpl(),
+       (long)mPendingTracks.Length(), (long)mUpdateTracks.Length()));
+  if (GraphImpl()) {
+    GraphImpl()->EnsureNextIteration();
+  }
+}
+
+void SourceMediaStream::ResampleAudioToGraphSampleRate(TrackData* aTrackData,
+                                                       MediaSegment* aSegment) {
   if (aSegment->GetType() != MediaSegment::AUDIO ||
-      mUpdateTrack->mInputRate == GraphImpl()->GraphRate()) {
+      aTrackData->mInputRate == GraphImpl()->GraphRate()) {
     return;
   }
   AudioSegment* segment = static_cast<AudioSegment*>(aSegment);
   int channels = segment->ChannelCount();
 
   // If this segment is just silence, we delay instanciating the resampler. We
-  // also need to recreate the resampler if the channel count or input rate
-  // changes.
-  if (channels && mUpdateTrack->mResamplerChannelCount != channels) {
+  // also need to recreate the resampler if the channel count changes.
+  if (channels && aTrackData->mResamplerChannelCount != channels) {
     SpeexResamplerState* state = speex_resampler_init(
-        channels, mUpdateTrack->mInputRate, GraphImpl()->GraphRate(),
+        channels, aTrackData->mInputRate, GraphImpl()->GraphRate(),
         SPEEX_RESAMPLER_QUALITY_MIN, nullptr);
     if (!state) {
       return;
     }
-    mUpdateTrack->mResampler.own(state);
-    mUpdateTrack->mResamplerChannelCount = channels;
+    aTrackData->mResampler.own(state);
+    aTrackData->mResamplerChannelCount = channels;
   }
-  segment->ResampleChunks(mUpdateTrack->mResampler, mUpdateTrack->mInputRate,
+  segment->ResampleChunks(aTrackData->mResampler, aTrackData->mInputRate,
                           GraphImpl()->GraphRate());
 }
 
-void SourceMediaTrack::AdvanceTimeVaryingValuesToCurrentTime(
+void SourceMediaStream::AdvanceTimeVaryingValuesToCurrentTime(
     GraphTime aCurrentTime, GraphTime aBlockedTime) {
   MutexAutoLock lock(mMutex);
-  MediaTrack::AdvanceTimeVaryingValuesToCurrentTime(aCurrentTime, aBlockedTime);
+  mTracksStartTime += aBlockedTime;
+  mTracks.ForgetUpTo(aCurrentTime - mTracksStartTime);
+}
+
+StreamTime SourceMediaStream::AppendToTrack(TrackID aID, MediaSegment* aSegment,
+                                            MediaSegment* aRawSegment) {
+  MutexAutoLock lock(mMutex);
+  // ::EndAllTrackAndFinish() can end these before the sources notice
+  StreamTime appended = 0;
+  auto graph = GraphImpl();
+  if (!mFinished && graph) {
+    TrackData* track = FindDataForTrack(aID);
+    if (track) {
+      // Data goes into mData, and on the next iteration of the MSG moves
+      // into the track's segment after NotifyQueuedTrackChanges().  This adds
+      // 0-10ms of delay before data gets to direct listeners.
+      // Indirect listeners (via subsequent TrackUnion nodes) are synced to
+      // playout time, and so can be delayed by buffering.
+
+      // Apply track disabling before notifying any consumers directly
+      // or inserting into the graph
+      ApplyTrackDisabling(aID, aSegment, aRawSegment);
+
+      ResampleAudioToGraphSampleRate(track, aSegment);
+
+      // Must notify first, since AppendFrom() will empty out aSegment
+      NotifyDirectConsumers(track, aRawSegment ? aRawSegment : aSegment);
+      appended = aSegment->GetDuration();
+      track->mData->AppendFrom(aSegment);  // note: aSegment is now dead
+      GraphImpl()->EnsureNextIteration();
+    } else {
+      aSegment->Clear();
+    }
+  }
+  return appended;
 }
 
-void SourceMediaTrack::SetAppendDataSourceRate(TrackRate aRate) {
+void SourceMediaStream::NotifyDirectConsumers(TrackData* aTrack,
+                                              MediaSegment* aSegment) {
+  mMutex.AssertCurrentThreadOwns();
+  MOZ_ASSERT(aTrack);
+
+  for (const TrackBound<DirectMediaStreamTrackListener>& source :
+       mDirectTrackListeners) {
+    if (aTrack->mID != source.mTrackID) {
+      continue;
+    }
+    StreamTime offset = 0;  // FIX! need a separate StreamTime.... or the end of
+                            // the internal buffer
+    source.mListener->NotifyRealtimeTrackDataAndApplyTrackDisabling(
+        Graph(), offset, *aSegment);
+  }
+}
+
+void SourceMediaStream::AddDirectTrackListenerImpl(
+    already_AddRefed<DirectMediaStreamTrackListener> aListener,
+    TrackID aTrackID) {
+  MOZ_ASSERT(IsTrackIDExplicit(aTrackID));
   MutexAutoLock lock(mMutex);
-  if (!mUpdateTrack) {
+
+  RefPtr<DirectMediaStreamTrackListener> listener = aListener;
+  LOG(LogLevel::Debug, ("%p: Adding direct track listener %p bound to track %d "
+                        "to source stream %p",
+                        GraphImpl(), listener.get(), aTrackID, this));
+
+  StreamTracks::Track* track = FindTrack(aTrackID);
+
+  if (!track) {
+    LOG(LogLevel::Warning,
+        ("%p: Couldn't find source track for direct track listener %p",
+         GraphImpl(), listener.get()));
+    listener->NotifyDirectListenerInstalled(
+        DirectMediaStreamTrackListener::InstallationResult::
+            TRACK_NOT_FOUND_AT_SOURCE);
     return;
   }
-  MOZ_DIAGNOSTIC_ASSERT(mSegment->GetType() == MediaSegment::AUDIO);
-  // Set the new input rate and reset the resampler.
-  mUpdateTrack->mInputRate = aRate;
-  mUpdateTrack->mResampler.own(nullptr);
-  mUpdateTrack->mResamplerChannelCount = 0;
-}
-
-TrackTime SourceMediaTrack::AppendData(MediaSegment* aSegment,
-                                       MediaSegment* aRawSegment) {
-  MutexAutoLock lock(mMutex);
-  MOZ_DIAGNOSTIC_ASSERT(aSegment->GetType() == mType);
-  TrackTime appended = 0;
-  auto graph = GraphImpl();
-  if (!mUpdateTrack || mUpdateTrack->mEnded || !graph) {
-    aSegment->Clear();
-    return appended;
-  }
-
-  // Data goes into mData, and on the next iteration of the MTG moves
-  // into the track's segment after NotifyQueuedTrackChanges().  This adds
-  // 0-10ms of delay before data gets to direct listeners.
-  // Indirect listeners (via subsequent TrackUnion nodes) are synced to
-  // playout time, and so can be delayed by buffering.
-
-  // Apply track disabling before notifying any consumers directly
-  // or inserting into the graph
-  ApplyTrackDisabling(aSegment, aRawSegment);
-
-  ResampleAudioToGraphSampleRate(aSegment);
-
-  // Must notify first, since AppendFrom() will empty out aSegment
-  NotifyDirectConsumers(aRawSegment ? aRawSegment : aSegment);
-  appended = aSegment->GetDuration();
-  mUpdateTrack->mData->AppendFrom(aSegment);  // note: aSegment is now dead
-  graph->EnsureNextIteration();
-
-  return appended;
-}
-
-void SourceMediaTrack::NotifyDirectConsumers(MediaSegment* aSegment) {
-  mMutex.AssertCurrentThreadOwns();
-
-  for (const auto& l : mDirectTrackListeners) {
-    TrackTime offset = 0;  // FIX! need a separate TrackTime.... or the end of
-                           // the internal buffer
-    l->NotifyRealtimeTrackDataAndApplyTrackDisabling(Graph(), offset,
-                                                     *aSegment);
-  }
-}
-
-void SourceMediaTrack::AddDirectListenerImpl(
-    already_AddRefed<DirectMediaTrackListener> aListener) {
-  MutexAutoLock lock(mMutex);
-
-  RefPtr<DirectMediaTrackListener> listener = aListener;
-  LOG(LogLevel::Debug,
-      ("%p: Adding direct track listener %p to source track %p", GraphImpl(),
-       listener.get(), this));
-
-  MOZ_ASSERT(mType == MediaSegment::VIDEO);
-  for (const auto& l : mDirectTrackListeners) {
-    if (l == listener) {
+
+  MOZ_ASSERT(track->GetType() == MediaSegment::VIDEO);
+  for (auto entry : mDirectTrackListeners) {
+    if (entry.mListener == listener &&
+        (entry.mTrackID == TRACK_ANY || entry.mTrackID == aTrackID)) {
       listener->NotifyDirectListenerInstalled(
-          DirectMediaTrackListener::InstallationResult::ALREADY_EXISTS);
+          DirectMediaStreamTrackListener::InstallationResult::ALREADY_EXISTS);
       return;
     }
   }
 
-  mDirectTrackListeners.AppendElement(listener);
+  TrackBound<DirectMediaStreamTrackListener>* sourceListener =
+      mDirectTrackListeners.AppendElement();
+  sourceListener->mListener = listener;
+  sourceListener->mTrackID = aTrackID;
 
   LOG(LogLevel::Debug,
       ("%p: Added direct track listener %p", GraphImpl(), listener.get()));
   listener->NotifyDirectListenerInstalled(
-      DirectMediaTrackListener::InstallationResult::SUCCESS);
-
-  if (mEnded) {
-    return;
-  }
+      DirectMediaStreamTrackListener::InstallationResult::SUCCESS);
 
   // Pass buffered data to the listener
   VideoSegment bufferedData;
   size_t videoFrames = 0;
-  VideoSegment& segment = *GetData<VideoSegment>();
-  for (VideoSegment::ConstChunkIterator iter(segment); !iter.IsEnded();
+  VideoSegment& trackSegment = static_cast<VideoSegment&>(*track->GetSegment());
+  for (VideoSegment::ConstChunkIterator iter(trackSegment); !iter.IsEnded();
        iter.Next()) {
     if (iter->mTimeStamp.IsNull()) {
       // No timestamp means this is only for the graph's internal book-keeping,
       // denoting a late start of the track.
       continue;
     }
     ++videoFrames;
     bufferedData.AppendFrame(do_AddRef(iter->mFrame.GetImage()),
                              iter->mFrame.GetIntrinsicSize(),
                              iter->mFrame.GetPrincipalHandle(),
                              iter->mFrame.GetForceBlack(), iter->mTimeStamp);
   }
 
-  VideoSegment& video = static_cast<VideoSegment&>(*mUpdateTrack->mData);
-  for (VideoSegment::ConstChunkIterator iter(video); !iter.IsEnded();
-       iter.Next()) {
-    ++videoFrames;
-    MOZ_ASSERT(!iter->mTimeStamp.IsNull());
-    bufferedData.AppendFrame(do_AddRef(iter->mFrame.GetImage()),
-                             iter->mFrame.GetIntrinsicSize(),
-                             iter->mFrame.GetPrincipalHandle(),
-                             iter->mFrame.GetForceBlack(), iter->mTimeStamp);
+  if (TrackData* updateData = FindDataForTrack(aTrackID)) {
+    VideoSegment& video = static_cast<VideoSegment&>(*updateData->mData);
+    for (VideoSegment::ConstChunkIterator iter(video); !iter.IsEnded();
+         iter.Next()) {
+      ++videoFrames;
+      MOZ_ASSERT(!iter->mTimeStamp.IsNull());
+      bufferedData.AppendFrame(do_AddRef(iter->mFrame.GetImage()),
+                               iter->mFrame.GetIntrinsicSize(),
+                               iter->mFrame.GetPrincipalHandle(),
+                               iter->mFrame.GetForceBlack(), iter->mTimeStamp);
+    }
   }
 
   LOG(LogLevel::Info,
       ("%p: Notifying direct listener %p of %zu video frames and duration "
        "%" PRId64,
        GraphImpl(), listener.get(), videoFrames, bufferedData.GetDuration()));
   listener->NotifyRealtimeTrackData(Graph(), 0, bufferedData);
 }
 
-void SourceMediaTrack::RemoveDirectListenerImpl(
-    DirectMediaTrackListener* aListener) {
+void SourceMediaStream::RemoveDirectTrackListenerImpl(
+    DirectMediaStreamTrackListener* aListener, TrackID aTrackID) {
   MutexAutoLock lock(mMutex);
   for (int32_t i = mDirectTrackListeners.Length() - 1; i >= 0; --i) {
-    const RefPtr<DirectMediaTrackListener>& l = mDirectTrackListeners[i];
-    if (l == aListener) {
+    const TrackBound<DirectMediaStreamTrackListener>& source =
+        mDirectTrackListeners[i];
+    if (source.mListener == aListener && source.mTrackID == aTrackID) {
       aListener->NotifyDirectListenerUninstalled();
       mDirectTrackListeners.RemoveElementAt(i);
     }
   }
 }
 
-void SourceMediaTrack::End() {
+void SourceMediaStream::EndTrack(TrackID aID) {
   MutexAutoLock lock(mMutex);
-  if (!mUpdateTrack) {
-    // Already ended
-    return;
+  TrackData* track = FindDataForTrack(aID);
+  if (track) {
+    track->mCommands |= TrackEventCommand::TRACK_EVENT_ENDED;
   }
-  mUpdateTrack->mEnded = true;
+  if (auto graph = GraphImpl()) {
+    graph->EnsureNextIteration();
+  }
+}
+
+void SourceMediaStream::FinishPendingWithLockHeld() {
+  mMutex.AssertCurrentThreadOwns();
+  mFinishPending = true;
   if (auto graph = GraphImpl()) {
     graph->EnsureNextIteration();
   }
 }
 
-void SourceMediaTrack::SetEnabledImpl(DisabledTrackMode aMode) {
+void SourceMediaStream::SetTrackEnabledImpl(TrackID aTrackID,
+                                            DisabledTrackMode aMode) {
   {
     MutexAutoLock lock(mMutex);
-    for (const auto& l : mDirectTrackListeners) {
-      DisabledTrackMode oldMode = mDisabledMode;
+    for (TrackBound<DirectMediaStreamTrackListener>& l :
+         mDirectTrackListeners) {
+      if (l.mTrackID != aTrackID) {
+        continue;
+      }
+      DisabledTrackMode oldMode = GetDisabledTrackMode(aTrackID);
       bool oldEnabled = oldMode == DisabledTrackMode::ENABLED;
       if (!oldEnabled && aMode == DisabledTrackMode::ENABLED) {
-        LOG(LogLevel::Debug, ("%p: SourceMediaTrack %p setting "
+        LOG(LogLevel::Debug, ("%p: SourceMediaStream %p track %d setting "
                               "direct listener enabled",
-                              GraphImpl(), this));
-        l->DecreaseDisabled(oldMode);
+                              GraphImpl(), this, aTrackID));
+        l.mListener->DecreaseDisabled(oldMode);
       } else if (oldEnabled && aMode != DisabledTrackMode::ENABLED) {
-        LOG(LogLevel::Debug, ("%p: SourceMediaTrack %p setting "
+        LOG(LogLevel::Debug, ("%p: SourceMediaStream %p track %d setting "
                               "direct listener disabled",
-                              GraphImpl(), this));
-        l->IncreaseDisabled(aMode);
+                              GraphImpl(), this, aTrackID));
+        l.mListener->IncreaseDisabled(aMode);
       }
     }
   }
-  MediaTrack::SetEnabledImpl(aMode);
+  MediaStream::SetTrackEnabledImpl(aTrackID, aMode);
 }
 
-void SourceMediaTrack::RemoveAllDirectListenersImpl() {
+void SourceMediaStream::EndAllTrackAndFinish() {
+  MutexAutoLock lock(mMutex);
+  for (uint32_t i = 0; i < mUpdateTracks.Length(); ++i) {
+    SourceMediaStream::TrackData* data = &mUpdateTracks[i];
+    data->mCommands |= TrackEventCommand::TRACK_EVENT_ENDED;
+  }
+  mPendingTracks.Clear();
+  FinishPendingWithLockHeld();
+  // we will call NotifyEvent() to let GetUserMedia know
+}
+
+void SourceMediaStream::RemoveAllDirectListenersImpl() {
   GraphImpl()->AssertOnGraphThreadOrNotRunning();
 
   auto directListeners(mDirectTrackListeners);
   for (auto& l : directListeners) {
-    l->NotifyDirectListenerUninstalled();
+    l.mListener->NotifyDirectListenerUninstalled();
   }
   mDirectTrackListeners.Clear();
 }
 
-SourceMediaTrack::~SourceMediaTrack() {}
+SourceMediaStream::~SourceMediaStream() {}
+
+bool SourceMediaStream::HasPendingAudioTrack() {
+  MutexAutoLock lock(mMutex);
+  bool audioTrackPresent = false;
+
+  for (auto& data : mPendingTracks) {
+    if (data.mData->GetType() == MediaSegment::AUDIO) {
+      audioTrackPresent = true;
+      break;
+    }
+  }
+
+  return audioTrackPresent;
+}
 
 void MediaInputPort::Init() {
   LOG(LogLevel::Debug, ("%p: Adding MediaInputPort %p (from %p to %p)",
                         mSource->GraphImpl(), this, mSource, mDest));
   // Only connect the port if it wasn't disconnected on allocation.
   if (mSource) {
     mSource->AddConsumer(this);
     mDest->AddInput(this);
@@ -2679,35 +2979,30 @@ void MediaInputPort::Disconnect() {
                "mSource must either both be null or both non-null");
   if (!mSource) return;
 
   mSource->RemoveConsumer(this);
   mDest->RemoveInput(this);
   mSource = nullptr;
   mDest = nullptr;
 
-  GraphImpl()->SetTrackOrderDirty();
+  GraphImpl()->SetStreamOrderDirty();
 }
 
 MediaInputPort::InputInterval MediaInputPort::GetNextInputInterval(
-    MediaInputPort const* aPort, GraphTime aTime) {
+    GraphTime aTime) const {
   InputInterval result = {GRAPH_TIME_MAX, GRAPH_TIME_MAX, false};
-  if (!aPort) {
-    result.mStart = aTime;
-    result.mInputIsBlocked = true;
-    return result;
-  }
-  if (aTime >= aPort->mDest->mStartBlocking) {
+  if (aTime >= mDest->mStartBlocking) {
     return result;
   }
   result.mStart = aTime;
-  result.mEnd = aPort->mDest->mStartBlocking;
-  result.mInputIsBlocked = aTime >= aPort->mSource->mStartBlocking;
+  result.mEnd = mDest->mStartBlocking;
+  result.mInputIsBlocked = aTime >= mSource->mStartBlocking;
   if (!result.mInputIsBlocked) {
-    result.mEnd = std::min(result.mEnd, aPort->mSource->mStartBlocking);
+    result.mEnd = std::min(result.mEnd, mSource->mStartBlocking);
   }
   return result;
 }
 
 void MediaInputPort::Suspended() { mDest->InputSuspended(this); }
 
 void MediaInputPort::Resumed() { mDest->InputResumed(this); }
 
@@ -2722,166 +3017,243 @@ void MediaInputPort::Destroy() {
       mPort->SetGraphImpl(nullptr);
       NS_RELEASE(mPort);
     }
     void RunDuringShutdown() override { Run(); }
     MediaInputPort* mPort;
   };
   // Keep a reference to the graph, since Message might RunDuringShutdown()
   // synchronously and make GraphImpl() invalid.
-  RefPtr<MediaTrackGraphImpl> graph = GraphImpl();
+  RefPtr<MediaStreamGraphImpl> graph = GraphImpl();
   graph->AppendMessage(MakeUnique<Message>(this));
   --graph->mMainThreadPortCount;
 }
 
-MediaTrackGraphImpl* MediaInputPort::GraphImpl() { return mGraph; }
-
-MediaTrackGraph* MediaInputPort::Graph() { return mGraph; }
-
-void MediaInputPort::SetGraphImpl(MediaTrackGraphImpl* aGraph) {
+MediaStreamGraphImpl* MediaInputPort::GraphImpl() { return mGraph; }
+
+MediaStreamGraph* MediaInputPort::Graph() { return mGraph; }
+
+void MediaInputPort::SetGraphImpl(MediaStreamGraphImpl* aGraph) {
   MOZ_ASSERT(!mGraph || !aGraph, "Should only be set once");
   mGraph = aGraph;
 }
 
-already_AddRefed<MediaInputPort> ProcessedMediaTrack::AllocateInputPort(
-    MediaTrack* aTrack, uint16_t aInputNumber, uint16_t aOutputNumber) {
+void MediaInputPort::BlockSourceTrackIdImpl(TrackID aTrackId,
+                                            BlockingMode aBlockingMode) {
+  mBlockedTracks.AppendElement(
+      Pair<TrackID, BlockingMode>(aTrackId, aBlockingMode));
+}
+
+RefPtr<GenericPromise> MediaInputPort::BlockSourceTrackId(
+    TrackID aTrackId, BlockingMode aBlockingMode) {
+  class Message : public ControlMessage {
+   public:
+    Message(MediaInputPort* aPort, TrackID aTrackId, BlockingMode aBlockingMode,
+            already_AddRefed<nsIRunnable> aRunnable)
+        : ControlMessage(aPort->GetDestination()),
+          mPort(aPort),
+          mTrackId(aTrackId),
+          mBlockingMode(aBlockingMode),
+          mRunnable(aRunnable) {}
+    void Run() override {
+      mPort->BlockSourceTrackIdImpl(mTrackId, mBlockingMode);
+      if (mRunnable) {
+        mStream->Graph()->DispatchToMainThreadStableState(mRunnable.forget());
+      }
+    }
+    void RunDuringShutdown() override { Run(); }
+    RefPtr<MediaInputPort> mPort;
+    TrackID mTrackId;
+    BlockingMode mBlockingMode;
+    nsCOMPtr<nsIRunnable> mRunnable;
+  };
+
+  MOZ_ASSERT(IsTrackIDExplicit(aTrackId), "Only explicit TrackID is allowed");
+
+  MozPromiseHolder<GenericPromise> holder;
+  RefPtr<GenericPromise> p = holder.Ensure(__func__);
+
+  class HolderRunnable : public Runnable {
+   public:
+    explicit HolderRunnable(MozPromiseHolder<GenericPromise>&& aHolder)
+        : Runnable("MediaInputPort::HolderRunnable"),
+          mHolder(std::move(aHolder)) {}
+
+    NS_IMETHOD Run() override {
+      MOZ_ASSERT(NS_IsMainThread());
+      mHolder.Resolve(true, __func__);
+      return NS_OK;
+    }
+
+   private:
+    ~HolderRunnable() {
+      mHolder.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+    }
+    MozPromiseHolder<GenericPromise> mHolder;
+  };
+
+  auto runnable = MakeRefPtr<HolderRunnable>(std::move(holder));
+  GraphImpl()->AppendMessage(
+      MakeUnique<Message>(this, aTrackId, aBlockingMode, runnable.forget()));
+  return p;
+}
+
+already_AddRefed<MediaInputPort> ProcessedMediaStream::AllocateInputPort(
+    MediaStream* aStream, TrackID aTrackID, TrackID aDestTrackID,
+    uint16_t aInputNumber, uint16_t aOutputNumber,
+    nsTArray<TrackID>* aBlockedTracks) {
   // This method creates two references to the MediaInputPort: one for
-  // the main thread, and one for the MediaTrackGraph.
+  // the main thread, and one for the MediaStreamGraph.
   class Message : public ControlMessage {
    public:
     explicit Message(MediaInputPort* aPort)
         : ControlMessage(aPort->GetDestination()), mPort(aPort) {}
     void Run() override {
       mPort->Init();
       // The graph holds its reference implicitly
-      mPort->GraphImpl()->SetTrackOrderDirty();
+      mPort->GraphImpl()->SetStreamOrderDirty();
       Unused << mPort.forget();
     }
     void RunDuringShutdown() override { Run(); }
     RefPtr<MediaInputPort> mPort;
   };
 
-  MOZ_DIAGNOSTIC_ASSERT(aTrack->mType == mType);
+  MOZ_ASSERT(aTrackID == TRACK_ANY || IsTrackIDExplicit(aTrackID),
+             "Only TRACK_ANY and explicit ID are allowed for source track");
+  MOZ_ASSERT(
+      aDestTrackID == TRACK_ANY || IsTrackIDExplicit(aDestTrackID),
+      "Only TRACK_ANY and explicit ID are allowed for destination track");
+  MOZ_ASSERT(
+      aTrackID != TRACK_ANY || aDestTrackID == TRACK_ANY,
+      "Generic MediaInputPort cannot produce a single destination track");
   RefPtr<MediaInputPort> port;
-  if (aTrack->IsDestroyed()) {
+  if (aStream->IsDestroyed()) {
     // Create a port that's disconnected, which is what it'd be after its source
-    // track is Destroy()ed normally. Disconnect() is idempotent so destroying
+    // stream is Destroy()ed normally. Disconnect() is idempotent so destroying
     // this later is fine.
-    port = new MediaInputPort(nullptr, nullptr, aInputNumber, aOutputNumber);
+    port = new MediaInputPort(nullptr, aTrackID, nullptr, aDestTrackID,
+                              aInputNumber, aOutputNumber);
   } else {
-    MOZ_ASSERT(aTrack->GraphImpl() == GraphImpl());
-    port = new MediaInputPort(aTrack, this, aInputNumber, aOutputNumber);
+    MOZ_ASSERT(aStream->GraphImpl() == GraphImpl());
+    port = new MediaInputPort(aStream, aTrackID, this, aDestTrackID,
+                              aInputNumber, aOutputNumber);
+  }
+  if (aBlockedTracks) {
+    for (TrackID trackID : *aBlockedTracks) {
+      port->BlockSourceTrackIdImpl(trackID, BlockingMode::CREATION);
+    }
   }
   port->SetGraphImpl(GraphImpl());
   ++GraphImpl()->mMainThreadPortCount;
   GraphImpl()->AppendMessage(MakeUnique<Message>(port));
   return port.forget();
 }
 
-void ProcessedMediaTrack::QueueSetAutoend(bool aAutoend) {
+void ProcessedMediaStream::QueueSetAutofinish(bool aAutofinish) {
   class Message : public ControlMessage {
    public:
-    Message(ProcessedMediaTrack* aTrack, bool aAutoend)
-        : ControlMessage(aTrack), mAutoend(aAutoend) {}
+    Message(ProcessedMediaStream* aStream, bool aAutofinish)
+        : ControlMessage(aStream), mAutofinish(aAutofinish) {}
     void Run() override {
-      static_cast<ProcessedMediaTrack*>(mTrack)->SetAutoendImpl(mAutoend);
+      static_cast<ProcessedMediaStream*>(mStream)->SetAutofinishImpl(
+          mAutofinish);
     }
-    bool mAutoend;
+    bool mAutofinish;
   };
-  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aAutoend));
+  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aAutofinish));
 }
 
-void ProcessedMediaTrack::DestroyImpl() {
+void ProcessedMediaStream::DestroyImpl() {
   for (int32_t i = mInputs.Length() - 1; i >= 0; --i) {
     mInputs[i]->Disconnect();
   }
 
   for (int32_t i = mSuspendedInputs.Length() - 1; i >= 0; --i) {
     mSuspendedInputs[i]->Disconnect();
   }
 
-  MediaTrack::DestroyImpl();
-  // The track order is only important if there are connections, in which
-  // case MediaInputPort::Disconnect() called SetTrackOrderDirty().
-  // MediaTrackGraphImpl::RemoveTrackGraphThread() will also call
-  // SetTrackOrderDirty(), for other reasons.
+  MediaStream::DestroyImpl();
+  // The stream order is only important if there are connections, in which
+  // case MediaInputPort::Disconnect() called SetStreamOrderDirty().
+  // MediaStreamGraphImpl::RemoveStreamGraphThread() will also call
+  // SetStreamOrderDirty(), for other reasons.
 }
 
-MediaTrackGraphImpl::MediaTrackGraphImpl(GraphDriverType aDriverRequested,
-                                         GraphRunType aRunTypeRequested,
-                                         TrackRate aSampleRate,
-                                         uint32_t aChannelCount,
-                                         AbstractThread* aMainThread)
-    : MediaTrackGraph(aSampleRate),
+MediaStreamGraphImpl::MediaStreamGraphImpl(GraphDriverType aDriverRequested,
+                                           GraphRunType aRunTypeRequested,
+                                           TrackRate aSampleRate,
+                                           uint32_t aChannelCount,
+                                           AbstractThread* aMainThread)
+    : MediaStreamGraph(aSampleRate),
       mGraphRunner(aRunTypeRequested == SINGLE_THREAD ? new GraphRunner(this)
                                                       : nullptr),
       mFirstCycleBreaker(0)
       // An offline graph is not initially processing.
       ,
       mEndTime(aDriverRequested == OFFLINE_THREAD_DRIVER ? 0 : GRAPH_TIME_MAX),
       mPortCount(0),
       mInputDeviceID(nullptr),
       mOutputDeviceID(nullptr),
       mNeedAnotherIteration(false),
       mGraphDriverAsleep(false),
-      mMonitor("MediaTrackGraphImpl"),
+      mMonitor("MediaStreamGraphImpl"),
       mLifecycleState(LIFECYCLE_THREAD_NOT_STARTED),
       mForceShutDown(false),
       mPostedRunInStableStateEvent(false),
       mDetectedNotRunning(false),
       mPostedRunInStableState(false),
       mRealtime(aDriverRequested != OFFLINE_THREAD_DRIVER),
-      mTrackOrderDirty