Backed out 14 changesets (bug 1536766) for causing RunWatchdog and mediaElementAudioSource wpt failures CLOSED TREE
author: Ciure Andrei <aciure@mozilla.com>
date: Fri, 19 Apr 2019 05:55:57 +0300
changeset: 470142 41f1dcbe9caab2d6c9b9407ec60bd83928d17c08
parent: 470141 08e2094f4b6b96b860a023c00c470665d53ce45d
child: 470143 bcd124c140a275c26c764c1793f4b036a37c639f
push id: 35888
push user: aiakab@mozilla.com
push date: Fri, 19 Apr 2019 09:47:45 +0000
treeherder: mozilla-central@0160424142d1
bugs: 1536766
milestone: 68.0a1
backs out: 72d37a08f281c24833d1c49e947fb6db4df2fb87
63fc8588506005ac9eb0f1ec45ed34e430c6cf4a
addbb04415cb8b82b5cdd5ff8a3905cafa6221fb
f2923dfcf33cd9567b83e2e2793a026b52893408
25f3a33ec51b06b14d8f5eef12ebc3afe4c21e88
199efe6aec594ef6d9f3c4c8af16b2e997a8bd51
f9d1f1bfe2b126dc1588e56724b860674519805a
87616997f16017b85f463691262261db6490c30b
36f99fa3c95664b13b4839a0900217debdb74539
000260ba28de8b30791c72a6871d5879b92fe59d
6386ed1b7d741daadea35af8389f58f8df0376b3
ab27d9f5902a6c6896dd16c89f8b05de474d6ff7
4f6d240c210d25b38d22d4c6b6474acfcda8c032
3e0e3030314dbf9cc4be5bac8721c04dd78d63ed
Backed out 14 changesets (bug 1536766) for causing RunWatchdog and mediaElementAudioSource wpt failures CLOSED TREE

Backed out changeset 72d37a08f281 (bug 1536766)
Backed out changeset 63fc85885060 (bug 1536766)
Backed out changeset addbb04415cb (bug 1536766)
Backed out changeset f2923dfcf33c (bug 1536766)
Backed out changeset 25f3a33ec51b (bug 1536766)
Backed out changeset 199efe6aec59 (bug 1536766)
Backed out changeset f9d1f1bfe2b1 (bug 1536766)
Backed out changeset 87616997f160 (bug 1536766)
Backed out changeset 36f99fa3c956 (bug 1536766)
Backed out changeset 000260ba28de (bug 1536766)
Backed out changeset 6386ed1b7d74 (bug 1536766)
Backed out changeset ab27d9f5902a (bug 1536766)
Backed out changeset 4f6d240c210d (bug 1536766)
Backed out changeset 3e0e3030314d (bug 1536766)
dom/html/HTMLMediaElement.cpp
dom/html/HTMLMediaElement.h
dom/media/MediaStreamGraph.cpp
dom/media/MediaStreamGraph.h
dom/media/TrackUnionStream.cpp
dom/media/VideoStreamTrack.cpp
dom/media/mediasink/DecodedStream.cpp
dom/media/mediasink/DecodedStream.h
dom/media/test/manifest.js
dom/media/test/test_streams_element_capture.html
dom/media/webaudio/AudioNodeStream.cpp
--- a/dom/html/HTMLMediaElement.cpp
+++ b/dom/html/HTMLMediaElement.cpp
@@ -2621,20 +2621,21 @@ nsresult HTMLMediaElement::LoadWithChann
 
 bool HTMLMediaElement::Seeking() const {
   return mDecoder && mDecoder->IsSeeking();
 }
 
 double HTMLMediaElement::CurrentTime() const {
   if (MediaStream* stream = GetSrcMediaStream()) {
     MediaStreamGraph* graph = stream->Graph();
-    GraphTime currentGraphTime =
-        mSrcStreamPausedGraphTime.valueOr(graph->CurrentTime());
-    StreamTime currentStreamTime = currentGraphTime - mSrcStreamGraphTimeOffset;
-    return stream->StreamTimeToSeconds(currentStreamTime);
+    GraphTime currentTime =
+        mSrcStreamPausedGraphTime == GRAPH_TIME_MAX
+            ? graph->CurrentTime() - mSrcStreamGraphTimeOffset
+            : mSrcStreamPausedGraphTime;
+    return stream->StreamTimeToSeconds(currentTime);
   }
 
   if (mDefaultPlaybackStartPosition == 0.0 && mDecoder) {
     return mDecoder->GetCurrentTime();
   }
 
   return mDefaultPlaybackStartPosition;
 }
@@ -4660,19 +4661,19 @@ void HTMLMediaElement::UpdateSrcMediaStr
   mSrcStreamIsPlaying = shouldPlay;
 
   LOG(LogLevel::Debug,
       ("MediaElement %p %s playback of DOMMediaStream %p", this,
        shouldPlay ? "Setting up" : "Removing", mSrcStream.get()));
 
   if (shouldPlay) {
     mSrcStreamPlaybackEnded = false;
-    mSrcStreamGraphTimeOffset +=
-        graph->CurrentTime() - mSrcStreamPausedGraphTime.ref();
-    mSrcStreamPausedGraphTime = Nothing();
+    mSrcStreamGraphTimeOffset =
+        graph->CurrentTime() - mSrcStreamPausedGraphTime;
+    mSrcStreamPausedGraphTime = GRAPH_TIME_MAX;
 
     mWatchManager.Watch(graph->CurrentTime(),
                         &HTMLMediaElement::UpdateSrcStreamTime);
 
     stream->AddAudioOutput(this);
     SetVolumeInternal();
     if (mSink.second()) {
       NS_WARNING(
@@ -4687,18 +4688,18 @@ void HTMLMediaElement::UpdateSrcMediaStr
     }
 
     SetCapturedOutputStreamsEnabled(true);  // Unmute
     // If the input is a media stream, we don't check its data and always regard
     // it as audible when it's playing.
     SetAudibleState(true);
   } else {
     if (stream) {
-      MOZ_DIAGNOSTIC_ASSERT(mSrcStreamPausedGraphTime.isNothing());
-      mSrcStreamPausedGraphTime = Some(graph->CurrentTime().Ref());
+      mSrcStreamPausedGraphTime =
+          graph->CurrentTime() - mSrcStreamGraphTimeOffset;
 
       mWatchManager.Unwatch(graph->CurrentTime(),
                             &HTMLMediaElement::UpdateSrcStreamTime);
 
       stream->RemoveAudioOutput(this);
       VideoFrameContainer* container = GetVideoFrameContainer();
       if (mSelectedVideoStreamTrack && container) {
         mSelectedVideoStreamTrack->RemoveVideoOutput(container);
@@ -4728,22 +4729,21 @@ void HTMLMediaElement::SetupSrcMediaStre
 
   mSrcStream = aStream;
 
   nsPIDOMWindowInner* window = OwnerDoc()->GetInnerWindow();
   if (!window) {
     return;
   }
 
-  mSrcStreamPausedGraphTime = Some(0);
+  mSrcStreamPausedGraphTime = 0;
   if (MediaStream* stream = GetSrcMediaStream()) {
     if (MediaStreamGraph* graph = stream->Graph()) {
       // The current graph time will represent 0 for this media element.
-      mSrcStreamGraphTimeOffset = graph->CurrentTime();
-      mSrcStreamPausedGraphTime = Some(mSrcStreamGraphTimeOffset);
+      mSrcStreamPausedGraphTime = graph->CurrentTime();
     }
   }
 
   UpdateSrcMediaStreamPlaying();
 
   // If we pause this media element, track changes in the underlying stream
   // will continue to fire events at this element and alter its track list.
   // That's simpler than delaying the events, but probably confusing...
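
The HTMLMediaElement.cpp hunks above swap the landed Maybe<GraphTime> bookkeeping back to a GRAPH_TIME_MAX sentinel: mSrcStreamPausedGraphTime holds the frozen stream time while paused and GRAPH_TIME_MAX while playing. A minimal standalone sketch of the restored invariant follows; PausedClock is a hypothetical name condensing the two members involved, with simplified types.

#include <cstdint>
#include <limits>

using GraphTime = int64_t;
constexpr GraphTime GRAPH_TIME_MAX = std::numeric_limits<GraphTime>::max();

// Hypothetical stand-in for the mSrcStreamPausedGraphTime /
// mSrcStreamGraphTimeOffset pair restored above.
struct PausedClock {
  GraphTime mPausedTime = GRAPH_TIME_MAX;  // GRAPH_TIME_MAX means "not paused"
  GraphTime mOffset = 0;  // graph time corresponding to stream time 0

  // Mirrors CurrentTime(): frozen value while paused, else derived from the
  // advancing graph time.
  GraphTime Current(GraphTime aGraphNow) const {
    return mPausedTime == GRAPH_TIME_MAX ? aGraphNow - mOffset : mPausedTime;
  }

  // Mirrors the pause branch of UpdateSrcMediaStreamPlaying(): freeze the
  // stream time at the moment of pausing.
  void Pause(GraphTime aGraphNow) { mPausedTime = aGraphNow - mOffset; }

  // Mirrors the resume branch: rebase the offset so Current() continues
  // seamlessly, then clear the sentinel.
  void Resume(GraphTime aGraphNow) {
    mOffset = aGraphNow - mPausedTime;
    mPausedTime = GRAPH_TIME_MAX;
  }
};

Pausing at graph time g freezes Current() at g - mOffset; resuming rebases mOffset so Current() continues from exactly the frozen value.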
--- a/dom/html/HTMLMediaElement.h
+++ b/dom/html/HTMLMediaElement.h
@@ -1309,20 +1309,20 @@ class HTMLMediaElement : public nsGeneri
   // actually playing.
   // At most one of mDecoder and mSrcStream can be non-null.
   RefPtr<DOMMediaStream> mSrcStream;
 
   // True once mSrcStream's initial set of tracks are known.
   bool mSrcStreamTracksAvailable = false;
 
   // While mPaused is true and mSrcStream is set, this is the value to use for
-  // CurrentTime(). Otherwise this is Nothing.
-  Maybe<GraphTime> mSrcStreamPausedGraphTime;
+  // CurrentTime(). Otherwise this is set to GRAPH_TIME_MAX.
+  GraphTime mSrcStreamPausedGraphTime = GRAPH_TIME_MAX;
 
-  // The offset in GraphTime at which this media element started playing the
+  // The offset in GraphTime that this media element started playing the
   // playback stream of mSrcStream.
   GraphTime mSrcStreamGraphTimeOffset = 0;
 
   // True once PlaybackEnded() is called and we're playing a MediaStream.
   // Reset to false if we start playing mSrcStream again.
   bool mSrcStreamPlaybackEnded = false;
 
   // Holds a reference to the stream connecting this stream to the capture sink.
--- a/dom/media/MediaStreamGraph.cpp
+++ b/dom/media/MediaStreamGraph.cpp
@@ -162,18 +162,17 @@ void MediaStreamGraphImpl::UpdateCurrent
           stream->GraphTimeToStreamTime(mStateComputedTime);
       if (track->IsEnded() && track->GetEnd() <= streamCurrentTime) {
         if (!track->NotifiedEnded()) {
           // Playout of this track ended and listeners have not been notified.
           track->NotifyEnded();
           for (const TrackBound<MediaStreamTrackListener>& listener :
                stream->mTrackListeners) {
             if (listener.mTrackID == track->GetID()) {
-              listener.mListener->NotifyOutput(
-                  this, track->GetEnd() - track->GetStart());
+              listener.mListener->NotifyOutput(this, track->GetEnd());
               listener.mListener->NotifyEnded();
             }
           }
         }
       } else {
         for (const TrackBound<MediaStreamTrackListener>& listener :
              stream->mTrackListeners) {
           if (listener.mTrackID == track->GetID()) {
@@ -1856,16 +1855,17 @@ void MediaStreamGraphImpl::Dispatch(alre
 }
 
 MediaStream::MediaStream()
     : mTracksStartTime(0),
       mStartBlocking(GRAPH_TIME_MAX),
       mSuspendedCount(0),
       mFinished(false),
       mNotifiedFinished(false),
+      mHasCurrentData(false),
       mMainThreadCurrentTime(0),
       mMainThreadFinished(false),
       mFinishedNotificationSent(false),
       mMainThreadDestroyed(false),
       mNrOfMainThreadUsers(0),
       mGraph(nullptr) {
   MOZ_COUNT_CTOR(MediaStream);
 }
@@ -2513,18 +2513,17 @@ bool SourceMediaStream::PullNewData(Grap
       // (which the track will get as its start time later).
       current = GraphTimeToStreamTime(GraphImpl()->mStateComputedTime);
     } else {
       current = track.mEndOfFlushedData + track.mData->GetDuration();
     }
     if (t <= current) {
       continue;
     }
-    if (!track.mPullingEnabled &&
-        track.mData->GetType() == MediaSegment::AUDIO) {
+    if (!track.mPullingEnabled) {
       if (streamPullingEnabled) {
         LOG(LogLevel::Verbose,
             ("%p: Pulling disabled for track but enabled for stream, append "
              "null data; stream=%p track=%d t=%f current end=%f",
              GraphImpl(), this, track.mID, GraphImpl()->MediaTimeToSeconds(t),
              GraphImpl()->MediaTimeToSeconds(current)));
         track.mData->AppendNullData(t - current);
       }
@@ -2539,77 +2538,16 @@ bool SourceMediaStream::PullNewData(Grap
       if (l.mTrackID == track.mID) {
         l.mListener->NotifyPull(Graph(), current, t);
       }
     }
   }
   return true;
 }
 
-/**
- * This moves chunks from aIn to aOut. For audio this is simple. For video
- * we carry durations over if present, or extend up to aDesiredUpToTime if not.
- *
- * We also handle "resetters" from captured media elements. This type of source
- * pushes future frames into the track, and should it need to remove some, e.g.,
- * because of a seek or pause, it tells us by letting time go backwards. Without
- * this, tracks would be live for too long after a seek or pause.
- */
-static void MoveToSegment(SourceMediaStream* aStream, MediaSegment* aIn,
-                          MediaSegment* aOut, StreamTime aCurrentTime,
-                          StreamTime aDesiredUpToTime) {
-  MOZ_ASSERT(aIn->GetType() == aOut->GetType());
-  MOZ_ASSERT(aOut->GetDuration() >= aCurrentTime);
-  if (aIn->GetType() == MediaSegment::AUDIO) {
-    aOut->AppendFrom(aIn);
-  } else {
-    VideoSegment* in = static_cast<VideoSegment*>(aIn);
-    VideoSegment* out = static_cast<VideoSegment*>(aOut);
-    for (VideoSegment::ConstChunkIterator c(*in); !c.IsEnded(); c.Next()) {
-      MOZ_ASSERT(!c->mTimeStamp.IsNull());
-      VideoChunk* last = out->GetLastChunk();
-      if (!last || last->mTimeStamp.IsNull()) {
-        // This is the first frame, or the last frame pushed to `out` has been
-        // all consumed. Just append and we deal with its duration later.
-        out->AppendFrame(do_AddRef(c->mFrame.GetImage()),
-                         c->mFrame.GetIntrinsicSize(),
-                         c->mFrame.GetPrincipalHandle(),
-                         c->mFrame.GetForceBlack(), c->mTimeStamp);
-        if (c->GetDuration() > 0) {
-          out->ExtendLastFrameBy(c->GetDuration());
-        }
-        continue;
-      }
-
-      // We now know when this frame starts, aka when the last frame ends.
-
-      if (c->mTimeStamp < last->mTimeStamp) {
-        // Time is going backwards. This is a resetting frame from
-        // DecodedStream. Clear everything up to currentTime.
-        out->Clear();
-        out->AppendNullData(aCurrentTime);
-      }
-
-      // Append the current frame (will have duration 0).
-      out->AppendFrame(do_AddRef(c->mFrame.GetImage()),
-                       c->mFrame.GetIntrinsicSize(),
-                       c->mFrame.GetPrincipalHandle(),
-                       c->mFrame.GetForceBlack(), c->mTimeStamp);
-      if (c->GetDuration() > 0) {
-        out->ExtendLastFrameBy(c->GetDuration());
-      }
-    }
-    if (out->GetDuration() < aDesiredUpToTime) {
-      out->ExtendLastFrameBy(aDesiredUpToTime - out->GetDuration());
-    }
-    in->Clear();
-  }
-  MOZ_ASSERT(aIn->GetDuration() == 0, "aIn must be consumed");
-}
-
 void SourceMediaStream::ExtractPendingInput(GraphTime aCurrentTime,
                                             GraphTime aDesiredUpToTime) {
   MutexAutoLock lock(mMutex);
 
   bool finished = mFinishPending;
   StreamTime streamCurrentTime = GraphTimeToStreamTime(aCurrentTime);
   StreamTime streamDesiredUpToTime = GraphTimeToStreamTime(aDesiredUpToTime);
 
@@ -2627,47 +2565,60 @@ void SourceMediaStream::ExtractPendingIn
 
     for (TrackBound<MediaStreamTrackListener>& b : mTrackListeners) {
       if (b.mTrackID != data->mID) {
         continue;
       }
       b.mListener->NotifyQueuedChanges(GraphImpl(), offset, *data->mData);
     }
     if (data->mCommands & SourceMediaStream::TRACK_CREATE) {
-      MediaSegment* segment = data->mData->CreateEmptyClone();
+      MediaSegment* segment = data->mData.forget();
       LOG(LogLevel::Debug,
           ("%p: SourceMediaStream %p creating track %d, start %" PRId64
            ", initial end %" PRId64,
            GraphImpl(), this, data->mID, int64_t(streamCurrentTime),
            int64_t(segment->GetDuration())));
 
-      segment->AppendNullData(streamCurrentTime);
-      MoveToSegment(this, data->mData, segment, streamCurrentTime,
-                    streamDesiredUpToTime);
+      segment->InsertNullDataAtStart(streamCurrentTime);
       data->mEndOfFlushedData += segment->GetDuration();
       mTracks.AddTrack(data->mID, streamCurrentTime, segment);
+      // The track has taken ownership of data->mData, so let's replace
+      // data->mData with an empty clone.
+      data->mData = segment->CreateEmptyClone();
       data->mCommands &= ~SourceMediaStream::TRACK_CREATE;
     } else {
-      StreamTracks::Track* track = mTracks.FindTrack(data->mID);
-      MediaSegment* dest = track->GetSegment();
+      MediaSegment* dest = mTracks.FindTrack(data->mID)->GetSegment();
       LOG(LogLevel::Verbose,
           ("%p: SourceMediaStream %p track %d, advancing end from %" PRId64
            " to %" PRId64,
            GraphImpl(), this, data->mID, int64_t(dest->GetDuration()),
            int64_t(dest->GetDuration() + data->mData->GetDuration())));
       data->mEndOfFlushedData += data->mData->GetDuration();
-      MoveToSegment(this, data->mData, dest, streamCurrentTime,
-                    streamDesiredUpToTime);
+      dest->AppendFrom(data->mData);
     }
     if (data->mCommands & SourceMediaStream::TRACK_END) {
       mTracks.FindTrack(data->mID)->SetEnded();
       mUpdateTracks.RemoveElementAt(i);
+    } else if (!data->mPullingEnabled &&
+               data->mData->GetType() == MediaSegment::VIDEO) {
+      // This video track is pushed. Since we use timestamps rather than
+      // durations for video we avoid making the video track block the stream
+      // by extending the duration when there's not enough video data, so a
+      // video track always has valid data.
+      VideoSegment* segment = static_cast<VideoSegment*>(
+          mTracks.FindTrack(data->mID)->GetSegment());
+      StreamTime missingTime = streamDesiredUpToTime - segment->GetDuration();
+      segment->ExtendLastFrameBy(missingTime);
     }
   }
 
+  if (mTracks.GetEarliestTrackEnd() > 0) {
+    mHasCurrentData = true;
+  }
+
   if (finished) {
     FinishOnGraphThread();
   }
 }
 
 void SourceMediaStream::AddTrackInternal(TrackID aID, TrackRate aRate,
                                          MediaSegment* aSegment,
                                          uint32_t aFlags) {
@@ -2830,49 +2781,54 @@ void SourceMediaStream::AddDirectTrackLi
   LOG(LogLevel::Debug,
       ("%p: Added direct track listener %p", GraphImpl(), listener.get()));
   listener->NotifyDirectListenerInstalled(
       DirectMediaStreamTrackListener::InstallationResult::SUCCESS);
 
   // Pass buffered data to the listener
   VideoSegment bufferedData;
   size_t videoFrames = 0;
+  // For video we append all non-null chunks, as we're only interested in
+  // real frames and their timestamps.
   VideoSegment& trackSegment = static_cast<VideoSegment&>(*track->GetSegment());
   for (VideoSegment::ConstChunkIterator iter(trackSegment); !iter.IsEnded();
        iter.Next()) {
-    if (iter->mTimeStamp.IsNull()) {
-      // No timestamp means this is only for the graph's internal book-keeping,
-      // denoting a late start of the track.
+    if (iter->IsNull()) {
       continue;
     }
     ++videoFrames;
+    MOZ_ASSERT(!iter->mTimeStamp.IsNull());
     bufferedData.AppendFrame(do_AddRef(iter->mFrame.GetImage()),
                              iter->mFrame.GetIntrinsicSize(),
                              iter->mFrame.GetPrincipalHandle(),
                              iter->mFrame.GetForceBlack(), iter->mTimeStamp);
   }
 
   if (TrackData* updateData = FindDataForTrack(aTrackID)) {
     VideoSegment& video = static_cast<VideoSegment&>(*updateData->mData);
     for (VideoSegment::ConstChunkIterator iter(video); !iter.IsEnded();
          iter.Next()) {
+      if (iter->IsNull()) {
+        continue;
+      }
       ++videoFrames;
-      MOZ_ASSERT(!iter->mTimeStamp.IsNull());
       bufferedData.AppendFrame(do_AddRef(iter->mFrame.GetImage()),
                                iter->mFrame.GetIntrinsicSize(),
                                iter->mFrame.GetPrincipalHandle(),
                                iter->mFrame.GetForceBlack(), iter->mTimeStamp);
     }
   }
 
   LOG(LogLevel::Info,
       ("%p: Notifying direct listener %p of %zu video frames and duration "
        "%" PRId64,
        GraphImpl(), listener.get(), videoFrames, bufferedData.GetDuration()));
-  listener->NotifyRealtimeTrackData(Graph(), 0, bufferedData);
+  if (!bufferedData.IsNull()) {
+    listener->NotifyRealtimeTrackData(Graph(), 0, bufferedData);
+  }
 }
 
 void SourceMediaStream::RemoveDirectTrackListenerImpl(
     DirectMediaStreamTrackListener* aListener, TrackID aTrackID) {
   MutexAutoLock lock(mMutex);
   for (int32_t i = mDirectTrackListeners.Length() - 1; i >= 0; --i) {
     const TrackBound<DirectMediaStreamTrackListener>& source =
         mDirectTrackListeners[i];
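
The last MediaStreamGraph.cpp hunks above restore the pre-landing handling of pushed (non-pulled) video tracks in ExtractPendingInput(): instead of routing data through the removed MoveToSegment() helper, the track's last frame is stretched to cover whatever duration the graph wants to play up to, so a pushed video track never blocks the stream. A minimal sketch under simplified types; Frame and EnsureVideoCoversDesiredTime are hypothetical names, and the guard on missing > 0 is an assumption (the hunk extends by missingTime unconditionally).

#include <cassert>
#include <cstdint>
#include <vector>

using StreamTime = int64_t;

struct Frame {
  int id;
  StreamTime duration;
};

struct VideoSegment {
  std::vector<Frame> mFrames;

  StreamTime GetDuration() const {
    StreamTime d = 0;
    for (const Frame& f : mFrames) d += f.duration;
    return d;
  }

  void ExtendLastFrameBy(StreamTime aBy) {
    assert(!mFrames.empty());
    mFrames.back().duration += aBy;
  }
};

// Mirrors the restored "!data->mPullingEnabled && VIDEO" branch above.
void EnsureVideoCoversDesiredTime(VideoSegment& aSegment,
                                  StreamTime aDesiredUpToTime) {
  StreamTime missing = aDesiredUpToTime - aSegment.GetDuration();
  if (missing > 0) {
    aSegment.ExtendLastFrameBy(missing);
  }
}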
--- a/dom/media/MediaStreamGraph.h
+++ b/dom/media/MediaStreamGraph.h
@@ -485,16 +485,18 @@ class MediaStream : public mozilla::Link
    * having its blocking time calculated in UpdateGraph and its blocking time
    * taken account of in UpdateCurrentTimeForStreams.
    */
   GraphTime StreamTimeToGraphTime(StreamTime aTime) const;
 
   bool IsFinishedOnGraphThread() const { return mFinished; }
   virtual void FinishOnGraphThread();
 
+  bool HasCurrentData() const { return mHasCurrentData; }
+
   /**
    * Find track by track id.
    */
   StreamTracks::Track* FindTrack(TrackID aID) const;
 
   StreamTracks::Track* EnsureTrack(TrackID aTrack);
 
   virtual void ApplyTrackDisabling(TrackID aTrackID, MediaSegment* aSegment,
@@ -603,16 +605,23 @@ class MediaStream : public mozilla::Link
    * Only accessed on the graph thread
    */
   bool mFinished;
   /**
    * When true, mFinished is true and we've played all the data in this stream
    * and fired NotifyFinished notifications.
    */
   bool mNotifiedFinished;
+  /**
+   * True if some data can be presented by this stream if/when it's unblocked.
+   * Set by the stream itself on the MediaStreamGraph thread. Only changes
+   * from false to true once a stream has data, since we won't
+   * unblock it until there's more data.
+   */
+  bool mHasCurrentData;
 
   // Main-thread views of state
   StreamTime mMainThreadCurrentTime;
   bool mMainThreadFinished;
   bool mFinishedNotificationSent;
   bool mMainThreadDestroyed;
   int mNrOfMainThreadUsers;
 
--- a/dom/media/TrackUnionStream.cpp
+++ b/dom/media/TrackUnionStream.cpp
@@ -76,24 +76,28 @@ void TrackUnionStream::ProcessInput(Grap
     mappedTracksFinished.AppendElement(true);
     mappedTracksWithMatchingInputTracks.AppendElement(false);
   }
 
   AutoTArray<MediaInputPort*, 32> inputs(mInputs);
   inputs.AppendElements(mSuspendedInputs);
 
   bool allFinished = !inputs.IsEmpty();
+  bool allHaveCurrentData = !inputs.IsEmpty();
   for (uint32_t i = 0; i < inputs.Length(); ++i) {
     MediaStream* stream = inputs[i]->GetSource();
     if (!stream->IsFinishedOnGraphThread()) {
       // XXX we really should check whether 'stream' has finished within time
       // aTo, not just that it's finishing when all its queued data eventually
       // runs out.
       allFinished = false;
     }
+    if (!stream->HasCurrentData()) {
+      allHaveCurrentData = false;
+    }
     for (StreamTracks::TrackIter tracks(stream->GetStreamTracks());
          !tracks.IsEnded(); tracks.Next()) {
       bool found = false;
       for (uint32_t j = 0; j < mTrackMap.Length(); ++j) {
         TrackMapEntry* map = &mTrackMap[j];
         if (map->mInputPort == inputs[i] &&
             map->mInputTrackID == tracks->GetID()) {
           bool trackFinished = false;
@@ -135,16 +139,20 @@ void TrackUnionStream::ProcessInput(Grap
     }
   }
   if (allFinished && mAutofinish && (aFlags & ALLOW_FINISH)) {
     // All streams have finished and won't add any more tracks, and
     // all our tracks have actually finished and been removed from our map,
     // so we're finished now.
     FinishOnGraphThread();
   }
+  if (allHaveCurrentData) {
+    // We can make progress if we're not blocked
+    mHasCurrentData = true;
+  }
 }
 
 uint32_t TrackUnionStream::AddTrack(MediaInputPort* aPort,
                                     StreamTracks::Track* aTrack,
                                     GraphTime aFrom) {
   STREAM_LOG(LogLevel::Verbose,
              ("TrackUnionStream %p adding track %d for "
               "input stream %p track %d, desired id %d",
--- a/dom/media/VideoStreamTrack.cpp
+++ b/dom/media/VideoStreamTrack.cpp
@@ -30,16 +30,18 @@ static bool SetImageToBlackPixel(PlanarY
   return aImage->CopyData(data);
 }
 
 class VideoOutput : public DirectMediaStreamTrackListener {
  protected:
   virtual ~VideoOutput() = default;
 
   void DropPastFrames() {
+    mMutex.AssertCurrentThreadOwns();
+
     TimeStamp now = TimeStamp::Now();
     size_t nrChunksInPast = 0;
     for (const auto& idChunkPair : mFrames) {
       const VideoChunk& chunk = idChunkPair.second();
       if (chunk.mTimeStamp > now) {
         break;
       }
       ++nrChunksInPast;
@@ -47,22 +49,19 @@ class VideoOutput : public DirectMediaSt
     if (nrChunksInPast > 1) {
       // We need to keep one frame that starts in the past, because it only ends
       // when the next frame starts (which also needs to be in the past for it
       // to drop).
       mFrames.RemoveElementsAt(0, nrChunksInPast - 1);
     }
   }
 
-  void SendFramesEnsureLocked() {
+  void SendFrames() {
     mMutex.AssertCurrentThreadOwns();
-    SendFrames();
-  }
 
-  void SendFrames() {
     DropPastFrames();
 
     if (mFrames.IsEmpty()) {
       return;
     }
 
     // Collect any new frames produced in this iteration.
     AutoTArray<ImageContainer::NonOwningImage, 16> images;
@@ -139,59 +138,38 @@ class VideoOutput : public DirectMediaSt
         // it seeks, as the previously buffered frames would stretch into the
         // future. If this happens, we clear the buffered frames and start over.
         mFrames.ClearAndRetainStorage();
       }
       mFrames.AppendElement(MakePair(mVideoFrameContainer->NewFrameID(), *i));
       mLastFrameTime = i->mTimeStamp;
     }
 
-    SendFramesEnsureLocked();
+    SendFrames();
   }
   void NotifyRemoved() override {
     // Doesn't need locking by mMutex, since the direct listener is removed from
     // the track before we get notified.
-    if (mFrames.Length() <= 1) {
-      // The compositor has already received the last frame.
-      mFrames.ClearAndRetainStorage();
-      mVideoFrameContainer->ClearFutureFrames();
-      return;
-    }
-
-    // The compositor has multiple frames. ClearFutureFrames() would only retain
-    // the first as that's normally the current one. We however stop doing
-    // SetCurrentFrames() once we've received the last frame in a track, so
-    // there might be old frames lingering. We'll find the current one and
-    // re-send that.
-    DropPastFrames();
-    mFrames.RemoveElementsAt(1, mFrames.Length() - 1);
-    SendFrames();
     mFrames.ClearAndRetainStorage();
+    mVideoFrameContainer->ClearFutureFrames();
   }
   void NotifyEnded() override {
     // Doesn't need locking by mMutex, since for the track to end, it must have
     // been ended by the source, meaning that the source won't append more data.
-    if (mFrames.IsEmpty()) {
-      return;
-    }
-
-    // Re-send only the last one to the compositor.
-    mFrames.RemoveElementsAt(0, mFrames.Length() - 1);
-    SendFrames();
     mFrames.ClearAndRetainStorage();
   }
   void NotifyEnabledStateChanged(bool aEnabled) override {
     MutexAutoLock lock(mMutex);
     mEnabled = aEnabled;
     // Since mEnabled will affect whether frames are real, or black, we assign
     // new FrameIDs whenever this changes.
     for (auto& idChunkPair : mFrames) {
       idChunkPair.first() = mVideoFrameContainer->NewFrameID();
     }
-    SendFramesEnsureLocked();
+    SendFrames();
   }
 
   Mutex mMutex;
   TimeStamp mLastFrameTime;
   // Once the frame is forced to black, we initialize mBlackImage for use in any
   // following forced-black frames.
   RefPtr<Image> mBlackImage;
   bool mEnabled = true;
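
The VideoStreamTrack.cpp changes fold SendFramesEnsureLocked() back into SendFrames() and restore the simpler NotifyRemoved()/NotifyEnded() paths. DropPastFrames() itself keeps its rule: frames are ordered by timestamp, and all but the most recent frame that has already started are dropped, because a past frame only ends when the next frame starts. A standalone sketch, with TimeStamp simplified to seconds:

#include <deque>

struct Chunk {
  double mTimeStamp;  // seconds; simplified from mozilla::TimeStamp
};

// Mirrors VideoOutput::DropPastFrames() above.
void DropPastFrames(std::deque<Chunk>& aFrames, double aNow) {
  size_t nrChunksInPast = 0;
  for (const Chunk& c : aFrames) {
    if (c.mTimeStamp > aNow) {
      break;
    }
    ++nrChunksInPast;
  }
  if (nrChunksInPast > 1) {
    // Keep one past frame: it is the currently displayed one, and it only
    // ends when the next (also past) frame starts.
    aFrames.erase(aFrames.begin(), aFrames.begin() + (nrChunksInPast - 1));
  }
}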
--- a/dom/media/mediasink/DecodedStream.cpp
+++ b/dom/media/mediasink/DecodedStream.cpp
@@ -44,128 +44,68 @@ class DecodedStreamTrackListener : publi
 
   void NotifyOutput(MediaStreamGraph* aGraph,
                     StreamTime aCurrentTrackTime) override;
   void NotifyEnded() override;
 
  private:
   const RefPtr<DecodedStreamGraphListener> mGraphListener;
   const RefPtr<SourceMediaStream> mStream;
-  const TrackID mTrackID;
+  const mozilla::TrackID mTrackID;
 };
 
 class DecodedStreamGraphListener {
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(DecodedStreamGraphListener)
  public:
   DecodedStreamGraphListener(
       SourceMediaStream* aStream, TrackID aAudioTrackID,
       MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedHolder,
       TrackID aVideoTrackID,
       MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedHolder,
       AbstractThread* aMainThread)
       : mAudioTrackListener(IsTrackIDExplicit(aAudioTrackID)
                                 ? MakeRefPtr<DecodedStreamTrackListener>(
                                       this, aStream, aAudioTrackID)
                                 : nullptr),
+        mAudioTrackID(aAudioTrackID),
         mAudioEndedHolder(std::move(aAudioEndedHolder)),
         mVideoTrackListener(IsTrackIDExplicit(aVideoTrackID)
                                 ? MakeRefPtr<DecodedStreamTrackListener>(
                                       this, aStream, aVideoTrackID)
                                 : nullptr),
+        mVideoTrackID(aVideoTrackID),
         mVideoEndedHolder(std::move(aVideoEndedHolder)),
         mStream(aStream),
-        mAudioTrackID(aAudioTrackID),
-        mVideoTrackID(aVideoTrackID),
         mAbstractMainThread(aMainThread) {
     MOZ_ASSERT(NS_IsMainThread());
     if (mAudioTrackListener) {
       mStream->AddTrackListener(mAudioTrackListener, mAudioTrackID);
     } else {
-      mAudioEnded = true;
       mAudioEndedHolder.ResolveIfExists(true, __func__);
     }
 
     if (mVideoTrackListener) {
       mStream->AddTrackListener(mVideoTrackListener, mVideoTrackID);
     } else {
-      mVideoEnded = true;
       mVideoEndedHolder.ResolveIfExists(true, __func__);
     }
   }
 
   void NotifyOutput(TrackID aTrackID, StreamTime aCurrentTrackTime) {
-    if (aTrackID == mAudioTrackID) {
-      if (aCurrentTrackTime >= mAudioEnd) {
-        mStream->EndTrack(mAudioTrackID);
-      }
-    } else if (aTrackID == mVideoTrackID) {
-      if (aCurrentTrackTime >= mVideoEnd) {
-        mStream->EndTrack(mVideoTrackID);
-      }
-    } else {
-      MOZ_CRASH("Unexpected TrackID");
-    }
-    if (aTrackID != mAudioTrackID && mAudioTrackID != TRACK_NONE &&
-        !mAudioEnded) {
-      // Only audio playout drives the clock forward, if present and live.
+    if (aTrackID != mAudioTrackID && mAudioTrackID != TRACK_NONE) {
+      // Only audio playout drives the clock forward, if present.
       return;
     }
-    MOZ_ASSERT_IF(aTrackID == mAudioTrackID, !mAudioEnded);
-    MOZ_ASSERT_IF(aTrackID == mVideoTrackID, !mVideoEnded);
     mOnOutput.Notify(mStream->StreamTimeToMicroseconds(aCurrentTrackTime));
   }
 
-  void NotifyEnded(TrackID aTrackID) {
-    if (aTrackID == mAudioTrackID) {
-      mAudioEnded = true;
-    } else if (aTrackID == mVideoTrackID) {
-      mVideoEnded = true;
-    } else {
-      MOZ_CRASH("Unexpected TrackID");
-    }
-    mStream->Graph()->DispatchToMainThreadStableState(
-        NewRunnableMethod<TrackID>(
-            "DecodedStreamGraphListener::DoNotifyTrackEnded", this,
-            &DecodedStreamGraphListener::DoNotifyTrackEnded, aTrackID));
-  }
-
   TrackID AudioTrackID() const { return mAudioTrackID; }
 
   TrackID VideoTrackID() const { return mVideoTrackID; }
 
-  /**
-   * Tell the graph listener to end the given track after it has seen at least
-   * aEnd worth of output reported as processed by the graph.
-   *
-   * A StreamTime of STREAM_TIME_MAX indicates that the track has no end and is
-   * the default.
-   *
-   * This method of ending tracks is needed because the MediaStreamGraph
-   * processes ended tracks (through SourceMediaStream::EndTrack) at the
-   * beginning of an iteration, but waits until the end of the iteration to
-   * process any ControlMessages. When such a ControlMessage is a listener that
-   * is to be added to a track that has ended in its very first iteration, the
-   * track ends before the listener tracking this ending is added. This can lead
-   * to a MediaStreamTrack ending on main thread (it uses another listener)
-   * before the listeners to render the track get added, potentially meaning a
-   * media element doesn't progress before reaching the end although data was
-   * available.
-   *
-   * Callable from any thread.
-   */
-  void EndTrackAt(TrackID aTrackID, StreamTime aEnd) {
-    if (aTrackID == mAudioTrackID) {
-      mAudioEnd = aEnd;
-    } else if (aTrackID == mVideoTrackID) {
-      mVideoEnd = aEnd;
-    } else {
-      MOZ_CRASH("Unexpected TrackID");
-    }
-  }
-
   void DoNotifyTrackEnded(TrackID aTrackID) {
     MOZ_ASSERT(NS_IsMainThread());
     if (aTrackID == mAudioTrackID) {
       mAudioEndedHolder.ResolveIfExists(true, __func__);
     } else if (aTrackID == mVideoTrackID) {
       mVideoEndedHolder.ResolveIfExists(true, __func__);
     } else {
       MOZ_CRASH("Unexpected track id");
@@ -197,45 +137,41 @@ class DecodedStreamGraphListener {
     MOZ_ASSERT(mAudioEndedHolder.IsEmpty());
     MOZ_ASSERT(mVideoEndedHolder.IsEmpty());
   }
 
   MediaEventProducer<int64_t> mOnOutput;
 
   // Main thread only.
   RefPtr<DecodedStreamTrackListener> mAudioTrackListener;
+  const TrackID mAudioTrackID;
   MozPromiseHolder<DecodedStream::EndedPromise> mAudioEndedHolder;
   RefPtr<DecodedStreamTrackListener> mVideoTrackListener;
+  const TrackID mVideoTrackID;
   MozPromiseHolder<DecodedStream::EndedPromise> mVideoEndedHolder;
 
-  // Graph thread only.
-  bool mAudioEnded = false;
-  bool mVideoEnded = false;
-
-  // Any thread.
   const RefPtr<SourceMediaStream> mStream;
-  const TrackID mAudioTrackID;
-  Atomic<StreamTime> mAudioEnd{STREAM_TIME_MAX};
-  const TrackID mVideoTrackID;
-  Atomic<StreamTime> mVideoEnd{STREAM_TIME_MAX};
   const RefPtr<AbstractThread> mAbstractMainThread;
 };
 
 DecodedStreamTrackListener::DecodedStreamTrackListener(
     DecodedStreamGraphListener* aGraphListener, SourceMediaStream* aStream,
-    TrackID aTrackID)
+    mozilla::TrackID aTrackID)
     : mGraphListener(aGraphListener), mStream(aStream), mTrackID(aTrackID) {}
 
 void DecodedStreamTrackListener::NotifyOutput(MediaStreamGraph* aGraph,
                                               StreamTime aCurrentTrackTime) {
   mGraphListener->NotifyOutput(mTrackID, aCurrentTrackTime);
 }
 
 void DecodedStreamTrackListener::NotifyEnded() {
-  mGraphListener->NotifyEnded(mTrackID);
+  mStream->Graph()->DispatchToMainThreadStableState(
+      NewRunnableMethod<mozilla::TrackID>(
+          "DecodedStreamGraphListener::DoNotifyTrackEnded", mGraphListener,
+          &DecodedStreamGraphListener::DoNotifyTrackEnded, mTrackID));
 }
 
 /*
  * All MediaStream-related data is protected by the decoder's monitor.
 * We have at most one DecodedStreamData per MediaDecoder. Its stream
  * is used as the input for each ProcessedMediaStream created by calls to
  * captureStream(UntilEnded). Seeking creates a new source stream, as does
 * replaying after the input has ended. In the latter case, the new source is
@@ -248,46 +184,30 @@ class DecodedStreamData {
       MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedPromise,
       MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise,
       AbstractThread* aMainThread);
   ~DecodedStreamData();
   MediaEventSource<int64_t>& OnOutput();
   void Forget();
   nsCString GetDebugInfo();
 
-  void WriteVideoToSegment(layers::Image* aImage, const TimeUnit& aStart,
-                           const TimeUnit& aEnd,
-                           const gfx::IntSize& aIntrinsicSize,
-                           const TimeStamp& aTimeStamp, VideoSegment* aOutput,
-                           const PrincipalHandle& aPrincipalHandle);
-
   /* The following group of fields are protected by the decoder's monitor
    * and can be read or written on any thread.
    */
   // Count of audio frames written to the stream
   int64_t mAudioFramesWritten;
   // Count of video frames written to the stream in the stream's rate
   StreamTime mStreamVideoWritten;
   // Count of audio frames written to the stream in the stream's rate
   StreamTime mStreamAudioWritten;
-  // mNextAudioTime is the end timestamp for the last packet sent to the stream.
-  // Therefore audio packets starting at or after this time need to be copied
-  // to the output stream.
-  TimeUnit mNextAudioTime;
-  // mLastVideoStartTime is the start timestamp for the last packet sent to the
-  // stream. Therefore video packets starting after this time need to be copied
+  // mNextVideoTime is the end timestamp for the last packet sent to the stream.
+  // Therefore video packets starting at or after this time need to be copied
   // to the output stream.
-  Maybe<TimeUnit> mLastVideoStartTime;
-  // mLastVideoEndTime is the end timestamp for the last packet sent to the
-  // stream. It is used to adjust durations of chunks sent to the output stream
-  // when there are overlaps in VideoData.
-  Maybe<TimeUnit> mLastVideoEndTime;
-  // The timestamp of the last frame, so we can ensure time never goes
-  // backwards.
-  TimeStamp mLastVideoTimeStamp;
+  TimeUnit mNextVideoTime;
+  TimeUnit mNextAudioTime;
   // The last video image sent to the stream. Useful if we need to replicate
   // the image.
   RefPtr<layers::Image> mLastVideoImage;
   gfx::IntSize mLastVideoImageDisplaySize;
   bool mHaveSentFinishAudio;
   bool mHaveSentFinishVideo;
 
   // The decoder is responsible for calling Destroy() on this stream.
@@ -301,16 +221,17 @@ class DecodedStreamData {
 DecodedStreamData::DecodedStreamData(
     OutputStreamManager* aOutputStreamManager, PlaybackInfoInit&& aInit,
     MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedPromise,
     MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise,
     AbstractThread* aMainThread)
     : mAudioFramesWritten(0),
       mStreamVideoWritten(0),
       mStreamAudioWritten(0),
+      mNextVideoTime(aInit.mStartTime),
       mNextAudioTime(aInit.mStartTime),
       mHaveSentFinishAudio(false),
       mHaveSentFinishVideo(false),
       mStream(aOutputStreamManager->mSourceStream),
       // DecodedStreamGraphListener will resolve these promises.
       mListener(MakeRefPtr<DecodedStreamGraphListener>(
           mStream, aInit.mAudioTrackID, std::move(aAudioEndedPromise),
           aInit.mVideoTrackID, std::move(aVideoEndedPromise), aMainThread)),
@@ -354,25 +275,20 @@ MediaEventSource<int64_t>& DecodedStream
 }
 
 void DecodedStreamData::Forget() { mListener->Forget(); }
 
 nsCString DecodedStreamData::GetDebugInfo() {
   return nsPrintfCString(
       "DecodedStreamData=%p mAudioFramesWritten=%" PRId64
       " mStreamAudioWritten=%" PRId64 " mStreamVideoWritten=%" PRId64
-      " mNextAudioTime=%" PRId64 " mLastVideoStartTime=%" PRId64
-      " mLastVideoEndTime=%" PRId64
-      " mHaveSentFinishAudio=%d mHaveSentFinishVideo=%d",
+      " mNextAudioTime=%" PRId64 " mNextVideoTime=%" PRId64
+      "mHaveSentFinishAudio=%d mHaveSentFinishVideo=%d",
       this, mAudioFramesWritten, mStreamAudioWritten, mStreamVideoWritten,
-      mNextAudioTime.ToMicroseconds(),
-      mLastVideoStartTime.valueOr(TimeUnit::FromMicroseconds(-1))
-          .ToMicroseconds(),
-      mLastVideoEndTime.valueOr(TimeUnit::FromMicroseconds(-1))
-          .ToMicroseconds(),
+      mNextAudioTime.ToMicroseconds(), mNextVideoTime.ToMicroseconds(),
       mHaveSentFinishAudio, mHaveSentFinishVideo);
 }
 
 DecodedStream::DecodedStream(AbstractThread* aOwnerThread,
                              AbstractThread* aMainThread,
                              MediaQueue<AudioData>& aAudioQueue,
                              MediaQueue<VideoData>& aVideoQueue,
                              OutputStreamManager* aOutputStreamManager,
@@ -483,17 +399,17 @@ nsresult DecodedStream::Start(const Time
   mAudioEndedPromise = audioEndedHolder.Ensure(__func__);
   MozPromiseHolder<DecodedStream::EndedPromise> videoEndedHolder;
   mVideoEndedPromise = videoEndedHolder.Ensure(__func__);
   PlaybackInfoInit init{aStartTime, aInfo, TRACK_INVALID, TRACK_INVALID};
   nsCOMPtr<nsIRunnable> r = new R(std::move(init), std::move(audioEndedHolder),
                                   std::move(videoEndedHolder),
                                   mOutputStreamManager, mAbstractMainThread);
   SyncRunnable::DispatchToThread(
-      SystemGroup::EventTargetFor(TaskCategory::Other), r);
+      SystemGroup::EventTargetFor(mozilla::TaskCategory::Other), r);
   mData = static_cast<R*>(r.get())->ReleaseData();
 
   if (mData) {
     mInfo.mAudio.mTrackId = mData->mListener->AudioTrackID();
     mInfo.mVideo.mTrackId = mData->mListener->VideoTrackID();
     mOutputListener = mData->OnOutput().Connect(mOwnerThread, this,
                                                 &DecodedStream::NotifyOutput);
     SendData();
@@ -648,46 +564,46 @@ void DecodedStream::SendAudio(double aVo
 
   output.ApplyVolume(aVolume);
 
   if (!aIsSameOrigin) {
     output.ReplaceWithDisabled();
   }
 
   // |mNextAudioTime| is updated as we process each audio sample in
-  // SendStreamAudio().
+  // SendStreamAudio(). This is consistent with how |mNextVideoTime|
+  // is updated for video samples.
   if (output.GetDuration() > 0) {
     mData->mStreamAudioWritten +=
         sourceStream->AppendToTrack(audioTrackId, &output);
   }
 
   if (mAudioQueue.IsFinished() && !mData->mHaveSentFinishAudio) {
-    mData->mListener->EndTrackAt(audioTrackId, mData->mStreamAudioWritten);
+    sourceStream->EndTrack(audioTrackId);
     mData->mHaveSentFinishAudio = true;
   }
 }
 
-void DecodedStreamData::WriteVideoToSegment(
-    layers::Image* aImage, const TimeUnit& aStart, const TimeUnit& aEnd,
-    const gfx::IntSize& aIntrinsicSize, const TimeStamp& aTimeStamp,
-    VideoSegment* aOutput, const PrincipalHandle& aPrincipalHandle) {
+static void WriteVideoToMediaStream(MediaStream* aStream, layers::Image* aImage,
+                                    const TimeUnit& aStart,
+                                    const TimeUnit& aEnd,
+                                    const mozilla::gfx::IntSize& aIntrinsicSize,
+                                    const TimeStamp& aTimeStamp,
+                                    VideoSegment* aOutput,
+                                    const PrincipalHandle& aPrincipalHandle) {
   RefPtr<layers::Image> image = aImage;
-  auto end = mStream->MicrosecondsToStreamTimeRoundDown(aEnd.ToMicroseconds());
+  auto end = aStream->MicrosecondsToStreamTimeRoundDown(aEnd.ToMicroseconds());
   auto start =
-      mStream->MicrosecondsToStreamTimeRoundDown(aStart.ToMicroseconds());
+      aStream->MicrosecondsToStreamTimeRoundDown(aStart.ToMicroseconds());
   aOutput->AppendFrame(image.forget(), aIntrinsicSize, aPrincipalHandle, false,
                        aTimeStamp);
   // Extend this so we get accurate durations for all frames.
   // Because this track is pushed, we need durations so the graph can track
   // when playout of the track has finished.
   aOutput->ExtendLastFrameBy(end - start);
-
-  mLastVideoStartTime = Some(aStart);
-  mLastVideoEndTime = Some(aEnd);
-  mLastVideoTimeStamp = aTimeStamp;
 }
 
 static bool ZeroDurationAtLastChunk(VideoSegment& aInput) {
   // Get the last video frame's start time in VideoSegment aInput.
   // If the start time is equal to the duration of aInput, means the last video
   // frame's duration is zero.
  StreamTime lastVideoStartTime;
  aInput.GetLastFrame(&lastVideoStartTime);
@@ -715,30 +631,25 @@ void DecodedStream::ResetVideo(const Pri
   // an ugly hack because the direct listeners of the MediaStreamGraph do not
   // have an API that supports clearing the future frames. ImageContainer and
   // VideoFrameContainer do though, and we will need to move to a similar API
   // for video tracks as part of bug 1493618.
   resetter.AppendFrame(nullptr, mData->mLastVideoImageDisplaySize,
                        aPrincipalHandle, false, currentTime);
   mData->mStream->AppendToTrack(mInfo.mVideo.mTrackId, &resetter);
 
-  // Consumer buffers have been reset. We now set the next time to the start
+  // Consumer buffers have been reset. We now set mNextVideoTime to the start
   // time of the current frame, so that it can be displayed again on resuming.
   if (RefPtr<VideoData> v = mVideoQueue.PeekFront()) {
-    mData->mLastVideoStartTime = Some(v->mTime - TimeUnit::FromMicroseconds(1));
-    mData->mLastVideoEndTime = Some(v->mTime);
+    mData->mNextVideoTime = v->mTime;
   } else {
-    // There was no current frame in the queue. We set the next time to the
-    // current time, so we at least don't resume starting in the future.
-    mData->mLastVideoStartTime =
-        Some(currentPosition - TimeUnit::FromMicroseconds(1));
-    mData->mLastVideoEndTime = Some(currentPosition);
+    // There was no current frame in the queue. We set the next time to push to
+    // the current time, so we at least don't resume starting in the future.
+    mData->mNextVideoTime = currentPosition;
   }
-
-  mData->mLastVideoTimeStamp = currentTime;
 }
 
 void DecodedStream::SendVideo(bool aIsSameOrigin,
                               const PrincipalHandle& aPrincipalHandle) {
   AssertOwnerThread();
 
   if (!mInfo.HasVideo()) {
     return;
@@ -750,63 +661,53 @@ void DecodedStream::SendVideo(bool aIsSa
 
   VideoSegment output;
   TrackID videoTrackId = mInfo.mVideo.mTrackId;
   AutoTArray<RefPtr<VideoData>, 10> video;
   SourceMediaStream* sourceStream = mData->mStream;
 
   // It's OK to hold references to the VideoData because VideoData
   // is ref-counted.
-  mVideoQueue.GetElementsAfter(
-      mData->mLastVideoStartTime.valueOr(mStartTime.ref()), &video);
+  mVideoQueue.GetElementsAfter(mData->mNextVideoTime, &video);
 
   TimeStamp currentTime;
   TimeUnit currentPosition = GetPosition(&currentTime);
 
-  if (mData->mLastVideoTimeStamp.IsNull()) {
-    mData->mLastVideoTimeStamp = currentTime;
-  }
-
   for (uint32_t i = 0; i < video.Length(); ++i) {
     VideoData* v = video[i];
-    TimeUnit lastStart = mData->mLastVideoStartTime.valueOr(mStartTime.ref());
-    TimeUnit lastEnd = mData->mLastVideoEndTime.valueOr(mStartTime.ref());
 
-    if (lastEnd < v->mTime) {
+    if (mData->mNextVideoTime < v->mTime) {
       // Write last video frame to catch up. mLastVideoImage can be null here
       // which is fine, it just means there's no video.
 
       // TODO: |mLastVideoImage| should come from the last image rendered
       // by the state machine. This will avoid the black frame when capture
      // happens in the middle of playback (especially in the middle of a
       // video frame). E.g. if we have a video frame that is 30 sec long
       // and capture happens at 15 sec, we'll have to append a black frame
       // that is 15 sec long.
-      TimeStamp t =
-          std::max(mData->mLastVideoTimeStamp,
-                   currentTime + (lastEnd - currentPosition).ToTimeDuration());
-      mData->WriteVideoToSegment(mData->mLastVideoImage, lastEnd, v->mTime,
-                                 mData->mLastVideoImageDisplaySize, t, &output,
-                                 aPrincipalHandle);
-    } else if (lastStart < v->mTime) {
-      // This frame starts after the last frame's start. Note that this could be
-      // before the last frame's end time for some videos. This only matters for
-      // the track's lifetime in the MSG, as rendering is based on timestamps,
-      // aka frame start times.
-      TimeStamp t =
-          std::max(mData->mLastVideoTimeStamp,
-                   currentTime + (lastEnd - currentPosition).ToTimeDuration());
-      TimeUnit end = std::max(
-          v->GetEndTime(),
-          lastEnd + TimeUnit::FromMicroseconds(
-                        sourceStream->StreamTimeToMicroseconds(1) + 1));
+      WriteVideoToMediaStream(
+          sourceStream, mData->mLastVideoImage, mData->mNextVideoTime, v->mTime,
+          mData->mLastVideoImageDisplaySize,
+          currentTime +
+              (mData->mNextVideoTime - currentPosition).ToTimeDuration(),
+          &output, aPrincipalHandle);
+      mData->mNextVideoTime = v->mTime;
+    }
+
+    if (mData->mNextVideoTime < v->GetEndTime()) {
+      WriteVideoToMediaStream(
+          sourceStream, v->mImage, mData->mNextVideoTime, v->GetEndTime(),
+          v->mDisplay,
+          currentTime +
+              (mData->mNextVideoTime - currentPosition).ToTimeDuration(),
+          &output, aPrincipalHandle);
+      mData->mNextVideoTime = v->GetEndTime();
       mData->mLastVideoImage = v->mImage;
       mData->mLastVideoImageDisplaySize = v->mDisplay;
-      mData->WriteVideoToSegment(v->mImage, lastEnd, end, v->mDisplay, t,
-                                 &output, aPrincipalHandle);
     }
   }
 
   // Check the output is not empty.
   bool compensateEOS = false;
   if (output.GetLastFrame()) {
     compensateEOS = ZeroDurationAtLastChunk(output);
   }
@@ -816,49 +717,36 @@ void DecodedStream::SendVideo(bool aIsSa
   }
 
   if (output.GetDuration() > 0) {
     mData->mStreamVideoWritten +=
         sourceStream->AppendToTrack(videoTrackId, &output);
   }
 
   if (mVideoQueue.IsFinished() && !mData->mHaveSentFinishVideo) {
-    if (!mData->mLastVideoImage) {
-      // We have video, but the video queue finished before we received any
-      // frame. We insert a black frame to progress any consuming
-      // HTMLMediaElement. This mirrors the behavior of VideoSink.
-
-      // Force a frame - can be null
-      compensateEOS = true;
-      // Force frame to be black
-      aIsSameOrigin = false;
-      // Override the frame's size (will be 0x0 otherwise)
-      mData->mLastVideoImageDisplaySize = mInfo.mVideo.mDisplay;
-    }
     if (compensateEOS) {
       VideoSegment endSegment;
       // Calculate the deviation clock time from DecodedStream.
-      // We round the nr of microseconds up, because WriteVideoToSegment
-      // will round the conversion from microseconds to StreamTime down.
-      auto deviation = TimeUnit::FromMicroseconds(
-          sourceStream->StreamTimeToMicroseconds(1) + 1);
-      auto start = mData->mLastVideoEndTime.valueOr(mStartTime.ref());
-      mData->WriteVideoToSegment(
-          mData->mLastVideoImage, start, start + deviation,
-          mData->mLastVideoImageDisplaySize,
-          currentTime + (start + deviation - currentPosition).ToTimeDuration(),
+      auto deviation =
+          FromMicroseconds(sourceStream->StreamTimeToMicroseconds(1));
+      WriteVideoToMediaStream(
+          sourceStream, mData->mLastVideoImage, mData->mNextVideoTime,
+          mData->mNextVideoTime + deviation, mData->mLastVideoImageDisplaySize,
+          currentTime + (mData->mNextVideoTime + deviation - currentPosition)
+                            .ToTimeDuration(),
           &endSegment, aPrincipalHandle);
+      mData->mNextVideoTime += deviation;
       MOZ_ASSERT(endSegment.GetDuration() > 0);
       if (!aIsSameOrigin) {
         endSegment.ReplaceWithDisabled();
       }
       mData->mStreamVideoWritten +=
           sourceStream->AppendToTrack(videoTrackId, &endSegment);
     }
-    mData->mListener->EndTrackAt(videoTrackId, mData->mStreamVideoWritten);
+    sourceStream->EndTrack(videoTrackId);
     mData->mHaveSentFinishVideo = true;
   }
 }
 
 StreamTime DecodedStream::SentDuration() {
   AssertOwnerThread();
 
   if (!mData) {
@@ -889,17 +777,17 @@ TimeUnit DecodedStream::GetEndTime(Track
   AssertOwnerThread();
   if (aType == TrackInfo::kAudioTrack && mInfo.HasAudio() && mData) {
     auto t = mStartTime.ref() +
              FramesToTimeUnit(mData->mAudioFramesWritten, mInfo.mAudio.mRate);
     if (t.IsValid()) {
       return t;
     }
   } else if (aType == TrackInfo::kVideoTrack && mData) {
-    return mData->mLastVideoEndTime.valueOr(mStartTime.ref());
+    return mData->mNextVideoTime;
   }
   return TimeUnit::Zero();
 }
 
 TimeUnit DecodedStream::GetPosition(TimeStamp* aTimeStamp) const {
   AssertOwnerThread();
   // This is only called after MDSM starts playback. So mStartTime is
   // guaranteed to be something.
@@ -907,22 +795,17 @@ TimeUnit DecodedStream::GetPosition(Time
   if (aTimeStamp) {
     *aTimeStamp = TimeStamp::Now();
   }
   return mStartTime.ref() + mLastOutputTime;
 }
 
 void DecodedStream::NotifyOutput(int64_t aTime) {
   AssertOwnerThread();
-  TimeUnit time = TimeUnit::FromMicroseconds(aTime);
-  if (time == mLastOutputTime) {
-    return;
-  }
-  MOZ_ASSERT(mLastOutputTime < time);
-  mLastOutputTime = time;
+  mLastOutputTime = FromMicroseconds(aTime);
   auto currentTime = GetPosition();
 
   // Remove audio samples that have been played by MSG from the queue.
   RefPtr<AudioData> a = mAudioQueue.PeekFront();
   for (; a && a->mTime < currentTime;) {
     RefPtr<AudioData> releaseMe = mAudioQueue.PopFront();
     a = mAudioQueue.PeekFront();
   }
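
The DecodedStream.cpp hunks replace the landed mLastVideoStartTime/mLastVideoEndTime pair with the single restored write cursor mNextVideoTime: SendVideo() replays the previous image to bridge any gap before a frame, appends the frame itself, and advances the cursor to the frame's end time. A minimal sketch with simplified types; TimeUnit is reduced to microseconds, images to strings, and Written is a hypothetical stand-in for appending to the track.

#include <cstdint>
#include <string>
#include <vector>

using TimeUnit = int64_t;  // microseconds; simplified from media::TimeUnit

struct VideoData {
  TimeUnit mTime;      // frame start
  TimeUnit mEndTime;   // frame end
  std::string mImage;
};

struct Written {  // hypothetical: one chunk appended to the output track
  TimeUnit start;
  TimeUnit end;
  std::string image;
};

struct StreamState {
  TimeUnit mNextVideoTime = 0;  // end timestamp of the last packet sent
  std::string mLastVideoImage;  // replayed to bridge gaps
};

// Mirrors the restored frame loop in DecodedStream::SendVideo() above.
void SendVideo(StreamState& aData, const std::vector<VideoData>& aQueue,
               std::vector<Written>& aOutput) {
  for (const VideoData& v : aQueue) {
    if (aData.mNextVideoTime < v.mTime) {
      // Catch up: replay the last image over [mNextVideoTime, v.mTime).
      aOutput.push_back({aData.mNextVideoTime, v.mTime, aData.mLastVideoImage});
      aData.mNextVideoTime = v.mTime;
    }
    if (aData.mNextVideoTime < v.mEndTime) {
      // Write the frame itself and advance the cursor to its end time.
      aOutput.push_back({aData.mNextVideoTime, v.mEndTime, v.mImage});
      aData.mNextVideoTime = v.mEndTime;
      aData.mLastVideoImage = v.mImage;
    }
  }
}

With this bookkeeping, GetEndTime() for video is simply the current value of mNextVideoTime, as the final DecodedStream.cpp hunk above restores.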
--- a/dom/media/mediasink/DecodedStream.h
+++ b/dom/media/mediasink/DecodedStream.h
@@ -68,16 +68,19 @@ class DecodedStream : public MediaSink {
   void Shutdown() override;
 
   nsCString GetDebugInfo() override;
 
  protected:
   virtual ~DecodedStream();
 
  private:
+  media::TimeUnit FromMicroseconds(int64_t aTime) {
+    return media::TimeUnit::FromMicroseconds(aTime);
+  }
   void DestroyData(UniquePtr<DecodedStreamData>&& aData);
   void SendAudio(double aVolume, bool aIsSameOrigin,
                  const PrincipalHandle& aPrincipalHandle);
   void SendVideo(bool aIsSameOrigin, const PrincipalHandle& aPrincipalHandle);
   void ResetVideo(const PrincipalHandle& aPrincipalHandle);
   StreamTime SentDuration();
   void SendData();
   void NotifyOutput(int64_t aTime);
--- a/dom/media/test/manifest.js
+++ b/dom/media/test/manifest.js
@@ -195,51 +195,51 @@ var gPlayTests = [
   // Ogg stream without eof marker
   { name:"bug461281.ogg", type:"application/ogg", duration:2.208 },
 
   // oggz-chop stream
   { name:"bug482461.ogv", type:"video/ogg", duration:4.34 },
   // Theora only oggz-chop stream
   { name:"bug482461-theora.ogv", type:"video/ogg", duration:4.138 },
   // With first frame a "duplicate" (empty) frame.
-  { name:"bug500311.ogv", type:"video/ogg", duration:1.96, contentDuration:1.958 },
+  { name:"bug500311.ogv", type:"video/ogg", duration:1.96 },
   // Small audio file
   { name:"small-shot.ogg", type:"audio/ogg", duration:0.276 },
   // More audio in file than video.
   { name:"short-video.ogv", type:"video/ogg", duration:1.081 },
   // First Theora data packet is zero bytes.
   { name:"bug504613.ogv", type:"video/ogg", duration:Number.NaN },
   // Multiple audio streams.
   { name:"bug516323.ogv", type:"video/ogg", duration:4.208 },
   // oggz-chop with non-keyframe as first frame
-  { name:"bug556821.ogv", type:"video/ogg", duration:2.936, contentDuration:2.903 },
+  { name:"bug556821.ogv", type:"video/ogg", duration:2.936 },
 
   // Encoded with vorbis beta1, includes unusually sized codebooks
   { name:"beta-phrasebook.ogg", type:"audio/ogg", duration:4.01 },
   // Small file, only 1 frame with audio only.
   { name:"bug520493.ogg", type:"audio/ogg", duration:0.458 },
   // Small file with vorbis comments with 0 length values and names.
   { name:"bug520500.ogg", type:"audio/ogg", duration:0.123 },
 
   // Various weirdly formed Ogg files
-  { name:"bug499519.ogv", type:"video/ogg", duration:0.24, contentDuration:0.22 },
+  { name:"bug499519.ogv", type:"video/ogg", duration:0.24 },
   { name:"bug506094.ogv", type:"video/ogg", duration:0 },
   { name:"bug498855-1.ogv", type:"video/ogg", duration:0.24 },
   { name:"bug498855-2.ogv", type:"video/ogg", duration:0.24 },
   { name:"bug498855-3.ogv", type:"video/ogg", duration:0.24 },
-  { name:"bug504644.ogv", type:"video/ogg", duration:1.6, contentDuration:1.52 },
-  { name:"chain.ogv", type:"video/ogg", duration:Number.NaN, contentDuration:0.266 },
-  { name:"bug523816.ogv", type:"video/ogg", duration:0.766, contentDuration:0 },
+  { name:"bug504644.ogv", type:"video/ogg", duration:1.6 },
+  { name:"chain.ogv", type:"video/ogg", duration:Number.NaN },
+  { name:"bug523816.ogv", type:"video/ogg", duration:0.766 },
   { name:"bug495129.ogv", type:"video/ogg", duration:2.41 },
-  { name:"bug498380.ogv", type:"video/ogg", duration:0.7663, contentDuration:0 },
+  { name:"bug498380.ogv", type:"video/ogg", duration:0.7663 },
   { name:"bug495794.ogg", type:"audio/ogg", duration:0.3 },
   { name:"bug557094.ogv", type:"video/ogg", duration:0.24 },
   { name:"multiple-bos.ogg", type:"video/ogg", duration:0.431 },
-  { name:"audio-overhang.ogg", type:"video/ogg", duration:2.3 },
-  { name:"video-overhang.ogg", type:"video/ogg", duration:3.966 },
+  { name:"audio-overhang.ogg", type:"audio/ogg", duration:2.3 },
+  { name:"video-overhang.ogg", type:"audio/ogg", duration:3.966 },
 
   // bug461281.ogg with the middle second chopped out.
   { name:"audio-gaps.ogg", type:"audio/ogg", duration:2.208 },
 
   // Test playback/metadata work after a redirect
   { name:"redirect.sjs?domain=mochi.test:8888&file=320x240.ogv",
     type:"video/ogg", duration:0.266 },
 
@@ -256,21 +256,21 @@ var gPlayTests = [
   { name:"resolution-change.webm", type:"video/webm", duration:6.533 },
 
   // A really short, low sample rate, single channel file. This tests whether
   // we can handle playing files when only push very little audio data to the
   // hardware.
   { name:"spacestorm-1000Hz-100ms.ogg", type:"audio/ogg", duration:0.099 },
 
   // Opus data in an ogg container
-  { name:"detodos-short.opus", type:"audio/ogg; codecs=opus", duration:0.22, contentDuration:0.2135 },
+  { name:"detodos-short.opus", type:"audio/ogg; codecs=opus", duration:0.22 },
   // Opus data in a webm container
-  { name:"detodos-short.webm", type:"audio/webm; codecs=opus", duration:0.26, contentDuration:0.2535 },
+  { name:"detodos-short.webm", type:"audio/webm; codecs=opus", duration:0.26 },
   // Opus in webm channel mapping=2 sample file
-  { name:"opus-mapping2.webm", type:"audio/webm; codecs=opus", duration:10.01, contentDuration:9.99 },
+  { name:"opus-mapping2.webm", type:"audio/webm; codecs=opus", duration:10.01 },
   { name:"bug1066943.webm", type:"audio/webm; codecs=opus", duration:1.383 },
 
   // Multichannel Opus in an ogg container
   { name:"test-1-mono.opus", type:"audio/ogg; codecs=opus", duration:1.044 },
   { name:"test-2-stereo.opus", type:"audio/ogg; codecs=opus", duration:2.925 },
   { name:"test-3-LCR.opus", type:"audio/ogg; codecs=opus", duration:4.214 },
   { name:"test-4-quad.opus", type:"audio/ogg; codecs=opus", duration:6.234 },
   { name:"test-5-5.0.opus", type:"audio/ogg; codecs=opus", duration:7.558 },
@@ -280,18 +280,18 @@ var gPlayTests = [
 
   { name:"gizmo-short.mp4", type:"video/mp4", duration:0.27 },
   // Test playback of an MP4 file with a non-zero start time (and audio starting
   // a second later).
   { name:"bipbop-lateaudio.mp4", type:"video/mp4" },
   // Ambisonics AAC, requires AAC extradata to be set when creating decoder (see bug 1431169)
   // Also test 4.0 decoding.
   { name:"ambisonics.mp4", type:"audio/mp4", duration:16.48 },
-  // Opus in MP4 channel mapping=0 sample file (content shorter due to preskip)
-  { name:"opus-sample.mp4", type:"audio/mp4; codecs=opus", duration:10.92, contentDuration:10.09 },
+  // Opus in MP4 channel mapping=0 sample file
+  { name:"opus-sample.mp4", type:"audio/mp4; codecs=opus", duration:10.92 },
   // Opus in MP4 channel mapping=2 sample file
   { name:"opus-mapping2.mp4", type:"audio/mp4; codecs=opus", duration:10.0 },
 
   { name:"small-shot.m4a", type:"audio/mp4", duration:0.29 },
   { name:"small-shot.mp3", type:"audio/mpeg", duration:0.27 },
   { name:"owl.mp3", type:"audio/mpeg", duration:3.343 },
   // owl.mp3 as above, but with something funny going on in the ID3v2 tag
   // that caused DirectShow to fail.
@@ -303,23 +303,23 @@ var gPlayTests = [
   // frame is at such a high offset into the file, MP3FrameParser will give up
   // and report that the stream is not MP3. However, it does not count ID3 tags
   // in that offset. This test case makes sure that ID3 exclusion holds.
   { name:"huge-id3.mp3", type:"audio/mpeg", duration:1.00 },
   // A truncated VBR MP3 with just enough frames to keep most decoders happy.
   // The Xing header reports the length of the file to be around 10 seconds, but
   // there is really only one second's worth of data. We want MP3FrameParser to
   // trust the header, so this should be reported as 10 seconds.
-  { name:"vbr-head.mp3", type:"audio/mpeg", duration:10.00, contentDuration:1.019 },
+  { name:"vbr-head.mp3", type:"audio/mpeg", duration:10.00 },
 
   // A flac file where the STREAMINFO block was removed.
   // It is necessary to parse the file to find an audio frame instead.
   { name:"flac-noheader-s16.flac", type:"audio/flac", duration:4.0 },
   { name:"flac-s24.flac", type:"audio/flac", duration:4.04 },
-  { name:"flac-sample.mp4", type:"audio/mp4; codecs=flac", duration:4.95, contentDuration:5.03 },
+  { name:"flac-sample.mp4", type:"audio/mp4; codecs=flac", duration:4.95 },
   // Ogg with theora video and flac audio.
   { name:"A4.ogv", type:"video/ogg", width:320, height:240, duration:3.13 },
 
   // Invalid file
   { name:"bogus.duh", type:"bogus/duh", duration:Number.NaN },
 ];
 
 if (!(manifestNavigator().userAgent.includes("Windows") &&
@@ -349,17 +349,17 @@ var gSeekToNextFrameTests = [
   // Various weirdly formed Ogg files
   { name:"bug498855-1.ogv", type:"video/ogg", duration:0.24 },
   { name:"bug498855-2.ogv", type:"video/ogg", duration:0.24 },
   { name:"bug498855-3.ogv", type:"video/ogg", duration:0.24 },
   { name:"bug504644.ogv", type:"video/ogg", duration:1.6 },
 
   { name:"bug523816.ogv", type:"video/ogg", duration:0.766 },
 
-  { name:"bug498380.ogv", type:"video/ogg", duration:0.2 },
+  { name:"bug498380.ogv", type:"video/ogg", duration:0.766 },
   { name:"bug557094.ogv", type:"video/ogg", duration:0.24 },
   { name:"multiple-bos.ogg", type:"video/ogg", duration:0.431 },
   // Test playback/metadata work after a redirect
   { name:"redirect.sjs?domain=mochi.test:8888&file=320x240.ogv",
     type:"video/ogg", duration:0.266 },
   // Test playback of a webm file
   { name:"seek-short.webm", type:"video/webm", duration:0.23 },
   // Test playback of a WebM file with non-zero start time.
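For context on the contentDuration fields stripped above: the backed-out manifest paired the metadata duration with a separate contentDuration whenever the decodable content is shorter or longer than what the container reports; vbr-head.mp3, for example, reports 10 seconds from its Xing header but holds roughly one second of frames. A minimal sketch of how the backed-out checkEnded code in test_streams_element_capture.html consumed the field (expectedEndTime is a hypothetical helper name, not from the tree):

function expectedEndTime(test) {
  // Prefer the decodable content length when the manifest provides one,
  // mirroring the backed-out checkEnded logic.
  return typeof test.contentDuration == "number" ? test.contentDuration
                                                 : test.duration;
}

// Example: expectedEndTime({ duration: 10.00, contentDuration: 1.019 })
// yields 1.019, the playable length of vbr-head.mp3.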
--- a/dom/media/test/test_streams_element_capture.html
+++ b/dom/media/test/test_streams_element_capture.html
@@ -4,112 +4,97 @@
   <title>Test that a MediaStream captured from one element plays back in another</title>
   <script src="/tests/SimpleTest/SimpleTest.js"></script>
   <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
   <script type="text/javascript" src="manifest.js"></script>
 </head>
 <body>
 <pre id="test">
 <script class="testbody" type="text/javascript">
-let manager = new MediaTestManager();
+SimpleTest.waitForExplicitFinish();
 
-function checkDrawImage(vout, msg) {
+// longer timeout for slow platforms
+if (isSlowPlatform()) {
+  SimpleTest.requestLongerTimeout(3);
+  SimpleTest.requestCompleteLog();
+}
+
+function checkDrawImage(vout) {
   var canvas = document.createElement("canvas");
   var ctx = canvas.getContext("2d");
   ctx.drawImage(vout, 0, 0);
   var imgData = ctx.getImageData(0, 0, 1, 1);
-  is(imgData.data[3], 255, msg);
+  is(imgData.data[3], 255, "Check video frame pixel has been drawn");
 }
 
 function isGreaterThanOrEqualEps(a, b, msg) {
-  ok(a >= b, `Got ${a}, expected at least ${b}; ${msg}`);
+  ok(a >= b - 0.01,
+     "Got " + a + ", expected at least " + b + "; " + msg);
 }
 
-function startTest(test, token) {
-  manager.started(token);
+function startTest(test) {
   var v = document.createElement('video');
   var vout = document.createElement('video');
 
-  v.id = "MediaDecoder";
-  vout.id = "MediaStream";
-
   v.src = test.name;
   var stream;
 
   var checkEnded = function() {
-    let duration = test.duration;
-    if (typeof(test.contentDuration) == "number") {
-      duration = test.contentDuration;
-    }
-    if (duration) {
-      isGreaterThanOrEqualEps(vout.currentTime, duration,
-        `${token} current time at end`);
+    if (test.duration) {
+      isGreaterThanOrEqualEps(vout.currentTime, test.duration,
+         test.name + " current time at end");
     }
-    is(vout.readyState, vout.HAVE_CURRENT_DATA,
-      `${token} checking readyState`);
-    ok(vout.ended, `${token} checking playback has ended`);
-    isnot(stream.getTracks().length, 0, `${token} results in some tracks`);
-    if (stream.getVideoTracks().length > 0) {
-      ok(test.type.match(/^video/), `${token} is a video resource`);
-      checkDrawImage(vout, `${token} checking video frame pixel has been drawn`);
+    is(vout.readyState, vout.HAVE_CURRENT_DATA, test.name + " checking readyState");
+    ok(vout.ended, test.name + " checking playback has ended");
+    if (test.type.match(/^video/)) {
+      checkDrawImage(vout);
     }
     vout.remove();
     removeNodeAndSource(v);
-    manager.finished(token);
+    SimpleTest.finish();
   };
-  Promise.race([
-    Promise.all([
-      new Promise(r => vout.addEventListener("ended", r, {once:true})),
-      new Promise(r => v.addEventListener("ended", r, {once:true})),
-    ]),
-    new Promise((res, rej) => vout.addEventListener("error", rej, {once:true})),
-    new Promise((res, rej) => v.addEventListener("error", rej, {once:true})),
-  ]).then(() => checkEnded(), e => {
-    ok(false, `Error: ${e.target.id} ${token}, ${e.target.error.message}`);
-    manager.finished(token);
-  });
-
+  vout.addEventListener("ended", checkEnded);
 
   document.body.appendChild(vout);
 
-  var onloadedmetadata = async function (ev) {
+  var onloadedmetadata = function (ev) {
     stream = v.mozCaptureStreamUntilEnded();
     vout.srcObject = stream;
-    is(vout.srcObject, stream,
-      `${token} set output element .srcObject correctly`);
-    // Wait for the resource fetch algorithm to have run, so that the media
-    // element is hooked up to the MediaStream and ready to go. If we don't do
-    // this, we're not guaranteed to render the very first video frame, which
-    // can make this test fail the drawImage test when a video resource only
-    // contains one frame.
-    await new Promise(r => vout.addEventListener('loadstart', r));
+    is(vout.srcObject, stream, test.name + " set output element .srcObject correctly");
     v.play();
     vout.play();
   }
 
   v.preload = 'metadata';
   v.addEventListener('loadedmetadata', onloadedmetadata);
 
   // Log events for debugging.
   var events = ["suspend", "play", "canplay", "canplaythrough", "loadstart", "loadedmetadata",
                 "loadeddata", "playing", "ended", "error", "stalled", "emptied", "abort",
                 "waiting", "pause"];
   function logEvent(e) {
-    Log(token, `${e.target.id} got ${e.type}`);
+    Log(e.target.name, "got " + e.type);
   }
   events.forEach(function(e) {
     v.addEventListener(e, logEvent);
     vout.addEventListener(e, logEvent);
   });
 
 }
 
-(async () => {
-  SimpleTest.waitForExplicitFinish();
-  await SpecialPowers.pushPrefEnv(
-    { "set": [["privacy.reduceTimerPrecision", false]]});
-  // Filter out bug1377278.webm due to bug 1541401.
-  manager.runTests(gPlayTests.filter(t => !t.name.includes("1377278")), startTest);
-})();
+// We only test one playable video because for some of the audio files
+// --- small-shot.mp3.mp4 and small-shot.m4a --- GStreamer doesn't decode
+// as much data as indicated by the duration, causing this test to fail on
+// Linux. See bug 1084185.
+var testVideo = getPlayableVideo(gSmallTests);
+if (testVideo) {
+  SpecialPowers.pushPrefEnv(
+    { "set": [["privacy.reduceTimerPrecision", false]]},
+    function() {
+      startTest(testVideo);
+    });
+} else {
+  todo(false, "No playable video");
+}
 </script>
 </pre>
 </body>
 </html>
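The block removed above raced each element's "ended" event against "error", and waited for the capture sink's "loadstart" event before starting playback so the very first video frame is guaranteed to render. A standalone sketch of that pattern under the same assumptions (two media elements and Gecko's mozCaptureStreamUntilEnded; waitForEndedOrError and playCapture are hypothetical names, not from the tree):

function waitForEndedOrError(...elements) {
  // Resolves once every element has fired "ended"; rejects on the first
  // "error" event from any of them.
  return Promise.race([
    Promise.all(elements.map(el => new Promise(
      r => el.addEventListener("ended", r, { once: true })))),
    ...elements.map(el => new Promise((res, rej) =>
      el.addEventListener("error", rej, { once: true }))),
  ]);
}

async function playCapture(v, vout) {
  const stream = v.mozCaptureStreamUntilEnded();
  vout.srcObject = stream;
  // Wait for the resource fetch algorithm to run so vout is hooked up to
  // the MediaStream before play(); otherwise a one-frame video may end
  // before its only frame has been rendered.
  await new Promise(r => vout.addEventListener("loadstart", r, { once: true }));
  v.play();
  vout.play();
  await waitForEndedOrError(v, vout);
}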
--- a/dom/media/webaudio/AudioNodeStream.cpp
+++ b/dom/media/webaudio/AudioNodeStream.cpp
@@ -38,16 +38,18 @@ AudioNodeStream::AudioNodeStream(AudioNo
       mIsActive(aEngine->IsActive()),
       mMarkAsFinishedAfterThisBlock(false),
       mAudioParamStream(false),
       mPassThrough(false) {
   MOZ_ASSERT(NS_IsMainThread());
   mSuspendedCount = !(mIsActive || mFlags & EXTERNAL_OUTPUT);
   mChannelCountMode = ChannelCountMode::Max;
   mChannelInterpretation = ChannelInterpretation::Speakers;
+  // AudioNodes are always producing data
+  mHasCurrentData = true;
   mLastChunks.SetLength(std::max(uint16_t(1), mEngine->OutputCount()));
   MOZ_COUNT_CTOR(AudioNodeStream);
 }
 
 AudioNodeStream::~AudioNodeStream() {
   MOZ_ASSERT(mActiveInputCount == 0);
   MOZ_COUNT_DTOR(AudioNodeStream);
 }
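The restored mHasCurrentData = true line encodes the invariant stated in its comment: an AudioNodeStream has renderable output from its first graph iteration. The script-visible consequence is that a stream captured from a Web Audio graph is playable as soon as the graph runs; a minimal sketch using standard Web Audio API calls (the immediate-playability reading is an assumption drawn from the restored comment, not something this patch tests):

const ctx = new AudioContext();
const osc = new OscillatorNode(ctx, { frequency: 440 });
const dest = ctx.createMediaStreamDestination();
osc.connect(dest);
osc.start();

// The captured stream carries current data from the start, so a media
// element can begin playout immediately.
const sink = new Audio();
sink.srcObject = dest.stream;
sink.play();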