Bug 1172394 - Perform some forgotten Stream -> Track renaming in DecodedStream. r=padenot
☠☠ backed out by 7272d77d4e80 ☠☠
author: Andreas Pehrson <apehrson@mozilla.com>
Wed, 13 Nov 2019 08:47:48 +0000
changeset 501783 afb4b226ff0409682b7bb2eb2475f227bc52aa96
parent 501782 4d198d162b2aaf7ba6d86fc6c71942b6ee8e43ef
child 501784 744fb77a58333b632dbf6820c77c7d8e97674b2c
push id: 114172
push user: dluca@mozilla.com
push date: Tue, 19 Nov 2019 11:31:10 +0000
treeherder: mozilla-inbound@b5c5ba07d3db [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: padenot
bugs: 1172394
milestone: 72.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1172394 - Perform some forgotten Stream -> Track renaming in DecodedStream. r=padenot Differential Revision: https://phabricator.services.mozilla.com/D52036
dom/media/mediasink/DecodedStream.cpp
--- a/dom/media/mediasink/DecodedStream.cpp
+++ b/dom/media/mediasink/DecodedStream.cpp
@@ -49,75 +49,75 @@ class DecodedStreamTrackListener : publi
   const RefPtr<DecodedStreamGraphListener> mGraphListener;
   const RefPtr<SourceMediaTrack> mTrack;
 };
 
 class DecodedStreamGraphListener {
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(DecodedStreamGraphListener)
  public:
   DecodedStreamGraphListener(
-      SourceMediaTrack* aAudioStream,
+      SourceMediaTrack* aAudioTrack,
       MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedHolder,
-      SourceMediaTrack* aVideoStream,
+      SourceMediaTrack* aVideoTrack,
       MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedHolder,
       AbstractThread* aMainThread)
       : mAudioTrackListener(
-            aAudioStream
-                ? MakeRefPtr<DecodedStreamTrackListener>(this, aAudioStream)
+            aAudioTrack
+                ? MakeRefPtr<DecodedStreamTrackListener>(this, aAudioTrack)
                 : nullptr),
         mAudioEndedHolder(std::move(aAudioEndedHolder)),
         mVideoTrackListener(
-            aVideoStream
-                ? MakeRefPtr<DecodedStreamTrackListener>(this, aVideoStream)
+            aVideoTrack
+                ? MakeRefPtr<DecodedStreamTrackListener>(this, aVideoTrack)
                 : nullptr),
         mVideoEndedHolder(std::move(aVideoEndedHolder)),
-        mAudioStream(aAudioStream),
-        mVideoStream(aVideoStream),
+        mAudioTrack(aAudioTrack),
+        mVideoTrack(aVideoTrack),
         mAbstractMainThread(aMainThread) {
     MOZ_ASSERT(NS_IsMainThread());
     if (mAudioTrackListener) {
-      mAudioStream->AddListener(mAudioTrackListener);
+      mAudioTrack->AddListener(mAudioTrackListener);
     } else {
       mAudioEnded = true;
       mAudioEndedHolder.ResolveIfExists(true, __func__);
     }
 
     if (mVideoTrackListener) {
-      mVideoStream->AddListener(mVideoTrackListener);
+      mVideoTrack->AddListener(mVideoTrackListener);
     } else {
       mVideoEnded = true;
       mVideoEndedHolder.ResolveIfExists(true, __func__);
     }
   }
 
   void NotifyOutput(SourceMediaTrack* aTrack, TrackTime aCurrentTrackTime) {
-    if (aTrack == mAudioStream) {
+    if (aTrack == mAudioTrack) {
       if (aCurrentTrackTime >= mAudioEnd) {
-        mAudioStream->End();
+        mAudioTrack->End();
       }
-    } else if (aTrack == mVideoStream) {
+    } else if (aTrack == mVideoTrack) {
       if (aCurrentTrackTime >= mVideoEnd) {
-        mVideoStream->End();
+        mVideoTrack->End();
       }
     } else {
       MOZ_CRASH("Unexpected source track");
     }
-    if (aTrack != mAudioStream && mAudioStream && !mAudioEnded) {
+    if (aTrack != mAudioTrack && mAudioTrack && !mAudioEnded) {
       // Only audio playout drives the clock forward, if present and live.
       return;
     }
-    MOZ_ASSERT_IF(aTrack == mAudioStream, !mAudioEnded);
-    MOZ_ASSERT_IF(aTrack == mVideoStream, !mVideoEnded);
+    MOZ_ASSERT_IF(aTrack == mAudioTrack, !mAudioEnded);
+    MOZ_ASSERT_IF(aTrack == mVideoTrack, !mVideoEnded);
     mOnOutput.Notify(aTrack->TrackTimeToMicroseconds(aCurrentTrackTime));
   }
 
   void NotifyEnded(SourceMediaTrack* aTrack) {
-    if (aTrack == mAudioStream) {
+    if (aTrack == mAudioTrack) {
       mAudioEnded = true;
-    } else if (aTrack == mVideoStream) {
+    } else if (aTrack == mVideoTrack) {
       mVideoEnded = true;
     } else {
       MOZ_CRASH("Unexpected source track");
     }
     aTrack->Graph()->DispatchToMainThreadStableState(
         NewRunnableMethod<RefPtr<SourceMediaTrack>>(
             "DecodedStreamGraphListener::DoNotifyTrackEnded", this,
             &DecodedStreamGraphListener::DoNotifyTrackEnded, aTrack));
@@ -140,49 +140,49 @@ class DecodedStreamGraphListener {
    * to a MediaStreamTrack ending on main thread (it uses another listener)
    * before the listeners to render the track get added, potentially meaning a
    * media element doesn't progress before reaching the end although data was
    * available.
    *
    * Callable from any thread.
    */
   void EndTrackAt(SourceMediaTrack* aTrack, TrackTime aEnd) {
-    if (aTrack == mAudioStream) {
+    if (aTrack == mAudioTrack) {
       mAudioEnd = aEnd;
-    } else if (aTrack == mVideoStream) {
+    } else if (aTrack == mVideoTrack) {
       mVideoEnd = aEnd;
     } else {
       MOZ_CRASH("Unexpected source track");
     }
   }
 
   void DoNotifyTrackEnded(SourceMediaTrack* aTrack) {
     MOZ_ASSERT(NS_IsMainThread());
-    if (aTrack == mAudioStream) {
+    if (aTrack == mAudioTrack) {
       mAudioEndedHolder.ResolveIfExists(true, __func__);
-    } else if (aTrack == mVideoStream) {
+    } else if (aTrack == mVideoTrack) {
       mVideoEndedHolder.ResolveIfExists(true, __func__);
     } else {
       MOZ_CRASH("Unexpected source track");
     }
   }
 
   void Forget() {
     MOZ_ASSERT(NS_IsMainThread());
 
-    if (mAudioTrackListener && !mAudioStream->IsDestroyed()) {
-      mAudioStream->End();
-      mAudioStream->RemoveListener(mAudioTrackListener);
+    if (mAudioTrackListener && !mAudioTrack->IsDestroyed()) {
+      mAudioTrack->End();
+      mAudioTrack->RemoveListener(mAudioTrackListener);
     }
     mAudioTrackListener = nullptr;
     mAudioEndedHolder.ResolveIfExists(false, __func__);
 
-    if (mVideoTrackListener && !mVideoStream->IsDestroyed()) {
-      mVideoStream->End();
-      mVideoStream->RemoveListener(mVideoTrackListener);
+    if (mVideoTrackListener && !mVideoTrack->IsDestroyed()) {
+      mVideoTrack->End();
+      mVideoTrack->RemoveListener(mVideoTrackListener);
     }
     mVideoTrackListener = nullptr;
     mVideoEndedHolder.ResolveIfExists(false, __func__);
   }
 
   MediaEventSource<int64_t>& OnOutput() { return mOnOutput; }
 
  private:
@@ -199,18 +199,18 @@ class DecodedStreamGraphListener {
   RefPtr<DecodedStreamTrackListener> mVideoTrackListener;
   MozPromiseHolder<DecodedStream::EndedPromise> mVideoEndedHolder;
 
   // Graph thread only.
   bool mAudioEnded = false;
   bool mVideoEnded = false;
 
   // Any thread.
-  const RefPtr<SourceMediaTrack> mAudioStream;
-  const RefPtr<SourceMediaTrack> mVideoStream;
+  const RefPtr<SourceMediaTrack> mAudioTrack;
+  const RefPtr<SourceMediaTrack> mVideoTrack;
   Atomic<TrackTime> mAudioEnd{TRACK_TIME_MAX};
   Atomic<TrackTime> mVideoEnd{TRACK_TIME_MAX};
   const RefPtr<AbstractThread> mAbstractMainThread;
 };
 
 DecodedStreamTrackListener::DecodedStreamTrackListener(
     DecodedStreamGraphListener* aGraphListener, SourceMediaTrack* aTrack)
     : mGraphListener(aGraphListener), mTrack(aTrack) {}
@@ -231,18 +231,18 @@ void DecodedStreamTrackListener::NotifyE
  * captureStream/UntilEnded. Seeking creates new source tracks, as does
  * replaying after the input as ended. In the latter case, the new sources are
  * not connected to tracks created by captureStreamUntilEnded.
  */
 class DecodedStreamData final {
  public:
   DecodedStreamData(
       OutputStreamManager* aOutputStreamManager, PlaybackInfoInit&& aInit,
-      RefPtr<SourceMediaTrack> aAudioStream,
-      RefPtr<SourceMediaTrack> aVideoStream,
+      RefPtr<SourceMediaTrack> aAudioTrack,
+      RefPtr<SourceMediaTrack> aVideoTrack,
       MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedPromise,
       MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise,
       AbstractThread* aMainThread);
   ~DecodedStreamData();
   MediaEventSource<int64_t>& OnOutput();
   void Forget();
   void GetDebugInfo(dom::DecodedStreamDataDebugInfo& aInfo);
 
@@ -253,19 +253,19 @@ class DecodedStreamData final {
                            const PrincipalHandle& aPrincipalHandle);
 
   /* The following group of fields are protected by the decoder's monitor
    * and can be read or written on any thread.
    */
   // Count of audio frames written to the track
   int64_t mAudioFramesWritten;
   // Count of video frames written to the track in the track's rate
-  TrackTime mVideoStreamWritten;
+  TrackTime mVideoTrackWritten;
   // Count of audio frames written to the track in the track's rate
-  TrackTime mAudioStreamWritten;
+  TrackTime mAudioTrackWritten;
   // mNextAudioTime is the end timestamp for the last packet sent to the track.
   // Therefore audio packets starting at or after this time need to be copied
   // to the output track.
   TimeUnit mNextAudioTime;
   // mLastVideoStartTime is the start timestamp for the last packet sent to the
   // track. Therefore video packets starting after this time need to be copied
   // to the output track.
   NullableTimeUnit mLastVideoStartTime;
@@ -278,63 +278,62 @@ class DecodedStreamData final {
   TimeStamp mLastVideoTimeStamp;
   // The last video image sent to the track. Useful if we need to replicate
   // the image.
   RefPtr<layers::Image> mLastVideoImage;
   gfx::IntSize mLastVideoImageDisplaySize;
   bool mHaveSentFinishAudio;
   bool mHaveSentFinishVideo;
 
-  const RefPtr<SourceMediaTrack> mAudioStream;
-  const RefPtr<SourceMediaTrack> mVideoStream;
+  const RefPtr<SourceMediaTrack> mAudioTrack;
+  const RefPtr<SourceMediaTrack> mVideoTrack;
   const RefPtr<DecodedStreamGraphListener> mListener;
 
   const RefPtr<OutputStreamManager> mOutputStreamManager;
   const RefPtr<AbstractThread> mAbstractMainThread;
 };
 
 DecodedStreamData::DecodedStreamData(
     OutputStreamManager* aOutputStreamManager, PlaybackInfoInit&& aInit,
-    RefPtr<SourceMediaTrack> aAudioStream,
-    RefPtr<SourceMediaTrack> aVideoStream,
+    RefPtr<SourceMediaTrack> aAudioTrack, RefPtr<SourceMediaTrack> aVideoTrack,
     MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedPromise,
     MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise,
     AbstractThread* aMainThread)
     : mAudioFramesWritten(0),
-      mVideoStreamWritten(0),
-      mAudioStreamWritten(0),
+      mVideoTrackWritten(0),
+      mAudioTrackWritten(0),
       mNextAudioTime(aInit.mStartTime),
       mHaveSentFinishAudio(false),
       mHaveSentFinishVideo(false),
-      mAudioStream(std::move(aAudioStream)),
-      mVideoStream(std::move(aVideoStream)),
+      mAudioTrack(std::move(aAudioTrack)),
+      mVideoTrack(std::move(aVideoTrack)),
       // DecodedStreamGraphListener will resolve these promises.
       mListener(MakeRefPtr<DecodedStreamGraphListener>(
-          mAudioStream, std::move(aAudioEndedPromise), mVideoStream,
+          mAudioTrack, std::move(aAudioEndedPromise), mVideoTrack,
           std::move(aVideoEndedPromise), aMainThread)),
       mOutputStreamManager(aOutputStreamManager),
       mAbstractMainThread(aMainThread) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_DIAGNOSTIC_ASSERT(
-      mOutputStreamManager->HasTracks(mAudioStream, mVideoStream),
+      mOutputStreamManager->HasTracks(mAudioTrack, mVideoTrack),
       "Tracks must be pre-created on main thread");
 }
 
 DecodedStreamData::~DecodedStreamData() { MOZ_ASSERT(NS_IsMainThread()); }
 
 MediaEventSource<int64_t>& DecodedStreamData::OnOutput() {
   return mListener->OnOutput();
 }
 
 void DecodedStreamData::Forget() { mListener->Forget(); }
 
 void DecodedStreamData::GetDebugInfo(dom::DecodedStreamDataDebugInfo& aInfo) {
   aInfo.mInstance = NS_ConvertUTF8toUTF16(nsPrintfCString("%p", this));
   aInfo.mAudioFramesWritten = mAudioFramesWritten;
-  aInfo.mStreamAudioWritten = mAudioStreamWritten;
+  aInfo.mStreamAudioWritten = mAudioTrackWritten;
   aInfo.mNextAudioTime = mNextAudioTime.ToMicroseconds();
   aInfo.mLastVideoStartTime =
       mLastVideoStartTime.valueOr(TimeUnit::FromMicroseconds(-1))
           .ToMicroseconds();
   aInfo.mLastVideoEndTime =
       mLastVideoEndTime.valueOr(TimeUnit::FromMicroseconds(-1))
           .ToMicroseconds();
   aInfo.mHaveSentFinishAudio = mHaveSentFinishAudio;
@@ -419,36 +418,36 @@ nsresult DecodedStream::Start(const Time
       // This happens when RemoveOutput() is called immediately after
       // StartPlayback().
       if (mOutputStreamManager->IsEmpty()) {
         // Resolve the promise to indicate the end of playback.
         mAudioEndedPromise.Resolve(true, __func__);
         mVideoEndedPromise.Resolve(true, __func__);
         return NS_OK;
       }
-      RefPtr<SourceMediaTrack> audioStream =
+      RefPtr<SourceMediaTrack> audioTrack =
           mOutputStreamManager->GetPrecreatedTrackOfType(MediaSegment::AUDIO);
-      if (mInit.mInfo.HasAudio() && !audioStream) {
+      if (mInit.mInfo.HasAudio() && !audioTrack) {
         MOZ_DIAGNOSTIC_ASSERT(
             !mOutputStreamManager->HasTrackType(MediaSegment::AUDIO));
-        audioStream = mOutputStreamManager->AddTrack(MediaSegment::AUDIO);
+        audioTrack = mOutputStreamManager->AddTrack(MediaSegment::AUDIO);
       }
-      if (audioStream) {
-        audioStream->SetAppendDataSourceRate(mInit.mInfo.mAudio.mRate);
+      if (audioTrack) {
+        audioTrack->SetAppendDataSourceRate(mInit.mInfo.mAudio.mRate);
       }
-      RefPtr<SourceMediaTrack> videoStream =
+      RefPtr<SourceMediaTrack> videoTrack =
           mOutputStreamManager->GetPrecreatedTrackOfType(MediaSegment::VIDEO);
-      if (mInit.mInfo.HasVideo() && !videoStream) {
+      if (mInit.mInfo.HasVideo() && !videoTrack) {
         MOZ_DIAGNOSTIC_ASSERT(
             !mOutputStreamManager->HasTrackType(MediaSegment::VIDEO));
-        videoStream = mOutputStreamManager->AddTrack(MediaSegment::VIDEO);
+        videoTrack = mOutputStreamManager->AddTrack(MediaSegment::VIDEO);
       }
       mData = MakeUnique<DecodedStreamData>(
-          mOutputStreamManager, std::move(mInit), std::move(audioStream),
-          std::move(videoStream), std::move(mAudioEndedPromise),
+          mOutputStreamManager, std::move(mInit), std::move(audioTrack),
+          std::move(videoTrack), std::move(mAudioEndedPromise),
           std::move(mVideoEndedPromise), mAbstractMainThread);
       return NS_OK;
     }
     UniquePtr<DecodedStreamData> ReleaseData() { return std::move(mData); }
 
    private:
     PlaybackInfoInit mInit;
     Promise mAudioEndedPromise;
@@ -623,35 +622,34 @@ void DecodedStream::SendAudio(double aVo
                     aPrincipalHandle);
   }
 
   output.ApplyVolume(aVolume);
 
   // |mNextAudioTime| is updated as we process each audio sample in
   // SendStreamAudio().
   if (output.GetDuration() > 0) {
-    mData->mAudioStreamWritten += mData->mAudioStream->AppendData(&output);
+    mData->mAudioTrackWritten += mData->mAudioTrack->AppendData(&output);
   }
 
   if (mAudioQueue.IsFinished() && !mData->mHaveSentFinishAudio) {
-    mData->mListener->EndTrackAt(mData->mAudioStream,
-                                 mData->mAudioStreamWritten);
+    mData->mListener->EndTrackAt(mData->mAudioTrack, mData->mAudioTrackWritten);
     mData->mHaveSentFinishAudio = true;
   }
 }
 
 void DecodedStreamData::WriteVideoToSegment(
     layers::Image* aImage, const TimeUnit& aStart, const TimeUnit& aEnd,
     const gfx::IntSize& aIntrinsicSize, const TimeStamp& aTimeStamp,
     VideoSegment* aOutput, const PrincipalHandle& aPrincipalHandle) {
   RefPtr<layers::Image> image = aImage;
   auto end =
-      mVideoStream->MicrosecondsToTrackTimeRoundDown(aEnd.ToMicroseconds());
+      mVideoTrack->MicrosecondsToTrackTimeRoundDown(aEnd.ToMicroseconds());
   auto start =
-      mVideoStream->MicrosecondsToTrackTimeRoundDown(aStart.ToMicroseconds());
+      mVideoTrack->MicrosecondsToTrackTimeRoundDown(aStart.ToMicroseconds());
   aOutput->AppendFrame(image.forget(), aIntrinsicSize, aPrincipalHandle, false,
                        aTimeStamp);
   // Extend this so we get accurate durations for all frames.
   // Because this track is pushed, we need durations so the graph can track
   // when playout of the track has finished.
   aOutput->ExtendLastFrameBy(end - start);
 
   mLastVideoStartTime = Some(aStart);
@@ -687,17 +685,17 @@ void DecodedStream::ResetVideo(const Pri
   // nullptr) at an earlier time than the previous, will signal to that consumer
   // to discard any frames ahead in time of the new frame. To be honest, this is
   // an ugly hack because the direct listeners of the MediaTrackGraph do not
   // have an API that supports clearing the future frames. ImageContainer and
   // VideoFrameContainer do though, and we will need to move to a similar API
   // for video tracks as part of bug 1493618.
   resetter.AppendFrame(nullptr, mData->mLastVideoImageDisplaySize,
                        aPrincipalHandle, false, currentTime);
-  mData->mVideoStream->AppendData(&resetter);
+  mData->mVideoTrack->AppendData(&resetter);
 
   // Consumer buffers have been reset. We now set the next time to the start
   // time of the current frame, so that it can be displayed again on resuming.
   if (RefPtr<VideoData> v = mVideoQueue.PeekFront()) {
     mData->mLastVideoStartTime = Some(v->mTime - TimeUnit::FromMicroseconds(1));
     mData->mLastVideoEndTime = Some(v->mTime);
   } else {
     // There was no current frame in the queue. We set the next time to the
@@ -767,33 +765,33 @@ void DecodedStream::SendVideo(const Prin
       // the track's lifetime in the MTG, as rendering is based on timestamps,
       // aka frame start times.
       TimeStamp t =
           std::max(mData->mLastVideoTimeStamp,
                    currentTime + (lastEnd - currentPosition).ToTimeDuration());
       TimeUnit end = std::max(
           v->GetEndTime(),
           lastEnd + TimeUnit::FromMicroseconds(
-                        mData->mVideoStream->TrackTimeToMicroseconds(1) + 1));
+                        mData->mVideoTrack->TrackTimeToMicroseconds(1) + 1));
       mData->mLastVideoImage = v->mImage;
       mData->mLastVideoImageDisplaySize = v->mDisplay;
       mData->WriteVideoToSegment(v->mImage, lastEnd, end, v->mDisplay, t,
                                  &output, aPrincipalHandle);
     }
   }
 
   // Check the output is not empty.
   bool compensateEOS = false;
   bool forceBlack = false;
   if (output.GetLastFrame()) {
     compensateEOS = ZeroDurationAtLastChunk(output);
   }
 
   if (output.GetDuration() > 0) {
-    mData->mVideoStreamWritten += mData->mVideoStream->AppendData(&output);
+    mData->mVideoTrackWritten += mData->mVideoTrack->AppendData(&output);
   }
 
   if (mVideoQueue.IsFinished() && !mData->mHaveSentFinishVideo) {
     if (!mData->mLastVideoImage) {
       // We have video, but the video queue finished before we received any
       // frame. We insert a black frame to progress any consuming
       // HTMLMediaElement. This mirrors the behavior of VideoSink.
 
@@ -805,32 +803,30 @@ void DecodedStream::SendVideo(const Prin
       mData->mLastVideoImageDisplaySize = mInfo.mVideo.mDisplay;
     }
     if (compensateEOS) {
       VideoSegment endSegment;
       // Calculate the deviation clock time from DecodedStream.
       // We round the nr of microseconds up, because WriteVideoToSegment
       // will round the conversion from microseconds to TrackTime down.
       auto deviation = TimeUnit::FromMicroseconds(
-          mData->mVideoStream->TrackTimeToMicroseconds(1) + 1);
+          mData->mVideoTrack->TrackTimeToMicroseconds(1) + 1);
       auto start = mData->mLastVideoEndTime.valueOr(mStartTime.ref());
       mData->WriteVideoToSegment(
           mData->mLastVideoImage, start, start + deviation,
           mData->mLastVideoImageDisplaySize,
           currentTime + (start + deviation - currentPosition).ToTimeDuration(),
           &endSegment, aPrincipalHandle);
       MOZ_ASSERT(endSegment.GetDuration() > 0);
       if (forceBlack) {
         endSegment.ReplaceWithDisabled();
       }
-      mData->mVideoStreamWritten +=
-          mData->mVideoStream->AppendData(&endSegment);
+      mData->mVideoTrackWritten += mData->mVideoTrack->AppendData(&endSegment);
     }
-    mData->mListener->EndTrackAt(mData->mVideoStream,
-                                 mData->mVideoStreamWritten);
+    mData->mListener->EndTrackAt(mData->mVideoTrack, mData->mVideoTrackWritten);
     mData->mHaveSentFinishVideo = true;
   }
 }
 
 void DecodedStream::SendData() {
   AssertOwnerThread();
 
   // Not yet created on the main thread. MDSM will try again later.