Bug 1423253 - Handle MediaDecoder pauses when future frames are already buffered. r=padenot
authorAndreas Pehrson <apehrson@mozilla.com>
Fri, 22 Mar 2019 11:45:51 +0000
changeset 465664 ab393db34aa5eb2f6958f5177e2e4ce60228f7f9
parent 465663 56569d42dd8062b3a7815a8f1607b562bce6bbdc
child 465665 e5fc0c881af02fe17288ea08f3f5c3a85a5070e8
push id: 35744
push user: apavel@mozilla.com
push date: Fri, 22 Mar 2019 16:44:08 +0000
treeherder: mozilla-central@e66a2b59914d [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: padenot
bugs: 1423253
milestone: 68.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1423253 - Handle MediaDecoder pauses when future frames are already buffered. r=padenot Differential Revision: https://phabricator.services.mozilla.com/D23588
dom/media/VideoFrameConverter.h
dom/media/VideoStreamTrack.cpp
dom/media/gtest/TestVideoFrameConverter.cpp
dom/media/mediasink/DecodedStream.cpp
dom/media/mediasink/DecodedStream.h
--- a/dom/media/VideoFrameConverter.h
+++ b/dom/media/VideoFrameConverter.h
@@ -54,17 +54,18 @@ class VideoFrameConverter {
  public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VideoFrameConverter)
 
   VideoFrameConverter()
       : mTaskQueue(
             new TaskQueue(GetMediaThreadPool(MediaThreadType::WEBRTC_DECODER),
                           "VideoFrameConverter")),
         mPacingTimer(new MediaTimer()),
-        mLastImage(-1),  // -1 is not a guaranteed invalid serial (Bug 1262134).
+        mLastImage(
+            -2),  // -2 or -1 are not guaranteed invalid serials (Bug 1262134).
         mBufferPool(false, CONVERTER_BUFFER_POOL_SIZE),
         mLastFrameQueuedForProcessing(TimeStamp::Now()),
         mEnabled(true) {
     MOZ_COUNT_CTOR(VideoFrameConverter);
   }
 
   void QueueVideoChunk(const VideoChunk& aChunk, bool aForceBlack) {
     gfx::IntSize size = aChunk.mFrame.GetIntrinsicSize();
@@ -198,16 +199,20 @@ class VideoFrameConverter {
                           gfx::IntSize aSize, bool aForceBlack) {
     MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
 
     int32_t serial;
     if (aForceBlack || !mEnabled) {
       // Set the last-img check to indicate black.
       // -1 is not a guaranteed invalid serial. See bug 1262134.
       serial = -1;
+    } else if (!aImage) {
+      // Set the last-img check to indicate reset.
+      // -2 is not a guaranteed invalid serial. See bug 1262134.
+      serial = -2;
     } else {
       serial = aImage->GetSerial();
     }
 
     if (serial == mLastImage) {
       // With a non-direct listener we get passed duplicate frames every ~10ms
       // even with no frame change.
       return;
@@ -266,17 +271,21 @@ class VideoFrameConverter {
       webrtc::I420Buffer::SetBlack(buffer);
 
       webrtc::VideoFrame frame(buffer, 0, // not setting rtp timestamp
                                now, webrtc::kVideoRotation_0);
       VideoFrameConverted(frame);
       return;
     }
 
-    MOZ_RELEASE_ASSERT(aImage, "Must have image if not forcing black");
+    if (!aImage) {
+      // Don't send anything for null images.
+      return;
+    }
+
     MOZ_ASSERT(aImage->GetSize() == aSize);
 
     if (layers::PlanarYCbCrImage* image = aImage->AsPlanarYCbCrImage()) {
       dom::ImageUtils utils(image);
       if (utils.GetFormat() == dom::ImageBitmapFormat::YUV420P &&
           image->GetData()) {
         const layers::PlanarYCbCrData* data = image->GetData();
         rtc::scoped_refptr<webrtc::WrappedI420Buffer> video_frame_buffer(
--- a/dom/media/VideoStreamTrack.cpp
+++ b/dom/media/VideoStreamTrack.cpp
@@ -93,18 +93,20 @@ class VideoOutput : public DirectMediaSt
         continue;
       }
       images.AppendElement(
           ImageContainer::NonOwningImage(image, chunk.mTimeStamp, frameId));
 
       lastPrincipalHandle = chunk.GetPrincipalHandle();
     }
 
-    // Don't update if there are no changes.
     if (images.IsEmpty()) {
+      // This could happen if the only images in mFrames are null. We leave the
+      // container at the current frame in this case.
+      mVideoFrameContainer->ClearFutureFrames();
       return;
     }
 
     bool principalHandleChanged =
         lastPrincipalHandle != PRINCIPAL_HANDLE_NONE &&
         lastPrincipalHandle != mVideoFrameContainer->GetLastPrincipalHandle();
 
     if (principalHandleChanged) {
@@ -112,18 +114,16 @@ class VideoOutput : public DirectMediaSt
           lastPrincipalHandle, images.LastElement().mFrameID);
     }
 
     mVideoFrameContainer->SetCurrentFrames(
         mFrames[0].second().mFrame.GetIntrinsicSize(), images);
     mMainThread->Dispatch(NewRunnableMethod("VideoFrameContainer::Invalidate",
                                             mVideoFrameContainer,
                                             &VideoFrameContainer::Invalidate));
-
-    images.ClearAndRetainStorage();
   }
 
  public:
   VideoOutput(VideoFrameContainer* aContainer, AbstractThread* aMainThread)
       : mMutex("VideoOutput::mMutex"),
         mVideoFrameContainer(aContainer),
         mMainThread(aMainThread) {}
   void NotifyRealtimeTrackData(MediaStreamGraph* aGraph,
--- a/dom/media/gtest/TestVideoFrameConverter.cpp
+++ b/dom/media/gtest/TestVideoFrameConverter.cpp
@@ -168,8 +168,44 @@ TEST_F(VideoFrameConverterTest, BlackOnD
   ASSERT_EQ(frames.size(), 2U);
   EXPECT_EQ(frames[0].first().width(), 640);
   EXPECT_EQ(frames[0].first().height(), 480);
   EXPECT_GT(frames[0].second(), future1);
   EXPECT_EQ(frames[1].first().width(), 640);
   EXPECT_EQ(frames[1].first().height(), 480);
   EXPECT_GT(frames[1].second(), now + TimeDuration::FromMilliseconds(1100));
 }
+
+TEST_F(VideoFrameConverterTest, ClearFutureFramesOnJumpingBack) {
+  TimeStamp now = TimeStamp::Now();
+  TimeStamp future1 = now + TimeDuration::FromMilliseconds(100);
+  TimeStamp future2 = now + TimeDuration::FromMilliseconds(200);
+  TimeStamp future3 = now + TimeDuration::FromMilliseconds(150);
+
+  mConverter->QueueVideoChunk(GenerateChunk(640, 480, future1), false);
+  WaitForNConverted(1);
+
+  // We are now at t=100ms+. Queue a future frame and jump back in time to
+  // signal a reset.
+
+  mConverter->QueueVideoChunk(GenerateChunk(800, 600, future2), false);
+  VideoChunk nullChunk;
+  nullChunk.mFrame = VideoFrame(nullptr, gfx::IntSize(800, 600));
+  nullChunk.mTimeStamp = TimeStamp::Now();
+  ASSERT_GT(nullChunk.mTimeStamp, future1);
+  mConverter->QueueVideoChunk(nullChunk, false);
+
+  // We queue one more chunk after the reset so we don't have to wait a full
+  // second for the same-frame timer. It has a different time and resolution
+  // so we can differentiate them.
+  mConverter->QueueVideoChunk(GenerateChunk(320, 240, future3), false);
+
+  auto frames = WaitForNConverted(2);
+  EXPECT_GT(TimeStamp::Now(), future3);
+  EXPECT_LT(TimeStamp::Now(), future2);
+  ASSERT_EQ(frames.size(), 2U);
+  EXPECT_EQ(frames[0].first().width(), 640);
+  EXPECT_EQ(frames[0].first().height(), 480);
+  EXPECT_GT(frames[0].second(), future1);
+  EXPECT_EQ(frames[1].first().width(), 320);
+  EXPECT_EQ(frames[1].first().height(), 240);
+  EXPECT_GT(frames[1].second(), future3);
+}
--- a/dom/media/mediasink/DecodedStream.cpp
+++ b/dom/media/mediasink/DecodedStream.cpp
@@ -416,19 +416,20 @@ nsresult DecodedStream::Start(const Time
   }
   return NS_OK;
 }
 
 void DecodedStream::Stop() {
   AssertOwnerThread();
   MOZ_ASSERT(mStartTime.isSome(), "playback not started.");
 
+  DisconnectListener();
+  ResetVideo(mPrincipalHandle);
   mStreamTimeOffset += SentDuration();
   mStartTime.reset();
-  DisconnectListener();
   mAudioEndedPromise = nullptr;
   mVideoEndedPromise = nullptr;
 
   // Clear mData immediately when this playback session ends so we won't
   // send data to the wrong stream in SendData() in next playback session.
   DestroyData(std::move(mData));
 }
 
@@ -604,16 +605,53 @@ static bool ZeroDurationAtLastChunk(Vide
   // Get the last video frame's start time in VideoSegment aInput.
   // If the start time is equal to the duration of aInput, means the last video
   // frame's duration is zero.
   StreamTime lastVideoStratTime;
   aInput.GetLastFrame(&lastVideoStratTime);
   return lastVideoStratTime == aInput.GetDuration();
 }
 
+void DecodedStream::ResetVideo(const PrincipalHandle& aPrincipalHandle) {
+  AssertOwnerThread();
+
+  if (!mData) {
+    return;
+  }
+
+  if (!mInfo.HasVideo()) {
+    return;
+  }
+
+  VideoSegment resetter;
+  TimeStamp currentTime;
+  TimeUnit currentPosition = GetPosition(&currentTime);
+
+  // Giving direct consumers a frame (really *any* frame, so in this case:
+  // nullptr) at an earlier time than the previous, will signal to that consumer
+  // to discard any frames ahead in time of the new frame. To be honest, this is
+  // an ugly hack because the direct listeners of the MediaStreamGraph do not
+  // have an API that supports clearing the future frames. ImageContainer and
+  // VideoFrameContainer do though, and we will need to move to a similar API
+  // for video tracks as part of bug 1493618.
+  resetter.AppendFrame(nullptr, mData->mLastVideoImageDisplaySize,
+                       aPrincipalHandle, false, currentTime);
+  mData->mStream->AppendToTrack(mInfo.mVideo.mTrackId, &resetter);
+
+  // Consumer buffers have been reset. We now set mNextVideoTime to the start
+  // time of the current frame, so that it can be displayed again on resuming.
+  if (RefPtr<VideoData> v = mVideoQueue.PeekFront()) {
+    mData->mNextVideoTime = v->mTime;
+  } else {
+    // There was no current frame in the queue. We set the next time to push to
+    // the current time, so we at least don't resume starting in the future.
+    mData->mNextVideoTime = currentPosition;
+  }
+}
+
 void DecodedStream::SendVideo(bool aIsSameOrigin,
                               const PrincipalHandle& aPrincipalHandle) {
   AssertOwnerThread();
 
   if (!mInfo.HasVideo()) {
     return;
   }
 
@@ -722,16 +760,20 @@ void DecodedStream::SendData() {
   AssertOwnerThread();
   MOZ_ASSERT(mStartTime.isSome(), "Must be called after StartPlayback()");
 
   // Not yet created on the main thread. MDSM will try again later.
   if (!mData) {
     return;
   }
 
+  if (!mPlaying) {
+    return;
+  }
+
   SendAudio(mParams.mVolume, mSameOrigin, mPrincipalHandle);
   SendVideo(mSameOrigin, mPrincipalHandle);
 }
 
 TimeUnit DecodedStream::GetEndTime(TrackType aType) const {
   AssertOwnerThread();
   if (aType == TrackInfo::kAudioTrack && mInfo.HasAudio() && mData) {
     auto t = mStartTime.ref() +
@@ -767,16 +809,21 @@ void DecodedStream::NotifyOutput(int64_t
     RefPtr<AudioData> releaseMe = mAudioQueue.PopFront();
     a = mAudioQueue.PeekFront();
   }
 }
 
 void DecodedStream::PlayingChanged() {
   AssertOwnerThread();
 
+  if (!mPlaying) {
+    // On seek or pause we discard future frames.
+    ResetVideo(mPrincipalHandle);
+  }
+
   mAbstractMainThread->Dispatch(NewRunnableMethod<bool>(
       "OutputStreamManager::SetPlaying", mOutputStreamManager,
       &OutputStreamManager::SetPlaying, mPlaying));
 }
 
 void DecodedStream::ConnectListener() {
   AssertOwnerThread();
 
--- a/dom/media/mediasink/DecodedStream.h
+++ b/dom/media/mediasink/DecodedStream.h
@@ -75,16 +75,17 @@ class DecodedStream : public MediaSink {
  private:
   media::TimeUnit FromMicroseconds(int64_t aTime) {
     return media::TimeUnit::FromMicroseconds(aTime);
   }
   void DestroyData(UniquePtr<DecodedStreamData>&& aData);
   void SendAudio(double aVolume, bool aIsSameOrigin,
                  const PrincipalHandle& aPrincipalHandle);
   void SendVideo(bool aIsSameOrigin, const PrincipalHandle& aPrincipalHandle);
+  void ResetVideo(const PrincipalHandle& aPrincipalHandle);
   StreamTime SentDuration();
   void SendData();
   void NotifyOutput(int64_t aTime);
 
   void AssertOwnerThread() const {
     MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
   }