Bug 1423253 - Drop old frames before they get processed in VideoFrameConverter. r=padenot
author Andreas Pehrson <apehrson@mozilla.com>
Fri, 22 Mar 2019 11:44:40 +0000
changeset 465654 1437c364fe9948d98c09578f6ccba79ae97af212
parent 465653 b5e813887d3ffc1bf1cd9f6869351c1c6b96c1f8
child 465655 c608a0db006386acb5b125dd5aa3f4dbf093093e
push id 35744
push user apavel@mozilla.com
push date Fri, 22 Mar 2019 16:44:08 +0000
treeherder mozilla-central@e66a2b59914d
reviewers padenot
bugs 1423253
milestone 68.0a1
Bug 1423253 - Drop old frames before they get processed in VideoFrameConverter. r=padenot

To avoid building up a queue of frames when the machine cannot keep up.

Differential Revision: https://phabricator.services.mozilla.com/D23710
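
The patch boils down to a gate in front of the conversion step: a frame is only dispatched for processing if its timestamp is newer than the newest frame already queued, and stale frames are logged and dropped. Below is a minimal standalone sketch of that idea, using std::chrono in place of mozilla::TimeStamp; the Frame and DroppingQueue names are inventions for the example, not names from the patch.

#include <chrono>
#include <cstdio>
#include <vector>

using Clock = std::chrono::steady_clock;

// Illustrative stand-in; the real patch works on a layers::Image plus a
// mozilla::TimeStamp and dispatches the work to a TaskQueue.
struct Frame {
  Clock::time_point time;
  int serial;
};

class DroppingQueue {
 public:
  // Returns true if the frame was accepted for (expensive) processing.
  bool QueueForProcessing(const Frame& aFrame) {
    // Drop any frame whose timestamp does not move forward; on a machine
    // that cannot keep up this keeps a backlog from building.
    if (aFrame.time <= mLastQueuedForProcessing) {
      std::printf("Dropping frame %d: time did not progress\n", aFrame.serial);
      return false;
    }
    mLastQueuedForProcessing = aFrame.time;
    mAccepted.push_back(aFrame);  // the real code dispatches a runnable here
    return true;
  }

  const std::vector<Frame>& Accepted() const { return mAccepted; }

 private:
  // Mirrors mLastFrameQueuedForProcessing(TimeStamp::Now()) in the patch.
  Clock::time_point mLastQueuedForProcessing = Clock::now();
  std::vector<Frame> mAccepted;
};

int main() {
  DroppingQueue queue;
  const auto now = Clock::now();
  queue.QueueForProcessing({now + std::chrono::milliseconds(100), 1});  // kept
  queue.QueueForProcessing({now + std::chrono::milliseconds(50), 2});   // dropped
  queue.QueueForProcessing({now + std::chrono::milliseconds(200), 3});  // kept
  std::printf("%zu frames accepted\n", queue.Accepted().size());
  return 0;
}

The actual converter additionally re-checks the timestamp once the dispatched task runs on its TaskQueue (see ProcessVideoFrame below), so a frame that was current when dispatched but has since been superseded is still dropped before conversion.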
dom/media/VideoFrameConverter.h
dom/media/gtest/TestVideoFrameConverter.cpp
--- a/dom/media/VideoFrameConverter.h
+++ b/dom/media/VideoFrameConverter.h
@@ -55,51 +55,54 @@ class VideoFrameConverter {
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VideoFrameConverter)
 
   VideoFrameConverter()
       : mTaskQueue(
             new TaskQueue(GetMediaThreadPool(MediaThreadType::WEBRTC_DECODER),
                           "VideoFrameConverter")),
         mPacingTimer(new MediaTimer()),
         mLastImage(-1),  // -1 is not a guaranteed invalid serial (Bug 1262134).
-        mBufferPool(false, CONVERTER_BUFFER_POOL_SIZE) {
+        mBufferPool(false, CONVERTER_BUFFER_POOL_SIZE),
+        mLastFrameQueuedForProcessing(TimeStamp::Now()) {
     MOZ_COUNT_CTOR(VideoFrameConverter);
   }
 
   void QueueVideoChunk(const VideoChunk& aChunk, bool aForceBlack) {
     gfx::IntSize size = aChunk.mFrame.GetIntrinsicSize();
     if (size.width == 0 || size.height == 0) {
       return;
     }
 
     TimeStamp t = aChunk.mTimeStamp;
     MOZ_ASSERT(!t.IsNull());
 
-    if (!mLastFrameSent.IsNull() && t < mLastFrameSent) {
+    if (!mLastFrameQueuedForPacing.IsNull() && t < mLastFrameQueuedForPacing) {
       // With a direct listener we can have buffered up future frames in
       // mPacingTimer. The source could start sending us frames that start
       // before some previously buffered frames (e.g., a MediaDecoder does that
       // when it seeks). We don't want to just append these to the pacing timer,
       // as frames at different times on the MediaDecoder timeline would get
       // passed to the encoder in a mixed order. We don't have an explicit way
       // of signaling this, so we must detect here if time goes backwards.
       MOZ_LOG(gVideoFrameConverterLog, LogLevel::Debug,
               ("Clearing pacer because of source reset (%.3f)",
-               (mLastFrameSent - t).ToSeconds()));
+               (mLastFrameQueuedForPacing - t).ToSeconds()));
       mPacingTimer->Cancel();
     }
-    mLastFrameSent = t;
+
+    mLastFrameQueuedForPacing = t;
 
     mPacingTimer->WaitUntil(t, __func__)
         ->Then(mTaskQueue, __func__,
                [self = RefPtr<VideoFrameConverter>(this), this,
                 image = RefPtr<layers::Image>(aChunk.mFrame.GetImage()),
                 t = std::move(t), size = std::move(size), aForceBlack] {
-                 ProcessVideoFrame(image, t, size, aForceBlack);
-               });
+                 QueueForProcessing(std::move(image), t, size, aForceBlack);
+               },
+               [] {});
   }
 
   void AddListener(const RefPtr<VideoConverterListener>& aListener) {
     nsresult rv = mTaskQueue->Dispatch(NS_NewRunnableFunction(
         "VideoFrameConverter::AddListener",
         [self = RefPtr<VideoFrameConverter>(this), this, aListener] {
           MOZ_ASSERT(!mListeners.Contains(aListener));
           mListeners.AppendElement(aListener);
@@ -136,47 +139,47 @@ class VideoFrameConverter {
  protected:
   virtual ~VideoFrameConverter() { MOZ_COUNT_DTOR(VideoFrameConverter); }
 
   static void SameFrameTick(nsITimer* aTimer, void* aClosure) {
     MOZ_ASSERT(aClosure);
     VideoFrameConverter* self = static_cast<VideoFrameConverter*>(aClosure);
     MOZ_ASSERT(self->mTaskQueue->IsCurrentThreadIn());
 
-    if (!self->mLastFrame) {
+    if (!self->mLastFrameConverted) {
       return;
     }
 
     for (RefPtr<VideoConverterListener>& listener : self->mListeners) {
-      listener->OnVideoFrameConverted(*self->mLastFrame);
+      listener->OnVideoFrameConverted(*self->mLastFrameConverted);
     }
   }
 
   void VideoFrameConverted(const webrtc::VideoFrame& aVideoFrame) {
     MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
 
     if (mSameFrameTimer) {
       mSameFrameTimer->Cancel();
     }
 
     const int sameFrameIntervalInMs = 1000;
     NS_NewTimerWithFuncCallback(
         getter_AddRefs(mSameFrameTimer), &SameFrameTick, this,
         sameFrameIntervalInMs, nsITimer::TYPE_REPEATING_SLACK,
         "VideoFrameConverter::mSameFrameTimer", mTaskQueue);
 
-    mLastFrame = MakeUnique<webrtc::VideoFrame>(aVideoFrame);
+    mLastFrameConverted = MakeUnique<webrtc::VideoFrame>(aVideoFrame);
 
     for (RefPtr<VideoConverterListener>& listener : mListeners) {
       listener->OnVideoFrameConverted(aVideoFrame);
     }
   }
 
-  void ProcessVideoFrame(layers::Image* aImage, TimeStamp aTime,
-                         gfx::IntSize aSize, bool aForceBlack) {
+  void QueueForProcessing(RefPtr<layers::Image> aImage, TimeStamp aTime,
+                          gfx::IntSize aSize, bool aForceBlack) {
     MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
 
     int32_t serial;
     if (aForceBlack) {
       // Set the last-img check to indicate black.
       // -1 is not a guaranteed invalid serial. See bug 1262134.
       serial = -1;
     } else {
@@ -184,17 +187,47 @@ class VideoFrameConverter {
     }
 
     if (serial == mLastImage) {
       // With a non-direct listener we get passed duplicate frames every ~10ms
       // even with no frame change.
       return;
     }
 
+    if (aTime <= mLastFrameQueuedForProcessing) {
+      MOZ_LOG(gVideoFrameConverterLog, LogLevel::Debug,
+              ("Dropping a frame because time did not progress (%.3f)",
+               (mLastFrameQueuedForProcessing - aTime).ToSeconds()));
+      return;
+    }
+
     mLastImage = serial;
+    mLastFrameQueuedForProcessing = aTime;
+
+    nsresult rv = mTaskQueue->Dispatch(
+        NewRunnableMethod<StoreCopyPassByRRef<RefPtr<layers::Image>>, TimeStamp,
+                          gfx::IntSize, bool>(
+            "VideoFrameConverter::ProcessVideoFrame", this,
+            &VideoFrameConverter::ProcessVideoFrame, std::move(aImage), aTime,
+            aSize, aForceBlack));
+    MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
+    Unused << rv;
+  }
+
+  void ProcessVideoFrame(RefPtr<layers::Image> aImage, TimeStamp aTime,
+                         gfx::IntSize aSize, bool aForceBlack) {
+    MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
+    if (aTime < mLastFrameQueuedForProcessing) {
+      MOZ_LOG(gVideoFrameConverterLog, LogLevel::Debug,
+              ("Dropping a frame that is %.3f seconds behind latest",
+               (mLastFrameQueuedForProcessing - aTime).ToSeconds()));
+      return;
+    }
+
     // See Bug 1529581 - Ideally we'd use the mTimestamp from the chunk
     // passed into QueueVideoChunk rather than the webrtc.org clock here.
     int64_t now = webrtc::Clock::GetRealTimeClock()->TimeInMilliseconds();
 
     if (aForceBlack) {
       // Send a black image.
       rtc::scoped_refptr<webrtc::I420Buffer> buffer =
           mBufferPool.CreateBuffer(aSize.width, aSize.height);
@@ -271,21 +304,22 @@ class VideoFrameConverter {
 
   const RefPtr<TaskQueue> mTaskQueue;
 
   // Used to pace future frames close to their rendering-time. Thread-safe.
   const RefPtr<MediaTimer> mPacingTimer;
 
   // Written and read from the queueing thread (normally MSG).
   // Last time we queued a frame in the pacer
-  TimeStamp mLastFrameSent;
+  TimeStamp mLastFrameQueuedForPacing;
 
   // Accessed only from mTaskQueue.
   int32_t mLastImage;  // Serial number of last processed Image
   webrtc::I420BufferPool mBufferPool;
   nsCOMPtr<nsITimer> mSameFrameTimer;
-  UniquePtr<webrtc::VideoFrame> mLastFrame;
+  TimeStamp mLastFrameQueuedForProcessing;
+  UniquePtr<webrtc::VideoFrame> mLastFrameConverted;
   nsTArray<RefPtr<VideoConverterListener>> mListeners;
 };
 
 }  // namespace mozilla
 
 #endif // VideoFrameConverter_h
--- a/dom/media/gtest/TestVideoFrameConverter.cpp
+++ b/dom/media/gtest/TestVideoFrameConverter.cpp
@@ -131,8 +131,22 @@ TEST_F(VideoFrameConverterTest, Duplicat
   ASSERT_EQ(frames.size(), 2U);
   EXPECT_EQ(frames[0].first().width(), 640);
   EXPECT_EQ(frames[0].first().height(), 480);
   EXPECT_GT(frames[0].second(), future1);
   EXPECT_EQ(frames[1].first().width(), 640);
   EXPECT_EQ(frames[1].first().height(), 480);
   EXPECT_GT(frames[1].second(), now + TimeDuration::FromMilliseconds(1100));
 }
+
+TEST_F(VideoFrameConverterTest, DropsOld) {
+  TimeStamp now = TimeStamp::Now();
+  TimeStamp future1 = now + TimeDuration::FromMilliseconds(1000);
+  TimeStamp future2 = now + TimeDuration::FromMilliseconds(100);
+  mConverter->QueueVideoChunk(GenerateChunk(800, 600, future1), false);
+  mConverter->QueueVideoChunk(GenerateChunk(640, 480, future2), false);
+  auto frames = WaitForNConverted(1);
+  EXPECT_GT(TimeStamp::Now(), future2);
+  ASSERT_EQ(frames.size(), 1U);
+  EXPECT_EQ(frames[0].first().width(), 640);
+  EXPECT_EQ(frames[0].first().height(), 480);
+  EXPECT_GT(frames[0].second(), future2);
+}