Bug 1509316 - p1: move composite listening out of VideoData/VideoSink. r=jya,mattwoodrow
author John Lin <jolin@mozilla.com>
Wed, 09 Oct 2019 23:08:12 +0000
changeset 497073 526d623222f095b06b68d2d71ffab6c50cc8af79
parent 497072 171e651b0afcf434f1f3cceda77051188a0b7f16
child 497074 ee77371cc6e6b5e15601159271f999df01c957b5
push id 97636
push user jolin@mozilla.com
push date Wed, 09 Oct 2019 23:23:08 +0000
treeherder autoland@5976eb8e6be6
reviewers jya, mattwoodrow
bugs 1509316, 1299068
milestone 71.0a1
Bug 1509316 - p1: move composite listening out of VideoData/VideoSink. r=jya,mattwoodrow

On Android, decoded buffers need to be sent back to MediaCodec in order to be rendered and/or recycled. The current mechanism, introduced in bug 1299068, only works for playback (VideoData/VideoSink) but not for WebRTC (VideoFrame/VideoOutput). Move the callback to SurfaceTextureImage, because both VideoData and VideoFrame own one when using MediaCodec, and move the notification to VideoFrameContainer, because both VideoSink and VideoOutput pass frames there for compositing.

Differential Revision: https://phabricator.services.mozilla.com/D45771
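For reviewers skimming the patch, a minimal, self-contained sketch of the new ownership chain follows. It uses std::unique_ptr in place of Mozilla's UniquePtr and free-standing stand-ins for the Gecko classes, so treat it as an illustration of the mechanism, not the patch itself.

// Sketch of the new notification path (simplified; not Gecko code).
// The decoder attaches a fire-once callback to the image it produces, and
// the compositor-facing container fires it when the image becomes current.
#include <cstdio>
#include <memory>
#include <utility>

// gfx/layers/GLImages.h: per-image callback slot.
class SetCurrentCallback {
 public:
  virtual void operator()() = 0;
  virtual ~SetCurrentCallback() = default;
};

class SurfaceTextureImage {
 public:
  void RegisterSetCurrentCallback(std::unique_ptr<SetCurrentCallback> aCb) {
    mCallback = std::move(aCb);
  }
  void OnSetCurrent() {
    if (mCallback) {
      (*mCallback)();     // e.g. release the MediaCodec output buffer
      mCallback.reset();  // fire at most once per image
    }
  }

 private:
  std::unique_ptr<SetCurrentCallback> mCallback;
};

// dom/media/platforms/android/RemoteDataDecoder.cpp: the callback returns
// the output buffer to MediaCodec so the frame is rendered and recycled.
class CompositeListener : public SetCurrentCallback {
 public:
  void operator()() override { std::puts("MediaCodec buffer released"); }
};

// dom/media/VideoFrameContainer.cpp: both VideoSink (playback) and
// VideoOutput (WebRTC) hand frames to the container, which notifies here.
static void NotifySetCurrent(SurfaceTextureImage* aImage) {
  if (aImage) {
    aImage->OnSetCurrent();
  }
}

int main() {
  auto image = std::make_unique<SurfaceTextureImage>();
  image->RegisterSetCurrentCallback(std::make_unique<CompositeListener>());
  NotifySetCurrent(image.get());  // fires the callback once
  NotifySetCurrent(image.get());  // no-op: the callback was consumed
  return 0;
}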
dom/media/MediaData.cpp
dom/media/MediaData.h
dom/media/VideoFrameContainer.cpp
dom/media/platforms/android/RemoteDataDecoder.cpp
gfx/layers/GLImages.h
--- a/dom/media/MediaData.cpp
+++ b/dom/media/MediaData.cpp
@@ -219,35 +219,16 @@ VideoData::VideoData(int64_t aOffset, co
       mNextKeyFrameTime(TimeUnit::Invalid()) {
   MOZ_ASSERT(!mDuration.IsNegative(), "Frame must have non-negative duration.");
   mKeyframe = aKeyframe;
   mTimecode = aTimecode;
 }
 
 VideoData::~VideoData() {}
 
-void VideoData::SetListener(UniquePtr<Listener> aListener) {
-  MOZ_ASSERT(!mSentToCompositor,
-             "Listener should be registered before sending data");
-
-  mListener = std::move(aListener);
-}
-
-void VideoData::MarkSentToCompositor() {
-  if (mSentToCompositor) {
-    return;
-  }
-
-  mSentToCompositor = true;
-  if (mListener != nullptr) {
-    mListener->OnSentToCompositor();
-    mListener = nullptr;
-  }
-}
-
 size_t VideoData::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
   size_t size = aMallocSizeOf(this);
 
  // Currently only PLANAR_YCBCR has a well-defined function for determining
  // its size, so reporting is limited to that type.
   if (mImage && mImage->GetFormat() == ImageFormat::PLANAR_YCBCR) {
     const mozilla::layers::PlanarYCbCrImage* img =
         static_cast<const mozilla::layers::PlanarYCbCrImage*>(mImage.get());
--- a/dom/media/MediaData.h
+++ b/dom/media/MediaData.h
@@ -446,22 +446,16 @@ class VideoData : public MediaData {
     };
 
     Plane mPlanes[3];
     YUVColorSpace mYUVColorSpace = YUVColorSpace::UNKNOWN;
     ColorDepth mColorDepth = ColorDepth::COLOR_8;
     ColorRange mColorRange = ColorRange::LIMITED;
   };
 
-  class Listener {
-   public:
-    virtual void OnSentToCompositor() = 0;
-    virtual ~Listener() {}
-  };
-
   // Constructs a VideoData object. If aImage is nullptr, creates a new Image
   // holding a copy of the YCbCr data passed in aBuffer. If aImage is not
   // nullptr, it's stored as the underlying video image and aBuffer is assumed
   // to point to memory within aImage so no copy is made. aTimecode is a codec
   // specific number representing the timestamp of the frame of video data.
   // Returns nsnull if an error occurs. This may indicate that memory couldn't
   // be allocated to create the VideoData object, or it may indicate some
   // problem with the input data (e.g. negative stride).
@@ -506,34 +500,32 @@ class VideoData : public MediaData {
 
   int32_t mFrameID;
 
   VideoData(int64_t aOffset, const media::TimeUnit& aTime,
             const media::TimeUnit& aDuration, bool aKeyframe,
             const media::TimeUnit& aTimecode, IntSize aDisplay,
             uint32_t aFrameID);
 
-  void SetListener(UniquePtr<Listener> aListener);
-  void MarkSentToCompositor();
+  void MarkSentToCompositor() { mSentToCompositor = true; }
   bool IsSentToCompositor() { return mSentToCompositor; }
 
   void UpdateDuration(const media::TimeUnit& aDuration);
   void UpdateTimestamp(const media::TimeUnit& aTimestamp);
 
   void SetNextKeyFrameTime(const media::TimeUnit& aTime) {
     mNextKeyFrameTime = aTime;
   }
 
   const media::TimeUnit& NextKeyFrameTime() const { return mNextKeyFrameTime; }
 
  protected:
   ~VideoData();
 
   bool mSentToCompositor;
-  UniquePtr<Listener> mListener;
   media::TimeUnit mNextKeyFrameTime;
 };
 
 enum class CryptoScheme : uint8_t {
   None,
   Cenc,
   Cbcs,
 };
--- a/dom/media/VideoFrameContainer.cpp
+++ b/dom/media/VideoFrameContainer.cpp
@@ -1,17 +1,21 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim:set ts=2 sw=2 sts=2 et cindent: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "VideoFrameContainer.h"
+
+#ifdef MOZ_WIDGET_ANDROID
+#include "GLImages.h"  // for SurfaceTextureImage
+#endif
+#include "MediaDecoderOwner.h"
 #include "mozilla/Telemetry.h"
-#include "MediaDecoderOwner.h"
 
 using namespace mozilla::layers;
 
 namespace mozilla {
 static LazyLogModule gVideoFrameContainerLog("VideoFrameContainer");
 #define CONTAINER_LOG(type, msg) MOZ_LOG(gVideoFrameContainerLog, type, msg)
 
 #define NS_DispatchToMainThread(...) CompileError_UseAbstractMainThreadInstead
@@ -73,33 +77,60 @@ void VideoFrameContainer::UpdatePrincipa
     const ImageContainer::FrameID& aFrameID) {
   if (mPendingPrincipalHandle == aPrincipalHandle) {
     return;
   }
   mPendingPrincipalHandle = aPrincipalHandle;
   mFrameIDForPendingPrincipalHandle = aFrameID;
 }
 
+#ifdef MOZ_WIDGET_ANDROID
+static void NotifySetCurrent(Image* aImage) {
+  if (aImage == nullptr) {
+    return;
+  }
+
+  SurfaceTextureImage* image = aImage->AsSurfaceTextureImage();
+  if (image == nullptr) {
+    return;
+  }
+
+  image->OnSetCurrent();
+}
+#endif
+
 void VideoFrameContainer::SetCurrentFrame(const gfx::IntSize& aIntrinsicSize,
                                           Image* aImage,
                                           const TimeStamp& aTargetTime) {
+#ifdef MOZ_WIDGET_ANDROID
+  NotifySetCurrent(aImage);
+#endif
   if (aImage) {
     MutexAutoLock lock(mMutex);
     AutoTArray<ImageContainer::NonOwningImage, 1> imageList;
     imageList.AppendElement(
         ImageContainer::NonOwningImage(aImage, aTargetTime, ++mFrameID));
     SetCurrentFramesLocked(aIntrinsicSize, imageList);
   } else {
     ClearCurrentFrame(aIntrinsicSize);
   }
 }
 
 void VideoFrameContainer::SetCurrentFrames(
     const gfx::IntSize& aIntrinsicSize,
     const nsTArray<ImageContainer::NonOwningImage>& aImages) {
+#ifdef MOZ_WIDGET_ANDROID
+  // When there are multiple frames, only the last one is effective
+  // (see bug 1299068 comment 4). Here we rely on VideoSink and VideoOutput
+  // sending one frame at a time, and warn if they do not.
+  Unused << NS_WARN_IF(aImages.Length() > 1);
+  for (auto& image : aImages) {
+    NotifySetCurrent(image.mImage);
+  }
+#endif
   MutexAutoLock lock(mMutex);
   SetCurrentFramesLocked(aIntrinsicSize, aImages);
 }
 
 void VideoFrameContainer::SetCurrentFramesLocked(
     const gfx::IntSize& aIntrinsicSize,
     const nsTArray<ImageContainer::NonOwningImage>& aImages) {
   mMutex.AssertCurrentThreadOwns();
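The NS_WARN_IF in SetCurrentFrames above guards a subtle invariant from bug 1299068: if several images were submitted in one call, every per-frame callback would fire, yet only the last image is actually composited, so the earlier MediaCodec buffers would be released as "rendered" without ever reaching the screen. A standalone illustration (hypothetical code, not Gecko):

#include <cstdio>
#include <functional>
#include <vector>

// Each frame carries an analog of SetCurrentCallback.
struct Frame {
  int mId;
  std::function<void()> mOnSetCurrent;
};

static void SetCurrentFrames(const std::vector<Frame>& aFrames) {
  for (const Frame& frame : aFrames) {
    frame.mOnSetCurrent();  // fires for every submitted frame...
  }
  if (!aFrames.empty()) {
    // ...but only the last frame ever becomes the current one.
    std::printf("composited frame %d\n", aFrames.back().mId);
  }
}

int main() {
  SetCurrentFrames({{1, [] { std::puts("frame 1 released"); }},
                    {2, [] { std::puts("frame 2 released"); }}});
  return 0;
}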
--- a/dom/media/platforms/android/RemoteDataDecoder.cpp
+++ b/dom/media/platforms/android/RemoteDataDecoder.cpp
@@ -57,23 +57,24 @@ class RenderOrReleaseOutput {
   CodecProxy::GlobalRef mCodec;
   Sample::GlobalRef mSample;
 };
 
 class RemoteVideoDecoder : public RemoteDataDecoder {
  public:
  // Render the output to the surface when the frame is sent
  // to the compositor, or release it if not presented.
-  class CompositeListener : private RenderOrReleaseOutput,
-                            public VideoData::Listener {
+  class CompositeListener
+      : private RenderOrReleaseOutput,
+        public layers::SurfaceTextureImage::SetCurrentCallback {
    public:
     CompositeListener(CodecProxy::Param aCodec, Sample::Param aSample)
         : RenderOrReleaseOutput(aCodec, aSample) {}
 
-    void OnSentToCompositor() override { ReleaseOutput(true); }
+    void operator()(void) override { ReleaseOutput(true); }
   };
 
   class InputInfo {
    public:
     InputInfo() {}
 
     InputInfo(const int64_t aDurationUs, const gfx::IntSize& aImageSize,
               const gfx::IntSize& aDisplaySize)
@@ -250,17 +251,17 @@ class RemoteVideoDecoder : public Remote
     }
 
     AssertOnTaskQueue();
     if (GetState() == State::SHUTDOWN) {
       aSample->Dispose();
       return;
     }
 
-    UniquePtr<VideoData::Listener> releaseSample(
+    UniquePtr<layers::SurfaceTextureImage::SetCurrentCallback> releaseSample(
         new CompositeListener(mJavaDecoder, aSample));
 
     BufferInfo::LocalRef info = aSample->Info();
     MOZ_ASSERT(info);
 
     int32_t flags;
     bool ok = NS_SUCCEEDED(info->Flags(&flags));
 
@@ -286,25 +287,26 @@ class RemoteVideoDecoder : public Remote
       // Ignore output with no corresponding input.
       return;
     }
 
     if (ok && (size > 0 || presentationTimeUs >= 0)) {
       RefPtr<layers::Image> img = new layers::SurfaceTextureImage(
           mSurfaceHandle, inputInfo.mImageSize, false /* NOT continuous */,
           gl::OriginPos::BottomLeft, mConfig.HasAlpha());
+      img->AsSurfaceTextureImage()->RegisterSetCurrentCallback(
+          std::move(releaseSample));
 
       RefPtr<VideoData> v = VideoData::CreateFromImage(
           inputInfo.mDisplaySize, offset,
           TimeUnit::FromMicroseconds(presentationTimeUs),
           TimeUnit::FromMicroseconds(inputInfo.mDurationUs), img,
           !!(flags & MediaCodec::BUFFER_FLAG_SYNC_FRAME),
           TimeUnit::FromMicroseconds(presentationTimeUs));
 
-      v->SetListener(std::move(releaseSample));
       RemoteDataDecoder::UpdateOutputStatus(std::move(v));
     }
 
     if (isEOS) {
       DrainComplete();
     }
   }
 
--- a/gfx/layers/GLImages.h
+++ b/gfx/layers/GLImages.h
@@ -26,16 +26,22 @@ class GLImage : public Image {
 
   GLImage* AsGLImage() override { return this; }
 };
 
 #ifdef MOZ_WIDGET_ANDROID
 
 class SurfaceTextureImage : public GLImage {
  public:
+  class SetCurrentCallback {
+   public:
+    virtual void operator()(void) = 0;
+    virtual ~SetCurrentCallback() {}
+  };
+
   SurfaceTextureImage(AndroidSurfaceTextureHandle aHandle,
                       const gfx::IntSize& aSize, bool aContinuous,
                       gl::OriginPos aOriginPos, bool aHasAlpha = true);
 
   gfx::IntSize GetSize() const override { return mSize; }
   AndroidSurfaceTextureHandle GetHandle() const { return mHandle; }
   bool GetContinuous() const { return mContinuous; }
   gl::OriginPos GetOriginPos() const { return mOriginPos; }
@@ -45,22 +51,34 @@ class SurfaceTextureImage : public GLIma
     // We can implement this, but currently don't want to because it will cause
     // the SurfaceTexture to be permanently bound to the snapshot readback
     // context.
     return nullptr;
   }
 
   SurfaceTextureImage* AsSurfaceTextureImage() override { return this; }
 
+  void RegisterSetCurrentCallback(UniquePtr<SetCurrentCallback> aCallback) {
+    mSetCurrentCallback = std::move(aCallback);
+  }
+
+  void OnSetCurrent() {
+    if (mSetCurrentCallback) {
+      (*mSetCurrentCallback)();
+      mSetCurrentCallback.reset();
+    }
+  }
+
  private:
   AndroidSurfaceTextureHandle mHandle;
   gfx::IntSize mSize;
   bool mContinuous;
   gl::OriginPos mOriginPos;
   const bool mHasAlpha;
+  UniquePtr<SetCurrentCallback> mSetCurrentCallback;
 };
 
 #endif  // MOZ_WIDGET_ANDROID
 
 }  // namespace layers
 }  // namespace mozilla
 
 #endif  // GFX_GLIMAGES_H
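
Finally, a usage sketch of the new GLImages.h hooks as they land in this patch, mirroring what RemoteVideoDecoder::ProcessOutput does. The locals (surfaceHandle, imageSize) are hypothetical, and the snippet only compiles inside Gecko with MOZ_WIDGET_ANDROID, so it is illustrative rather than buildable on its own:

// Producer side: attach a fire-once callback to the image.
class ReleaseOnComposite
    : public layers::SurfaceTextureImage::SetCurrentCallback {
 public:
  void operator()() override {
    // Return the MediaCodec output buffer, rendering it to the surface.
  }
};

RefPtr<layers::SurfaceTextureImage> img = new layers::SurfaceTextureImage(
    surfaceHandle, imageSize, /* aContinuous */ false,
    gl::OriginPos::BottomLeft, /* aHasAlpha */ false);
img->RegisterSetCurrentCallback(MakeUnique<ReleaseOnComposite>());

// Consumer side: when the image becomes the current frame,
// VideoFrameContainer calls
//   img->AsSurfaceTextureImage()->OnSetCurrent();
// which invokes ReleaseOnComposite exactly once and then drops it.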