Bug 1223270: P3. Remove extra copy of YUV buffer on Windows. r?mattwoodrow draft
author Jean-Yves Avenard <jyavenard@mozilla.com>
Wed, 03 May 2017 23:25:54 +0200
changeset 598187 d0eb3d2553a7b8e5b4506e81d5b7095ebbb6ccdd
parent 598186 156b3d7df58c1a13d63df008f1f9fd596efec746
child 598188 630cadf73a04ec214e32db0db846965d6a876b85
push id 65154
push user bmo:jyavenard@mozilla.com
push date Wed, 21 Jun 2017 14:10:59 +0000
reviewers mattwoodrow
bugs 1223270
milestone56.0a1
Bug 1223270: P3. Remove extra copy of YUV buffer on Windows. r?mattwoodrow MozReview-Commit-ID: JgbAwtLNr9e
dom/media/MediaData.cpp
dom/media/MediaData.h
dom/media/platforms/agnostic/VPXDecoder.cpp
dom/media/platforms/agnostic/VPXDecoder.h
dom/media/platforms/ffmpeg/FFmpegDecoderModule.h
dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
--- a/dom/media/MediaData.cpp
+++ b/dom/media/MediaData.cpp
@@ -5,23 +5,29 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "MediaData.h"
 
 #include "ImageContainer.h"
 #include "MediaInfo.h"
 #include "VideoUtils.h"
 #include "YCbCrUtils.h"
+#include "mozilla/layers/ImageBridgeChild.h"
+#include "mozilla/layers/KnowsCompositor.h"
 #include "mozilla/layers/SharedRGBImage.h"
 
 #ifdef MOZ_WIDGET_GONK
 #include <cutils/properties.h>
 #endif
 #include <stdint.h>
 
+#ifdef XP_WIN
+#include "mozilla/layers/D3D11YCbCrImage.h"
+#endif
+
 namespace mozilla {
 
 using namespace mozilla::gfx;
 using layers::ImageContainer;
 using layers::PlanarYCbCrImage;
 using layers::PlanarYCbCrData;
 using media::TimeUnit;
 
@@ -237,29 +243,24 @@ VideoData::UpdateTimestamp(const TimeUni
 
   auto updatedDuration = GetEndTime() - aTimestamp;
   MOZ_ASSERT(!updatedDuration.IsNegative());
 
   mTime = aTimestamp;
   mDuration = updatedDuration;
 }
 
-/* static */
-bool VideoData::SetVideoDataToImage(PlanarYCbCrImage* aVideoImage,
-                                    const VideoInfo& aInfo,
-                                    const YCbCrBuffer &aBuffer,
-                                    const IntRect& aPicture,
-                                    bool aCopyData)
+PlanarYCbCrData
+ConstructPlanarYCbCrData(const VideoInfo& aInfo,
+                         const VideoData::YCbCrBuffer& aBuffer,
+                         const IntRect& aPicture)
 {
-  if (!aVideoImage) {
-    return false;
-  }
-  const YCbCrBuffer::Plane &Y = aBuffer.mPlanes[0];
-  const YCbCrBuffer::Plane &Cb = aBuffer.mPlanes[1];
-  const YCbCrBuffer::Plane &Cr = aBuffer.mPlanes[2];
+  const VideoData::YCbCrBuffer::Plane& Y = aBuffer.mPlanes[0];
+  const VideoData::YCbCrBuffer::Plane& Cb = aBuffer.mPlanes[1];
+  const VideoData::YCbCrBuffer::Plane& Cr = aBuffer.mPlanes[2];
 
   PlanarYCbCrData data;
   data.mYChannel = Y.mData + Y.mOffset;
   data.mYSize = IntSize(Y.mWidth, Y.mHeight);
   data.mYStride = Y.mStride;
   data.mYSkip = Y.mSkip;
   data.mCbChannel = Cb.mData + Cb.mOffset;
   data.mCrChannel = Cr.mData + Cr.mOffset;
@@ -267,16 +268,31 @@ bool VideoData::SetVideoDataToImage(Plan
   data.mCbCrStride = Cb.mStride;
   data.mCbSkip = Cb.mSkip;
   data.mCrSkip = Cr.mSkip;
   data.mPicX = aPicture.x;
   data.mPicY = aPicture.y;
   data.mPicSize = aPicture.Size();
   data.mStereoMode = aInfo.mStereoMode;
   data.mYUVColorSpace = aBuffer.mYUVColorSpace;
+  return data;
+}
+
+/* static */ bool
+VideoData::SetVideoDataToImage(PlanarYCbCrImage* aVideoImage,
+                               const VideoInfo& aInfo,
+                               const YCbCrBuffer &aBuffer,
+                               const IntRect& aPicture,
+                               bool aCopyData)
+{
+  if (!aVideoImage) {
+    return false;
+  }
+
+  PlanarYCbCrData data = ConstructPlanarYCbCrData(aInfo, aBuffer, aPicture);
 
   aVideoImage->SetDelayedConversion(true);
   if (aCopyData) {
     return aVideoImage->CopyData(data);
   } else {
     return aVideoImage->AdoptData(data);
   }
 }
@@ -286,17 +302,18 @@ already_AddRefed<VideoData>
 VideoData::CreateAndCopyData(const VideoInfo& aInfo,
                              ImageContainer* aContainer,
                              int64_t aOffset,
                              const TimeUnit& aTime,
                              const TimeUnit& aDuration,
                              const YCbCrBuffer& aBuffer,
                              bool aKeyframe,
                              const TimeUnit& aTimecode,
-                             const IntRect& aPicture)
+                             const IntRect& aPicture,
+                             layers::KnowsCompositor* aAllocator)
 {
   if (!aContainer) {
     // Create a dummy VideoData with no image. This gives us something to
     // send to media streams if necessary.
     RefPtr<VideoData> v(new VideoData(aOffset,
                                       aTime,
                                       aDuration,
                                       aKeyframe,
@@ -324,16 +341,29 @@ VideoData::CreateAndCopyData(const Video
 #endif
 
   // Currently our decoder only knows how to output to ImageFormat::PLANAR_YCBCR
   // format.
 #ifdef MOZ_WIDGET_GONK
   if (IsYV12Format(Y, Cb, Cr) && !IsInEmulator()) {
     v->mImage = new layers::GrallocImage();
   }
+#elif XP_WIN
+  if (aAllocator && aAllocator->GetCompositorBackendType()
+                    == layers::LayersBackend::LAYERS_D3D11) {
+    RefPtr<layers::D3D11YCbCrImage> d3d11Image = new layers::D3D11YCbCrImage();
+    PlanarYCbCrData data = ConstructPlanarYCbCrData(aInfo, aBuffer, aPicture);
+    if (d3d11Image->SetData(layers::ImageBridgeChild::GetSingleton()
+                            ? layers::ImageBridgeChild::GetSingleton().get()
+                            : aAllocator,
+                            aContainer, data)) {
+      v->mImage = d3d11Image;
+      return v.forget();
+    }
+  }
 #endif
   if (!v->mImage) {
     v->mImage = aContainer->CreatePlanarYCbCrImage();
   }
 
   if (!v->mImage) {
     return nullptr;
   }
--- a/dom/media/MediaData.h
+++ b/dom/media/MediaData.h
@@ -22,16 +22,17 @@
 #include "nsSize.h"
 #include "nsTArray.h"
 
 namespace mozilla {
 
 namespace layers {
 class Image;
 class ImageContainer;
+class KnowsCompositor;
 } // namespace layers
 
 class MediaByteBuffer;
 class TrackInfoSharedPtr;
 
 // AlignedBuffer:
 // Memory allocations are fallibles. Methods return a boolean indicating if
 // memory allocations were successful. Return values should always be checked.
@@ -492,17 +493,18 @@ public:
     const VideoInfo& aInfo,
     ImageContainer* aContainer,
     int64_t aOffset,
     const media::TimeUnit& aTime,
     const media::TimeUnit& aDuration,
     const YCbCrBuffer& aBuffer,
     bool aKeyframe,
     const media::TimeUnit& aTimecode,
-    const IntRect& aPicture);
+    const IntRect& aPicture,
+    layers::KnowsCompositor* aAllocator = nullptr);
 
   static already_AddRefed<VideoData> CreateAndCopyData(
     const VideoInfo& aInfo,
     ImageContainer* aContainer,
     int64_t aOffset,
     const media::TimeUnit& aTime,
     const media::TimeUnit& aDuration,
     const YCbCrBuffer& aBuffer,
--- a/dom/media/platforms/agnostic/VPXDecoder.cpp
+++ b/dom/media/platforms/agnostic/VPXDecoder.cpp
@@ -63,16 +63,17 @@ InitContext(vpx_codec_ctx_t* aCtx,
   if (!dx || vpx_codec_dec_init(aCtx, dx, &config, 0)) {
     return NS_ERROR_FAILURE;
   }
   return NS_OK;
 }
 
 VPXDecoder::VPXDecoder(const CreateDecoderParams& aParams)
   : mImageContainer(aParams.mImageContainer)
+  , mImageAllocator(aParams.mKnowsCompositor)
   , mTaskQueue(aParams.mTaskQueue)
   , mInfo(aParams.VideoConfig())
   , mCodec(MimeTypeToCodec(aParams.VideoConfig().mMimeType))
 {
   MOZ_COUNT_CTOR(VPXDecoder);
   PodZero(&mVPX);
   PodZero(&mVPXAlpha);
 }
@@ -200,17 +201,18 @@ VPXDecoder::ProcessDecode(MediaRawData* 
                                        mImageContainer,
                                        aSample->mOffset,
                                        aSample->mTime,
                                        aSample->mDuration,
                                        b,
                                        aSample->mKeyframe,
                                        aSample->mTimecode,
                                        mInfo.ScaledImageRect(img->d_w,
-                                                             img->d_h));
+                                                             img->d_h),
+                                       mImageAllocator);
     } else {
       VideoData::YCbCrBuffer::Plane alpha_plane;
       alpha_plane.mData = img_alpha->planes[0];
       alpha_plane.mStride = img_alpha->stride[0];
       alpha_plane.mHeight = img_alpha->d_h;
       alpha_plane.mWidth = img_alpha->d_w;
       alpha_plane.mOffset = alpha_plane.mSkip = 0;
       v = VideoData::CreateAndCopyData(mInfo,
--- a/dom/media/platforms/agnostic/VPXDecoder.h
+++ b/dom/media/platforms/agnostic/VPXDecoder.h
@@ -53,16 +53,17 @@ public:
   static nsIntSize GetFrameSize(Span<const uint8_t> aBuffer, Codec aCodec);
 
 private:
   ~VPXDecoder();
   RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample);
   MediaResult DecodeAlpha(vpx_image_t** aImgAlpha, const MediaRawData* aSample);
 
   const RefPtr<layers::ImageContainer> mImageContainer;
+  RefPtr<layers::KnowsCompositor> mImageAllocator;
   const RefPtr<TaskQueue> mTaskQueue;
 
   // VPx decoder state
   vpx_codec_ctx_t mVPX;
 
   // VPx alpha decoder state
   vpx_codec_ctx_t mVPXAlpha;
 
--- a/dom/media/platforms/ffmpeg/FFmpegDecoderModule.h
+++ b/dom/media/platforms/ffmpeg/FFmpegDecoderModule.h
@@ -44,16 +44,17 @@ public:
           CreateDecoderParams::Option::LowLatency) &&
         !MediaPrefs::PDMFFmpegLowLatencyEnabled()) {
       return nullptr;
     }
     RefPtr<MediaDataDecoder> decoder = new FFmpegVideoDecoder<V>(
       mLib,
       aParams.mTaskQueue,
       aParams.VideoConfig(),
+      aParams.mKnowsCompositor,
       aParams.mImageContainer,
       aParams.mOptions.contains(CreateDecoderParams::Option::LowLatency));
     return decoder.forget();
   }
 
   already_AddRefed<MediaDataDecoder>
   CreateAudioDecoder(const CreateDecoderParams& aParams) override
   {
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
@@ -5,16 +5,17 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "FFmpegVideoDecoder.h"
 #include "FFmpegLog.h"
 #include "ImageContainer.h"
 #include "MediaInfo.h"
 #include "MP4Decoder.h"
 #include "VPXDecoder.h"
+#include "mozilla/layers/KnowsCompositor.h"
 
 #include "libavutil/pixfmt.h"
 #if LIBAVCODEC_VERSION_MAJOR < 54
 #define AVPixelFormat PixelFormat
 #define AV_PIX_FMT_YUV420P PIX_FMT_YUV420P
 #define AV_PIX_FMT_YUVJ420P PIX_FMT_YUVJ420P
 #define AV_PIX_FMT_YUV444P PIX_FMT_YUV444P
 #define AV_PIX_FMT_NONE PIX_FMT_NONE
@@ -98,18 +99,20 @@ FFmpegVideoDecoder<LIBAV_VER>::PtsCorrec
   mNumFaultyPts = 0;
   mNumFaultyDts = 0;
   mLastPts = INT64_MIN;
   mLastDts = INT64_MIN;
 }
 
 FFmpegVideoDecoder<LIBAV_VER>::FFmpegVideoDecoder(
   FFmpegLibWrapper* aLib, TaskQueue* aTaskQueue, const VideoInfo& aConfig,
-  ImageContainer* aImageContainer, bool aLowLatency)
+  KnowsCompositor* aAllocator, ImageContainer* aImageContainer,
+  bool aLowLatency)
   : FFmpegDataDecoder(aLib, aTaskQueue, GetCodecId(aConfig.mMimeType))
+  , mImageAllocator(aAllocator)
   , mImageContainer(aImageContainer)
   , mInfo(aConfig)
   , mCodecParser(nullptr)
   , mLastInputDts(INT64_MIN)
   , mLowLatency(aLowLatency)
 {
   MOZ_COUNT_CTOR(FFmpegVideoDecoder);
   // Use a new MediaByteBuffer as the object will be modified during
@@ -344,17 +347,18 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
                                   mImageContainer,
                                   aSample->mOffset,
                                   TimeUnit::FromMicroseconds(pts),
                                   TimeUnit::FromMicroseconds(duration),
                                   b,
                                   !!mFrame->key_frame,
                                   TimeUnit::FromMicroseconds(-1),
                                   mInfo.ScaledImageRect(mFrame->width,
-                                                        mFrame->height));
+                                                        mFrame->height),
+                                  mImageAllocator);
 
   if (!v) {
     return MediaResult(NS_ERROR_OUT_OF_MEMORY,
                        RESULT_DETAIL("image allocation error"));
   }
   aResults.AppendElement(Move(v));
   if (aGotFrame) {
     *aGotFrame = true;
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
@@ -19,21 +19,23 @@ class FFmpegVideoDecoder : public FFmpeg
 {
 };
 
 template <>
 class FFmpegVideoDecoder<LIBAV_VER> : public FFmpegDataDecoder<LIBAV_VER>
 {
   typedef mozilla::layers::Image Image;
   typedef mozilla::layers::ImageContainer ImageContainer;
+  typedef mozilla::layers::KnowsCompositor KnowsCompositor;
   typedef SimpleMap<int64_t> DurationMap;
 
 public:
   FFmpegVideoDecoder(FFmpegLibWrapper* aLib, TaskQueue* aTaskQueue,
                      const VideoInfo& aConfig,
+                     KnowsCompositor* aAllocator,
                      ImageContainer* aImageContainer,
                      bool aLowLatency);
   virtual ~FFmpegVideoDecoder();
 
   RefPtr<InitPromise> Init() override;
   void InitCodecContext() override;
   const char* GetDescriptionName() const override
   {
@@ -64,16 +66,17 @@ private:
    * This method allocates a buffer for FFmpeg's decoder, wrapped in an Image.
    * Currently it only supports Planar YUV420, which appears to be the only
    * non-hardware accelerated image format that FFmpeg's H264 decoder is
    * capable of outputting.
    */
   int AllocateYUV420PVideoBuffer(AVCodecContext* aCodecContext,
                                  AVFrame* aFrame);
 
+  RefPtr<KnowsCompositor> mImageAllocator;
   RefPtr<ImageContainer> mImageContainer;
   VideoInfo mInfo;
 
   // Parser used for VP8 and VP9 decoding.
   AVCodecParserContext* mCodecParser;
 
   class PtsCorrectionContext
   {