Bug 1495025 - P5. Add Windows P010 and P016 support for software decoder r=cpearce
☠☠ backed out by 9f2c6ae03c6d ☠☠
author: Jean-Yves Avenard <jyavenard@mozilla.com>
date: Thu, 04 Oct 2018 09:41:58 +0000
changeset: 498369:68efa7588ba8
parent: 498368:9f59a50dcc6d
child: 498370:24d67618f6b9
push id: 1864
push user: ffxbld-merge
push date: Mon, 03 Dec 2018 15:51:40 +0000
treeherder: mozilla-release@f040763d99ad
reviewers: cpearce
bugs: 1495025
milestone: 64.0a1
Bug 1495025 - P5. Add Windows P010 and P016 support for software decoder r=cpearce

As we have neither an IMF nor a D3D11 NV12 image, we always require a full
copy of the data, during which the interleaved chroma channels are
de-interleaved.

Depends on D7316

Differential Revision: https://phabricator.services.mozilla.com/D7318
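The full copy mentioned above is what de-interleaves the chroma. As a rough
illustration (a hypothetical standalone helper, not code from this patch),
splitting a P010/P016-style interleaved UVUV... plane, 16 bits per sample,
into separate Cb and Cr planes looks like this; in the patch itself the same
effect falls out of the mSkip fields of VideoData::YCbCrBuffer and the
generic copy in TextureClient:

#include <cstddef>
#include <cstdint>

// Sketch: split one interleaved chroma plane (UVUV...) into two packed
// planes. Strides are in 16-bit samples, not bytes.
static void DeinterleaveChroma(const uint16_t* aUV, size_t aUVStride,
                               uint16_t* aU, uint16_t* aV, size_t aOutStride,
                               size_t aWidth, size_t aHeight)
{
  for (size_t row = 0; row < aHeight; row++) {
    const uint16_t* src = aUV + row * aUVStride;
    uint16_t* u = aU + row * aOutStride;
    uint16_t* v = aV + row * aOutStride;
    for (size_t col = 0; col < aWidth; col++) {
      u[col] = src[2 * col];      // Cb samples sit at even positions,
      v[col] = src[2 * col + 1];  // Cr samples at odd positions.
    }
  }
}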
dom/media/MediaData.cpp
dom/media/platforms/wmf/MFTDecoder.cpp
dom/media/platforms/wmf/MFTDecoder.h
dom/media/platforms/wmf/WMFVideoMFTManager.cpp
gfx/layers/client/TextureClient.cpp
--- a/dom/media/MediaData.cpp
+++ b/dom/media/MediaData.cpp
@@ -318,24 +318,28 @@ VideoData::CreateAndCopyData(const Video
                                     aInfo.mDisplay,
                                     0));
 
   // Currently our decoder only knows how to output to ImageFormat::PLANAR_YCBCR
   // format.
 #if XP_WIN
  // We disable this code path on Windows versions earlier than Windows 8
  // due to intermittent crashes with old drivers. See bug 1405110.
-  if (IsWin8OrLater() && !XRE_IsParentProcess() &&
-      aAllocator && aAllocator->SupportsD3D11()) {
+  // D3D11YCbCrImage can only handle YCbCr images using 3 non-interleaved planes;
+  // a non-zero mSkip value indicates that one of the planes is interleaved.
+  if (IsWin8OrLater() && !XRE_IsParentProcess() && aAllocator &&
+      aAllocator->SupportsD3D11() && aBuffer.mPlanes[0].mSkip == 0 &&
+      aBuffer.mPlanes[1].mSkip == 0 && aBuffer.mPlanes[2].mSkip == 0) {
     RefPtr<layers::D3D11YCbCrImage> d3d11Image = new layers::D3D11YCbCrImage();
     PlanarYCbCrData data = ConstructPlanarYCbCrData(aInfo, aBuffer, aPicture);
     if (d3d11Image->SetData(layers::ImageBridgeChild::GetSingleton()
-                            ? layers::ImageBridgeChild::GetSingleton().get()
-                            : aAllocator,
-                            aContainer, data)) {
+                              ? layers::ImageBridgeChild::GetSingleton().get()
+                              : aAllocator,
+                            aContainer,
+                            data)) {
       v->mImage = d3d11Image;
       return v.forget();
     }
   }
 #endif
   if (!v->mImage) {
     v->mImage = aContainer->CreatePlanarYCbCrImage();
   }
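
The new mSkip check above relies on the following addressing convention (an
assumption inferred from how this patch uses the field, stated here for
clarity): within a row, consecutive samples of a plane are (1 + mSkip)
samples apart, so mSkip == 0 means tightly packed and mSkip == 1 describes
one channel of an interleaved UVUV... plane, which D3D11YCbCrImage cannot
represent.

#include <cstddef>
#include <cstdint>

// Sketch of the plane addressing implied by mSkip: sample i of a row lives
// at aRow[i * (1 + aSkip)]. With aSkip == 1 this walks every other sample,
// i.e. one channel of an interleaved chroma plane.
static inline uint16_t SampleAt(const uint16_t* aRow, size_t aIndex,
                                size_t aSkip)
{
  return aRow[aIndex * (1 + aSkip)];
}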
--- a/dom/media/platforms/wmf/MFTDecoder.cpp
+++ b/dom/media/platforms/wmf/MFTDecoder.cpp
@@ -83,17 +83,16 @@ MFTDecoder::Create(HMODULE aDecoderDLL, 
 }
 
 HRESULT
 MFTDecoder::SetMediaTypes(IMFMediaType* aInputType,
                           IMFMediaType* aOutputType,
                           std::function<HRESULT(IMFMediaType*)>&& aCallback)
 {
   MOZ_ASSERT(mscom::IsCurrentThreadMTA());
-  mOutputType = aOutputType;
 
   // Set the input type to the one the caller gave us...
   HRESULT hr = mDecoder->SetInputType(0, aInputType, 0);
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
   GUID currentSubtype = {0};
   hr = aOutputType->GetGUID(MF_MT_SUBTYPE, &currentSubtype);
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
@@ -127,20 +126,17 @@ MFTDecoder::GetAttributes()
 }
 
 HRESULT
 MFTDecoder::FindDecoderOutputType(bool aMatchAllAttributes)
 {
   MOZ_ASSERT(mscom::IsCurrentThreadMTA());
   MOZ_ASSERT(mOutputType, "SetDecoderTypes must have been called once");
 
-  GUID currentSubtype = {0};
-  HRESULT hr = mOutputType->GetGUID(MF_MT_SUBTYPE, &currentSubtype);
-  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-  return FindDecoderOutputTypeWithSubtype(currentSubtype, aMatchAllAttributes);
+  return FindDecoderOutputTypeWithSubtype(mOutputSubType, aMatchAllAttributes);
 }
 
 HRESULT
 MFTDecoder::FindDecoderOutputTypeWithSubtype(const GUID& aSubType,
                                              bool aMatchAllAttributes)
 {
   return SetDecoderOutputType(
     aSubType, nullptr, aMatchAllAttributes, [](IMFMediaType*) { return S_OK; });
@@ -186,16 +182,17 @@ MFTDecoder::SetDecoderOutputType(
 
       hr = mDecoder->GetOutputStreamInfo(0, &mOutputStreamInfo);
       NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
       mMFTProvidesOutputSamples = IsFlagSet(mOutputStreamInfo.dwFlags,
                                             MFT_OUTPUT_STREAM_PROVIDES_SAMPLES);
 
       mOutputType = outputType;
+      mOutputSubType = outSubtype;
 
       return S_OK;
     }
     outputType = nullptr;
   }
   return E_FAIL;
 }
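
For context, this is the Media Foundation query that the caching above
removes from FindDecoderOutputType(); the patch performs it once when the
output type is negotiated and stores the result in mOutputSubType. A minimal
sketch using the documented IMFMediaType API:

#include <mfapi.h>
#include <mfidl.h>

// Sketch: read the subtype GUID back from a negotiated IMFMediaType.
// Previously FindDecoderOutputType() repeated this on every call.
static HRESULT QuerySubtype(IMFMediaType* aType, GUID& aOutSubtype)
{
  return aType->GetGUID(MF_MT_SUBTYPE, &aOutSubtype);
}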
 
--- a/dom/media/platforms/wmf/MFTDecoder.h
+++ b/dom/media/platforms/wmf/MFTDecoder.h
@@ -44,16 +44,17 @@ public:
                           [](IMFMediaType* aOutput) { return S_OK; });
 
   // Returns the MFT's IMFAttributes object.
   already_AddRefed<IMFAttributes> GetAttributes();
 
   // Retrieves the media type being output. This may not be valid until
   //  the first sample is decoded.
   HRESULT GetOutputMediaType(RefPtr<IMFMediaType>& aMediaType);
+  const GUID& GetOutputMediaSubType() const { return mOutputSubType; }
 
   // Submits data into the MFT for processing.
   //
   // Returns:
   //  - MF_E_NOTACCEPTING if the decoder can't accept input. The data
   //    must be resubmitted after Output() stops producing output.
   HRESULT Input(const uint8_t* aData,
                 uint32_t aDataSize,
@@ -100,16 +101,17 @@ private:
   HRESULT CreateOutputSample(RefPtr<IMFSample>* aOutSample);
 
   MFT_INPUT_STREAM_INFO mInputStreamInfo;
   MFT_OUTPUT_STREAM_INFO mOutputStreamInfo;
 
   RefPtr<IMFTransform> mDecoder;
 
   RefPtr<IMFMediaType> mOutputType;
+  GUID mOutputSubType;
 
   // True if the IMFTransform allocates the samples that it returns.
   bool mMFTProvidesOutputSamples = false;
 
   // True if we need to mark the next sample as a discontinuity.
   bool mDiscontinuity = true;
 };
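
A hypothetical caller of the new accessor, mirroring what the
WMFVideoMFTManager change below does with it (this sketch assumes the
surrounding Mozilla types MFTDecoder and gfx::ColorDepth are in scope;
MFVideoFormat_YV12 comes from mfapi.h):

// Sketch: map the negotiated output subtype to a color depth. YV12 carries
// 8-bit samples; P010 and P016 carry 16-bit samples.
static gfx::ColorDepth DepthForOutput(const MFTDecoder& aDecoder)
{
  return aDecoder.GetOutputMediaSubType() == MFVideoFormat_YV12
           ? gfx::ColorDepth::COLOR_8
           : gfx::ColorDepth::COLOR_16;
}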
 
--- a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
@@ -894,18 +894,28 @@ WMFVideoMFTManager::CreateBasicVideoFram
     hr = twoDBuffer->Lock2D(&data, &stride);
     NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
   } else {
     hr = buffer->Lock(&data, nullptr, nullptr);
     NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
     stride = mVideoStride;
   }
 
-  // YV12, planar format: [YYYY....][VVVV....][UUUU....]
+  const GUID& subType = mDecoder->GetOutputMediaSubType();
+  MOZ_DIAGNOSTIC_ASSERT(subType == MFVideoFormat_YV12 ||
+                        subType == MFVideoFormat_P010 ||
+                        subType == MFVideoFormat_P016);
+  const gfx::ColorDepth colorDepth = subType == MFVideoFormat_YV12
+                                       ? gfx::ColorDepth::COLOR_8
+                                       : gfx::ColorDepth::COLOR_16;
+
+  // YV12, planar format (3 planes): [YYYY....][VVVV....][UUUU....]
   // i.e., Y, then V, then U.
+  // P010, P016, planar format (2 planes): [YYYY....][UVUV....]
+  // See https://docs.microsoft.com/en-us/windows/desktop/medfound/10-bit-and-16-bit-yuv-video-formats
   VideoData::YCbCrBuffer b;
 
   uint32_t videoWidth = mImageSize.width;
   uint32_t videoHeight = mImageSize.height;
 
   // Y (Y') plane
   b.mPlanes[0].mData = data;
   b.mPlanes[0].mStride = stride;
@@ -917,43 +927,63 @@ WMFVideoMFTManager::CreateBasicVideoFram
  MOZ_DIAGNOSTIC_ASSERT(mDecodedImageSize.height % 16 == 0,
                        "decoded height must be a multiple of 16");
   uint32_t y_size = stride * mDecodedImageSize.height;
   uint32_t v_size = stride * mDecodedImageSize.height / 4;
   uint32_t halfStride = (stride + 1) / 2;
   uint32_t halfHeight = (videoHeight + 1) / 2;
   uint32_t halfWidth = (videoWidth + 1) / 2;
 
-  // U plane (Cb)
-  b.mPlanes[1].mData = data + y_size + v_size;
-  b.mPlanes[1].mStride = halfStride;
-  b.mPlanes[1].mHeight = halfHeight;
-  b.mPlanes[1].mWidth = halfWidth;
-  b.mPlanes[1].mOffset = 0;
-  b.mPlanes[1].mSkip = 0;
+  if (subType == MFVideoFormat_YV12) {
+    // U plane (Cb)
+    b.mPlanes[1].mData = data + y_size + v_size;
+    b.mPlanes[1].mStride = halfStride;
+    b.mPlanes[1].mHeight = halfHeight;
+    b.mPlanes[1].mWidth = halfWidth;
+    b.mPlanes[1].mOffset = 0;
+    b.mPlanes[1].mSkip = 0;
 
-  // V plane (Cr)
-  b.mPlanes[2].mData = data + y_size;
-  b.mPlanes[2].mStride = halfStride;
-  b.mPlanes[2].mHeight = halfHeight;
-  b.mPlanes[2].mWidth = halfWidth;
-  b.mPlanes[2].mOffset = 0;
-  b.mPlanes[2].mSkip = 0;
+    // V plane (Cr)
+    b.mPlanes[2].mData = data + y_size;
+    b.mPlanes[2].mStride = halfStride;
+    b.mPlanes[2].mHeight = halfHeight;
+    b.mPlanes[2].mWidth = halfWidth;
+    b.mPlanes[2].mOffset = 0;
+    b.mPlanes[2].mSkip = 0;
+  } else {
+    // U plane (Cb)
+    b.mPlanes[1].mData = data + y_size;
+    b.mPlanes[1].mStride = stride;
+    b.mPlanes[1].mHeight = halfHeight;
+    b.mPlanes[1].mWidth = halfWidth;
+    b.mPlanes[1].mOffset = 0;
+    b.mPlanes[1].mSkip = 1;
+
+    // V plane (Cr)
+    b.mPlanes[2].mData = data + y_size + sizeof(short);
+    b.mPlanes[2].mStride = stride;
+    b.mPlanes[2].mHeight = halfHeight;
+    b.mPlanes[2].mWidth = halfWidth;
+    b.mPlanes[2].mOffset = 0;
+    b.mPlanes[2].mSkip = 1;
+  }
 
   // YuvColorSpace
   b.mYUVColorSpace = mYUVColorSpace;
+  b.mColorDepth = colorDepth;
 
   TimeUnit pts = GetSampleTime(aSample);
   NS_ENSURE_TRUE(pts.IsValid(), E_FAIL);
   TimeUnit duration = GetSampleDuration(aSample);
   NS_ENSURE_TRUE(duration.IsValid(), E_FAIL);
   gfx::IntRect pictureRegion =
     mVideoInfo.ScaledImageRect(videoWidth, videoHeight);
 
-  if (!mKnowsCompositor || !mKnowsCompositor->SupportsD3D11() || !mIMFUsable) {
+  if (colorDepth != gfx::ColorDepth::COLOR_8 || !mKnowsCompositor ||
+      !mKnowsCompositor->SupportsD3D11() || !mIMFUsable) {
     RefPtr<VideoData> v =
       VideoData::CreateAndCopyData(mVideoInfo,
                                    mImageContainer,
                                    aStreamOffset,
                                    pts,
                                    duration,
                                    b,
                                    false,
@@ -1060,25 +1090,24 @@ WMFVideoMFTManager::Output(int64_t aStre
     if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
       MOZ_ASSERT(!sample);
       // Video stream output type change, probably geometric aperture change or
       // pixel type.
       // We must reconfigure the decoder output type.
 
       // Attempt to find an appropriate OutputType, trying in order:
       // if HW accelerated: NV12, P010, P016
-      // if SW: YV12
+      // if SW: YV12, P010, P016
       if (FAILED((hr = (mDecoder->FindDecoderOutputTypeWithSubtype(
                     mUseHwAccel ? MFVideoFormat_NV12 : MFVideoFormat_YV12,
                     false)))) &&
-          (!mUseHwAccel ||
-           (FAILED((hr = mDecoder->FindDecoderOutputTypeWithSubtype(
-                      MFVideoFormat_P010, false))) &&
-            FAILED((hr = mDecoder->FindDecoderOutputTypeWithSubtype(
-                      MFVideoFormat_P016, false)))))) {
+          FAILED((hr = mDecoder->FindDecoderOutputTypeWithSubtype(
+                    MFVideoFormat_P010, false))) &&
+          FAILED((hr = mDecoder->FindDecoderOutputTypeWithSubtype(
+                    MFVideoFormat_P016, false)))) {
         LOG("No suitable output format found");
         return hr;
       }
 
       RefPtr<IMFMediaType> outputType;
       hr = mDecoder->GetOutputMediaType(outputType);
       NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
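
The P010/P016 branch above encodes the layout described in the linked
Microsoft documentation: one packed luma plane followed by one interleaved
chroma plane, with Cb starting right after the luma and Cr one 16-bit sample
later, both read with a full-row stride and mSkip == 1. A standalone sketch
of the pointer arithmetic (hypothetical helper, byte offsets as in the
patch):

#include <cstddef>
#include <cstdint>

struct P01xPlanes {
  const uint8_t* mY;   // packed luma, 16 bits per sample
  const uint8_t* mCb;  // even samples of the UVUV... plane
  const uint8_t* mCr;  // odd samples, one sample (2 bytes) after Cb
};

static P01xPlanes MapP01x(const uint8_t* aData, size_t aStrideBytes,
                          size_t aDecodedHeight)
{
  const size_t ySize = aStrideBytes * aDecodedHeight;
  return P01xPlanes{ aData, aData + ySize, aData + ySize + sizeof(uint16_t) };
}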
 
--- a/gfx/layers/client/TextureClient.cpp
+++ b/gfx/layers/client/TextureClient.cpp
@@ -1890,23 +1890,16 @@ MappedYCbCrChannelData::CopyInto(MappedY
     return true;
   }
 
   MOZ_ASSERT(bytesPerPixel == 1 || bytesPerPixel == 2);
   // slow path
   if (bytesPerPixel == 1) {
     copyData(aDst.data, aDst, data, *this);
   } else if (bytesPerPixel == 2) {
-    if (skip != 0) {
-      // The skip value definition doesn't specify if it's in bytes, or in
-      // "pixels". We will assume the later. There are currently no decoders
-      // returning HDR content with a skip value different than zero anyway.
-      NS_WARNING("skip value non zero for HDR content, please verify code "
-                 "(see bug 1421187)");
-    }
     copyData(reinterpret_cast<uint16_t*>(aDst.data),
              aDst,
              reinterpret_cast<uint16_t*>(data),
              *this);
   }
   return true;
 }
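
Removing the warning above commits to interpreting skip in samples rather
than bytes for the 2-bytes-per-pixel path, which matches both the 8-bit path
and the mSkip == 1 planes now produced by WMFVideoMFTManager. A minimal
sketch of such a copy (hypothetical helper, simplified from what copyData
does):

#include <cstddef>
#include <cstdint>

// Sketch: copy one plane while honoring a source skip measured in samples.
// With T = uint16_t and aSrcSkip == 1 this de-interleaves a P010/P016
// chroma channel into a tightly packed destination.
template <typename T>
static void CopyPlane(T* aDst, size_t aDstStride,
                      const T* aSrc, size_t aSrcStride, size_t aSrcSkip,
                      size_t aWidth, size_t aHeight)
{
  for (size_t row = 0; row < aHeight; row++) {
    const T* src = aSrc + row * aSrcStride;
    T* dst = aDst + row * aDstStride;
    for (size_t col = 0; col < aWidth; col++) {
      dst[col] = src[col * (1 + aSrcSkip)];
    }
  }
}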