Backed out 6 changesets (bug 1495025) for mochitest-webgl2 failures in test_2_conformance2__textures__misc__npot-video-sizing.html. CLOSED TREE
authorBrindusan Cristian <cbrindusan@mozilla.com>
Thu, 04 Oct 2018 15:16:07 +0300
changeset 495295 a22ea1fdf4cb5eea00a73306d2b0d005e26d4a6c
parent 495294 1873f93e51a63549336cce3df1c850e743352a19
child 495296 01634947caab094ec094beda6d039bf189c59ffa
push id9984
push userffxbld-merge
push dateMon, 15 Oct 2018 21:07:35 +0000
treeherdermozilla-beta@183d27ea8570 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
bugs1495025
milestone64.0a1
backs out263d4f72217465543fc7e0ecda66b8db2cd5a1d9
528dbc463c225f21580d651431c08b63913f2da7
25895d283d47e1f2de38776c18f278a82ee330c1
c3b43ee1092e7156c355d8e02911e9ea87b038c8
c548d816019d6e08c8e2ad043494182f9690e1bc
208624601a18d653e0b309798028fe8545b5eb1f
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Backed out 6 changesets (bug 1495025) for mochitest-webgl2 failures in test_2_conformance2__textures__misc__npot-video-sizing.html. CLOSED TREE Backed out changeset 263d4f722174 (bug 1495025) Backed out changeset 528dbc463c22 (bug 1495025) Backed out changeset 25895d283d47 (bug 1495025) Backed out changeset c3b43ee1092e (bug 1495025) Backed out changeset c548d816019d (bug 1495025) Backed out changeset 208624601a18 (bug 1495025)
dom/media/MediaData.cpp
dom/media/platforms/wmf/DXVA2Manager.cpp
dom/media/platforms/wmf/DXVA2Manager.h
dom/media/platforms/wmf/MFTDecoder.cpp
dom/media/platforms/wmf/MFTDecoder.h
dom/media/platforms/wmf/WMFAudioMFTManager.cpp
dom/media/platforms/wmf/WMFVideoMFTManager.cpp
gfx/2d/Tools.h
gfx/2d/Types.h
gfx/layers/D3D11ShareHandleImage.cpp
gfx/layers/D3D11ShareHandleImage.h
gfx/layers/client/TextureClient.cpp
gfx/webrender_bindings/WebRenderTypes.h
--- a/dom/media/MediaData.cpp
+++ b/dom/media/MediaData.cpp
@@ -318,28 +318,24 @@ VideoData::CreateAndCopyData(const Video
                                     aInfo.mDisplay,
                                     0));
 
   // Currently our decoder only knows how to output to ImageFormat::PLANAR_YCBCR
   // format.
 #if XP_WIN
   // We disable this code path on Windows version earlier of Windows 8 due to
   // intermittent crashes with old drivers. See bug 1405110.
-  // D3D11YCbCrImage can only handle YCbCr images using 3 non-interleaved planes
-  // non-zero mSkip value indicates that one of the plane would be interleaved.
-  if (IsWin8OrLater() && !XRE_IsParentProcess() && aAllocator &&
-      aAllocator->SupportsD3D11() && aBuffer.mPlanes[0].mSkip == 0 &&
-      aBuffer.mPlanes[1].mSkip == 0 && aBuffer.mPlanes[2].mSkip == 0) {
+  if (IsWin8OrLater() && !XRE_IsParentProcess() &&
+      aAllocator && aAllocator->SupportsD3D11()) {
     RefPtr<layers::D3D11YCbCrImage> d3d11Image = new layers::D3D11YCbCrImage();
     PlanarYCbCrData data = ConstructPlanarYCbCrData(aInfo, aBuffer, aPicture);
     if (d3d11Image->SetData(layers::ImageBridgeChild::GetSingleton()
-                              ? layers::ImageBridgeChild::GetSingleton().get()
-                              : aAllocator,
-                            aContainer,
-                            data)) {
+                            ? layers::ImageBridgeChild::GetSingleton().get()
+                            : aAllocator,
+                            aContainer, data)) {
       v->mImage = d3d11Image;
       return v.forget();
     }
   }
 #endif
   if (!v->mImage) {
     v->mImage = aContainer->CreatePlanarYCbCrImage();
   }
--- a/dom/media/platforms/wmf/DXVA2Manager.cpp
+++ b/dom/media/platforms/wmf/DXVA2Manager.cpp
@@ -615,21 +615,20 @@ public:
   IUnknown* GetDXVADeviceManager() override;
 
   // Copies a region (aRegion) of the video frame stored in aVideoSample
   // into an image which is returned by aOutImage.
   HRESULT CopyToImage(IMFSample* aVideoSample,
                       const gfx::IntRect& aRegion,
                       Image** aOutImage) override;
 
-  HRESULT CopyToBGRATexture(ID3D11Texture2D* aInTexture,
-                            const GUID& aSubType,
-                            ID3D11Texture2D** aOutTexture) override;
+  virtual HRESULT CopyToBGRATexture(ID3D11Texture2D *aInTexture,
+                                    ID3D11Texture2D** aOutTexture);
 
-  HRESULT ConfigureForSize(IMFMediaType* aInputType) override;
+  HRESULT ConfigureForSize(uint32_t aWidth, uint32_t aHeight) override;
 
   bool IsD3D11() override { return true; }
 
   bool SupportsConfig(IMFMediaType* aType, float aFramerate) override;
 
 private:
   HRESULT CreateFormatConverter();
 
@@ -648,18 +647,17 @@ private:
   RefPtr<MFTDecoder> mTransform;
   RefPtr<D3D11RecycleAllocator> mTextureClientAllocator;
   RefPtr<ID3D11VideoDecoder> mDecoder;
   RefPtr<layers::SyncObjectClient> mSyncObject;
   GUID mDecoderGUID;
   uint32_t mWidth = 0;
   uint32_t mHeight = 0;
   UINT mDeviceManagerToken = 0;
-  RefPtr<IMFMediaType> mInputType;
-  GUID mInputSubType;
+  bool mConfiguredForSize = false;
 };
 
 bool
 D3D11DXVA2Manager::SupportsConfig(IMFMediaType* aType, float aFramerate)
 {
   MOZ_ASSERT(NS_IsMainThread());
   D3D11_VIDEO_DECODER_DESC desc;
   desc.Guid = mDecoderGUID;
@@ -916,68 +914,62 @@ HRESULT
 D3D11DXVA2Manager::CopyToImage(IMFSample* aVideoSample,
                                const gfx::IntRect& aRegion,
                                Image** aOutImage)
 {
   NS_ENSURE_TRUE(aVideoSample, E_POINTER);
   NS_ENSURE_TRUE(aOutImage, E_POINTER);
   MOZ_ASSERT(mTextureClientAllocator);
 
-  RefPtr<D3D11ShareHandleImage> image = new D3D11ShareHandleImage(
-    gfx::IntSize(mWidth, mHeight), aRegion, mInputSubType);
-
-  // Retrieve the DXGI_FORMAT for the current video sample.
-  RefPtr<IMFMediaBuffer> buffer;
-  HRESULT hr = aVideoSample->GetBufferByIndex(0, getter_AddRefs(buffer));
-  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
-  RefPtr<IMFDXGIBuffer> dxgiBuf;
-  hr = buffer->QueryInterface((IMFDXGIBuffer**)getter_AddRefs(dxgiBuf));
-  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
-  RefPtr<ID3D11Texture2D> tex;
-  hr = dxgiBuf->GetResource(__uuidof(ID3D11Texture2D), getter_AddRefs(tex));
-  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
-  D3D11_TEXTURE2D_DESC inDesc;
-  tex->GetDesc(&inDesc);
-
+  RefPtr<D3D11ShareHandleImage> image =
+    new D3D11ShareHandleImage(gfx::IntSize(mWidth, mHeight), aRegion);
   bool ok = image->AllocateTexture(mTextureClientAllocator, mDevice);
   NS_ENSURE_TRUE(ok, E_FAIL);
 
-  RefPtr<TextureClient> client =
-    image->GetTextureClient(ImageBridgeChild::GetSingleton().get());
+  RefPtr<TextureClient> client = image->GetTextureClient(ImageBridgeChild::GetSingleton().get());
   NS_ENSURE_TRUE(client, E_FAIL);
 
+  RefPtr<IDXGIKeyedMutex> mutex;
+  HRESULT hr = S_OK;
   RefPtr<ID3D11Texture2D> texture = image->GetTexture();
-  D3D11_TEXTURE2D_DESC outDesc;
-  texture->GetDesc(&outDesc);
 
-  RefPtr<IDXGIKeyedMutex> mutex;
   texture->QueryInterface((IDXGIKeyedMutex**)getter_AddRefs(mutex));
 
   {
     AutoTextureLock(mutex, hr, 2000);
     if (mutex && (FAILED(hr) || hr == WAIT_TIMEOUT || hr == WAIT_ABANDONED)) {
       return hr;
     }
 
     if (!mutex && mDevice != DeviceManagerDx::Get()->GetCompositorDevice()) {
       NS_ENSURE_TRUE(mSyncObject, E_FAIL);
     }
 
-    if (outDesc.Format == inDesc.Format) {
+    if (client && client->GetFormat() == SurfaceFormat::NV12) {
       // Our video frame is stored in a non-sharable ID3D11Texture2D. We need
       // to create a copy of that frame as a sharable resource, save its share
       // handle, and put that handle into the rendering pipeline.
 
+      RefPtr<IMFMediaBuffer> buffer;
+      hr = aVideoSample->GetBufferByIndex(0, getter_AddRefs(buffer));
+      NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+      RefPtr<IMFDXGIBuffer> dxgiBuf;
+      hr = buffer->QueryInterface((IMFDXGIBuffer**)getter_AddRefs(dxgiBuf));
+      NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+      RefPtr<ID3D11Texture2D> tex;
+      hr = dxgiBuf->GetResource(__uuidof(ID3D11Texture2D), getter_AddRefs(tex));
+      NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
       UINT index;
       dxgiBuf->GetSubresourceIndex(&index);
       mContext->CopySubresourceRegion(texture, 0, 0, 0, 0, tex, index, nullptr);
     } else {
+      // Our video sample is in NV12 format but our output texture is in BGRA.
       // Use MFT to do color conversion.
       hr = E_FAIL;
       mozilla::mscom::EnsureMTA(
         [&]() -> void { hr = mTransform->Input(aVideoSample); });
       NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
       RefPtr<IMFSample> sample;
       hr = CreateOutputSample(sample, texture);
@@ -1000,56 +992,31 @@ D3D11DXVA2Manager::CopyToImage(IMFSample
   }
 
   image.forget(aOutImage);
 
   return S_OK;
 }
 
 HRESULT
-D3D11DXVA2Manager::CopyToBGRATexture(ID3D11Texture2D* aInTexture,
-                                     const GUID& aSubType,
+D3D11DXVA2Manager::CopyToBGRATexture(ID3D11Texture2D *aInTexture,
                                      ID3D11Texture2D** aOutTexture)
 {
   NS_ENSURE_TRUE(aInTexture, E_POINTER);
   NS_ENSURE_TRUE(aOutTexture, E_POINTER);
 
   HRESULT hr;
   RefPtr<ID3D11Texture2D> texture, inTexture;
 
   inTexture = aInTexture;
 
   CD3D11_TEXTURE2D_DESC desc;
   aInTexture->GetDesc(&desc);
-
-  if (!mInputType || desc.Width != mWidth || desc.Height != mHeight) {
-    RefPtr<IMFMediaType> inputType;
-    hr = wmf::MFCreateMediaType(getter_AddRefs(inputType));
-    NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
-    hr = inputType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
-    NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
-    hr = inputType->SetGUID(MF_MT_SUBTYPE, aSubType);
-    NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
-    hr =
-      MFSetAttributeSize(inputType, MF_MT_FRAME_SIZE, desc.Width, desc.Height);
-    NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
-    hr =
-      inputType->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive);
-    NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
-    hr = inputType->SetUINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE);
-    NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
-    hr = ConfigureForSize(inputType);
-    NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-  }
+  hr = ConfigureForSize(desc.Width, desc.Height);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
   RefPtr<IDXGIKeyedMutex> mutex;
   inTexture->QueryInterface((IDXGIKeyedMutex**)getter_AddRefs(mutex));
   // The rest of this function will not work if inTexture implements
   // IDXGIKeyedMutex! In that case case we would have to copy to a
   // non-mutex using texture.
 
   if (mutex) {
@@ -1101,75 +1068,92 @@ D3D11DXVA2Manager::CopyToBGRATexture(ID3
     [&]() -> void { hr = mTransform->Output(&outputSample); });
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
   texture.forget(aOutTexture);
 
   return S_OK;
 }
 
-HRESULT
-D3D11DXVA2Manager::ConfigureForSize(IMFMediaType* aInputType)
+HRESULT ConfigureOutput(IMFMediaType* aOutput, void* aData)
 {
+  HRESULT hr =
+    aOutput->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
-  if (aInputType == mInputType) {
-    // If the media type hasn't changed, don't reconfigure.
+  hr = aOutput->SetUINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  gfx::IntSize* size = reinterpret_cast<gfx::IntSize*>(aData);
+  hr = MFSetAttributeSize(aOutput, MF_MT_FRAME_SIZE, size->width, size->height);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  return S_OK;
+}
+
+HRESULT
+D3D11DXVA2Manager::ConfigureForSize(uint32_t aWidth, uint32_t aHeight)
+{
+  if (mConfiguredForSize && aWidth == mWidth && aHeight == mHeight) {
+    // If the size hasn't changed, don't reconfigure.
     return S_OK;
   }
 
-  UINT32 width = 0, height = 0;
-  HRESULT hr = MFGetAttributeSize(aInputType, MF_MT_FRAME_SIZE, &width, &height);
+  mWidth = aWidth;
+  mHeight = aHeight;
+
+  RefPtr<IMFMediaType> inputType;
+  HRESULT hr = wmf::MFCreateMediaType(getter_AddRefs(inputType));
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  hr = inputType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-  mWidth = width;
-  mHeight = height;
+
+  hr = inputType->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_NV12);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
-  GUID subType = { 0 };
-  hr = aInputType->GetGUID(MF_MT_SUBTYPE, &subType);
+  hr = inputType->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  hr = inputType->SetUINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE);
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
   RefPtr<IMFAttributes> attr;
   mozilla::mscom::EnsureMTA(
     [&]() -> void { attr = mTransform->GetAttributes(); });
   NS_ENSURE_TRUE(attr != nullptr, E_FAIL);
 
   hr = attr->SetUINT32(MF_XVP_PLAYBACK_MODE, TRUE);
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
   hr = attr->SetUINT32(MF_LOW_LATENCY, FALSE);
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
+  hr = MFSetAttributeSize(inputType, MF_MT_FRAME_SIZE, aWidth, aHeight);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
   RefPtr<IMFMediaType> outputType;
   hr = wmf::MFCreateMediaType(getter_AddRefs(outputType));
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
   hr = outputType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
   hr = outputType->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_ARGB32);
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
+  gfx::IntSize size(mWidth, mHeight);
   hr = E_FAIL;
   mozilla::mscom::EnsureMTA([&]() -> void {
-    hr = mTransform->SetMediaTypes(
-      aInputType, outputType, [this](IMFMediaType* aOutput) {
-        HRESULT hr = aOutput->SetUINT32(MF_MT_INTERLACE_MODE,
-                                        MFVideoInterlace_Progressive);
-        NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-        hr = aOutput->SetUINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE);
-        NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-        hr = MFSetAttributeSize(aOutput, MF_MT_FRAME_SIZE, mWidth, mHeight);
-        NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
-        return S_OK;
-      });
+    hr =
+      mTransform->SetMediaTypes(inputType, outputType, ConfigureOutput, &size);
   });
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
-  mInputType = aInputType;
-  mInputSubType = subType;
+  mConfiguredForSize = true;
 
   return S_OK;
 }
 
 bool
 D3D11DXVA2Manager::CanCreateDecoder(const D3D11_VIDEO_DECODER_DESC& aDesc,
                                     const float aFramerate) const
 {
--- a/dom/media/platforms/wmf/DXVA2Manager.h
+++ b/dom/media/platforms/wmf/DXVA2Manager.h
@@ -41,26 +41,25 @@ public:
   // IMFDXGIDeviceManager. It is safe to call this on any thread.
   virtual IUnknown* GetDXVADeviceManager() = 0;
 
   // Creates an Image for the video frame stored in aVideoSample.
   virtual HRESULT CopyToImage(IMFSample* aVideoSample,
                               const gfx::IntRect& aRegion,
                               layers::Image** aOutImage) = 0;
 
-  virtual HRESULT CopyToBGRATexture(ID3D11Texture2D* aInTexture,
-                                    const GUID& aSubType,
+  virtual HRESULT CopyToBGRATexture(ID3D11Texture2D *aInTexture,
                                     ID3D11Texture2D** aOutTexture)
   {
     // Not implemented!
     MOZ_CRASH("CopyToBGRATexture not implemented on this manager.");
     return E_FAIL;
   }
 
-  virtual HRESULT ConfigureForSize(IMFMediaType* aInputType)
+  virtual HRESULT ConfigureForSize(uint32_t aWidth, uint32_t aHeight)
   {
     return S_OK;
   }
 
   virtual bool IsD3D11() { return false; }
 
   virtual ~DXVA2Manager();
 
--- a/dom/media/platforms/wmf/MFTDecoder.cpp
+++ b/dom/media/platforms/wmf/MFTDecoder.cpp
@@ -80,31 +80,27 @@ MFTDecoder::Create(HMODULE aDecoderDLL, 
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
   return S_OK;
 }
 
 HRESULT
 MFTDecoder::SetMediaTypes(IMFMediaType* aInputType,
                           IMFMediaType* aOutputType,
-                          std::function<HRESULT(IMFMediaType*)>&& aCallback)
+                          ConfigureOutputCallback aCallback,
+                          void* aData)
 {
   MOZ_ASSERT(mscom::IsCurrentThreadMTA());
+  mOutputType = aOutputType;
 
   // Set the input type to the one the caller gave us...
   HRESULT hr = mDecoder->SetInputType(0, aInputType, 0);
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
-  GUID currentSubtype = {0};
-  hr = aOutputType->GetGUID(MF_MT_SUBTYPE, &currentSubtype);
-  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
-  hr = SetDecoderOutputType(currentSubtype,
-                            aOutputType,
-                            std::move(aCallback));
+  hr = SetDecoderOutputType(true /* match all attributes */, aCallback, aData);
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
   hr = mDecoder->GetInputStreamInfo(0, &mInputStreamInfo);
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
   hr = SendMFTMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0);
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
@@ -120,70 +116,58 @@ MFTDecoder::GetAttributes()
   MOZ_ASSERT(mscom::IsCurrentThreadMTA());
   RefPtr<IMFAttributes> attr;
   HRESULT hr = mDecoder->GetAttributes(getter_AddRefs(attr));
   NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
   return attr.forget();
 }
 
 HRESULT
-MFTDecoder::FindDecoderOutputType()
-{
-  MOZ_ASSERT(mscom::IsCurrentThreadMTA());
-  MOZ_ASSERT(mOutputType, "SetDecoderTypes must have been called once");
-
-  return FindDecoderOutputTypeWithSubtype(mOutputSubType);
-}
-
-HRESULT
-MFTDecoder::FindDecoderOutputTypeWithSubtype(const GUID& aSubType)
-{
-  return SetDecoderOutputType(
-    aSubType, nullptr, [](IMFMediaType*) { return S_OK; });
-}
-
-HRESULT
-MFTDecoder::SetDecoderOutputType(
-  const GUID& aSubType,
-  IMFMediaType* aTypeToUse,
-  std::function<HRESULT(IMFMediaType*)>&& aCallback)
+MFTDecoder::SetDecoderOutputType(bool aMatchAllAttributes,
+                                 ConfigureOutputCallback aCallback,
+                                 void* aData)
 {
   MOZ_ASSERT(mscom::IsCurrentThreadMTA());
   NS_ENSURE_TRUE(mDecoder != nullptr, E_POINTER);
 
-  if (!aTypeToUse) {
-    aTypeToUse = mOutputType;
-  }
+  GUID currentSubtype = {0};
+  HRESULT hr = mOutputType->GetGUID(MF_MT_SUBTYPE, &currentSubtype);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
   // Iterate the enumerate the output types, until we find one compatible
   // with what we need.
   RefPtr<IMFMediaType> outputType;
   UINT32 typeIndex = 0;
   while (SUCCEEDED(mDecoder->GetOutputAvailableType(
     0, typeIndex++, getter_AddRefs(outputType)))) {
     GUID outSubtype = {0};
-    HRESULT hr = outputType->GetGUID(MF_MT_SUBTYPE, &outSubtype);
+    hr = outputType->GetGUID(MF_MT_SUBTYPE, &outSubtype);
     NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
-    if (aSubType == outSubtype) {
-      hr = aCallback(outputType);
+    BOOL resultMatch = currentSubtype == outSubtype;
+
+    if (resultMatch && aMatchAllAttributes) {
+      hr = mOutputType->Compare(outputType, MF_ATTRIBUTES_MATCH_OUR_ITEMS,
+                                &resultMatch);
       NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
+    }
+    if (resultMatch == TRUE) {
+      if (aCallback) {
+        hr = aCallback(outputType, aData);
+        NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+      }
       hr = mDecoder->SetOutputType(0, outputType, 0);
       NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
       hr = mDecoder->GetOutputStreamInfo(0, &mOutputStreamInfo);
       NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
       mMFTProvidesOutputSamples = IsFlagSet(mOutputStreamInfo.dwFlags,
                                             MFT_OUTPUT_STREAM_PROVIDES_SAMPLES);
 
-      mOutputType = outputType;
-      mOutputSubType = outSubtype;
-
       return S_OK;
     }
     outputType = nullptr;
   }
   return E_FAIL;
 }
 
 HRESULT
--- a/dom/media/platforms/wmf/MFTDecoder.h
+++ b/dom/media/platforms/wmf/MFTDecoder.h
@@ -33,28 +33,29 @@ public:
 
   // Sets the input and output media types. Call after Init().
   //
   // Params:
   //  - aInputType needs at least major and minor types set.
   //  - aOutputType needs at least major and minor types set.
   //    This is used to select the matching output type out
   //    of all the available output types of the MFT.
+  typedef HRESULT (*ConfigureOutputCallback)(IMFMediaType* aOutputType,
+                                             void* aData);
   HRESULT SetMediaTypes(IMFMediaType* aInputType,
                         IMFMediaType* aOutputType,
-                        std::function<HRESULT(IMFMediaType*)>&& aCallback =
-                          [](IMFMediaType* aOutput) { return S_OK; });
+                        ConfigureOutputCallback aCallback = nullptr,
+                        void* aData = nullptr);
 
   // Returns the MFT's IMFAttributes object.
   already_AddRefed<IMFAttributes> GetAttributes();
 
   // Retrieves the media type being output. This may not be valid until
   //  the first sample is decoded.
   HRESULT GetOutputMediaType(RefPtr<IMFMediaType>& aMediaType);
-  const GUID& GetOutputMediaSubType() const { return mOutputSubType; }
 
   // Submits data into the MFT for processing.
   //
   // Returns:
   //  - MF_E_NOTACCEPTING if the decoder can't accept input. The data
   //    must be resubmitted after Output() stops producing output.
   HRESULT Input(const uint8_t* aData,
                 uint32_t aDataSize,
@@ -82,34 +83,28 @@ public:
 
   // Sends a flush message to the MFT. This causes it to discard all
   // input data. Use before seeking.
   HRESULT Flush();
 
   // Sends a message to the MFT.
   HRESULT SendMFTMessage(MFT_MESSAGE_TYPE aMsg, ULONG_PTR aData);
 
-  HRESULT FindDecoderOutputTypeWithSubtype(const GUID& aSubType);
-  HRESULT FindDecoderOutputType();
+  HRESULT SetDecoderOutputType(bool aMatchAllAttributes,
+                               ConfigureOutputCallback aCallback,
+                               void* aData);
 private:
-  // Will search a suitable MediaType using aTypeToUse if set, if not will
-  // use the current mOutputType.
-  HRESULT SetDecoderOutputType(
-    const GUID& aSubType,
-    IMFMediaType* aTypeToUse,
-    std::function<HRESULT(IMFMediaType*)>&& aCallback);
   HRESULT CreateOutputSample(RefPtr<IMFSample>* aOutSample);
 
   MFT_INPUT_STREAM_INFO mInputStreamInfo;
   MFT_OUTPUT_STREAM_INFO mOutputStreamInfo;
 
   RefPtr<IMFTransform> mDecoder;
 
   RefPtr<IMFMediaType> mOutputType;
-  GUID mOutputSubType;
 
   // True if the IMFTransform allocates the samples that it returns.
   bool mMFTProvidesOutputSamples = false;
 
   // True if we need to mark the next sample as a discontinuity.
   bool mDiscontinuity = true;
 };
 
--- a/dom/media/platforms/wmf/WMFAudioMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFAudioMFTManager.cpp
@@ -232,18 +232,19 @@ WMFAudioMFTManager::Output(int64_t aStre
   HRESULT hr;
   int typeChangeCount = 0;
   while (true) {
     hr = mDecoder->Output(&sample);
     if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
       return hr;
     }
     if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
-      hr = mDecoder->FindDecoderOutputType();
-      NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+      hr = mDecoder->SetDecoderOutputType(true /* match all attributes */,
+                                          nullptr,
+                                          nullptr);
       hr = UpdateOutputType();
       NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
       // Catch infinite loops, but some decoders perform at least 2 stream
       // changes on consecutive calls, so be permissive.
       // 100 is arbitrarily > 2.
       NS_ENSURE_TRUE(typeChangeCount < 100, MF_E_TRANSFORM_STREAM_CHANGE);
       ++typeChangeCount;
       continue;
--- a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
@@ -681,17 +681,18 @@ WMFVideoMFTManager::InitInternal()
     // re-do initialization.
     return InitInternal();
   }
 
   LOG("Video Decoder initialized, Using DXVA: %s",
       (mUseHwAccel ? "Yes" : "No"));
 
   if (mUseHwAccel) {
-    hr = mDXVA2Manager->ConfigureForSize(outputType);
+    hr = mDXVA2Manager->ConfigureForSize(mVideoInfo.ImageRect().width,
+                                         mVideoInfo.ImageRect().height);
     NS_ENSURE_TRUE(SUCCEEDED(hr),
                    MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                                RESULT_DETAIL("Fail to configure image size for "
                                              "DXVA2Manager.")));
   } else {
     mYUVColorSpace = GetYUVColorSpace(outputType);
     GetDefaultStride(outputType, mVideoInfo.ImageRect().width, &mVideoStride);
   }
@@ -894,28 +895,18 @@ WMFVideoMFTManager::CreateBasicVideoFram
     hr = twoDBuffer->Lock2D(&data, &stride);
     NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
   } else {
     hr = buffer->Lock(&data, nullptr, nullptr);
     NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
     stride = mVideoStride;
   }
 
-  const GUID& subType = mDecoder->GetOutputMediaSubType();
-  MOZ_DIAGNOSTIC_ASSERT(subType == MFVideoFormat_YV12 ||
-                        subType == MFVideoFormat_P010 ||
-                        subType == MFVideoFormat_P016);
-  const gfx::ColorDepth colorDepth = subType == MFVideoFormat_YV12
-                                       ? gfx::ColorDepth::COLOR_8
-                                       : gfx::ColorDepth::COLOR_16;
-
-  // YV12, planar format (3 planes): [YYYY....][VVVV....][UUUU....]
+  // YV12, planar format: [YYYY....][VVVV....][UUUU....]
   // i.e., Y, then V, then U.
-  // P010, P016 planar format (2 planes) [YYYY....][UVUV...]
-  // See https://docs.microsoft.com/en-us/windows/desktop/medfound/10-bit-and-16-bit-yuv-video-formats
   VideoData::YCbCrBuffer b;
 
   uint32_t videoWidth = mImageSize.width;
   uint32_t videoHeight = mImageSize.height;
 
   // Y (Y') plane
   b.mPlanes[0].mData = data;
   b.mPlanes[0].mStride = stride;
@@ -927,63 +918,43 @@ WMFVideoMFTManager::CreateBasicVideoFram
   MOZ_DIAGNOSTIC_ASSERT(mDecodedImageSize.height % 16 == 0,
                         "decoded height must be 16 bytes aligned");
   uint32_t y_size = stride * mDecodedImageSize.height;
   uint32_t v_size = stride * mDecodedImageSize.height / 4;
   uint32_t halfStride = (stride + 1) / 2;
   uint32_t halfHeight = (videoHeight + 1) / 2;
   uint32_t halfWidth = (videoWidth + 1) / 2;
 
-  if (subType == MFVideoFormat_YV12) {
-    // U plane (Cb)
-    b.mPlanes[1].mData = data + y_size + v_size;
-    b.mPlanes[1].mStride = halfStride;
-    b.mPlanes[1].mHeight = halfHeight;
-    b.mPlanes[1].mWidth = halfWidth;
-    b.mPlanes[1].mOffset = 0;
-    b.mPlanes[1].mSkip = 0;
+  // U plane (Cb)
+  b.mPlanes[1].mData = data + y_size + v_size;
+  b.mPlanes[1].mStride = halfStride;
+  b.mPlanes[1].mHeight = halfHeight;
+  b.mPlanes[1].mWidth = halfWidth;
+  b.mPlanes[1].mOffset = 0;
+  b.mPlanes[1].mSkip = 0;
 
-    // V plane (Cr)
-    b.mPlanes[2].mData = data + y_size;
-    b.mPlanes[2].mStride = halfStride;
-    b.mPlanes[2].mHeight = halfHeight;
-    b.mPlanes[2].mWidth = halfWidth;
-    b.mPlanes[2].mOffset = 0;
-    b.mPlanes[2].mSkip = 0;
-  } else {
-    // U plane (Cb)
-    b.mPlanes[1].mData = data + y_size;
-    b.mPlanes[1].mStride = stride;
-    b.mPlanes[1].mHeight = halfHeight;
-    b.mPlanes[1].mWidth = halfWidth;
-    b.mPlanes[1].mOffset = 0;
-    b.mPlanes[1].mSkip = 1;
-
-    // V plane (Cr)
-    b.mPlanes[2].mData = data + y_size + sizeof(short);
-    b.mPlanes[2].mStride = stride;
-    b.mPlanes[2].mHeight = halfHeight;
-    b.mPlanes[2].mWidth = halfWidth;
-    b.mPlanes[2].mOffset = 0;
-    b.mPlanes[2].mSkip = 1;
-  }
+  // V plane (Cr)
+  b.mPlanes[2].mData = data + y_size;
+  b.mPlanes[2].mStride = halfStride;
+  b.mPlanes[2].mHeight = halfHeight;
+  b.mPlanes[2].mWidth = halfWidth;
+  b.mPlanes[2].mOffset = 0;
+  b.mPlanes[2].mSkip = 0;
 
   // YuvColorSpace
   b.mYUVColorSpace = mYUVColorSpace;
-  b.mColorDepth = colorDepth;
 
   TimeUnit pts = GetSampleTime(aSample);
   NS_ENSURE_TRUE(pts.IsValid(), E_FAIL);
   TimeUnit duration = GetSampleDuration(aSample);
   NS_ENSURE_TRUE(duration.IsValid(), E_FAIL);
   gfx::IntRect pictureRegion =
     mVideoInfo.ScaledImageRect(videoWidth, videoHeight);
 
-  if (colorDepth != gfx::ColorDepth::COLOR_8 || !mKnowsCompositor ||
-      !mKnowsCompositor->SupportsD3D11() || !mIMFUsable) {
+  if (!mKnowsCompositor || !mKnowsCompositor->SupportsD3D11() || !mIMFUsable) {
     RefPtr<VideoData> v =
       VideoData::CreateAndCopyData(mVideoInfo,
                                    mImageContainer,
                                    aStreamOffset,
                                    pts,
                                    duration,
                                    b,
                                    false,
@@ -1084,45 +1055,31 @@ WMFVideoMFTManager::Output(int64_t aStre
   while (true) {
     hr = mDecoder->Output(&sample);
     if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
       return MF_E_TRANSFORM_NEED_MORE_INPUT;
     }
 
     if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
       MOZ_ASSERT(!sample);
-      // Video stream output type change, probably geometric aperture change or
-      // pixel type.
+      // Video stream output type change, probably geometric aperture change.
       // We must reconfigure the decoder output type.
-
-      // Attempt to find an appropriate OutputType, trying in order:
-      // if HW accelerated: NV12, P010, P016
-      // if SW: YV12, P010, P016
-      if (FAILED((hr = (mDecoder->FindDecoderOutputTypeWithSubtype(
-                    mUseHwAccel ? MFVideoFormat_NV12 : MFVideoFormat_YV12)))) &&
-          FAILED((hr = mDecoder->FindDecoderOutputTypeWithSubtype(
-                    MFVideoFormat_P010))) &&
-          FAILED((hr = mDecoder->FindDecoderOutputTypeWithSubtype(
-                    MFVideoFormat_P016)))) {
-        LOG("No suitable output format found");
-        return hr;
-      }
-
-      RefPtr<IMFMediaType> outputType;
-      hr = mDecoder->GetOutputMediaType(outputType);
+      hr = mDecoder->SetDecoderOutputType(false /* do not require all attributes to match */,
+                                          nullptr,
+                                          nullptr);
       NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
-      if (mUseHwAccel) {
-        hr = mDXVA2Manager->ConfigureForSize(outputType);
+      if (!mUseHwAccel) {
+        // The stride may have changed, recheck for it.
+        RefPtr<IMFMediaType> outputType;
+        hr = mDecoder->GetOutputMediaType(outputType);
         NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-      } else {
-        // The stride may have changed, recheck for it.
         mYUVColorSpace = GetYUVColorSpace(outputType);
-        hr = GetDefaultStride(
-          outputType, mVideoInfo.ImageRect().width, &mVideoStride);
+        hr = GetDefaultStride(outputType, mVideoInfo.ImageRect().width,
+                              &mVideoStride);
         NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
         UINT32 width = 0, height = 0;
         hr = MFGetAttributeSize(outputType, MF_MT_FRAME_SIZE, &width, &height);
         NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
         NS_ENSURE_TRUE(width <= MAX_VIDEO_WIDTH, E_FAIL);
         NS_ENSURE_TRUE(height <= MAX_VIDEO_HEIGHT, E_FAIL);
         mDecodedImageSize = gfx::IntSize(width, height);
--- a/gfx/2d/Tools.h
+++ b/gfx/2d/Tools.h
@@ -113,17 +113,16 @@ static inline SurfaceFormat
 SurfaceFormatForColorDepth(ColorDepth aColorDepth)
 {
   SurfaceFormat format = SurfaceFormat::A8;
   switch (aColorDepth) {
     case ColorDepth::COLOR_8:
       break;
     case ColorDepth::COLOR_10:
     case ColorDepth::COLOR_12:
-    case ColorDepth::COLOR_16:
       format = SurfaceFormat::A16;
       break;
     case ColorDepth::UNKNOWN:
       MOZ_ASSERT_UNREACHABLE("invalid color depth value");
   }
   return format;
 }
 
@@ -135,19 +134,16 @@ BitDepthForColorDepth(ColorDepth aColorD
     case ColorDepth::COLOR_8:
       break;
     case ColorDepth::COLOR_10:
       depth = 10;
       break;
     case ColorDepth::COLOR_12:
       depth = 12;
       break;
-    case ColorDepth::COLOR_16:
-      depth = 16;
-      break;
     case ColorDepth::UNKNOWN:
       MOZ_ASSERT_UNREACHABLE("invalid color depth value");
   }
   return depth;
 }
 
 static inline ColorDepth
 ColorDepthForBitDepth(uint8_t aBitDepth)
@@ -157,19 +153,16 @@ ColorDepthForBitDepth(uint8_t aBitDepth)
     case 8:
       break;
     case 10:
       depth = ColorDepth::COLOR_10;
       break;
     case 12:
       depth = ColorDepth::COLOR_12;
       break;
-    case 16:
-      depth = ColorDepth::COLOR_16;
-      break;
     default:
       MOZ_ASSERT_UNREACHABLE("invalid color depth value");
   }
   return depth;
 }
 
 // 10 and 12 bits color depth image are using 16 bits integers for storage
 // As such we need to rescale the value from 10 or 12 bits to 16.
@@ -181,18 +174,16 @@ RescalingFactorForColorDepth(ColorDepth 
     case ColorDepth::COLOR_8:
       break;
     case ColorDepth::COLOR_10:
       factor = 64;
       break;
     case ColorDepth::COLOR_12:
       factor = 16;
       break;
-    case ColorDepth::COLOR_16:
-      break;
     case ColorDepth::UNKNOWN:
       MOZ_ASSERT_UNREACHABLE("invalid color depth value");
   }
   return factor;
 }
 
 static inline bool
 IsOpaqueFormat(SurfaceFormat aFormat) {
--- a/gfx/2d/Types.h
+++ b/gfx/2d/Types.h
@@ -101,17 +101,16 @@ inline bool IsOpaque(SurfaceFormat aForm
     return false;
   }
 }
 
 enum class ColorDepth : uint8_t {
   COLOR_8,
   COLOR_10,
   COLOR_12,
-  COLOR_16,
   UNKNOWN
 };
 
 enum class FilterType : int8_t {
   BLEND = 0,
   TRANSFORM,
   MORPHOLOGY,
   COLOR_MATRIX,
--- a/gfx/layers/D3D11ShareHandleImage.cpp
+++ b/gfx/layers/D3D11ShareHandleImage.cpp
@@ -19,39 +19,32 @@
 #include <memory>
 
 namespace mozilla {
 namespace layers {
 
 using namespace gfx;
 
 D3D11ShareHandleImage::D3D11ShareHandleImage(const gfx::IntSize& aSize,
-                                             const gfx::IntRect& aRect,
-                                             const GUID& aSourceFormat)
-  : Image(nullptr, ImageFormat::D3D11_SHARE_HANDLE_TEXTURE)
-  , mSize(aSize)
-  , mPictureRect(aRect)
-  , mSourceFormat(aSourceFormat)
-
+                                             const gfx::IntRect& aRect)
+ : Image(nullptr, ImageFormat::D3D11_SHARE_HANDLE_TEXTURE),
+   mSize(aSize),
+   mPictureRect(aRect)
 {
 }
 
 bool
-D3D11ShareHandleImage::AllocateTexture(D3D11RecycleAllocator* aAllocator,
-                                       ID3D11Device* aDevice)
+D3D11ShareHandleImage::AllocateTexture(D3D11RecycleAllocator* aAllocator, ID3D11Device* aDevice)
 {
   if (aAllocator) {
-    if (mSourceFormat == MFVideoFormat_NV12 &&
-        gfxPrefs::PDMWMFUseNV12Format() &&
+    if (gfxPrefs::PDMWMFUseNV12Format() &&
         gfx::DeviceManagerDx::Get()->CanUseNV12()) {
-      mTextureClient =
-        aAllocator->CreateOrRecycleClient(gfx::SurfaceFormat::NV12, mSize);
+      mTextureClient = aAllocator->CreateOrRecycleClient(gfx::SurfaceFormat::NV12, mSize);
     } else {
-      mTextureClient =
-        aAllocator->CreateOrRecycleClient(gfx::SurfaceFormat::B8G8R8A8, mSize);
+      mTextureClient = aAllocator->CreateOrRecycleClient(gfx::SurfaceFormat::B8G8R8A8, mSize);
     }
     if (mTextureClient) {
       mTexture = static_cast<D3D11TextureData*>(mTextureClient->GetInternalData())->GetD3D11Texture();
       return true;
     }
     return false;
   } else {
     MOZ_ASSERT(aDevice);
@@ -89,32 +82,31 @@ D3D11ShareHandleImage::GetAsSourceSurfac
   RefPtr<ID3D11Device> device;
   texture->GetDevice(getter_AddRefs(device));
 
   D3D11_TEXTURE2D_DESC desc;
   texture->GetDesc(&desc);
 
   HRESULT hr;
 
-  if (desc.Format != DXGI_FORMAT_B8G8R8A8_UNORM) {
+  if (desc.Format == DXGI_FORMAT_NV12) {
     nsAutoCString error;
     std::unique_ptr<DXVA2Manager> manager(DXVA2Manager::CreateD3D11DXVA(nullptr, error, device));
 
     if (!manager) {
       gfxWarning() << "Failed to create DXVA2 manager!";
       return nullptr;
     }
 
     RefPtr<ID3D11Texture2D> outTexture;
 
-    hr = manager->CopyToBGRATexture(
-      texture, mSourceFormat, getter_AddRefs(outTexture));
+    hr = manager->CopyToBGRATexture(texture, getter_AddRefs(outTexture));
 
     if (FAILED(hr)) {
-      gfxWarning() << "Failed to copy to BGRA texture.";
+      gfxWarning() << "Failed to copy NV12 to BGRA texture.";
       return nullptr;
     }
 
     texture = outTexture;
     texture->GetDesc(&desc);
   }
 
   CD3D11_TEXTURE2D_DESC softDesc(desc.Format, desc.Width, desc.Height);
--- a/gfx/layers/D3D11ShareHandleImage.h
+++ b/gfx/layers/D3D11ShareHandleImage.h
@@ -47,34 +47,31 @@ protected:
 
 // Image class that wraps a ID3D11Texture2D. This class copies the image
 // passed into SetData(), so that it can be accessed from other D3D devices.
 // This class also manages the synchronization of the copy, to ensure the
 // resource is ready to use.
 class D3D11ShareHandleImage final : public Image {
 public:
   D3D11ShareHandleImage(const gfx::IntSize& aSize,
-                        const gfx::IntRect& aRect,
-                        const GUID& aSourceFormat);
+                        const gfx::IntRect& aRect);
   virtual ~D3D11ShareHandleImage() {}
 
-  bool AllocateTexture(D3D11RecycleAllocator* aAllocator,
-                       ID3D11Device* aDevice);
+  bool AllocateTexture(D3D11RecycleAllocator* aAllocator, ID3D11Device* aDevice);
 
   gfx::IntSize GetSize() const override;
   already_AddRefed<gfx::SourceSurface> GetAsSourceSurface() override;
   TextureClient* GetTextureClient(KnowsCompositor* aForwarder) override;
   gfx::IntRect GetPictureRect() const override { return mPictureRect; }
 
   ID3D11Texture2D* GetTexture() const;
 
 private:
   gfx::IntSize mSize;
   gfx::IntRect mPictureRect;
-  const GUID mSourceFormat;
   RefPtr<TextureClient> mTextureClient;
   RefPtr<ID3D11Texture2D> mTexture;
 };
 
 } // namepace layers
 } // namespace mozilla
 
 #endif // GFX_D3DSURFACEIMAGE_H
--- a/gfx/layers/client/TextureClient.cpp
+++ b/gfx/layers/client/TextureClient.cpp
@@ -1890,16 +1890,23 @@ MappedYCbCrChannelData::CopyInto(MappedY
     return true;
   }
 
   MOZ_ASSERT(bytesPerPixel == 1 || bytesPerPixel == 2);
   // slow path
   if (bytesPerPixel == 1) {
     copyData(aDst.data, aDst, data, *this);
   } else if (bytesPerPixel == 2) {
+    if (skip != 0) {
+      // The skip value definition doesn't specify if it's in bytes, or in
+      // "pixels". We will assume the later. There are currently no decoders
+      // returning HDR content with a skip value different than zero anyway.
+      NS_WARNING("skip value non zero for HDR content, please verify code "
+                 "(see bug 1421187)");
+    }
     copyData(reinterpret_cast<uint16_t*>(aDst.data),
              aDst,
              reinterpret_cast<uint16_t*>(data),
              *this);
   }
   return true;
 }
 
--- a/gfx/webrender_bindings/WebRenderTypes.h
+++ b/gfx/webrender_bindings/WebRenderTypes.h
@@ -858,18 +858,16 @@ static inline wr::WrYuvColorSpace ToWrYu
 static inline wr::WrColorDepth ToWrColorDepth(gfx::ColorDepth aColorDepth) {
   switch (aColorDepth) {
     case gfx::ColorDepth::COLOR_8:
       return wr::WrColorDepth::Color8;
     case gfx::ColorDepth::COLOR_10:
       return wr::WrColorDepth::Color10;
     case gfx::ColorDepth::COLOR_12:
       return wr::WrColorDepth::Color12;
-    case gfx::ColorDepth::COLOR_16:
-      return wr::WrColorDepth::Color16;
     default:
       MOZ_ASSERT_UNREACHABLE("Tried to convert invalid color depth value.");
   }
   return wr::WrColorDepth::Color8;
 }
 
 static inline wr::SyntheticItalics DegreesToSyntheticItalics(float aDegrees) {
   wr::SyntheticItalics synthetic_italics;