Bug 1300682 - Part 5: Use KnowsCompositor to initialize decoders and create one for VideoDecoderParent to use. r=nical,jya
author: Matt Woodrow <mwoodrow@mozilla.com>
date: Fri, 07 Oct 2016 21:13:33 +1300
changeset: 316888 7bafa704f35031912833ad1991cbfc5ab46aa58e
parent: 316887 19e24514eb8f6101cf52fe3f2c06a75e7f244b39
child: 316889 4b28c86a0536bad836c17597e57d1d5a4c3eac35
push id: 82560
push user: mwoodrow@mozilla.com
push date: Fri, 07 Oct 2016 08:13:28 +0000
treeherder: mozilla-inbound@4b28c86a0536
reviewers: nical, jya
bugs: 1300682
milestone: 52.0a1
dom/base/nsDOMWindowUtils.cpp
dom/media/MediaFormatReader.cpp
dom/media/MediaFormatReader.h
dom/media/fmp4/MP4Decoder.cpp
dom/media/fmp4/MP4Decoder.h
dom/media/ipc/PVideoDecoder.ipdl
dom/media/ipc/RemoteVideoDecoder.cpp
dom/media/ipc/VideoDecoderChild.cpp
dom/media/ipc/VideoDecoderChild.h
dom/media/ipc/VideoDecoderParent.cpp
dom/media/ipc/VideoDecoderParent.h
dom/media/platforms/PlatformDecoderModule.h
dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
dom/media/platforms/wmf/DXVA2Manager.cpp
dom/media/platforms/wmf/DXVA2Manager.h
dom/media/platforms/wmf/WMFDecoderModule.cpp
dom/media/platforms/wmf/WMFVideoMFTManager.cpp
dom/media/platforms/wmf/WMFVideoMFTManager.h
dom/media/platforms/wrappers/H264Converter.cpp
dom/media/platforms/wrappers/H264Converter.h
gfx/layers/ipc/VideoBridgeChild.h
--- a/dom/base/nsDOMWindowUtils.cpp
+++ b/dom/base/nsDOMWindowUtils.cpp
@@ -2335,17 +2335,17 @@ nsDOMWindowUtils::GetSupportsHardwareH26
     do_QueryInterface(window->GetCurrentInnerWindow());
   NS_ENSURE_STATE(parentObject);
 #ifdef MOZ_FMP4
   nsCOMPtr<nsIWidget> widget = GetWidget();
   NS_ENSURE_STATE(widget);
   LayerManager *mgr = widget->GetLayerManager();
   NS_ENSURE_STATE(mgr);
   RefPtr<Promise> promise =
-    MP4Decoder::IsVideoAccelerated(mgr->GetCompositorBackendType(), parentObject);
+    MP4Decoder::IsVideoAccelerated(mgr->AsShadowForwarder(), parentObject);
   NS_ENSURE_STATE(promise);
   aPromise.setObject(*promise->PromiseObj());
 #else
   ErrorResult rv;
   RefPtr<Promise> promise = Promise::Create(parentObject, rv);
   if (rv.Failed()) {
     return rv.StealNSResult();
   }
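
MP4Decoder::IsVideoAccelerated now takes the compositor handle rather than the bare backend enum. The call-site pattern, as a minimal sketch assuming `widget` and `parentObject` have already been validated as in the surrounding function:

  // The ShadowLayerForwarder returned by AsShadowForwarder() implements
  // layers::KnowsCompositor, so it carries the full TextureFactoryIdentifier
  // rather than just the LayersBackend enum.
  LayerManager* mgr = widget->GetLayerManager();
  RefPtr<Promise> promise =
    MP4Decoder::IsVideoAccelerated(mgr->AsShadowForwarder(), parentObject);
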
--- a/dom/media/MediaFormatReader.cpp
+++ b/dom/media/MediaFormatReader.cpp
@@ -14,16 +14,17 @@
 #include "Layers.h"
 #include "MediaData.h"
 #include "MediaInfo.h"
 #include "MediaFormatReader.h"
 #include "MediaResource.h"
 #include "mozilla/SharedThreadPool.h"
 #include "VideoUtils.h"
 #include "VideoFrameContainer.h"
+#include "mozilla/layers/ShadowLayers.h"
 
 #include <algorithm>
 
 using namespace mozilla::media;
 
 using mozilla::layers::Image;
 using mozilla::layers::LayerManager;
 using mozilla::layers::LayersBackend;
@@ -51,28 +52,26 @@ TrackTypeToStr(TrackInfo::TrackType aTra
     return "Text";
   default:
     return "Unknown";
   }
 }
 
 MediaFormatReader::MediaFormatReader(AbstractMediaDecoder* aDecoder,
                                      MediaDataDemuxer* aDemuxer,
-                                     VideoFrameContainer* aVideoFrameContainer,
-                                     layers::LayersBackend aLayersBackend)
+                                     VideoFrameContainer* aVideoFrameContainer)
   : MediaDecoderReader(aDecoder)
   , mAudio(this, MediaData::AUDIO_DATA,
            Preferences::GetUint("media.audio-max-decode-error", 3))
   , mVideo(this, MediaData::VIDEO_DATA,
            Preferences::GetUint("media.video-max-decode-error", 2))
   , mDemuxer(aDemuxer)
   , mDemuxerInitDone(false)
   , mLastReportedNumDecodedFrames(0)
   , mPreviousDecodedKeyframeTime_us(sNoPreviousDecodedKeyframe)
-  , mLayersBackendType(aLayersBackend)
   , mInitDone(false)
   , mTrackDemuxersMayBlock(false)
   , mDemuxOnly(false)
   , mSeekScheduled(false)
   , mVideoFrameContainer(aVideoFrameContainer)
 {
   MOZ_ASSERT(aDemuxer);
   MOZ_COUNT_CTOR(MediaFormatReader);
@@ -155,17 +154,17 @@ MediaFormatReader::InitLayersBackendType
 
   dom::HTMLMediaElement* element = owner->GetMediaElement();
   NS_ENSURE_TRUE_VOID(element);
 
   RefPtr<LayerManager> layerManager =
     nsContentUtils::LayerManagerForDocument(element->OwnerDoc());
   NS_ENSURE_TRUE_VOID(layerManager);
 
-  mLayersBackendType = layerManager->GetCompositorBackendType();
+  mKnowsCompositor = layerManager->AsShadowForwarder();
 }
 
 nsresult
 MediaFormatReader::Init()
 {
   MOZ_ASSERT(NS_IsMainThread(), "Must be on main thread.");
 
   InitLayersBackendType();
@@ -412,17 +411,17 @@ MediaFormatReader::EnsureDecoderCreated(
 
     case TrackType::kVideoTrack: {
       // Decoders use the layers backend to decide if they can use hardware decoding,
       // so specify LAYERS_NONE if we want to forcibly disable it.
       decoder.mDecoder = mPlatform->CreateDecoder({
         mVideo.mInfo ? *mVideo.mInfo->GetAsVideoInfo() : mInfo.mVideo,
         decoder.mTaskQueue,
         decoder.mCallback.get(),
-        mLayersBackendType,
+        mKnowsCompositor,
         GetImageContainer(),
         mCrashHelper,
         decoder.mIsBlankDecode
       });
       break;
     }
     default:
       break;
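
MediaFormatReader no longer receives a LayersBackend through its constructor; InitLayersBackendType() latches the layer manager's ShadowForwarder (a KnowsCompositor) into mKnowsCompositor, and EnsureDecoderCreated() forwards it through CreateDecoderParams. A sketch of the null path this relies on, with `layerManager` standing in for the value obtained from nsContentUtils::LayerManagerForDocument():

  // If the document has a non-remote layer manager, AsShadowForwarder()
  // returns nullptr and mKnowsCompositor stays null; the new
  // CreateDecoderParams::GetLayersBackend() helper then reports LAYERS_NONE,
  // which keeps the hardware decoding paths disabled.
  RefPtr<layers::KnowsCompositor> knowsCompositor =
    layerManager ? layerManager->AsShadowForwarder() : nullptr;
  CreateDecoderParams params(mInfo.mVideo);
  params.mKnowsCompositor = knowsCompositor;
  MOZ_ASSERT_IF(!knowsCompositor,
                params.GetLayersBackend() == layers::LayersBackend::LAYERS_NONE);
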
--- a/dom/media/MediaFormatReader.h
+++ b/dom/media/MediaFormatReader.h
@@ -23,18 +23,17 @@ class CDMProxy;
 
 class MediaFormatReader final : public MediaDecoderReader
 {
   typedef TrackInfo::TrackType TrackType;
 
 public:
   MediaFormatReader(AbstractMediaDecoder* aDecoder,
                     MediaDataDemuxer* aDemuxer,
-                    VideoFrameContainer* aVideoFrameContainer = nullptr,
-                    layers::LayersBackend aLayersBackend = layers::LayersBackend::LAYERS_NONE);
+                    VideoFrameContainer* aVideoFrameContainer = nullptr);
 
   virtual ~MediaFormatReader();
 
   nsresult Init() override;
 
   size_t SizeOfVideoQueueInFrames() override;
   size_t SizeOfAudioQueueInFrames() override;
 
@@ -517,17 +516,17 @@ private:
   // delta there.
   uint64_t mLastReportedNumDecodedFrames;
 
   // Timestamp of the previous decoded keyframe, in microseconds.
   int64_t mPreviousDecodedKeyframeTime_us;
   // Default mLastDecodedKeyframeTime_us value, must be bigger than anything.
   static const int64_t sNoPreviousDecodedKeyframe = INT64_MAX;
 
-  layers::LayersBackend mLayersBackendType;
+  RefPtr<layers::KnowsCompositor> mKnowsCompositor;
 
   // Metadata objects
   // True if we've read the streams' metadata.
   bool mInitDone;
   MozPromiseHolder<MetadataPromise> mMetadataPromise;
   bool IsEncrypted() const;
 
   // Set to true if any of our track buffers may be blocking.
--- a/dom/media/fmp4/MP4Decoder.cpp
+++ b/dom/media/fmp4/MP4Decoder.cpp
@@ -210,52 +210,52 @@ MP4Decoder::IsEnabled()
 static const uint8_t sTestH264ExtraData[] = {
   0x01, 0x42, 0xc0, 0x1e, 0xff, 0xe1, 0x00, 0x17, 0x67, 0x42,
   0xc0, 0x1e, 0xbb, 0x40, 0x50, 0x17, 0xfc, 0xb8, 0x08, 0x80,
   0x00, 0x00, 0x32, 0x00, 0x00, 0x0b, 0xb5, 0x07, 0x8b, 0x17,
   0x50, 0x01, 0x00, 0x04, 0x68, 0xce, 0x32, 0xc8
 };
 
 static already_AddRefed<MediaDataDecoder>
-CreateTestH264Decoder(layers::LayersBackend aBackend,
+CreateTestH264Decoder(layers::KnowsCompositor* aKnowsCompositor,
                       VideoInfo& aConfig,
                       TaskQueue* aTaskQueue)
 {
   aConfig.mMimeType = "video/avc";
   aConfig.mId = 1;
   aConfig.mDuration = 40000;
   aConfig.mMediaTime = 0;
   aConfig.mImage = aConfig.mDisplay = nsIntSize(640, 360);
   aConfig.mExtraData = new MediaByteBuffer();
   aConfig.mExtraData->AppendElements(sTestH264ExtraData,
                                      MOZ_ARRAY_LENGTH(sTestH264ExtraData));
 
   RefPtr<PDMFactory> platform = new PDMFactory();
-  RefPtr<MediaDataDecoder> decoder(platform->CreateDecoder({ aConfig, aTaskQueue, aBackend }));
+  RefPtr<MediaDataDecoder> decoder(platform->CreateDecoder({ aConfig, aTaskQueue, aKnowsCompositor }));
 
   return decoder.forget();
 }
 
 /* static */ already_AddRefed<dom::Promise>
-MP4Decoder::IsVideoAccelerated(layers::LayersBackend aBackend, nsIGlobalObject* aParent)
+MP4Decoder::IsVideoAccelerated(layers::KnowsCompositor* aKnowsCompositor, nsIGlobalObject* aParent)
 {
   MOZ_ASSERT(NS_IsMainThread());
 
   ErrorResult rv;
   RefPtr<dom::Promise> promise;
   promise = dom::Promise::Create(aParent, rv);
   if (rv.Failed()) {
     rv.SuppressException();
     return nullptr;
   }
 
   RefPtr<TaskQueue> taskQueue =
     new TaskQueue(GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER));
   VideoInfo config;
-  RefPtr<MediaDataDecoder> decoder(CreateTestH264Decoder(aBackend, config, taskQueue));
+  RefPtr<MediaDataDecoder> decoder(CreateTestH264Decoder(aKnowsCompositor, config, taskQueue));
   if (!decoder) {
     taskQueue->BeginShutdown();
     taskQueue->AwaitShutdownAndIdle();
     promise->MaybeResolve(NS_LITERAL_STRING("No; Failed to create H264 decoder"));
     return promise.forget();
   }
 
   decoder->Init()
--- a/dom/media/fmp4/MP4Decoder.h
+++ b/dom/media/fmp4/MP4Decoder.h
@@ -4,16 +4,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 #if !defined(MP4Decoder_h_)
 #define MP4Decoder_h_
 
 #include "MediaDecoder.h"
 #include "MediaFormatReader.h"
 #include "mozilla/dom/Promise.h"
+#include "mozilla/layers/KnowsCompositor.h"
 
 namespace mozilla {
 
 // Decoder that uses a bundled MP4 demuxer and platform decoders to play MP4.
 class MP4Decoder : public MediaDecoder
 {
 public:
   explicit MP4Decoder(MediaDecoderOwner* aOwner);
@@ -42,17 +43,17 @@ public:
   // identify H264. Does not parse general content type strings, i.e. white
   // space matters.
   static bool IsH264(const nsACString& aMimeType);
 
   // Returns true if the MP4 backend is preffed on.
   static bool IsEnabled();
 
   static already_AddRefed<dom::Promise>
-  IsVideoAccelerated(layers::LayersBackend aBackend, nsIGlobalObject* aParent);
+  IsVideoAccelerated(layers::KnowsCompositor* aKnowsCompositor, nsIGlobalObject* aParent);
 
   void GetMozDebugReaderData(nsAString& aString) override;
 
 private:
   RefPtr<MediaFormatReader> mReader;
 };
 
 } // namespace mozilla
--- a/dom/media/ipc/PVideoDecoder.ipdl
+++ b/dom/media/ipc/PVideoDecoder.ipdl
@@ -4,16 +4,17 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 include "mozilla/dom/MediaIPCUtils.h";
 
 include protocol PVideoDecoderManager;
 include LayersSurfaces;
 using VideoInfo from "MediaInfo.h";
 using mozilla::layers::LayersBackend from "mozilla/layers/LayersTypes.h";
+using struct mozilla::layers::TextureFactoryIdentifier from "mozilla/layers/CompositorTypes.h";
 
 namespace mozilla {
 namespace dom {
 
 struct MediaDataIPDL
 {
   int64_t offset;
   int64_t time;
@@ -41,17 +42,17 @@ struct MediaRawDataIPDL
 // across processes. The parent side currently is only implemented to work with
 // Windows Media Foundation, but can be extended easily to support other backends.
 // The child side runs in the content process, and the parent side runs in the
 // GPU process. We run a separate IPDL thread for both sides.
 async protocol PVideoDecoder
 {
   manager PVideoDecoderManager;
 parent:
-  async Init(VideoInfo info, LayersBackend backend);
+  async Init(VideoInfo info, TextureFactoryIdentifier identifier);
 
   async Input(MediaRawDataIPDL data);
 
   async Flush();
   async Drain();
   async Shutdown();
 
   async SetSeekThreshold(int64_t time);
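
A KnowsCompositor object cannot cross the process boundary, so the protocol carries the serializable part of it instead: the content-side child snapshots its TextureFactoryIdentifier and the GPU-side parent feeds it into the KnowsCompositor it builds around VideoBridgeChild. Both halves of the handshake appear in the child/parent hunks below; sketched together:

  // Content process (VideoDecoderChild::Init): serialize what the compositor
  // told us about texture handling.
  SendInit(mVideoInfo, mKnowsCompositor->GetTextureFactoryIdentifier());

  // GPU process (VideoDecoderParent::RecvInit): rehydrate it into the
  // VideoBridge-backed KnowsCompositor before creating the real decoder.
  mKnowsCompositor->IdentifyTextureHost(aIdentifier);
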
--- a/dom/media/ipc/RemoteVideoDecoder.cpp
+++ b/dom/media/ipc/RemoteVideoDecoder.cpp
@@ -142,24 +142,28 @@ PlatformDecoderModule::ConversionRequire
 RemoteDecoderModule::DecoderNeedsConversion(const TrackInfo& aConfig) const
 {
   return mWrapped->DecoderNeedsConversion(aConfig);
 }
 
 already_AddRefed<MediaDataDecoder>
 RemoteDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams)
 {
+  if (!aParams.mKnowsCompositor) {
+    return nullptr;
+  }
+
   MediaDataDecoderCallback* callback = aParams.mCallback;
   MOZ_ASSERT(callback->OnReaderTaskQueue());
   RefPtr<RemoteVideoDecoder> object = new RemoteVideoDecoder(callback);
 
   VideoInfo info = aParams.VideoConfig();
 
-  layers::LayersBackend backend = aParams.mLayersBackend;
-  VideoDecoderManagerChild::GetManagerThread()->Dispatch(NS_NewRunnableFunction([object, callback, info, backend]() {
-    object->mActor->InitIPDL(callback, info, backend);
+  RefPtr<layers::KnowsCompositor> knowsCompositor = aParams.mKnowsCompositor;
+  VideoDecoderManagerChild::GetManagerThread()->Dispatch(NS_NewRunnableFunction([=]() {
+    object->mActor->InitIPDL(callback, info, knowsCompositor);
   }), NS_DISPATCH_NORMAL);
 
   return object.forget();
 }
 
 } // namespace dom
 } // namespace mozilla
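
CreateDecoderParams only borrows the KnowsCompositor for the duration of the call, so the runnable dispatched to the manager thread captures its own RefPtr copy; that keeps the object alive across the thread hop until InitIPDL has stored it on the actor, and the early null check simply opts out of remote decoding when the caller has no compositor connection to describe. The lifetime rule, sketched with the names from the hunk above:

  // Everything the runnable touches is captured by value as a RefPtr.
  RefPtr<layers::KnowsCompositor> knowsCompositor = aParams.mKnowsCompositor;
  VideoDecoderManagerChild::GetManagerThread()->Dispatch(
    NS_NewRunnableFunction([=]() {
      // Runs on the manager thread; `object` and `knowsCompositor` are kept
      // alive by the captured copies.
      object->mActor->InitIPDL(callback, info, knowsCompositor);
    }),
    NS_DISPATCH_NORMAL);
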
--- a/dom/media/ipc/VideoDecoderChild.cpp
+++ b/dom/media/ipc/VideoDecoderChild.cpp
@@ -16,17 +16,16 @@ namespace dom {
 
 using base::Thread;
 using namespace ipc;
 using namespace layers;
 using namespace gfx;
 
 VideoDecoderChild::VideoDecoderChild()
   : mThread(VideoDecoderManagerChild::GetManagerThread())
-  , mLayersBackend(layers::LayersBackend::LAYERS_NONE)
   , mCanSend(true)
   , mInitialized(false)
   , mIsHardwareAccelerated(false)
 {
 }
 
 VideoDecoderChild::~VideoDecoderChild()
 {
@@ -111,23 +110,23 @@ VideoDecoderChild::ActorDestroy(ActorDes
     }
   }
   mCanSend = false;
 }
 
 void
 VideoDecoderChild::InitIPDL(MediaDataDecoderCallback* aCallback,
                             const VideoInfo& aVideoInfo,
-                            layers::LayersBackend aLayersBackend)
+                            layers::KnowsCompositor* aKnowsCompositor)
 {
   VideoDecoderManagerChild::GetSingleton()->SendPVideoDecoderConstructor(this);
   mIPDLSelfRef = this;
   mCallback = aCallback;
   mVideoInfo = aVideoInfo;
-  mLayersBackend = aLayersBackend;
+  mKnowsCompositor = aKnowsCompositor;
 }
 
 void
 VideoDecoderChild::DestroyIPDL()
 {
   if (mCanSend) {
     PVideoDecoderChild::Send__delete__(this);
   }
@@ -140,17 +139,17 @@ VideoDecoderChild::IPDLActorDestroyed()
 }
 
 // MediaDataDecoder methods
 
 RefPtr<MediaDataDecoder::InitPromise>
 VideoDecoderChild::Init()
 {
   AssertOnManagerThread();
-  if (!mCanSend || !SendInit(mVideoInfo, mLayersBackend)) {
+  if (!mCanSend || !SendInit(mVideoInfo, mKnowsCompositor->GetTextureFactoryIdentifier())) {
     return MediaDataDecoder::InitPromise::CreateAndReject(
       NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
   }
   return mInitPromise.Ensure(__func__);
 }
 
 void
 VideoDecoderChild::Input(MediaRawData* aSample)
--- a/dom/media/ipc/VideoDecoderChild.h
+++ b/dom/media/ipc/VideoDecoderChild.h
@@ -40,17 +40,17 @@ public:
   void Drain();
   void Shutdown();
   bool IsHardwareAccelerated(nsACString& aFailureReason) const;
   void SetSeekThreshold(const media::TimeUnit& aTime);
 
   MOZ_IS_CLASS_INIT
   void InitIPDL(MediaDataDecoderCallback* aCallback,
                 const VideoInfo& aVideoInfo,
-                layers::LayersBackend aLayersBackend);
+                layers::KnowsCompositor* aKnowsCompositor);
   void DestroyIPDL();
 
   // Called from IPDL when our actor has been destroyed
   void IPDLActorDestroyed();
 
 private:
   ~VideoDecoderChild();
 
@@ -59,17 +59,17 @@ private:
   RefPtr<VideoDecoderChild> mIPDLSelfRef;
   RefPtr<nsIThread> mThread;
 
   MediaDataDecoderCallback* mCallback;
 
   MozPromiseHolder<MediaDataDecoder::InitPromise> mInitPromise;
 
   VideoInfo mVideoInfo;
-  layers::LayersBackend mLayersBackend;
+  RefPtr<layers::KnowsCompositor> mKnowsCompositor;
   nsCString mHardwareAcceleratedReason;
   bool mCanSend;
   bool mInitialized;
   bool mIsHardwareAccelerated;
 };
 
 } // namespace dom
 } // namespace mozilla
--- a/dom/media/ipc/VideoDecoderParent.cpp
+++ b/dom/media/ipc/VideoDecoderParent.cpp
@@ -19,22 +19,40 @@
 namespace mozilla {
 namespace dom {
 
 using base::Thread;
 using namespace ipc;
 using namespace layers;
 using namespace gfx;
 
+class KnowsCompositorVideo : public layers::KnowsCompositor
+{
+public:
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(KnowsCompositorVideo, override)
+
+  layers::TextureForwarder* GetTextureForwarder() override
+  {
+    return VideoBridgeChild::GetSingleton();
+  }
+  layers::LayersIPCActor* GetLayersIPCActor() override
+  {
+    return VideoBridgeChild::GetSingleton();
+  }
+private:
+  virtual ~KnowsCompositorVideo() {}
+};
+
 VideoDecoderParent::VideoDecoderParent(VideoDecoderManagerParent* aParent,
                                        TaskQueue* aManagerTaskQueue,
                                        TaskQueue* aDecodeTaskQueue)
   : mParent(aParent)
   , mManagerTaskQueue(aManagerTaskQueue)
   , mDecodeTaskQueue(aDecodeTaskQueue)
+  , mKnowsCompositor(new KnowsCompositorVideo)
   , mDestroyed(false)
 {
   MOZ_COUNT_CTOR(VideoDecoderParent);
   // We hold a reference to ourselves to keep us alive until IPDL
   // explicitly destroys us. There may still be refs held by
   // tasks, but no new ones should be added after we're
   // destroyed.
   mIPDLSelfRef = this;
@@ -49,22 +67,24 @@ void
 VideoDecoderParent::Destroy()
 {
   mDecodeTaskQueue->AwaitShutdownAndIdle();
   mDestroyed = true;
   mIPDLSelfRef = nullptr;
 }
 
 bool
-VideoDecoderParent::RecvInit(const VideoInfo& aInfo, const layers::LayersBackend& aBackend)
+VideoDecoderParent::RecvInit(const VideoInfo& aInfo, const layers::TextureFactoryIdentifier& aIdentifier)
 {
+  mKnowsCompositor->IdentifyTextureHost(aIdentifier);
+
   CreateDecoderParams params(aInfo);
   params.mTaskQueue = mDecodeTaskQueue;
   params.mCallback = this;
-  params.mLayersBackend = aBackend;
+  params.mKnowsCompositor = mKnowsCompositor;
   params.mImageContainer = new layers::ImageContainer();
 
 #ifdef XP_WIN
   // TODO: Ideally we wouldn't hardcode the WMF PDM, and we'd use the normal PDM
   // factory logic for picking a decoder.
   WMFDecoderModule::Init();
   RefPtr<WMFDecoderModule> pdm(new WMFDecoderModule());
   pdm->Startup();
@@ -159,37 +179,36 @@ VideoDecoderParent::ActorDestroy(ActorDe
   }
 }
 
 void
 VideoDecoderParent::Output(MediaData* aData)
 {
   MOZ_ASSERT(mDecodeTaskQueue->IsCurrentThreadIn());
   RefPtr<VideoDecoderParent> self = this;
+  RefPtr<KnowsCompositor> knowsCompositor = mKnowsCompositor;
   RefPtr<MediaData> data = aData;
-  mManagerTaskQueue->Dispatch(NS_NewRunnableFunction([self, data]() {
+  mManagerTaskQueue->Dispatch(NS_NewRunnableFunction([self, knowsCompositor, data]() {
     if (self->mDestroyed) {
       return;
     }
 
     MOZ_ASSERT(data->mType == MediaData::VIDEO_DATA, "Can only decode videos using VideoDecoderParent!");
     VideoData* video = static_cast<VideoData*>(data.get());
 
     MOZ_ASSERT(video->mImage, "Decoded video must output a layer::Image to be used with VideoDecoderParent");
 
-    RefPtr<TextureClient> texture = video->mImage->GetTextureClient(VideoBridgeChild::GetSingleton());
+    RefPtr<TextureClient> texture = video->mImage->GetTextureClient(knowsCompositor);
 
     if (!texture) {
-      texture =
-        ImageClient::CreateTextureClientForImage(video->mImage,
-                                                 VideoBridgeChild::GetSingleton());
+      texture = ImageClient::CreateTextureClientForImage(video->mImage, knowsCompositor);
     }
 
     if (texture && !texture->IsAddedToCompositableClient()) {
-      texture->InitIPDLActor(VideoBridgeChild::GetSingleton());
+      texture->InitIPDLActor(knowsCompositor);
       texture->SetAddedToCompositableClient();
     }
 
     VideoDataIPDL output(MediaDataIPDL(data->mOffset,
                                        data->mTime,
                                        data->mTimecode,
                                        data->mDuration,
                                        data->mFrames,
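
KnowsCompositorVideo is the GPU-process counterpart of the content-side ShadowLayerForwarder: both KnowsCompositor accessors answer with the VideoBridgeChild singleton, so texture clients for decoded frames are allocated and their IPDL actors created over the video bridge, while the TextureFactoryIdentifier received in RecvInit tells the allocator which compositor backend the textures must match. The per-frame path in Output() then reduces to reuse-or-create, then attach; condensed sketch using the same calls as the hunk above, with `knowsCompositor` being the captured RefPtr:

  RefPtr<TextureClient> texture =
    video->mImage->GetTextureClient(knowsCompositor);      // reuse an existing client
  if (!texture) {
    texture = ImageClient::CreateTextureClientForImage(video->mImage,
                                                       knowsCompositor);
  }
  if (texture && !texture->IsAddedToCompositableClient()) {
    texture->InitIPDLActor(knowsCompositor);               // actor lives on the video bridge
    texture->SetAddedToCompositableClient();
  }
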
--- a/dom/media/ipc/VideoDecoderParent.h
+++ b/dom/media/ipc/VideoDecoderParent.h
@@ -3,39 +3,42 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 #ifndef include_dom_ipc_VideoDecoderParent_h
 #define include_dom_ipc_VideoDecoderParent_h
 
 #include "mozilla/RefPtr.h"
 #include "mozilla/dom/PVideoDecoderParent.h"
+#include "mozilla/layers/TextureForwarder.h"
 #include "VideoDecoderManagerParent.h"
 #include "MediaData.h"
 #include "ImageContainer.h"
 
 namespace mozilla {
 namespace dom {
 
+class KnowsCompositorVideo;
+
 class VideoDecoderParent final : public PVideoDecoderParent,
                                  public MediaDataDecoderCallback
 {
 public:
   // We refcount this class since the task queue can have runnables
   // that reference us.
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VideoDecoderParent)
 
   VideoDecoderParent(VideoDecoderManagerParent* aParent,
                      TaskQueue* aManagerTaskQueue,
                      TaskQueue* aDecodeTaskQueue);
 
   void Destroy();
 
   // PVideoDecoderParent
-  bool RecvInit(const VideoInfo& aVideoInfo, const layers::LayersBackend& aBackend) override;
+  bool RecvInit(const VideoInfo& aVideoInfo, const layers::TextureFactoryIdentifier& aIdentifier) override;
   bool RecvInput(const MediaRawDataIPDL& aData) override;
   bool RecvFlush() override;
   bool RecvDrain() override;
   bool RecvShutdown() override;
   bool RecvSetSeekThreshold(const int64_t& aTime) override;
 
   void ActorDestroy(ActorDestroyReason aWhy) override;
 
@@ -49,16 +52,17 @@ public:
 private:
   ~VideoDecoderParent();
 
   RefPtr<VideoDecoderManagerParent> mParent;
   RefPtr<VideoDecoderParent> mIPDLSelfRef;
   RefPtr<TaskQueue> mManagerTaskQueue;
   RefPtr<TaskQueue> mDecodeTaskQueue;
   RefPtr<MediaDataDecoder> mDecoder;
+  RefPtr<KnowsCompositorVideo> mKnowsCompositor;
 
   // Can only be accessed from the manager thread
   bool mDestroyed;
 };
 
 } // namespace dom
 } // namespace mozilla
 
--- a/dom/media/platforms/PlatformDecoderModule.h
+++ b/dom/media/platforms/PlatformDecoderModule.h
@@ -5,16 +5,17 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #if !defined(PlatformDecoderModule_h_)
 #define PlatformDecoderModule_h_
 
 #include "MediaDecoderReader.h"
 #include "mozilla/MozPromise.h"
 #include "mozilla/layers/LayersTypes.h"
+#include "mozilla/layers/KnowsCompositor.h"
 #include "nsTArray.h"
 #include "mozilla/RefPtr.h"
 #include "GMPService.h"
 #include <queue>
 #include "MediaResult.h"
 
 namespace mozilla {
 class TrackInfo;
@@ -53,33 +54,41 @@ struct CreateDecoderParams {
   }
 
   const AudioInfo& AudioConfig() const
   {
     MOZ_ASSERT(mConfig.IsAudio());
     return *mConfig.GetAsAudioInfo();
   }
 
+  layers::LayersBackend GetLayersBackend() const
+  {
+    if (mKnowsCompositor) {
+      return mKnowsCompositor->GetCompositorBackendType();
+    }
+    return layers::LayersBackend::LAYERS_NONE;
+  }
+
   const TrackInfo& mConfig;
   TaskQueue* mTaskQueue = nullptr;
   MediaDataDecoderCallback* mCallback = nullptr;
   DecoderDoctorDiagnostics* mDiagnostics = nullptr;
   layers::ImageContainer* mImageContainer = nullptr;
-  layers::LayersBackend mLayersBackend = layers::LayersBackend::LAYERS_NONE;
+  RefPtr<layers::KnowsCompositor> mKnowsCompositor;
   RefPtr<GMPCrashHelper> mCrashHelper;
   bool mUseBlankDecoder = false;
 
 private:
   void Set(TaskQueue* aTaskQueue) { mTaskQueue = aTaskQueue; }
   void Set(MediaDataDecoderCallback* aCallback) { mCallback = aCallback; }
   void Set(DecoderDoctorDiagnostics* aDiagnostics) { mDiagnostics = aDiagnostics; }
   void Set(layers::ImageContainer* aImageContainer) { mImageContainer = aImageContainer; }
-  void Set(layers::LayersBackend aLayersBackend) { mLayersBackend = aLayersBackend; }
   void Set(GMPCrashHelper* aCrashHelper) { mCrashHelper = aCrashHelper; }
   void Set(bool aUseBlankDecoder) { mUseBlankDecoder = aUseBlankDecoder; }
+  void Set(layers::KnowsCompositor* aKnowsCompositor) { mKnowsCompositor = aKnowsCompositor; }
   template <typename T1, typename T2, typename... Ts>
   void Set(T1&& a1, T2&& a2, Ts&&... args)
   {
     Set(mozilla::Forward<T1>(a1));
     Set(mozilla::Forward<T2>(a2), mozilla::Forward<Ts>(args)...);
   }
 };
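
CreateDecoderParams keeps its tag-dispatch construction: the variadic Set() chain lets callers brace-initialize only the fields they need, in any order after the mandatory TrackInfo, and a KnowsCompositor argument now lands in mKnowsCompositor where a LayersBackend used to go. GetLayersBackend() gives code that only needs the enum (GMP, the WMF DXVA checks) a null-safe way to recover it. A usage sketch; `pdm`, `info`, `taskQueue`, `callback` and `knowsCompositor` are placeholder names:

  RefPtr<MediaDataDecoder> decoder = pdm->CreateDecoder({
    info,            // const TrackInfo&, always first
    taskQueue,       // routed to Set(TaskQueue*)
    callback,        // routed to Set(MediaDataDecoderCallback*)
    knowsCompositor  // routed to Set(layers::KnowsCompositor*), which replaces
                     // the removed Set(layers::LayersBackend)
  });
  // Consumers that only need the enum call aParams.GetLayersBackend(), which
  // returns LayersBackend::LAYERS_NONE whenever mKnowsCompositor is null.
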
 
--- a/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
+++ b/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
@@ -110,17 +110,17 @@ VideoCallbackAdapter::Terminated()
 }
 
 GMPVideoDecoderParams::GMPVideoDecoderParams(const CreateDecoderParams& aParams)
   : mConfig(aParams.VideoConfig())
   , mTaskQueue(aParams.mTaskQueue)
   , mCallback(nullptr)
   , mAdapter(nullptr)
   , mImageContainer(aParams.mImageContainer)
-  , mLayersBackend(aParams.mLayersBackend)
+  , mLayersBackend(aParams.GetLayersBackend())
   , mCrashHelper(aParams.mCrashHelper)
 {}
 
 GMPVideoDecoderParams&
 GMPVideoDecoderParams::WithCallback(MediaDataDecoderProxy* aWrapper)
 {
   MOZ_ASSERT(aWrapper);
   MOZ_ASSERT(!mCallback); // Should only be called once per instance.
--- a/dom/media/platforms/wmf/DXVA2Manager.cpp
+++ b/dom/media/platforms/wmf/DXVA2Manager.cpp
@@ -8,17 +8,17 @@
 #include <d3d11.h>
 #include "nsThreadUtils.h"
 #include "ImageContainer.h"
 #include "gfxWindowsPlatform.h"
 #include "D3D9SurfaceImage.h"
 #include "mozilla/gfx/DeviceManagerDx.h"
 #include "mozilla/layers/D3D11ShareHandleImage.h"
 #include "mozilla/layers/ImageBridgeChild.h"
-#include "mozilla/layers/VideoBridgeChild.h"
+#include "mozilla/layers/TextureForwarder.h"
 #include "mozilla/Telemetry.h"
 #include "MediaTelemetryConstants.h"
 #include "mfapi.h"
 #include "MediaPrefs.h"
 #include "MFTDecoder.h"
 #include "DriverCrashGuard.h"
 #include "nsPrintfCString.h"
 #include "gfxCrashReporterUtils.h"
@@ -86,17 +86,18 @@ using layers::D3D11ShareHandleImage;
 using layers::D3D11RecycleAllocator;
 
 class D3D9DXVA2Manager : public DXVA2Manager
 {
 public:
   D3D9DXVA2Manager();
   virtual ~D3D9DXVA2Manager();
 
-  HRESULT Init(nsACString& aFailureReason);
+  HRESULT Init(layers::KnowsCompositor* aKnowsCompositor,
+               nsACString& aFailureReason);
 
   IUnknown* GetDXVADeviceManager() override;
 
   // Copies a region (aRegion) of the video frame stored in aVideoSample
   // into an image which is returned by aOutImage.
   HRESULT CopyToImage(IMFSample* aVideoSample,
                       const nsIntRect& aRegion,
                       Image** aOutImage) override;
@@ -257,17 +258,18 @@ D3D9DXVA2Manager::~D3D9DXVA2Manager()
 IUnknown*
 D3D9DXVA2Manager::GetDXVADeviceManager()
 {
   MutexAutoLock lock(mLock);
   return mDeviceManager;
 }
 
 HRESULT
-D3D9DXVA2Manager::Init(nsACString& aFailureReason)
+D3D9DXVA2Manager::Init(layers::KnowsCompositor* aKnowsCompositor,
+                       nsACString& aFailureReason)
 {
   MOZ_ASSERT(NS_IsMainThread());
 
   ScopedGfxFeatureReporter reporter("DXVA2D3D9");
 
   gfx::D3D9VideoCrashGuard crashGuard;
   if (crashGuard.Crashed()) {
     NS_WARNING("DXVA2D3D9 crash detected");
@@ -425,20 +427,22 @@ D3D9DXVA2Manager::Init(nsACString& aFail
 
   mResetToken = resetToken;
   mD3D9 = d3d9Ex;
   mDevice = device;
   mDeviceManager = deviceManager;
   mSyncSurface = syncSurf;
 
   if (layers::ImageBridgeChild::GetSingleton()) {
+    // There's no proper KnowsCompositor for ImageBridge currently (and it
+    // implements the interface), so just use that if it's available.
     mTextureClientAllocator = new D3D9RecycleAllocator(layers::ImageBridgeChild::GetSingleton().get(),
                                                        mDevice);
   } else {
-    mTextureClientAllocator = new D3D9RecycleAllocator(layers::VideoBridgeChild::GetSingleton(),
+    mTextureClientAllocator = new D3D9RecycleAllocator(aKnowsCompositor,
                                                        mDevice);
   }
   mTextureClientAllocator->SetMaxPoolSize(5);
 
   Telemetry::Accumulate(Telemetry::MEDIA_DECODER_BACKEND_USED,
                         uint32_t(media::MediaDecoderBackend::WMFDXVA2D3D9));
 
   reporter.SetSuccessful();
@@ -486,17 +490,18 @@ D3D9DXVA2Manager::CopyToImage(IMFSample*
 }
 
 // Count of the number of DXVAManager's we've created. This is also the
 // number of videos we're decoding with DXVA. Use on main thread only.
 static uint32_t sDXVAVideosCount = 0;
 
 /* static */
 DXVA2Manager*
-DXVA2Manager::CreateD3D9DXVA(nsACString& aFailureReason)
+DXVA2Manager::CreateD3D9DXVA(layers::KnowsCompositor* aKnowsCompositor,
+                             nsACString& aFailureReason)
 {
   MOZ_ASSERT(NS_IsMainThread());
   HRESULT hr;
 
   // DXVA processing takes up a lot of GPU resources, so limit the number of
   // videos we use DXVA with at any one time.
   uint32_t dxvaLimit = 4;
   // TODO: Sync this value across to the GPU process.
@@ -505,32 +510,33 @@ DXVA2Manager::CreateD3D9DXVA(nsACString&
   }
 
   if (sDXVAVideosCount == dxvaLimit) {
     aFailureReason.AssignLiteral("Too many DXVA videos playing");
     return nullptr;
   }
 
   nsAutoPtr<D3D9DXVA2Manager> d3d9Manager(new D3D9DXVA2Manager());
-  hr = d3d9Manager->Init(aFailureReason);
+  hr = d3d9Manager->Init(aKnowsCompositor, aFailureReason);
   if (SUCCEEDED(hr)) {
     return d3d9Manager.forget();
   }
 
   // No hardware accelerated video decoding. :(
   return nullptr;
 }
 
 class D3D11DXVA2Manager : public DXVA2Manager
 {
 public:
   D3D11DXVA2Manager();
   virtual ~D3D11DXVA2Manager();
 
-  HRESULT Init(nsACString& aFailureReason);
+  HRESULT Init(layers::KnowsCompositor* aKnowsCompositor,
+               nsACString& aFailureReason);
 
   IUnknown* GetDXVADeviceManager() override;
 
   // Copies a region (aRegion) of the video frame stored in aVideoSample
   // into an image which is returned by aOutImage.
   HRESULT CopyToImage(IMFSample* aVideoSample,
                       const nsIntRect& aRegion,
                       Image** aOutImage) override;
@@ -628,17 +634,18 @@ D3D11DXVA2Manager::~D3D11DXVA2Manager()
 IUnknown*
 D3D11DXVA2Manager::GetDXVADeviceManager()
 {
   MutexAutoLock lock(mLock);
   return mDXGIDeviceManager;
 }
 
 HRESULT
-D3D11DXVA2Manager::Init(nsACString& aFailureReason)
+D3D11DXVA2Manager::Init(layers::KnowsCompositor* aKnowsCompositor,
+                        nsACString& aFailureReason)
 {
   HRESULT hr;
 
   ScopedGfxFeatureReporter reporter("DXVA2D3D11");
 
   gfx::D3D11VideoCrashGuard crashGuard;
   if (crashGuard.Crashed()) {
     NS_WARNING("DXVA2D3D11 crash detected");
@@ -759,20 +766,22 @@ D3D11DXVA2Manager::Init(nsACString& aFai
   desc.BindFlags = 0;
   desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
   desc.MiscFlags = 0;
 
   hr = mDevice->CreateTexture2D(&desc, NULL, getter_AddRefs(mSyncSurface));
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
   if (layers::ImageBridgeChild::GetSingleton()) {
+    // There's no proper KnowsCompositor for ImageBridge currently (and it
+    // implements the interface), so just use that if it's available.
     mTextureClientAllocator = new D3D11RecycleAllocator(layers::ImageBridgeChild::GetSingleton().get(),
                                                         mDevice);
   } else {
-    mTextureClientAllocator = new D3D11RecycleAllocator(layers::VideoBridgeChild::GetSingleton(),
+    mTextureClientAllocator = new D3D11RecycleAllocator(aKnowsCompositor,
                                                         mDevice);
   }
   mTextureClientAllocator->SetMaxPoolSize(5);
 
   Telemetry::Accumulate(Telemetry::MEDIA_DECODER_BACKEND_USED,
                         uint32_t(media::MediaDecoderBackend::WMFDXVA2D3D11));
 
   reporter.SetSuccessful();
@@ -905,33 +914,34 @@ D3D11DXVA2Manager::ConfigureForSize(uint
   hr = mTransform->SetMediaTypes(inputType, outputType, ConfigureOutput, &size);
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
   return S_OK;
 }
 
 /* static */
 DXVA2Manager*
-DXVA2Manager::CreateD3D11DXVA(nsACString& aFailureReason)
+DXVA2Manager::CreateD3D11DXVA(layers::KnowsCompositor* aKnowsCompositor,
+                              nsACString& aFailureReason)
 {
   // DXVA processing takes up a lot of GPU resources, so limit the number of
   // videos we use DXVA with at any one time.
   uint32_t dxvaLimit = 4;
   // TODO: Sync this value across to the GPU process.
   if (XRE_GetProcessType() != GeckoProcessType_GPU) {
     dxvaLimit = MediaPrefs::PDMWMFMaxDXVAVideos();
   }
 
   if (sDXVAVideosCount == dxvaLimit) {
     aFailureReason.AssignLiteral("Too many DXVA videos playing");
     return nullptr;
   }
 
   nsAutoPtr<D3D11DXVA2Manager> manager(new D3D11DXVA2Manager());
-  HRESULT hr = manager->Init(aFailureReason);
+  HRESULT hr = manager->Init(aKnowsCompositor, aFailureReason);
   NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
 
   return manager.forget();
 }
 
 DXVA2Manager::DXVA2Manager()
   : mLock("DXVA2Manager")
 {
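
Both DXVA managers previously reached for VideoBridgeChild::GetSingleton() directly; they now take the caller's KnowsCompositor, so the content process keeps allocating recycled texture clients over ImageBridge while the GPU process (which has no ImageBridge singleton) uses the VideoBridge-backed KnowsCompositorVideo passed down from VideoDecoderParent. The selection shared by the D3D9 and D3D11 Init() paths, sketched (`allocatorTarget` is a placeholder name; the allocator class differs per manager):

  layers::KnowsCompositor* allocatorTarget =
    layers::ImageBridgeChild::GetSingleton()
      ? layers::ImageBridgeChild::GetSingleton().get()  // content process
      : aKnowsCompositor;                               // GPU process
  mTextureClientAllocator = new D3D11RecycleAllocator(allocatorTarget, mDevice);
  mTextureClientAllocator->SetMaxPoolSize(5);
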
--- a/dom/media/platforms/wmf/DXVA2Manager.h
+++ b/dom/media/platforms/wmf/DXVA2Manager.h
@@ -11,25 +11,26 @@
 #include "mozilla/Mutex.h"
 #include "nsRect.h"
 
 namespace mozilla {
 
 namespace layers {
 class Image;
 class ImageContainer;
+class KnowsCompositor;
 }
 
 class DXVA2Manager {
 public:
 
   // Creates and initializes a DXVA2Manager. We can use DXVA2 via either
   // D3D9Ex or D3D11.
-  static DXVA2Manager* CreateD3D9DXVA(nsACString& aFailureReason);
-  static DXVA2Manager* CreateD3D11DXVA(nsACString& aFailureReason);
+  static DXVA2Manager* CreateD3D9DXVA(layers::KnowsCompositor* aKnowsCompositor, nsACString& aFailureReason);
+  static DXVA2Manager* CreateD3D11DXVA(layers::KnowsCompositor* aKnowsCompositor, nsACString& aFailureReason);
 
   // Returns a pointer to the D3D device manager responsible for managing the
   // device we're using for hardware accelerated video decoding. If we're using
   // D3D9Ex, this is an IDirect3DDeviceManager9. For D3D11 this is an
   // IMFDXGIDeviceManager. It is safe to call this on any thread.
   virtual IUnknown* GetDXVADeviceManager() = 0;
 
   // Creates an Image for the video frame stored in aVideoSample.
--- a/dom/media/platforms/wmf/WMFDecoderModule.cpp
+++ b/dom/media/platforms/wmf/WMFDecoderModule.cpp
@@ -80,17 +80,17 @@ WMFDecoderModule::Startup()
   return mWMFInitialized ? NS_OK : NS_ERROR_FAILURE;
 }
 
 already_AddRefed<MediaDataDecoder>
 WMFDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams)
 {
   nsAutoPtr<WMFVideoMFTManager> manager(
     new WMFVideoMFTManager(aParams.VideoConfig(),
-                           aParams.mLayersBackend,
+                           aParams.mKnowsCompositor,
                            aParams.mImageContainer,
                            sDXVAEnabled));
 
   if (!manager->Init()) {
     return nullptr;
   }
 
   RefPtr<MediaDataDecoder> decoder =
--- a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
@@ -71,28 +71,37 @@ const CLSID CLSID_WebmMfVp9Dec =
 {
   0x7ab4bd2,
   0x1979,
   0x4fcd,
   {0xa6, 0x97, 0xdf, 0x9a, 0xd1, 0x5b, 0x34, 0xfe}
 };
 
 namespace mozilla {
+  
+LayersBackend
+GetCompositorBackendType(layers::KnowsCompositor* aKnowsCompositor)
+{
+  if (aKnowsCompositor) {
+    return aKnowsCompositor->GetCompositorBackendType();
+  }
+  return LayersBackend::LAYERS_NONE;
+}
 
 WMFVideoMFTManager::WMFVideoMFTManager(
                             const VideoInfo& aConfig,
-                            mozilla::layers::LayersBackend aLayersBackend,
-                            mozilla::layers::ImageContainer* aImageContainer,
+                            layers::KnowsCompositor* aKnowsCompositor,
+                            layers::ImageContainer* aImageContainer,
                             bool aDXVAEnabled)
   : mVideoInfo(aConfig)
   , mVideoStride(0)
   , mImageSize(aConfig.mImage)
   , mImageContainer(aImageContainer)
   , mDXVAEnabled(aDXVAEnabled)
-  , mLayersBackend(aLayersBackend)
+  , mKnowsCompositor(aKnowsCompositor)
   , mNullOutputCount(0)
   , mGotValidOutputAfterNullOutput(false)
   , mGotExcessiveNullOutput(false)
   , mIsValid(true)
   // mVideoStride, mVideoWidth, mVideoHeight, mUseHwAccel are initialized in
   // Init().
 {
   MOZ_COUNT_CTOR(WMFVideoMFTManager);
@@ -298,82 +307,88 @@ FindD3D11BlacklistedDLL() {
 static const nsCString&
 FindD3D9BlacklistedDLL() {
   return FindDXVABlacklistedDLL(sD3D9BlacklistingCache,
                                 "media.wmf.disable-d3d9-for-dlls");
 }
 
 class CreateDXVAManagerEvent : public Runnable {
 public:
-  CreateDXVAManagerEvent(LayersBackend aBackend, nsCString& aFailureReason)
+  CreateDXVAManagerEvent(LayersBackend aBackend,
+                         layers::KnowsCompositor* aKnowsCompositor,
+                         nsCString& aFailureReason)
     : mBackend(aBackend)
+    , mKnowsCompositor(aKnowsCompositor)
     , mFailureReason(aFailureReason)
   {}
 
   NS_IMETHOD Run() override {
     NS_ASSERTION(NS_IsMainThread(), "Must be on main thread.");
     nsACString* failureReason = &mFailureReason;
     nsCString secondFailureReason;
     bool allowD3D11 = (XRE_GetProcessType() == GeckoProcessType_GPU) ||
                       MediaPrefs::PDMWMFAllowD3D11();
     if (mBackend == LayersBackend::LAYERS_D3D11 &&
         allowD3D11 && IsWin8OrLater()) {
       const nsCString& blacklistedDLL = FindD3D11BlacklistedDLL();
       if (!blacklistedDLL.IsEmpty()) {
         failureReason->AppendPrintf("D3D11 blacklisted with DLL %s",
                                     blacklistedDLL.get());
       } else {
-        mDXVA2Manager = DXVA2Manager::CreateD3D11DXVA(*failureReason);
+        mDXVA2Manager = DXVA2Manager::CreateD3D11DXVA(mKnowsCompositor, *failureReason);
         if (mDXVA2Manager) {
           return NS_OK;
         }
       }
       // Try again with d3d9, but record the failure reason
       // into a new var to avoid overwriting the d3d11 failure.
       failureReason = &secondFailureReason;
       mFailureReason.Append(NS_LITERAL_CSTRING("; "));
     }
 
     const nsCString& blacklistedDLL = FindD3D9BlacklistedDLL();
     if (!blacklistedDLL.IsEmpty()) {
       mFailureReason.AppendPrintf("D3D9 blacklisted with DLL %s",
                                   blacklistedDLL.get());
     } else {
-      mDXVA2Manager = DXVA2Manager::CreateD3D9DXVA(*failureReason);
+      mDXVA2Manager = DXVA2Manager::CreateD3D9DXVA(mKnowsCompositor, *failureReason);
       // Make sure we include the messages from both attempts (if applicable).
       mFailureReason.Append(secondFailureReason);
     }
     return NS_OK;
   }
   nsAutoPtr<DXVA2Manager> mDXVA2Manager;
-  LayersBackend mBackend;
+  layers::LayersBackend mBackend;
+  KnowsCompositor* mKnowsCompositor;
   nsACString& mFailureReason;
 };
 
 bool
 WMFVideoMFTManager::InitializeDXVA(bool aForceD3D9)
 {
   // If we use DXVA but aren't running with a D3D layer manager then the
   // readback of decoded video frames from GPU to CPU memory grinds painting
   // to a halt, and makes playback performance *worse*.
   if (!mDXVAEnabled) {
     mDXVAFailureReason.AssignLiteral("Hardware video decoding disabled or blacklisted");
     return false;
   }
   MOZ_ASSERT(!mDXVA2Manager);
-  if (mLayersBackend != LayersBackend::LAYERS_D3D9 &&
-      mLayersBackend != LayersBackend::LAYERS_D3D11) {
+  LayersBackend backend = GetCompositorBackendType(mKnowsCompositor);
+  if (backend != LayersBackend::LAYERS_D3D9 &&
+      backend != LayersBackend::LAYERS_D3D11) {
     mDXVAFailureReason.AssignLiteral("Unsupported layers backend");
     return false;
   }
 
   // The DXVA manager must be created on the main thread.
   RefPtr<CreateDXVAManagerEvent> event =
     new CreateDXVAManagerEvent(aForceD3D9 ? LayersBackend::LAYERS_D3D9
-                                          : mLayersBackend,
+                                          : backend,
+                               mKnowsCompositor,
                                mDXVAFailureReason);
 
   if (NS_IsMainThread()) {
     event->Run();
   } else {
     NS_DispatchToMainThread(event, NS_DISPATCH_SYNC);
   }
   mDXVA2Manager = event->mDXVA2Manager;
@@ -746,18 +761,19 @@ WMFVideoMFTManager::CreateBasicVideoFram
   b.mPlanes[2].mSkip = 0;
 
   media::TimeUnit pts = GetSampleTime(aSample);
   NS_ENSURE_TRUE(pts.IsValid(), E_FAIL);
   media::TimeUnit duration = GetSampleDuration(aSample);
   NS_ENSURE_TRUE(duration.IsValid(), E_FAIL);
   nsIntRect pictureRegion = mVideoInfo.ScaledImageRect(videoWidth, videoHeight);
 
-  if (mLayersBackend != LayersBackend::LAYERS_D3D9 &&
-      mLayersBackend != LayersBackend::LAYERS_D3D11) {
+  LayersBackend backend = GetCompositorBackendType(mKnowsCompositor);
+  if (backend != LayersBackend::LAYERS_D3D9 &&
+      backend != LayersBackend::LAYERS_D3D11) {
     RefPtr<VideoData> v =
       VideoData::CreateAndCopyData(mVideoInfo,
                                    mImageContainer,
                                    aStreamOffset,
                                    pts.ToMicroseconds(),
                                    duration.ToMicroseconds(),
                                    b,
                                    false,
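
WMFVideoMFTManager only ever consulted the backend enum to decide whether DXVA-decoded frames can be handed to the compositor without a costly GPU-to-CPU readback; the GetCompositorBackendType() helper keeps that test null-safe, while the KnowsCompositor itself is forwarded to the DXVA2Manager constructors for texture allocation. The eligibility test now used in InitializeDXVA() and CreateBasicVideoFrame(), sketched:

  LayersBackend backend = GetCompositorBackendType(mKnowsCompositor);
  bool canUseDXVAOutput = backend == LayersBackend::LAYERS_D3D9 ||
                          backend == LayersBackend::LAYERS_D3D11;
  // With any other backend (or a null KnowsCompositor) the manager records
  // "Unsupported layers backend" and decodes to software frames instead.
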
--- a/dom/media/platforms/wmf/WMFVideoMFTManager.h
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.h
@@ -16,18 +16,18 @@
 
 namespace mozilla {
 
 class DXVA2Manager;
 
 class WMFVideoMFTManager : public MFTManager {
 public:
   WMFVideoMFTManager(const VideoInfo& aConfig,
-                     mozilla::layers::LayersBackend aLayersBackend,
-                     mozilla::layers::ImageContainer* aImageContainer,
+                     layers::KnowsCompositor* aKnowsCompositor,
+                     layers::ImageContainer* aImageContainer,
                      bool aDXVAEnabled);
   ~WMFVideoMFTManager();
 
   bool Init();
 
   HRESULT Input(MediaRawData* aSample) override;
 
   HRESULT Output(int64_t aStreamOffset, RefPtr<MediaData>& aOutput) override;
@@ -83,26 +83,26 @@ private:
   bool CanUseDXVA(IMFMediaType* aType);
 
   // Video frame geometry.
   VideoInfo mVideoInfo;
   uint32_t mVideoStride;
   nsIntSize mImageSize;
 
   RefPtr<layers::ImageContainer> mImageContainer;
+  RefPtr<layers::KnowsCompositor> mKnowsCompositor;
   nsAutoPtr<DXVA2Manager> mDXVA2Manager;
 
   RefPtr<IMFSample> mLastInput;
   float mLastDuration;
   int64_t mLastTime = 0;
   bool mDraining = false;
   int64_t mSamplesCount = 0;
 
   bool mDXVAEnabled;
-  const layers::LayersBackend mLayersBackend;
   bool mUseHwAccel;
 
   nsCString mDXVAFailureReason;
 
   enum StreamType {
     Unknown,
     H264,
     VP8,
--- a/dom/media/platforms/wrappers/H264Converter.cpp
+++ b/dom/media/platforms/wrappers/H264Converter.cpp
@@ -14,17 +14,17 @@
 
 namespace mozilla
 {
 
 H264Converter::H264Converter(PlatformDecoderModule* aPDM,
                              const CreateDecoderParams& aParams)
   : mPDM(aPDM)
   , mCurrentConfig(aParams.VideoConfig())
-  , mLayersBackend(aParams.mLayersBackend)
+  , mKnowsCompositor(aParams.mKnowsCompositor)
   , mImageContainer(aParams.mImageContainer)
   , mTaskQueue(aParams.mTaskQueue)
   , mCallback(aParams.mCallback)
   , mDecoder(nullptr)
   , mGMPCrashHelper(aParams.mCrashHelper)
   , mNeedAVCC(aPDM->DecoderNeedsConversion(aParams.mConfig)
       == PlatformDecoderModule::ConversionRequired::kNeedAVCC)
   , mLastError(NS_OK)
@@ -189,17 +189,17 @@ H264Converter::CreateDecoder(DecoderDoct
   }
 
   mDecoder = mPDM->CreateVideoDecoder({
     mCurrentConfig,
     mTaskQueue,
     mCallback,
     aDiagnostics,
     mImageContainer,
-    mLayersBackend,
+    mKnowsCompositor,
     mGMPCrashHelper
   });
 
   if (!mDecoder) {
     mLastError = NS_ERROR_FAILURE;
     return NS_ERROR_FAILURE;
   }
 
--- a/dom/media/platforms/wrappers/H264Converter.h
+++ b/dom/media/platforms/wrappers/H264Converter.h
@@ -51,17 +51,17 @@ private:
   nsresult CheckForSPSChange(MediaRawData* aSample);
   void UpdateConfigFromExtraData(MediaByteBuffer* aExtraData);
 
   void OnDecoderInitDone(const TrackType aTrackType);
   void OnDecoderInitFailed(MediaResult aError);
 
   RefPtr<PlatformDecoderModule> mPDM;
   VideoInfo mCurrentConfig;
-  layers::LayersBackend mLayersBackend;
+  RefPtr<layers::KnowsCompositor> mKnowsCompositor;
   RefPtr<layers::ImageContainer> mImageContainer;
   const RefPtr<TaskQueue> mTaskQueue;
   nsTArray<RefPtr<MediaRawData>> mMediaRawSamples;
   MediaDataDecoderCallback* mCallback;
   RefPtr<MediaDataDecoder> mDecoder;
   MozPromiseRequestHolder<InitPromise> mInitPromiseRequest;
   RefPtr<GMPCrashHelper> mGMPCrashHelper;
   bool mNeedAVCC;
--- a/gfx/layers/ipc/VideoBridgeChild.h
+++ b/gfx/layers/ipc/VideoBridgeChild.h
@@ -10,24 +10,20 @@
 #include "ISurfaceAllocator.h"
 #include "TextureForwarder.h"
 
 namespace mozilla {
 namespace layers {
 
 class VideoBridgeChild final : public PVideoBridgeChild
                              , public TextureForwarder
-                             , public KnowsCompositor
 {
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VideoBridgeChild, override);
 
-  TextureForwarder* GetTextureForwarder() override { return this; }
-  LayersIPCActor* GetLayersIPCActor() override { return this; }
-
   static void Startup();
   static void Shutdown();
 
   static VideoBridgeChild* GetSingleton();
 
   // PVideoBridgeChild
   PTextureChild* AllocPTextureChild(const SurfaceDescriptor& aSharedData,
                                     const LayersBackend& aLayersBackend,