Bug 1192675: P1. Ensure VDA/VT APIs are only ever accessed from the same thread. r=cpearce a=ritu
☠☠ backed out by 5bb661db5c6c ☠ ☠
author: Jean-Yves Avenard <jyavenard@mozilla.com>
Mon, 10 Aug 2015 18:59:59 +1000
changeset 288969 0a9391fdb35077aa8f36ee9b7687e06ad80a05e7
parent 288968 b171d1b0b0ecbaeb35fa6914a2e37e4c04cafc87
child 288970 744ce5c31af5b6121dd450ec25d32999d80b0090
push id: 5067
push user: raliiev@mozilla.com
push date: Mon, 21 Sep 2015 14:04:52 +0000
treeherder: mozilla-beta@14221ffe5b2f [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: cpearce, ritu
bugs: 1192675
milestone: 42.0a2
Bug 1192675: P1. Ensure VDA/VT APIs are only ever accessed from the same thread. r=cpearce a=ritu
dom/media/platforms/apple/AppleUtils.h
dom/media/platforms/apple/AppleVDADecoder.cpp
dom/media/platforms/apple/AppleVDADecoder.h
dom/media/platforms/apple/AppleVTDecoder.cpp
dom/media/platforms/apple/AppleVTDecoder.h
--- a/dom/media/platforms/apple/AppleUtils.h
+++ b/dom/media/platforms/apple/AppleUtils.h
@@ -38,11 +38,61 @@ public:
   }
 
 private:
   // Copy operator isn't supported and is not implemented.
   AutoCFRelease<T>& operator=(const AutoCFRelease<T>&);
   T mRef;
 };
 
+// CFRefPtr: A CoreFoundation smart pointer that retains on construction
+// and releases on destruction (a null ref is tolerated everywhere).
+template <class T>
+class CFRefPtr {
+public:
+  explicit CFRefPtr(T aRef)
+    : mRef(aRef)
+  {
+    if (mRef) {
+      CFRetain(mRef);
+    }
+  }
+  // Copy constructor: shares the ref and adds a retain count.
+  CFRefPtr(const CFRefPtr<T>& aCFRefPtr)
+    : mRef(aCFRefPtr.mRef)
+  {
+    if (mRef) {
+      CFRetain(mRef);
+    }
+  }
+  // Copy operator; self-assignment safe (early return on identical ref).
+  CFRefPtr<T>& operator=(const CFRefPtr<T>& aCFRefPtr)
+  {
+    if (mRef == aCFRefPtr.mRef) {
+      return *this;
+    }
+    if (mRef) {
+      CFRelease(mRef);
+    }
+    mRef = aCFRefPtr.mRef;
+    if (mRef) {
+      CFRetain(mRef);
+    }
+    return *this;
+  }
+  ~CFRefPtr()
+  {
+    if (mRef) {
+      CFRelease(mRef);
+    }
+  }
+  // Return the wrapped ref so it can be used as an in parameter.
+  operator T() const
+  {
+    return mRef;
+  }
+
+private:
+  T mRef;
+};
+
 } // namespace mozilla
 
 #endif // mozilla_AppleUtils_h
--- a/dom/media/platforms/apple/AppleVDADecoder.cpp
+++ b/dom/media/platforms/apple/AppleVDADecoder.cpp
@@ -35,18 +35,22 @@ AppleVDADecoder::AppleVDADecoder(const V
                                layers::ImageContainer* aImageContainer)
   : mTaskQueue(aVideoTaskQueue)
   , mCallback(aCallback)
   , mImageContainer(aImageContainer)
   , mPictureWidth(aConfig.mImage.width)
   , mPictureHeight(aConfig.mImage.height)
   , mDisplayWidth(aConfig.mDisplay.width)
   , mDisplayHeight(aConfig.mDisplay.height)
+  , mInputIncoming(0)
+  , mIsShutDown(false)
   , mUseSoftwareImages(true)
   , mIs106(!nsCocoaFeatures::OnLionOrLater())
+  , mMonitor("AppleVideoDecoder")
+  , mIsFlushing(false)
   , mDecoder(nullptr)
 {
   MOZ_COUNT_CTOR(AppleVDADecoder);
   // TODO: Verify aConfig.mime_type.
 
   mExtraData = aConfig.mExtraData;
   mMaxRefFrames = 4;
   // Retrieve video dimensions from H264 SPS NAL.
@@ -86,69 +90,116 @@ AppleVDADecoder::Init()
   }
   nsresult rv = InitializeSession();
   return rv;
 }
 
 nsresult
 AppleVDADecoder::Shutdown()
 {
+  MOZ_DIAGNOSTIC_ASSERT(!mIsShutDown);
+  mIsShutDown = true;
+  if (mTaskQueue) {
+    nsCOMPtr<nsIRunnable> runnable =
+      NS_NewRunnableMethod(this, &AppleVDADecoder::ProcessShutdown);
+    mTaskQueue->Dispatch(runnable.forget());
+  } else {
+    ProcessShutdown();
+  }
+  return NS_OK;
+}
+
+void
+AppleVDADecoder::ProcessShutdown()
+{
   if (mDecoder) {
     LOG("%s: cleaning up decoder %p", __func__, mDecoder);
     VDADecoderDestroy(mDecoder);
     mDecoder = nullptr;
   }
-  return NS_OK;
 }
 
 nsresult
 AppleVDADecoder::Input(MediaRawData* aSample)
 {
+  MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+
   LOG("mp4 input sample %p pts %lld duration %lld us%s %d bytes",
       aSample,
       aSample->mTime,
       aSample->mDuration,
       aSample->mKeyframe ? " keyframe" : "",
       aSample->Size());
 
+  mInputIncoming++;
+
   nsCOMPtr<nsIRunnable> runnable =
       NS_NewRunnableMethodWithArg<nsRefPtr<MediaRawData>>(
           this,
           &AppleVDADecoder::SubmitFrame,
           nsRefPtr<MediaRawData>(aSample));
   mTaskQueue->Dispatch(runnable.forget());
   return NS_OK;
 }
 
 nsresult
 AppleVDADecoder::Flush()
 {
+  MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+  mIsFlushing = true;
   mTaskQueue->Flush();
+  nsCOMPtr<nsIRunnable> runnable =
+    NS_NewRunnableMethod(this, &AppleVDADecoder::ProcessFlush);
+  MonitorAutoLock mon(mMonitor);
+  mTaskQueue->Dispatch(runnable.forget());
+  while (mIsFlushing) {
+    mon.Wait();
+  }
+  mInputIncoming = 0;
+  return NS_OK;
+}
+
+nsresult
+AppleVDADecoder::Drain()
+{
+  MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+  nsCOMPtr<nsIRunnable> runnable =
+    NS_NewRunnableMethod(this, &AppleVDADecoder::ProcessDrain);
+  mTaskQueue->Dispatch(runnable.forget());
+  return NS_OK;
+}
+
+void
+AppleVDADecoder::ProcessFlush()
+{
+  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
   OSStatus rv = VDADecoderFlush(mDecoder, 0 /*dont emit*/);
   if (rv != noErr) {
     LOG("AppleVDADecoder::Flush failed waiting for platform decoder "
         "with error:%d.", rv);
   }
   ClearReorderedFrames();
-
-  return NS_OK;
+  MonitorAutoLock mon(mMonitor);
+  mIsFlushing = false;
+  mon.NotifyAll();
 }
 
-nsresult
-AppleVDADecoder::Drain()
+void
+AppleVDADecoder::ProcessDrain()
 {
-  mTaskQueue->AwaitIdle();
+  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
   OSStatus rv = VDADecoderFlush(mDecoder, kVDADecoderFlush_EmitFrames);
   if (rv != noErr) {
     LOG("AppleVDADecoder::Drain failed waiting for platform decoder "
         "with error:%d.", rv);
   }
   DrainReorderedFrames();
   mCallback->DrainComplete();
-  return NS_OK;
 }
 
 //
 // Implementation details.
 //
 
 // Callback passed to the VideoToolbox decoder for returning data.
 // This needs to be static because the API takes a C-style pair of
@@ -203,27 +254,29 @@ PlatformCallback(void* decompressionOutp
   char is_sync_point;
 
   CFNumberGetValue(ptsref, kCFNumberSInt64Type, &pts);
   CFNumberGetValue(dtsref, kCFNumberSInt64Type, &dts);
   CFNumberGetValue(durref, kCFNumberSInt64Type, &duration);
   CFNumberGetValue(boref, kCFNumberSInt64Type, &byte_offset);
   CFNumberGetValue(kfref, kCFNumberSInt8Type, &is_sync_point);
 
-  nsAutoPtr<AppleVDADecoder::AppleFrameRef> frameRef(
-    new AppleVDADecoder::AppleFrameRef(
+  AppleVDADecoder::AppleFrameRef frameRef(
       media::TimeUnit::FromMicroseconds(dts),
       media::TimeUnit::FromMicroseconds(pts),
       media::TimeUnit::FromMicroseconds(duration),
       byte_offset,
-      is_sync_point == 1));
+      is_sync_point == 1);
 
   // Forward the data back to an object method which can access
-  // the correct MP4Reader callback.
-  decoder->OutputFrame(image, frameRef);
+  // the correct reader's callback.
+  nsCOMPtr<nsIRunnable> task =
+    NS_NewRunnableMethodWithArgs<CFRefPtr<CVPixelBufferRef>, AppleVDADecoder::AppleFrameRef>(
+      decoder, &AppleVDADecoder::OutputFrame, image, frameRef);
+  decoder->DispatchOutputTask(task.forget());
 }
 
 AppleVDADecoder::AppleFrameRef*
 AppleVDADecoder::CreateAppleFrameRef(const MediaRawData* aSample)
 {
   MOZ_ASSERT(aSample);
   return new AppleFrameRef(*aSample);
 }
@@ -241,25 +294,32 @@ AppleVDADecoder::ClearReorderedFrames()
 {
   while (!mReorderQueue.IsEmpty()) {
     mReorderQueue.Pop();
   }
 }
 
 // Copy and return a decoded frame.
 nsresult
-AppleVDADecoder::OutputFrame(CVPixelBufferRef aImage,
-                             nsAutoPtr<AppleVDADecoder::AppleFrameRef> aFrameRef)
+AppleVDADecoder::OutputFrame(CFRefPtr<CVPixelBufferRef> aImage,
+                             AppleVDADecoder::AppleFrameRef aFrameRef)
 {
+  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
+  if (mIsFlushing) {
+    // We are in the process of flushing; ignore frame.
+    return NS_OK;
+  }
+
   LOG("mp4 output frame %lld dts %lld pts %lld duration %lld us%s",
-      aFrameRef->byte_offset,
-      aFrameRef->decode_timestamp.ToMicroseconds(),
-      aFrameRef->composition_timestamp.ToMicroseconds(),
-      aFrameRef->duration.ToMicroseconds(),
-      aFrameRef->is_sync_point ? " keyframe" : ""
+      aFrameRef.byte_offset,
+      aFrameRef.decode_timestamp.ToMicroseconds(),
+      aFrameRef.composition_timestamp.ToMicroseconds(),
+      aFrameRef.duration.ToMicroseconds(),
+      aFrameRef.is_sync_point ? " keyframe" : ""
   );
 
   // Where our resulting image will end up.
   nsRefPtr<VideoData> data;
   // Bounds.
   VideoInfo info;
   info.mDisplay = nsIntSize(mDisplayWidth, mDisplayHeight);
   gfx::IntRect visible = gfx::IntRect(0,
@@ -307,22 +367,22 @@ AppleVDADecoder::OutputFrame(CVPixelBuff
     buffer.mPlanes[2].mOffset = 1;
     buffer.mPlanes[2].mSkip = 1;
 
     // Copy the image data into our own format.
     data =
       VideoData::Create(info,
                         mImageContainer,
                         nullptr,
-                        aFrameRef->byte_offset,
-                        aFrameRef->composition_timestamp.ToMicroseconds(),
-                        aFrameRef->duration.ToMicroseconds(),
+                        aFrameRef.byte_offset,
+                        aFrameRef.composition_timestamp.ToMicroseconds(),
+                        aFrameRef.duration.ToMicroseconds(),
                         buffer,
-                        aFrameRef->is_sync_point,
-                        aFrameRef->decode_timestamp.ToMicroseconds(),
+                        aFrameRef.is_sync_point,
+                        aFrameRef.decode_timestamp.ToMicroseconds(),
                         visible);
     // Unlock the returned image data.
     CVPixelBufferUnlockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
   } else {
     IOSurfacePtr surface = MacIOSurfaceLib::CVPixelBufferGetIOSurface(aImage);
     MOZ_ASSERT(surface, "Decoder didn't return an IOSurface backed buffer");
 
     nsRefPtr<MacIOSurface> macSurface = new MacIOSurface(surface);
@@ -331,22 +391,22 @@ AppleVDADecoder::OutputFrame(CVPixelBuff
       mImageContainer->CreateImage(ImageFormat::MAC_IOSURFACE);
     layers::MacIOSurfaceImage* videoImage =
       static_cast<layers::MacIOSurfaceImage*>(image.get());
     videoImage->SetSurface(macSurface);
 
     data =
       VideoData::CreateFromImage(info,
                                  mImageContainer,
-                                 aFrameRef->byte_offset,
-                                 aFrameRef->composition_timestamp.ToMicroseconds(),
-                                 aFrameRef->duration.ToMicroseconds(),
+                                 aFrameRef.byte_offset,
+                                 aFrameRef.composition_timestamp.ToMicroseconds(),
+                                 aFrameRef.duration.ToMicroseconds(),
                                  image.forget(),
-                                 aFrameRef->is_sync_point,
-                                 aFrameRef->decode_timestamp.ToMicroseconds(),
+                                 aFrameRef.is_sync_point,
+                                 aFrameRef.decode_timestamp.ToMicroseconds(),
                                  visible);
   }
 
   if (!data) {
     NS_ERROR("Couldn't create VideoData for frame");
     mCallback->Error();
     return NS_ERROR_FAILURE;
   }
@@ -361,16 +421,20 @@ AppleVDADecoder::OutputFrame(CVPixelBuff
       static_cast<unsigned long long>(mReorderQueue.Length()));
 
   return NS_OK;
 }
 
 nsresult
 AppleVDADecoder::SubmitFrame(MediaRawData* aSample)
 {
+  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
+  mInputIncoming--;
+
   AutoCFRelease<CFDataRef> block =
     CFDataCreate(kCFAllocatorDefault, aSample->Data(), aSample->Size());
   if (!block) {
     NS_ERROR("Couldn't create CFData");
     return NS_ERROR_FAILURE;
   }
 
   AutoCFRelease<CFNumberRef> pts =
@@ -434,17 +498,17 @@ AppleVDADecoder::SubmitFrame(MediaRawDat
     // This dictionary can contain client provided information associated with
     // the frame being decoded, for example presentation time.
     // The CFDictionaryRef will be retained by the framework.
     // In 10.6, it is released one too many. So retain it.
     CFRetain(frameInfo);
   }
 
   // Ask for more data.
-  if (mTaskQueue->IsEmpty()) {
+  if (!mInputIncoming) {
     LOG("AppleVDADecoder task queue empty; requesting more data");
     mCallback->InputExhausted();
   }
 
   return NS_OK;
 }
 
 nsresult
--- a/dom/media/platforms/apple/AppleVDADecoder.h
+++ b/dom/media/platforms/apple/AppleVDADecoder.h
@@ -3,16 +3,17 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef mozilla_AppleVDADecoder_h
 #define mozilla_AppleVDADecoder_h
 
 #include "PlatformDecoderModule.h"
+#include "mozilla/Atomics.h"
 #include "mozilla/ReentrantMonitor.h"
 #include "MP4Decoder.h"
 #include "nsIThread.h"
 #include "ReorderQueue.h"
 #include "TimeUnits.h"
 
 #include "VideoDecodeAcceleration/VDADecoder.h"
 
@@ -75,38 +76,64 @@ public:
   virtual nsresult Flush() override;
   virtual nsresult Drain() override;
   virtual nsresult Shutdown() override;
   virtual bool IsHardwareAccelerated() const override
   {
     return true;
   }
 
-  nsresult OutputFrame(CVPixelBufferRef aImage,
-                       nsAutoPtr<AppleFrameRef> aFrameRef);
+  void DispatchOutputTask(already_AddRefed<nsIRunnable> aTask)
+  {
+    nsCOMPtr<nsIRunnable> task = aTask;
+    if (mIsShutDown || mIsFlushing) {
+      return;
+    }
+    mTaskQueue->Dispatch(task.forget());
+  }
 
- protected:
+  nsresult OutputFrame(CFRefPtr<CVPixelBufferRef> aImage,
+                       AppleFrameRef aFrameRef);
+
+protected:
+  // Flush and Drain operation, always run
+  virtual void ProcessFlush();
+  virtual void ProcessDrain();
+  virtual void ProcessShutdown();
+
   AppleFrameRef* CreateAppleFrameRef(const MediaRawData* aSample);
   void DrainReorderedFrames();
   void ClearReorderedFrames();
   CFDictionaryRef CreateOutputConfiguration();
 
   nsRefPtr<MediaByteBuffer> mExtraData;
   nsRefPtr<FlushableTaskQueue> mTaskQueue;
   MediaDataDecoderCallback* mCallback;
   nsRefPtr<layers::ImageContainer> mImageContainer;
   ReorderQueue mReorderQueue;
   uint32_t mPictureWidth;
   uint32_t mPictureHeight;
   uint32_t mDisplayWidth;
   uint32_t mDisplayHeight;
   uint32_t mMaxRefFrames;
+  // Increased when Input is called, and decreased when ProcessFrame runs.
+  // Reaching 0 indicates that there's no pending Input.
+  Atomic<uint32_t> mInputIncoming;
+  Atomic<bool> mIsShutDown;
+
   bool mUseSoftwareImages;
   bool mIs106;
 
+  // For wait on mIsFlushing during Shutdown() process.
+  Monitor mMonitor;
+  // Set on reader/decode thread calling Flush() to indicate that output is
+  // not required and so input samples on mTaskQueue need not be processed.
+  // Cleared on mTaskQueue in ProcessDrain().
+  Atomic<bool> mIsFlushing;
+
 private:
   VDADecoder mDecoder;
 
   // Method to pass a frame to VideoToolbox for decoding.
   nsresult SubmitFrame(MediaRawData* aSample);
   // Method to set up the decompression session.
   nsresult InitializeSession();
   CFDictionaryRef CreateDecoderSpecification();
--- a/dom/media/platforms/apple/AppleVTDecoder.cpp
+++ b/dom/media/platforms/apple/AppleVTDecoder.cpp
@@ -53,36 +53,37 @@ AppleVTDecoder::~AppleVTDecoder()
 
 nsresult
 AppleVTDecoder::Init()
 {
   nsresult rv = InitializeSession();
   return rv;
 }
 
-nsresult
-AppleVTDecoder::Shutdown()
+void
+AppleVTDecoder::ProcessShutdown()
 {
   if (mSession) {
     LOG("%s: cleaning up session %p", __func__, mSession);
     VTDecompressionSessionInvalidate(mSession);
     CFRelease(mSession);
     mSession = nullptr;
   }
   if (mFormat) {
     LOG("%s: releasing format %p", __func__, mFormat);
     CFRelease(mFormat);
     mFormat = nullptr;
   }
-  return NS_OK;
 }
 
 nsresult
 AppleVTDecoder::Input(MediaRawData* aSample)
 {
+  MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+
   LOG("mp4 input sample %p pts %lld duration %lld us%s %d bytes",
       aSample,
       aSample->mTime,
       aSample->mDuration,
       aSample->mKeyframe ? " keyframe" : "",
       aSample->Size());
 
 #ifdef LOG_MEDIA_SHA1
@@ -92,51 +93,51 @@ AppleVTDecoder::Input(MediaRawData* aSam
   hash.finish(digest_buf);
   nsAutoCString digest;
   for (size_t i = 0; i < sizeof(digest_buf); i++) {
     digest.AppendPrintf("%02x", digest_buf[i]);
   }
   LOG("    sha1 %s", digest.get());
 #endif // LOG_MEDIA_SHA1
 
+  mInputIncoming++;
+
   nsCOMPtr<nsIRunnable> runnable =
       NS_NewRunnableMethodWithArg<nsRefPtr<MediaRawData>>(
-          this,
-          &AppleVTDecoder::SubmitFrame,
-          nsRefPtr<MediaRawData>(aSample));
+          this, &AppleVTDecoder::SubmitFrame, aSample);
   mTaskQueue->Dispatch(runnable.forget());
   return NS_OK;
 }
 
-nsresult
-AppleVTDecoder::Flush()
+void
+AppleVTDecoder::ProcessFlush()
 {
-  mTaskQueue->Flush();
+  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
   nsresult rv = WaitForAsynchronousFrames();
   if (NS_FAILED(rv)) {
     LOG("AppleVTDecoder::Flush failed waiting for platform decoder "
         "with error:%d.", rv);
   }
   ClearReorderedFrames();
-
-  return rv;
+  MonitorAutoLock mon(mMonitor);
+  mIsFlushing = false;
+  mon.NotifyAll();
 }
 
-nsresult
-AppleVTDecoder::Drain()
+void
+AppleVTDecoder::ProcessDrain()
 {
-  mTaskQueue->AwaitIdle();
+  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
   nsresult rv = WaitForAsynchronousFrames();
   if (NS_FAILED(rv)) {
     LOG("AppleVTDecoder::Drain failed waiting for platform decoder "
         "with error:%d.", rv);
   }
   DrainReorderedFrames();
   mCallback->DrainComplete();
-  return NS_OK;
 }
 
 //
 // Implementation details.
 //
 
 // Callback passed to the VideoToolbox decoder for returning data.
 // This needs to be static because the API takes a C-style pair of
@@ -164,19 +165,20 @@ PlatformCallback(void* decompressionOutp
     return;
   }
   if (flags & kVTDecodeInfo_FrameDropped) {
     NS_WARNING("  ...frame tagged as dropped...");
   }
   MOZ_ASSERT(CFGetTypeID(image) == CVPixelBufferGetTypeID(),
     "VideoToolbox returned an unexpected image type");
 
-  // Forward the data back to an object method which can access
-  // the correct MP4Reader callback.
-  decoder->OutputFrame(image, frameRef);
+  nsCOMPtr<nsIRunnable> task =
+    NS_NewRunnableMethodWithArgs<CFRefPtr<CVPixelBufferRef>, AppleVTDecoder::AppleFrameRef>(
+      decoder, &AppleVTDecoder::OutputFrame, image, *frameRef);
+  decoder->DispatchOutputTask(task.forget());
 }
 
 nsresult
 AppleVTDecoder::WaitForAsynchronousFrames()
 {
   OSStatus rv = VTDecompressionSessionWaitForAsynchronousFrames(mSession);
   if (rv != noErr) {
     LOG("AppleVTDecoder: Error %d waiting for asynchronous frames", rv);
@@ -198,16 +200,18 @@ TimingInfoFromSample(MediaRawData* aSamp
     CMTimeMake(aSample->mTimecode, USECS_PER_S);
 
   return timestamp;
 }
 
 nsresult
 AppleVTDecoder::SubmitFrame(MediaRawData* aSample)
 {
+  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+  mInputIncoming--;
   // For some reason this gives me a double-free error with stagefright.
   AutoCFRelease<CMBlockBufferRef> block = nullptr;
   AutoCFRelease<CMSampleBufferRef> sample = nullptr;
   VTDecodeInfoFlags infoFlags;
   OSStatus rv;
 
   // FIXME: This copies the sample data. I think we can provide
   // a custom block source which reuses the aSample buffer.
@@ -243,17 +247,17 @@ AppleVTDecoder::SubmitFrame(MediaRawData
   if (rv != noErr && !(infoFlags & kVTDecodeInfo_FrameDropped)) {
     LOG("AppleVTDecoder: Error %d VTDecompressionSessionDecodeFrame", rv);
     NS_WARNING("Couldn't pass frame to decoder");
     mCallback->Error();
     return NS_ERROR_FAILURE;
   }
 
   // Ask for more data.
-  if (mTaskQueue->IsEmpty()) {
+  if (!mInputIncoming) {
     LOG("AppleVTDecoder task queue empty; requesting more data");
     mCallback->InputExhausted();
   }
 
   return NS_OK;
 }
 
 nsresult
--- a/dom/media/platforms/apple/AppleVTDecoder.h
+++ b/dom/media/platforms/apple/AppleVTDecoder.h
@@ -17,24 +17,26 @@ class AppleVTDecoder : public AppleVDADe
 public:
   AppleVTDecoder(const VideoInfo& aConfig,
                  FlushableTaskQueue* aVideoTaskQueue,
                  MediaDataDecoderCallback* aCallback,
                  layers::ImageContainer* aImageContainer);
   virtual ~AppleVTDecoder();
   virtual nsresult Init() override;
   virtual nsresult Input(MediaRawData* aSample) override;
-  virtual nsresult Flush() override;
-  virtual nsresult Drain() override;
-  virtual nsresult Shutdown() override;
   virtual bool IsHardwareAccelerated() const override
   {
     return mIsHardwareAccelerated;
   }
 
+protected:
+  void ProcessFlush() override;
+  void ProcessDrain() override;
+  void ProcessShutdown() override;
+
 private:
   CMVideoFormatDescriptionRef mFormat;
   VTDecompressionSessionRef mSession;
 
   // Method to pass a frame to VideoToolbox for decoding.
   nsresult SubmitFrame(MediaRawData* aSample);
   // Method to set up the decompression session.
   nsresult InitializeSession();