Bug 1125970 - Make flushing an opt-in mechanism, and use it only for the PDM task queues. v1 r=cpearce a=lmandel
author       Bobby Holley <bobbyholley@gmail.com>
date         Sun, 22 Feb 2015 20:30:46 -0800
changeset    249936 a48f841801e08e28b6566a4672b161f59808fae7
parent       249935 74c5dedf63e9e38baf52980e4116b7bc5945c870
child        249937 387a228666a21ae5ec5ce826e85af1e2e33c914c
push id      4489
push user    raliiev@mozilla.com
push date    Mon, 23 Feb 2015 15:17:55 +0000
treeherder   mozilla-beta@fd7c3dc24146
reviewers    cpearce, lmandel
bugs         1125970
milestone    37.0a2
Bug 1125970 - Make flushing an opt-in mechanism, and use it only for the PDM task queues. v1 r=cpearce a=lmandel
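
For orientation, the sketch below condenses the net effect of this patch on the task-queue API (declarations paraphrased from the MediaTaskQueue.h hunk further down; bodies and unrelated members omitted). Flushing moves out of the MediaTaskQueue base class and into an opt-in FlushableMediaTaskQueue subclass, which only the PDM decode task queues use.

    // Base class: no flushing API any more.
    class MediaTaskQueue {
    public:
      nsresult Dispatch(TemporaryRef<nsIRunnable> aRunnable);
      nsresult ForceDispatch(TemporaryRef<nsIRunnable> aRunnable);
      nsresult SyncDispatch(TemporaryRef<nsIRunnable> aRunnable);
      nsRefPtr<ShutdownPromise> BeginShutdown();
      // ...
    protected:
      virtual ~MediaTaskQueue();  // protected so it can be subclassed
    };

    // Opt-in subclass: only queues that actually need flushing get it.
    class FlushableMediaTaskQueue : public MediaTaskQueue {
    public:
      explicit FlushableMediaTaskQueue(TemporaryRef<SharedThreadPool> aPool);
      nsresult FlushAndDispatch(TemporaryRef<nsIRunnable> aRunnable);
      // Drops pending tasks and blocks until the running task (if any) finishes.
      void Flush();
    };
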
dom/media/MediaTaskQueue.cpp
dom/media/MediaTaskQueue.h
dom/media/VideoUtils.cpp
dom/media/VideoUtils.h
dom/media/fmp4/AVCCDecoderModule.cpp
dom/media/fmp4/AVCCDecoderModule.h
dom/media/fmp4/BlankDecoderModule.cpp
dom/media/fmp4/MP4Reader.cpp
dom/media/fmp4/MP4Reader.h
dom/media/fmp4/PlatformDecoderModule.cpp
dom/media/fmp4/PlatformDecoderModule.h
dom/media/fmp4/SharedDecoderManager.cpp
dom/media/fmp4/SharedDecoderManager.h
dom/media/fmp4/android/AndroidDecoderModule.cpp
dom/media/fmp4/android/AndroidDecoderModule.h
dom/media/fmp4/apple/AppleATDecoder.cpp
dom/media/fmp4/apple/AppleATDecoder.h
dom/media/fmp4/apple/AppleDecoderModule.cpp
dom/media/fmp4/apple/AppleDecoderModule.h
dom/media/fmp4/apple/AppleVDADecoder.cpp
dom/media/fmp4/apple/AppleVDADecoder.h
dom/media/fmp4/apple/AppleVTDecoder.cpp
dom/media/fmp4/apple/AppleVTDecoder.h
dom/media/fmp4/eme/EMEDecoderModule.cpp
dom/media/fmp4/eme/EMEDecoderModule.h
dom/media/fmp4/ffmpeg/FFmpegAudioDecoder.cpp
dom/media/fmp4/ffmpeg/FFmpegAudioDecoder.h
dom/media/fmp4/ffmpeg/FFmpegDataDecoder.cpp
dom/media/fmp4/ffmpeg/FFmpegDataDecoder.h
dom/media/fmp4/ffmpeg/FFmpegDecoderModule.h
dom/media/fmp4/ffmpeg/FFmpegH264Decoder.cpp
dom/media/fmp4/ffmpeg/FFmpegH264Decoder.h
dom/media/fmp4/gonk/GonkDecoderModule.cpp
dom/media/fmp4/gonk/GonkDecoderModule.h
dom/media/fmp4/gonk/GonkMediaDataDecoder.cpp
dom/media/fmp4/gonk/GonkMediaDataDecoder.h
dom/media/fmp4/wmf/WMFDecoderModule.cpp
dom/media/fmp4/wmf/WMFDecoderModule.h
dom/media/fmp4/wmf/WMFMediaDataDecoder.cpp
dom/media/fmp4/wmf/WMFMediaDataDecoder.h
dom/media/fmp4/wmf/WMFVideoMFTManager.h
dom/media/omx/MediaCodecReader.cpp
dom/media/omx/MediaCodecReader.h
dom/media/webm/IntelWebMVideoDecoder.h
dom/media/webm/WebMReader.cpp
dom/media/webm/WebMReader.h
--- a/dom/media/MediaTaskQueue.cpp
+++ b/dom/media/MediaTaskQueue.cpp
@@ -142,39 +142,39 @@ MediaTaskQueue::BeginShutdown()
   nsRefPtr<ShutdownPromise> p = mShutdownPromise.Ensure(__func__);
   if (!mIsRunning) {
     mShutdownPromise.Resolve(true, __func__);
   }
   mon.NotifyAll();
   return p;
 }
 
+void
+FlushableMediaTaskQueue::Flush()
+{
+  MonitorAutoLock mon(mQueueMonitor);
+  AutoSetFlushing autoFlush(this);
+  FlushLocked();
+  AwaitIdleLocked();
+}
+
 nsresult
-MediaTaskQueue::FlushAndDispatch(TemporaryRef<nsIRunnable> aRunnable)
+FlushableMediaTaskQueue::FlushAndDispatch(TemporaryRef<nsIRunnable> aRunnable)
 {
   MonitorAutoLock mon(mQueueMonitor);
   AutoSetFlushing autoFlush(this);
   FlushLocked();
   nsresult rv = DispatchLocked(aRunnable, IgnoreFlushing);
   NS_ENSURE_SUCCESS(rv, rv);
   AwaitIdleLocked();
   return NS_OK;
 }
 
 void
-MediaTaskQueue::Flush()
-{
-  MonitorAutoLock mon(mQueueMonitor);
-  AutoSetFlushing autoFlush(this);
-  FlushLocked();
-  AwaitIdleLocked();
-}
-
-void
-MediaTaskQueue::FlushLocked()
+FlushableMediaTaskQueue::FlushLocked()
 {
   mQueueMonitor.AssertCurrentThreadOwns();
   MOZ_ASSERT(mIsFlushing);
 
   // Clear the tasks, but preserve those with mForceDispatch by re-appending
   // them to the queue.
   size_t numTasks = mTasks.size();
   for (size_t i = 0; i < numTasks; ++i) {
--- a/dom/media/MediaTaskQueue.h
+++ b/dom/media/MediaTaskQueue.h
@@ -22,38 +22,30 @@ class SharedThreadPool;
 
 typedef MediaPromise<bool, bool, false> ShutdownPromise;
 
 // Abstracts executing runnables in order in a thread pool. The runnables
 // dispatched to the MediaTaskQueue will be executed in the order in which
 // they're received, and are guaranteed to not be executed concurrently.
 // They may be executed on different threads, and a memory barrier is used
 // to make this threadsafe for objects that aren't already threadsafe.
-class MediaTaskQueue MOZ_FINAL {
-  ~MediaTaskQueue();
-
+class MediaTaskQueue {
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaTaskQueue)
 
   explicit MediaTaskQueue(TemporaryRef<SharedThreadPool> aPool);
 
   nsresult Dispatch(TemporaryRef<nsIRunnable> aRunnable);
 
   // This should only be used for things that absolutely can't afford to be
   // flushed. Normal operations should use Dispatch.
   nsresult ForceDispatch(TemporaryRef<nsIRunnable> aRunnable);
 
   nsresult SyncDispatch(TemporaryRef<nsIRunnable> aRunnable);
 
-  nsresult FlushAndDispatch(TemporaryRef<nsIRunnable> aRunnable);
-
-  // Removes all pending tasks from the task queue, and blocks until
-  // the currently running task (if any) finishes.
-  void Flush();
-
   // Puts the queue in a shutdown state and returns immediately. The queue will
   // remain alive at least until all the events are drained, because the Runners
   // hold a strong reference to the task queue, and one of them is always held
   // by the threadpool event queue when the task queue is non-empty.
   //
   // The returned promise is resolved when the queue goes empty.
   nsRefPtr<ShutdownPromise> BeginShutdown();
 
@@ -65,28 +57,29 @@ public:
   void AwaitShutdownAndIdle();
 
   bool IsEmpty();
 
   // Returns true if the current thread is currently running a Runnable in
   // the task queue. This is for debugging/validation purposes only.
   bool IsCurrentThreadIn();
 
-private:
+protected:
+  virtual ~MediaTaskQueue();
+
 
   // Blocks until all tasks finish executing. Called internally by methods
   // that need to wait until the task queue is idle.
   // mQueueMonitor must be held.
   void AwaitIdleLocked();
 
   enum DispatchMode { AbortIfFlushing, IgnoreFlushing, Forced };
 
   nsresult DispatchLocked(TemporaryRef<nsIRunnable> aRunnable,
                           DispatchMode aMode);
-  void FlushLocked();
 
   RefPtr<SharedThreadPool> mPool;
 
   // Monitor that protects the queue and mIsRunning;
   Monitor mQueueMonitor;
 
   struct TaskQueueEntry {
     RefPtr<nsIRunnable> mRunnable;
@@ -108,44 +101,57 @@ private:
   // True if we've dispatched an event to the pool to execute events from
   // the queue.
   bool mIsRunning;
 
   // True if we've started our shutdown process.
   bool mIsShutdown;
   MediaPromiseHolder<ShutdownPromise> mShutdownPromise;
 
-  class MOZ_STACK_CLASS AutoSetFlushing
-  {
-  public:
-    explicit AutoSetFlushing(MediaTaskQueue* aTaskQueue) : mTaskQueue(aTaskQueue)
-    {
-      mTaskQueue->mQueueMonitor.AssertCurrentThreadOwns();
-      mTaskQueue->mIsFlushing = true;
-    }
-    ~AutoSetFlushing()
-    {
-      mTaskQueue->mQueueMonitor.AssertCurrentThreadOwns();
-      mTaskQueue->mIsFlushing = false;
-    }
-
-  private:
-    MediaTaskQueue* mTaskQueue;
-  };
-
   // True if we're flushing; we reject new tasks if we're flushing.
   bool mIsFlushing;
 
   class Runner : public nsRunnable {
   public:
     explicit Runner(MediaTaskQueue* aQueue)
       : mQueue(aQueue)
     {
     }
     NS_METHOD Run() MOZ_OVERRIDE;
   private:
     RefPtr<MediaTaskQueue> mQueue;
   };
 };
 
+class FlushableMediaTaskQueue : public MediaTaskQueue
+{
+public:
+  explicit FlushableMediaTaskQueue(TemporaryRef<SharedThreadPool> aPool) : MediaTaskQueue(aPool) {}
+  nsresult FlushAndDispatch(TemporaryRef<nsIRunnable> aRunnable);
+  void Flush();
+
+private:
+
+  class MOZ_STACK_CLASS AutoSetFlushing
+  {
+  public:
+    explicit AutoSetFlushing(FlushableMediaTaskQueue* aTaskQueue) : mTaskQueue(aTaskQueue)
+    {
+      mTaskQueue->mQueueMonitor.AssertCurrentThreadOwns();
+      mTaskQueue->mIsFlushing = true;
+    }
+    ~AutoSetFlushing()
+    {
+      mTaskQueue->mQueueMonitor.AssertCurrentThreadOwns();
+      mTaskQueue->mIsFlushing = false;
+    }
+
+  private:
+    FlushableMediaTaskQueue* mTaskQueue;
+  };
+
+  void FlushLocked();
+
+};
+
 } // namespace mozilla
 
 #endif // MediaTaskQueue_h_
--- a/dom/media/VideoUtils.cpp
+++ b/dom/media/VideoUtils.cpp
@@ -277,19 +277,39 @@ public:
   NS_IMETHOD Run() {
     MOZ_ASSERT(NS_IsMainThread());
     mTaskQueue = new MediaTaskQueue(GetMediaDecodeThreadPool());
     return NS_OK;
   }
   nsRefPtr<MediaTaskQueue> mTaskQueue;
 };
 
+class CreateFlushableTaskQueueTask : public nsRunnable {
+public:
+  NS_IMETHOD Run() {
+    MOZ_ASSERT(NS_IsMainThread());
+    mTaskQueue = new FlushableMediaTaskQueue(GetMediaDecodeThreadPool());
+    return NS_OK;
+  }
+  nsRefPtr<FlushableMediaTaskQueue> mTaskQueue;
+};
+
 already_AddRefed<MediaTaskQueue>
 CreateMediaDecodeTaskQueue()
 {
   // We must create the MediaTaskQueue/SharedThreadPool on the main thread.
   nsRefPtr<CreateTaskQueueTask> t(new CreateTaskQueueTask());
   nsresult rv = NS_DispatchToMainThread(t, NS_DISPATCH_SYNC);
   NS_ENSURE_SUCCESS(rv, nullptr);
   return t->mTaskQueue.forget();
 }
 
+already_AddRefed<FlushableMediaTaskQueue>
+CreateFlushableMediaDecodeTaskQueue()
+{
+  // We must create the MediaTaskQueue/SharedThreadPool on the main thread.
+  nsRefPtr<CreateFlushableTaskQueueTask> t(new CreateFlushableTaskQueueTask());
+  nsresult rv = NS_DispatchToMainThread(t, NS_DISPATCH_SYNC);
+  NS_ENSURE_SUCCESS(rv, nullptr);
+  return t->mTaskQueue.forget();
+}
+
 } // end namespace mozilla
--- a/dom/media/VideoUtils.h
+++ b/dom/media/VideoUtils.h
@@ -259,15 +259,19 @@ ExtractH264CodecDetails(const nsAString&
 
 // Use a cryptographic quality PRNG to generate raw random bytes
 // and convert that to a base64 string suitable for use as a file or URL
 // path. This is based on code from nsExternalAppHandler::SetUpTempFile.
 nsresult
 GenerateRandomPathName(nsCString& aOutSalt, uint32_t aLength);
 
 class MediaTaskQueue;
+class FlushableMediaTaskQueue;
 
 already_AddRefed<MediaTaskQueue>
 CreateMediaDecodeTaskQueue();
 
+already_AddRefed<FlushableMediaTaskQueue>
+CreateFlushableMediaDecodeTaskQueue();
+
 } // end namespace mozilla
 
 #endif
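
With CreateFlushableMediaDecodeTaskQueue() available, flush semantics become an explicit opt-in for callers. A minimal usage sketch follows (hypothetical call site, not part of this patch; 'decoder' is an assumed MediaDataDecoder*, and the runnable pattern mirrors the EMEDecoderModule code later in the diff):

    // Only code that actually needs Flush() asks for a flushable queue;
    // everything else keeps using CreateMediaDecodeTaskQueue().
    nsRefPtr<FlushableMediaTaskQueue> taskQueue =
      CreateFlushableMediaDecodeTaskQueue();
    if (taskQueue) {
      // Dispatch work as usual ('decoder' is a hypothetical MediaDataDecoder*).
      taskQueue->Dispatch(NS_NewRunnableMethod(decoder, &MediaDataDecoder::Init));
      // e.g. on seek: drop any still-pending tasks and wait for the currently
      // running one to finish.
      taskQueue->Flush();
    }
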
--- a/dom/media/fmp4/AVCCDecoderModule.cpp
+++ b/dom/media/fmp4/AVCCDecoderModule.cpp
@@ -15,17 +15,17 @@ namespace mozilla
 
 class AVCCMediaDataDecoder : public MediaDataDecoder {
 public:
 
   AVCCMediaDataDecoder(PlatformDecoderModule* aPDM,
                        const mp4_demuxer::VideoDecoderConfig& aConfig,
                        layers::LayersBackend aLayersBackend,
                        layers::ImageContainer* aImageContainer,
-                       MediaTaskQueue* aVideoTaskQueue,
+                       FlushableMediaTaskQueue* aVideoTaskQueue,
                        MediaDataDecoderCallback* aCallback);
 
   virtual ~AVCCMediaDataDecoder();
 
   virtual nsresult Init() MOZ_OVERRIDE;
   virtual nsresult Input(mp4_demuxer::MP4Sample* aSample) MOZ_OVERRIDE;
   virtual nsresult Flush() MOZ_OVERRIDE;
   virtual nsresult Drain() MOZ_OVERRIDE;
@@ -42,27 +42,27 @@ private:
   // will set mError accordingly.
   nsresult CreateDecoder();
   nsresult CreateDecoderAndInit(mp4_demuxer::MP4Sample* aSample);
 
   nsRefPtr<PlatformDecoderModule> mPDM;
   mp4_demuxer::VideoDecoderConfig mCurrentConfig;
   layers::LayersBackend mLayersBackend;
   nsRefPtr<layers::ImageContainer> mImageContainer;
-  nsRefPtr<MediaTaskQueue> mVideoTaskQueue;
+  nsRefPtr<FlushableMediaTaskQueue> mVideoTaskQueue;
   MediaDataDecoderCallback* mCallback;
   nsRefPtr<MediaDataDecoder> mDecoder;
   nsresult mLastError;
 };
 
 AVCCMediaDataDecoder::AVCCMediaDataDecoder(PlatformDecoderModule* aPDM,
                                            const mp4_demuxer::VideoDecoderConfig& aConfig,
                                            layers::LayersBackend aLayersBackend,
                                            layers::ImageContainer* aImageContainer,
-                                           MediaTaskQueue* aVideoTaskQueue,
+                                           FlushableMediaTaskQueue* aVideoTaskQueue,
                                            MediaDataDecoderCallback* aCallback)
   : mPDM(aPDM)
   , mCurrentConfig(aConfig)
   , mLayersBackend(aLayersBackend)
   , mImageContainer(aImageContainer)
   , mVideoTaskQueue(aVideoTaskQueue)
   , mCallback(aCallback)
   , mDecoder(nullptr)
@@ -234,17 +234,17 @@ AVCCDecoderModule::Shutdown()
 {
   return mPDM->Shutdown();
 }
 
 already_AddRefed<MediaDataDecoder>
 AVCCDecoderModule::CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
                                       layers::LayersBackend aLayersBackend,
                                       layers::ImageContainer* aImageContainer,
-                                      MediaTaskQueue* aVideoTaskQueue,
+                                      FlushableMediaTaskQueue* aVideoTaskQueue,
                                       MediaDataDecoderCallback* aCallback)
 {
   nsRefPtr<MediaDataDecoder> decoder;
 
   if (strcmp(aConfig.mime_type, "video/avc") ||
       !mPDM->DecoderNeedsAVCC(aConfig)) {
     // There is no need for an AVCC wrapper for non-AVC content.
     decoder = mPDM->CreateVideoDecoder(aConfig,
@@ -260,17 +260,17 @@ AVCCDecoderModule::CreateVideoDecoder(co
                                        aVideoTaskQueue,
                                        aCallback);
   }
   return decoder.forget();
 }
 
 already_AddRefed<MediaDataDecoder>
 AVCCDecoderModule::CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
-                                      MediaTaskQueue* aAudioTaskQueue,
+                                      FlushableMediaTaskQueue* aAudioTaskQueue,
                                       MediaDataDecoderCallback* aCallback)
 {
   return mPDM->CreateAudioDecoder(aConfig,
                                   aAudioTaskQueue,
                                   aCallback);
 }
 
 bool
--- a/dom/media/fmp4/AVCCDecoderModule.h
+++ b/dom/media/fmp4/AVCCDecoderModule.h
@@ -29,22 +29,22 @@ public:
 
   virtual nsresult Startup() MOZ_OVERRIDE;
   virtual nsresult Shutdown() MOZ_OVERRIDE;
 
   virtual already_AddRefed<MediaDataDecoder>
   CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
                      layers::LayersBackend aLayersBackend,
                      layers::ImageContainer* aImageContainer,
-                     MediaTaskQueue* aVideoTaskQueue,
+                     FlushableMediaTaskQueue* aVideoTaskQueue,
                      MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE;
 
   virtual already_AddRefed<MediaDataDecoder>
   CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
-                     MediaTaskQueue* aAudioTaskQueue,
+                     FlushableMediaTaskQueue* aAudioTaskQueue,
                      MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE;
 
   virtual bool SupportsAudioMimeType(const char* aMimeType) MOZ_OVERRIDE;
   virtual bool SupportsVideoMimeType(const char* aMimeType) MOZ_OVERRIDE;
 
 private:
   nsRefPtr<PlatformDecoderModule> mPDM;
 };
--- a/dom/media/fmp4/BlankDecoderModule.cpp
+++ b/dom/media/fmp4/BlankDecoderModule.cpp
@@ -18,17 +18,17 @@ namespace mozilla {
 
 // Decoder that uses a passed in object's Create function to create blank
 // MediaData objects.
 template<class BlankMediaDataCreator>
 class BlankMediaDataDecoder : public MediaDataDecoder {
 public:
 
   BlankMediaDataDecoder(BlankMediaDataCreator* aCreator,
-                        MediaTaskQueue* aTaskQueue,
+                        FlushableMediaTaskQueue* aTaskQueue,
                         MediaDataDecoderCallback* aCallback)
     : mCreator(aCreator)
     , mTaskQueue(aTaskQueue)
     , mCallback(aCallback)
   {
   }
 
   virtual nsresult Init() MOZ_OVERRIDE {
@@ -80,17 +80,17 @@ public:
 
   virtual nsresult Drain() MOZ_OVERRIDE {
     mCallback->DrainComplete();
     return NS_OK;
   }
 
 private:
   nsAutoPtr<BlankMediaDataCreator> mCreator;
-  RefPtr<MediaTaskQueue> mTaskQueue;
+  RefPtr<FlushableMediaTaskQueue> mTaskQueue;
   MediaDataDecoderCallback* mCallback;
 };
 
 class BlankVideoDataCreator {
 public:
   BlankVideoDataCreator(uint32_t aFrameWidth,
                         uint32_t aFrameHeight,
                         layers::ImageContainer* aImageContainer)
@@ -211,31 +211,31 @@ public:
     return NS_OK;
   }
 
   // Decode thread.
   virtual already_AddRefed<MediaDataDecoder>
   CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
                      layers::LayersBackend aLayersBackend,
                      layers::ImageContainer* aImageContainer,
-                     MediaTaskQueue* aVideoTaskQueue,
+                     FlushableMediaTaskQueue* aVideoTaskQueue,
                      MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE {
     BlankVideoDataCreator* creator = new BlankVideoDataCreator(
       aConfig.display_width, aConfig.display_height, aImageContainer);
     nsRefPtr<MediaDataDecoder> decoder =
       new BlankMediaDataDecoder<BlankVideoDataCreator>(creator,
                                                        aVideoTaskQueue,
                                                        aCallback);
     return decoder.forget();
   }
 
   // Decode thread.
   virtual already_AddRefed<MediaDataDecoder>
   CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
-                     MediaTaskQueue* aAudioTaskQueue,
+                     FlushableMediaTaskQueue* aAudioTaskQueue,
                      MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE {
     BlankAudioDataCreator* creator = new BlankAudioDataCreator(
       aConfig.channel_count, aConfig.samples_per_second);
 
     nsRefPtr<MediaDataDecoder> decoder =
       new BlankMediaDataDecoder<BlankAudioDataCreator>(creator,
                                                        aAudioTaskQueue,
                                                        aCallback);
--- a/dom/media/fmp4/MP4Reader.cpp
+++ b/dom/media/fmp4/MP4Reader.cpp
@@ -204,20 +204,20 @@ nsresult
 MP4Reader::Init(MediaDecoderReader* aCloneDonor)
 {
   MOZ_ASSERT(NS_IsMainThread(), "Must be on main thread.");
   PlatformDecoderModule::Init();
   mStream = new MP4Stream(mDecoder->GetResource());
 
   InitLayersBackendType();
 
-  mAudio.mTaskQueue = new MediaTaskQueue(GetMediaDecodeThreadPool());
+  mAudio.mTaskQueue = new FlushableMediaTaskQueue(GetMediaDecodeThreadPool());
   NS_ENSURE_TRUE(mAudio.mTaskQueue, NS_ERROR_FAILURE);
 
-  mVideo.mTaskQueue = new MediaTaskQueue(GetMediaDecodeThreadPool());
+  mVideo.mTaskQueue = new FlushableMediaTaskQueue(GetMediaDecodeThreadPool());
   NS_ENSURE_TRUE(mVideo.mTaskQueue, NS_ERROR_FAILURE);
 
   static bool sSetupPrefCache = false;
   if (!sSetupPrefCache) {
     sSetupPrefCache = true;
     Preferences::AddBoolVarCache(&sIsEMEEnabled, "media.eme.enabled", false);
     Preferences::AddBoolVarCache(&sDemuxSkipToNextKeyframe, "media.fmp4.demux-skip", true);
   }
@@ -405,18 +405,17 @@ MP4Reader::ReadMetadata(MediaInfo* aInfo
     {
       ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
       proxy = mDecoder->GetCDMProxy();
     }
     MOZ_ASSERT(proxy);
 
     mPlatform = PlatformDecoderModule::CreateCDMWrapper(proxy,
                                                         HasAudio(),
-                                                        HasVideo(),
-                                                        GetTaskQueue());
+                                                        HasVideo());
     NS_ENSURE_TRUE(mPlatform, NS_ERROR_FAILURE);
 #else
     // EME not supported.
     return NS_ERROR_FAILURE;
 #endif
   } else {
     mPlatform = PlatformDecoderModule::Create();
     NS_ENSURE_TRUE(mPlatform, NS_ERROR_FAILURE);
--- a/dom/media/fmp4/MP4Reader.h
+++ b/dom/media/fmp4/MP4Reader.h
@@ -180,17 +180,17 @@ private:
       , mDiscontinuity(false)
     {
     }
 
     // The platform decoder.
     nsRefPtr<MediaDataDecoder> mDecoder;
     // TaskQueue on which decoder can choose to decode.
     // Only non-null up until the decoder is created.
-    nsRefPtr<MediaTaskQueue> mTaskQueue;
+    nsRefPtr<FlushableMediaTaskQueue> mTaskQueue;
     // Callback that receives output and error notifications from the decoder.
     nsAutoPtr<DecoderCallback> mCallback;
     // Decoded samples returned by mDecoder awaiting being returned to
     // state machine upon request.
     nsTArray<nsRefPtr<MediaData> > mOutput;
     // Disambiguate Audio vs Video.
     MediaData::Type mType;
 
--- a/dom/media/fmp4/PlatformDecoderModule.cpp
+++ b/dom/media/fmp4/PlatformDecoderModule.cpp
@@ -76,18 +76,17 @@ PlatformDecoderModule::Init()
 #endif
 }
 
 #ifdef MOZ_EME
 /* static */
 already_AddRefed<PlatformDecoderModule>
 PlatformDecoderModule::CreateCDMWrapper(CDMProxy* aProxy,
                                         bool aHasAudio,
-                                        bool aHasVideo,
-                                        MediaTaskQueue* aTaskQueue)
+                                        bool aHasVideo)
 {
   bool cdmDecodesAudio;
   bool cdmDecodesVideo;
   {
     CDMCaps::AutoLock caps(aProxy->Capabilites());
     cdmDecodesAudio = caps.CanDecryptAndDecodeAudio();
     cdmDecodesVideo = caps.CanDecryptAndDecodeVideo();
   }
--- a/dom/media/fmp4/PlatformDecoderModule.h
+++ b/dom/media/fmp4/PlatformDecoderModule.h
@@ -25,17 +25,17 @@ namespace mozilla {
 
 namespace layers {
 class ImageContainer;
 }
 
 class MediaDataDecoder;
 class MediaDataDecoderCallback;
 class MediaInputQueue;
-class MediaTaskQueue;
+class FlushableMediaTaskQueue;
 class CDMProxy;
 typedef int64_t Microseconds;
 
 // The PlatformDecoderModule interface is used by the MP4Reader to abstract
 // access to the H264 and Audio (AAC/MP3) decoders provided by various platforms.
 // It may be extended to support other codecs in future. Each platform (Windows,
 // MacOSX, Linux, B2G etc) must implement a PlatformDecoderModule to provide
 // access to its decoders in order to get decompressed H.264/AAC from the
@@ -78,18 +78,17 @@ public:
   // Creates a PlatformDecoderModule that uses a CDMProxy to decrypt or
   // decrypt-and-decode EME encrypted content. If the CDM only decrypts and
   // does not decode, we create a PDM and use that to create MediaDataDecoders
   // that we use on aTaskQueue to decode the decrypted stream.
   // This is called on the decode task queue.
   static already_AddRefed<PlatformDecoderModule>
   CreateCDMWrapper(CDMProxy* aProxy,
                    bool aHasAudio,
-                   bool aHasVideo,
-                   MediaTaskQueue* aTaskQueue);
+                   bool aHasVideo);
 #endif
 
   // Called to shutdown the decoder module and cleanup state. The PDM
   // is deleted immediately after Shutdown() is called. Shutdown() is
   // called after Shutdown() has been called on all MediaDataDecoders
   // created from this PlatformDecoderModule.
   // This is called on the decode task queue.
   virtual nsresult Shutdown() = 0;
@@ -104,32 +103,32 @@ public:
   // COINIT_MULTITHREADED.
   // Returns nullptr if the decoder can't be created.
   // It is safe to store a reference to aConfig.
   // This is called on the decode task queue.
   virtual already_AddRefed<MediaDataDecoder>
   CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
                     layers::LayersBackend aLayersBackend,
                     layers::ImageContainer* aImageContainer,
-                    MediaTaskQueue* aVideoTaskQueue,
+                    FlushableMediaTaskQueue* aVideoTaskQueue,
                     MediaDataDecoderCallback* aCallback) = 0;
 
   // Creates an Audio decoder with the specified properties.
   // Asynchronous decoding of audio should be done in runnables dispatched to
   // aAudioTaskQueue. If the task queue isn't needed, the decoder should
   // not hold a reference to it.
   // Output and errors should be returned to the reader via aCallback.
   // Returns nullptr if the decoder can't be created.
   // On Windows the task queue's threads have MSCOM initialized with
   // COINIT_MULTITHREADED.
   // It is safe to store a reference to aConfig.
   // This is called on the decode task queue.
   virtual already_AddRefed<MediaDataDecoder>
   CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
-                     MediaTaskQueue* aAudioTaskQueue,
+                     FlushableMediaTaskQueue* aAudioTaskQueue,
                      MediaDataDecoderCallback* aCallback) = 0;
 
   // An audio decoder module must support AAC by default.
   // If more audio codecs are to be supported, SupportsAudioMimeType will have
   // to be extended.
   virtual bool SupportsAudioMimeType(const char* aMimeType);
   virtual bool SupportsVideoMimeType(const char* aMimeType);
 
--- a/dom/media/fmp4/SharedDecoderManager.cpp
+++ b/dom/media/fmp4/SharedDecoderManager.cpp
@@ -50,17 +50,17 @@ public:
       mManager->mActiveCallback->ReleaseMediaResources();
     }
   }
 
   SharedDecoderManager* mManager;
 };
 
 SharedDecoderManager::SharedDecoderManager()
-  : mTaskQueue(new MediaTaskQueue(GetMediaDecodeThreadPool()))
+  : mTaskQueue(new FlushableMediaTaskQueue(GetMediaDecodeThreadPool()))
   , mActiveProxy(nullptr)
   , mActiveCallback(nullptr)
   , mWaitForInternalDrain(false)
   , mMonitor("SharedDecoderProxy")
   , mDecoderReleasedResources(false)
 {
   MOZ_ASSERT(NS_IsMainThread()); // taskqueue must be created on main thread.
   mCallback = new SharedDecoderCallback(this);
@@ -68,17 +68,17 @@ SharedDecoderManager::SharedDecoderManag
 
 SharedDecoderManager::~SharedDecoderManager() {}
 
 already_AddRefed<MediaDataDecoder>
 SharedDecoderManager::CreateVideoDecoder(
   PlatformDecoderModule* aPDM,
   const mp4_demuxer::VideoDecoderConfig& aConfig,
   layers::LayersBackend aLayersBackend, layers::ImageContainer* aImageContainer,
-  MediaTaskQueue* aVideoTaskQueue, MediaDataDecoderCallback* aCallback)
+  FlushableMediaTaskQueue* aVideoTaskQueue, MediaDataDecoderCallback* aCallback)
 {
   if (!mDecoder) {
     // We use the manager's task queue for the decoder, rather than the one
     // passed in, so that none of the objects sharing the decoder can shutdown
     // the task queue while we're potentially still using it for a *different*
     // object also sharing the decoder.
     mDecoder = aPDM->CreateVideoDecoder(
       aConfig, aLayersBackend, aImageContainer, mTaskQueue, mCallback);
--- a/dom/media/fmp4/SharedDecoderManager.h
+++ b/dom/media/fmp4/SharedDecoderManager.h
@@ -23,34 +23,34 @@ public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(SharedDecoderManager)
 
   SharedDecoderManager();
 
   already_AddRefed<MediaDataDecoder> CreateVideoDecoder(
     PlatformDecoderModule* aPDM,
     const mp4_demuxer::VideoDecoderConfig& aConfig,
     layers::LayersBackend aLayersBackend,
-    layers::ImageContainer* aImageContainer, MediaTaskQueue* aVideoTaskQueue,
+    layers::ImageContainer* aImageContainer, FlushableMediaTaskQueue* aVideoTaskQueue,
     MediaDataDecoderCallback* aCallback);
 
   void SetReader(MediaDecoderReader* aReader);
   void Select(SharedDecoderProxy* aProxy);
   void SetIdle(MediaDataDecoder* aProxy);
   void ReleaseMediaResources();
   void Shutdown();
 
   friend class SharedDecoderProxy;
   friend class SharedDecoderCallback;
 
 private:
   virtual ~SharedDecoderManager();
   void DrainComplete();
 
   nsRefPtr<MediaDataDecoder> mDecoder;
-  nsRefPtr<MediaTaskQueue> mTaskQueue;
+  nsRefPtr<FlushableMediaTaskQueue> mTaskQueue;
   SharedDecoderProxy* mActiveProxy;
   MediaDataDecoderCallback* mActiveCallback;
   nsAutoPtr<MediaDataDecoderCallback> mCallback;
   bool mWaitForInternalDrain;
   Monitor mMonitor;
   bool mDecoderReleasedResources;
 };
 
--- a/dom/media/fmp4/android/AndroidDecoderModule.cpp
+++ b/dom/media/fmp4/android/AndroidDecoderModule.cpp
@@ -255,17 +255,17 @@ bool AndroidDecoderModule::SupportsAudio
   return static_cast<bool>(CreateDecoder(aMimeType));
 }
 
 already_AddRefed<MediaDataDecoder>
 AndroidDecoderModule::CreateVideoDecoder(
                                 const mp4_demuxer::VideoDecoderConfig& aConfig,
                                 layers::LayersBackend aLayersBackend,
                                 layers::ImageContainer* aImageContainer,
-                                MediaTaskQueue* aVideoTaskQueue,
+                                FlushableMediaTaskQueue* aVideoTaskQueue,
                                 MediaDataDecoderCallback* aCallback)
 {
   MediaFormat::LocalRef format;
 
   NS_ENSURE_SUCCESS(MediaFormat::CreateVideoFormat(
       aConfig.mime_type,
       aConfig.display_width,
       aConfig.display_height,
@@ -274,17 +274,17 @@ AndroidDecoderModule::CreateVideoDecoder
   nsRefPtr<MediaDataDecoder> decoder =
     new VideoDataDecoder(aConfig, format, aCallback, aImageContainer);
 
   return decoder.forget();
 }
 
 already_AddRefed<MediaDataDecoder>
 AndroidDecoderModule::CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
-                                         MediaTaskQueue* aAudioTaskQueue,
+                                         FlushableMediaTaskQueue* aAudioTaskQueue,
                                          MediaDataDecoderCallback* aCallback)
 {
   MOZ_ASSERT(aConfig.bits_per_sample == 16, "We only handle 16-bit audio!");
 
   MediaFormat::LocalRef format;
 
   NS_ENSURE_SUCCESS(MediaFormat::CreateAudioFormat(
       aConfig.mime_type,
--- a/dom/media/fmp4/android/AndroidDecoderModule.h
+++ b/dom/media/fmp4/android/AndroidDecoderModule.h
@@ -20,22 +20,22 @@ typedef std::queue<mp4_demuxer::MP4Sampl
 class AndroidDecoderModule : public PlatformDecoderModule {
 public:
   virtual nsresult Shutdown() MOZ_OVERRIDE;
 
   virtual already_AddRefed<MediaDataDecoder>
   CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
                      layers::LayersBackend aLayersBackend,
                      layers::ImageContainer* aImageContainer,
-                     MediaTaskQueue* aVideoTaskQueue,
+                     FlushableMediaTaskQueue* aVideoTaskQueue,
                      MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE;
 
   virtual already_AddRefed<MediaDataDecoder>
   CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
-                     MediaTaskQueue* aAudioTaskQueue,
+                     FlushableMediaTaskQueue* aAudioTaskQueue,
                      MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE;
 
 
   AndroidDecoderModule() {}
   virtual ~AndroidDecoderModule() {}
 
   virtual bool SupportsAudioMimeType(const char* aMimeType) MOZ_OVERRIDE;
 };
--- a/dom/media/fmp4/apple/AppleATDecoder.cpp
+++ b/dom/media/fmp4/apple/AppleATDecoder.cpp
@@ -18,17 +18,17 @@ PRLogModuleInfo* GetAppleMediaLog();
 #else
 #define LOG(...)
 #endif
 #define FourCC2Str(n) ((char[5]){(char)(n >> 24), (char)(n >> 16), (char)(n >> 8), (char)(n), 0})
 
 namespace mozilla {
 
 AppleATDecoder::AppleATDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
-                               MediaTaskQueue* aAudioTaskQueue,
+                               FlushableMediaTaskQueue* aAudioTaskQueue,
                                MediaDataDecoderCallback* aCallback)
   : mConfig(aConfig)
   , mFileStreamError(false)
   , mTaskQueue(aAudioTaskQueue)
   , mCallback(aCallback)
   , mConverter(nullptr)
   , mStream(nullptr)
 {
--- a/dom/media/fmp4/apple/AppleATDecoder.h
+++ b/dom/media/fmp4/apple/AppleATDecoder.h
@@ -10,23 +10,23 @@
 #include <AudioToolbox/AudioToolbox.h>
 #include "PlatformDecoderModule.h"
 #include "mozilla/ReentrantMonitor.h"
 #include "mozilla/Vector.h"
 #include "nsIThread.h"
 
 namespace mozilla {
 
-class MediaTaskQueue;
+class FlushableMediaTaskQueue;
 class MediaDataDecoderCallback;
 
 class AppleATDecoder : public MediaDataDecoder {
 public:
   AppleATDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
-                 MediaTaskQueue* aVideoTaskQueue,
+                 FlushableMediaTaskQueue* aVideoTaskQueue,
                  MediaDataDecoderCallback* aCallback);
   virtual ~AppleATDecoder();
 
   virtual nsresult Init() MOZ_OVERRIDE;
   virtual nsresult Input(mp4_demuxer::MP4Sample* aSample) MOZ_OVERRIDE;
   virtual nsresult Flush() MOZ_OVERRIDE;
   virtual nsresult Drain() MOZ_OVERRIDE;
   virtual nsresult Shutdown() MOZ_OVERRIDE;
@@ -36,17 +36,17 @@ public:
 
   // Use to extract magic cookie for HE-AAC detection.
   nsTArray<uint8_t> mMagicCookie;
   // Will be set to true should an error occur while attempting to retrieve
   // the magic cookie property.
   bool mFileStreamError;
 
 private:
-  nsRefPtr<MediaTaskQueue> mTaskQueue;
+  nsRefPtr<FlushableMediaTaskQueue> mTaskQueue;
   MediaDataDecoderCallback* mCallback;
   AudioConverterRef mConverter;
   AudioStreamBasicDescription mOutputFormat;
   UInt32 mFormatID;
   AudioFileStreamID mStream;
   nsTArray<nsAutoPtr<mp4_demuxer::MP4Sample>> mQueuedSamples;
 
   void SubmitSample(nsAutoPtr<mp4_demuxer::MP4Sample> aSample);
--- a/dom/media/fmp4/apple/AppleDecoderModule.cpp
+++ b/dom/media/fmp4/apple/AppleDecoderModule.cpp
@@ -153,17 +153,17 @@ AppleDecoderModule::Shutdown()
   NS_DispatchToMainThread(task);
   return NS_OK;
 }
 
 already_AddRefed<MediaDataDecoder>
 AppleDecoderModule::CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
                                        layers::LayersBackend aLayersBackend,
                                        layers::ImageContainer* aImageContainer,
-                                       MediaTaskQueue* aVideoTaskQueue,
+                                       FlushableMediaTaskQueue* aVideoTaskQueue,
                                        MediaDataDecoderCallback* aCallback)
 {
   nsRefPtr<MediaDataDecoder> decoder;
 
   if (sIsVDAAvailable && (!sIsVTHWAvailable || sForceVDA)) {
     decoder =
       AppleVDADecoder::CreateVDADecoder(aConfig,
                                         aVideoTaskQueue,
@@ -179,17 +179,17 @@ AppleDecoderModule::CreateVideoDecoder(c
     decoder =
       new AppleVTDecoder(aConfig, aVideoTaskQueue, aCallback, aImageContainer);
   }
   return decoder.forget();
 }
 
 already_AddRefed<MediaDataDecoder>
 AppleDecoderModule::CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
-                                       MediaTaskQueue* aAudioTaskQueue,
+                                       FlushableMediaTaskQueue* aAudioTaskQueue,
                                        MediaDataDecoderCallback* aCallback)
 {
   nsRefPtr<MediaDataDecoder> decoder =
     new AppleATDecoder(aConfig, aAudioTaskQueue, aCallback);
   return decoder.forget();
 }
 
 bool
--- a/dom/media/fmp4/apple/AppleDecoderModule.h
+++ b/dom/media/fmp4/apple/AppleDecoderModule.h
@@ -22,23 +22,23 @@ public:
   // Does this really need to be main thread only????
   virtual nsresult Shutdown() MOZ_OVERRIDE;
 
   // Decode thread.
   virtual already_AddRefed<MediaDataDecoder>
   CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
                      layers::LayersBackend aLayersBackend,
                      layers::ImageContainer* aImageContainer,
-                     MediaTaskQueue* aVideoTaskQueue,
+                     FlushableMediaTaskQueue* aVideoTaskQueue,
                      MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE;
 
   // Decode thread.
   virtual already_AddRefed<MediaDataDecoder>
   CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
-                     MediaTaskQueue* aAudioTaskQueue,
+                     FlushableMediaTaskQueue* aAudioTaskQueue,
                      MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE;
 
   virtual bool SupportsAudioMimeType(const char* aMimeType) MOZ_OVERRIDE;
   virtual bool
   DecoderNeedsAVCC(const mp4_demuxer::VideoDecoderConfig& aConfig) MOZ_OVERRIDE;
 
   static void Init();
   static nsresult CanDecode();
--- a/dom/media/fmp4/apple/AppleVDADecoder.cpp
+++ b/dom/media/fmp4/apple/AppleVDADecoder.cpp
@@ -27,17 +27,17 @@ PRLogModuleInfo* GetAppleMediaLog();
 //#define LOG_MEDIA_SHA1
 #else
 #define LOG(...)
 #endif
 
 namespace mozilla {
 
 AppleVDADecoder::AppleVDADecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
-                               MediaTaskQueue* aVideoTaskQueue,
+                               FlushableMediaTaskQueue* aVideoTaskQueue,
                                MediaDataDecoderCallback* aCallback,
                                layers::ImageContainer* aImageContainer)
   : mConfig(aConfig)
   , mTaskQueue(aVideoTaskQueue)
   , mCallback(aCallback)
   , mImageContainer(aImageContainer)
   , mDecoder(nullptr)
   , mIs106(!nsCocoaFeatures::OnLionOrLater())
@@ -490,17 +490,17 @@ AppleVDADecoder::CreateOutputConfigurati
                             &kCFTypeDictionaryKeyCallBacks,
                             &kCFTypeDictionaryValueCallBacks);
 }
 
 /* static */
 already_AddRefed<AppleVDADecoder>
 AppleVDADecoder::CreateVDADecoder(
   const mp4_demuxer::VideoDecoderConfig& aConfig,
-  MediaTaskQueue* aVideoTaskQueue,
+  FlushableMediaTaskQueue* aVideoTaskQueue,
   MediaDataDecoderCallback* aCallback,
   layers::ImageContainer* aImageContainer)
 {
   nsRefPtr<AppleVDADecoder> decoder =
     new AppleVDADecoder(aConfig, aVideoTaskQueue, aCallback, aImageContainer);
   if (NS_FAILED(decoder->Init())) {
     NS_ERROR("AppleVDADecoder an error occurred");
     return nullptr;
--- a/dom/media/fmp4/apple/AppleVDADecoder.h
+++ b/dom/media/fmp4/apple/AppleVDADecoder.h
@@ -13,17 +13,17 @@
 #include "MP4Decoder.h"
 #include "nsIThread.h"
 #include "ReorderQueue.h"
 
 #include "VideoDecodeAcceleration/VDADecoder.h"
 
 namespace mozilla {
 
-class MediaTaskQueue;
+class FlushableMediaTaskQueue;
 class MediaDataDecoderCallback;
 namespace layers {
   class ImageContainer;
 }
 
 class AppleVDADecoder : public MediaDataDecoder {
 public:
   class AppleFrameRef {
@@ -56,22 +56,22 @@ public:
     {
     }
   };
 
   // Return a new created AppleVDADecoder or nullptr if media or hardware is
   // not supported by current configuration.
   static already_AddRefed<AppleVDADecoder> CreateVDADecoder(
     const mp4_demuxer::VideoDecoderConfig& aConfig,
-    MediaTaskQueue* aVideoTaskQueue,
+    FlushableMediaTaskQueue* aVideoTaskQueue,
     MediaDataDecoderCallback* aCallback,
     layers::ImageContainer* aImageContainer);
 
   AppleVDADecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
-                  MediaTaskQueue* aVideoTaskQueue,
+                  FlushableMediaTaskQueue* aVideoTaskQueue,
                   MediaDataDecoderCallback* aCallback,
                   layers::ImageContainer* aImageContainer);
   virtual ~AppleVDADecoder();
   virtual nsresult Init() MOZ_OVERRIDE;
   virtual nsresult Input(mp4_demuxer::MP4Sample* aSample) MOZ_OVERRIDE;
   virtual nsresult Flush() MOZ_OVERRIDE;
   virtual nsresult Drain() MOZ_OVERRIDE;
   virtual nsresult Shutdown() MOZ_OVERRIDE;
@@ -81,17 +81,17 @@ public:
 
  protected:
   AppleFrameRef* CreateAppleFrameRef(const mp4_demuxer::MP4Sample* aSample);
   void DrainReorderedFrames();
   void ClearReorderedFrames();
   CFDictionaryRef CreateOutputConfiguration();
 
   const mp4_demuxer::VideoDecoderConfig& mConfig;
-  nsRefPtr<MediaTaskQueue> mTaskQueue;
+  nsRefPtr<FlushableMediaTaskQueue> mTaskQueue;
   MediaDataDecoderCallback* mCallback;
   nsRefPtr<layers::ImageContainer> mImageContainer;
   ReorderQueue mReorderQueue;
   uint32_t mPictureWidth;
   uint32_t mPictureHeight;
   uint32_t mMaxRefFrames;
 
 private:
--- a/dom/media/fmp4/apple/AppleVTDecoder.cpp
+++ b/dom/media/fmp4/apple/AppleVTDecoder.cpp
@@ -30,17 +30,17 @@ PRLogModuleInfo* GetAppleMediaLog();
 
 #ifdef LOG_MEDIA_SHA1
 #include "mozilla/SHA1.h"
 #endif
 
 namespace mozilla {
 
 AppleVTDecoder::AppleVTDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
-                               MediaTaskQueue* aVideoTaskQueue,
+                               FlushableMediaTaskQueue* aVideoTaskQueue,
                                MediaDataDecoderCallback* aCallback,
                                layers::ImageContainer* aImageContainer)
   : AppleVDADecoder(aConfig, aVideoTaskQueue, aCallback, aImageContainer)
   , mFormat(nullptr)
   , mSession(nullptr)
 {
   MOZ_COUNT_CTOR(AppleVTDecoder);
   // TODO: Verify aConfig.mime_type.
--- a/dom/media/fmp4/apple/AppleVTDecoder.h
+++ b/dom/media/fmp4/apple/AppleVTDecoder.h
@@ -11,17 +11,17 @@
 
 #include "VideoToolbox/VideoToolbox.h"
 
 namespace mozilla {
 
 class AppleVTDecoder : public AppleVDADecoder {
 public:
   AppleVTDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
-                 MediaTaskQueue* aVideoTaskQueue,
+                 FlushableMediaTaskQueue* aVideoTaskQueue,
                  MediaDataDecoderCallback* aCallback,
                  layers::ImageContainer* aImageContainer);
   virtual ~AppleVTDecoder();
   virtual nsresult Init() MOZ_OVERRIDE;
   virtual nsresult Input(mp4_demuxer::MP4Sample* aSample) MOZ_OVERRIDE;
   virtual nsresult Flush() MOZ_OVERRIDE;
   virtual nsresult Drain() MOZ_OVERRIDE;
   virtual nsresult Shutdown() MOZ_OVERRIDE;
--- a/dom/media/fmp4/eme/EMEDecoderModule.cpp
+++ b/dom/media/fmp4/eme/EMEDecoderModule.cpp
@@ -31,17 +31,17 @@ class EMEDecryptor : public MediaDataDec
 
 public:
 
   EMEDecryptor(MediaDataDecoder* aDecoder,
                MediaDataDecoderCallback* aCallback,
                CDMProxy* aProxy)
     : mDecoder(aDecoder)
     , mCallback(aCallback)
-    , mTaskQueue(CreateMediaDecodeTaskQueue())
+    , mTaskQueue(CreateFlushableMediaDecodeTaskQueue())
     , mProxy(aProxy)
     , mSamplesWaitingForKey(new SamplesWaitingForKey(this, mTaskQueue, mProxy))
 #ifdef DEBUG
     , mIsShutdown(false)
 #endif
   {
   }
 
@@ -50,17 +50,17 @@ public:
     nsresult rv = mTaskQueue->SyncDispatch(
       NS_NewRunnableMethod(mDecoder, &MediaDataDecoder::Init));
     unused << NS_WARN_IF(NS_FAILED(rv));
     return rv;
   }
 
   class DeliverDecrypted : public DecryptionClient {
   public:
-    DeliverDecrypted(EMEDecryptor* aDecryptor, MediaTaskQueue* aTaskQueue)
+    DeliverDecrypted(EMEDecryptor* aDecryptor, FlushableMediaTaskQueue* aTaskQueue)
       : mDecryptor(aDecryptor)
       , mTaskQueue(aTaskQueue)
     {}
     virtual void Decrypted(GMPErr aResult,
                            mp4_demuxer::MP4Sample* aSample) MOZ_OVERRIDE {
       if (aResult == GMPNoKeyErr) {
         RefPtr<nsIRunnable> task;
         task = NS_NewRunnableMethodWithArg<MP4Sample*>(mDecryptor,
@@ -79,17 +79,17 @@ public:
                                                        aSample);
         mTaskQueue->Dispatch(task.forget());
       }
       mTaskQueue = nullptr;
       mDecryptor = nullptr;
     }
   private:
     nsRefPtr<EMEDecryptor> mDecryptor;
-    nsRefPtr<MediaTaskQueue> mTaskQueue;
+    nsRefPtr<FlushableMediaTaskQueue> mTaskQueue;
   };
 
   virtual nsresult Input(MP4Sample* aSample) MOZ_OVERRIDE {
     MOZ_ASSERT(!mIsShutdown);
     // We run the PDM on its own task queue. We can't run it on the decode
     // task queue, because that calls into Input() in a loop and waits until
     // output is delivered. We need to defer some Input() calls while we wait
     // for keys to become usable, and once they do we need to dispatch an event
@@ -155,17 +155,17 @@ public:
     mCallback = nullptr;
     return rv;
   }
 
 private:
 
   nsRefPtr<MediaDataDecoder> mDecoder;
   MediaDataDecoderCallback* mCallback;
-  nsRefPtr<MediaTaskQueue> mTaskQueue;
+  nsRefPtr<FlushableMediaTaskQueue> mTaskQueue;
   nsRefPtr<CDMProxy> mProxy;
   nsRefPtr<SamplesWaitingForKey> mSamplesWaitingForKey;
 #ifdef DEBUG
   bool mIsShutdown;
 #endif
 };
 
 EMEDecoderModule::EMEDecoderModule(CDMProxy* aProxy,
@@ -191,17 +191,17 @@ EMEDecoderModule::Shutdown()
   }
   return NS_OK;
 }
 
 already_AddRefed<MediaDataDecoder>
 EMEDecoderModule::CreateVideoDecoder(const VideoDecoderConfig& aConfig,
                                      layers::LayersBackend aLayersBackend,
                                      layers::ImageContainer* aImageContainer,
-                                     MediaTaskQueue* aVideoTaskQueue,
+                                     FlushableMediaTaskQueue* aVideoTaskQueue,
                                      MediaDataDecoderCallback* aCallback)
 {
   if (mCDMDecodesVideo && aConfig.crypto.valid) {
     nsRefPtr<MediaDataDecoder> decoder(new EMEH264Decoder(mProxy,
                                                           aConfig,
                                                           aLayersBackend,
                                                           aImageContainer,
                                                           aVideoTaskQueue,
@@ -225,17 +225,17 @@ EMEDecoderModule::CreateVideoDecoder(con
   nsRefPtr<MediaDataDecoder> emeDecoder(new EMEDecryptor(decoder,
                                                          aCallback,
                                                          mProxy));
   return emeDecoder.forget();
 }
 
 already_AddRefed<MediaDataDecoder>
 EMEDecoderModule::CreateAudioDecoder(const AudioDecoderConfig& aConfig,
-                                     MediaTaskQueue* aAudioTaskQueue,
+                                     FlushableMediaTaskQueue* aAudioTaskQueue,
                                      MediaDataDecoderCallback* aCallback)
 {
   if (mCDMDecodesAudio && aConfig.crypto.valid) {
     nsRefPtr<MediaDataDecoder> decoder(new EMEAudioDecoder(mProxy,
                                                            aConfig,
                                                            aAudioTaskQueue,
                                                            aCallback));
     return decoder.forget();
--- a/dom/media/fmp4/eme/EMEDecoderModule.h
+++ b/dom/media/fmp4/eme/EMEDecoderModule.h
@@ -8,17 +8,17 @@
 #define EMEDecoderModule_h_
 
 #include "PlatformDecoderModule.h"
 #include "gmp-decryption.h"
 
 namespace mozilla {
 
 class CDMProxy;
-class MediaTaskQueue;
+class FlushableMediaTaskQueue;
 
 class EMEDecoderModule : public PlatformDecoderModule {
 private:
   typedef mp4_demuxer::AudioDecoderConfig AudioDecoderConfig;
   typedef mp4_demuxer::VideoDecoderConfig VideoDecoderConfig;
 
 public:
   EMEDecoderModule(CDMProxy* aProxy,
@@ -31,34 +31,34 @@ public:
   // Called when the decoders have shutdown. Main thread only.
   virtual nsresult Shutdown() MOZ_OVERRIDE;
 
   // Decode thread.
   virtual already_AddRefed<MediaDataDecoder>
   CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
                     layers::LayersBackend aLayersBackend,
                     layers::ImageContainer* aImageContainer,
-                    MediaTaskQueue* aVideoTaskQueue,
+                    FlushableMediaTaskQueue* aVideoTaskQueue,
                     MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE;
 
   // Decode thread.
   virtual already_AddRefed<MediaDataDecoder>
   CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
-                     MediaTaskQueue* aAudioTaskQueue,
+                     FlushableMediaTaskQueue* aAudioTaskQueue,
                      MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE;
 
   virtual bool
   DecoderNeedsAVCC(const mp4_demuxer::VideoDecoderConfig& aConfig) MOZ_OVERRIDE;
 
 private:
   nsRefPtr<CDMProxy> mProxy;
   // Will be null if CDM has decoding capability.
   nsRefPtr<PlatformDecoderModule> mPDM;
   // We run the PDM on its own task queue.
-  nsRefPtr<MediaTaskQueue> mTaskQueue;
+  nsRefPtr<FlushableMediaTaskQueue> mTaskQueue;
   bool mCDMDecodesAudio;
   bool mCDMDecodesVideo;
 
 };
 
 } // namespace mozilla
 
 #endif // EMEDecoderModule_h_
--- a/dom/media/fmp4/ffmpeg/FFmpegAudioDecoder.cpp
+++ b/dom/media/fmp4/ffmpeg/FFmpegAudioDecoder.cpp
@@ -13,17 +13,17 @@
 #define MAX_CHANNELS 16
 
 typedef mp4_demuxer::MP4Sample MP4Sample;
 
 namespace mozilla
 {
 
 FFmpegAudioDecoder<LIBAV_VER>::FFmpegAudioDecoder(
-  MediaTaskQueue* aTaskQueue, MediaDataDecoderCallback* aCallback,
+  FlushableMediaTaskQueue* aTaskQueue, MediaDataDecoderCallback* aCallback,
   const mp4_demuxer::AudioDecoderConfig& aConfig)
   : FFmpegDataDecoder(aTaskQueue, GetCodecId(aConfig.mime_type))
   , mCallback(aCallback)
 {
   MOZ_COUNT_CTOR(FFmpegAudioDecoder);
   mExtraData = aConfig.audio_specific_config;
 }
 
--- a/dom/media/fmp4/ffmpeg/FFmpegAudioDecoder.h
+++ b/dom/media/fmp4/ffmpeg/FFmpegAudioDecoder.h
@@ -16,17 +16,17 @@ namespace mozilla
 template <int V> class FFmpegAudioDecoder
 {
 };
 
 template <>
 class FFmpegAudioDecoder<LIBAV_VER> : public FFmpegDataDecoder<LIBAV_VER>
 {
 public:
-  FFmpegAudioDecoder(MediaTaskQueue* aTaskQueue,
+  FFmpegAudioDecoder(FlushableMediaTaskQueue* aTaskQueue,
                      MediaDataDecoderCallback* aCallback,
                      const mp4_demuxer::AudioDecoderConfig& aConfig);
   virtual ~FFmpegAudioDecoder();
 
   virtual nsresult Init() MOZ_OVERRIDE;
   virtual nsresult Input(mp4_demuxer::MP4Sample* aSample) MOZ_OVERRIDE;
   virtual nsresult Drain() MOZ_OVERRIDE;
   static AVCodecID GetCodecId(const char* aMimeType);
--- a/dom/media/fmp4/ffmpeg/FFmpegDataDecoder.cpp
+++ b/dom/media/fmp4/ffmpeg/FFmpegDataDecoder.cpp
@@ -14,17 +14,17 @@
 #include "prsystem.h"
 
 namespace mozilla
 {
 
 bool FFmpegDataDecoder<LIBAV_VER>::sFFmpegInitDone = false;
 StaticMutex FFmpegDataDecoder<LIBAV_VER>::sMonitor;
 
-FFmpegDataDecoder<LIBAV_VER>::FFmpegDataDecoder(MediaTaskQueue* aTaskQueue,
+FFmpegDataDecoder<LIBAV_VER>::FFmpegDataDecoder(FlushableMediaTaskQueue* aTaskQueue,
                                                 AVCodecID aCodecID)
   : mTaskQueue(aTaskQueue)
   , mCodecContext(nullptr)
   , mFrame(NULL)
   , mExtraData(nullptr)
   , mCodecID(aCodecID)
 {
   MOZ_COUNT_CTOR(FFmpegDataDecoder);
--- a/dom/media/fmp4/ffmpeg/FFmpegDataDecoder.h
+++ b/dom/media/fmp4/ffmpeg/FFmpegDataDecoder.h
@@ -19,31 +19,31 @@ template <int V>
 class FFmpegDataDecoder : public MediaDataDecoder
 {
 };
 
 template <>
 class FFmpegDataDecoder<LIBAV_VER> : public MediaDataDecoder
 {
 public:
-  FFmpegDataDecoder(MediaTaskQueue* aTaskQueue, AVCodecID aCodecID);
+  FFmpegDataDecoder(FlushableMediaTaskQueue* aTaskQueue, AVCodecID aCodecID);
   virtual ~FFmpegDataDecoder();
 
   static bool Link();
 
   virtual nsresult Init() MOZ_OVERRIDE;
   virtual nsresult Input(mp4_demuxer::MP4Sample* aSample) = 0;
   virtual nsresult Flush() MOZ_OVERRIDE;
   virtual nsresult Drain() = 0;
   virtual nsresult Shutdown() MOZ_OVERRIDE;
 
 protected:
   AVFrame*        PrepareFrame();
 
-  MediaTaskQueue* mTaskQueue;
+  FlushableMediaTaskQueue* mTaskQueue;
   AVCodecContext* mCodecContext;
   AVFrame*        mFrame;
   nsRefPtr<mp4_demuxer::ByteBuffer> mExtraData;
 
 private:
   static bool sFFmpegInitDone;
   static StaticMutex sMonitor;
 
--- a/dom/media/fmp4/ffmpeg/FFmpegDecoderModule.h
+++ b/dom/media/fmp4/ffmpeg/FFmpegDecoderModule.h
@@ -29,28 +29,28 @@ public:
   virtual ~FFmpegDecoderModule() {}
 
   virtual nsresult Shutdown() MOZ_OVERRIDE { return NS_OK; }
 
   virtual already_AddRefed<MediaDataDecoder>
   CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
                      layers::LayersBackend aLayersBackend,
                      layers::ImageContainer* aImageContainer,
-                     MediaTaskQueue* aVideoTaskQueue,
+                     FlushableMediaTaskQueue* aVideoTaskQueue,
                      MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE
   {
     nsRefPtr<MediaDataDecoder> decoder =
       new FFmpegH264Decoder<V>(aVideoTaskQueue, aCallback, aConfig,
                                aImageContainer);
     return decoder.forget();
   }
 
   virtual already_AddRefed<MediaDataDecoder>
   CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
-                     MediaTaskQueue* aAudioTaskQueue,
+                     FlushableMediaTaskQueue* aAudioTaskQueue,
                      MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE
   {
     nsRefPtr<MediaDataDecoder> decoder =
       new FFmpegAudioDecoder<V>(aAudioTaskQueue, aCallback, aConfig);
     return decoder.forget();
   }
 
   virtual bool SupportsAudioMimeType(const char* aMimeType) MOZ_OVERRIDE
--- a/dom/media/fmp4/ffmpeg/FFmpegH264Decoder.cpp
+++ b/dom/media/fmp4/ffmpeg/FFmpegH264Decoder.cpp
@@ -20,17 +20,17 @@ typedef mozilla::layers::Image Image;
 typedef mozilla::layers::PlanarYCbCrImage PlanarYCbCrImage;
 
 typedef mp4_demuxer::MP4Sample MP4Sample;
 
 namespace mozilla
 {
 
 FFmpegH264Decoder<LIBAV_VER>::FFmpegH264Decoder(
-  MediaTaskQueue* aTaskQueue, MediaDataDecoderCallback* aCallback,
+  FlushableMediaTaskQueue* aTaskQueue, MediaDataDecoderCallback* aCallback,
   const mp4_demuxer::VideoDecoderConfig& aConfig,
   ImageContainer* aImageContainer)
   : FFmpegDataDecoder(aTaskQueue, GetCodecId(aConfig.mime_type))
   , mCallback(aCallback)
   , mImageContainer(aImageContainer)
 {
   MOZ_COUNT_CTOR(FFmpegH264Decoder);
 }
--- a/dom/media/fmp4/ffmpeg/FFmpegH264Decoder.h
+++ b/dom/media/fmp4/ffmpeg/FFmpegH264Decoder.h
@@ -25,17 +25,17 @@ class FFmpegH264Decoder<LIBAV_VER> : pub
 
   enum DecodeResult {
     DECODE_FRAME,
     DECODE_NO_FRAME,
     DECODE_ERROR
   };
 
 public:
-  FFmpegH264Decoder(MediaTaskQueue* aTaskQueue,
+  FFmpegH264Decoder(FlushableMediaTaskQueue* aTaskQueue,
                     MediaDataDecoderCallback* aCallback,
                     const mp4_demuxer::VideoDecoderConfig& aConfig,
                     ImageContainer* aImageContainer);
   virtual ~FFmpegH264Decoder();
 
   virtual nsresult Init() MOZ_OVERRIDE;
   virtual nsresult Input(mp4_demuxer::MP4Sample* aSample) MOZ_OVERRIDE;
   virtual nsresult Drain() MOZ_OVERRIDE;
--- a/dom/media/fmp4/gonk/GonkDecoderModule.cpp
+++ b/dom/media/fmp4/gonk/GonkDecoderModule.cpp
@@ -31,28 +31,28 @@ GonkDecoderModule::Shutdown()
 {
   return NS_OK;
 }
 
 already_AddRefed<MediaDataDecoder>
 GonkDecoderModule::CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
                                      mozilla::layers::LayersBackend aLayersBackend,
                                      mozilla::layers::ImageContainer* aImageContainer,
-                                     MediaTaskQueue* aVideoTaskQueue,
+                                     FlushableMediaTaskQueue* aVideoTaskQueue,
                                      MediaDataDecoderCallback* aCallback)
 {
   nsRefPtr<MediaDataDecoder> decoder =
   new GonkMediaDataDecoder(new GonkVideoDecoderManager(aImageContainer,aConfig),
                            aVideoTaskQueue, aCallback);
   return decoder.forget();
 }
 
 already_AddRefed<MediaDataDecoder>
 GonkDecoderModule::CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
-                                      MediaTaskQueue* aAudioTaskQueue,
+                                      FlushableMediaTaskQueue* aAudioTaskQueue,
                                       MediaDataDecoderCallback* aCallback)
 {
   nsRefPtr<MediaDataDecoder> decoder =
   new GonkMediaDataDecoder(new GonkAudioDecoderManager(aConfig), aAudioTaskQueue,
                            aCallback);
   return decoder.forget();
 }
 
--- a/dom/media/fmp4/gonk/GonkDecoderModule.h
+++ b/dom/media/fmp4/gonk/GonkDecoderModule.h
@@ -19,23 +19,23 @@ public:
   // Called when the decoders have shutdown.
   virtual nsresult Shutdown() MOZ_OVERRIDE;
 
   // Decode thread.
   virtual already_AddRefed<MediaDataDecoder>
   CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
                      mozilla::layers::LayersBackend aLayersBackend,
                      mozilla::layers::ImageContainer* aImageContainer,
-                     MediaTaskQueue* aVideoTaskQueue,
+                     FlushableMediaTaskQueue* aVideoTaskQueue,
                      MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE;
 
   // Decode thread.
   virtual already_AddRefed<MediaDataDecoder>
   CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
-                     MediaTaskQueue* aAudioTaskQueue,
+                     FlushableMediaTaskQueue* aAudioTaskQueue,
                      MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE;
 
   static void Init();
 };
 
 } // namespace mozilla
 
 #endif
--- a/dom/media/fmp4/gonk/GonkMediaDataDecoder.cpp
+++ b/dom/media/fmp4/gonk/GonkMediaDataDecoder.cpp
@@ -20,17 +20,17 @@ PRLogModuleInfo* GetDemuxerLog();
 #define LOG(...)
 #endif
 
 using namespace android;
 
 namespace mozilla {
 
 GonkMediaDataDecoder::GonkMediaDataDecoder(GonkDecoderManager* aManager,
-                                           MediaTaskQueue* aTaskQueue,
+                                           FlushableMediaTaskQueue* aTaskQueue,
                                            MediaDataDecoderCallback* aCallback)
   : mTaskQueue(aTaskQueue)
   , mCallback(aCallback)
   , mManager(aManager)
   , mSignaledEOS(false)
   , mDrainComplete(false)
 {
   MOZ_COUNT_CTOR(GonkMediaDataDecoder);
--- a/dom/media/fmp4/gonk/GonkMediaDataDecoder.h
+++ b/dom/media/fmp4/gonk/GonkMediaDataDecoder.h
@@ -43,17 +43,17 @@ public:
 // Samples are decoded using the GonkDecoder (MediaCodec)
 // created by the GonkDecoderManager. This class implements
 // the higher-level logic that drives mapping the Gonk to the async
 // MediaDataDecoder interface. The specifics of decoding the exact stream
 // type are handled by GonkDecoderManager and the GonkDecoder it creates.
 class GonkMediaDataDecoder : public MediaDataDecoder {
 public:
   GonkMediaDataDecoder(GonkDecoderManager* aDecoderManager,
-                       MediaTaskQueue* aTaskQueue,
+                       FlushableMediaTaskQueue* aTaskQueue,
                        MediaDataDecoderCallback* aCallback);
 
   ~GonkMediaDataDecoder();
 
   virtual nsresult Init() MOZ_OVERRIDE;
 
   virtual nsresult Input(mp4_demuxer::MP4Sample* aSample);
 
@@ -82,17 +82,17 @@ private:
   // Called on the task queue. Extracts output if available, and delivers
   // it to the reader. Called after ProcessDecode() and ProcessDrain().
   void ProcessOutput();
 
   // Called on the task queue. Orders the Gonk to drain, and then extracts
   // all available output.
   void ProcessDrain();
 
-  RefPtr<MediaTaskQueue> mTaskQueue;
+  RefPtr<FlushableMediaTaskQueue> mTaskQueue;
   MediaDataDecoderCallback* mCallback;
 
   android::sp<android::MediaCodecProxy> mDecoder;
   nsAutoPtr<GonkDecoderManager> mManager;
 
   // The last offset into the media resource that was passed into Input().
   // This is used to approximate the decoder's position in the media resource.
   int64_t mLastStreamOffset;
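
The class comment above describes the split: GonkMediaDataDecoder implements the async MediaDataDecoder interface by queuing all real work onto its task queue, and ProcessDecode()/ProcessOutput()/ProcessDrain() then run on that queue. A minimal sketch of the dispatch half of that pattern against the new FlushableMediaTaskQueue type; the body below is illustrative only, not code changed by this patch, and the runnable/argument shape is an assumption:

  nsresult
  GonkMediaDataDecoder::Input(mp4_demuxer::MP4Sample* aSample)
  {
    // Sketch only: decoding happens on mTaskQueue, so Input() just queues a
    // ProcessDecode task and returns immediately.
    mTaskQueue->Dispatch(
      NS_NewRunnableMethodWithArg<nsAutoPtr<mp4_demuxer::MP4Sample>>(
        this, &GonkMediaDataDecoder::ProcessDecode,
        nsAutoPtr<mp4_demuxer::MP4Sample>(aSample)));
    return NS_OK;
  }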
--- a/dom/media/fmp4/wmf/WMFDecoderModule.cpp
+++ b/dom/media/fmp4/wmf/WMFDecoderModule.cpp
@@ -63,32 +63,32 @@ WMFDecoderModule::Shutdown()
   NS_ASSERTION(SUCCEEDED(hr), "MFShutdown failed");
   return NS_OK;
 }
 
 already_AddRefed<MediaDataDecoder>
 WMFDecoderModule::CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
                                      layers::LayersBackend aLayersBackend,
                                      layers::ImageContainer* aImageContainer,
-                                     MediaTaskQueue* aVideoTaskQueue,
+                                     FlushableMediaTaskQueue* aVideoTaskQueue,
                                      MediaDataDecoderCallback* aCallback)
 {
   nsRefPtr<MediaDataDecoder> decoder =
     new WMFMediaDataDecoder(new WMFVideoMFTManager(aConfig,
                                                    aLayersBackend,
                                                    aImageContainer,
                                                    sDXVAEnabled),
                             aVideoTaskQueue,
                             aCallback);
   return decoder.forget();
 }
 
 already_AddRefed<MediaDataDecoder>
 WMFDecoderModule::CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
-                                     MediaTaskQueue* aAudioTaskQueue,
+                                     FlushableMediaTaskQueue* aAudioTaskQueue,
                                      MediaDataDecoderCallback* aCallback)
 {
   nsRefPtr<MediaDataDecoder> decoder =
     new WMFMediaDataDecoder(new WMFAudioMFTManager(aConfig),
                             aAudioTaskQueue,
                             aCallback);
   return decoder.forget();
 }
--- a/dom/media/fmp4/wmf/WMFDecoderModule.h
+++ b/dom/media/fmp4/wmf/WMFDecoderModule.h
@@ -21,22 +21,22 @@ public:
 
   // Called when the decoders have shutdown.
   virtual nsresult Shutdown() MOZ_OVERRIDE;
 
   virtual already_AddRefed<MediaDataDecoder>
   CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
                      layers::LayersBackend aLayersBackend,
                      layers::ImageContainer* aImageContainer,
-                     MediaTaskQueue* aVideoTaskQueue,
+                     FlushableMediaTaskQueue* aVideoTaskQueue,
                      MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE;
 
   virtual already_AddRefed<MediaDataDecoder>
   CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
-                     MediaTaskQueue* aAudioTaskQueue,
+                     FlushableMediaTaskQueue* aAudioTaskQueue,
                      MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE;
 
   bool SupportsVideoMimeType(const char* aMimeType) MOZ_OVERRIDE;
   bool SupportsAudioMimeType(const char* aMimeType) MOZ_OVERRIDE;
 
   // Accessors that report whether we have the required MFTs available
   // on the system to play various codecs. Windows Vista doesn't have the
   // H.264/AAC decoders if the "Platform Update Supplement for Windows Vista"
--- a/dom/media/fmp4/wmf/WMFMediaDataDecoder.cpp
+++ b/dom/media/fmp4/wmf/WMFMediaDataDecoder.cpp
@@ -17,17 +17,17 @@ PRLogModuleInfo* GetDemuxerLog();
 #else
 #define LOG(...)
 #endif
 
 
 namespace mozilla {
 
 WMFMediaDataDecoder::WMFMediaDataDecoder(MFTManager* aMFTManager,
-                                         MediaTaskQueue* aTaskQueue,
+                                         FlushableMediaTaskQueue* aTaskQueue,
                                          MediaDataDecoderCallback* aCallback)
   : mTaskQueue(aTaskQueue)
   , mCallback(aCallback)
   , mMFTManager(aMFTManager)
 {
   MOZ_COUNT_CTOR(WMFMediaDataDecoder);
 }
 
--- a/dom/media/fmp4/wmf/WMFMediaDataDecoder.h
+++ b/dom/media/fmp4/wmf/WMFMediaDataDecoder.h
@@ -54,17 +54,17 @@ public:
 // Decodes audio and video using Windows Media Foundation. Samples are decoded
 // using the MFTDecoder created by the MFTManager. This class implements
 // the higher-level logic that drives mapping the MFT to the async
 // MediaDataDecoder interface. The specifics of decoding the exact stream
 // type are handled by MFTManager and the MFTDecoder it creates.
 class WMFMediaDataDecoder : public MediaDataDecoder {
 public:
   WMFMediaDataDecoder(MFTManager* aOutputSource,
-                      MediaTaskQueue* aAudioTaskQueue,
+                      FlushableMediaTaskQueue* aAudioTaskQueue,
                       MediaDataDecoderCallback* aCallback);
   ~WMFMediaDataDecoder();
 
   virtual nsresult Init() MOZ_OVERRIDE;
 
   virtual nsresult Input(mp4_demuxer::MP4Sample* aSample);
 
   virtual nsresult Flush() MOZ_OVERRIDE;
@@ -92,17 +92,17 @@ private:
 
   // Called on the task queue. Orders the MFT to drain, and then extracts
   // all available output.
   void ProcessDrain();
 
   void ProcessShutdown();
   void ProcessReleaseDecoder();
 
-  RefPtr<MediaTaskQueue> mTaskQueue;
+  RefPtr<FlushableMediaTaskQueue> mTaskQueue;
   MediaDataDecoderCallback* mCallback;
 
   RefPtr<MFTDecoder> mDecoder;
   nsAutoPtr<MFTManager> mMFTManager;
 
   // The last offset into the media resource that was passed into Input().
   // This is used to approximate the decoder's position in the media resource.
   int64_t mLastStreamOffset;
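
The WMF decoder is a typical consumer of the flushable queue: its Flush() entry point (declared above) has to discard queued decode tasks and wait for the queue to go idle before the wrapped MFT is reset, otherwise an in-flight ProcessDecode() could race with the reset. A minimal sketch of that shape; the class name is hypothetical and the reset call is an assumption, not this file's actual body:

  nsresult
  ExampleMFTBackedDecoder::Flush()
  {
    // Cancel pending decode tasks and block until the queue is idle, so
    // nothing can still be touching the decoder when it is reset below.
    mTaskQueue->Flush();

    // Reset the wrapped MFT state; the exact call here is an assumption.
    NS_ENSURE_TRUE(mMFTManager, NS_ERROR_FAILURE);
    mMFTManager->Flush();
    return NS_OK;
  }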
--- a/dom/media/fmp4/wmf/WMFVideoMFTManager.h
+++ b/dom/media/fmp4/wmf/WMFVideoMFTManager.h
@@ -56,17 +56,17 @@ private:
   uint32_t mVideoStride;
   uint32_t mVideoWidth;
   uint32_t mVideoHeight;
   nsIntRect mPictureRegion;
 
   RefPtr<MFTDecoder> mDecoder;
   RefPtr<layers::ImageContainer> mImageContainer;
   nsAutoPtr<DXVA2Manager> mDXVA2Manager;
-  RefPtr<MediaTaskQueue> mTaskQueue;
+  RefPtr<FlushableMediaTaskQueue> mTaskQueue;
   MediaDataDecoderCallback* mCallback;
 
   const bool mDXVAEnabled;
   const layers::LayersBackend mLayersBackend;
   bool mUseHwAccel;
 
   enum StreamType {
     Unknown,
--- a/dom/media/omx/MediaCodecReader.cpp
+++ b/dom/media/omx/MediaCodecReader.cpp
@@ -1268,22 +1268,22 @@ MediaCodecReader::ShutdownTaskQueues()
   }
 }
 
 bool
 MediaCodecReader::CreateTaskQueues()
 {
   if (mAudioTrack.mSource != nullptr && mAudioTrack.mCodec != nullptr &&
       !mAudioTrack.mTaskQueue) {
-    mAudioTrack.mTaskQueue = CreateMediaDecodeTaskQueue();
+    mAudioTrack.mTaskQueue = CreateFlushableMediaDecodeTaskQueue();
     NS_ENSURE_TRUE(mAudioTrack.mTaskQueue, false);
   }
   if (mVideoTrack.mSource != nullptr && mVideoTrack.mCodec != nullptr &&
       !mVideoTrack.mTaskQueue) {
-    mVideoTrack.mTaskQueue = CreateMediaDecodeTaskQueue();
+    mVideoTrack.mTaskQueue = CreateFlushableMediaDecodeTaskQueue();
     NS_ENSURE_TRUE(mVideoTrack.mTaskQueue, false);
   }
 
   return true;
 }
 
 bool
 MediaCodecReader::CreateMediaCodecs()
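
CreateFlushableMediaDecodeTaskQueue(), used in the hunk above, is the opt-in counterpart of CreateMediaDecodeTaskQueue(): same shared decode thread pool, but it hands back the subclass that still supports Flush(). A plausible shape of the helper; the pool name below is an assumption for illustration:

  already_AddRefed<FlushableMediaTaskQueue>
  CreateFlushableMediaDecodeTaskQueue()
  {
    // Sketch: construct the flushable subclass over a shared decode pool.
    nsRefPtr<FlushableMediaTaskQueue> queue = new FlushableMediaTaskQueue(
      SharedThreadPool::Get(NS_LITERAL_CSTRING("Media Decode")));
    return queue.forget();
  }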
--- a/dom/media/omx/MediaCodecReader.h
+++ b/dom/media/omx/MediaCodecReader.h
@@ -32,17 +32,17 @@ class MOZ_EXPORT MetaData;
 class MOZ_EXPORT MediaBuffer;
 struct MOZ_EXPORT MediaSource;
 
 class GonkNativeWindow;
 } // namespace android
 
 namespace mozilla {
 
-class MediaTaskQueue;
+class FlushableMediaTaskQueue;
 class MP3FrameParser;
 
 namespace layers {
 class TextureClient;
 } // namespace mozilla::layers
 
 class MediaCodecReader : public MediaOmxCommonReader
 {
@@ -155,17 +155,17 @@ protected:
    // mDiscontinuity, mFlushed, mInputEndOfStream, mOutputEndOfStream and
    // mSeekTimeUs need not be protected by a lock, because
    // mTaskQueue->Flush() flushes all pending tasks before they are modified.
     bool mInputEndOfStream;
     bool mOutputEndOfStream;
     int64_t mSeekTimeUs;
     bool mFlushed; // meaningless when mSeekTimeUs is invalid.
     bool mDiscontinuity;
-    nsRefPtr<MediaTaskQueue> mTaskQueue;
+    nsRefPtr<FlushableMediaTaskQueue> mTaskQueue;
 
   private:
     // Forbidden
     Track(const Track &rhs) = delete;
     const Track &operator=(const Track&) = delete;
   };
 
   // Receive a message from MessageHandler.
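
The Track comment at the top of this hunk is the invariant the whole file leans on: per-track members such as mSeekTimeUs are only read by tasks on mTaskQueue, and the reader flushes that queue before writing them from another thread, so by the time the write happens no task can still observe the old values. A sketch of that ordering with a hypothetical helper (the members come from Track above):

  void
  ExampleReader::PrepareTrackForSeek(Track& aTrack, int64_t aSeekTimeUs)
  {
    // 1. Discard queued decode tasks and wait until the queue is idle.
    //    After this returns, nothing is reading the members below.
    aTrack.mTaskQueue->Flush();

    // 2. Plain, unlocked writes are now safe.
    aTrack.mSeekTimeUs = aSeekTimeUs;
    aTrack.mFlushed = false;
    aTrack.mDiscontinuity = true;
  }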
--- a/dom/media/webm/IntelWebMVideoDecoder.h
+++ b/dom/media/webm/IntelWebMVideoDecoder.h
@@ -59,17 +59,17 @@ private:
   VP8Sample* PopSample();
 
   nsRefPtr<WebMReader> mReader;
   nsRefPtr<PlatformDecoderModule> mPlatform;
   nsRefPtr<MediaDataDecoder> mMediaDataDecoder;
 
  // TaskQueue on which the decoder can choose to decode.
   // Only non-null up until the decoder is created.
-  nsRefPtr<MediaTaskQueue> mTaskQueue;
+  nsRefPtr<FlushableMediaTaskQueue> mTaskQueue;
 
   // Monitor that protects all non-threadsafe state; the primitives
   // that follow.
   Monitor mMonitor;
   nsAutoPtr<mp4_demuxer::VideoDecoderConfig> mDecoderConfig;
 
   VP8SampleQueue mSampleQueue;
   nsAutoPtr<VP8Sample> mQueuedVideoSample;
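
The mTaskQueue member above is what IntelWebMVideoDecoder feeds into PlatformDecoderModule::CreateVideoDecoder() when it lazily creates mMediaDataDecoder, which is why the member type, WebMReader::GetVideoTaskQueue() below, and the PDM signatures all move to FlushableMediaTaskQueue together. A sketch of that call with the new type; imageContainer and callback are assumed locals, and the parameter order follows the CreateVideoDecoder declarations earlier in this patch:

  // Hypothetical call site inside IntelWebMVideoDecoder: the flushable queue
  // becomes the aVideoTaskQueue argument of the PDM factory method.
  mMediaDataDecoder =
    mPlatform->CreateVideoDecoder(*mDecoderConfig,
                                  mReader->GetLayersBackendType(),
                                  imageContainer,
                                  mTaskQueue,   // FlushableMediaTaskQueue*
                                  callback);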
--- a/dom/media/webm/WebMReader.cpp
+++ b/dom/media/webm/WebMReader.cpp
@@ -244,17 +244,17 @@ nsresult WebMReader::Init(MediaDecoderRe
   memset(&mVorbisBlock, 0, sizeof(vorbis_block));
 
 #if defined(MOZ_PDM_VPX)
   if (sIsIntelDecoderEnabled) {
     PlatformDecoderModule::Init();
 
     InitLayersBackendType();
 
-    mVideoTaskQueue = new MediaTaskQueue(
+    mVideoTaskQueue = new FlushableMediaTaskQueue(
       SharedThreadPool::Get(NS_LITERAL_CSTRING("IntelVP8 Video Decode")));
     NS_ENSURE_TRUE(mVideoTaskQueue, NS_ERROR_FAILURE);
   }
 #endif
 
   if (aCloneDonor) {
     mBufferedState = static_cast<WebMReader*>(aCloneDonor)->mBufferedState;
   } else {
--- a/dom/media/webm/WebMReader.h
+++ b/dom/media/webm/WebMReader.h
@@ -181,17 +181,17 @@ public:
   virtual void PushVideoPacket(NesteggPacketHolder* aItem);
 
   int GetVideoCodec();
   nsIntRect GetPicture();
   nsIntSize GetInitialFrame();
   uint64_t GetLastVideoFrameTime();
   void SetLastVideoFrameTime(uint64_t aFrameTime);
   layers::LayersBackend GetLayersBackendType() { return mLayersBackendType; }
-  MediaTaskQueue* GetVideoTaskQueue() { return mVideoTaskQueue; }
+  FlushableMediaTaskQueue* GetVideoTaskQueue() { return mVideoTaskQueue; }
 
 protected:
   // Setup opus decoder
   bool InitOpusDecoder();
 
   // Decode a nestegg packet of audio data. Push the audio data on the
   // audio queue. Returns true when there's more audio to decode,
   // false if the audio is finished, end of file has been reached,
@@ -272,17 +272,17 @@ private:
   // Codec ID of audio track
   int mAudioCodec;
   // Codec ID of video track
   int mVideoCodec;
 
   layers::LayersBackend mLayersBackendType;
 
   // For hardware video decoding.
-  nsRefPtr<MediaTaskQueue> mVideoTaskQueue;
+  nsRefPtr<FlushableMediaTaskQueue> mVideoTaskQueue;
 
   // Booleans to indicate if we have audio and/or video data
   bool mHasVideo;
   bool mHasAudio;
 
   // Opus padding should only be discarded on the final packet.  Once this
   // is set to true, if the reader attempts to decode any further packets it
   // will raise an error so we can indicate that the file is invalid.