Backed out changeset 3395ce618c91 (bug 979104) for mochitest-1 hangs and leaks
author Ed Morley <emorley@mozilla.com>
Mon, 09 Jun 2014 11:25:19 +0100
changeset 207841 c5f9a189da8bc6f53bf6a3d931b7167607ef9daa
parent 207840 00c2ba04f8d439ba9bb8624c10f058e287a57948
child 207842 da23ba53e4afafffaf719b69376cd858141a6203
push id 494
push user raliiev@mozilla.com
push date Mon, 25 Aug 2014 18:42:16 +0000
treeherder mozilla-release@a3cc3e46b571
bugs 979104
milestone 32.0a1
backs out 3395ce618c91f7ba993b34f487d754b9ab494bb8
Backed out changeset 3395ce618c91 (bug 979104) for mochitest-1 hangs and leaks
content/media/MediaData.h
content/media/MediaDataDecodedListener.h
content/media/MediaDecoder.cpp
content/media/MediaDecoder.h
content/media/MediaDecoderReader.cpp
content/media/MediaDecoderReader.h
content/media/MediaDecoderStateMachine.cpp
content/media/MediaDecoderStateMachine.h
content/media/MediaQueue.h
content/media/VideoUtils.cpp
content/media/VideoUtils.h
content/media/mediasource/MediaSourceDecoder.cpp
content/media/moz.build
content/media/omx/MediaOmxReader.cpp
content/media/omx/MediaOmxReader.h
content/media/plugins/MediaPluginReader.cpp
content/media/plugins/MediaPluginReader.h
content/media/test/manifest.js
content/media/test/test_bug465498.html
content/media/test/test_bug493187.html
content/media/test/test_seek.html
content/media/webaudio/MediaBufferDecoder.cpp
--- a/content/media/MediaData.h
+++ b/content/media/MediaData.h
@@ -32,37 +32,32 @@ public:
   MediaData(Type aType,
             int64_t aOffset,
             int64_t aTimestamp,
             int64_t aDuration)
     : mType(aType)
     , mOffset(aOffset)
     , mTime(aTimestamp)
     , mDuration(aDuration)
-    , mDiscontinuity(false)
   {}
 
   virtual ~MediaData() {}
 
   // Type of contained data.
   const Type mType;
 
   // Approximate byte offset where this data was demuxed from its media.
   const int64_t mOffset;
 
   // Start time of sample, in microseconds.
   const int64_t mTime;
 
   // Duration of sample, in microseconds.
   const int64_t mDuration;
 
-  // True if this is the first sample after a gap or discontinuity in
-  // the stream. This is true for the first sample in a stream after a seek.
-  bool mDiscontinuity;
-
   int64_t GetEndTime() const { return mTime + mDuration; }
 
 };
 
 // Holds a chunk of decoded audio frames.
 class AudioData : public MediaData {
 public:
 
deleted file mode 100644
--- a/content/media/MediaDataDecodedListener.h
+++ /dev/null
@@ -1,148 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim: set ts=8 sts=2 et sw=2 tw=80: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef MediaDataDecodedListener_h_
-#define MediaDataDecodedListener_h_
-
-#include "mozilla/Monitor.h"
-#include "MediaDecoderReader.h"
-
-namespace mozilla {
-
-class MediaDecoderStateMachine;
-class MediaData;
-
-// A RequestSampleCallback implementation that forwards samples onto the
-// MediaDecoderStateMachine via tasks that run on the supplied task queue.
-template<class Target>
-class MediaDataDecodedListener : public RequestSampleCallback {
-public:
-  MediaDataDecodedListener(Target* aTarget,
-                           MediaTaskQueue* aTaskQueue)
-    : mMonitor("MediaDataDecodedListener")
-    , mTaskQueue(aTaskQueue)
-    , mTarget(aTarget)
-  {
-    MOZ_ASSERT(aTarget);
-    MOZ_ASSERT(aTaskQueue);
-  }
-
-  virtual void OnAudioDecoded(AudioData* aSample) MOZ_OVERRIDE {
-    MonitorAutoLock lock(mMonitor);
-    nsAutoPtr<AudioData> sample(aSample);
-    if (!mTarget || !mTaskQueue) {
-      // We've been shutdown, abort.
-      return;
-    }
-    RefPtr<nsIRunnable> task(new DeliverAudioTask(sample.forget(), mTarget));
-    mTaskQueue->Dispatch(task);
-  }
-
-  virtual void OnAudioEOS() MOZ_OVERRIDE {
-    MonitorAutoLock lock(mMonitor);
-    if (!mTarget || !mTaskQueue) {
-      // We've been shutdown, abort.
-      return;
-    }
-    RefPtr<nsIRunnable> task(NS_NewRunnableMethod(mTarget, &Target::OnAudioEOS));
-    if (NS_FAILED(mTaskQueue->Dispatch(task))) {
-      NS_WARNING("Failed to dispatch OnAudioEOS task");
-    }
-  }
-
-  virtual void OnVideoDecoded(VideoData* aSample) MOZ_OVERRIDE {
-    MonitorAutoLock lock(mMonitor);
-    nsAutoPtr<VideoData> sample(aSample);
-    if (!mTarget || !mTaskQueue) {
-      // We've been shutdown, abort.
-      return;
-    }
-    RefPtr<nsIRunnable> task(new DeliverVideoTask(sample.forget(), mTarget));
-    mTaskQueue->Dispatch(task);
-  }
-
-  virtual void OnVideoEOS() MOZ_OVERRIDE {
-    MonitorAutoLock lock(mMonitor);
-    if (!mTarget || !mTaskQueue) {
-      // We've been shutdown, abort.
-      return;
-    }
-    RefPtr<nsIRunnable> task(NS_NewRunnableMethod(mTarget, &Target::OnVideoEOS));
-    if (NS_FAILED(mTaskQueue->Dispatch(task))) {
-      NS_WARNING("Failed to dispatch OnVideoEOS task");
-    }
-  }
-
-  virtual void OnDecodeError() MOZ_OVERRIDE {
-    MonitorAutoLock lock(mMonitor);
-    if (!mTarget || !mTaskQueue) {
-      // We've been shutdown, abort.
-      return;
-    }
-    RefPtr<nsIRunnable> task(NS_NewRunnableMethod(mTarget, &Target::OnDecodeError));
-    if (NS_FAILED(mTaskQueue->Dispatch(task))) {
-      NS_WARNING("Failed to dispatch OnAudioDecoded task");
-    }
-  }
-
-  void BreakCycles() {
-    MonitorAutoLock lock(mMonitor);
-    mTarget = nullptr;
-    mTaskQueue = nullptr;
-  }
-
-private:
-
-  class DeliverAudioTask : public nsRunnable {
-  public:
-    DeliverAudioTask(AudioData* aSample, Target* aTarget)
-      : mSample(aSample)
-      , mTarget(aTarget)
-    {
-      MOZ_COUNT_CTOR(DeliverAudioTask);
-    }
-    ~DeliverAudioTask()
-    {
-      MOZ_COUNT_DTOR(DeliverAudioTask);
-    }
-    NS_METHOD Run() {
-      mTarget->OnAudioDecoded(mSample.forget());
-      return NS_OK;
-    }
-  private:
-    nsAutoPtr<AudioData> mSample;
-    RefPtr<Target> mTarget;
-  };
-
-  class DeliverVideoTask : public nsRunnable {
-  public:
-    DeliverVideoTask(VideoData* aSample, Target* aTarget)
-      : mSample(aSample)
-      , mTarget(aTarget)
-    {
-      MOZ_COUNT_CTOR(DeliverVideoTask);
-    }
-    ~DeliverVideoTask()
-    {
-      MOZ_COUNT_DTOR(DeliverVideoTask);
-    }
-    NS_METHOD Run() {
-      mTarget->OnVideoDecoded(mSample.forget());
-      return NS_OK;
-    }
-  private:
-    nsAutoPtr<VideoData> mSample;
-    RefPtr<Target> mTarget;
-  };
-
-  Monitor mMonitor;
-  RefPtr<MediaTaskQueue> mTaskQueue;
-  RefPtr<Target> mTarget;
-};
-
-}
-
-#endif // MediaDataDecodedListener_h_
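
The MediaDataDecodedListener removed above exists to marshal decoded samples across threads: each OnAudioDecoded/OnVideoDecoded call wraps the sample in a runnable and dispatches it to the state machine's task queue, and BreakCycles() nulls the target and queue at shutdown. A minimal sketch of that hand-off pattern, using standard C++ stand-ins rather than the Gecko classes (SerialTaskQueue, ForwardDecoded, and OnDecoded are illustrative names, not Gecko APIs):

// Minimal sketch (not Gecko code) of the hand-off pattern used by the
// deleted MediaDataDecodedListener: wrap the callback in a task, move
// ownership of the sample into it, and dispatch it to a serial queue that
// the consumer drains on its own thread.
#include <cstdint>
#include <functional>
#include <memory>
#include <mutex>
#include <queue>

class SerialTaskQueue {
public:
  void Dispatch(std::function<void()> aTask) {
    std::lock_guard<std::mutex> lock(mMutex);
    mTasks.push(std::move(aTask));
  }
  // Called from the consumer's thread; runs one queued task, if any.
  void DrainOne() {
    std::function<void()> task;
    {
      std::lock_guard<std::mutex> lock(mMutex);
      if (mTasks.empty()) {
        return;
      }
      task = std::move(mTasks.front());
      mTasks.pop();
    }
    task();
  }
private:
  std::mutex mMutex;
  std::queue<std::function<void()>> mTasks;
};

struct Sample { int64_t mTime; };

template <class Target>
void ForwardDecoded(SerialTaskQueue& aQueue, Target* aTarget,
                    std::unique_ptr<Sample> aSample) {
  // Ownership of the sample moves into the task, mirroring the
  // nsAutoPtr<...>::forget() hand-off in the listener above.
  std::shared_ptr<Sample> sample(std::move(aSample));
  aQueue.Dispatch([aTarget, sample] { aTarget->OnDecoded(sample.get()); });
}

Moving ownership of the sample into the dispatched task keeps the decoding thread from touching it after dispatch, which is the point of the listener being backed out here.
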
--- a/content/media/MediaDecoder.cpp
+++ b/content/media/MediaDecoder.cpp
@@ -1521,17 +1521,17 @@ bool MediaDecoder::IsShutdown() const {
 }
 
 int64_t MediaDecoder::GetEndMediaTime() const {
   NS_ENSURE_TRUE(GetStateMachine(), -1);
   return GetStateMachine()->GetEndMediaTime();
 }
 
 // Drop reference to state machine.  Only called during shutdown dance.
-void MediaDecoder::BreakCycles() {
+void MediaDecoder::ReleaseStateMachine() {
   mDecoderStateMachine = nullptr;
 }
 
 MediaDecoderOwner* MediaDecoder::GetMediaOwner() const
 {
   return mOwner;
 }
 
--- a/content/media/MediaDecoder.h
+++ b/content/media/MediaDecoder.h
@@ -1,19 +1,19 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim:set ts=2 sw=2 sts=2 et cindent: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 /*
 Each video element based on MediaDecoder has a state machine to manage
 its play state and keep the current frame up to date. All state machines
-share time in a single shared thread. Each decoder also has a MediaTaskQueue
-running in a SharedThreadPool to decode audio and video data.
-Each decoder also has a thread to push decoded audio
+share time in a single shared thread. Each decoder also has one thread
+dedicated to decoding audio and video data. This thread is shutdown when
+playback is paused. Each decoder also has a thread to push decoded audio
 to the hardware. This thread is not created until playback starts, but
 currently is not destroyed when paused, only when playback ends.
 
 The decoder owns the resources for downloading the media file, and the
 high level state. It holds an owning reference to the state machine that
 owns all the resources related to decoding data, and manages the low level
 decoding operations and A/V sync.
 
@@ -229,21 +229,16 @@ struct SeekTarget {
     , mType(SeekTarget::Invalid)
   {
   }
   SeekTarget(int64_t aTimeUsecs, Type aType)
     : mTime(aTimeUsecs)
     , mType(aType)
   {
   }
-  SeekTarget(const SeekTarget& aOther)
-    : mTime(aOther.mTime)
-    , mType(aOther.mType)
-  {
-  }
   bool IsValid() const {
     return mType != SeekTarget::Invalid;
   }
   void Reset() {
     mTime = -1;
     mType = SeekTarget::Invalid;
   }
   // Seek target time in microseconds.
@@ -823,17 +818,17 @@ public:
   // Updates the approximate byte offset which playback has reached. This is
   // used to calculate the readyState transitions.
   void UpdatePlaybackOffset(int64_t aOffset);
 
   // Provide access to the state machine object
   MediaDecoderStateMachine* GetStateMachine() const;
 
   // Drop reference to state machine.  Only called during shutdown dance.
-  virtual void BreakCycles();
+  virtual void ReleaseStateMachine();
 
   // Notifies the element that decoding has failed.
   virtual void DecodeError();
 
   // Indicate whether the media is same-origin with the element.
   void UpdateSameOriginStatus(bool aSameOrigin);
 
   MediaDecoderOwner* GetOwner() MOZ_OVERRIDE;
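
The rename restored here (BreakCycles() back to ReleaseStateMachine()) does not change the mechanism: during the shutdown dance the decoder drops its owning reference to the state machine so the decoder/state-machine reference cycle can be torn down. A minimal sketch of that idiom, with std::shared_ptr standing in for Gecko's reference counting (the Decoder and StateMachine types below are illustrative, not the real classes):

// Sketch of the shutdown cycle-breaking idiom: two objects hold strong
// references to each other, and shutdown explicitly clears one edge so
// both can be destroyed. shared_ptr stands in for nsRefPtr/RefPtr here.
#include <memory>

struct StateMachine;

struct Decoder {
  std::shared_ptr<StateMachine> mStateMachine;

  // Counterpart of MediaDecoder::ReleaseStateMachine(): only called once
  // the state machine has finished shutting down.
  void ReleaseStateMachine() { mStateMachine = nullptr; }
};

struct StateMachine {
  // Back-reference that keeps the cycle alive until shutdown.
  std::shared_ptr<Decoder> mDecoder;
};

Until ReleaseStateMachine() runs, the two strong references keep both objects alive; dropping one edge is what lets the pair be destroyed.
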
--- a/content/media/MediaDecoderReader.cpp
+++ b/content/media/MediaDecoderReader.cpp
@@ -58,21 +58,19 @@ public:
     mSize += audioData->SizeOfIncludingThis(MallocSizeOf);
     return nullptr;
   }
 
   size_t mSize;
 };
 
 MediaDecoderReader::MediaDecoderReader(AbstractMediaDecoder* aDecoder)
-  : mAudioCompactor(mAudioQueue)
-  , mDecoder(aDecoder)
-  , mIgnoreAudioOutputFormat(false)
-  , mAudioDiscontinuity(false)
-  , mVideoDiscontinuity(false)
+  : mAudioCompactor(mAudioQueue),
+    mDecoder(aDecoder),
+    mIgnoreAudioOutputFormat(false)
 {
   MOZ_COUNT_CTOR(MediaDecoderReader);
 }
 
 MediaDecoderReader::~MediaDecoderReader()
 {
   ResetDecode();
   MOZ_COUNT_DTOR(MediaDecoderReader);
@@ -94,19 +92,16 @@ size_t MediaDecoderReader::SizeOfAudioQu
 
 nsresult MediaDecoderReader::ResetDecode()
 {
   nsresult res = NS_OK;
 
   VideoQueue().Reset();
   AudioQueue().Reset();
 
-  mAudioDiscontinuity = true;
-  mVideoDiscontinuity = true;
-
   return res;
 }
 
 VideoData* MediaDecoderReader::DecodeToFirstVideoData()
 {
   bool eof = false;
   while (!eof && VideoQueue().GetSize() == 0) {
     {
@@ -173,192 +168,186 @@ VideoData* MediaDecoderReader::FindStart
   int64_t startTime = std::min(videoStartTime, audioStartTime);
   if (startTime != INT64_MAX) {
     aOutStartTime = startTime;
   }
 
   return videoData;
 }
 
+nsresult MediaDecoderReader::DecodeToTarget(int64_t aTarget)
+{
+  DECODER_LOG(PR_LOG_DEBUG, ("MediaDecoderReader::DecodeToTarget(%lld) Begin", aTarget));
+
+  // Decode forward to the target frame. Start with video, if we have it.
+  if (HasVideo()) {
+    // Note: when decoding hits the end of stream we must keep the last frame
+    // in the video queue so that we'll have something to display after the
+    // seek completes. This makes our logic a bit messy.
+    bool eof = false;
+    nsAutoPtr<VideoData> video;
+    while (HasVideo() && !eof) {
+      while (VideoQueue().GetSize() == 0 && !eof) {
+        bool skip = false;
+        eof = !DecodeVideoFrame(skip, 0);
+        {
+          ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
+          if (mDecoder->IsShutdown()) {
+            return NS_ERROR_FAILURE;
+          }
+        }
+      }
+      if (eof) {
+        // Hit end of file, we want to display the last frame of the video.
+        if (video) {
+          DECODER_LOG(PR_LOG_DEBUG,
+            ("MediaDecoderReader::DecodeToTarget(%lld) repushing video frame [%lld, %lld] at EOF",
+            aTarget, video->mTime, video->GetEndTime()));
+          VideoQueue().PushFront(video.forget());
+        }
+        VideoQueue().Finish();
+        break;
+      }
+      video = VideoQueue().PeekFront();
+      // If the frame end time is less than the seek target, we won't want
+      // to display this frame after the seek, so discard it.
+      if (video && video->GetEndTime() <= aTarget) {
+        DECODER_LOG(PR_LOG_DEBUG,
+                    ("MediaDecoderReader::DecodeToTarget(%lld) pop video frame [%lld, %lld]",
+                     aTarget, video->mTime, video->GetEndTime()));
+        VideoQueue().PopFront();
+      } else {
+        // Found a frame after or encompassing the seek target.
+        if (aTarget >= video->mTime && video->GetEndTime() >= aTarget) {
+          // The seek target lies inside this frame's time slice. Adjust the frame's
+          // start time to match the seek target. We do this by replacing the
+          // first frame with a shallow copy which has the new timestamp.
+          VideoQueue().PopFront();
+          VideoData* temp = VideoData::ShallowCopyUpdateTimestamp(video, aTarget);
+          video = temp;
+          VideoQueue().PushFront(video);
+        }
+        DECODER_LOG(PR_LOG_DEBUG,
+                    ("MediaDecoderReader::DecodeToTarget(%lld) found target video frame [%lld,%lld]",
+                     aTarget, video->mTime, video->GetEndTime()));
+
+        video.forget();
+        break;
+      }
+    }
+    {
+      ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
+      if (mDecoder->IsShutdown()) {
+        return NS_ERROR_FAILURE;
+      }
+    }
+#ifdef PR_LOGGING
+    const VideoData* front =  VideoQueue().PeekFront();
+    DECODER_LOG(PR_LOG_DEBUG, ("First video frame after decode is %lld",
+                front ? front->mTime : -1));
+#endif
+  }
+
+  if (HasAudio()) {
+    // Decode audio forward to the seek target.
+    bool eof = false;
+    while (HasAudio() && !eof) {
+      while (!eof && AudioQueue().GetSize() == 0) {
+        eof = !DecodeAudioData();
+        {
+          ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
+          if (mDecoder->IsShutdown()) {
+            return NS_ERROR_FAILURE;
+          }
+        }
+      }
+      const AudioData* audio = AudioQueue().PeekFront();
+      if (!audio || eof) {
+        AudioQueue().Finish();
+        break;
+      }
+      CheckedInt64 startFrame = UsecsToFrames(audio->mTime, mInfo.mAudio.mRate);
+      CheckedInt64 targetFrame = UsecsToFrames(aTarget, mInfo.mAudio.mRate);
+      if (!startFrame.isValid() || !targetFrame.isValid()) {
+        return NS_ERROR_FAILURE;
+      }
+      if (startFrame.value() + audio->mFrames <= targetFrame.value()) {
+        // Our seek target lies after the frames in this AudioData. Pop it
+        // off the queue, and keep decoding forwards.
+        delete AudioQueue().PopFront();
+        audio = nullptr;
+        continue;
+      }
+      if (startFrame.value() > targetFrame.value()) {
+        // The seek target doesn't lie in the audio block just after the last
+        // audio frames we've seen which were before the seek target. This
+        // could have been the first audio data we've seen after seek, i.e. the
+        // seek terminated after the seek target in the audio stream. Just
+        // abort the audio decode-to-target, the state machine will play
+        // silence to cover the gap. Typically this happens in poorly muxed
+        // files.
+        NS_WARNING("Audio not synced after seek, maybe a poorly muxed file?");
+        break;
+      }
+
+      // The seek target lies somewhere in this AudioData's frames, strip off
+      // any frames which lie before the seek target, so we'll begin playback
+      // exactly at the seek target.
+      NS_ASSERTION(targetFrame.value() >= startFrame.value(),
+                   "Target must be at or after data start.");
+      NS_ASSERTION(targetFrame.value() < startFrame.value() + audio->mFrames,
+                   "Data must end after target.");
+
+      int64_t framesToPrune = targetFrame.value() - startFrame.value();
+      if (framesToPrune > audio->mFrames) {
+        // We've messed up somehow. Don't try to trim frames, the |frames|
+        // variable below will overflow.
+        NS_WARNING("Can't prune more frames than we have!");
+        break;
+      }
+      uint32_t frames = audio->mFrames - static_cast<uint32_t>(framesToPrune);
+      uint32_t channels = audio->mChannels;
+      nsAutoArrayPtr<AudioDataValue> audioData(new AudioDataValue[frames * channels]);
+      memcpy(audioData.get(),
+             audio->mAudioData.get() + (framesToPrune * channels),
+             frames * channels * sizeof(AudioDataValue));
+      CheckedInt64 duration = FramesToUsecs(frames, mInfo.mAudio.mRate);
+      if (!duration.isValid()) {
+        return NS_ERROR_FAILURE;
+      }
+      nsAutoPtr<AudioData> data(new AudioData(audio->mOffset,
+                                              aTarget,
+                                              duration.value(),
+                                              frames,
+                                              audioData.forget(),
+                                              channels));
+      delete AudioQueue().PopFront();
+      AudioQueue().PushFront(data.forget());
+      break;
+    }
+  }
+
+#ifdef PR_LOGGING
+  const VideoData* v = VideoQueue().PeekFront();
+  const AudioData* a = AudioQueue().PeekFront();
+  DECODER_LOG(PR_LOG_DEBUG,
+              ("MediaDecoderReader::DecodeToTarget(%lld) finished v=%lld a=%lld",
+              aTarget, v ? v->mTime : -1, a ? a->mTime : -1));
+#endif
+
+  return NS_OK;
+}
+
 nsresult
 MediaDecoderReader::GetBuffered(mozilla::dom::TimeRanges* aBuffered,
                                 int64_t aStartTime)
 {
   MediaResource* stream = mDecoder->GetResource();
   int64_t durationUs = 0;
   {
     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
     durationUs = mDecoder->GetMediaDuration();
   }
   GetEstimatedBufferedTimeRanges(stream, durationUs, aBuffered);
   return NS_OK;
 }
 
-class RequestVideoWithSkipTask : public nsRunnable {
-public:
-  RequestVideoWithSkipTask(MediaDecoderReader* aReader,
-                           int64_t aTimeThreshold)
-    : mReader(aReader)
-    , mTimeThreshold(aTimeThreshold)
-  {
-  }
-  NS_METHOD Run() {
-    bool skip = true;
-    mReader->RequestVideoData(skip, mTimeThreshold);
-    return NS_OK;
-  }
-private:
-  nsRefPtr<MediaDecoderReader> mReader;
-  int64_t mTimeThreshold;
-};
-
-void
-MediaDecoderReader::RequestVideoData(bool aSkipToNextKeyframe,
-                                     int64_t aTimeThreshold)
-{
-  bool skip = aSkipToNextKeyframe;
-  while (VideoQueue().GetSize() == 0 &&
-         !VideoQueue().IsFinished()) {
-    if (!DecodeVideoFrame(skip, aTimeThreshold)) {
-      VideoQueue().Finish();
-    } else if (skip) {
-      // We still need to decode more data in order to skip to the next
-      // keyframe. Post another task to the decode task queue to decode
-      // again. We don't just decode straight in a loop here, as that
-      // would hog the decode task queue.
-      RefPtr<nsIRunnable> task(new RequestVideoWithSkipTask(this, aTimeThreshold));
-      mTaskQueue->Dispatch(task);
-      return;
-    }
-  }
-  if (VideoQueue().GetSize() > 0) {
-    VideoData* v = VideoQueue().PopFront();
-    if (v && mVideoDiscontinuity) {
-      v->mDiscontinuity = true;
-      mVideoDiscontinuity = false;
-    }
-    GetCallback()->OnVideoDecoded(v);
-  } else if (VideoQueue().IsFinished()) {
-    GetCallback()->OnVideoEOS();
-  }
-}
-
-void
-MediaDecoderReader::RequestAudioData()
-{
-  while (AudioQueue().GetSize() == 0 &&
-         !AudioQueue().IsFinished()) {
-    if (!DecodeAudioData()) {
-      AudioQueue().Finish();
-    }
-  }
-  if (AudioQueue().GetSize() > 0) {
-    AudioData* a = AudioQueue().PopFront();
-    if (mAudioDiscontinuity) {
-      a->mDiscontinuity = true;
-      mAudioDiscontinuity = false;
-    }
-    GetCallback()->OnAudioDecoded(a);
-    return;
-  } else if (AudioQueue().IsFinished()) {
-    GetCallback()->OnAudioEOS();
-    return;
-  }
-}
-
-void
-MediaDecoderReader::SetCallback(RequestSampleCallback* aCallback)
-{
-  mSampleDecodedCallback = aCallback;
-}
-
-void
-MediaDecoderReader::SetTaskQueue(MediaTaskQueue* aTaskQueue)
-{
-  mTaskQueue = aTaskQueue;
-}
-
-void
-MediaDecoderReader::BreakCycles()
-{
-  if (mSampleDecodedCallback) {
-    mSampleDecodedCallback->BreakCycles();
-    mSampleDecodedCallback = nullptr;
-  }
-}
-
-void
-MediaDecoderReader::Shutdown()
-{
-  ReleaseMediaResources();
-}
-
-AudioDecodeRendezvous::AudioDecodeRendezvous()
-  : mMonitor("AudioDecodeRendezvous")
-  , mHaveResult(false)
-{
-}
-
-AudioDecodeRendezvous::~AudioDecodeRendezvous()
-{
-}
-
-void
-AudioDecodeRendezvous::OnAudioDecoded(AudioData* aSample)
-{
-  MonitorAutoLock mon(mMonitor);
-  mSample = aSample;
-  mStatus = NS_OK;
-  mHaveResult = true;
-  mon.NotifyAll();
-}
-
-void
-AudioDecodeRendezvous::OnAudioEOS()
-{
-  MonitorAutoLock mon(mMonitor);
-  mSample = nullptr;
-  mStatus = NS_OK;
-  mHaveResult = true;
-  mon.NotifyAll();
-}
-
-void
-AudioDecodeRendezvous::OnDecodeError()
-{
-  MonitorAutoLock mon(mMonitor);
-  mSample = nullptr;
-  mStatus = NS_ERROR_FAILURE;
-  mHaveResult = true;
-  mon.NotifyAll();
-}
-
-void
-AudioDecodeRendezvous::Reset()
-{
-  MonitorAutoLock mon(mMonitor);
-  mHaveResult = false;
-  mStatus = NS_OK;
-  mSample = nullptr;
-}
-
-nsresult
-AudioDecodeRendezvous::Await(nsAutoPtr<AudioData>& aSample)
-{
-  MonitorAutoLock mon(mMonitor);
-  while (!mHaveResult) {
-    mon.Wait();
-  }
-  mHaveResult = false;
-  aSample = mSample;
-  return mStatus;
-}
-
-void
-AudioDecodeRendezvous::Cancel()
-{
-  MonitorAutoLock mon(mMonitor);
-  mStatus = NS_ERROR_ABORT;
-  mHaveResult = true;
-  mon.NotifyAll();
-}
-
 } // namespace mozilla
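
The restored DecodeToTarget() trims the first decoded AudioData so playback resumes exactly at the seek target: timestamps are converted to frame counts, frames before the target are pruned, and the remaining frames are re-wrapped with the target as their start time. A small worked example of that arithmetic, with illustrative values (the 44.1 kHz rate, block size, and timestamps below are assumptions, not taken from the patch):

// Sketch of the audio trimming arithmetic in the restored DecodeToTarget():
// convert microsecond timestamps to frame counts, then drop the leading
// frames that fall before the seek target.
#include <cstdint>
#include <cstdio>

int main() {
  // All values here are illustrative assumptions, not data from the patch.
  const int64_t rate = 44100;           // audio sample rate in Hz
  const int64_t sampleTimeUs = 990000;  // AudioData::mTime of the block
  const int64_t targetUs = 1000000;     // seek target in microseconds
  const uint32_t blockFrames = 1024;    // frames in the AudioData block
  const uint32_t channels = 2;          // interleaved stereo

  // UsecsToFrames(t, rate) is t * rate / 1,000,000 (overflow-checked in Gecko).
  const int64_t startFrame  = sampleTimeUs * rate / 1000000;  // 43659
  const int64_t targetFrame = targetUs * rate / 1000000;      // 44100
  const int64_t framesToPrune = targetFrame - startFrame;     // 441

  const uint32_t keptFrames = blockFrames - static_cast<uint32_t>(framesToPrune);

  // The kept samples start framesToPrune * channels values into the buffer,
  // and the trimmed block is re-stamped with the seek target as its mTime.
  std::printf("prune %lld frames, keep %u frames, copy from value offset %lld\n",
              static_cast<long long>(framesToPrune), keptFrames,
              static_cast<long long>(framesToPrune * channels));
  return 0;
}

With these values, 441 of the 1024 frames fall before the target and are dropped, and the copy starts 882 AudioDataValues into the interleaved stereo buffer.
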
--- a/content/media/MediaDecoderReader.h
+++ b/content/media/MediaDecoderReader.h
@@ -13,93 +13,63 @@
 #include "AudioCompactor.h"
 
 namespace mozilla {
 
 namespace dom {
 class TimeRanges;
 }
 
-class RequestSampleCallback;
-
-// Encapsulates the decoding and reading of media data. Reading can either be
-// synchronous and done on the calling "decode" thread, or asynchronous and
-// performed on a background thread, with the result being returned by
-// callback. Never hold the decoder monitor when calling into this class.
-// Unless otherwise specified, methods and fields of this class can only
-// be accessed on the decode task queue.
+// Encapsulates the decoding and reading of media data. Reading can only be
+// done on the decode thread. Never hold the decoder monitor when
+// calling into this class. Unless otherwise specified, methods and fields of
+// this class can only be accessed on the decode thread.
 class MediaDecoderReader {
 public:
-
-  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaDecoderReader)
-
   MediaDecoderReader(AbstractMediaDecoder* aDecoder);
   virtual ~MediaDecoderReader();
 
   // Initializes the reader, returns NS_OK on success, or NS_ERROR_FAILURE
   // on failure.
   virtual nsresult Init(MediaDecoderReader* aCloneDonor) = 0;
 
   // True if this reader is waiting for media resource allocation.
   virtual bool IsWaitingMediaResources() { return false; }
   // True when this reader needs to enter the dormant state.
   virtual bool IsDormantNeeded() { return false; }
   // Release media resources; they should be released in the dormant state.
-  // The reader can be made usable again by calling ReadMetadata().
   virtual void ReleaseMediaResources() {};
-  // Breaks reference-counted cycles. Called during shutdown.
-  void BreakCycles();
-
-  // Destroys the decoding state. The reader cannot be made usable again.
-  // This is different from ReleaseMediaResources() as it is irreversible,
-  // whereas ReleaseMediaResources() is reversible.
-  virtual void Shutdown();
-
-  virtual void SetCallback(RequestSampleCallback* aDecodedSampleCallback);
-  virtual void SetTaskQueue(MediaTaskQueue* aTaskQueue);
+  // Release the decoder during shutdown
+  virtual void ReleaseDecoder() {};
 
   // Resets all state related to decoding, emptying all buffers etc.
-  // Cancels all pending Request*Data() request callbacks, and flushes the
-  // decode pipeline. The decoder must not call any of the callbacks for
-  // outstanding Request*Data() calls after this is called. Calls to
-  // Request*Data() made after this should be processed as usual.
-  // Normally this call preceedes a Seek() call, or shutdown.
-  // The first samples of every stream produced after a ResetDecode() call
-  // *must* be marked as "discontinuities". If they're not, seeking won't work
-  // properly!
   virtual nsresult ResetDecode();
 
-  // Requests the Reader to call OnAudioDecoded() on aCallback with one
-  // audio sample. The decode should be performed asynchronously, and
-  // the callback can be performed on any thread. Don't hold the decoder
-  // monitor while calling this, as the implementation may try to wait
-  // on something that needs the monitor and deadlock.
-  virtual void RequestAudioData();
+  // Decodes an unspecified amount of audio data, enqueuing the audio data
+  // in mAudioQueue. Returns true when there's more audio to decode,
+  // false if the audio is finished, end of file has been reached,
+  // or an unrecoverable read error has occurred.
+  virtual bool DecodeAudioData() = 0;
 
-  // Requests the Reader to call OnVideoDecoded() on aCallback with one
-  // video sample. The decode should be performed asynchronously, and
-  // the callback can be performed on any thread. Don't hold the decoder
-  // monitor while calling this, as the implementation may try to wait
-  // on something that needs the monitor and deadlock.
-  // If aSkipToKeyframe is true, the decode should skip ahead to the
-  // the next keyframe at or after aTimeThreshold microseconds.
-  virtual void RequestVideoData(bool aSkipToNextKeyframe,
-                                int64_t aTimeThreshold);
+  // Reads and decodes one video frame. Packets with a timestamp less
+  // than aTimeThreshold will be decoded (unless they're not keyframes
+  // and aKeyframeSkip is true), but will not be added to the queue.
+  virtual bool DecodeVideoFrame(bool &aKeyframeSkip,
+                                int64_t aTimeThreshold) = 0;
 
   virtual bool HasAudio() = 0;
   virtual bool HasVideo() = 0;
 
   // Read header data for all bitstreams in the file. Fills aInfo with
   // the data required to present the media, and optionally fills *aTags
   // with tag metadata from the file.
   // Returns NS_OK on success, or NS_ERROR_FAILURE on failure.
   virtual nsresult ReadMetadata(MediaInfo* aInfo,
                                 MetadataTags** aTags) = 0;
 
-  // TODO: DEPRECATED. This uses synchronous decoding.
   // Stores the presentation time of the first frame we'd be able to play if
   // we started playback at the current position. Returns the first video
   // frame, if we have video.
   virtual VideoData* FindStartTime(int64_t& aOutStartTime);
 
   // Moves the decode head to aTime microseconds. aStartTime and aEndTime
   // denote the start and end times of the media in usecs, and aCurrentTime
   // is the current playback position in microseconds.
@@ -123,16 +93,32 @@ public:
   // Tell the reader that the data decoded are not for direct playback, so it
   // can accept more files, in particular those which have more channels than
   // available in the audio output.
   void SetIgnoreAudioOutputFormat()
   {
     mIgnoreAudioOutputFormat = true;
   }
 
+protected:
+  // Queue of audio frames. This queue is threadsafe, and is accessed from
+  // the audio, decoder, state machine, and main threads.
+  MediaQueue<AudioData> mAudioQueue;
+
+  // Queue of video frames. This queue is threadsafe, and is accessed from
+  // the decoder, state machine, and main threads.
+  MediaQueue<VideoData> mVideoQueue;
+
+  // An adapter to the audio queue which first copies data to buffers with
+  // minimal allocation slop and then pushes them to the queue.  This is
+  // useful for decoders working with formats that give awkward numbers of
+  // frames such as mp3.
+  AudioCompactor mAudioCompactor;
+
+public:
   // Populates aBuffered with the time ranges which are buffered. aStartTime
   // must be the presentation time of the first frame in the media, e.g.
   // the media time corresponding to playback time/position 0. This function
   // is called on the main, decode, and state machine threads.
   //
   // This base implementation in MediaDecoderReader estimates the time ranges
   // buffered by interpolating the cached byte ranges with the duration
   // of the media. Reader subclasses should override this method if they
@@ -165,144 +151,32 @@ public:
   // Returns a pointer to the decoder.
   AbstractMediaDecoder* GetDecoder() {
     return mDecoder;
   }
 
   AudioData* DecodeToFirstAudioData();
   VideoData* DecodeToFirstVideoData();
 
+  // Decodes samples until we reach frames required to play at time aTarget
+  // (usecs). This also trims the samples to start exactly at aTarget,
+  // by discarding audio samples and adjusting start times of video frames.
+  nsresult DecodeToTarget(int64_t aTarget);
+
   MediaInfo GetMediaInfo() { return mInfo; }
 
 protected:
 
-  // Overrides of this function should decode an unspecified amount of
-  // audio data, enqueuing the audio data in mAudioQueue. Returns true
-  // when there's more audio to decode, false if the audio is finished,
-  // end of file has been reached, or an unrecoverable read error has
-  // occurred. This function blocks until the decode is complete.
-  virtual bool DecodeAudioData() {
-    return false;
-  }
-
-  // Overrides of this function should read and decode one video frame.
-  // Packets with a timestamp less than aTimeThreshold will be decoded
-  // (unless they're not keyframes and aKeyframeSkip is true), but will
-  // not be added to the queue. This function blocks until the decode
-  // is complete.
-  virtual bool DecodeVideoFrame(bool &aKeyframeSkip, int64_t aTimeThreshold) {
-    return false;
-  }
-
-  RequestSampleCallback* GetCallback() {
-    MOZ_ASSERT(mSampleDecodedCallback);
-    return mSampleDecodedCallback;
-  }
-
-  virtual MediaTaskQueue* GetTaskQueue() {
-    return mTaskQueue;
-  }
-
-  // Queue of audio frames. This queue is threadsafe, and is accessed from
-  // the audio, decoder, state machine, and main threads.
-  MediaQueue<AudioData> mAudioQueue;
-
-  // Queue of video frames. This queue is threadsafe, and is accessed from
-  // the decoder, state machine, and main threads.
-  MediaQueue<VideoData> mVideoQueue;
-
-  // An adapter to the audio queue which first copies data to buffers with
-  // minimal allocation slop and then pushes them to the queue.  This is
-  // useful for decoders working with formats that give awkward numbers of
-  // frames such as mp3.
-  AudioCompactor mAudioCompactor;
-
   // Reference to the owning decoder object.
   AbstractMediaDecoder* mDecoder;
 
   // Stores presentation info required for playback.
   MediaInfo mInfo;
 
   // Whether we should accept media that we know we can't play
   // directly, because they have a number of channels higher than
   // what we support.
   bool mIgnoreAudioOutputFormat;
-
-private:
-
-  nsRefPtr<RequestSampleCallback> mSampleDecodedCallback;
-
-  nsRefPtr<MediaTaskQueue> mTaskQueue;
-
-  // Flags whether the next audio/video sample comes after a "gap" or
-  // "discontinuity" in the stream. For example after a seek.
-  bool mAudioDiscontinuity;
-  bool mVideoDiscontinuity;
-};
-
-// Interface that callers to MediaDecoderReader::Request{Audio,Video}Data()
-// must implement to receive the requested samples asynchronously.
-// This object is refcounted, and cycles must be broken by calling
-// BreakCycles() during shutdown.
-class RequestSampleCallback {
-public:
-  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(RequestSampleCallback)
-
-  // Receives the result of a RequestAudioData() call.
-  virtual void OnAudioDecoded(AudioData* aSample) = 0;
-
-  // Called when a RequestAudioData() call can't be fulfilled as we've
-  // reached the end of stream.
-  virtual void OnAudioEOS() = 0;
-
-  // Receives the result of a RequestVideoData() call.
-  virtual void OnVideoDecoded(VideoData* aSample) = 0;
-
-  // Called when a RequestVideoData() call can't be fulfilled as we've
-  // reached the end of stream.
-  virtual void OnVideoEOS() = 0;
-
-  // Called when there's a decode error. No more sample requests
-  // will succeed.
-  virtual void OnDecodeError() = 0;
-
-  // Called during shutdown to break any reference cycles.
-  virtual void BreakCycles() = 0;
-
-  virtual ~RequestSampleCallback() {}
-};
-
-// A RequestSampleCallback implementation that can be passed to the
-// MediaDecoderReader to block the thread requesting an audio sample until
-// the audio decode is complete. This is used to adapt the asynchronous
-// model of the MediaDecoderReader to a synchronous model.
-class AudioDecodeRendezvous : public RequestSampleCallback {
-public:
-  AudioDecodeRendezvous();
-  ~AudioDecodeRendezvous();
-
-  // RequestSampleCallback implementation. Called when decode is complete.
-  // Note: aSample is null at end of stream.
-  virtual void OnAudioDecoded(AudioData* aSample) MOZ_OVERRIDE;
-  virtual void OnAudioEOS() MOZ_OVERRIDE;
-  virtual void OnVideoDecoded(VideoData* aSample) MOZ_OVERRIDE {}
-  virtual void OnVideoEOS() MOZ_OVERRIDE {}
-  virtual void OnDecodeError() MOZ_OVERRIDE;
-  virtual void BreakCycles() MOZ_OVERRIDE {};
-  void Reset();
-
-  // Returns failure on error, or NS_OK.
-  // If *aSample is null, EOS has been reached.
-  nsresult Await(nsAutoPtr<AudioData>& aSample);
-
-  // Interrupts a call to Await().
-  void Cancel();
-
-private:
-  Monitor mMonitor;
-  nsresult mStatus;
-  nsAutoPtr<AudioData> mSample;
-  bool mHaveResult;
 };
 
 } // namespace mozilla
 
 #endif
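
The AudioDecodeRendezvous class removed above adapts the asynchronous callback model back to a blocking call: the caller waits on a monitor until the decode thread delivers a sample, an EOS, or an error. A minimal sketch of that wait/notify handshake, using std::mutex and std::condition_variable in place of Gecko's Monitor (DecodeRendezvous and its members are illustrative names):

// Sketch of the rendezvous pattern the removed AudioDecodeRendezvous
// implements: the decode thread calls OnDecoded()/OnError(), and the caller
// blocks in Await() until one of them fires.
#include <condition_variable>
#include <memory>
#include <mutex>

template <class Sample>
class DecodeRendezvous {
public:
  void OnDecoded(std::unique_ptr<Sample> aSample) {
    std::lock_guard<std::mutex> lock(mMutex);
    mSample = std::move(aSample);  // null sample would mean end of stream
    mOk = true;
    mHaveResult = true;
    mCond.notify_all();
  }
  void OnError() {
    std::lock_guard<std::mutex> lock(mMutex);
    mSample.reset();
    mOk = false;
    mHaveResult = true;
    mCond.notify_all();
  }
  // Blocks until a result is delivered; returns false on decode error.
  bool Await(std::unique_ptr<Sample>& aOut) {
    std::unique_lock<std::mutex> lock(mMutex);
    mCond.wait(lock, [this] { return mHaveResult; });
    mHaveResult = false;
    aOut = std::move(mSample);
    return mOk;
  }
private:
  std::mutex mMutex;
  std::condition_variable mCond;
  std::unique_ptr<Sample> mSample;
  bool mHaveResult = false;
  bool mOk = true;
};

Await() clears mHaveResult before returning so the rendezvous can be reused for the next request, mirroring what Reset() does in the removed class.
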
--- a/content/media/MediaDecoderStateMachine.cpp
+++ b/content/media/MediaDecoderStateMachine.cpp
@@ -52,26 +52,19 @@ extern PRLogModuleInfo* gMediaDecoderLog
 #define DECODER_LOG(type, msg, ...) \
   PR_LOG(gMediaDecoderLog, type, ("Decoder=%p " msg, mDecoder.get(), ##__VA_ARGS__))
 #define VERBOSE_LOG(msg, ...)                          \
     PR_BEGIN_MACRO                                     \
       if (!PR_GetEnv("MOZ_QUIET")) {                   \
         DECODER_LOG(PR_LOG_DEBUG, msg, ##__VA_ARGS__); \
       }                                                \
     PR_END_MACRO
-#define SAMPLE_LOG(msg, ...)                          \
-    PR_BEGIN_MACRO                                     \
-      if (PR_GetEnv("MEDIA_LOG_SAMPLES")) {            \
-        DECODER_LOG(PR_LOG_DEBUG, msg, ##__VA_ARGS__); \
-      }                                                \
-    PR_END_MACRO
 #else
 #define DECODER_LOG(type, msg, ...)
 #define VERBOSE_LOG(msg, ...)
-#define SAMPLE_LOG(msg, ...)
 #endif
 
 // GetCurrentTime is defined in winbase.h as a zero-argument macro forwarding to
 // GetTickCount() and conflicts with MediaDecoderStateMachine::GetCurrentTime
 // implementation.  With unified builds, putting this in headers is not enough.
 #ifdef GetCurrentTime
 #undef GetCurrentTime
 #endif
@@ -194,34 +187,32 @@ MediaDecoderStateMachine::MediaDecoderSt
   mVideoFrameEndTime(-1),
   mVolume(1.0),
   mPlaybackRate(1.0),
   mPreservesPitch(true),
   mBasePosition(0),
   mAmpleVideoFrames(2),
   mLowAudioThresholdUsecs(LOW_AUDIO_USECS),
   mAmpleAudioThresholdUsecs(AMPLE_AUDIO_USECS),
-  mAudioRequestPending(false),
-  mVideoRequestPending(false),
+  mDispatchedAudioDecodeTask(false),
+  mDispatchedVideoDecodeTask(false),
   mAudioCaptured(false),
   mTransportSeekable(true),
   mMediaSeekable(true),
   mPositionChangeQueued(false),
   mAudioCompleted(false),
   mGotDurationFromMetaData(false),
   mDispatchedEventToDecode(false),
   mStopAudioThread(true),
   mQuickBuffering(false),
   mMinimizePreroll(false),
   mDecodeThreadWaiting(false),
   mRealTime(aRealTime),
   mDispatchedDecodeMetadataTask(false),
-  mDropAudioUntilNextDiscontinuity(false),
-  mDropVideoUntilNextDiscontinuity(false),
-  mDecodeToSeekTarget(false),
+  mDispatchedDecodeSeekTask(false),
   mLastFrameStatus(MediaDecoderOwner::NEXT_FRAME_UNINITIALIZED),
   mTimerId(0)
 {
   MOZ_COUNT_CTOR(MediaDecoderStateMachine);
   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
 
   // Only enable realtime mode when "media.realtime_decoder.enabled" is true.
   if (Preferences::GetBool("media.realtime_decoder.enabled", false) == false)
@@ -561,484 +552,171 @@ bool MediaDecoderStateMachine::HaveEnoug
 }
 
 bool
 MediaDecoderStateMachine::NeedToDecodeVideo()
 {
   AssertCurrentThreadInMonitor();
   NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
                "Should be on state machine or decode thread.");
-  return IsVideoDecoding() &&
-         ((mState == DECODER_STATE_SEEKING && mDecodeToSeekTarget) ||
-          (!mMinimizePreroll && !HaveEnoughDecodedVideo()));
+  return mIsVideoDecoding &&
+         !mMinimizePreroll &&
+         !HaveEnoughDecodedVideo();
 }
 
 void
 MediaDecoderStateMachine::DecodeVideo()
 {
-  int64_t currentTime = 0;
-  bool skipToNextKeyFrame = false;
+  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
+  NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
+
+  if (mState != DECODER_STATE_DECODING && mState != DECODER_STATE_BUFFERING) {
+    mDispatchedVideoDecodeTask = false;
+    return;
+  }
+
+  // We don't want to consider skipping to the next keyframe if we've
+  // only just started up the decode loop, so wait until we've decoded
+  // some frames before enabling the keyframe skip logic on video.
+  if (mIsVideoPrerolling &&
+      (static_cast<uint32_t>(VideoQueue().GetSize())
+        >= mVideoPrerollFrames * mPlaybackRate))
   {
-    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
-    NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
-
-    if (mState != DECODER_STATE_DECODING &&
-        mState != DECODER_STATE_BUFFERING &&
-        mState != DECODER_STATE_SEEKING) {
-      mVideoRequestPending = false;
-      DispatchDecodeTasksIfNeeded();
-      return;
-    }
-
-    // We don't want to consider skipping to the next keyframe if we've
-    // only just started up the decode loop, so wait until we've decoded
-    // some frames before enabling the keyframe skip logic on video.
-    if (mIsVideoPrerolling &&
+    mIsVideoPrerolling = false;
+  }
+
+  // We'll skip the video decode to the nearest keyframe if we're low on
+  // audio, or if we're low on video, provided we're not running low on
+  // data to decode. If we're running low on downloaded data to decode,
+  // we won't start keyframe skipping, as we'll be pausing playback to buffer
+  // soon anyway and we'll want to be able to display frames immediately
+  // after buffering finishes.
+  if (mState == DECODER_STATE_DECODING &&
+      !mSkipToNextKeyFrame &&
+      mIsVideoDecoding &&
+      ((!mIsAudioPrerolling && mIsAudioDecoding &&
+        GetDecodedAudioDuration() < mLowAudioThresholdUsecs * mPlaybackRate) ||
+        (!mIsVideoPrerolling && mIsVideoDecoding &&
+         // don't skip frame when |clock time| <= |mVideoFrameEndTime| for
+         // we are still in the safe range without underrunning video frames
+         GetClock() > mVideoFrameEndTime &&
         (static_cast<uint32_t>(VideoQueue().GetSize())
-          >= mVideoPrerollFrames * mPlaybackRate))
-    {
-      mIsVideoPrerolling = false;
-    }
-
-    // We'll skip the video decode to the nearest keyframe if we're low on
-    // audio, or if we're low on video, provided we're not running low on
-    // data to decode. If we're running low on downloaded data to decode,
-    // we won't start keyframe skipping, as we'll be pausing playback to buffer
-    // soon anyway and we'll want to be able to display frames immediately
-    // after buffering finishes.
-    if (mState == DECODER_STATE_DECODING &&
-        mIsVideoDecoding &&
-        ((!mIsAudioPrerolling && mIsAudioDecoding &&
-          GetDecodedAudioDuration() < mLowAudioThresholdUsecs * mPlaybackRate) ||
-          (!mIsVideoPrerolling && IsVideoDecoding() &&
-           // don't skip frame when |clock time| <= |mVideoFrameEndTime| for
-           // we are still in the safe range without underrunning video frames
-           GetClock() > mVideoFrameEndTime &&
-          (static_cast<uint32_t>(VideoQueue().GetSize())
-            < LOW_VIDEO_FRAMES * mPlaybackRate))) &&
-        !HasLowUndecodedData())
-    {
-      skipToNextKeyFrame = true;
-      DECODER_LOG(PR_LOG_DEBUG, "Skipping video decode to the next keyframe");
-    }
-    currentTime = mState == DECODER_STATE_SEEKING ? 0 : GetMediaTime();
-
-    // Time the video decode, so that if it's slow, we can increase our low
-    // audio threshold to reduce the chance of an audio underrun while we're
-    // waiting for a video decode to complete.
-    mVideoDecodeStartTime = TimeStamp::Now();
+          < LOW_VIDEO_FRAMES * mPlaybackRate))) &&
+      !HasLowUndecodedData())
+  {
+    mSkipToNextKeyFrame = true;
+    DECODER_LOG(PR_LOG_DEBUG, "Skipping video decode to the next keyframe");
+  }
+
+  // Time the video decode, so that if it's slow, we can increase our low
+  // audio threshold to reduce the chance of an audio underrun while we're
+  // waiting for a video decode to complete.
+  TimeDuration decodeTime;
+  {
+    int64_t currentTime = GetMediaTime();
+    ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
+    TimeStamp start = TimeStamp::Now();
+    mIsVideoDecoding = mReader->DecodeVideoFrame(mSkipToNextKeyFrame, currentTime);
+    decodeTime = TimeStamp::Now() - start;
   }
-
-  mReader->RequestVideoData(skipToNextKeyFrame, currentTime);
+  if (!mIsVideoDecoding) {
+    // Playback ended for this stream, close the sample queue.
+    VideoQueue().Finish();
+    CheckIfDecodeComplete();
+  }
+
+  if (THRESHOLD_FACTOR * DurationToUsecs(decodeTime) > mLowAudioThresholdUsecs &&
+      !HasLowUndecodedData())
+  {
+    mLowAudioThresholdUsecs =
+      std::min(THRESHOLD_FACTOR * DurationToUsecs(decodeTime), AMPLE_AUDIO_USECS);
+    mAmpleAudioThresholdUsecs = std::max(THRESHOLD_FACTOR * mLowAudioThresholdUsecs,
+                                          mAmpleAudioThresholdUsecs);
+    DECODER_LOG(PR_LOG_DEBUG, "Slow video decode, set mLowAudioThresholdUsecs=%lld mAmpleAudioThresholdUsecs=%lld",
+                mLowAudioThresholdUsecs, mAmpleAudioThresholdUsecs);
+  }
+
+  SendStreamData();
+
+  // The ready state can change when we've decoded data, so update the
+  // ready state, so that DOM events can fire.
+  UpdateReadyState();
+
+  mDispatchedVideoDecodeTask = false;
+  DispatchDecodeTasksIfNeeded();
 }
 
 bool
 MediaDecoderStateMachine::NeedToDecodeAudio()
 {
   AssertCurrentThreadInMonitor();
   NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
                "Should be on state machine or decode thread.");
-  return IsAudioDecoding() &&
-         ((mState == DECODER_STATE_SEEKING && mDecodeToSeekTarget) ||
-          (!mMinimizePreroll &&
-          !HaveEnoughDecodedAudio(mAmpleAudioThresholdUsecs * mPlaybackRate) &&
-          (mState != DECODER_STATE_SEEKING || mDecodeToSeekTarget)));
+  return mIsAudioDecoding &&
+         !mMinimizePreroll &&
+         !HaveEnoughDecodedAudio(mAmpleAudioThresholdUsecs * mPlaybackRate);
 }
 
 void
 MediaDecoderStateMachine::DecodeAudio()
 {
-  {
-    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
-    NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
-
-    if (mState != DECODER_STATE_DECODING &&
-        mState != DECODER_STATE_BUFFERING &&
-        mState != DECODER_STATE_SEEKING) {
-      mAudioRequestPending = false;
-      DispatchDecodeTasksIfNeeded();
-      mon.NotifyAll();
-      return;
-    }
-
-    // We don't want to consider skipping to the next keyframe if we've
-    // only just started up the decode loop, so wait until we've decoded
-    // some audio data before enabling the keyframe skip logic on audio.
-    if (mIsAudioPrerolling &&
-        GetDecodedAudioDuration() >= mAudioPrerollUsecs * mPlaybackRate) {
-      mIsAudioPrerolling = false;
-    }
-  }
-  mReader->RequestAudioData();
-}
-
-bool
-MediaDecoderStateMachine::IsAudioSeekComplete()
-{
-  AssertCurrentThreadInMonitor();
-  SAMPLE_LOG("IsAudioSeekComplete() curTarVal=%d mAudDis=%d aqFin=%d aqSz=%d",
-    mCurrentSeekTarget.IsValid(), mDropAudioUntilNextDiscontinuity, AudioQueue().IsFinished(), AudioQueue().GetSize());
-  return
-    !HasAudio() ||
-    (mCurrentSeekTarget.IsValid() &&
-     !mDropAudioUntilNextDiscontinuity &&
-     (AudioQueue().IsFinished() || AudioQueue().GetSize() > 0));
-}
-
-bool
-MediaDecoderStateMachine::IsVideoSeekComplete()
-{
-  AssertCurrentThreadInMonitor();
-  SAMPLE_LOG("IsVideoSeekComplete() curTarVal=%d mVidDis=%d vqFin=%d vqSz=%d",
-    mCurrentSeekTarget.IsValid(), mDropVideoUntilNextDiscontinuity, VideoQueue().IsFinished(), VideoQueue().GetSize());
-  return
-    !HasVideo() ||
-    (mCurrentSeekTarget.IsValid() &&
-     !mDropVideoUntilNextDiscontinuity &&
-     (VideoQueue().IsFinished() || VideoQueue().GetSize() > 0));
-}
-
-void
-MediaDecoderStateMachine::OnAudioEOS()
-{
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
-  SAMPLE_LOG("OnAudioEOS");
-  mAudioRequestPending = false;
-  AudioQueue().Finish();
-  switch (mState) {
-    case DECODER_STATE_DECODING_METADATA: {
-      MaybeFinishDecodeMetadata();
-      return;
-    }
-    case DECODER_STATE_BUFFERING:
-    case DECODER_STATE_DECODING: {
-      CheckIfDecodeComplete();
-      SendStreamData();
-      // The ready state can change when we've decoded data, so update the
-      // ready state, so that DOM events can fire.
-      UpdateReadyState();
-      mDecoder->GetReentrantMonitor().NotifyAll();
-      return;
-    }
-
-    case DECODER_STATE_SEEKING: {
-      if (!mCurrentSeekTarget.IsValid()) {
-        // We've received an EOS from a previous decode. Discard it.
-        return;
-      }
-      mDropAudioUntilNextDiscontinuity = false;
-      CheckIfSeekComplete();
-      return;
-    }
-    default: {
-      // Ignore other cases.
-      return;
-    }
-  }
-}
-
-void
-MediaDecoderStateMachine::OnAudioDecoded(AudioData* aAudioSample)
-{
-  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
-  nsAutoPtr<AudioData> audio(aAudioSample);
-  MOZ_ASSERT(audio);
-  mAudioRequestPending = false;
-
-  SAMPLE_LOG("OnAudioDecoded [%lld,%lld] disc=%d",
-             (audio ? audio->mTime : -1),
-             (audio ? audio->GetEndTime() : -1),
-             (audio ? audio->mDiscontinuity : 0));
-
-  switch (mState) {
-    case DECODER_STATE_DECODING_METADATA: {
-      Push(audio.forget());
-      MaybeFinishDecodeMetadata();
-      return;
-    }
-
-    case DECODER_STATE_BUFFERING:
-    case DECODER_STATE_DECODING: {
-      // In buffering and decoding state, we simply enqueue samples.
-      Push(audio.forget());
-      return;
-    }
-
-    case DECODER_STATE_SEEKING: {
-      if (!mCurrentSeekTarget.IsValid()) {
-        // We've received a sample from a previous decode. Discard it.
-        return;
-      }
-      if (audio->mDiscontinuity) {
-        mDropAudioUntilNextDiscontinuity = false;
-      }
-      if (!mDropAudioUntilNextDiscontinuity) {
-        // We must be after the discontinuity; we're receiving samples
-        // at or after the seek target.
-        if (mCurrentSeekTarget.mType == SeekTarget::PrevSyncPoint) {
-          // Non-precise seek; we can stop the seek at the first sample.
-          AudioQueue().Push(audio.forget());
-        } else {
-          // We're doing an accurate seek. We must discard
-          // MediaData up to the one containing exact seek target.
-          if (NS_FAILED(DropAudioUpToSeekTarget(audio.forget()))) {
-            DecodeError();
-            return;
-          }
-        }
-      }
-      CheckIfSeekComplete();
-      return;
-    }
-    default: {
-      // Ignore other cases.
-      return;
-    }
-  }
-}
-
-void
-MediaDecoderStateMachine::Push(AudioData* aSample)
-{
-  MOZ_ASSERT(aSample);
-  // TODO: Send aSample to MSG and recalculate readystate before pushing,
-  // otherwise AdvanceFrame may pop the sample before we have a chance
-  // to reach playing.
-  AudioQueue().Push(aSample);
-  if (mState > DECODER_STATE_DECODING_METADATA) {
-    SendStreamData();
-    // The ready state can change when we've decoded data, so update the
-    // ready state, so that DOM events can fire.
-    UpdateReadyState();
-    DispatchDecodeTasksIfNeeded();
-    mDecoder->GetReentrantMonitor().NotifyAll();
-  }
-}
-
-void
-MediaDecoderStateMachine::Push(VideoData* aSample)
-{
-  MOZ_ASSERT(aSample);
-  // TODO: Send aSample to MSG and recalculate readystate before pushing,
-  // otherwise AdvanceFrame may pop the sample before we have a chance
-  // to reach playing.
-  VideoQueue().Push(aSample);
-  if (mState > DECODER_STATE_DECODING_METADATA) {
-    SendStreamData();
-    // The ready state can change when we've decoded data, so update the
-    // ready state, so that DOM events can fire.
-    UpdateReadyState();
-    DispatchDecodeTasksIfNeeded();
-    mDecoder->GetReentrantMonitor().NotifyAll();
-  }
-}
-
-void
-MediaDecoderStateMachine::OnDecodeError()
-{
-  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
-  DecodeError();
-}
-
-void
-MediaDecoderStateMachine::MaybeFinishDecodeMetadata()
-{
-  AssertCurrentThreadInMonitor();
-  if ((IsAudioDecoding() && AudioQueue().GetSize() == 0) ||
-      (IsVideoDecoding() && VideoQueue().GetSize() == 0)) {
+  NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
+
+  if (mState != DECODER_STATE_DECODING && mState != DECODER_STATE_BUFFERING) {
+    mDispatchedAudioDecodeTask = false;
     return;
   }
-  if (NS_FAILED(FinishDecodeMetadata())) {
-    DecodeError();
+
+  // We don't want to consider skipping to the next keyframe if we've
+  // only just started up the decode loop, so wait until we've decoded
+  // some audio data before enabling the keyframe skip logic on audio.
+  if (mIsAudioPrerolling &&
+      GetDecodedAudioDuration() >= mAudioPrerollUsecs * mPlaybackRate) {
+    mIsAudioPrerolling = false;
   }
-}
-
-void
-MediaDecoderStateMachine::OnVideoEOS()
-{
-  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
-  SAMPLE_LOG("OnVideoEOS");
-  mVideoRequestPending = false;
-  switch (mState) {
-    case DECODER_STATE_DECODING_METADATA: {
-      VideoQueue().Finish();
-      MaybeFinishDecodeMetadata();
-      return;
-    }
-
-    case DECODER_STATE_BUFFERING:
-    case DECODER_STATE_DECODING: {
-      VideoQueue().Finish();
-      CheckIfDecodeComplete();
-      SendStreamData();
-      // The ready state can change when we've decoded data, so update the
-      // ready state, so that DOM events can fire.
-      UpdateReadyState();
-      mDecoder->GetReentrantMonitor().NotifyAll();
-      return;
-    }
-    case DECODER_STATE_SEEKING: {
-      if (!mCurrentSeekTarget.IsValid()) {
-        // We've received a sample from a previous decode. Discard it.
-        return;
-      }
-      // Null sample. Hit end of stream. If we have decoded a frame,
-      // insert it into the queue so that we have something to display.
-      if (mFirstVideoFrameAfterSeek) {
-        VideoQueue().Push(mFirstVideoFrameAfterSeek.forget());
-      }
-      VideoQueue().Finish();
-      mDropVideoUntilNextDiscontinuity = false;
-      CheckIfSeekComplete();
-      return;
-    }
-    default: {
-      // Ignore other cases.
-      return;
-    }
+
+  {
+    ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
+    mIsAudioDecoding = mReader->DecodeAudioData();
   }
-}
-
-void
-MediaDecoderStateMachine::OnVideoDecoded(VideoData* aVideoSample)
-{
-  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
-  nsAutoPtr<VideoData> video(aVideoSample);
-  mVideoRequestPending = false;
-
-  SAMPLE_LOG("OnVideoDecoded [%lld,%lld] disc=%d",
-             (video ? video->mTime : -1),
-             (video ? video->GetEndTime() : -1),
-             (video ? video->mDiscontinuity : 0));
-
-  switch (mState) {
-    case DECODER_STATE_DECODING_METADATA: {
-      Push(video.forget());
-      MaybeFinishDecodeMetadata();
-      return;
-    }
-
-    case DECODER_STATE_BUFFERING:
-    case DECODER_STATE_DECODING: {
-      Push(video.forget());
-      // If the requested video sample was slow to arrive, increase the
-      // amount of audio we buffer to ensure that we don't run out of audio.
-      // TODO: Detect when we're truly async, and don't do this if so, as
-      // it's not necessary.
-      TimeDuration decodeTime = TimeStamp::Now() - mVideoDecodeStartTime;
-      if (THRESHOLD_FACTOR * DurationToUsecs(decodeTime) > mLowAudioThresholdUsecs &&
-          !HasLowUndecodedData())
-      {
-        mLowAudioThresholdUsecs =
-          std::min(THRESHOLD_FACTOR * DurationToUsecs(decodeTime), AMPLE_AUDIO_USECS);
-        mAmpleAudioThresholdUsecs = std::max(THRESHOLD_FACTOR * mLowAudioThresholdUsecs,
-                                              mAmpleAudioThresholdUsecs);
-        DECODER_LOG(PR_LOG_DEBUG, "Slow video decode, set mLowAudioThresholdUsecs=%lld mAmpleAudioThresholdUsecs=%lld",
-                    mLowAudioThresholdUsecs, mAmpleAudioThresholdUsecs);
-      }
-      return;
-    }
-    case DECODER_STATE_SEEKING: {
-      if (!mCurrentSeekTarget.IsValid()) {
-        // We've received a sample from a previous decode. Discard it.
-        return;
-      }
-      if (mDropVideoUntilNextDiscontinuity) {
-        if (video->mDiscontinuity) {
-          mDropVideoUntilNextDiscontinuity = false;
-        }
-      }
-      if (!mDropVideoUntilNextDiscontinuity) {
-        // We must be after the discontinuity; we're receiving samples
-        // at or after the seek target.
-        if (mCurrentSeekTarget.mType == SeekTarget::PrevSyncPoint) {
-          // Non-precise seek; we can stop the seek at the first sample.
-          VideoQueue().Push(video.forget());
-        } else {
-          // We're doing an accurate seek. We still need to discard
-          // MediaData up to the one containing exact seek target.
-          if (NS_FAILED(DropVideoUpToSeekTarget(video.forget()))) {
-            DecodeError();
-            return;
-          }
-        }
-      }
-      CheckIfSeekComplete();
-      return;
-    }
-    default: {
-      // Ignore other cases.
-      return;
-    }
+  if (!mIsAudioDecoding) {
+    // Playback ended for this stream, close the sample queue.
+    AudioQueue().Finish();
+    CheckIfDecodeComplete();
   }
-}
-
-void
-MediaDecoderStateMachine::CheckIfSeekComplete()
-{
-  AssertCurrentThreadInMonitor();
-
-  const bool videoSeekComplete = IsVideoSeekComplete();
-  if (HasVideo() && !videoSeekComplete) {
-    // We haven't reached the target. Ensure we have requested another sample.
-    if (NS_FAILED(EnsureVideoDecodeTaskQueued())) {
-      NS_WARNING("Failed to request video during seek");
-      DecodeError();
-    }
-  }
-
-  const bool audioSeekComplete = IsAudioSeekComplete();
-  if (HasAudio() && !audioSeekComplete) {
-    // We haven't reached the target. Ensure we have requested another sample.
-    if (NS_FAILED(EnsureAudioDecodeTaskQueued())) {
-      NS_WARNING("Failed to request audio during seek");
-      DecodeError();
-    }
-  }
-
-  SAMPLE_LOG("CheckIfSeekComplete() audioSeekComplete=%d videoSeekComplete=%d",
-             audioSeekComplete, videoSeekComplete);
-
-  if (audioSeekComplete && videoSeekComplete) {
-    mDecodeToSeekTarget = false;
-    RefPtr<nsIRunnable> task(
-      NS_NewRunnableMethod(this, &MediaDecoderStateMachine::SeekCompleted));
-    nsresult rv = mDecodeTaskQueue->Dispatch(task);
-    if (NS_FAILED(rv)) {
-      DecodeError();
-    }
-  }
-}
-
-bool
-MediaDecoderStateMachine::IsAudioDecoding()
-{
-  AssertCurrentThreadInMonitor();
-  return HasAudio() && !AudioQueue().IsFinished();
-}
-
-bool
-MediaDecoderStateMachine::IsVideoDecoding()
-{
-  AssertCurrentThreadInMonitor();
-  return HasVideo() && !VideoQueue().IsFinished();
+
+  SendStreamData();
+
+  // Notify to ensure that the AudioLoop() is not waiting, in case it was
+  // waiting for more audio to be decoded.
+  mDecoder->GetReentrantMonitor().NotifyAll();
+
+  // The ready state can change when we've decoded data, so update the
+  // ready state so that DOM events can fire.
+  UpdateReadyState();
+
+  mDispatchedAudioDecodeTask = false;
+  DispatchDecodeTasksIfNeeded();
 }
 
 void
 MediaDecoderStateMachine::CheckIfDecodeComplete()
 {
   AssertCurrentThreadInMonitor();
   if (mState == DECODER_STATE_SHUTDOWN ||
       mState == DECODER_STATE_SEEKING ||
       mState == DECODER_STATE_COMPLETED) {
     // Don't change our state if we've already been shut down, or we're seeking,
     // since we don't want to abort the shutdown or seek processes.
     return;
   }
-  if (!IsVideoDecoding() && !IsAudioDecoding()) {
+  MOZ_ASSERT(!AudioQueue().IsFinished() || !mIsAudioDecoding);
+  MOZ_ASSERT(!VideoQueue().IsFinished() || !mIsVideoDecoding);
+  if (!mIsVideoDecoding && !mIsAudioDecoding) {
     // We've finished decoding all active streams,
     // so move to COMPLETED state.
     mState = DECODER_STATE_COMPLETED;
     DispatchDecodeTasksIfNeeded();
     ScheduleStateMachine();
   }
   DECODER_LOG(PR_LOG_DEBUG, "CheckIfDecodeComplete %scompleted",
               ((mState == DECODER_STATE_COMPLETED) ? "" : "NOT "));
@@ -1331,17 +1009,19 @@ uint32_t MediaDecoderStateMachine::PlayF
   }
   return frames;
 }
 
 nsresult MediaDecoderStateMachine::Init(MediaDecoderStateMachine* aCloneDonor)
 {
   MOZ_ASSERT(NS_IsMainThread());
 
-  RefPtr<SharedThreadPool> decodePool(GetMediaDecodeThreadPool());
+  RefPtr<SharedThreadPool> decodePool(
+    SharedThreadPool::Get(NS_LITERAL_CSTRING("Media Decode"),
+                          Preferences::GetUint("media.num-decode-threads", 25)));
   NS_ENSURE_TRUE(decodePool, NS_ERROR_FAILURE);
 
   RefPtr<SharedThreadPool> stateMachinePool(
     SharedThreadPool::Get(NS_LITERAL_CSTRING("Media State Machine"), 1));
   NS_ENSURE_TRUE(stateMachinePool, NS_ERROR_FAILURE);
 
   mDecodeTaskQueue = new MediaTaskQueue(decodePool.forget());
   NS_ENSURE_TRUE(mDecodeTaskQueue, NS_ERROR_FAILURE);
@@ -1354,27 +1034,17 @@ nsresult MediaDecoderStateMachine::Init(
   mStateMachineThreadPool = stateMachinePool;
 
   nsresult rv;
   mTimer = do_CreateInstance("@mozilla.org/timer;1", &rv);
   NS_ENSURE_SUCCESS(rv, rv);
   rv = mTimer->SetTarget(GetStateMachineThread());
   NS_ENSURE_SUCCESS(rv, rv);
 
-  // Note: This creates a cycle, broken in shutdown.
-  mMediaDecodedListener =
-    new MediaDataDecodedListener<MediaDecoderStateMachine>(this,
-                                                           mDecodeTaskQueue);
-  mReader->SetCallback(mMediaDecodedListener);
-  mReader->SetTaskQueue(mDecodeTaskQueue);
-
-  rv = mReader->Init(cloneReader);
-  NS_ENSURE_SUCCESS(rv, rv);
-
-  return NS_OK;
+  return mReader->Init(cloneReader);
 }
 
 void MediaDecoderStateMachine::StopPlayback()
 {
   DECODER_LOG(PR_LOG_DEBUG, "StopPlayback()");
 
   AssertCurrentThreadInMonitor();
 
@@ -1426,22 +1096,20 @@ void MediaDecoderStateMachine::StartPlay
   mPlayStartTime = TimeStamp::Now();
 
   NS_ASSERTION(IsPlaying(), "Should report playing by end of StartPlayback()");
   if (NS_FAILED(StartAudioThread())) {
     NS_WARNING("Failed to create audio thread");
   }
   mDecoder->GetReentrantMonitor().NotifyAll();
   mDecoder->UpdateStreamBlockingForStateMachinePlaying();
-  DispatchDecodeTasksIfNeeded();
 }
 
 void MediaDecoderStateMachine::UpdatePlaybackPositionInternal(int64_t aTime)
 {
-  SAMPLE_LOG("UpdatePlaybackPositionInternal(%lld) (mStartTime=%lld)", aTime, mStartTime);
   NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
                "Should be on state machine thread.");
   AssertCurrentThreadInMonitor();
 
   NS_ASSERTION(mStartTime >= 0, "Should have positive mStartTime");
   mCurrentFrameTime = aTime - mStartTime;
   NS_ASSERTION(mCurrentFrameTime >= 0, "CurrentTime should be positive!");
   if (aTime > mEndTime) {
@@ -1644,22 +1312,28 @@ void MediaDecoderStateMachine::StartDeco
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   if (mState == DECODER_STATE_DECODING) {
     return;
   }
   mState = DECODER_STATE_DECODING;
 
   mDecodeStartTime = TimeStamp::Now();
 
+  // Reset our "stream finished decoding" flags, so we try to decode all
+  // streams that we have when we start decoding.
+  mIsVideoDecoding = HasVideo() && !VideoQueue().IsFinished();
+  mIsAudioDecoding = HasAudio() && !AudioQueue().IsFinished();
+
   CheckIfDecodeComplete();
   if (mState == DECODER_STATE_COMPLETED) {
     return;
   }
 
   // Reset other state to pristine values before starting decode.
+  mSkipToNextKeyFrame = false;
   mIsAudioPrerolling = true;
   mIsVideoPrerolling = true;
 
   // Ensure that we've got tasks enqueued to decode data if we need to.
   DispatchDecodeTasksIfNeeded();
 
   ScheduleStateMachine();
 }
@@ -1701,26 +1375,20 @@ void MediaDecoderStateMachine::Play()
   // assume the user is likely to want to keep playing in future.
   mMinimizePreroll = false;
   ScheduleStateMachine();
 }
 
 void MediaDecoderStateMachine::ResetPlayback()
 {
   NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
-  MOZ_ASSERT(mState == DECODER_STATE_SEEKING);
   mVideoFrameEndTime = -1;
   mAudioStartTime = -1;
   mAudioEndTime = -1;
   mAudioCompleted = false;
-  AudioQueue().Reset();
-  VideoQueue().Reset();
-  mFirstVideoFrameAfterSeek = nullptr;
-  mDropAudioUntilNextDiscontinuity = true;
-  mDropVideoUntilNextDiscontinuity = true;
 }
 
 void MediaDecoderStateMachine::NotifyDataArrived(const char* aBuffer,
                                                      uint32_t aLength,
                                                      int64_t aOffset)
 {
   NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
   mReader->NotifyDataArrived(aBuffer, aLength, aOffset);
@@ -1810,19 +1478,18 @@ nsresult
 MediaDecoderStateMachine::EnqueueDecodeMetadataTask()
 {
   AssertCurrentThreadInMonitor();
 
   if (mState != DECODER_STATE_DECODING_METADATA ||
       mDispatchedDecodeMetadataTask) {
     return NS_OK;
   }
-  RefPtr<nsIRunnable> task(
+  nsresult rv = mDecodeTaskQueue->Dispatch(
     NS_NewRunnableMethod(this, &MediaDecoderStateMachine::CallDecodeMetadata));
-  nsresult rv = mDecodeTaskQueue->Dispatch(task);
   if (NS_SUCCEEDED(rv)) {
     mDispatchedDecodeMetadataTask = true;
   } else {
     NS_WARNING("Dispatch ReadMetadata task failed.");
     return rv;
   }
 
   return NS_OK;
@@ -1843,22 +1510,16 @@ MediaDecoderStateMachine::SetReaderIdle(
   mReader->SetIdle();
 }
 
 void
 MediaDecoderStateMachine::DispatchDecodeTasksIfNeeded()
 {
   AssertCurrentThreadInMonitor();
 
-  if (mState != DECODER_STATE_DECODING &&
-      mState != DECODER_STATE_BUFFERING &&
-      mState != DECODER_STATE_SEEKING) {
-    return;
-  }
-
   // NeedToDecodeAudio() can go from false to true while we hold the
   // monitor, but it can't go from true to false. This can happen because
   // NeedToDecodeAudio() takes into account the amount of decoded audio
   // that's been written to the AudioStream but not played yet. So if we
   // were calling NeedToDecodeAudio() twice and we thread-context switch
   // between the calls, audio can play, which can affect the return value
   // of NeedToDecodeAudio() giving inconsistent results. So we cache the
   // value returned by NeedToDecodeAudio(), and make decisions
@@ -1882,21 +1543,16 @@ MediaDecoderStateMachine::DispatchDecode
 
   if (needToDecodeAudio) {
     EnsureAudioDecodeTaskQueued();
   }
   if (needToDecodeVideo) {
     EnsureVideoDecodeTaskQueued();
   }
 
-  SAMPLE_LOG("DispatchDecodeTasksIfNeeded needAudio=%d dispAudio=%d needVideo=%d dispVid=%d needIdle=%d",
-             needToDecodeAudio, mAudioRequestPending,
-             needToDecodeVideo, mVideoRequestPending,
-             needIdle);
-
   if (needIdle) {
     RefPtr<nsIRunnable> event = NS_NewRunnableMethod(
         this, &MediaDecoderStateMachine::SetReaderIdle);
     nsresult rv = mDecodeTaskQueue->Dispatch(event.forget());
     if (NS_FAILED(rv) && mState != DECODER_STATE_SHUTDOWN) {
       NS_WARNING("Failed to dispatch event to set decoder idle state");
     }
   }
@@ -1905,32 +1561,25 @@ MediaDecoderStateMachine::DispatchDecode
 nsresult
 MediaDecoderStateMachine::EnqueueDecodeSeekTask()
 {
   NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
                "Should be on state machine or decode thread.");
   AssertCurrentThreadInMonitor();
 
   if (mState != DECODER_STATE_SEEKING ||
-      !mSeekTarget.IsValid() ||
-      mCurrentSeekTarget.IsValid()) {
+      mDispatchedDecodeSeekTask) {
     return NS_OK;
   }
-  mCurrentSeekTarget = mSeekTarget;
-  mSeekTarget.Reset();
-  mDropAudioUntilNextDiscontinuity = HasAudio();
-  mDropVideoUntilNextDiscontinuity = HasVideo();
-
-  RefPtr<nsIRunnable> task(
+  nsresult rv = mDecodeTaskQueue->Dispatch(
     NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DecodeSeek));
-  nsresult rv = mDecodeTaskQueue->Dispatch(task);
-  if (NS_FAILED(rv)) {
+  if (NS_SUCCEEDED(rv)) {
+    mDispatchedDecodeSeekTask = true;
+  } else {
     NS_WARNING("Dispatch DecodeSeek task failed.");
-    mCurrentSeekTarget.Reset();
-    DecodeError();
   }
   return rv;
 }
 
 nsresult
 MediaDecoderStateMachine::DispatchAudioDecodeTaskIfNeeded()
 {
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
@@ -1942,35 +1591,31 @@ MediaDecoderStateMachine::DispatchAudioD
   }
 
   return NS_OK;
 }
 
 nsresult
 MediaDecoderStateMachine::EnsureAudioDecodeTaskQueued()
 {
-  AssertCurrentThreadInMonitor();
+  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
                "Should be on state machine or decode thread.");
 
-  SAMPLE_LOG("EnsureAudioDecodeTaskQueued isDecoding=%d dispatched=%d",
-              IsAudioDecoding(), mAudioRequestPending);
-
   if (mState >= DECODER_STATE_COMPLETED) {
     return NS_OK;
   }
 
   MOZ_ASSERT(mState > DECODER_STATE_DECODING_METADATA);
 
-  if (IsAudioDecoding() && !mAudioRequestPending) {
-    RefPtr<nsIRunnable> task(
+  if (mIsAudioDecoding && !mDispatchedAudioDecodeTask) {
+    nsresult rv = mDecodeTaskQueue->Dispatch(
       NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DecodeAudio));
-    nsresult rv = mDecodeTaskQueue->Dispatch(task);
     if (NS_SUCCEEDED(rv)) {
-      mAudioRequestPending = true;
+      mDispatchedAudioDecodeTask = true;
     } else {
       NS_WARNING("Failed to dispatch task to decode audio");
     }
   }
 
   return NS_OK;
 }
 
@@ -1986,36 +1631,31 @@ MediaDecoderStateMachine::DispatchVideoD
   }
 
   return NS_OK;
 }
 
 nsresult
 MediaDecoderStateMachine::EnsureVideoDecodeTaskQueued()
 {
-  AssertCurrentThreadInMonitor();
-
-  SAMPLE_LOG("EnsureVideoDecodeTaskQueued isDecoding=%d dispatched=%d",
-             IsVideoDecoding(), mVideoRequestPending);
-
+  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
                "Should be on state machine or decode thread.");
 
   if (mState >= DECODER_STATE_COMPLETED) {
     return NS_OK;
   }
 
   MOZ_ASSERT(mState > DECODER_STATE_DECODING_METADATA);
 
-  if (IsVideoDecoding() && !mVideoRequestPending) {
-    RefPtr<nsIRunnable> task(
+  if (mIsVideoDecoding && !mDispatchedVideoDecodeTask) {
+    nsresult rv = mDecodeTaskQueue->Dispatch(
       NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DecodeVideo));
-    nsresult rv = mDecodeTaskQueue->Dispatch(task);
     if (NS_SUCCEEDED(rv)) {
-      mVideoRequestPending = true;
+      mDispatchedVideoDecodeTask = true;
     } else {
       NS_WARNING("Failed to dispatch task to decode video");
     }
   }
 
   return NS_OK;
 }
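
The EnsureAudioDecodeTaskQueued/EnsureVideoDecodeTaskQueued changes above restore a
dispatch-at-most-once pattern: a boolean flag, guarded by the decoder monitor, records
that a decode task is already queued, and the task clears the flag when it finishes so
the next call can dispatch again. A minimal standalone sketch of that idea (an
illustration only, not part of this patch; names are hypothetical, std::mutex stands in
for the ReentrantMonitor, and a bare queue of callables stands in for MediaTaskQueue):

  #include <functional>
  #include <mutex>
  #include <queue>

  class DecodeScheduler {
  public:
    // Queue a decode task unless one is already pending
    // (cf. mDispatchedAudioDecodeTask / mDispatchedVideoDecodeTask).
    void EnsureDecodeTaskQueued() {
      std::lock_guard<std::mutex> lock(mMonitor);
      if (mTaskPending) {
        return;                              // a task is already queued
      }
      // In the real code this task would be drained by the task queue's thread.
      mTasks.push([this] { RunDecode(); });
      mTaskPending = true;
    }

  private:
    void RunDecode() {
      // ... decode one chunk of audio or video here ...
      std::lock_guard<std::mutex> lock(mMonitor);
      mTaskPending = false;                  // allow the next dispatch
    }

    std::mutex mMonitor;                     // stand-in for the decoder monitor
    bool mTaskPending = false;
    std::queue<std::function<void()>> mTasks;  // stand-in for MediaTaskQueue
  };
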
 
@@ -2062,18 +1702,22 @@ int64_t MediaDecoderStateMachine::AudioD
 
 bool MediaDecoderStateMachine::HasLowDecodedData(int64_t aAudioUsecs)
 {
   AssertCurrentThreadInMonitor();
   // We consider ourselves low on decoded data if we're low on audio,
   // provided we've not decoded to the end of the audio stream, or
   // if we're low on video frames, provided
   // we've not decoded to the end of the video stream.
-  return ((IsAudioDecoding() && AudioDecodedUsecs() < aAudioUsecs) ||
-         (IsVideoDecoding() &&
+  return ((HasAudio() &&
+           !AudioQueue().IsFinished() &&
+           AudioDecodedUsecs() < aAudioUsecs)
+          ||
+         (HasVideo() &&
+          !VideoQueue().IsFinished() &&
           static_cast<uint32_t>(VideoQueue().GetSize()) < LOW_VIDEO_FRAMES));
 }
 
 bool MediaDecoderStateMachine::HasLowUndecodedData()
 {
   return HasLowUndecodedData(mLowDataThresholdUsecs);
 }
 
@@ -2102,25 +1746,20 @@ bool MediaDecoderStateMachine::HasLowUnd
 }
 
 void
 MediaDecoderStateMachine::DecodeError()
 {
   AssertCurrentThreadInMonitor();
   NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
 
-  if (mState == DECODER_STATE_SHUTDOWN) {
-    // Already shutdown.
-    return;
-  }
-
   // Change state to shutdown before sending error report to MediaDecoder
   // and the HTMLMediaElement, so that our pipeline can start exiting
   // cleanly during the sync dispatch below.
-  DECODER_LOG(PR_LOG_WARNING, "Decode error, changed state to SHUTDOWN due to error");
+  DECODER_LOG(PR_LOG_WARNING, "Decode error, changed state to SHUTDOWN");
   ScheduleStateMachine();
   mState = DECODER_STATE_SHUTDOWN;
   mDecoder->GetReentrantMonitor().NotifyAll();
 
   // Dispatch the event to call DecodeError synchronously. This ensures
   // we're in shutdown state by the time we exit the decode thread.
   // If we just moved to shutdown state here on the decode thread, we may
   // cause the state machine to shutdown/free memory without closing its
@@ -2154,98 +1793,47 @@ nsresult MediaDecoderStateMachine::Decod
   NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
   DECODER_LOG(PR_LOG_DEBUG, "Decoding Media Headers");
   if (mState != DECODER_STATE_DECODING_METADATA) {
     return NS_ERROR_FAILURE;
   }
 
   nsresult res;
   MediaInfo info;
+  MetadataTags* tags;
   {
     ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
-    res = mReader->ReadMetadata(&info, getter_Transfers(mMetadataTags));
+    res = mReader->ReadMetadata(&info, &tags);
   }
-  if (NS_SUCCEEDED(res)) {
-    if (mState == DECODER_STATE_DECODING_METADATA &&
-        mReader->IsWaitingMediaResources()) {
-      // change state to DECODER_STATE_WAIT_FOR_RESOURCES
-      StartWaitForResources();
-      return NS_OK;
-    }
+  if (NS_SUCCEEDED(res) &&
+      mState == DECODER_STATE_DECODING_METADATA &&
+      mReader->IsWaitingMediaResources()) {
+    // change state to DECODER_STATE_WAIT_FOR_RESOURCES
+    StartWaitForResources();
+    return NS_OK;
   }
 
   mInfo = info;
 
   if (NS_FAILED(res) || (!info.HasValidMedia())) {
     return NS_ERROR_FAILURE;
   }
   mDecoder->StartProgressUpdates();
   mGotDurationFromMetaData = (GetDuration() != -1);
 
-  if (HasAudio()) {
-    RefPtr<nsIRunnable> decodeTask(
-      NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DispatchAudioDecodeTaskIfNeeded));
-    AudioQueue().AddPopListener(decodeTask, mDecodeTaskQueue);
-  }
-  if (HasVideo()) {
-    RefPtr<nsIRunnable> decodeTask(
-      NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DispatchVideoDecodeTaskIfNeeded));
-    VideoQueue().AddPopListener(decodeTask, mDecodeTaskQueue);
+  VideoData* videoData = FindStartTime();
+  if (videoData) {
+    ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
+    RenderVideoFrame(videoData, TimeStamp::Now());
   }
 
-  if (mRealTime) {
-    SetStartTime(0);
-    res = FinishDecodeMetadata();
-    NS_ENSURE_SUCCESS(res, res);
-  } else {
-    if (HasAudio()) {
-      ReentrantMonitorAutoExit unlock(mDecoder->GetReentrantMonitor());
-      mReader->RequestAudioData();
-    }
-    if (HasVideo()) {
-      ReentrantMonitorAutoExit unlock(mDecoder->GetReentrantMonitor());
-      mReader->RequestVideoData(false, 0);
-    }
-  }
-
-  return NS_OK;
-}
-
-nsresult
-MediaDecoderStateMachine::FinishDecodeMetadata()
-{
-  AssertCurrentThreadInMonitor();
-  NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
-  DECODER_LOG(PR_LOG_DEBUG, "Decoding Media Headers");
-
   if (mState == DECODER_STATE_SHUTDOWN) {
     return NS_ERROR_FAILURE;
   }
 
-  if (!mRealTime) {
-
-    const VideoData* v = VideoQueue().PeekFront();
-    const AudioData* a = AudioQueue().PeekFront();
-
-    int64_t startTime = std::min<int64_t>(a ? a->mTime : INT64_MAX,
-                                          v ? v->mTime : INT64_MAX);
-    if (startTime == INT64_MAX) {
-      startTime = 0;
-    }
-    DECODER_LOG(PR_LOG_DEBUG, "DecodeMetadata first video frame start %lld",
-                              v ? v->mTime : -1);
-    DECODER_LOG(PR_LOG_DEBUG, "DecodeMetadata first audio frame start %lld",
-                              a ? a->mTime : -1);
-    SetStartTime(startTime);
-    if (VideoQueue().GetSize()) {
-      ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
-      RenderVideoFrame(VideoQueue().PeekFront(), TimeStamp::Now());
-    }
-  }
-
   NS_ASSERTION(mStartTime != -1, "Must have start time");
   MOZ_ASSERT((!HasVideo() && !HasAudio()) ||
               !(mMediaSeekable && mTransportSeekable) || mEndTime != -1,
               "Active seekable media should have end time");
   MOZ_ASSERT(!(mMediaSeekable && mTransportSeekable) ||
              GetDuration() != -1, "Seekable media should have duration");
   DECODER_LOG(PR_LOG_DEBUG, "Media goes from %lld to %lld (duration %lld) "
               "transportSeekable=%d, mediaSeekable=%d",
@@ -2261,25 +1849,36 @@ MediaDecoderStateMachine::FinishDecodeMe
 
   // Inform the element that we've loaded the metadata and the first frame.
   nsCOMPtr<nsIRunnable> metadataLoadedEvent =
     new AudioMetadataEventRunner(mDecoder,
                                  mInfo.mAudio.mChannels,
                                  mInfo.mAudio.mRate,
                                  HasAudio(),
                                  HasVideo(),
-                                 mMetadataTags.forget());
-  NS_DispatchToMainThread(metadataLoadedEvent, NS_DISPATCH_NORMAL);
+                                 tags);
+  NS_DispatchToMainThread(metadataLoadedEvent);
+
+  if (HasAudio()) {
+    RefPtr<nsIRunnable> decodeTask(
+      NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DispatchAudioDecodeTaskIfNeeded));
+    AudioQueue().AddPopListener(decodeTask, mDecodeTaskQueue);
+  }
+  if (HasVideo()) {
+    RefPtr<nsIRunnable> decodeTask(
+      NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DispatchVideoDecodeTaskIfNeeded));
+    VideoQueue().AddPopListener(decodeTask, mDecodeTaskQueue);
+  }
 
   if (mState == DECODER_STATE_DECODING_METADATA) {
     DECODER_LOG(PR_LOG_DEBUG, "Changed state from DECODING_METADATA to DECODING");
     StartDecoding();
   }
 
-  // For very short media the metadata decode can decode the entire media.
+  // For very short media FindStartTime() can decode the entire media.
   // So we need to check if this has occurred, else our decode pipeline won't
   // run (since it doesn't need to) and we won't detect end of stream.
   CheckIfDecodeComplete();
 
   if ((mState == DECODER_STATE_DECODING || mState == DECODER_STATE_COMPLETED) &&
       mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING &&
       !IsPlaying())
   {
@@ -2288,29 +1887,30 @@ MediaDecoderStateMachine::FinishDecodeMe
 
   return NS_OK;
 }
 
 void MediaDecoderStateMachine::DecodeSeek()
 {
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
+  AutoSetOnScopeExit<bool> unsetOnExit(mDispatchedDecodeSeekTask, false);
   if (mState != DECODER_STATE_SEEKING) {
     return;
   }
 
   // During the seek, don't have a lock on the decoder state,
   // otherwise long seek operations can block the main thread.
   // The events dispatched to the main thread are SYNC calls.
   // These calls are made outside of the decode monitor lock so
   // it is safe for the main thread to makes calls that acquire
   // the lock since it won't deadlock. We check the state when
   // acquiring the lock again in case shutdown has occurred
   // during the time when we didn't have the lock.
-  int64_t seekTime = mCurrentSeekTarget.mTime;
+  int64_t seekTime = mSeekTarget.mTime;
   mDecoder->StopProgressUpdates();
 
   bool currentTimeChanged = false;
   const int64_t mediaTime = GetMediaTime();
   if (mediaTime != seekTime) {
     currentTimeChanged = true;
     // Stop playback now to ensure that while we're outside the monitor
     // dispatching SeekingStarted, playback doesn't advance and mess with
@@ -2329,104 +1929,73 @@ void MediaDecoderStateMachine::DecodeSee
   // inform the element and its users that we have no frames
   // to display
   {
     ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
     nsCOMPtr<nsIRunnable> startEvent =
       NS_NewRunnableMethod(mDecoder, &MediaDecoder::SeekingStarted);
     NS_DispatchToMainThread(startEvent, NS_DISPATCH_SYNC);
   }
-  if (mState != DECODER_STATE_SEEKING) {
-    // May have shutdown while we released the monitor.
-    return;
-  }
-
-  if (!currentTimeChanged) {
-    DECODER_LOG(PR_LOG_DEBUG, "Seek !currentTimeChanged...");
-    mDecodeToSeekTarget = false;
-    nsresult rv = mDecodeTaskQueue->Dispatch(
-      NS_NewRunnableMethod(this, &MediaDecoderStateMachine::SeekCompleted));
-    if (NS_FAILED(rv)) {
-      DecodeError();
-    }
-  } else {
+
+  int64_t newCurrentTime = seekTime;
+  if (currentTimeChanged) {
     // The seek target is different than the current playback position,
     // we'll need to seek the playback position, so shutdown our decode
     // and audio threads.
     StopAudioThread();
     ResetPlayback();
-
     nsresult res;
     {
       ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
-      // We must not hold the state machine monitor while we call into
-      // the reader, since it could do I/O or deadlock some other way.
-      res = mReader->ResetDecode();
-      if (NS_SUCCEEDED(res)) {
-        res = mReader->Seek(seekTime,
-                            mStartTime,
-                            mEndTime,
-                            mediaTime);
+      // Now perform the seek. We must not hold the state machine monitor
+      // while we seek, since the seek reads data, which could block on I/O.
+      res = mReader->Seek(seekTime,
+                          mStartTime,
+                          mEndTime,
+                          mediaTime);
+
+      if (NS_SUCCEEDED(res) && mSeekTarget.mType == SeekTarget::Accurate) {
+        res = mReader->DecodeToTarget(seekTime);
       }
     }
-    if (NS_FAILED(res)) {
-      DecodeError();
-      return;
-    }
-
-    // We must decode the first samples of active streams, so we can determine
-    // the new stream time. So dispatch tasks to do that.
-    mDecodeToSeekTarget = true;
-    DispatchDecodeTasksIfNeeded();
-  }
-}
-
-void
-MediaDecoderStateMachine::SeekCompleted()
-{
-  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
-
-  // We must reset the seek target when exiting this function, but not
-  // before, as if we dropped the monitor in any function called here,
-  // we may begin a new seek on the state machine thread, and be in
-  // an inconsistent state.
-  AutoSetOnScopeExit<SeekTarget> reset(mCurrentSeekTarget, SeekTarget());
-
-  NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
-  if (mState != DECODER_STATE_SEEKING) {
-    return;
-  }
-
-  int64_t seekTime = mCurrentSeekTarget.mTime;
-  int64_t newCurrentTime = mCurrentSeekTarget.mTime;
-
-  // Setup timestamp state.
-  VideoData* video = VideoQueue().PeekFront();
-  if (seekTime == mEndTime) {
-    newCurrentTime = mAudioStartTime = seekTime;
-  } else if (HasAudio()) {
-    AudioData* audio = AudioQueue().PeekFront();
-    newCurrentTime = mAudioStartTime = audio ? audio->mTime : seekTime;
-  } else {
-    newCurrentTime = video ? video->mTime : seekTime;
-  }
-  mPlayDuration = newCurrentTime - mStartTime;
-
-  if (HasVideo()) {
-    if (video) {
+
+    if (NS_SUCCEEDED(res)) {
+      int64_t nextSampleStartTime = 0;
+      VideoData* video = nullptr;
       {
         ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
-        RenderVideoFrame(video, TimeStamp::Now());
+        video = mReader->FindStartTime(nextSampleStartTime);
+      }
+
+      // Setup timestamp state.
+      if (seekTime == mEndTime) {
+        newCurrentTime = mAudioStartTime = seekTime;
+      } else if (HasAudio()) {
+        AudioData* audio = AudioQueue().PeekFront();
+        newCurrentTime = mAudioStartTime = audio ? audio->mTime : seekTime;
+      } else {
+        newCurrentTime = video ? video->mTime : seekTime;
       }
-      nsCOMPtr<nsIRunnable> event =
-        NS_NewRunnableMethod(mDecoder, &MediaDecoder::Invalidate);
-      NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
+      mPlayDuration = newCurrentTime - mStartTime;
+
+      if (HasVideo()) {
+        if (video) {
+          {
+            ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
+            RenderVideoFrame(video, TimeStamp::Now());
+          }
+          nsCOMPtr<nsIRunnable> event =
+            NS_NewRunnableMethod(mDecoder, &MediaDecoder::Invalidate);
+          NS_DispatchToMainThread(event);
+        }
+      }
+    } else {
+      DecodeError();
     }
   }
-
   mDecoder->StartProgressUpdates();
   if (mState == DECODER_STATE_DECODING_METADATA ||
       mState == DECODER_STATE_DORMANT ||
       mState == DECODER_STATE_SHUTDOWN) {
     return;
   }
 
   // Change state to DECODING or COMPLETED now. SeekingStopped will
@@ -2439,27 +2008,30 @@ MediaDecoderStateMachine::SeekCompleted(
     // Seeked to end of media, move to COMPLETED state. Note we don't do
     // this if we're playing a live stream, since the end of media will advance
     // once we download more data!
     DECODER_LOG(PR_LOG_DEBUG, "Changed state from SEEKING (to %lld) to COMPLETED", seekTime);
     stopEvent = NS_NewRunnableMethod(mDecoder, &MediaDecoder::SeekingStoppedAtEnd);
     // Explicitly set our state so we don't decode further, and so
     // we report playback ended to the media element.
     mState = DECODER_STATE_COMPLETED;
+    mIsAudioDecoding = false;
+    mIsVideoDecoding = false;
     DispatchDecodeTasksIfNeeded();
   } else {
     DECODER_LOG(PR_LOG_DEBUG, "Changed state from SEEKING (to %lld) to DECODING", seekTime);
     stopEvent = NS_NewRunnableMethod(mDecoder, &MediaDecoder::SeekingStopped);
     StartDecoding();
   }
 
-  // Ensure timestamps are up to date.
-  UpdatePlaybackPositionInternal(newCurrentTime);
-  if (mDecoder->GetDecodedStream()) {
-    SetSyncPointForMediaStream();
+  if (newCurrentTime != mediaTime) {
+    UpdatePlaybackPositionInternal(newCurrentTime);
+    if (mDecoder->GetDecodedStream()) {
+      SetSyncPointForMediaStream();
+    }
   }
 
   // Try to decode another frame to detect if we're at the end...
   DECODER_LOG(PR_LOG_DEBUG, "Seek completed, mCurrentFrameTime=%lld", mCurrentFrameTime);
 
   {
     ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
     NS_DispatchToMainThread(stopEvent, NS_DISPATCH_SYNC);
@@ -2476,20 +2048,18 @@ MediaDecoderStateMachine::SeekCompleted(
 // Runnable to dispose of the decoder and state machine on the main thread.
 class nsDecoderDisposeEvent : public nsRunnable {
 public:
   nsDecoderDisposeEvent(already_AddRefed<MediaDecoder> aDecoder,
                         already_AddRefed<MediaDecoderStateMachine> aStateMachine)
     : mDecoder(aDecoder), mStateMachine(aStateMachine) {}
   NS_IMETHOD Run() {
     NS_ASSERTION(NS_IsMainThread(), "Must be on main thread.");
-    MOZ_ASSERT(mStateMachine);
-    MOZ_ASSERT(mDecoder);
-    mStateMachine->BreakCycles();
-    mDecoder->BreakCycles();
+    mStateMachine->ReleaseDecoder();
+    mDecoder->ReleaseStateMachine();
     mStateMachine = nullptr;
     mDecoder = nullptr;
     return NS_OK;
   }
 private:
   nsRefPtr<MediaDecoder> mDecoder;
   nsRefPtr<MediaDecoderStateMachine> mStateMachine;
 };
@@ -2520,48 +2090,39 @@ nsresult MediaDecoderStateMachine::RunSt
   MediaResource* resource = mDecoder->GetResource();
   NS_ENSURE_TRUE(resource, NS_ERROR_NULL_POINTER);
 
   switch (mState) {
     case DECODER_STATE_SHUTDOWN: {
       if (IsPlaying()) {
         StopPlayback();
       }
-
-      // Put a task in the decode queue to shut down the reader.
-      RefPtr<nsIRunnable> task(
-        NS_NewRunnableMethod(mReader, &MediaDecoderReader::Shutdown));
-      mDecodeTaskQueue->Dispatch(task);
-
       StopAudioThread();
       // If mAudioThread is non-null after StopAudioThread completes, we are
       // running in a nested event loop waiting for Shutdown() on
       // mAudioThread to complete.  Return to the event loop and let it
       // finish processing before continuing with shutdown.
       if (mAudioThread) {
         MOZ_ASSERT(mStopAudioThread);
         return NS_OK;
       }
 
-      {
-        // Wait for the thread decoding to exit.
-        ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
-        mDecodeTaskQueue->Shutdown();
-        mDecodeTaskQueue = nullptr;
-      }
-
-      AudioQueue().Reset();
-      VideoQueue().Reset();
-
       // The reader's listeners hold references to the state machine,
       // creating a cycle which keeps the state machine and its shared
       // thread pools alive. So break it here.
       AudioQueue().ClearListeners();
       VideoQueue().ClearListeners();
 
+      {
+        ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
+        // Wait for the thread decoding to exit.
+        mDecodeTaskQueue->Shutdown();
+        mDecodeTaskQueue = nullptr;
+        mReader->ReleaseMediaResources();
+      }
       // Now that those threads are stopped, there's no possibility of
       // mPendingWakeDecoder being needed again. Revoke it.
       mPendingWakeDecoder = nullptr;
 
       MOZ_ASSERT(mState == DECODER_STATE_SHUTDOWN,
                  "How did we escape from the shutdown state?");
       // We must daisy-chain these events to destroy the decoder. We must
       // destroy the decoder on the main thread, but we can't destroy the
@@ -2668,17 +2229,18 @@ nsresult MediaDecoderStateMachine::RunSt
       {
         StartPlayback();
       }
       NS_ASSERTION(IsStateMachineScheduled(), "Must have timer scheduled");
       return NS_OK;
     }
 
     case DECODER_STATE_SEEKING: {
-      return EnqueueDecodeSeekTask();
+      // Ensure we have a decode thread to perform the seek.
+      return EnqueueDecodeSeekTask();
     }
 
     case DECODER_STATE_COMPLETED: {
       // Play the remaining media. We want to run AdvanceFrame() at least
       // once to ensure the current playback position is advanced to the
       // end of the media, and so that we update the readyState.
       if (VideoQueue().GetSize() > 0 ||
           (HasAudio() && !mAudioCompleted) ||
@@ -2967,140 +2529,44 @@ void MediaDecoderStateMachine::Wait(int6
     int64_t ms = static_cast<int64_t>(NS_round((end - now).ToSeconds() * 1000));
     if (ms == 0 || ms > UINT32_MAX) {
       break;
     }
     mDecoder->GetReentrantMonitor().Wait(PR_MillisecondsToInterval(static_cast<uint32_t>(ms)));
   }
 }
 
-nsresult
-MediaDecoderStateMachine::DropVideoUpToSeekTarget(VideoData* aSample)
-{
-  nsAutoPtr<VideoData> video(aSample);
-
-  const int64_t target = mCurrentSeekTarget.mTime;
-
-  // If the frame end time is less than the seek target, we won't want
-  // to display this frame after the seek, so discard it.
-  if (target >= video->GetEndTime()) {
-    DECODER_LOG(PR_LOG_DEBUG,
-                "DropVideoUpToSeekTarget() pop video frame [%lld, %lld] target=%lld",
-                video->mTime, video->GetEndTime(), target);
-    mFirstVideoFrameAfterSeek = video;
-  } else {
-    if (target >= video->mTime && video->GetEndTime() >= target) {
-      // The seek target lies inside this frame's time slice. Adjust the frame's
-      // start time to match the seek target. We do this by replacing the
-      // first frame with a shallow copy which has the new timestamp.
-      VideoData* temp = VideoData::ShallowCopyUpdateTimestamp(video, target);
-      video = temp;
-    }
-    mFirstVideoFrameAfterSeek = nullptr;
-
-    DECODER_LOG(PR_LOG_DEBUG,
-                "DropVideoUpToSeekTarget() found video frame [%lld, %lld] containing target=%lld",
-                video->mTime, video->GetEndTime(), target);
-
-    VideoQueue().PushFront(video.forget());
-
-  }
-  return NS_OK;
-}
-
-nsresult
-MediaDecoderStateMachine::DropAudioUpToSeekTarget(AudioData* aSample)
-{
-  nsAutoPtr<AudioData> audio(aSample);
-  MOZ_ASSERT(audio &&
-             mCurrentSeekTarget.IsValid() &&
-             mCurrentSeekTarget.mType == SeekTarget::Accurate);
-
-  CheckedInt64 startFrame = UsecsToFrames(audio->mTime,
-                                          mInfo.mAudio.mRate);
-  CheckedInt64 targetFrame = UsecsToFrames(mCurrentSeekTarget.mTime,
-                                           mInfo.mAudio.mRate);
-  if (!startFrame.isValid() || !targetFrame.isValid()) {
-    return NS_ERROR_FAILURE;
-  }
-  if (startFrame.value() + audio->mFrames <= targetFrame.value()) {
-    // Our seek target lies after the frames in this AudioData. Don't
-    // push it onto the audio queue, and keep decoding forwards.
-    return NS_OK;
-  }
-  if (startFrame.value() > targetFrame.value()) {
-    // The seek target doesn't lie in the audio block just after the last
-    // audio frames we've seen which were before the seek target. This
-    // could have been the first audio data we've seen after seek, i.e. the
-    // seek terminated after the seek target in the audio stream. Just
-    // abort the audio decode-to-target, the state machine will play
-    // silence to cover the gap. Typically this happens in poorly muxed
-    // files.
-    NS_WARNING("Audio not synced after seek, maybe a poorly muxed file?");
-    AudioQueue().Push(audio.forget());
-    return NS_OK;
-  }
-
-  // The seek target lies somewhere in this AudioData's frames, strip off
-  // any frames which lie before the seek target, so we'll begin playback
-  // exactly at the seek target.
-  NS_ASSERTION(targetFrame.value() >= startFrame.value(),
-               "Target must be at or after data start.");
-  NS_ASSERTION(targetFrame.value() < startFrame.value() + audio->mFrames,
-               "Data must end after target.");
-
-  int64_t framesToPrune = targetFrame.value() - startFrame.value();
-  if (framesToPrune > audio->mFrames) {
-    // We've messed up somehow. Don't try to trim frames, the |frames|
-    // variable below will overflow.
-    NS_WARNING("Can't prune more frames that we have!");
-    return NS_ERROR_FAILURE;
-  }
-  uint32_t frames = audio->mFrames - static_cast<uint32_t>(framesToPrune);
-  uint32_t channels = audio->mChannels;
-  nsAutoArrayPtr<AudioDataValue> audioData(new AudioDataValue[frames * channels]);
-  memcpy(audioData.get(),
-         audio->mAudioData.get() + (framesToPrune * channels),
-         frames * channels * sizeof(AudioDataValue));
-  CheckedInt64 duration = FramesToUsecs(frames, mInfo.mAudio.mRate);
-  if (!duration.isValid()) {
-    return NS_ERROR_FAILURE;
-  }
-  nsAutoPtr<AudioData> data(new AudioData(audio->mOffset,
-                                          mCurrentSeekTarget.mTime,
-                                          duration.value(),
-                                          frames,
-                                          audioData.forget(),
-                                          channels));
-  AudioQueue().PushFront(data.forget());
-
-  return NS_OK;
-}
-
-void MediaDecoderStateMachine::SetStartTime(int64_t aStartTimeUsecs)
+VideoData* MediaDecoderStateMachine::FindStartTime()
 {
   NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
-  DECODER_LOG(PR_LOG_DEBUG, "SetStartTime(%lld)", aStartTimeUsecs);
+  AssertCurrentThreadInMonitor();
+  int64_t startTime = 0;
   mStartTime = 0;
-  if (aStartTimeUsecs != 0) {
-    mStartTime = aStartTimeUsecs;
+  VideoData* v = nullptr;
+  {
+    ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
+    v = mReader->FindStartTime(startTime);
+  }
+  if (startTime != 0) {
+    mStartTime = startTime;
     if (mGotDurationFromMetaData) {
       NS_ASSERTION(mEndTime != -1,
                    "We should have mEndTime as supplied duration here");
       // We were specified a duration from a Content-Duration HTTP header.
       // Adjust mEndTime so that mEndTime-mStartTime matches the specified
       // duration.
       mEndTime = mStartTime + mEndTime;
     }
   }
   // Set the audio start time to be start of media. If this lies before the
   // first actual audio frame we have, we'll inject silence during playback
   // to ensure the audio starts at the correct time.
   mAudioStartTime = mStartTime;
-  DECODER_LOG(PR_LOG_DEBUG, "Set media start time to %lld", mStartTime);
+  DECODER_LOG(PR_LOG_DEBUG, "Media start time is %lld", mStartTime);
+  return v;
 }
 
 void MediaDecoderStateMachine::UpdateReadyState() {
   AssertCurrentThreadInMonitor();
 
   MediaDecoderOwner::NextFrameStatus nextFrameStatus = GetNextFrameStatus();
   if (nextFrameStatus == mLastFrameStatus) {
     return;
@@ -3315,17 +2781,17 @@ void MediaDecoderStateMachine::SetPlayba
   if (mPlaybackRate == aPlaybackRate) {
     return;
   }
 
   // Get position of the last time we changed the rate.
   if (!HasAudio()) {
     // mBasePosition is a position in the video stream, not an absolute time.
     if (mState == DECODER_STATE_SEEKING) {
-      mBasePosition = mCurrentSeekTarget.mTime - mStartTime;
+      mBasePosition = mSeekTarget.mTime - mStartTime;
     } else {
       mBasePosition = GetVideoStreamPosition();
     }
     mPlayDuration = mBasePosition;
     mResetPlayStartTime = true;
     mPlayStartTime = TimeStamp::Now();
   }
 
--- a/content/media/MediaDecoderStateMachine.h
+++ b/content/media/MediaDecoderStateMachine.h
@@ -1,82 +1,76 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim:set ts=2 sw=2 sts=2 et cindent: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 /*
-
-Each media element for a media file has one thread called the "audio thread".
-
-The audio thread  writes the decoded audio data to the audio
-hardware. This is done in a separate thread to ensure that the
-audio hardware gets a constant stream of data without
-interruption due to decoding or display. At some point
-AudioStream will be refactored to have a callback interface
-where it asks for data and this thread will no longer be
-needed.
+Each video element for a media file has two threads:
 
-The element/state machine also has a MediaTaskQueue which runs in a
-SharedThreadPool that is shared with all other elements/decoders. The state
-machine dispatches tasks to this to call into the MediaDecoderReader to
-request decoded audio or video data. The Reader will call back with decoded
-samples when it has them available, and the state machine places the decoded
-samples into its queues for the consuming threads to pull from.
+  1) The Audio thread writes the decoded audio data to the audio
+     hardware. This is done in a separate thread to ensure that the
+     audio hardware gets a constant stream of data without
+     interruption due to decoding or display. At some point
+     AudioStream will be refactored to have a callback interface
+     where it asks for data and an extra thread will no longer be
+     needed.
 
-The MediaDecoderReader can choose to decode asynchronously, or synchronously
-and return requested samples synchronously inside its Request*Data()
-functions via callback. Asynchronous decoding is preferred, and should be
-used for any new readers.
+  2) The decode thread. This thread reads from the media stream and
+     decodes the Theora and Vorbis data. It places the decoded data into
+     queues for the other threads to pull from.
 
+All file reads, seeks, and all decoding must occur on the decode thread.
 Synchronisation of state between the threads is done via a monitor owned
 by MediaDecoder.
 
-The lifetime of the audio thread is controlled by the state machine when
-it runs on the shared state machine thread. When playback needs to occur
-the audio thread is created and an event dispatched to run it. The audio
-thread exits when audio playback is completed or no longer required.
+The lifetime of the decode and audio threads is controlled by the state
+machine when it runs on the shared state machine thread. When playback
+needs to occur they are created and events dispatched to them to run
+them. These events exit when decoding/audio playback is completed or
+no longer required.
 
 A/V synchronisation is handled by the state machine. It examines the audio
 playback time and compares this to the next frame in the queue of video
 frames. If it is time to play the video frame it is then displayed, otherwise
 it schedules the state machine to run again at the time of the next frame.
 
 Frame skipping is done in the following ways:
 
   1) The state machine will skip all frames in the video queue whose
      display time is less than the current audio time. This ensures
      the correct frame for the current time is always displayed.
 
-  2) The decode tasks will stop decoding interframes and read to the
+  2) The decode thread will stop decoding interframes and read to the
      next keyframe if it determines that decoding the remaining
      interframes will cause playback issues. It detects this by:
        a) If the amount of audio data in the audio queue drops
           below a threshold whereby audio may start to skip.
        b) If the video queue drops below a threshold where it
           will be decoding video data that won't be displayed due
           to the decode thread dropping the frame immediately.
-     TODO: In future we should only do this when the Reader is decoding
-           synchronously.
 
 When hardware accelerated graphics is not available, YCbCr conversion
-is done on the decode task queue when video frames are decoded.
+is done on the decode thread when video frames are decoded.
 
-The decode task queue pushes decoded audio and videos frames into two
+The decode thread pushes decoded audio and videos frames into two
 separate queues - one for audio and one for video. These are kept
 separate to make it easy to constantly feed audio data to the audio
 hardware while allowing frame skipping of video data. These queues are
 threadsafe, and neither the decode, audio, or state machine should
 be able to monopolize them, and cause starvation of the other threads.
 
 Both queues are bounded by a maximum size. When this size is reached
-the decode tasks will no longer request video or audio depending on the
-queue that has reached the threshold. If both queues are full, no more
-decode tasks will be dispatched to the decode task queue, so other
-decoders will have an opportunity to run.
+the decode thread will no longer decode video or audio depending on the
+queue that has reached the threshold. If both queues are full, the decode
+thread will wait on the decoder monitor.
+
+When the decode queues are full (they've reached their maximum size) and
+the decoder is not in the PLAYING play state, the state machine may opt
+to shut down the decode thread in order to conserve resources.
 
 During playback the audio thread will be idle (via a Wait() on the
 monitor) if the audio queue is empty. Otherwise it constantly pops
 audio data off the queue and plays it with a blocking write to the audio
 hardware (via AudioStream).
 
 */
 #if !defined(MediaDecoderStateMachine_h__)
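
The bounded-queue behaviour the restored comment block describes (the decode thread
stops decoding, or waits on the decoder monitor, when a queue is full; the audio thread
waits when its queue is empty) can be sketched with standard primitives. This is an
illustration only with hypothetical names, not the actual MediaQueue implementation:

  #include <condition_variable>
  #include <deque>
  #include <mutex>

  template <typename Sample>
  class BoundedSampleQueue {
  public:
    explicit BoundedSampleQueue(size_t aMaxSize) : mMaxSize(aMaxSize) {}

    // Producer (decode thread): block while the queue is full.
    void Push(Sample aSample) {
      std::unique_lock<std::mutex> lock(mMonitor);
      mNotFull.wait(lock, [this] { return mSamples.size() < mMaxSize; });
      mSamples.push_back(std::move(aSample));
      mNotEmpty.notify_all();
    }

    // Consumer (audio thread): block while the queue is empty.
    Sample Pop() {
      std::unique_lock<std::mutex> lock(mMonitor);
      mNotEmpty.wait(lock, [this] { return !mSamples.empty(); });
      Sample sample = std::move(mSamples.front());
      mSamples.pop_front();
      mNotFull.notify_all();
      return sample;
    }

  private:
    std::mutex mMonitor;                 // stand-in for the decoder monitor
    std::condition_variable mNotFull;
    std::condition_variable mNotEmpty;
    std::deque<Sample> mSamples;
    const size_t mMaxSize;
  };
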
@@ -84,17 +78,16 @@ hardware (via AudioStream).
 
 #include "mozilla/Attributes.h"
 #include "nsThreadUtils.h"
 #include "MediaDecoder.h"
 #include "mozilla/ReentrantMonitor.h"
 #include "MediaDecoderReader.h"
 #include "MediaDecoderOwner.h"
 #include "MediaMetadataManager.h"
-#include "MediaDataDecodedListener.h"
 
 class nsITimer;
 
 namespace mozilla {
 
 class AudioSegment;
 class VideoSegment;
 class MediaTaskQueue;
@@ -104,17 +97,17 @@ class SharedThreadPool;
 // GetTickCount() and conflicts with MediaDecoderStateMachine::GetCurrentTime
 // implementation.
 #ifdef GetCurrentTime
 #undef GetCurrentTime
 #endif
 
 /*
   The state machine class. This manages the decoding and seeking in the
-  MediaDecoderReader on the decode task queue, and A/V sync on the shared
+  MediaDecoderReader on the decode thread, and A/V sync on the shared
   state machine thread, and controls the audio "push" thread.
 
   All internal state is synchronised via the decoder monitor. State changes
   are either propagated by NotifyAll on the monitor (typically when state
   changes need to be propagated to non-state machine threads) or by scheduling
   the state machine to run another cycle on the shared state machine thread.
 
   See MediaDecoder.h for more details.
@@ -314,19 +307,20 @@ public:
 
   // Timer function to implement ScheduleStateMachine(aUsecs).
   nsresult TimeoutExpired(int aGeneration);
 
   // Set the media fragment end time. aEndTime is in microseconds.
   void SetFragmentEndTime(int64_t aEndTime);
 
   // Drop reference to decoder.  Only called during shutdown dance.
-  void BreakCycles() {
+  void ReleaseDecoder() {
+    MOZ_ASSERT(mReader);
     if (mReader) {
-      mReader->BreakCycles();
+      mReader->ReleaseDecoder();
     }
     mDecoder = nullptr;
   }
 
   // If we're playing into a MediaStream, record the current point in the
   // MediaStream and the current point in our media resource so later we can
   // convert MediaStream playback positions to media resource positions. Best to
   // call this while we're not playing (while the MediaStream is blocked). Can
@@ -358,32 +352,21 @@ public:
 
   // Notifies the state machine that it should minimize the number of decoded
   // samples we preroll until playback starts. The first time playback starts
   // the state machine is free to return to prerolling normally. Note
   // "prerolling" in this context refers to when we decode and buffer decoded
   // samples in advance of when they're needed for playback.
   void SetMinimizePrerollUntilPlaybackStarts();
 
-  void OnAudioDecoded(AudioData* aSample);
-  void OnAudioEOS();
-  void OnVideoDecoded(VideoData* aSample);
-  void OnVideoEOS();
-  void OnDecodeError();
-
 protected:
   virtual ~MediaDecoderStateMachine();
 
   void AssertCurrentThreadInMonitor() const { mDecoder->GetReentrantMonitor().AssertCurrentThreadIn(); }
 
-  // Inserts MediaData* samples into their respective MediaQueues.
-  // aSample must not be null.
-  void Push(AudioData* aSample);
-  void Push(VideoData* aSample);
-
   class WakeDecoderRunnable : public nsRunnable {
   public:
     WakeDecoderRunnable(MediaDecoderStateMachine* aSM)
       : mMutex("WakeDecoderRunnable"), mStateMachine(aSM) {}
     NS_IMETHOD Run() MOZ_OVERRIDE
     {
       nsRefPtr<MediaDecoderStateMachine> stateMachine;
       {
@@ -409,24 +392,18 @@ protected:
     // would mean in some cases we'd have to destroy mStateMachine from this
     // object, which would be problematic since MediaDecoderStateMachine can
     // only be destroyed on the main thread whereas this object can be destroyed
     // on the media stream graph thread.
     MediaDecoderStateMachine* mStateMachine;
   };
   WakeDecoderRunnable* GetWakeDecoderRunnable();
 
-  MediaQueue<AudioData>& AudioQueue() { return mAudioQueue; }
-  MediaQueue<VideoData>& VideoQueue() { return mVideoQueue; }
-
-  nsresult FinishDecodeMetadata();
-
-  RefPtr<MediaDataDecodedListener<MediaDecoderStateMachine>> mMediaDecodedListener;
-
-  nsAutoPtr<MetadataTags> mMetadataTags;
+  MediaQueue<AudioData>& AudioQueue() { return mReader->AudioQueue(); }
+  MediaQueue<VideoData>& VideoQueue() { return mReader->VideoQueue(); }
 
   // True if our buffers of decoded audio are not full, and we should
   // decode more.
   bool NeedToDecodeAudio();
 
   // Decodes some audio. This should be run on the decode task queue.
   void DecodeAudio();
 
@@ -486,20 +463,21 @@ protected:
   // so far.
   int64_t GetVideoStreamPosition();
 
   // Return the current time, either the audio clock if available (if the media
   // has audio, and the playback is possible), or a clock for the video.
   // Called on the state machine thread.
   int64_t GetClock();
 
-  nsresult DropAudioUpToSeekTarget(AudioData* aSample);
-  nsresult DropVideoUpToSeekTarget(VideoData* aSample);
-
-  void SetStartTime(int64_t aStartTimeUsecs);
+  // Returns the presentation time of the first audio or video frame in the
+  // media.  If the media has video, it returns the first video frame. The
+  // decoder monitor must be held with exactly one lock count. Called on the
+  // state machine thread.
+  VideoData* FindStartTime();
 
   // Update only the state machine's current playback position (and duration,
   // if unknown).  Does not update the playback position on the decoder or
   // media element -- use UpdatePlaybackPosition for that.  Called on the state
   // machine thread, caller must hold the decoder lock.
   void UpdatePlaybackPositionInternal(int64_t aTime);
 
   // Pushes the image down the rendering pipeline. Called on the shared state
@@ -561,20 +539,16 @@ protected:
 
   void StartWaitForResources();
 
   // Dispatches a task to the decode task queue to begin decoding metadata.
   // This is threadsafe and can be called on any thread.
   // The decoder monitor must be held.
   nsresult EnqueueDecodeMetadataTask();
 
-  // Dispatches a task to the decode task queue to seek the decoder.
-  // The decoder monitor must be held.
-  nsresult EnqueueDecodeSeekTask();
-
   nsresult DispatchAudioDecodeTaskIfNeeded();
 
   // Ensures a task to decode audio has been dispatched to the decode task queue.
   // If a task to decode has already been dispatched, this does nothing,
   // otherwise this dispatches a task to do the decode.
   // This is called on the state machine or decode threads.
   // The decoder monitor must be held.
   nsresult EnsureAudioDecodeTaskQueued();
@@ -582,26 +556,36 @@ protected:
   nsresult DispatchVideoDecodeTaskIfNeeded();
 
   // Ensures a task to decode video has been dispatched to the decode task queue.
   // If a task to decode has already been dispatched, this does nothing,
   // otherwise this dispatches a task to do the decode.
   // The decoder monitor must be held.
   nsresult EnsureVideoDecodeTaskQueued();
 
+  // Dispatches a task to the decode task queue to seek the decoder.
+  // The decoder monitor must be held.
+  nsresult EnqueueDecodeSeekTask();
+
   // Calls the reader's SetIdle(). This is only called in a task dispatched to
   // the decode task queue, don't call it directly.
   void SetReaderIdle();
 
   // Re-evaluates the state and determines whether we need to dispatch
   // events to run the decode, or if not whether we should set the reader
   // to idle mode. This is threadsafe, and can be called from any thread.
   // The decoder monitor must be held.
   void DispatchDecodeTasksIfNeeded();
 
+  // Queries our state to see whether the decode has finished for all streams.
+  // If so, we move into DECODER_STATE_COMPLETED and schedule the state machine
+  // to run.
+  // The decoder monitor must be held.
+  void CheckIfDecodeComplete();
+
   // Returns the "media time". This is the absolute time which the media
   // playback has reached. i.e. this returns values in the range
   // [mStartTime, mEndTime], and mStartTime will not be 0 if the media does
   // not start at 0. Note this is different to the value returned
   // by GetCurrentTime(), which is in the range [0,duration].
   int64_t GetMediaTime() const {
     AssertCurrentThreadInMonitor();
     return mStartTime + mCurrentFrameTime;
@@ -615,39 +599,25 @@ protected:
   // hardware, so this can only be used as an upper bound. The decoder monitor
   // must be held when calling this. Called on the decode thread.
   int64_t GetDecodedAudioDuration();
 
   // Load metadata. Called on the decode thread. The decoder monitor
   // must be held with exactly one lock count.
   nsresult DecodeMetadata();
 
-  // Wraps the call to DecodeMetadata(), signals a DecodeError() on failure.
-  void CallDecodeMetadata();
-
-  // Checks whether we're finished decoding metadata, and switches to DECODING
-  // state if so.
-  void MaybeFinishDecodeMetadata();
-
   // Seeks to mSeekTarget. Called on the decode thread. The decoder monitor
   // must be held with exactly one lock count.
   void DecodeSeek();
 
-  void CheckIfSeekComplete();
-  bool IsAudioSeekComplete();
-  bool IsVideoSeekComplete();
+  // Decode loop, decodes data until EOF or shutdown.
+  // Called on the decode thread.
+  void DecodeLoop();
 
-  // Completes the seek operation, moves onto the next appropriate state.
-  void SeekCompleted();
-
-  // Queries our state to see whether the decode has finished for all streams.
-  // If so, we move into DECODER_STATE_COMPLETED and schedule the state machine
-  // to run.
-  // The decoder monitor must be held.
-  void CheckIfDecodeComplete();
+  void CallDecodeMetadata();
 
   // Copy audio from an AudioData packet to aOutput. This may require
   // inserting silence depending on the timing of the audio packet.
   void SendStreamAudio(AudioData* aAudio, DecodedStreamData* aStream,
                        AudioSegment* aOutput);
 
   // State machine thread run function. Defers to RunStateMachine().
   nsresult CallRunStateMachine();
@@ -662,45 +632,27 @@ protected:
     return !mTimeout.IsNull();
   }
 
   // Returns true if we're not playing and the decode thread has filled its
   // decode buffers and is waiting. We can shut the decode thread down in this
   // case as it may not be needed again.
   bool IsPausedAndDecoderWaiting();
 
-  // These return true if the respective stream's decode has not yet reached
-  // the end of stream.
-  bool IsAudioDecoding();
-  bool IsVideoDecoding();
-
   // The decoder object that created this state machine. The state machine
   // holds a strong reference to the decoder to ensure that the decoder stays
   // alive once media element has started the decoder shutdown process, and has
   // dropped its reference to the decoder. This enables the state machine to
   // keep using the decoder's monitor until the state machine has finished
   // shutting down, without fear of the monitor being destroyed. After
   // shutting down, the state machine will then release this reference,
   // causing the decoder to be destroyed. This is accessed on the decode,
   // state machine, audio and main threads.
   nsRefPtr<MediaDecoder> mDecoder;
 
-  // Time at which the last video sample was requested. If it takes too long
-  // before the sample arrives, we will increase the amount of audio we buffer.
-  // This is necessary for legacy synchronous decoders to prevent underruns.
-  TimeStamp mVideoDecodeStartTime;
-
-  // Queue of audio frames. This queue is threadsafe, and is accessed from
-  // the audio, decoder, state machine, and main threads.
-  MediaQueue<AudioData> mAudioQueue;
-
-  // Queue of video frames. This queue is threadsafe, and is accessed from
-  // the decoder, state machine, and main threads.
-  MediaQueue<VideoData> mVideoQueue;
-
   // The decoder monitor must be obtained before modifying this state.
   // NotifyAll on the monitor must be called when the state is changed so
   // that interested threads can wake up and alter behaviour if appropriate
   // Accessed on state machine, audio, main, and AV thread.
   State mState;
 
   // Thread for pushing audio onto the audio hardware.
   // The "audio push thread".
@@ -762,36 +714,29 @@ protected:
   // machine, decode, and main threads. Access controlled by decoder monitor.
   int64_t mEndTime;
 
   // Position to seek to in microseconds when the seek state transition occurs.
   // The decoder monitor lock must be obtained before reading or writing
   // this value. Accessed on main and decode thread.
   SeekTarget mSeekTarget;
 
-  // The position that we're currently seeking to. This differs from
-  // mSeekTarget, as mSeekTarget is the target we'll seek to next, whereas
-  // mCurrentSeekTarget is the position that the decode is in the process
-  // of seeking to.
-  // The decoder monitor lock must be obtained before reading or writing
-  // this value.
-  SeekTarget mCurrentSeekTarget;
-
   // Media Fragment end time in microseconds. Access controlled by decoder monitor.
   int64_t mFragmentEndTime;
 
   // The audio stream resource. Used on the state machine, and audio threads.
   // This is created and destroyed on the audio thread, while holding the
   // decoder monitor, so if this is used off the audio thread, you must
   // first acquire the decoder monitor and check that it is non-null.
   RefPtr<AudioStream> mAudioStream;
 
   // The reader, don't call its methods with the decoder monitor held.
-  // This is created in the state machine's constructor.
-  nsRefPtr<MediaDecoderReader> mReader;
+  // This is created in the play state machine's constructor, and destroyed
+  // in the play state machine's destructor.
+  nsAutoPtr<MediaDecoderReader> mReader;
 
   // Accessed only on the state machine thread.
   // Not an nsRevocableEventPtr since we must Revoke() it well before
   // this object is destroyed, anyway.
   // Protected by decoder monitor except during the SHUTDOWN state after the
   // decoder thread has been stopped.
   nsRevocableEventPtr<WakeDecoderRunnable> mPendingWakeDecoder;
 
@@ -867,22 +812,16 @@ protected:
   // got a few frames decoded before we consider whether decode is falling
   // behind. Otherwise our "we're falling behind" logic will trigger
   // unnecessarily if we start playing as soon as the first sample is
   // decoded. These two fields store how many video frames and audio
   // samples we must consume before we are considered to be finished prerolling.
   uint32_t mAudioPrerollUsecs;
   uint32_t mVideoPrerollFrames;
 
-  // This temporarily stores the first frame we decode after we seek.
-  // This is so that if we hit end of stream while we're decoding to reach
-  // the seek target, we will still have a frame that we can display as the
-  // last frame in the media.
-  nsAutoPtr<VideoData> mFirstVideoFrameAfterSeek;
-
   // When we start decoding (either for the first time, or after a pause)
   // we may be low on decoded data. We don't want our "low data" logic to
   // kick in and decide that we're low on decoded data because the download
   // can't keep up with the decode, and cause us to pause playback. So we
   // have a "preroll" stage, where we ignore the results of our "low data"
   // logic during the first few frames of our decode. This occurs during
   // playback. The flags below are true when the corresponding stream is
   // being "prerolled".
@@ -892,21 +831,29 @@ protected:
   // True when we have an audio stream that we're decoding, and we have not
   // yet decoded to end of stream.
   bool mIsAudioDecoding;
 
   // True when we have a video stream that we're decoding, and we have not
   // yet decoded to end of stream.
   bool mIsVideoDecoding;
 
-  // True when we have dispatched a task to the decode task queue to request
-  // decoded audio/video, and/or we are waiting for the requested sample to be
-  // returned by callback from the Reader.
-  bool mAudioRequestPending;
-  bool mVideoRequestPending;
+  // True when we have dispatched a task to the decode task queue to run
+  // the audio decode.
+  bool mDispatchedAudioDecodeTask;
+
+  // True when we have dispatched a task to the decode task queue to run
+  // the video decode.
+  bool mDispatchedVideoDecodeTask;
+
+  // If the video decode is falling behind the audio, we'll start dropping the
+  // inter-frames up until the next keyframe which is at or before the current
+  // playback position. mSkipToNextKeyFrame is true if we're currently
+  // skipping up to the next keyframe.
+  bool mSkipToNextKeyFrame;
 
   // True if we shouldn't play our audio (but still write it to any capturing
   // streams). When this is true, mStopAudioThread is always true and
   // the audio thread will never start again after it has stopped.
   bool mAudioCaptured;
 
   // True if the media resource can be seeked on a transport level. Accessed
   // from the state machine and main threads. Synchronised via decoder monitor.
@@ -972,26 +919,20 @@ protected:
   // True if we are decoding a realtime stream, like a camera stream.
   bool mRealTime;
 
   // True if we've dispatched a task to the decode task queue to call
   // ReadMetadata on the reader. We maintain a flag to ensure that we don't
   // dispatch multiple tasks to re-do the metadata loading.
   bool mDispatchedDecodeMetadataTask;
 
-  // These two flags are true when we need to drop decoded samples that
-  // we receive up to the next discontinuity. We do this when we seek;
-  // the first sample in each stream after the seek is marked as being
-  // a "discontinuity".
-  bool mDropAudioUntilNextDiscontinuity;
-  bool mDropVideoUntilNextDiscontinuity;
-
-  // True if we need to decode forwards to the seek target inside
-  // mCurrentSeekTarget.
-  bool mDecodeToSeekTarget;
+  // True if we've dispatched a task to the decode task queue to call
+  // Seek on the reader. We maintain a flag to ensure that we don't
+  // dispatch multiple tasks to re-do the seek.
+  bool mDispatchedDecodeSeekTask;
 
   // Stores presentation info required for playback. The decoder monitor
   // must be held when accessing this.
   MediaInfo mInfo;
 
   mozilla::MediaMetadataManager mMetadataManager;
 
   MediaDecoderOwner::NextFrameStatus mLastFrameStatus;
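
The mDispatchedAudioDecodeTask/mDispatchedVideoDecodeTask/mDispatchedDecodeSeekTask flags restored above all follow the same dispatch-once pattern: a boolean, guarded by the decoder monitor, records that a task is already queued on the decode task queue so the state machine never queues a duplicate. A minimal sketch of that pattern follows; the method names and the mDecodeTaskQueue member are illustrative assumptions, not the real signatures in this file.

void MediaDecoderStateMachine::EnsureAudioDecodeTaskQueued()
{
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  if (mDispatchedAudioDecodeTask) {
    return;  // A decode task is already queued or running; nothing to do.
  }
  nsresult rv = mDecodeTaskQueue->Dispatch(
    NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DecodeAudio));
  if (NS_SUCCEEDED(rv)) {
    // Cleared again, under the monitor, when the task completes or is dropped.
    mDispatchedAudioDecodeTask = true;
  }
}
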
--- a/content/media/MediaQueue.h
+++ b/content/media/MediaQueue.h
@@ -38,23 +38,21 @@ template <class T> class MediaQueue : pr
 
   inline int32_t GetSize() {
     ReentrantMonitorAutoEnter mon(mReentrantMonitor);
     return nsDeque::GetSize();
   }
 
   inline void Push(T* aItem) {
     ReentrantMonitorAutoEnter mon(mReentrantMonitor);
-    MOZ_ASSERT(aItem);
     nsDeque::Push(aItem);
   }
 
   inline void PushFront(T* aItem) {
     ReentrantMonitorAutoEnter mon(mReentrantMonitor);
-    MOZ_ASSERT(aItem);
     nsDeque::PushFront(aItem);
   }
 
   inline T* PopFront() {
     ReentrantMonitorAutoEnter mon(mReentrantMonitor);
     T* rv = static_cast<T*>(nsDeque::PopFront());
     if (rv) {
       NotifyPopListeners();
@@ -72,16 +70,21 @@ template <class T> class MediaQueue : pr
     return static_cast<T*>(nsDeque::PeekFront());
   }
 
   inline void Empty() {
     ReentrantMonitorAutoEnter mon(mReentrantMonitor);
     nsDeque::Empty();
   }
 
+  inline void Erase() {
+    ReentrantMonitorAutoEnter mon(mReentrantMonitor);
+    nsDeque::Erase();
+  }
+
   void Reset() {
     ReentrantMonitorAutoEnter mon(mReentrantMonitor);
     while (GetSize() > 0) {
       T* x = PopFront();
       delete x;
     }
     mEndOfStream = false;
   }
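
As a rough usage sketch (not code from this patch): the queue above hands decoded samples from the decode thread to the playback side. Every accessor takes the reentrant monitor internally, and PopFront() transfers ownership of the element to the caller, which is why Reset() deletes what it pops. The PlaySample() consumer below is hypothetical.

MediaQueue<AudioData> queue;

// Decode thread: publish a freshly decoded sample.
void OnSampleDecoded(AudioData* aSample)
{
  queue.Push(aSample);    // The monitor is taken inside Push().
}

// Audio thread: drain whatever has been decoded so far.
void DrainForPlayback()
{
  while (AudioData* sample = queue.PopFront()) {
    PlaySample(sample);   // Hypothetical consumer.
    delete sample;        // PopFront() handed ownership back to us.
  }
}
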
--- a/content/media/VideoUtils.cpp
+++ b/content/media/VideoUtils.cpp
@@ -4,18 +4,16 @@
 
 #include "VideoUtils.h"
 #include "MediaResource.h"
 #include "mozilla/dom/TimeRanges.h"
 #include "nsMathUtils.h"
 #include "nsSize.h"
 #include "VorbisUtils.h"
 #include "ImageContainer.h"
-#include "SharedThreadPool.h"
-#include "mozilla/Preferences.h"
 
 #include <stdint.h>
 
 namespace mozilla {
 
 using layers::PlanarYCbCrImage;
 
 // Converts from number of audio frames to microseconds, given the specified
@@ -187,15 +185,9 @@ IsValidVideoRegion(const nsIntSize& aFra
     aPicture.width * aPicture.height <= MAX_VIDEO_WIDTH * MAX_VIDEO_HEIGHT &&
     aPicture.width * aPicture.height != 0 &&
     aDisplay.width <= PlanarYCbCrImage::MAX_DIMENSION &&
     aDisplay.height <= PlanarYCbCrImage::MAX_DIMENSION &&
     aDisplay.width * aDisplay.height <= MAX_VIDEO_WIDTH * MAX_VIDEO_HEIGHT &&
     aDisplay.width * aDisplay.height != 0;
 }
 
-TemporaryRef<SharedThreadPool> GetMediaDecodeThreadPool()
-{
-  return SharedThreadPool::Get(NS_LITERAL_CSTRING("Media Decode"),
-                               Preferences::GetUint("media.num-decode-threads", 25));
-}
-
 } // end namespace mozilla
--- a/content/media/VideoUtils.h
+++ b/content/media/VideoUtils.h
@@ -14,17 +14,16 @@
 #if !(defined(XP_WIN) || defined(XP_MACOSX) || defined(LINUX)) || \
     defined(MOZ_ASAN)
 // For MEDIA_THREAD_STACK_SIZE
 #include "nsIThreadManager.h"
 #endif
 #include "nsThreadUtils.h"
 #include "prtime.h"
 #include "AudioSampleFormat.h"
-#include "mozilla/RefPtr.h"
 
 using mozilla::CheckedInt64;
 using mozilla::CheckedUint64;
 using mozilla::CheckedInt32;
 using mozilla::CheckedUint32;
 
 struct nsIntSize;
 struct nsIntRect;
@@ -182,17 +181,11 @@ public:
   ~AutoSetOnScopeExit() {
     mVar = mValue;
   }
 private:
   T& mVar;
   const T mValue;
 };
 
-class SharedThreadPool;
-
-// Returns the thread pool that is shared amongst all decoder state machines
-// for decoding streams.
-TemporaryRef<SharedThreadPool> GetMediaDecodeThreadPool();
-
 } // end namespace mozilla
 
 #endif
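
AutoSetOnScopeExit, shown above, simply writes mValue into mVar when it leaves scope. A small hedged usage sketch (the flag and the ReadHeaders() helper are made up, and the constructor is assumed to take the variable plus the value to assign on exit):

bool mDecodingMetadata = false;

void LoadMetadata()
{
  mDecodingMetadata = true;
  // Clears the flag on every exit path, including early returns.
  AutoSetOnScopeExit<bool> clearFlag(mDecodingMetadata, false);

  if (!ReadHeaders()) {    // Hypothetical helper.
    return;                // The flag is still reset on this path.
  }
  // ... more metadata work ...
}                          // Destructor runs here: mDecodingMetadata = false.
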
--- a/content/media/mediasource/MediaSourceDecoder.cpp
+++ b/content/media/mediasource/MediaSourceDecoder.cpp
@@ -38,18 +38,16 @@ class TimeRanges;
 
 } // namespace dom
 
 class MediaSourceReader : public MediaDecoderReader
 {
 public:
   MediaSourceReader(MediaSourceDecoder* aDecoder, dom::MediaSource* aSource)
     : MediaDecoderReader(aDecoder)
-    , mTimeThreshold(-1)
-    , mDropVideoBeforeThreshold(false)
     , mActiveVideoDecoder(-1)
     , mActiveAudioDecoder(-1)
     , mMediaSource(aSource)
   {
   }
 
   nsresult Init(MediaDecoderReader* aCloneDonor) MOZ_OVERRIDE
   {
@@ -59,82 +57,63 @@ public:
     return NS_OK;
   }
 
   bool IsWaitingMediaResources() MOZ_OVERRIDE
   {
     return mDecoders.IsEmpty() && mPendingDecoders.IsEmpty();
   }
 
-  void RequestAudioData() MOZ_OVERRIDE
+  bool DecodeAudioData() MOZ_OVERRIDE
   {
     if (!GetAudioReader()) {
       MSE_DEBUG("%p DecodeAudioFrame called with no audio reader", this);
       MOZ_ASSERT(mPendingDecoders.IsEmpty());
-      GetCallback()->OnDecodeError();
-      return;
+      return false;
     }
-    GetAudioReader()->RequestAudioData();
+    bool rv = GetAudioReader()->DecodeAudioData();
+
+    nsAutoTArray<AudioData*, 10> audio;
+    GetAudioReader()->AudioQueue().GetElementsAfter(-1, &audio);
+    for (uint32_t i = 0; i < audio.Length(); ++i) {
+      AudioQueue().Push(audio[i]);
+    }
+    GetAudioReader()->AudioQueue().Empty();
+
+    return rv;
   }
 
-  void OnAudioDecoded(AudioData* aSample)
-  {
-    GetCallback()->OnAudioDecoded(aSample);
-  }
-
-  void OnAudioEOS()
-  {
-    GetCallback()->OnAudioEOS();
-  }
-
-  void RequestVideoData(bool aSkipToNextKeyframe, int64_t aTimeThreshold) MOZ_OVERRIDE
+  bool DecodeVideoFrame(bool& aKeyFrameSkip, int64_t aTimeThreshold) MOZ_OVERRIDE
   {
     if (!GetVideoReader()) {
       MSE_DEBUG("%p DecodeVideoFrame called with no video reader", this);
       MOZ_ASSERT(mPendingDecoders.IsEmpty());
-      GetCallback()->OnDecodeError();
-      return;
+      return false;
     }
-    mTimeThreshold = aTimeThreshold;
-    GetVideoReader()->RequestVideoData(aSkipToNextKeyframe, aTimeThreshold);
-  }
+
+    if (MaybeSwitchVideoReaders(aTimeThreshold)) {
+      GetVideoReader()->DecodeToTarget(aTimeThreshold);
+    }
+
+    bool rv = GetVideoReader()->DecodeVideoFrame(aKeyFrameSkip, aTimeThreshold);
 
-  void OnVideoDecoded(VideoData* aSample)
-  {
-    if (mDropVideoBeforeThreshold) {
-      if (aSample->mTime < mTimeThreshold) {
-        delete aSample;
-        GetVideoReader()->RequestVideoData(false, mTimeThreshold);
-      } else {
-        mDropVideoBeforeThreshold = false;
-        GetCallback()->OnVideoDecoded(aSample);
-      }
-    } else {
-      GetCallback()->OnVideoDecoded(aSample);
+    nsAutoTArray<VideoData*, 10> video;
+    GetVideoReader()->VideoQueue().GetElementsAfter(-1, &video);
+    for (uint32_t i = 0; i < video.Length(); ++i) {
+      VideoQueue().Push(video[i]);
     }
-  }
+    GetVideoReader()->VideoQueue().Empty();
 
-  void OnVideoEOS()
-  {
-    // End of stream. See if we can switch to another video decoder.
+    if (rv) {
+      return true;
+    }
+
     MSE_DEBUG("%p MSR::DecodeVF %d (%p) returned false (readers=%u)",
               this, mActiveVideoDecoder, mDecoders[mActiveVideoDecoder].get(), mDecoders.Length());
-    if (MaybeSwitchVideoReaders()) {
-      // Success! Resume decoding with next video decoder.
-      RequestVideoData(false, mTimeThreshold);
-    } else {
-      // End of stream.
-      MSE_DEBUG("%p MSR::DecodeVF %d (%p) EOS (readers=%u)",
-                this, mActiveVideoDecoder, mDecoders[mActiveVideoDecoder].get(), mDecoders.Length());
-      GetCallback()->OnVideoEOS();
-    }
-  }
-
-  void OnDecodeError() {
-    GetCallback()->OnDecodeError();
+    return rv;
   }
 
   bool HasVideo() MOZ_OVERRIDE
   {
     return mInfo.HasVideo();
   }
 
   bool HasAudio() MOZ_OVERRIDE
@@ -142,50 +121,37 @@ public:
     return mInfo.HasAudio();
   }
 
   nsresult ReadMetadata(MediaInfo* aInfo, MetadataTags** aTags) MOZ_OVERRIDE;
   nsresult Seek(int64_t aTime, int64_t aStartTime, int64_t aEndTime,
                 int64_t aCurrentTime) MOZ_OVERRIDE;
   nsresult GetBuffered(dom::TimeRanges* aBuffered, int64_t aStartTime) MOZ_OVERRIDE;
   already_AddRefed<SubBufferDecoder> CreateSubDecoder(const nsACString& aType,
-                                                      MediaSourceDecoder* aParentDecoder,
-                                                      MediaTaskQueue* aTaskQueue);
-
-  void Shutdown() MOZ_OVERRIDE {
-    MediaDecoderReader::Shutdown();
-    for (uint32_t i = 0; i < mDecoders.Length(); ++i) {
-      mDecoders[i]->GetReader()->Shutdown();
-    }
-  }
+                                                      MediaSourceDecoder* aParentDecoder);
 
   void InitializePendingDecoders();
 
   bool IsShutdown() {
     ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
     return mDecoder->IsShutdown();
   }
 
 private:
-
-  // These are read and written on the decode task queue threads.
-  int64_t mTimeThreshold;
-  bool mDropVideoBeforeThreshold;
-
-  bool MaybeSwitchVideoReaders() {
+  bool MaybeSwitchVideoReaders(int64_t aTimeThreshold) {
     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
     MOZ_ASSERT(mActiveVideoDecoder != -1);
 
     InitializePendingDecoders();
 
     for (uint32_t i = mActiveVideoDecoder + 1; i < mDecoders.Length(); ++i) {
       if (!mDecoders[i]->GetReader()->GetMediaInfo().HasVideo()) {
         continue;
       }
-      if (mTimeThreshold >= mDecoders[i]->GetMediaStartTime()) {
+      if (aTimeThreshold >= mDecoders[i]->GetMediaStartTime()) {
         GetVideoReader()->SetIdle();
 
         mActiveVideoDecoder = i;
         MSE_DEBUG("%p MSR::DecodeVF switching to %d", this, mActiveVideoDecoder);
 
         return true;
       }
     }
@@ -225,17 +191,17 @@ public:
   {
   }
 
   already_AddRefed<SubBufferDecoder> CreateSubDecoder(const nsACString& aType,
                                                       MediaSourceDecoder* aParentDecoder) {
     if (!mReader) {
       return nullptr;
     }
-    return static_cast<MediaSourceReader*>(mReader.get())->CreateSubDecoder(aType, aParentDecoder, mDecodeTaskQueue);
+    return static_cast<MediaSourceReader*>(mReader.get())->CreateSubDecoder(aType, aParentDecoder);
   }
 
   nsresult EnqueueDecoderInitialization() {
     AssertCurrentThreadInMonitor();
     if (!mReader) {
       return NS_ERROR_FAILURE;
     }
     return mDecodeTaskQueue->Dispatch(NS_NewRunnableMethod(this,
@@ -395,34 +361,25 @@ MediaSourceReader::InitializePendingDeco
     }
   }
   NS_DispatchToMainThread(new ReleaseDecodersTask(mPendingDecoders));
   MOZ_ASSERT(mPendingDecoders.IsEmpty());
   mDecoder->NotifyWaitingForResourcesStatusChanged();
 }
 
 already_AddRefed<SubBufferDecoder>
-MediaSourceReader::CreateSubDecoder(const nsACString& aType,
-                                    MediaSourceDecoder* aParentDecoder,
-                                    MediaTaskQueue* aTaskQueue)
+MediaSourceReader::CreateSubDecoder(const nsACString& aType, MediaSourceDecoder* aParentDecoder)
 {
   // XXX: Why/when is mDecoder null here, since it should be equal to aParentDecoder?!
   nsRefPtr<SubBufferDecoder> decoder =
     new SubBufferDecoder(new SourceBufferResource(nullptr, aType), aParentDecoder);
   nsAutoPtr<MediaDecoderReader> reader(DecoderTraits::CreateReader(aType, decoder));
   if (!reader) {
     return nullptr;
   }
-  // Set a callback on the subreader that forwards calls to this reader.
-  // This reader will then forward them onto the state machine via this
-  // reader's callback.
-  RefPtr<MediaDataDecodedListener<MediaSourceReader>> callback =
-    new MediaDataDecodedListener<MediaSourceReader>(this, aTaskQueue);
-  reader->SetCallback(callback);
-  reader->SetTaskQueue(aTaskQueue);
   reader->Init(nullptr);
   ReentrantMonitorAutoEnter mon(aParentDecoder->GetReentrantMonitor());
   MSE_DEBUG("Registered subdecoder %p subreader %p", decoder.get(), reader.get());
   decoder->SetReader(reader.forget());
   mPendingDecoders.AppendElement(decoder);
   if (NS_FAILED(static_cast<MediaSourceDecoder*>(mDecoder)->EnqueueDecoderInitialization())) {
     MSE_DEBUG("%p: Failed to enqueue decoder initialization task", this);
     return nullptr;
@@ -462,17 +419,17 @@ MediaSourceReader::Seek(int64_t aTime, i
 
   // Loop until we have the requested time range in the source buffers.
   // This is a workaround for our lack of async functionality in the
   // MediaDecoderStateMachine. Bug 979104 implements what we need and
   // we'll remove this for an async approach based on that in bug XXXXXXX.
   while (!mMediaSource->ActiveSourceBuffers()->AllContainsTime (aTime / USECS_PER_S)
          && !IsShutdown()) {
     mMediaSource->WaitForData();
-    MaybeSwitchVideoReaders();
+    MaybeSwitchVideoReaders(aTime);
   }
 
   if (IsShutdown()) {
     return NS_OK;
   }
 
   ResetDecode();
   if (GetAudioReader()) {
--- a/content/media/moz.build
+++ b/content/media/moz.build
@@ -73,17 +73,16 @@ EXPORTS += [
     'BufferMediaResource.h',
     'DecoderTraits.h',
     'DOMMediaStream.h',
     'EncodedBufferCache.h',
     'FileBlockCache.h',
     'Latency.h',
     'MediaCache.h',
     'MediaData.h',
-    'MediaDataDecodedListener.h',
     'MediaDecoder.h',
     'MediaDecoderOwner.h',
     'MediaDecoderReader.h',
     'MediaDecoderStateMachine.h',
     'MediaInfo.h',
     'MediaMetadataManager.h',
     'MediaQueue.h',
     'MediaRecorder.h',
--- a/content/media/omx/MediaOmxReader.cpp
+++ b/content/media/omx/MediaOmxReader.cpp
@@ -54,32 +54,26 @@ MediaOmxReader::MediaOmxReader(AbstractM
   }
 #endif
 
   mAudioChannel = dom::AudioChannelService::GetDefaultAudioChannel();
 }
 
 MediaOmxReader::~MediaOmxReader()
 {
+  ReleaseMediaResources();
+  ReleaseDecoder();
+  mOmxDecoder.clear();
 }
 
 nsresult MediaOmxReader::Init(MediaDecoderReader* aCloneDonor)
 {
   return NS_OK;
 }
 
-void MediaOmxReader::Shutdown()
-{
-  ReleaseMediaResources();
-  if (mOmxDecoder.get()) {
-    mOmxDecoder->ReleaseDecoder();
-  }
-  mOmxDecoder.clear();
-}
-
 bool MediaOmxReader::IsWaitingMediaResources()
 {
   if (!mOmxDecoder.get()) {
     return false;
   }
   return mOmxDecoder->IsWaitingMediaResources();
 }
 
@@ -100,16 +94,23 @@ void MediaOmxReader::ReleaseMediaResourc
   if (container) {
     container->ClearCurrentFrame();
   }
   if (mOmxDecoder.get()) {
     mOmxDecoder->ReleaseMediaResources();
   }
 }
 
+void MediaOmxReader::ReleaseDecoder()
+{
+  if (mOmxDecoder.get()) {
+    mOmxDecoder->ReleaseDecoder();
+  }
+}
+
 nsresult MediaOmxReader::InitOmxDecoder()
 {
   if (!mOmxDecoder.get()) {
     //register sniffers, if they are not registered in this process.
     DataSource::RegisterDefaultSniffers();
     mDecoder->GetResource()->SetReadMode(MediaCacheStream::MODE_METADATA);
 
     sp<DataSource> dataSource = new MediaStreamSource(mDecoder->GetResource(), mDecoder);
@@ -369,16 +370,17 @@ bool MediaOmxReader::DecodeAudioData()
                                       source.mAudioChannels));
 }
 
 nsresult MediaOmxReader::Seek(int64_t aTarget, int64_t aStartTime, int64_t aEndTime, int64_t aCurrentTime)
 {
   NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
   EnsureActive();
 
+  ResetDecode();
   VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
   if (container && container->GetImageContainer()) {
     container->GetImageContainer()->ClearAllImagesExceptFront();
   }
 
   if (mHasAudio && mHasVideo) {
     // The OMXDecoder seeks/demuxes audio and video streams separately. So if
     // we seek both audio and video to aTarget, the audio stream can typically
--- a/content/media/omx/MediaOmxReader.h
+++ b/content/media/omx/MediaOmxReader.h
@@ -75,24 +75,24 @@ public:
     return mHasVideo;
   }
 
   virtual bool IsWaitingMediaResources();
 
   virtual bool IsDormantNeeded();
   virtual void ReleaseMediaResources();
 
+  virtual void ReleaseDecoder() MOZ_OVERRIDE;
+
   virtual nsresult ReadMetadata(MediaInfo* aInfo,
                                 MetadataTags** aTags);
   virtual nsresult Seek(int64_t aTime, int64_t aStartTime, int64_t aEndTime, int64_t aCurrentTime);
 
   virtual void SetIdle() MOZ_OVERRIDE;
 
-  virtual void Shutdown() MOZ_OVERRIDE;
-
   void SetAudioChannel(dom::AudioChannel aAudioChannel) {
     mAudioChannel = aAudioChannel;
   }
 
   android::sp<android::MediaSource> GetAudioOffloadTrack() {
     return mAudioOffloadTrack;
   }
 
--- a/content/media/plugins/MediaPluginReader.cpp
+++ b/content/media/plugins/MediaPluginReader.cpp
@@ -30,16 +30,21 @@ MediaPluginReader::MediaPluginReader(Abs
   mPlugin(nullptr),
   mHasAudio(false),
   mHasVideo(false),
   mVideoSeekTimeUs(-1),
   mAudioSeekTimeUs(-1)
 {
 }
 
+MediaPluginReader::~MediaPluginReader()
+{
+  ResetDecode();
+}
+
 nsresult MediaPluginReader::Init(MediaDecoderReader* aCloneDonor)
 {
   return NS_OK;
 }
 
 nsresult MediaPluginReader::ReadMetadata(MediaInfo* aInfo,
                                          MetadataTags** aTags)
 {
@@ -94,32 +99,28 @@ nsresult MediaPluginReader::ReadMetadata
     mInfo.mAudio.mRate = sampleRate;
   }
 
  *aInfo = mInfo;
  *aTags = nullptr;
   return NS_OK;
 }
 
-void MediaPluginReader::Shutdown()
-{
-  ResetDecode();
-  if (mPlugin) {
-    GetMediaPluginHost()->DestroyDecoder(mPlugin);
-    mPlugin = nullptr;
-  }
-}
-
 // Resets all state related to decoding, emptying all buffers etc.
 nsresult MediaPluginReader::ResetDecode()
 {
   if (mLastVideoFrame) {
     mLastVideoFrame = nullptr;
   }
-  return MediaDecoderReader::ResetDecode();
+  if (mPlugin) {
+    GetMediaPluginHost()->DestroyDecoder(mPlugin);
+    mPlugin = nullptr;
+  }
+
+  return NS_OK;
 }
 
 bool MediaPluginReader::DecodeVideoFrame(bool &aKeyframeSkip,
                                          int64_t aTimeThreshold)
 {
   // Record number of frames decoded and parsed. Automatically update the
   // stats counters using the AutoNotifyDecoded stack-based class.
   uint32_t parsed = 0, decoded = 0;
@@ -315,16 +316,19 @@ bool MediaPluginReader::DecodeAudioData(
                                      source.mSize,
                                      source.mAudioChannels));
 }
 
 nsresult MediaPluginReader::Seek(int64_t aTarget, int64_t aStartTime, int64_t aEndTime, int64_t aCurrentTime)
 {
   NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
 
+  mVideoQueue.Reset();
+  mAudioQueue.Reset();
+
   if (mHasAudio && mHasVideo) {
     // The decoder seeks/demuxes audio and video streams separately. So if
     // we seek both audio and video to aTarget, the audio stream can typically
     // seek closer to the seek target, since typically every audio block is
     // a sync point, whereas for video there are only keyframes once every few
     // seconds. So if we have both audio and video, we must seek the video
     // stream to the preceding keyframe first, get the stream time, and then
     // seek the audio stream to match the video stream's time. Otherwise, the
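
The comment above is cut off by the hunk boundary, but the strategy it describes amounts to: seek the video stream first, since it can only land on a keyframe at or before the target, then seek the audio stream to wherever the video actually landed so the two streams start in sync. A hedged sketch with made-up method names, not the real reader API:

nsresult SeekBothStreams(int64_t aTargetUs)
{
  // Video can only stop on a keyframe at or before the target time.
  int64_t videoTimeUs = 0;
  nsresult rv = SeekVideoToKeyframeBefore(aTargetUs, &videoTimeUs);
  NS_ENSURE_SUCCESS(rv, rv);

  // Nearly every audio block is a sync point, so seek audio to the time the
  // video actually reached; otherwise audio would lead the first video frame.
  return SeekAudioTo(videoTimeUs);
}
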
--- a/content/media/plugins/MediaPluginReader.h
+++ b/content/media/plugins/MediaPluginReader.h
@@ -38,16 +38,17 @@ class MediaPluginReader : public MediaDe
   nsIntRect mPicture;
   nsIntSize mInitialFrame;
   int64_t mVideoSeekTimeUs;
   int64_t mAudioSeekTimeUs;
   nsAutoPtr<VideoData> mLastVideoFrame;
 public:
   MediaPluginReader(AbstractMediaDecoder* aDecoder,
                     const nsACString& aContentType);
+  ~MediaPluginReader();
 
   virtual nsresult Init(MediaDecoderReader* aCloneDonor);
   virtual nsresult ResetDecode();
 
   virtual bool DecodeAudioData();
   virtual bool DecodeVideoFrame(bool &aKeyframeSkip,
                                 int64_t aTimeThreshold);
 
@@ -60,18 +61,16 @@ public:
   {
     return mHasVideo;
   }
 
   virtual nsresult ReadMetadata(MediaInfo* aInfo,
                                 MetadataTags** aTags);
   virtual nsresult Seek(int64_t aTime, int64_t aStartTime, int64_t aEndTime, int64_t aCurrentTime);
 
-  virtual void Shutdown() MOZ_OVERRIDE;
-
   class ImageBufferCallback : public MPAPI::BufferCallback {
     typedef mozilla::layers::Image Image;
 
   public:
     ImageBufferCallback(mozilla::layers::ImageContainer *aImageContainer);
     void *operator()(size_t aWidth, size_t aHeight,
                      MPAPI::ColorFormat aColorFormat) MOZ_OVERRIDE;
     already_AddRefed<Image> GetImage();
--- a/content/media/test/manifest.js
+++ b/content/media/test/manifest.js
@@ -384,17 +384,17 @@ function IsWindows8OrLater() {
 
 // These are files that are non seekable, due to problems with the media,
 // for example broken or missing indexes.
 var gUnseekableTests = [
   { name:"no-cues.webm", type:"video/webm" },
   { name:"bogus.duh", type:"bogus/duh"}
 ];
 // Unfortunately big-buck-bunny-unseekable.mp4 doesn't play on Windows 7, so
-// only include it in the unseekable tests if we're on later versions of Windows.
+// only include it in the unseekable tests if we're on later versions of Windows. 
 // This test actually only passes on win8 at the moment.
 if (navigator.userAgent.indexOf("Windows") != -1 && IsWindows8OrLater()) {
   gUnseekableTests = gUnseekableTests.concat([
     { name:"big-buck-bunny-unseekable.mp4", type:"video/mp4" }
   ]);
 }
 // Android supports fragmented MP4 playback from 4.3.
 var androidVersion = SpecialPowers.Cc['@mozilla.org/system-info;1']
@@ -672,46 +672,30 @@ function MediaTestManager() {
   // Registers that the test corresponding to 'token' has been started.
   // Don't call more than once per token.
   this.started = function(token) {
     this.tokens.push(token);
     this.numTestsRunning++;
     is(this.numTestsRunning, this.tokens.length, "[started " + token + "] Length of array should match number of running tests");
   }
 
-  this.watchdog = null;
-
-  this.watchdogFn = function() {
-    if (this.tokens.length > 0) {
-      info("Watchdog remaining tests= " + this.tokens);
-    }
-  }
-
   // Registers that the test corresponding to 'token' has finished. Call when
   // you've finished your test. If all tests are complete this will finish the
   // run, otherwise it may start up the next run. It's ok to call multiple times
   // per token.
   this.finished = function(token) {
     var i = this.tokens.indexOf(token);
     if (i != -1) {
       // Remove the element from the list of running tests.
       this.tokens.splice(i, 1);
     }
-
-    if (this.watchdog) {
-      clearTimeout(this.watchdog);
-      this.watchdog = null;
-    }
-
-    info("[finished " + token + "] remaining= " + this.tokens);
     this.numTestsRunning--;
     is(this.numTestsRunning, this.tokens.length, "[finished " + token + "] Length of array should match number of running tests");
     if (this.tokens.length < PARALLEL_TESTS) {
       this.nextTest();
-      this.watchdog = setTimeout(this.watchdogFn.bind(this), 10000);
     }
   }
 
   // Starts the next batch of tests, or finishes if they're all done.
   // Don't call this directly, call finished(token) when you're done.
   this.nextTest = function() {
     // Force an exact GC after every completed testcase. This ensures that any
     // decoders with live threads waiting for the GC are killed promptly, to free
--- a/content/media/test/test_bug465498.html
+++ b/content/media/test/test_bug465498.html
@@ -9,53 +9,44 @@
 <body>
 <a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=465498">Mozilla Bug 465498</a>
 <pre id="test">
 <script class="testbody" type="text/javascript">
 
 var manager = new MediaTestManager;
 
 function startTest(e) {
-  var v = e.target;
-  info(v._name + " loadedmetadata");
   e.target.play();
 }
 
 function playbackEnded(e) {
   var v = e.target;
-  info(v._name + " ended");
   if (v._finished)
     return;
   ok(v.currentTime >= v.duration - 0.1 && v.currentTime <= v.duration + 0.1,
      "Checking currentTime at end: " + v.currentTime + " for " + v._name);
   ok(v.ended, "Checking playback has ended for " + v._name);
   v.pause();
   v.currentTime = 0;
   ok(!v.ended, "Checking ended is no longer true for " + v._name);
   v._seeked = true;
 }
 
 function seekEnded(e) {
   var v = e.target;
-  info(v._name + " seeked");
   if (v._finished)
     return;
   ok(v.currentTime == 0, "Checking currentTime after seek: " +
      v.currentTime  + " for " + v._name);
   ok(!v.ended, "Checking ended is false for " + v._name);
   v._finished = true;
   v.parentNode.removeChild(v);
   manager.finished(v.token);
 }
 
-function seeking(e) {
-  var v = e.target;
-  info(v._name + " seeking");
-}
-
 function initTest(test, token) {
   var type = getMajorMimeType(test.type);
   var v = document.createElement(type);
   if (!v.canPlayType(test.type))
     return;
   v.preload = "metadata";
   v.token = token;
   manager.started(token);
@@ -66,17 +57,16 @@ function initTest(test, token) {
   s.src = test.name;
   v.appendChild(s);
 
   v._seeked = false;
   v._finished = false;
   v.addEventListener("loadedmetadata", startTest, false);
   v.addEventListener("ended", playbackEnded, false);
   v.addEventListener("seeked", seekEnded, false);
-  v.addEventListener("seeking", seeking, false);
   document.body.appendChild(v);
 }
 
 manager.runTests(gSmallTests, initTest);
 
 </script>
 </pre>
 </body>
--- a/content/media/test/test_bug493187.html
+++ b/content/media/test/test_bug493187.html
@@ -15,49 +15,34 @@ https://bugzilla.mozilla.org/show_bug.cg
 <pre id="test">
 <script class="testbody" type="text/javascript">
 
 SimpleTest.expectAssertions(0, 2);
 
 var manager = new MediaTestManager;
 
 function start(e) {
-  var v = e.target;
-  info("[" + v._name + "] start");
   e.target.currentTime = e.target.duration / 4;
 }
 
 function startSeeking(e) {
-  var v = e.target;
-  info("[" + v._name + "] seeking");
   e.target._seeked = true;
 }
 
 function canPlayThrough(e) {
   var v = e.target;
-  info("[" + v._name + "] canPlayThrough");
   if (v._seeked && !v._finished) {
-    ok(true, "[" + v._name + "] got canplaythrough after seek");
+    ok(true, "Got canplaythrough after seek for " + v._name);
     v._finished = true;
     v.parentNode.removeChild(v);
     v.src = "";
     manager.finished(v.token);
   }
 }
 
-function seeked(e) {
-  var v = e.target;
-  info("[" + v._name + "] seeked");
-}
-
-function error(e) {
-  var v = e.target;
-  info("[" + v._name + "] error");
-}
-
 function startTest(test, token) {
   // TODO: Bug 568402, there's a bug in the WAV backend where we sometimes
   // don't send canplaythrough events after seeking. Once that is fixed,
   // we should remove this guard below so that we run this test for audio.
   var type = getMajorMimeType(test.type);
   if (type != "video")
     return;
 
@@ -68,18 +53,16 @@ function startTest(test, token) {
   v.src = test.name;
   v._name = test.name;
   v._seeked = false;
   v._finished = false;
   v.preload = "auto";
   v.addEventListener("loadedmetadata", start, false);
   v.addEventListener("canplaythrough", canPlayThrough, false);
   v.addEventListener("seeking", startSeeking, false);
-  v.addEventListener("seeked", seeked, false);
-  v.addEventListener("error", error, false);
   document.body.appendChild(v);
 }
 
 SimpleTest.waitForExplicitFinish();
 SpecialPowers.pushPrefEnv({"set": [["media.cache_size", 40000]]}, beginTest);
 function beginTest() {
   manager.runTests(gSeekTests, startTest);
 }
--- a/content/media/test/test_seek.html
+++ b/content/media/test/test_seek.html
@@ -56,32 +56,32 @@ function createTestArray() {
       tests.push(t);
     }
   }
   return tests;
 }
 
 function startTest(test, token) {
   var v = document.createElement('video');
-  v.token = token += "-seek" + test.number + ".js";
-  manager.started(v.token);
+  manager.started(token);
   v.src = test.name;
   v.preload = "metadata";
+  v.token = token;
   document.body.appendChild(v);
   var name = test.name + " seek test " + test.number;
   var localIs = function(name) { return function(a, b, msg) {
     is(a, b, name + ": " + msg);
   }}(name);
   var localOk = function(name) { return function(a, msg) {
     ok(a, name + ": " + msg);
   }}(name);
   var localFinish = function(v, manager) { return function() {
     v.onerror = null;
     removeNodeAndSource(v);
-    dump("SEEK-TEST: Finished " + name + " token: " + v.token + "\n");
+    dump("SEEK-TEST: Finished " + name + "\n");
     manager.finished(v.token);
   }}(v, manager);
   dump("SEEK-TEST: Started " + name + "\n");
   window['test_seek' + test.number](v, test.duration/2, localIs, localOk, localFinish);
 }
 
 manager.runTests(createTestArray(), startTest);
 
--- a/content/media/webaudio/MediaBufferDecoder.cpp
+++ b/content/media/webaudio/MediaBufferDecoder.cpp
@@ -247,33 +247,22 @@ MediaDecodeTask::Decode()
     return;
   }
 
   if (!mDecoderReader->HasAudio()) {
     ReportFailureOnMainThread(WebAudioDecodeJob::NoAudio);
     return;
   }
 
-  MediaQueue<AudioData> audioQueue;
-  nsRefPtr<AudioDecodeRendezvous> barrier(new AudioDecodeRendezvous());
-  mDecoderReader->SetCallback(barrier);
-  while (1) {
-    mDecoderReader->RequestAudioData();
-    nsAutoPtr<AudioData> audio;
-    if (NS_FAILED(barrier->Await(audio))) {
-      ReportFailureOnMainThread(WebAudioDecodeJob::InvalidContent);
-      return;
-    }
-    if (!audio) {
-      // End of stream.
-      break;
-    }
-    audioQueue.Push(audio.forget());
+  while (mDecoderReader->DecodeAudioData()) {
+    // consume all of the buffer
+    continue;
   }
 
+  MediaQueue<AudioData>& audioQueue = mDecoderReader->AudioQueue();
   uint32_t frameCount = audioQueue.FrameCount();
   uint32_t channelCount = mediaInfo.mAudio.mChannels;
   uint32_t sampleRate = mediaInfo.mAudio.mRate;
 
   if (!frameCount || !channelCount || !sampleRate) {
     ReportFailureOnMainThread(WebAudioDecodeJob::InvalidContent);
     return;
   }
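
After the drain loop above, the decoded audio sits in audioQueue as a list of AudioData packets; the code that follows this hunk has to flatten them into one contiguous buffer of frameCount * channelCount samples. A rough sketch of that step, assuming AudioData exposes mFrames and interleaved samples in mAudioData (field names not confirmed by this patch):

nsAutoArrayPtr<AudioDataValue> buffer(
  new AudioDataValue[frameCount * channelCount]);
uint32_t framesCopied = 0;
while (AudioData* audio = audioQueue.PopFront()) {
  // Each packet contributes mFrames * channelCount interleaved values.
  memcpy(buffer.get() + framesCopied * channelCount,
         audio->mAudioData.get(),
         audio->mFrames * channelCount * sizeof(AudioDataValue));
  framesCopied += audio->mFrames;
  delete audio;
}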