Backed out 10 changesets (bug 1091008, bug 1093020, bug 1063323) for windows m2 permanent test failures on a CLOSED TREE
author: Carsten "Tomcat" Book <cbook@mozilla.com>
Wed, 05 Nov 2014 12:57:43 +0100
changeset 238397 43a51201545a2f950d012c6561be42f06b275885
parent 238396 054bd325286d46da8596538afb8122c19aa9f157
child 238398 dffbf0a5595585183535eb864b6ed46ad72ec59c
push id: 4311
push user: raliiev@mozilla.com
push date: Mon, 12 Jan 2015 19:37:41 +0000
treeherder: mozilla-beta@150c9fed433b [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
bugs: 1091008, 1093020, 1063323
milestone: 36.0a1
backs out: 21ddb8a58fea635e047c00827675fed3c42b3d31
fe9e11333c7d2489716d6708129b6adbd443cd09
bba774c54652013351f0b7f0f4b601706122b55c
16f58d7e1e17254bb285368e14a140eb008c1a78
649bfc6dad4df2e7847a48ac3489cad41a0b1d9c
6f270b2d90f471653dcf0e82b1b924feb97e17f2
966093bbc26ad7d904ccf89d6775a72e2045b005
9de4746aa59a3236c122fc9fba27bd4fe2f6c544
856016c0118ab665cf79f947e2ee03e207b1fa38
8aaa10a8d95656121d06939273d8de0d485a19d9
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Backed out 10 changesets (bug 1091008, bug 1093020, bug 1063323) for windows m2 permanent test failures on a CLOSED TREE Backed out changeset 21ddb8a58fea (bug 1093020) Backed out changeset fe9e11333c7d (bug 1093020) Backed out changeset bba774c54652 (bug 1063323) Backed out changeset 16f58d7e1e17 (bug 1091008) Backed out changeset 649bfc6dad4d (bug 1091008) Backed out changeset 6f270b2d90f4 (bug 1091008) Backed out changeset 966093bbc26a (bug 1091008) Backed out changeset 9de4746aa59a (bug 1091008) Backed out changeset 856016c0118a (bug 1091008) Backed out changeset 8aaa10a8d956 (bug 1091008)
dom/html/HTMLMediaElement.cpp
dom/html/TimeRanges.h
dom/media/MediaDecoder.cpp
dom/media/MediaDecoder.h
dom/media/MediaDecoderReader.cpp
dom/media/MediaDecoderReader.h
dom/media/MediaDecoderStateMachine.cpp
dom/media/MediaDecoderStateMachine.h
dom/media/MediaResource.h
dom/media/fmp4/MP4Reader.cpp
dom/media/fmp4/MP4Reader.h
dom/media/gstreamer/GStreamerReader.cpp
dom/media/gstreamer/GStreamerReader.h
dom/media/gtest/TestMP4Reader.cpp
dom/media/mediasource/MediaSource.cpp
dom/media/mediasource/MediaSource.h
dom/media/mediasource/MediaSourceDecoder.cpp
dom/media/mediasource/MediaSourceDecoder.h
dom/media/mediasource/MediaSourceReader.cpp
dom/media/mediasource/MediaSourceReader.h
dom/media/mediasource/SourceBufferDecoder.cpp
dom/media/mediasource/TrackBuffer.cpp
dom/media/mediasource/TrackBuffer.h
dom/media/mediasource/test/mochitest.ini
dom/media/mediasource/test/test_BufferingWait.html
dom/media/mediasource/test/test_WaitingOnMissingData.html
dom/media/ogg/OggReader.cpp
dom/media/ogg/OggReader.h
dom/media/omx/RtspMediaCodecReader.h
dom/media/omx/RtspOmxReader.h
dom/media/raw/RawReader.cpp
dom/media/raw/RawReader.h
dom/media/wave/WaveReader.cpp
dom/media/wave/WaveReader.h
dom/media/webm/WebMReader.cpp
dom/media/webm/WebMReader.h
--- a/dom/html/HTMLMediaElement.cpp
+++ b/dom/html/HTMLMediaElement.cpp
@@ -3673,17 +3673,19 @@ HTMLMediaElement::CopyInnerTo(Element* a
   return rv;
 }
 
 already_AddRefed<TimeRanges>
 HTMLMediaElement::Buffered() const
 {
   nsRefPtr<TimeRanges> ranges = new TimeRanges();
   if (mReadyState > nsIDOMHTMLMediaElement::HAVE_NOTHING) {
-    if (mDecoder) {
+    if (mMediaSource) {
+      mMediaSource->GetBuffered(ranges);
+    } else if (mDecoder) {
       // If GetBuffered fails we ignore the error result and just return the
       // time ranges we found up till the error.
       mDecoder->GetBuffered(ranges);
     }
   }
   ranges->Normalize();
   return ranges.forget();
 }
--- a/dom/html/TimeRanges.h
+++ b/dom/html/TimeRanges.h
@@ -87,23 +87,14 @@ private:
 
   nsAutoTArray<TimeRange,4> mRanges;
 
 public:
   typedef nsTArray<TimeRange>::index_type index_type;
   static const index_type NoIndex = index_type(-1);
 
   index_type Find(double aTime);
-
-  bool Contains(double aStart, double aEnd) {
-    index_type target = Find(aStart);
-    if (target == NoIndex) {
-      return false;
-    }
-
-    return mRanges[target].mEnd >= aEnd;
-  }
 };
 
 } // namespace dom
 } // namespace mozilla
 
 #endif // mozilla_dom_TimeRanges_h_
--- a/dom/media/MediaDecoder.cpp
+++ b/dom/media/MediaDecoder.cpp
@@ -674,32 +674,21 @@ void MediaDecoder::QueueMetadata(int64_t
                                  MetadataTags* aTags)
 {
   NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
   GetReentrantMonitor().AssertCurrentThreadIn();
   mDecoderStateMachine->QueueMetadata(aPublishTime, aInfo, aTags);
 }
 
 bool
-MediaDecoder::IsExpectingMoreData()
+MediaDecoder::IsDataCachedToEndOfResource()
 {
   ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
-
-  // If there's no resource, we're probably just getting set up.
-  if (!mResource) {
-    return true;
-  }
-
-  // If we've downloaded anything, we're not waiting for anything.
-  if (mResource->IsDataCachedToEndOfResource(mDecoderPosition)) {
-    return false;
-  }
-
-  // Otherwise, we should be getting data unless the stream is suspended.
-  return !mResource->IsSuspended();
+  return (mResource &&
+          mResource->IsDataCachedToEndOfResource(mDecoderPosition));
 }
 
 void MediaDecoder::MetadataLoaded(MediaInfo* aInfo, MetadataTags* aTags)
 {
   MOZ_ASSERT(NS_IsMainThread());
   if (mShuttingDown) {
     return;
   }
--- a/dom/media/MediaDecoder.h
+++ b/dom/media/MediaDecoder.h
@@ -793,22 +793,19 @@ public:
   // Removes all audio tracks and video tracks that are previously added into
   // the track list. Call on the main thread only.
   virtual void RemoveMediaTracks() MOZ_OVERRIDE;
 
   // Called when the first frame has been loaded.
   // Call on the main thread only.
   void FirstFrameLoaded();
 
-  // Returns true if the this decoder is expecting any more data to arrive
-  // sometime in the not-too-distant future, either from the network or from
-  // an appendBuffer call on a MediaSource element.
-  //
-  // Acquires the monitor. Call from any thread.
-  virtual bool IsExpectingMoreData();
+  // Returns true if the resource has been loaded. Acquires the monitor.
+  // Call from any thread.
+  virtual bool IsDataCachedToEndOfResource();
 
   // Called when the video has completed playing.
   // Call on the main thread only.
   void PlaybackEnded();
 
   // Seeking has stopped. Inform the element on the main
   // thread.
   void SeekingStopped();
--- a/dom/media/MediaDecoderReader.cpp
+++ b/dom/media/MediaDecoderReader.cpp
@@ -1,17 +1,16 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "MediaDecoderReader.h"
 #include "AbstractMediaDecoder.h"
-#include "MediaResource.h"
 #include "VideoUtils.h"
 #include "ImageContainer.h"
 
 #include "mozilla/mozalloc.h"
 #include <stdint.h>
 #include <algorithm>
 
 namespace mozilla {
@@ -57,17 +56,16 @@ public:
 
   size_t mSize;
 };
 
 MediaDecoderReader::MediaDecoderReader(AbstractMediaDecoder* aDecoder)
   : mAudioCompactor(mAudioQueue)
   , mDecoder(aDecoder)
   , mIgnoreAudioOutputFormat(false)
-  , mStartTime(-1)
   , mAudioDiscontinuity(false)
   , mVideoDiscontinuity(false)
 {
   MOZ_COUNT_CTOR(MediaDecoderReader);
 }
 
 MediaDecoderReader::~MediaDecoderReader()
 {
@@ -117,27 +115,21 @@ VideoData* MediaDecoderReader::DecodeToF
   }
   if (eof) {
     VideoQueue().Finish();
   }
   VideoData* d = nullptr;
   return (d = VideoQueue().PeekFront()) ? d : nullptr;
 }
 
-void
-MediaDecoderReader::SetStartTime(int64_t aStartTime)
+nsresult
+MediaDecoderReader::GetBuffered(mozilla::dom::TimeRanges* aBuffered,
+                                int64_t aStartTime)
 {
-  mDecoder->GetReentrantMonitor().AssertCurrentThreadIn();
-  mStartTime = aStartTime;
-}
-
-nsresult
-MediaDecoderReader::GetBuffered(mozilla::dom::TimeRanges* aBuffered)
-{
-  AutoPinned<MediaResource> stream(mDecoder->GetResource());
+  MediaResource* stream = mDecoder->GetResource();
   int64_t durationUs = 0;
   {
     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
     durationUs = mDecoder->GetMediaDuration();
   }
   GetEstimatedBufferedTimeRanges(stream, durationUs, aBuffered);
   return NS_OK;
 }
--- a/dom/media/MediaDecoderReader.h
+++ b/dom/media/MediaDecoderReader.h
@@ -124,39 +124,37 @@ public:
   // Tell the reader that the data decoded are not for direct playback, so it
   // can accept more files, in particular those which have more channels than
   // available in the audio output.
   void SetIgnoreAudioOutputFormat()
   {
     mIgnoreAudioOutputFormat = true;
   }
 
-  // Populates aBuffered with the time ranges which are buffered. This function
+  // Populates aBuffered with the time ranges which are buffered. aStartTime
+  // must be the presentation time of the first frame in the media, e.g.
+  // the media time corresponding to playback time/position 0. This function
   // is called on the main, decode, and state machine threads.
   //
   // This base implementation in MediaDecoderReader estimates the time ranges
   // buffered by interpolating the cached byte ranges with the duration
   // of the media. Reader subclasses should override this method if they
   // can quickly calculate the buffered ranges more accurately.
   //
   // The primary advantage of this implementation in the reader base class
   // is that it's a fast approximation, which does not perform any I/O.
   //
   // The OggReader relies on this base implementation not performing I/O,
   // since in FirefoxOS we can't do I/O on the main thread, where this is
   // called.
-  virtual nsresult GetBuffered(dom::TimeRanges* aBuffered);
+  virtual nsresult GetBuffered(dom::TimeRanges* aBuffered,
+                               int64_t aStartTime);
 
   virtual int64_t ComputeStartTime(const VideoData* aVideo, const AudioData* aAudio);
 
-  // Wait this number of seconds when buffering, then leave and play
-  // as best as we can if the required amount of data hasn't been
-  // retrieved.
-  virtual uint32_t GetBufferingWait() { return 30; }
-
   // Returns the number of bytes of memory allocated by structures/frames in
   // the video queue.
   size_t SizeOfVideoQueueInBytes() const;
 
   // Returns the number of bytes of memory allocated by structures/frames in
   // the audio queue.
   size_t SizeOfAudioQueueInBytes() const;
 
@@ -176,17 +174,16 @@ public:
   // TODO: DEPRECATED.  This uses synchronous decoding.
   VideoData* DecodeToFirstVideoData();
 
   MediaInfo GetMediaInfo() { return mInfo; }
 
   // Indicates if the media is seekable.
   // ReadMetada should be called before calling this method.
   virtual bool IsMediaSeekable() = 0;
-  void SetStartTime(int64_t aStartTime);
 
 protected:
   virtual ~MediaDecoderReader();
 
   // Overrides of this function should decodes an unspecified amount of
   // audio data, enqueuing the audio data in mAudioQueue. Returns true
   // when there's more audio to decode, false if the audio is finished,
   // end of file has been reached, or an un-recoverable read error has
@@ -233,21 +230,16 @@ protected:
   // Stores presentation info required for playback.
   MediaInfo mInfo;
 
   // Whether we should accept media that we know we can't play
   // directly, because they have a number of channel higher than
   // what we support.
   bool mIgnoreAudioOutputFormat;
 
-  // The start time of the media, in microseconds. This is the presentation
-  // time of the first frame decoded from the media. This is initialized to -1,
-  // and then set to a value >= by MediaDecoderStateMachine::SetStartTime(),
-  // after which point it never changes.
-  int64_t mStartTime;
 private:
 
   nsRefPtr<RequestSampleCallback> mSampleDecodedCallback;
 
   nsRefPtr<MediaTaskQueue> mTaskQueue;
 
   // Flags whether a the next audio/video sample comes after a "gap" or
   // "discontinuity" in the stream. For example after a seek.
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -79,16 +79,21 @@ extern PRLogModuleInfo* gMediaDecoderLog
 
 // GetCurrentTime is defined in winbase.h as zero argument macro forwarding to
 // GetTickCount() and conflicts with MediaDecoderStateMachine::GetCurrentTime
 // implementation.  With unified builds, putting this in headers is not enough.
 #ifdef GetCurrentTime
 #undef GetCurrentTime
 #endif
 
+// Wait this number of seconds when buffering, then leave and play
+// as best as we can if the required amount of data hasn't been
+// retrieved.
+static const uint32_t BUFFERING_WAIT_S = 30;
+
 // If audio queue has less than this many usecs of decoded audio, we won't risk
 // trying to decode the video, we'll skip decoding video up to the next
 // keyframe. We may increase this value for an individual decoder if we
 // encounter video frames which take a long time to decode.
 static const uint32_t LOW_AUDIO_USECS = 300000;
 
 // If more than this many usecs of decoded audio is queued, we'll hold off
 // decoding more audio. If we increase the low audio threshold (see
@@ -213,17 +218,17 @@ MediaDecoderStateMachine::MediaDecoderSt
   mDecodingFrozenAtStateDecoding(false)
 {
   MOZ_COUNT_CTOR(MediaDecoderStateMachine);
   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
 
   mAmpleVideoFrames =
     std::max<uint32_t>(Preferences::GetUint("media.video-queue.default-size", 10), 3);
 
-  mBufferingWait = mScheduler->IsRealTime() ? 0 : mReader->GetBufferingWait();
+  mBufferingWait = mScheduler->IsRealTime() ? 0 : BUFFERING_WAIT_S;
   mLowDataThresholdUsecs = mScheduler->IsRealTime() ? 0 : LOW_DATA_THRESHOLD_USECS;
 
   mVideoPrerollFrames = mScheduler->IsRealTime() ? 0 : mAmpleVideoFrames / 2;
   mAudioPrerollUsecs = mScheduler->IsRealTime() ? 0 : LOW_AUDIO_USECS * 2;
 
 #ifdef XP_WIN
   // Ensure high precision timers are enabled on Windows, otherwise the state
   // machine thread isn't woken up at reliable intervals to set the next frame,
@@ -1484,21 +1489,18 @@ void MediaDecoderStateMachine::NotifyDat
   NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
   mReader->NotifyDataArrived(aBuffer, aLength, aOffset);
 
   // While playing an unseekable stream of unknown duration, mEndTime is
   // updated (in AdvanceFrame()) as we play. But if data is being downloaded
   // faster than played, mEndTime won't reflect the end of playable data
   // since we haven't played the frame at the end of buffered data. So update
   // mEndTime here as new data is downloaded to prevent such a lag.
-  //
-  // Make sure to only do this if we have a start time, otherwise the reader
-  // doesn't know how to compute GetBuffered.
   nsRefPtr<dom::TimeRanges> buffered = new dom::TimeRanges();
-  if (mDecoder->IsInfinite() && (mStartTime != -1) &&
+  if (mDecoder->IsInfinite() &&
       NS_SUCCEEDED(mDecoder->GetBuffered(buffered)))
   {
     uint32_t length = 0;
     buffered->GetLength(&length);
     if (length) {
       double end = 0;
       buffered->End(length - 1, &end);
       ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
@@ -1845,39 +1847,38 @@ bool MediaDecoderStateMachine::HasLowDec
           static_cast<uint32_t>(VideoQueue().GetSize()) < LOW_VIDEO_FRAMES));
 }
 
 bool MediaDecoderStateMachine::HasLowUndecodedData()
 {
   return HasLowUndecodedData(mLowDataThresholdUsecs);
 }
 
-bool MediaDecoderStateMachine::HasLowUndecodedData(int64_t aUsecs)
+bool MediaDecoderStateMachine::HasLowUndecodedData(double aUsecs)
 {
   AssertCurrentThreadInMonitor();
   NS_ASSERTION(mState > DECODER_STATE_DECODING_METADATA,
                "Must have loaded metadata for GetBuffered() to work");
 
-  nsRefPtr<dom::TimeRanges> buffered = new dom::TimeRanges();
-  nsresult rv = mReader->GetBuffered(buffered.get());
-  NS_ENSURE_SUCCESS(rv, false);
-
-  int64_t endOfDecodedVideoData = INT64_MAX;
-  if (HasVideo() && !VideoQueue().AtEndOfStream()) {
-    endOfDecodedVideoData = VideoQueue().Peek() ? VideoQueue().Peek()->GetEndTime() : mVideoFrameEndTime;
+  bool reliable;
+  double bytesPerSecond = mDecoder->ComputePlaybackRate(&reliable);
+  if (!reliable) {
+    // Default to assuming we have enough
+    return false;
   }
-  int64_t endOfDecodedAudioData = INT64_MAX;
-  if (HasAudio() && !AudioQueue().AtEndOfStream()) {
-    endOfDecodedAudioData = AudioQueue().Peek() ? AudioQueue().Peek()->GetEndTime() : GetAudioClock();
+
+  MediaResource* stream = mDecoder->GetResource();
+  int64_t currentPos = stream->Tell();
+  int64_t requiredPos = currentPos + int64_t((aUsecs/1000000.0)*bytesPerSecond);
+  int64_t length = stream->GetLength();
+  if (length >= 0) {
+    requiredPos = std::min(requiredPos, length);
   }
-  int64_t endOfDecodedData = std::min(endOfDecodedVideoData, endOfDecodedAudioData);
-
-  return endOfDecodedData != INT64_MAX &&
-         !buffered->Contains(static_cast<double>(endOfDecodedData) / USECS_PER_S,
-                             static_cast<double>(std::min(endOfDecodedData + aUsecs, GetDuration())) / USECS_PER_S);
+
+  return stream->GetCachedDataEnd(currentPos) < requiredPos;
 }
 
 void
 MediaDecoderStateMachine::DecodeError()
 {
   AssertCurrentThreadInMonitor();
   NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
 
@@ -2415,18 +2416,19 @@ nsresult MediaDecoderStateMachine::RunSt
       // We will remain in the buffering state if we've not decoded enough
       // data to begin playback, or if we've not downloaded a reasonable
       // amount of data inside our buffering time.
       TimeDuration elapsed = now - mBufferingStart;
       bool isLiveStream = resource->GetLength() == -1;
       if ((isLiveStream || !mDecoder->CanPlayThrough()) &&
             elapsed < TimeDuration::FromSeconds(mBufferingWait * mPlaybackRate) &&
             (mQuickBuffering ? HasLowDecodedData(QUICK_BUFFERING_LOW_DATA_USECS)
-                             : HasLowUndecodedData(mBufferingWait * USECS_PER_S)) &&
-            mDecoder->IsExpectingMoreData())
+                            : HasLowUndecodedData(mBufferingWait * USECS_PER_S)) &&
+            !mDecoder->IsDataCachedToEndOfResource() &&
+            !resource->IsSuspended())
       {
         DECODER_LOG("Buffering: wait %ds, timeout in %.3lfs %s",
                     mBufferingWait, mBufferingWait - elapsed.ToSeconds(),
                     (mQuickBuffering ? "(quick exit)" : ""));
         ScheduleStateMachine(USECS_PER_S);
         return NS_OK;
       } else {
         DECODER_LOG("Changed state from BUFFERING to DECODING");
@@ -2675,20 +2677,22 @@ void MediaDecoderStateMachine::AdvanceFr
       int64_t now = IsPlaying() ? clock_time : mStartTime + mPlayDuration;
 
       remainingTime = frame->mTime - now;
     }
   }
 
   // Check to see if we don't have enough data to play up to the next frame.
   // If we don't, switch to buffering mode.
+  MediaResource* resource = mDecoder->GetResource();
   if (mState == DECODER_STATE_DECODING &&
       mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING &&
       HasLowDecodedData(remainingTime + EXHAUSTED_DATA_MARGIN_USECS) &&
-      mDecoder->IsExpectingMoreData()) {
+      !mDecoder->IsDataCachedToEndOfResource() &&
+      !resource->IsSuspended()) {
     if (JustExitedQuickBuffering() || HasLowUndecodedData()) {
       if (currentFrame) {
         VideoQueue().PushFront(currentFrame.forget());
       }
       StartBuffering();
       // Don't go straight back to the state machine loop since that might
       // cause us to start decoding again and we could flip-flop between
       // decoding and quick-buffering.
@@ -2884,21 +2888,16 @@ void MediaDecoderStateMachine::SetStartT
       NS_ASSERTION(mEndTime != -1,
                    "We should have mEndTime as supplied duration here");
       // We were specified a duration from a Content-Duration HTTP header.
       // Adjust mEndTime so that mEndTime-mStartTime matches the specified
       // duration.
       mEndTime = mStartTime + mEndTime;
     }
   }
-
-  // Pass along this immutable value to the reader so that it can make
-  // calculations independently of the state machine.
-  mReader->SetStartTime(mStartTime);
-
   // Set the audio start time to be start of media. If this lies before the
   // first actual audio frame we have, we'll inject silence during playback
   // to ensure the audio starts at the correct time.
   mAudioStartTime = mStartTime;
   DECODER_LOG("Set media start time to %lld", mStartTime);
 }
 
 void MediaDecoderStateMachine::UpdateReadyState() {
@@ -2976,16 +2975,25 @@ void MediaDecoderStateMachine::StartBuff
 #ifdef PR_LOGGING
   MediaDecoder::Statistics stats = mDecoder->GetStatistics();
   DECODER_LOG("Playback rate: %.1lfKB/s%s download rate: %.1lfKB/s%s",
               stats.mPlaybackRate/1024, stats.mPlaybackRateReliable ? "" : " (unreliable)",
               stats.mDownloadRate/1024, stats.mDownloadRateReliable ? "" : " (unreliable)");
 #endif
 }
 
+nsresult MediaDecoderStateMachine::GetBuffered(dom::TimeRanges* aBuffered) {
+  MediaResource* resource = mDecoder->GetResource();
+  NS_ENSURE_TRUE(resource, NS_ERROR_FAILURE);
+  resource->Pin();
+  nsresult res = mReader->GetBuffered(aBuffered, mStartTime);
+  resource->Unpin();
+  return res;
+}
+
 void MediaDecoderStateMachine::SetPlayStartTime(const TimeStamp& aTimeStamp)
 {
   AssertCurrentThreadInMonitor();
   mPlayStartTime = aTimeStamp;
   if (!mAudioSink) {
     return;
   }
   if (!mPlayStartTime.IsNull()) {
--- a/dom/media/MediaDecoderStateMachine.h
+++ b/dom/media/MediaDecoderStateMachine.h
@@ -249,19 +249,17 @@ public:
 
   // Must be called with the decode monitor held.
   bool IsSeeking() const {
     AssertCurrentThreadInMonitor();
 
     return mState == DECODER_STATE_SEEKING;
   }
 
-  nsresult GetBuffered(dom::TimeRanges* aBuffered) {
-    return mReader->GetBuffered(aBuffered);
-  }
+  nsresult GetBuffered(dom::TimeRanges* aBuffered);
 
   void SetPlaybackRate(double aPlaybackRate);
   void SetPreservesPitch(bool aPreservesPitch);
 
   size_t SizeOfVideoQueue() {
     if (mReader) {
       return mReader->SizeOfVideoQueueInBytes();
     }
@@ -422,17 +420,17 @@ protected:
   // and playable data. The decoder monitor must be held.
   bool HasLowDecodedData(int64_t aAudioUsecs);
 
   // Returns true if we're running low on data which is not yet decoded.
   // The decoder monitor must be held.
   bool HasLowUndecodedData();
 
   // Returns true if we have less than aUsecs of undecoded data available.
-  bool HasLowUndecodedData(int64_t aUsecs);
+  bool HasLowUndecodedData(double aUsecs);
 
   // Returns the number of unplayed usecs of audio we've got decoded and/or
   // pushed to the hardware waiting to play. This is how much audio we can
   // play without having to run the audio decoder. The decoder monitor
   // must be held.
   int64_t AudioDecodedUsecs();
 
   // Returns true when there's decoded audio waiting to play.
--- a/dom/media/MediaResource.h
+++ b/dom/media/MediaResource.h
@@ -713,38 +713,11 @@ protected:
   // Start and end offset of the bytes to be requested.
   MediaByteRange mByteRange;
 
   // True if the stream can seek into unbuffered ranged, i.e. if the
   // connection supports byte range requests.
   bool mIsTransportSeekable;
 };
 
-/**
- * RAII class that handles pinning and unpinning for MediaResource and derived.
- * This should be used when making calculations that involve potentially-cached
- * MediaResource data, so that the state of the world can't change out from under
- * us.
- */
-template<class T>
-class MOZ_STACK_CLASS AutoPinned {
- public:
-  explicit AutoPinned(T* aResource MOZ_GUARD_OBJECT_NOTIFIER_PARAM) : mResource(aResource) {
-    MOZ_GUARD_OBJECT_NOTIFIER_INIT;
-    MOZ_ASSERT(mResource);
-    mResource->Pin();
-  }
-
-  ~AutoPinned() {
-    mResource->Unpin();
-  }
-
-  operator T*() const { return mResource; }
-  T* operator->() const { return mResource; }
-
-private:
-  T* mResource;
-  MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
-};
-
 } // namespace mozilla
 
 #endif
--- a/dom/media/fmp4/MP4Reader.cpp
+++ b/dom/media/fmp4/MP4Reader.cpp
@@ -795,53 +795,56 @@ MP4Reader::NotifyDataArrived(const char*
 void
 MP4Reader::UpdateIndex()
 {
   MonitorAutoLock mon(mIndexMonitor);
   if (!mIndexReady) {
     return;
   }
 
-  AutoPinned<MediaResource> resource(mDecoder->GetResource());
+  MediaResource* resource = mDecoder->GetResource();
+  resource->Pin();
   nsTArray<MediaByteRange> ranges;
   if (NS_SUCCEEDED(resource->GetCachedRanges(ranges))) {
     mDemuxer->UpdateIndex(ranges);
   }
+  resource->Unpin();
 }
 
 int64_t
 MP4Reader::GetEvictionOffset(double aTime)
 {
   MonitorAutoLock mon(mIndexMonitor);
   if (!mIndexReady) {
     return 0;
   }
 
   return mDemuxer->GetEvictionOffset(aTime * 1000000.0);
 }
 
 nsresult
-MP4Reader::GetBuffered(dom::TimeRanges* aBuffered)
+MP4Reader::GetBuffered(dom::TimeRanges* aBuffered, int64_t aStartTime)
 {
   MonitorAutoLock mon(mIndexMonitor);
   if (!mIndexReady) {
     return NS_OK;
   }
-  MOZ_ASSERT(mStartTime != -1, "Need to finish metadata decode first");
 
-  AutoPinned<MediaResource> resource(mDecoder->GetResource());
+  MediaResource* resource = mDecoder->GetResource();
   nsTArray<MediaByteRange> ranges;
+  resource->Pin();
   nsresult rv = resource->GetCachedRanges(ranges);
+  resource->Unpin();
 
   if (NS_SUCCEEDED(rv)) {
     nsTArray<Interval<Microseconds>> timeRanges;
     mDemuxer->ConvertByteRangesToTime(ranges, &timeRanges);
     for (size_t i = 0; i < timeRanges.Length(); i++) {
-      aBuffered->Add((timeRanges[i].start - mStartTime) / 1000000.0,
-                     (timeRanges[i].end - mStartTime) / 1000000.0);
+      aBuffered->Add((timeRanges[i].start - aStartTime) / 1000000.0,
+                     (timeRanges[i].end - aStartTime) / 1000000.0);
     }
   }
 
   return NS_OK;
 }
 
 bool MP4Reader::IsDormantNeeded()
 {
--- a/dom/media/fmp4/MP4Reader.h
+++ b/dom/media/fmp4/MP4Reader.h
@@ -52,17 +52,18 @@ public:
 
   virtual bool IsMediaSeekable() MOZ_OVERRIDE;
 
   virtual void NotifyDataArrived(const char* aBuffer, uint32_t aLength,
                                  int64_t aOffset) MOZ_OVERRIDE;
 
   virtual int64_t GetEvictionOffset(double aTime) MOZ_OVERRIDE;
 
-  virtual nsresult GetBuffered(dom::TimeRanges* aBuffered) MOZ_OVERRIDE;
+  virtual nsresult GetBuffered(dom::TimeRanges* aBuffered,
+                               int64_t aStartTime) MOZ_OVERRIDE;
 
   // For Media Resource Management
   virtual bool IsWaitingMediaResources() MOZ_OVERRIDE;
   virtual bool IsDormantNeeded() MOZ_OVERRIDE;
   virtual void ReleaseMediaResources() MOZ_OVERRIDE;
 
   virtual nsresult ResetDecode() MOZ_OVERRIDE;
 
--- a/dom/media/gstreamer/GStreamerReader.cpp
+++ b/dom/media/gstreamer/GStreamerReader.cpp
@@ -810,26 +810,27 @@ nsresult GStreamerReader::Seek(int64_t a
   GstMessage* message = gst_bus_timed_pop_filtered(mBus, GST_CLOCK_TIME_NONE,
                (GstMessageType)(GST_MESSAGE_ASYNC_DONE | GST_MESSAGE_ERROR));
   gst_message_unref(message);
   LOG(PR_LOG_DEBUG, "seek completed");
 
   return NS_OK;
 }
 
-nsresult GStreamerReader::GetBuffered(dom::TimeRanges* aBuffered)
+nsresult GStreamerReader::GetBuffered(dom::TimeRanges* aBuffered,
+                                      int64_t aStartTime)
 {
   if (!mInfo.HasValidMedia()) {
     return NS_OK;
   }
 
 #if GST_VERSION_MAJOR == 0
   GstFormat format = GST_FORMAT_TIME;
 #endif
-  AutoPinned<MediaResource> resource(mDecoder->GetResource());
+  MediaResource* resource = mDecoder->GetResource();
   nsTArray<MediaByteRange> ranges;
   resource->GetCachedRanges(ranges);
 
   if (resource->IsDataCachedToEndOfResource(0)) {
     /* fast path for local or completely cached files */
     gint64 duration = 0;
 
     {
--- a/dom/media/gstreamer/GStreamerReader.h
+++ b/dom/media/gstreamer/GStreamerReader.h
@@ -47,17 +47,17 @@ public:
   virtual bool DecodeVideoFrame(bool &aKeyframeSkip,
                                 int64_t aTimeThreshold);
   virtual nsresult ReadMetadata(MediaInfo* aInfo,
                                 MetadataTags** aTags);
   virtual nsresult Seek(int64_t aTime,
                         int64_t aStartTime,
                         int64_t aEndTime,
                         int64_t aCurrentTime);
-  virtual nsresult GetBuffered(dom::TimeRanges* aBuffered);
+  virtual nsresult GetBuffered(dom::TimeRanges* aBuffered, int64_t aStartTime);
 
   virtual void NotifyDataArrived(const char *aBuffer,
                                  uint32_t aLength,
                                  int64_t aOffset) MOZ_OVERRIDE;
 
   virtual bool HasAudio() {
     return mInfo.HasAudio();
   }
--- a/dom/media/gtest/TestMP4Reader.cpp
+++ b/dom/media/gtest/TestMP4Reader.cpp
@@ -31,22 +31,16 @@ public:
   {
     EXPECT_EQ(NS_OK, Preferences::SetBool(
                        "media.fragmented-mp4.use-blank-decoder", true));
 
     EXPECT_EQ(NS_OK, resource->Open(nullptr));
     decoder->SetResource(resource);
 
     reader->Init(nullptr);
-    {
-      // This needs to be done before invoking GetBuffered. This is normally
-      // done by MediaDecoderStateMachine.
-      ReentrantMonitorAutoEnter mon(decoder->GetReentrantMonitor());
-      reader->SetStartTime(0);
-    }
   }
 
   void Init() {
     nsCOMPtr<nsIThread> thread;
     nsresult rv = NS_NewThread(getter_AddRefs(thread),
                                NS_NewRunnableMethod(this, &TestBinding::ReadMetadata));
     EXPECT_EQ(NS_OK, rv);
     thread->Shutdown();
@@ -73,17 +67,17 @@ TEST(MP4Reader, BufferedRange)
 {
   nsRefPtr<TestBinding> b = new TestBinding();
   b->Init();
 
   // Video 3-4 sec, audio 2.986666-4.010666 sec
   b->resource->MockAddBufferedRange(248400, 327455);
 
   nsRefPtr<TimeRanges> ranges = new TimeRanges();
-  EXPECT_EQ(NS_OK, b->reader->GetBuffered(ranges));
+  EXPECT_EQ(NS_OK, b->reader->GetBuffered(ranges, 0));
   EXPECT_EQ(1U, ranges->Length());
   double start = 0;
   EXPECT_EQ(NS_OK, ranges->Start(0, &start));
   EXPECT_NEAR(270000 / 90000.0, start, 0.000001);
   double end = 0;
   EXPECT_EQ(NS_OK, ranges->End(0, &end));
   EXPECT_NEAR(360000 / 90000.0, end, 0.000001);
 }
@@ -94,17 +88,17 @@ TEST(MP4Reader, BufferedRangeMissingLast
   b->Init();
 
   // Dropping the last byte of the video
   b->resource->MockClearBufferedRanges();
   b->resource->MockAddBufferedRange(248400, 324912);
   b->resource->MockAddBufferedRange(324913, 327455);
 
   nsRefPtr<TimeRanges> ranges = new TimeRanges();
-  EXPECT_EQ(NS_OK, b->reader->GetBuffered(ranges));
+  EXPECT_EQ(NS_OK, b->reader->GetBuffered(ranges, 0));
   EXPECT_EQ(1U, ranges->Length());
   double start = 0;
   EXPECT_EQ(NS_OK, ranges->Start(0, &start));
   EXPECT_NEAR(270000.0 / 90000.0, start, 0.000001);
   double end = 0;
   EXPECT_EQ(NS_OK, ranges->End(0, &end));
   EXPECT_NEAR(357000 / 90000.0, end, 0.000001);
 }
@@ -115,17 +109,17 @@ TEST(MP4Reader, BufferedRangeSyncFrame)
   b->Init();
 
   // Check that missing the first byte at 2 seconds skips right through to 3
   // seconds because of a missing sync frame
   b->resource->MockClearBufferedRanges();
   b->resource->MockAddBufferedRange(146336, 327455);
 
   nsRefPtr<TimeRanges> ranges = new TimeRanges();
-  EXPECT_EQ(NS_OK, b->reader->GetBuffered(ranges));
+  EXPECT_EQ(NS_OK, b->reader->GetBuffered(ranges, 0));
   EXPECT_EQ(1U, ranges->Length());
   double start = 0;
   EXPECT_EQ(NS_OK, ranges->Start(0, &start));
   EXPECT_NEAR(270000.0 / 90000.0, start, 0.000001);
   double end = 0;
   EXPECT_EQ(NS_OK, ranges->End(0, &end));
   EXPECT_NEAR(360000 / 90000.0, end, 0.000001);
 }
@@ -173,17 +167,17 @@ TEST(MP4Reader, CompositionOrder)
   b->resource->MockAddBufferedRange(9734, 10314);
   b->resource->MockAddBufferedRange(10314, 10895);
   b->resource->MockAddBufferedRange(11207, 11787);
   b->resource->MockAddBufferedRange(12035, 12616);
   b->resource->MockAddBufferedRange(12616, 13196);
   b->resource->MockAddBufferedRange(13220, 13901);
 
   nsRefPtr<TimeRanges> ranges = new TimeRanges();
-  EXPECT_EQ(NS_OK, b->reader->GetBuffered(ranges));
+  EXPECT_EQ(NS_OK, b->reader->GetBuffered(ranges, 0));
   EXPECT_EQ(2U, ranges->Length());
 
   double start = 0;
   EXPECT_EQ(NS_OK, ranges->Start(0, &start));
   EXPECT_NEAR(166.0 / 2500.0, start, 0.000001);
   double end = 0;
   EXPECT_EQ(NS_OK, ranges->End(0, &end));
   EXPECT_NEAR(332.0 / 2500.0, end, 0.000001);
@@ -223,17 +217,17 @@ TEST(MP4Reader, Normalised)
   //     9 12035   581  8212      1014  Yes
   //    10 12616   580  9226      1015  Yes
   //    11 13220   581  10241     1014  Yes
 
   b->resource->MockClearBufferedRanges();
   b->resource->MockAddBufferedRange(48, 13901);
 
   nsRefPtr<TimeRanges> ranges = new TimeRanges();
-  EXPECT_EQ(NS_OK, b->reader->GetBuffered(ranges));
+  EXPECT_EQ(NS_OK, b->reader->GetBuffered(ranges, 0));
   EXPECT_EQ(1U, ranges->Length());
 
   double start = 0;
   EXPECT_EQ(NS_OK, ranges->Start(0, &start));
   EXPECT_NEAR(166.0 / 2500.0, start, 0.000001);
   double end = 0;
   EXPECT_EQ(NS_OK, ranges->End(0, &end));
   EXPECT_NEAR(11255.0 / 44100.0, end, 0.000001);
--- a/dom/media/mediasource/MediaSource.cpp
+++ b/dom/media/mediasource/MediaSource.cpp
@@ -345,16 +345,57 @@ MediaSource::Detach()
   if (mActiveSourceBuffers) {
     mActiveSourceBuffers->Clear();
   }
   if (mSourceBuffers) {
     mSourceBuffers->Clear();
   }
 }
 
+void
+MediaSource::GetBuffered(TimeRanges* aBuffered)
+{
+  MOZ_ASSERT(aBuffered->Length() == 0);
+  if (mActiveSourceBuffers->IsEmpty()) {
+    return;
+  }
+
+  double highestEndTime = 0;
+
+  nsTArray<nsRefPtr<TimeRanges>> activeRanges;
+  for (uint32_t i = 0; i < mActiveSourceBuffers->Length(); ++i) {
+    bool found;
+    SourceBuffer* sourceBuffer = mActiveSourceBuffers->IndexedGetter(i, found);
+
+    ErrorResult dummy;
+    *activeRanges.AppendElement() = sourceBuffer->GetBuffered(dummy);
+
+    highestEndTime = std::max(highestEndTime, activeRanges.LastElement()->GetEndTime());
+  }
+
+  TimeRanges* intersectionRanges = aBuffered;
+  intersectionRanges->Add(0, highestEndTime);
+
+  for (uint32_t i = 0; i < activeRanges.Length(); ++i) {
+    TimeRanges* sourceRanges = activeRanges[i];
+
+    if (mReadyState == MediaSourceReadyState::Ended) {
+      // Set the end time on the last range to highestEndTime by adding a
+      // new range spanning the current end time to highestEndTime, which
+      // Normalize() will then merge with the old last range.
+      sourceRanges->Add(sourceRanges->GetEndTime(), highestEndTime);
+      sourceRanges->Normalize();
+    }
+
+    intersectionRanges->Intersection(sourceRanges);
+  }
+
+  MSE_DEBUG("MediaSource(%p)::GetBuffered ranges=%s", this, DumpTimeRanges(intersectionRanges).get());
+}
+
 MediaSource::MediaSource(nsPIDOMWindow* aWindow)
   : DOMEventTargetHelper(aWindow)
   , mDuration(UnspecifiedNaN<double>())
   , mDecoder(nullptr)
   , mPrincipal(nullptr)
   , mReadyState(MediaSourceReadyState::Closed)
   , mFirstSourceBufferInitialized(false)
 {
--- a/dom/media/mediasource/MediaSource.h
+++ b/dom/media/mediasource/MediaSource.h
@@ -69,16 +69,18 @@ public:
   nsPIDOMWindow* GetParentObject() const;
 
   JSObject* WrapObject(JSContext* aCx) MOZ_OVERRIDE;
 
   // Attach this MediaSource to Decoder aDecoder.  Returns false if already attached.
   bool Attach(MediaSourceDecoder* aDecoder);
   void Detach();
 
+  void GetBuffered(TimeRanges* aBuffered);
+
   // Set mReadyState to aState and fire the required events at the MediaSource.
   void SetReadyState(MediaSourceReadyState aState);
 
  // Used by SourceBuffer to call CreateSubDecoder.
   MediaSourceDecoder* GetDecoder()
   {
     return mDecoder;
   }
--- a/dom/media/mediasource/MediaSourceDecoder.cpp
+++ b/dom/media/mediasource/MediaSourceDecoder.cpp
@@ -77,17 +77,17 @@ MediaSourceDecoder::GetSeekable(dom::Tim
     return NS_ERROR_FAILURE;
   }
 
   double duration = mMediaSource->Duration();
   if (IsNaN(duration)) {
     // Return empty range.
   } else if (duration > 0 && mozilla::IsInfinite(duration)) {
     nsRefPtr<dom::TimeRanges> bufferedRanges = new dom::TimeRanges();
-    mReader->GetBuffered(bufferedRanges);
+    mMediaSource->GetBuffered(bufferedRanges);
     aSeekable->Add(bufferedRanges->GetStartTime(), bufferedRanges->GetEndTime());
   } else {
     aSeekable->Add(0, duration);
   }
   MSE_DEBUG("MediaSourceDecoder(%p)::GetSeekable ranges=%s", this, DumpTimeRanges(aSeekable).get());
   return NS_OK;
 }
 
@@ -159,23 +159,16 @@ MediaSourceDecoder::OnTrackBufferConfigu
 void
 MediaSourceDecoder::Ended()
 {
   ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
   mReader->Ended();
   mon.NotifyAll();
 }
 
-bool
-MediaSourceDecoder::IsExpectingMoreData()
-{
-  ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
-  return !mReader->IsEnded();
-}
-
 void
 MediaSourceDecoder::SetMediaSourceDuration(double aDuration)
 {
   MOZ_ASSERT(NS_IsMainThread());
   if (!mMediaSource) {
     return;
   }
   ErrorResult dummy;
--- a/dom/media/mediasource/MediaSourceDecoder.h
+++ b/dom/media/mediasource/MediaSourceDecoder.h
@@ -47,17 +47,16 @@ public:
   void DetachMediaSource();
 
   already_AddRefed<SourceBufferDecoder> CreateSubDecoder(const nsACString& aType);
   void AddTrackBuffer(TrackBuffer* aTrackBuffer);
   void RemoveTrackBuffer(TrackBuffer* aTrackBuffer);
   void OnTrackBufferConfigured(TrackBuffer* aTrackBuffer, const MediaInfo& aInfo);
 
   void Ended();
-  bool IsExpectingMoreData() MOZ_OVERRIDE;
 
   void SetMediaSourceDuration(double aDuration);
 
   // Called whenever a TrackBuffer has new data appended or a new decoder
   // initializes.  Safe to call from any thread.
   void NotifyTimeRangesChanged();
 
   // Indicates the point in time at which the reader should consider
--- a/dom/media/mediasource/MediaSourceReader.cpp
+++ b/dom/media/mediasource/MediaSourceReader.cpp
@@ -28,18 +28,16 @@ extern PRLogModuleInfo* GetMediaSourceAP
 #define MSE_DEBUGV(...) PR_LOG(GetMediaSourceLog(), PR_LOG_DEBUG+1, (__VA_ARGS__))
 #define MSE_API(...) PR_LOG(GetMediaSourceAPILog(), PR_LOG_DEBUG, (__VA_ARGS__))
 #else
 #define MSE_DEBUG(...)
 #define MSE_DEBUGV(...)
 #define MSE_API(...)
 #endif
 
-using mozilla::dom::TimeRanges;
-
 namespace mozilla {
 
 MediaSourceReader::MediaSourceReader(MediaSourceDecoder* aDecoder)
   : MediaDecoderReader(aDecoder)
   , mLastAudioTime(-1)
   , mLastVideoTime(-1)
   , mTimeThreshold(-1)
   , mDropAudioBeforeThreshold(false)
@@ -306,25 +304,16 @@ MediaSourceReader::CreateSubDecoder(cons
   }
   MOZ_ASSERT(GetTaskQueue());
   nsRefPtr<SourceBufferDecoder> decoder =
     new SourceBufferDecoder(new SourceBufferResource(aType), mDecoder);
   nsRefPtr<MediaDecoderReader> reader(CreateReaderForType(aType, decoder));
   if (!reader) {
     return nullptr;
   }
-
-  // MSE uses a start time of 0 everywhere. Set that immediately on the
-  // subreader to make sure that it's always in a state where we can invoke
-  // GetBuffered on it.
-  {
-    ReentrantMonitorAutoEnter mon(decoder->GetReentrantMonitor());
-    reader->SetStartTime(0);
-  }
-
   // Set a callback on the subreader that forwards calls to this reader.
   // This reader will then forward them onto the state machine via this
   // reader's callback.
   RefPtr<MediaDataDecodedListener<MediaSourceReader>> callback =
     new MediaDataDecodedListener<MediaSourceReader>(this, GetTaskQueue());
   reader->SetCallback(callback);
   reader->SetTaskQueue(GetTaskQueue());
   reader->Init(nullptr);
@@ -444,56 +433,16 @@ MediaSourceReader::Seek(int64_t aTime, i
     if (NS_FAILED(rv)) {
       return rv;
     }
   }
   return NS_OK;
 }
 
 nsresult
-MediaSourceReader::GetBuffered(dom::TimeRanges* aBuffered)
-{
-  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
-  MOZ_ASSERT(aBuffered->Length() == 0);
-  if (mTrackBuffers.IsEmpty()) {
-    return NS_OK;
-  }
-
-  double highestEndTime = 0;
-
-  nsTArray<nsRefPtr<TimeRanges>> activeRanges;
-  for (uint32_t i = 0; i < mTrackBuffers.Length(); ++i) {
-    nsRefPtr<TimeRanges> r = new TimeRanges();
-    mTrackBuffers[i]->Buffered(r);
-    activeRanges.AppendElement(r);
-    highestEndTime = std::max(highestEndTime, activeRanges.LastElement()->GetEndTime());
-  }
-
-  TimeRanges* intersectionRanges = aBuffered;
-  intersectionRanges->Add(0, highestEndTime);
-
-  for (uint32_t i = 0; i < activeRanges.Length(); ++i) {
-    TimeRanges* sourceRanges = activeRanges[i];
-
-    if (IsEnded()) {
-      // Set the end time on the last range to highestEndTime by adding a
-      // new range spanning the current end time to highestEndTime, which
-      // Normalize() will then merge with the old last range.
-      sourceRanges->Add(sourceRanges->GetEndTime(), highestEndTime);
-      sourceRanges->Normalize();
-    }
-
-    intersectionRanges->Intersection(sourceRanges);
-  }
-
-  MSE_DEBUG("MediaSourceReader(%p)::GetBuffered ranges=%s", this, DumpTimeRanges(intersectionRanges).get());
-  return NS_OK;
-}
-
-nsresult
 MediaSourceReader::ReadMetadata(MediaInfo* aInfo, MetadataTags** aTags)
 {
   MSE_DEBUG("MediaSourceReader(%p)::ReadMetadata tracks=%u/%u audio=%p video=%p",
             this, mEssentialTrackBuffers.Length(), mTrackBuffers.Length(),
             mAudioTrack.get(), mVideoTrack.get());
 
   {
     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
--- a/dom/media/mediasource/MediaSourceReader.h
+++ b/dom/media/mediasource/MediaSourceReader.h
@@ -66,31 +66,22 @@ public:
     return mInfo.HasAudio();
   }
 
   // We can't compute a proper start time since we won't necessarily
   // have the first frame of the resource available. This does the same
   // as chrome/blink and assumes that we always start at t=0.
   virtual int64_t ComputeStartTime(const VideoData* aVideo, const AudioData* aAudio) MOZ_OVERRIDE { return 0; }
 
-  // Buffering waits (in which we decline to present decoded frames because we
-  // "don't have enough") don't really make sense for MSE. The delay is
-  // essentially a streaming heuristic, but JS is supposed to take care of that
-  // in the MSE world. Avoid injecting inexplicable delays.
-  virtual uint32_t GetBufferingWait() { return 0; }
-
   bool IsMediaSeekable() { return true; }
 
   nsresult ReadMetadata(MediaInfo* aInfo, MetadataTags** aTags) MOZ_OVERRIDE;
   nsresult Seek(int64_t aTime, int64_t aStartTime, int64_t aEndTime,
                 int64_t aCurrentTime) MOZ_OVERRIDE;
 
-  // Acquires the decoder monitor, and is thus callable on any thread.
-  nsresult GetBuffered(dom::TimeRanges* aBuffered) MOZ_OVERRIDE;
-
   already_AddRefed<SourceBufferDecoder> CreateSubDecoder(const nsACString& aType);
 
   void AddTrackBuffer(TrackBuffer* aTrackBuffer);
   void RemoveTrackBuffer(TrackBuffer* aTrackBuffer);
   void OnTrackBufferConfigured(TrackBuffer* aTrackBuffer, const MediaInfo& aInfo);
 
   void Shutdown();
 
--- a/dom/media/mediasource/SourceBufferDecoder.cpp
+++ b/dom/media/mediasource/SourceBufferDecoder.cpp
@@ -205,17 +205,18 @@ SourceBufferDecoder::NotifyDataArrived(c
   // force parent decoder's state machine to recompute end time for
   // infinite length media.
   mParentDecoder->NotifyDataArrived(nullptr, 0, 0);
 }
 
 nsresult
 SourceBufferDecoder::GetBuffered(dom::TimeRanges* aBuffered)
 {
-  return mReader->GetBuffered(aBuffered);
+  // XXX: Need mStartTime (from StateMachine) instead of passing 0.
+  return mReader->GetBuffered(aBuffered, 0);
 }
 
 int64_t
 SourceBufferDecoder::ConvertToByteOffset(double aTime)
 {
   int64_t readerOffset = mReader->GetEvictionOffset(aTime);
   if (readerOffset >= 0) {
     return readerOffset;
--- a/dom/media/mediasource/TrackBuffer.cpp
+++ b/dom/media/mediasource/TrackBuffer.cpp
@@ -186,16 +186,17 @@ TrackBuffer::EvictBefore(double aTime)
     }
   }
 }
 
 double
 TrackBuffer::Buffered(dom::TimeRanges* aRanges)
 {
   ReentrantMonitorAutoEnter mon(mParentDecoder->GetReentrantMonitor());
+  MOZ_ASSERT(NS_IsMainThread());
 
   double highestEndTime = 0;
 
   for (uint32_t i = 0; i < mDecoders.Length(); ++i) {
     nsRefPtr<dom::TimeRanges> r = new dom::TimeRanges();
     mDecoders[i]->GetBuffered(r);
     if (r->Length() > 0) {
       highestEndTime = std::max(highestEndTime, r->GetEndTime());
--- a/dom/media/mediasource/TrackBuffer.h
+++ b/dom/media/mediasource/TrackBuffer.h
@@ -38,17 +38,17 @@ public:
   // NotifyDataArrived on the decoder to keep buffered range computation up
   // to date.  Returns false if the append failed.
   bool AppendData(const uint8_t* aData, uint32_t aLength);
   bool EvictData(uint32_t aThreshold);
   void EvictBefore(double aTime);
 
   // Returns the highest end time of all of the buffered ranges in the
   // decoders managed by this TrackBuffer, and returns the union of the
-  // decoders buffered ranges in aRanges. This may be called on any thread.
+  // decoders buffered ranges in aRanges.
   double Buffered(dom::TimeRanges* aRanges);
 
   // Mark the current decoder's resource as ended, clear mCurrentDecoder and
   // reset mLast{Start,End}Timestamp.
   void DiscardDecoder();
 
   void Detach();
 
--- a/dom/media/mediasource/test/mochitest.ini
+++ b/dom/media/mediasource/test/mochitest.ini
@@ -3,18 +3,16 @@ skip-if = e10s || buildapp == 'b2g' # b2
 support-files =
   mediasource.js
   seek.webm seek.webm^headers^
   seek_lowres.webm seek_lowres.webm^headers^
 
 [test_MediaSource.html]
 [test_MediaSource_disabled.html]
 [test_BufferedSeek.html]
-[test_BufferingWait.html]
 [test_FrameSelection.html]
 [test_HaveMetadataUnbufferedSeek.html]
 [test_SeekableAfterEndOfStream.html]
 [test_SeekableAfterEndOfStreamSplit.html]
 [test_SeekableBeforeEndOfStream.html]
 [test_SeekableBeforeEndOfStreamSplit.html]
 [test_SplitAppendDelay.html]
 [test_SplitAppend.html]
-[test_WaitingOnMissingData.html]
deleted file mode 100644
--- a/dom/media/mediasource/test/test_BufferingWait.html
+++ /dev/null
@@ -1,82 +0,0 @@
-<!DOCTYPE html>
-<html><head>
-<meta http-equiv="content-type" content="text/html; charset=windows-1252">
-  <title>MSE: Don't get stuck buffering for too long when we have frames to show</title>
-  <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
-  <script type="text/javascript" src="mediasource.js"></script>
-  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
-</head>
-<body>
-<pre id="test"><script class="testbody" type="text/javascript">
-
-SimpleTest.waitForExplicitFinish();
-
-var receivedSourceOpen = false;
-runWithMSE(function(ms, v) {
-  ms.addEventListener("sourceopen", function() {
-    ok(true, "Receive a sourceopen event");
-    ok(!receivedSourceOpen, "Should only receive one sourceopen for this test");
-    receivedSourceOpen = true;
-    var sb = ms.addSourceBuffer("video/webm");
-    ok(sb, "Create a SourceBuffer");
-
-    function once(target, name, cb) {
-      target.addEventListener(name, function() {
-        target.removeEventListener(name, cb);
-        cb();
-      });
-    }
-    function loadSegment(typedArray) {
-      info(`Loading buffer: [${typedArray.byteOffset}, ${typedArray.byteOffset + typedArray.byteLength})`);
-      return new Promise(function(resolve, reject) {
-        once(sb, 'update', resolve);
-        sb.appendBuffer(typedArray);
-      });
-    }
-
-    function waitUntilTime(targetTime) {
-      return new Promise(function(resolve, reject) {
-        v.addEventListener("waiting", function onwaiting() {
-          info("Got a waiting event at " + v.currentTime);
-          if (v.currentTime >= targetTime) {
-            ok(true, "Reached target time of: " + targetTime);
-            v.removeEventListener("waiting", onwaiting);
-            resolve();
-          }
-        });
-      });
-    }
-
-    fetchWithXHR("seek.webm", function(arrayBuffer) {
-      sb.addEventListener('error', (e) => { ok(false, "Got Error: " + e); SimpleTest.finish(); });
-      loadSegment.bind(null, new Uint8Array(arrayBuffer, 0, 318))().then(
-      loadSegment.bind(null, new Uint8Array(arrayBuffer, 318, 25223-318))).then(
-      loadSegment.bind(null, new Uint8Array(arrayBuffer, 25223, 46712-25223))).then(
-      /* Note - Missing |46712, 67833 - 46712| segment here corresponding to (0.8, 1.2] */
-      /* Note - Missing |67833, 88966 - 67833| segment here corresponding to (1.2, 1.6]  */
-      loadSegment.bind(null, new Uint8Array(arrayBuffer, 88966))).then(function() {
-        var promise = waitUntilTime(0.7);
-        info("Playing video. It should play for a bit, then fire 'waiting'");
-        v.play();
-        return promise;
-      }).then(function() {
-        window.firstStop = Date.now();
-        loadSegment(new Uint8Array(arrayBuffer, 46712, 67833 - 46712));
-        return waitUntilTime(1.0);
-      }).then(function() {
-        var waitDuration = (Date.now() - window.firstStop) / 1000;
-        ok(waitDuration < 15, "Should not spend an inordinate amount of time buffering: " + waitDuration);
-        SimpleTest.finish();
-        /* If we allow the rest of the stream to be played, we get stuck at
-           around 2s. See bug 1093133.
-        once(v, 'ended', SimpleTest.finish.bind(SimpleTest));
-        return loadSegment(new Uint8Array(arrayBuffer, 67833, 88966 - 67833));
-        */
-      });
-    });
-  });
-});
-</script>
-</pre>
-</body>
-</html>
deleted file mode 100644
--- a/dom/media/mediasource/test/test_WaitingOnMissingData.html
+++ /dev/null
@@ -1,72 +0,0 @@
-<!DOCTYPE html>
-<html><head>
-<meta http-equiv="content-type" content="text/html; charset=windows-1252">
-  <title>MSE: |waiting| event when source data is missing</title>
-  <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
-  <script type="text/javascript" src="mediasource.js"></script>
-  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
-</head>
-<body>
-<pre id="test"><script class="testbody" type="text/javascript">
-
-SimpleTest.waitForExplicitFinish();
-
-var receivedSourceOpen = false;
-runWithMSE(function(ms, v) {
-  ms.addEventListener("sourceopen", function() {
-    ok(true, "Receive a sourceopen event");
-    ok(!receivedSourceOpen, "Should only receive one sourceopen for this test");
-    receivedSourceOpen = true;
-    var sb = ms.addSourceBuffer("video/webm");
-    ok(sb, "Create a SourceBuffer");
-
-    function once(target, name, cb) {
-      target.addEventListener(name, function() {
-        target.removeEventListener(name, cb);
-        cb();
-      });
-    }
-    function loadSegment(typedArray) {
-      info(`Loading buffer: [${typedArray.byteOffset}, ${typedArray.byteOffset + typedArray.byteLength})`);
-      return new Promise(function(resolve, reject) {
-        once(sb, 'update', resolve);
-        sb.appendBuffer(typedArray);
-      });
-    }
-
-    fetchWithXHR("seek.webm", function(arrayBuffer) {
-      sb.addEventListener('error', (e) => { ok(false, "Got Error: " + e); SimpleTest.finish(); });
-      loadSegment.bind(null, new Uint8Array(arrayBuffer, 0, 318))().then(
-      loadSegment.bind(null, new Uint8Array(arrayBuffer, 318, 25223-318))).then(
-      loadSegment.bind(null, new Uint8Array(arrayBuffer, 25223, 46712-25223))).then(
-      /* Note - Missing |46712, 67833 - 46712| segment here */
-      loadSegment.bind(null, new Uint8Array(arrayBuffer, 67833, 88966 - 67833))).then(
-      loadSegment.bind(null, new Uint8Array(arrayBuffer, 88966))).then(function() {
-
-        v.addEventListener("waiting", function onwaiting() {
-          ok(true, "Got a waiting event at " + v.currentTime);
-          if (v.currentTime > 0.7 && v.currentTime < 1.2) {
-            v.removeEventListener("waiting", onwaiting);
-            todo(v.currentTime >= 0.8, "See bug 1091774");
-            gotWaiting = true;
-            ok(true, "Received waiting event at time " + v.currentTime);
-            loadSegment(new Uint8Array(arrayBuffer, 46712, 67833 - 46712)).then(() => ms.endOfStream());
-          }
-        });
-
-        info("Playing video. It should play for a bit, then fire 'waiting'");
-        v.play();
-      });
-    });
-  });
-  v.addEventListener("ended", function () {
-    ok(Math.abs(v.duration - 4) < 0.1, "Video has correct duration. This fuzz factor is due to bug 1065207");
-    is(v.currentTime, v.duration, "Video has correct current time: " + v.currentTime);
-    ok(gotWaiting, "Received waiting event and playback continued after data added");
-    SimpleTest.finish();
-  });
-});
-</script>
-</pre>
-</body>
-</html>
--- a/dom/media/ogg/OggReader.cpp
+++ b/dom/media/ogg/OggReader.cpp
@@ -1876,36 +1876,35 @@ nsresult OggReader::SeekBisection(int64_
     NS_ASSERTION(endTime >= seekTarget, "End must be after seek target");
   }
 
   SEEK_LOG(PR_LOG_DEBUG, ("Seek complete in %d bisections.", hops));
 
   return NS_OK;
 }
 
-nsresult OggReader::GetBuffered(dom::TimeRanges* aBuffered)
+nsresult OggReader::GetBuffered(dom::TimeRanges* aBuffered, int64_t aStartTime)
 {
-  MOZ_ASSERT(mStartTime != -1, "Need to finish metadata decode first");
   {
     mozilla::ReentrantMonitorAutoEnter mon(mMonitor);
     if (mIsChained)
       return NS_ERROR_FAILURE;
   }
 #ifdef OGG_ESTIMATE_BUFFERED
-  return MediaDecoderReader::GetBuffered(aBuffered);
+  return MediaDecoderReader::GetBuffered(aBuffered, aStartTime);
 #else
   // HasAudio and HasVideo are not used here as they take a lock and cause
   // a deadlock. Accessing mInfo doesn't require a lock - it doesn't change
   // after metadata is read.
   if (!mInfo.HasValidMedia()) {
     // No need to search through the file if there are no audio or video tracks
     return NS_OK;
   }
 
-  AutoPinned<MediaResource> resource(mDecoder->GetResource());
+  MediaResource* resource = mDecoder->GetResource();
   nsTArray<MediaByteRange> ranges;
   nsresult res = resource->GetCachedRanges(ranges);
   NS_ENSURE_SUCCESS(res, res);
 
   // Traverse across the buffered byte ranges, determining the time ranges
   // they contain. MediaResource::GetNextCachedData(offset) returns -1 when
   // offset is after the end of the media resource, or there's no more cached
   // data after the offset. This loop will run until we've checked every
@@ -1915,17 +1914,17 @@ nsresult OggReader::GetBuffered(dom::Tim
     // Ensure the offsets are after the header pages.
     int64_t startOffset = ranges[index].mStart;
     int64_t endOffset = ranges[index].mEnd;
 
     // Because the granulepos time is actually the end time of the page,
     // we special-case (startOffset == 0) so that the first
     // buffered range always appears to be buffered from the media start
     // time, rather than from the end-time of the first page.
-    int64_t startTime = (startOffset == 0) ? mStartTime : -1;
+    int64_t startTime = (startOffset == 0) ? aStartTime : -1;
 
     // Find the start time of the range. Read pages until we find one with a
     // granulepos which we can convert into a timestamp to use as the time of
     // the start of the buffered range.
     ogg_sync_reset(&sync.mState);
     while (startTime == -1) {
       ogg_page page;
       int32_t discard;
@@ -1983,18 +1982,18 @@ nsresult OggReader::GetBuffered(dom::Tim
       }
     }
 
     if (startTime != -1) {
       // We were able to find a start time for that range, see if we can
       // find an end time.
       int64_t endTime = RangeEndTime(startOffset, endOffset, true);
       if (endTime != -1) {
-        aBuffered->Add((startTime - mStartTime) / static_cast<double>(USECS_PER_S),
-                       (endTime - mStartTime) / static_cast<double>(USECS_PER_S));
+        aBuffered->Add((startTime - aStartTime) / static_cast<double>(USECS_PER_S),
+                       (endTime - aStartTime) / static_cast<double>(USECS_PER_S));
       }
     }
   }
 
   return NS_OK;
 #endif
 }
 
--- a/dom/media/ogg/OggReader.h
+++ b/dom/media/ogg/OggReader.h
@@ -74,17 +74,17 @@ public:
 
   virtual bool HasVideo() {
     return mTheoraState != 0 && mTheoraState->mActive;
   }
 
   virtual nsresult ReadMetadata(MediaInfo* aInfo,
                                 MetadataTags** aTags);
   virtual nsresult Seek(int64_t aTime, int64_t aStartTime, int64_t aEndTime, int64_t aCurrentTime);
-  virtual nsresult GetBuffered(dom::TimeRanges* aBuffered);
+  virtual nsresult GetBuffered(dom::TimeRanges* aBuffered, int64_t aStartTime);
 
   virtual bool IsMediaSeekable() MOZ_OVERRIDE;
 
 private:
   // TODO: DEPRECATED. This uses synchronous decoding.
   // Stores the presentation time of the first frame we'd be able to play if
   // we started playback at the current position. Returns the first video
   // frame, if we have video.
--- a/dom/media/omx/RtspMediaCodecReader.h
+++ b/dom/media/omx/RtspMediaCodecReader.h
@@ -43,17 +43,18 @@ public:
   // 1. Because the Rtsp stream is a/v separated. The buffered data in a/v
   // tracks are not consistent with time stamp.
   // For example: audio buffer: 1~2s, video buffer: 1.5~2.5s
   // 2. Since the Rtsp is a realtime streaming, the buffer we made for
   // RtspMediaResource is quite small. The small buffer implies the time ranges
   // we returned are not useful for the MediaDecodeStateMachine. Unlike the
   // ChannelMediaResource, it has a "cache" that can store the whole streaming
   // data so the |GetBuffered| function can retrieve useful time ranges.
-  virtual nsresult GetBuffered(dom::TimeRanges* aBuffered) MOZ_OVERRIDE {
+  virtual nsresult GetBuffered(dom::TimeRanges* aBuffered,
+                               int64_t aStartTime) MOZ_OVERRIDE {
     return NS_OK;
   }
 
   virtual void SetIdle() MOZ_OVERRIDE;
 
   // Disptach a DecodeVideoFrameTask to decode video data.
   virtual void RequestVideoData(bool aSkipToNextKeyframe,
                                 int64_t aTimeThreshold) MOZ_OVERRIDE;
--- a/dom/media/omx/RtspOmxReader.h
+++ b/dom/media/omx/RtspOmxReader.h
@@ -53,17 +53,18 @@ public:
   // 1. Because the Rtsp stream is a/v separated. The buffered data in a/v
   // tracks are not consistent with time stamp.
   // For example: audio buffer: 1~2s, video buffer: 1.5~2.5s
   // 2. Since the Rtsp is a realtime streaming, the buffer we made for
   // RtspMediaResource is quite small. The small buffer implies the time ranges
   // we returned are not useful for the MediaDecodeStateMachine. Unlike the
   // ChannelMediaResource, it has a "cache" that can store the whole streaming
   // data so the |GetBuffered| function can retrieve useful time ranges.
-  virtual nsresult GetBuffered(mozilla::dom::TimeRanges* aBuffered) MOZ_FINAL MOZ_OVERRIDE {
+  virtual nsresult GetBuffered(mozilla::dom::TimeRanges* aBuffered,
+                               int64_t aStartTime) MOZ_FINAL MOZ_OVERRIDE {
     return NS_OK;
   }
 
   virtual void SetIdle() MOZ_OVERRIDE;
 
   virtual nsresult ReadMetadata(MediaInfo *aInfo, MetadataTags **aTags)
     MOZ_FINAL MOZ_OVERRIDE;
 
--- a/dom/media/raw/RawReader.cpp
+++ b/dom/media/raw/RawReader.cpp
@@ -279,12 +279,12 @@ nsresult RawReader::Seek(int64_t aTime, 
     } else {
       video.forget();
     }
   }
 
   return NS_OK;
 }
 
-nsresult RawReader::GetBuffered(dom::TimeRanges* aBuffered)
+nsresult RawReader::GetBuffered(dom::TimeRanges* aBuffered, int64_t aStartTime)
 {
   return NS_OK;
 }
--- a/dom/media/raw/RawReader.h
+++ b/dom/media/raw/RawReader.h
@@ -35,17 +35,17 @@ public:
   virtual bool HasVideo()
   {
     return true;
   }
 
   virtual nsresult ReadMetadata(MediaInfo* aInfo,
                                 MetadataTags** aTags);
   virtual nsresult Seek(int64_t aTime, int64_t aStartTime, int64_t aEndTime, int64_t aCurrentTime);
-  virtual nsresult GetBuffered(dom::TimeRanges* aBuffered);
+  virtual nsresult GetBuffered(dom::TimeRanges* aBuffered, int64_t aStartTime);
 
   virtual bool IsMediaSeekable() MOZ_OVERRIDE;
 
 private:
   bool ReadFromResource(MediaResource *aResource, uint8_t *aBuf, uint32_t aLength);
 
   RawVideoHeader mMetadata;
   uint32_t mCurrentFrame;
--- a/dom/media/wave/WaveReader.cpp
+++ b/dom/media/wave/WaveReader.cpp
@@ -273,35 +273,34 @@ nsresult WaveReader::Seek(int64_t aTarge
   position += mWavePCMOffset;
   return mDecoder->GetResource()->Seek(nsISeekableStream::NS_SEEK_SET, position);
 }
 
 static double RoundToUsecs(double aSeconds) {
   return floor(aSeconds * USECS_PER_S) / USECS_PER_S;
 }
 
-nsresult WaveReader::GetBuffered(dom::TimeRanges* aBuffered)
+nsresult WaveReader::GetBuffered(dom::TimeRanges* aBuffered, int64_t aStartTime)
 {
   if (!mInfo.HasAudio()) {
     return NS_OK;
   }
-  AutoPinned<MediaResource> resource(mDecoder->GetResource());
-  int64_t startOffset = resource->GetNextCachedData(mWavePCMOffset);
+  int64_t startOffset = mDecoder->GetResource()->GetNextCachedData(mWavePCMOffset);
   while (startOffset >= 0) {
-    int64_t endOffset = resource->GetCachedDataEnd(startOffset);
+    int64_t endOffset = mDecoder->GetResource()->GetCachedDataEnd(startOffset);
     // Bytes [startOffset..endOffset] are cached.
     NS_ASSERTION(startOffset >= mWavePCMOffset, "Integer underflow in GetBuffered");
     NS_ASSERTION(endOffset >= mWavePCMOffset, "Integer underflow in GetBuffered");
 
     // We need to round the buffered ranges' times to microseconds so that they
     // have the same precision as the currentTime and duration attribute on
     // the media element.
     aBuffered->Add(RoundToUsecs(BytesToTime(startOffset - mWavePCMOffset)),
                    RoundToUsecs(BytesToTime(endOffset - mWavePCMOffset)));
-    startOffset = resource->GetNextCachedData(endOffset);
+    startOffset = mDecoder->GetResource()->GetNextCachedData(endOffset);
   }
   return NS_OK;
 }
 
 bool
 WaveReader::ReadAll(char* aBuf, int64_t aSize, int64_t* aBytesRead)
 {
   uint32_t got = 0;
--- a/dom/media/wave/WaveReader.h
+++ b/dom/media/wave/WaveReader.h
@@ -39,17 +39,17 @@ public:
   virtual bool HasVideo()
   {
     return false;
   }
 
   virtual nsresult ReadMetadata(MediaInfo* aInfo,
                                 MetadataTags** aTags);
   virtual nsresult Seek(int64_t aTime, int64_t aStartTime, int64_t aEndTime, int64_t aCurrentTime);
-  virtual nsresult GetBuffered(dom::TimeRanges* aBuffered);
+  virtual nsresult GetBuffered(dom::TimeRanges* aBuffered, int64_t aStartTime);
 
   // To seek in a buffered range, we just have to seek the stream.
   virtual bool IsSeekableInBufferedRanges() {
     return true;
   }
 
   virtual bool IsMediaSeekable() MOZ_OVERRIDE;
 
--- a/dom/media/webm/WebMReader.cpp
+++ b/dom/media/webm/WebMReader.cpp
@@ -1080,24 +1080,23 @@ nsresult WebMReader::Seek(int64_t aTarge
                        this, offset, r));
     if (r != 0) {
       return NS_ERROR_FAILURE;
     }
   }
   return NS_OK;
 }
 
-nsresult WebMReader::GetBuffered(dom::TimeRanges* aBuffered)
+nsresult WebMReader::GetBuffered(dom::TimeRanges* aBuffered, int64_t aStartTime)
 {
-  MOZ_ASSERT(mStartTime != -1, "Need to finish metadata decode first");
   if (aBuffered->Length() != 0) {
     return NS_ERROR_FAILURE;
   }
 
-  AutoPinned<MediaResource> resource(mDecoder->GetResource());
+  MediaResource* resource = mDecoder->GetResource();
 
   // Special case completely cached files.  This also handles local files.
   if (mContext && resource->IsDataCachedToEndOfResource(0)) {
     uint64_t duration = 0;
     if (nestegg_duration(mContext, &duration) == 0) {
       aBuffered->Add(0, duration / NS_PER_S);
       return NS_OK;
     }
@@ -1110,17 +1109,17 @@ nsresult WebMReader::GetBuffered(dom::Ti
   NS_ENSURE_SUCCESS(res, res);
 
   for (uint32_t index = 0; index < ranges.Length(); index++) {
     uint64_t start, end;
     bool rv = mBufferedState->CalculateBufferedForRange(ranges[index].mStart,
                                                         ranges[index].mEnd,
                                                         &start, &end);
     if (rv) {
-      int64_t startOffset = mStartTime * NS_PER_USEC;
+      int64_t startOffset = aStartTime * NS_PER_USEC;
       NS_ASSERTION(startOffset >= 0 && uint64_t(startOffset) <= start,
                    "startOffset negative or larger than start time");
       if (!(startOffset >= 0 && uint64_t(startOffset) <= start)) {
         startOffset = 0;
       }
       double startTime = (start - startOffset) / NS_PER_S;
       double endTime = (end - startOffset) / NS_PER_S;
       // If this range extends to the end of the file, the true end time
--- a/dom/media/webm/WebMReader.h
+++ b/dom/media/webm/WebMReader.h
@@ -131,17 +131,17 @@ public:
     NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
     return mHasVideo;
   }
 
   virtual nsresult ReadMetadata(MediaInfo* aInfo,
                                 MetadataTags** aTags);
   virtual nsresult Seek(int64_t aTime, int64_t aStartTime, int64_t aEndTime,
                         int64_t aCurrentTime);
-  virtual nsresult GetBuffered(dom::TimeRanges* aBuffered);
+  virtual nsresult GetBuffered(dom::TimeRanges* aBuffered, int64_t aStartTime);
   virtual void NotifyDataArrived(const char* aBuffer, uint32_t aLength,
                                  int64_t aOffset);
   virtual int64_t GetEvictionOffset(double aTime);
 
   virtual bool IsMediaSeekable() MOZ_OVERRIDE;
 
 protected:
   // Value passed to NextPacket to determine if we are reading a video or an