Bug 1175768 - Use mirroring for buffered ranges. r=jya
author: Bobby Holley <bobbyholley@gmail.com>
Wed, 17 Jun 2015 14:22:10 -0700
changeset 250402 f5b56f26141b0f9891f210ef1753d41b066b21af
parent 250401 0d17eedad8a9cc9eca4fa69793f07ed678c42d0e
child 250403 9fb9dbd7a0dd018483ca56c725f6eff55012902a
push id: 61547
push user: bobbyholley@gmail.com
push date: Sat, 27 Jun 2015 08:19:49 +0000
treeherder: mozilla-inbound@9fb9dbd7a0dd
reviewers: jya
bugs: 1175768
milestone: 41.0a1
Bug 1175768 - Use mirroring for buffered ranges. r=jya
dom/media/MediaDecoder.cpp
dom/media/MediaDecoder.h
dom/media/MediaDecoderReader.cpp
dom/media/MediaDecoderReader.h
dom/media/MediaDecoderStateMachine.cpp
dom/media/MediaDecoderStateMachine.h
dom/media/MediaFormatReader.cpp
dom/media/MediaFormatReader.h
dom/media/fmp4/MP4Reader.cpp
dom/media/gstreamer/GStreamerReader.cpp
dom/media/gtest/TestMP4Reader.cpp
dom/media/mediasource/MediaSourceDecoder.cpp
dom/media/mediasource/MediaSourceDecoder.h
dom/media/mediasource/MediaSourceReader.cpp
dom/media/mediasource/MediaSourceReader.h
dom/media/mediasource/SourceBuffer.cpp
dom/media/mediasource/SourceBuffer.h
dom/media/mediasource/SourceBufferDecoder.cpp
dom/media/mediasource/TrackBuffer.cpp
dom/media/mediasource/TrackBuffer.h
dom/media/ogg/OggReader.cpp
dom/media/raw/RawReader.cpp
dom/media/wave/WaveReader.cpp
dom/media/webm/WebMReader.cpp
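
For orientation before the diff: the patch replaces cross-thread GetBuffered() calls with Gecko's state-mirroring primitives. The reader owns a Canonical<media::TimeIntervals> on its task queue, the state machine and MediaDecoder hold Mirror<> copies wired up with Connect()/DisconnectIfConnected(), and a WatchManager re-runs BufferedRangeUpdated() whenever the mirrored value changes. The sketch below is a minimal illustration of that pattern, not code from the tree: the FakeReader/FakeStateMachine names, ComputeBuffered(), and the exact header paths are assumptions; the Canonical/Mirror/WatchManager calls follow the usage visible in the hunks that follow.

// Hedged sketch of the canonical/mirror pattern adopted by this patch.
// Assumes the Gecko state-mirroring headers of this era; include paths
// and the Fake* class names are illustrative only.
#include "mozilla/StateMirroring.h"   // Canonical<T>, Mirror<T>
#include "mozilla/StateWatching.h"    // WatchManager<T>
#include "TimeUnits.h"                // media::TimeIntervals

using mozilla::media::TimeIntervals;

class FakeReader {
public:
  explicit FakeReader(mozilla::MediaTaskQueue* aTaskQueue)
    : mBuffered(aTaskQueue, TimeIntervals(), "FakeReader::mBuffered (Canonical)")
  {}

  // The canonical value lives on the reader's task queue; only the reader
  // writes it, and consumers read their own mirrors instead of calling in.
  mozilla::AbstractCanonical<TimeIntervals>* CanonicalBuffered() { return &mBuffered; }

  void UpdateBuffered()
  {
    // Recompute on the owning task queue; assigning to the Canonical
    // propagates the new value to every connected Mirror.
    mBuffered = ComputeBuffered();
  }

private:
  TimeIntervals ComputeBuffered();  // stand-in for GetBuffered()
  mozilla::Canonical<TimeIntervals> mBuffered;
};

class FakeStateMachine {
public:
  FakeStateMachine(mozilla::MediaTaskQueue* aTaskQueue, FakeReader* aReader)
    : mWatchManager(this, aTaskQueue)
    , mBuffered(aTaskQueue, TimeIntervals(), "FakeStateMachine::mBuffered (Mirror)")
  {
    // Connect the mirror to the reader's canonical and re-run
    // BufferedRangeUpdated() whenever the mirrored value changes.
    mBuffered.Connect(aReader->CanonicalBuffered());
    mWatchManager.Watch(mBuffered, &FakeStateMachine::BufferedRangeUpdated);
  }

  void BufferedRangeUpdated()
  {
    // Read the locally mirrored copy; no cross-thread GetBuffered() call
    // and no ReentrantMonitor needed.
    const TimeIntervals& buffered = mBuffered.Ref();
    (void)buffered;
  }

  void Shutdown()
  {
    // Disconnect mirrors and shut down the watch manager before tearing
    // down the task queue, mirroring the order used in the patch.
    mBuffered.DisconnectIfConnected();
    mWatchManager.Shutdown();
  }

private:
  mozilla::WatchManager<FakeStateMachine> mWatchManager;
  mozilla::Mirror<TimeIntervals> mBuffered;
};

The payoff, visible throughout the hunks below, is that GetBuffered() becomes a task-queue-only implementation detail feeding mBuffered, while MediaDecoder and MediaDecoderStateMachine read their own mirrors, which is why the removed lines that took the decoder's ReentrantMonitor around GetBuffered() are no longer needed.
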
--- a/dom/media/MediaDecoder.cpp
+++ b/dom/media/MediaDecoder.cpp
@@ -323,16 +323,17 @@ void MediaDecoder::SetInfinite(bool aInf
 bool MediaDecoder::IsInfinite()
 {
   MOZ_ASSERT(NS_IsMainThread());
   return mInfiniteStream;
 }
 
 MediaDecoder::MediaDecoder() :
   mWatchManager(this, AbstractThread::MainThread()),
+  mBuffered(AbstractThread::MainThread(), TimeIntervals(), "MediaDecoder::mBuffered (Mirror)"),
   mNextFrameStatus(AbstractThread::MainThread(),
                    MediaDecoderOwner::NEXT_FRAME_UNINITIALIZED,
                    "MediaDecoder::mNextFrameStatus (Mirror)"),
   mDecoderPosition(0),
   mPlaybackPosition(0),
   mLogicalPosition(0.0),
   mCurrentPosition(AbstractThread::MainThread(), 0, "MediaDecoder::mCurrentPosition (Mirror)"),
   mVolume(AbstractThread::MainThread(), 0.0, "MediaDecoder::mVolume (Canonical)"),
@@ -1253,20 +1254,22 @@ bool MediaDecoder::OnDecodeTaskQueue() c
 void
 MediaDecoder::SetStateMachine(MediaDecoderStateMachine* aStateMachine)
 {
   MOZ_ASSERT_IF(aStateMachine, !mDecoderStateMachine);
   mDecoderStateMachine = aStateMachine;
 
   if (mDecoderStateMachine) {
     mStateMachineDuration.Connect(mDecoderStateMachine->CanonicalDuration());
+    mBuffered.Connect(mDecoderStateMachine->CanonicalBuffered());
     mNextFrameStatus.Connect(mDecoderStateMachine->CanonicalNextFrameStatus());
     mCurrentPosition.Connect(mDecoderStateMachine->CanonicalCurrentPosition());
   } else {
     mStateMachineDuration.DisconnectIfConnected();
+    mBuffered.DisconnectIfConnected();
     mNextFrameStatus.DisconnectIfConnected();
     mCurrentPosition.DisconnectIfConnected();
   }
 }
 
 ReentrantMonitor& MediaDecoder::GetReentrantMonitor() {
   return mReentrantMonitor;
 }
@@ -1288,18 +1291,17 @@ void MediaDecoder::Invalidate()
   if (mVideoFrameContainer) {
     mVideoFrameContainer->Invalidate();
   }
 }
 
 // Constructs the time ranges representing what segments of the media
 // are buffered and playable.
 media::TimeIntervals MediaDecoder::GetBuffered() {
-  NS_ENSURE_TRUE(mDecoderStateMachine && !mShuttingDown, media::TimeIntervals::Invalid());
-  return mDecoderStateMachine->GetBuffered();
+  return mBuffered.Ref();
 }
 
 size_t MediaDecoder::SizeOfVideoQueue() {
   if (mDecoderStateMachine) {
     return mDecoderStateMachine->SizeOfVideoQueue();
   }
   return 0;
 }
@@ -1307,16 +1309,18 @@ size_t MediaDecoder::SizeOfVideoQueue() 
 size_t MediaDecoder::SizeOfAudioQueue() {
   if (mDecoderStateMachine) {
     return mDecoderStateMachine->SizeOfAudioQueue();
   }
   return 0;
 }
 
 void MediaDecoder::NotifyDataArrived(uint32_t aLength, int64_t aOffset) {
+  MOZ_ASSERT(NS_IsMainThread());
+
   if (mDecoderStateMachine) {
     mDecoderStateMachine->DispatchNotifyDataArrived(aLength, aOffset);
   }
 
   // ReadyState computation depends on MediaDecoder::CanPlayThrough, which
   // depends on the download rate.
   UpdateReadyState();
 }
--- a/dom/media/MediaDecoder.h
+++ b/dom/media/MediaDecoder.h
@@ -887,16 +887,19 @@ protected:
 
   // Called by the state machine to notify the decoder that the duration
   // has changed.
   void DurationChanged();
 
   // State-watching manager.
   WatchManager<MediaDecoder> mWatchManager;
 
+  // Buffered range, mirrored from the reader.
+  Mirror<media::TimeIntervals> mBuffered;
+
   // NextFrameStatus, mirrored from the state machine.
   Mirror<MediaDecoderOwner::NextFrameStatus> mNextFrameStatus;
 
   /******
    * The following members should be accessed with the decoder lock held.
    ******/
 
   // Current decoding position in the stream. This is where the decoder
--- a/dom/media/MediaDecoderReader.cpp
+++ b/dom/media/MediaDecoderReader.cpp
@@ -64,16 +64,18 @@ public:
 
 MediaDecoderReader::MediaDecoderReader(AbstractMediaDecoder* aDecoder,
                                        MediaTaskQueue* aBorrowedTaskQueue)
   : mAudioCompactor(mAudioQueue)
   , mDecoder(aDecoder)
   , mTaskQueue(aBorrowedTaskQueue ? aBorrowedTaskQueue
                                   : new MediaTaskQueue(GetMediaThreadPool(MediaThreadType::PLAYBACK),
                                                        /* aSupportsTailDispatch = */ true))
+  , mWatchManager(this, mTaskQueue)
+  , mBuffered(mTaskQueue, TimeIntervals(), "MediaDecoderReader::mBuffered (Canonical)")
   , mDuration(mTaskQueue, NullableTimeUnit(), "MediaDecoderReader::mDuration (Mirror)")
   , mIgnoreAudioOutputFormat(false)
   , mStartTime(-1)
   , mHitAudioDecodeError(false)
   , mShutdown(false)
   , mTaskQueueIsBorrowed(!!aBorrowedTaskQueue)
   , mAudioDiscontinuity(false)
   , mVideoDiscontinuity(false)
@@ -87,16 +89,19 @@ MediaDecoderReader::MediaDecoderReader(A
 }
 
 void
 MediaDecoderReader::InitializationTask()
 {
   if (mDecoder->CanonicalDurationOrNull()) {
     mDuration.Connect(mDecoder->CanonicalDurationOrNull());
   }
+
+  // Initialize watchers.
+  mWatchManager.Watch(mDuration, &MediaDecoderReader::UpdateBuffered);
 }
 
 MediaDecoderReader::~MediaDecoderReader()
 {
   MOZ_ASSERT(mShutdown);
   MOZ_ASSERT(!mDecoder);
   ResetDecode();
   MOZ_COUNT_DTOR(MediaDecoderReader);
@@ -156,26 +161,27 @@ VideoData* MediaDecoderReader::DecodeToF
   if (eof) {
     VideoQueue().Finish();
   }
   VideoData* d = nullptr;
   return (d = VideoQueue().PeekFront()) ? d : nullptr;
 }
 
 void
-MediaDecoderReader::SetStartTime(int64_t aStartTime)
+MediaDecoderReader::UpdateBuffered()
 {
-  mDecoder->GetReentrantMonitor().AssertCurrentThreadIn();
-  MOZ_ASSERT(mStartTime == -1);
-  mStartTime = aStartTime;
+  MOZ_ASSERT(OnTaskQueue());
+  NS_ENSURE_TRUE_VOID(!mShutdown);
+  mBuffered = GetBuffered();
 }
 
 media::TimeIntervals
 MediaDecoderReader::GetBuffered()
 {
+  MOZ_ASSERT(OnTaskQueue());
   NS_ENSURE_TRUE(mStartTime >= 0, media::TimeIntervals());
   AutoPinned<MediaResource> stream(mDecoder->GetResource());
 
   if (!mDuration.ReadOnWrongThread().isSome()) {
     return TimeIntervals();
   }
 
   return GetEstimatedBufferedTimeRanges(stream, mDuration.ReadOnWrongThread().ref().ToMicroseconds());
@@ -353,16 +359,20 @@ MediaDecoderReader::Shutdown()
   MOZ_ASSERT(OnTaskQueue());
   mShutdown = true;
 
   mBaseAudioPromise.RejectIfExists(END_OF_STREAM, __func__);
   mBaseVideoPromise.RejectIfExists(END_OF_STREAM, __func__);
 
   ReleaseMediaResources();
   mDuration.DisconnectIfConnected();
+  mBuffered.DisconnectAll();
+
+  // Shut down the watch manager before shutting down our task queue.
+  mWatchManager.Shutdown();
 
   nsRefPtr<ShutdownPromise> p;
 
   // Spin down the task queue if necessary. We wait until BreakCycles to null
   // out mTaskQueue, since otherwise any remaining tasks could crash when they
   // invoke OnTaskQueue().
   if (mTaskQueue && !mTaskQueueIsBorrowed) {
     // If we own our task queue, shutdown ends when the task queue is done.
--- a/dom/media/MediaDecoderReader.h
+++ b/dom/media/MediaDecoderReader.h
@@ -198,32 +198,37 @@ public:
   // Tell the reader that the data decoded are not for direct playback, so it
   // can accept more files, in particular those which have more channels than
   // available in the audio output.
   void SetIgnoreAudioOutputFormat()
   {
     mIgnoreAudioOutputFormat = true;
   }
 
-  // Populates aBuffered with the time ranges which are buffered. This function
-  // is called on the main, decode, and state machine threads.
+  // Populates aBuffered with the time ranges which are buffered. This may only
+  // be called on the decode task queue, and should only be used internally by
+  // UpdateBuffered - mBuffered (or mirrors of it) should be used for everything
+  // else.
   //
   // This base implementation in MediaDecoderReader estimates the time ranges
   // buffered by interpolating the cached byte ranges with the duration
   // of the media. Reader subclasses should override this method if they
   // can quickly calculate the buffered ranges more accurately.
   //
   // The primary advantage of this implementation in the reader base class
   // is that it's a fast approximation, which does not perform any I/O.
   //
   // The OggReader relies on this base implementation not performing I/O,
   // since in FirefoxOS we can't do I/O on the main thread, where this is
   // called.
   virtual media::TimeIntervals GetBuffered();
 
+  // Recomputes mBuffered.
+  virtual void UpdateBuffered();
+
   // MediaSourceReader opts out of the start-time-guessing mechanism.
   virtual bool ForceZeroStartTime() const { return false; }
 
   // The MediaDecoderStateMachine uses various heuristics that assume that
   // raw media data is arriving sequentially from a network channel. This
   // makes sense in the <video src="foo"> case, but not for more advanced use
   // cases like MSE.
   virtual bool UseBufferingHeuristics() { return true; }
@@ -235,22 +240,24 @@ public:
   // Returns the number of bytes of memory allocated by structures/frames in
   // the audio queue.
   size_t SizeOfAudioQueueInBytes() const;
 
   virtual size_t SizeOfVideoQueueInFrames();
   virtual size_t SizeOfAudioQueueInFrames();
 
 protected:
+  friend class TrackBuffer;
   virtual void NotifyDataArrivedInternal(uint32_t aLength, int64_t aOffset) { }
   void NotifyDataArrived(uint32_t aLength, int64_t aOffset)
   {
     MOZ_ASSERT(OnTaskQueue());
     NS_ENSURE_TRUE_VOID(!mShutdown);
     NotifyDataArrivedInternal(aLength, aOffset);
+    UpdateBuffered();
   }
 
 public:
   void DispatchNotifyDataArrived(uint32_t aLength, int64_t aOffset)
   {
     RefPtr<nsRunnable> r =
       NS_NewRunnableMethodWithArgs<uint32_t, int64_t>(this, &MediaDecoderReader::NotifyDataArrived, aLength, aOffset);
     TaskQueue()->Dispatch(r.forget(), AbstractThread::DontAssertDispatchSuccess);
@@ -271,17 +278,30 @@ public:
   // TODO: DEPRECATED.  This uses synchronous decoding.
   VideoData* DecodeToFirstVideoData();
 
   MediaInfo GetMediaInfo() { return mInfo; }
 
   // Indicates if the media is seekable.
   // ReadMetada should be called before calling this method.
   virtual bool IsMediaSeekable() = 0;
-  void SetStartTime(int64_t aStartTime);
+
+  void DispatchSetStartTime(int64_t aStartTime)
+  {
+    nsRefPtr<MediaDecoderReader> self = this;
+    nsCOMPtr<nsIRunnable> r =
+      NS_NewRunnableFunction([self, aStartTime] () -> void
+    {
+      MOZ_ASSERT(self->OnTaskQueue());
+      MOZ_ASSERT(self->mStartTime == -1);
+      self->mStartTime = aStartTime;
+      self->UpdateBuffered();
+    });
+    TaskQueue()->Dispatch(r.forget());
+  }
 
   MediaTaskQueue* TaskQueue() {
     return mTaskQueue;
   }
 
   // Returns true if the reader implements RequestAudioData()
   // and RequestVideoData() asynchronously, rather than using the
   // implementation in this class to adapt the old synchronous to
@@ -330,16 +350,25 @@ protected:
   AudioCompactor mAudioCompactor;
 
   // Reference to the owning decoder object.
   AbstractMediaDecoder* mDecoder;
 
   // Decode task queue.
   nsRefPtr<MediaTaskQueue> mTaskQueue;
 
+  // State-watching manager.
+  WatchManager<MediaDecoderReader> mWatchManager;
+
+  // Buffered range.
+  Canonical<media::TimeIntervals> mBuffered;
+public:
+  AbstractCanonical<media::TimeIntervals>* CanonicalBuffered() { return &mBuffered; }
+protected:
+
   // Stores presentation info required for playback.
   MediaInfo mInfo;
 
   // Duration, mirrored from the state machine task queue.
   Mirror<media::NullableTimeUnit> mDuration;
 
   // Whether we should accept media that we know we can't play
   // directly, because they have a number of channel higher than
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -182,16 +182,17 @@ MediaDecoderStateMachine::MediaDecoderSt
   mTaskQueue(new MediaTaskQueue(GetMediaThreadPool(MediaThreadType::PLAYBACK),
                                 /* aSupportsTailDispatch = */ true)),
   mWatchManager(this, mTaskQueue),
   mRealTime(aRealTime),
   mDispatchedStateMachine(false),
   mDelayedScheduler(this),
   mState(DECODER_STATE_DECODING_NONE, "MediaDecoderStateMachine::mState"),
   mPlayDuration(0),
+  mBuffered(mTaskQueue, TimeIntervals(), "MediaDecoderStateMachine::mBuffered (Mirror)"),
   mDuration(mTaskQueue, NullableTimeUnit(), "MediaDecoderStateMachine::mDuration (Canonical"),
   mEstimatedDuration(mTaskQueue, NullableTimeUnit(),
                     "MediaDecoderStateMachine::mEstimatedDuration (Mirror)"),
   mExplicitDuration(mTaskQueue, Maybe<double>(),
                     "MediaDecoderStateMachine::mExplicitDuration (Mirror)"),
   mObservedDuration(TimeUnit(), "MediaDecoderStateMachine::mObservedDuration"),
   mPlayState(mTaskQueue, MediaDecoder::PLAY_STATE_LOADING,
              "MediaDecoderStateMachine::mPlayState (Mirror)"),
@@ -291,26 +292,28 @@ MediaDecoderStateMachine::~MediaDecoderS
 }
 
 void
 MediaDecoderStateMachine::InitializationTask()
 {
   MOZ_ASSERT(OnTaskQueue());
 
   // Connect mirrors.
+  mBuffered.Connect(mReader->CanonicalBuffered());
   mEstimatedDuration.Connect(mDecoder->CanonicalEstimatedDuration());
   mExplicitDuration.Connect(mDecoder->CanonicalExplicitDuration());
   mPlayState.Connect(mDecoder->CanonicalPlayState());
   mNextPlayState.Connect(mDecoder->CanonicalNextPlayState());
   mLogicallySeeking.Connect(mDecoder->CanonicalLogicallySeeking());
   mVolume.Connect(mDecoder->CanonicalVolume());
   mLogicalPlaybackRate.Connect(mDecoder->CanonicalPlaybackRate());
   mPreservesPitch.Connect(mDecoder->CanonicalPreservesPitch());
 
   // Initialize watchers.
+  mWatchManager.Watch(mBuffered, &MediaDecoderStateMachine::BufferedRangeUpdated);
   mWatchManager.Watch(mState, &MediaDecoderStateMachine::UpdateNextFrameStatus);
   mWatchManager.Watch(mAudioCompleted, &MediaDecoderStateMachine::UpdateNextFrameStatus);
   mWatchManager.Watch(mVolume, &MediaDecoderStateMachine::VolumeChanged);
   mWatchManager.Watch(mLogicalPlaybackRate, &MediaDecoderStateMachine::LogicalPlaybackRateChanged);
   mWatchManager.Watch(mPreservesPitch, &MediaDecoderStateMachine::PreservesPitchChanged);
   mWatchManager.Watch(mEstimatedDuration, &MediaDecoderStateMachine::RecomputeDuration);
   mWatchManager.Watch(mExplicitDuration, &MediaDecoderStateMachine::RecomputeDuration);
   mWatchManager.Watch(mObservedDuration, &MediaDecoderStateMachine::RecomputeDuration);
@@ -1634,32 +1637,29 @@ void MediaDecoderStateMachine::PlayState
 
 void MediaDecoderStateMachine::LogicallySeekingChanged()
 {
   MOZ_ASSERT(OnTaskQueue());
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   ScheduleStateMachine();
 }
 
-void MediaDecoderStateMachine::NotifyDataArrived(uint32_t aLength,
-                                                 int64_t aOffset)
+void MediaDecoderStateMachine::BufferedRangeUpdated()
 {
   MOZ_ASSERT(OnTaskQueue());
 
   // While playing an unseekable stream of unknown duration, mObservedDuration
   // is updated (in AdvanceFrame()) as we play. But if data is being downloaded
   // faster than played, mObserved won't reflect the end of playable data
   // since we haven't played the frame at the end of buffered data. So update
   // mObservedDuration here as new data is downloaded to prevent such a lag.
-  media::TimeIntervals buffered{mDecoder->GetBuffered()};
-  if (!buffered.IsInvalid()) {
+  if (!mBuffered.Ref().IsInvalid()) {
     bool exists;
-    media::TimeUnit end{buffered.GetEnd(&exists)};
+    media::TimeUnit end{mBuffered.Ref().GetEnd(&exists)};
     if (exists) {
-      ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
       mObservedDuration = std::max(mObservedDuration.Ref(), end);
     }
   }
 }
 
 nsRefPtr<MediaDecoder::SeekPromise>
 MediaDecoderStateMachine::Seek(SeekTarget aTarget)
 {
@@ -2045,27 +2045,26 @@ bool MediaDecoderStateMachine::HasLowUnd
   return HasLowUndecodedData(mLowDataThresholdUsecs);
 }
 
 bool MediaDecoderStateMachine::HasLowUndecodedData(int64_t aUsecs)
 {
   MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
   NS_ASSERTION(mState > DECODER_STATE_DECODING_FIRSTFRAME,
-               "Must have loaded first frame for GetBuffered() to work");
-
-  // If we don't have a duration, GetBuffered is probably not going to produce
+               "Must have loaded first frame for mBuffered to be valid");
+
+  // If we don't have a duration, mBuffered is probably not going to have
   // a useful buffered range. Return false here so that we don't get stuck in
   // buffering mode for live streams.
   if (Duration().IsInfinite()) {
     return false;
   }
 
-  media::TimeIntervals buffered{mReader->GetBuffered()};
-  if (buffered.IsInvalid()) {
+  if (mBuffered.Ref().IsInvalid()) {
     return false;
   }
 
   int64_t endOfDecodedVideoData = INT64_MAX;
   if (HasVideo() && !VideoQueue().AtEndOfStream()) {
     endOfDecodedVideoData = VideoQueue().Peek() ? VideoQueue().Peek()->GetEndTime() : mVideoFrameEndTime;
   }
   int64_t endOfDecodedAudioData = INT64_MAX;
@@ -2077,17 +2076,17 @@ bool MediaDecoderStateMachine::HasLowUnd
   }
   int64_t endOfDecodedData = std::min(endOfDecodedVideoData, endOfDecodedAudioData);
   if (Duration().ToMicroseconds() < endOfDecodedData) {
     // Our duration is not up to date. No point buffering.
     return false;
   }
   media::TimeInterval interval(media::TimeUnit::FromMicroseconds(endOfDecodedData),
                                media::TimeUnit::FromMicroseconds(std::min(endOfDecodedData + aUsecs, Duration().ToMicroseconds())));
-  return endOfDecodedData != INT64_MAX && !buffered.Contains(interval);
+  return endOfDecodedData != INT64_MAX && !mBuffered.Ref().Contains(interval);
 }
 
 void
 MediaDecoderStateMachine::DecodeError()
 {
   MOZ_ASSERT(OnTaskQueue());
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   if (IsShutdown()) {
@@ -2129,18 +2128,17 @@ MediaDecoderStateMachine::OnMetadataRead
   // generally the case, unless we're coming out of dormant mode).
   if (!mStartTimeRendezvous) {
     mStartTimeRendezvous = new StartTimeRendezvous(TaskQueue(), HasAudio(), HasVideo(),
                                                    mReader->ForceZeroStartTime() || IsRealTime());
 
     mStartTimeRendezvous->AwaitStartTime()->Then(TaskQueue(), __func__,
       [self] () -> void {
         NS_ENSURE_TRUE_VOID(!self->IsShutdown());
-        ReentrantMonitorAutoEnter mon(self->mDecoder->GetReentrantMonitor());
-        self->mReader->SetStartTime(self->StartTime());
+        self->mReader->DispatchSetStartTime(self->StartTime());
       },
       [] () -> void { NS_WARNING("Setting start time on reader failed"); }
     );
   }
 
   if (mInfo.mMetadataDuration.isSome()) {
     RecomputeDuration();
   } else if (mInfo.mUnadjustedMetadataEndTime.isSome()) {
@@ -2477,16 +2475,17 @@ MediaDecoderStateMachine::FinishShutdown
 
   // The reader's listeners hold references to the state machine,
   // creating a cycle which keeps the state machine and its shared
   // thread pools alive. So break it here.
   AudioQueue().ClearListeners();
   VideoQueue().ClearListeners();
 
   // Disconnect canonicals and mirrors before shutting down our task queue.
+  mBuffered.DisconnectIfConnected();
   mEstimatedDuration.DisconnectIfConnected();
   mExplicitDuration.DisconnectIfConnected();
   mPlayState.DisconnectIfConnected();
   mNextPlayState.DisconnectIfConnected();
   mLogicallySeeking.DisconnectIfConnected();
   mVolume.DisconnectIfConnected();
   mLogicalPlaybackRate.DisconnectIfConnected();
   mPreservesPitch.DisconnectIfConnected();
--- a/dom/media/MediaDecoderStateMachine.h
+++ b/dom/media/MediaDecoderStateMachine.h
@@ -247,46 +247,37 @@ public:
 
   // Must be called with the decode monitor held.
   bool IsSeeking() const {
     MOZ_ASSERT(OnTaskQueue());
     AssertCurrentThreadInMonitor();
     return mState == DECODER_STATE_SEEKING;
   }
 
-  media::TimeIntervals GetBuffered() {
-    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
-    return mReader->GetBuffered();
-  }
-
   size_t SizeOfVideoQueue() {
     if (mReader) {
       return mReader->SizeOfVideoQueueInBytes();
     }
     return 0;
   }
 
   size_t SizeOfAudioQueue() {
     if (mReader) {
       return mReader->SizeOfAudioQueueInBytes();
     }
     return 0;
   }
 
-private:
-  void NotifyDataArrived(uint32_t aLength, int64_t aOffset);
-public:
   void DispatchNotifyDataArrived(uint32_t aLength, int64_t aOffset)
   {
-    RefPtr<nsRunnable> r =
-      NS_NewRunnableMethodWithArgs<uint32_t, int64_t>(this, &MediaDecoderStateMachine::NotifyDataArrived, aLength, aOffset);
-    TaskQueue()->Dispatch(r.forget());
     mReader->DispatchNotifyDataArrived(aLength, aOffset);
   }
 
+  AbstractCanonical<media::TimeIntervals>* CanonicalBuffered() { return mReader->CanonicalBuffered(); }
+
   // Returns the state machine task queue.
   MediaTaskQueue* TaskQueue() const { return mTaskQueue; }
 
   // Calls ScheduleStateMachine() after taking the decoder lock. Also
   // notifies the decoder thread in case it's waiting on the decoder lock.
   void ScheduleStateMachineWithLockAndWakeDecoder();
 
   // Schedules the shared state machine thread to run the state machine.
@@ -397,16 +388,18 @@ public:
 
 protected:
   virtual ~MediaDecoderStateMachine();
 
   void AssertCurrentThreadInMonitor() const { mDecoder->GetReentrantMonitor().AssertCurrentThreadIn(); }
 
   void SetState(State aState);
 
+  void BufferedRangeUpdated();
+
   // Inserts MediaData* samples into their respective MediaQueues.
   // aSample must not be null.
   void Push(AudioData* aSample);
   void Push(VideoData* aSample);
   void PushFront(AudioData* aSample);
   void PushFront(VideoData* aSample);
 
   void OnAudioPopped();
@@ -942,16 +935,19 @@ private:
   // by decoder monitor.
   int64_t mPlayDuration;
 
   // Time that buffering started. Used for buffering timeout and only
   // accessed on the state machine thread. This is null while we're not
   // buffering.
   TimeStamp mBufferingStart;
 
+  // The buffered range. Mirrored from the decoder thread.
+  Mirror<media::TimeIntervals> mBuffered;
+
   // Duration of the media. This is guaranteed to be non-null after we finish
   // decoding the first frame.
   Canonical<media::NullableTimeUnit> mDuration;
   media::TimeUnit Duration() const { MOZ_ASSERT(OnTaskQueue()); return mDuration.Ref().ref(); }
 public:
   AbstractCanonical<media::NullableTimeUnit>* CanonicalDuration() { return &mDuration; }
 protected:
 
--- a/dom/media/MediaFormatReader.cpp
+++ b/dom/media/MediaFormatReader.cpp
@@ -68,17 +68,16 @@ MediaFormatReader::MediaFormatReader(Abs
   , mAudio(this, MediaData::AUDIO_DATA, Preferences::GetUint("media.audio-decode-ahead", 2))
   , mVideo(this, MediaData::VIDEO_DATA, Preferences::GetUint("media.video-decode-ahead", 2))
   , mLastReportedNumDecodedFrames(0)
   , mLayersBackendType(layers::LayersBackend::LAYERS_NONE)
   , mInitDone(false)
   , mSeekable(false)
   , mIsEncrypted(false)
   , mTrackDemuxersMayBlock(false)
-  , mCachedTimeRangesStale(true)
 #if defined(READER_DORMANT_HEURISTIC)
   , mDormantEnabled(Preferences::GetBool("media.decoder.heuristic.dormant.enabled", false))
 #endif
 {
   MOZ_ASSERT(aDemuxer);
   MOZ_COUNT_CTOR(MediaFormatReader);
 }
 
@@ -789,21 +788,18 @@ MediaFormatReader::UpdateReceivedNewData
 
   if (!decoder.mReceivedNewData) {
     return false;
   }
   decoder.mReceivedNewData = false;
   decoder.mWaitingForData = false;
   bool hasLastEnd;
   media::TimeUnit lastEnd = decoder.mTimeRanges.GetEnd(&hasLastEnd);
-  {
-    MonitorAutoLock lock(decoder.mMonitor);
-    // Update our cached TimeRange.
-    decoder.mTimeRanges = decoder.mTrackDemuxer->GetBuffered();
-  }
+  // Update our cached TimeRange.
+  decoder.mTimeRanges = decoder.mTrackDemuxer->GetBuffered();
   if (decoder.mTimeRanges.Length() &&
       (!hasLastEnd || decoder.mTimeRanges.GetEnd() > lastEnd)) {
     // New data was added after our previous end, we can clear the EOS flag.
     decoder.mDemuxEOS = false;
     decoder.mDemuxEOSServiced = false;
   }
 
   if (decoder.mError) {
@@ -1397,77 +1393,49 @@ MediaFormatReader::GetEvictionOffset(dou
     videoOffset = HasVideo() ? mVideo.mTrackDemuxer->GetEvictionOffset(media::TimeUnit::FromSeconds(aTime)) : INT64_MAX;
   }
   return std::min(audioOffset, videoOffset);
 }
 
 media::TimeIntervals
 MediaFormatReader::GetBuffered()
 {
+  MOZ_ASSERT(OnTaskQueue());
   media::TimeIntervals videoti;
   media::TimeIntervals audioti;
   media::TimeIntervals intervals;
 
   if (!mInitDone) {
     return intervals;
   }
   int64_t startTime;
   {
     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
     NS_ENSURE_TRUE(mStartTime >= 0, media::TimeIntervals());
     startTime = mStartTime;
   }
-  if (NS_IsMainThread()) {
-    if (mCachedTimeRangesStale) {
-      MOZ_ASSERT(mMainThreadDemuxer);
-      if (!mDataRange.IsEmpty()) {
-        mMainThreadDemuxer->NotifyDataArrived(mDataRange.Length(), mDataRange.mStart);
-      }
-      if (mVideoTrackDemuxer) {
-        videoti = mVideoTrackDemuxer->GetBuffered();
-      }
-      if (mAudioTrackDemuxer) {
-        audioti = mAudioTrackDemuxer->GetBuffered();
-      }
-      if (HasAudio() && HasVideo()) {
-        mCachedTimeRanges = media::Intersection(Move(videoti), Move(audioti));
-      } else if (HasAudio()) {
-        mCachedTimeRanges = Move(audioti);
-      } else if (HasVideo()) {
-        mCachedTimeRanges = Move(videoti);
-      }
-      mDataRange = ByteInterval();
-      mCachedTimeRangesStale = false;
-    }
-    intervals = mCachedTimeRanges;
-  } else {
-    if (OnTaskQueue()) {
-      // Ensure we have up to date buffered time range.
-      if (HasVideo()) {
-        UpdateReceivedNewData(TrackType::kVideoTrack);
-      }
-      if (HasAudio()) {
-        UpdateReceivedNewData(TrackType::kAudioTrack);
-      }
-    }
-    if (HasVideo()) {
-      MonitorAutoLock lock(mVideo.mMonitor);
-      videoti = mVideo.mTimeRanges;
-    }
-    if (HasAudio()) {
-      MonitorAutoLock lock(mAudio.mMonitor);
-      audioti = mAudio.mTimeRanges;
-    }
-    if (HasAudio() && HasVideo()) {
-      intervals = media::Intersection(Move(videoti), Move(audioti));
-    } else if (HasAudio()) {
-      intervals = Move(audioti);
-    } else if (HasVideo()) {
-      intervals = Move(videoti);
-    }
+  // Ensure we have up to date buffered time range.
+  if (HasVideo()) {
+    UpdateReceivedNewData(TrackType::kVideoTrack);
+  }
+  if (HasAudio()) {
+    UpdateReceivedNewData(TrackType::kAudioTrack);
+  }
+  if (HasVideo()) {
+    videoti = mVideo.mTimeRanges;
+  }
+  if (HasAudio()) {
+    audioti = mAudio.mTimeRanges;
+  }
+  if (HasAudio() && HasVideo()) {
+    intervals = media::Intersection(Move(videoti), Move(audioti));
+  } else if (HasAudio()) {
+    intervals = Move(audioti);
+  } else if (HasVideo()) {
+    intervals = Move(videoti);
   }
 
   return intervals.Shift(media::TimeUnit::FromMicroseconds(-startTime));
 }
 
 bool MediaFormatReader::IsDormantNeeded()
 {
 #if defined(READER_DORMANT_HEURISTIC)
@@ -1538,56 +1506,49 @@ MediaFormatReader::NotifyDemuxer(uint32_
   }
 }
 
 void
 MediaFormatReader::NotifyDataArrivedInternal(uint32_t aLength, int64_t aOffset)
 {
   MOZ_ASSERT(OnTaskQueue());
   MOZ_ASSERT(aLength);
-  if (mDataRange.IsEmpty()) {
-    mDataRange = ByteInterval(aOffset, aOffset + aLength);
-  } else {
-    mDataRange = mDataRange.Span(ByteInterval(aOffset, aOffset + aLength));
-  }
-  mCachedTimeRangesStale = true;
 
   if (!mInitDone) {
     return;
   }
 
-  // Queue a task to notify our main demuxer.
-  RefPtr<nsIRunnable> task =
-    NS_NewRunnableMethodWithArgs<int32_t, uint64_t>(
-      this, &MediaFormatReader::NotifyDemuxer,
+  // Queue a task to notify our main thread demuxer.
+  nsCOMPtr<nsIRunnable> task =
+    NS_NewRunnableMethodWithArgs<uint32_t, int64_t>(
+      mMainThreadDemuxer, &MediaDataDemuxer::NotifyDataArrived,
       aLength, aOffset);
-  TaskQueue()->Dispatch(task.forget());
+  AbstractThread::MainThread()->Dispatch(task.forget());
+
+  NotifyDemuxer(aLength, aOffset);
 }
 
 void
 MediaFormatReader::NotifyDataRemoved()
 {
-  MOZ_ASSERT(NS_IsMainThread());
-
-  mDataRange = ByteInterval();
-  mCachedTimeRangesStale = true;
+  MOZ_ASSERT(OnTaskQueue());
 
   if (!mInitDone) {
     return;
   }
 
   MOZ_ASSERT(mMainThreadDemuxer);
-  mMainThreadDemuxer->NotifyDataRemoved();
 
-  // Queue a task to notify our main demuxer.
-  RefPtr<nsIRunnable> task =
-    NS_NewRunnableMethodWithArgs<int32_t, uint64_t>(
-      this, &MediaFormatReader::NotifyDemuxer,
-      0, 0);
-  TaskQueue()->Dispatch(task.forget());
+  // Queue a task to notify our main thread demuxer.
+  nsCOMPtr<nsIRunnable> task =
+    NS_NewRunnableMethod(
+      mMainThreadDemuxer, &MediaDataDemuxer::NotifyDataRemoved);
+  AbstractThread::MainThread()->Dispatch(task.forget());
+
+  NotifyDemuxer(0, 0);
 }
 
 bool
 MediaFormatReader::ForceZeroStartTime() const
 {
   return !mDemuxer->ShouldComputeStartTime();
 }
 
--- a/dom/media/MediaFormatReader.h
+++ b/dom/media/MediaFormatReader.h
@@ -4,17 +4,16 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #if !defined(MediaFormatReader_h_)
 #define MediaFormatReader_h_
 
 #include "mozilla/Atomics.h"
 #include "mozilla/Maybe.h"
-#include "mozilla/Monitor.h"
 #include "MediaDataDemuxer.h"
 #include "MediaDecoderReader.h"
 #include "MediaTaskQueue.h"
 #include "PlatformDecoderModule.h"
 
 namespace mozilla {
 
 #if defined(MOZ_GONK_MEDIACODEC) || defined(XP_WIN) || defined(MOZ_APPLEMEDIA) || defined(MOZ_FFMPEG)
@@ -202,18 +201,16 @@ private:
       , mOutputRequested(false)
       , mInputExhausted(false)
       , mError(false)
       , mDrainComplete(false)
       , mNumSamplesInput(0)
       , mNumSamplesOutput(0)
       , mSizeOfQueue(0)
       , mLastStreamSourceID(UINT32_MAX)
-      , mMonitor(aType == MediaData::AUDIO_DATA ? "audio decoder data"
-                                                : "video decoder data")
     {}
 
     MediaFormatReader* mOwner;
     // Disambiguate Audio vs Video.
     MediaData::Type mType;
     nsRefPtr<MediaTrackDemuxer> mTrackDemuxer;
     // The platform decoder.
     nsRefPtr<MediaDataDecoder> mDecoder;
@@ -290,19 +287,16 @@ private:
       mNumSamplesInput = 0;
       mNumSamplesOutput = 0;
     }
 
     // Used by the MDSM for logging purposes.
     Atomic<size_t> mSizeOfQueue;
     // Sample format monitoring.
     uint32_t mLastStreamSourceID;
-    // Monitor that protects all non-threadsafe state; the primitives
-    // that follow.
-    Monitor mMonitor;
     media::TimeIntervals mTimeRanges;
     nsRefPtr<SharedTrackInfo> mInfo;
   };
 
   template<typename PromiseType>
   struct DecoderDataWithPromise : public DecoderData {
     DecoderDataWithPromise(MediaFormatReader* aOwner,
                            MediaData::Type aType,
@@ -415,19 +409,16 @@ private:
   nsRefPtr<SharedDecoderManager> mSharedDecoderManager;
 
   // Main thread objects
   // Those are only used to calculate our buffered range on the main thread.
   // The cached buffered range is calculated one when required.
   nsRefPtr<MediaDataDemuxer> mMainThreadDemuxer;
   nsRefPtr<MediaTrackDemuxer> mAudioTrackDemuxer;
   nsRefPtr<MediaTrackDemuxer> mVideoTrackDemuxer;
-  ByteInterval mDataRange;
-  media::TimeIntervals mCachedTimeRanges;
-  bool mCachedTimeRangesStale;
 
 #if defined(READER_DORMANT_HEURISTIC)
   const bool mDormantEnabled;
 #endif
 };
 
 } // namespace mozilla
 
--- a/dom/media/fmp4/MP4Reader.cpp
+++ b/dom/media/fmp4/MP4Reader.cpp
@@ -1070,16 +1070,17 @@ MP4Reader::GetEvictionOffset(double aTim
   }
 
   return mDemuxer->GetEvictionOffset(aTime * 1000000.0);
 }
 
 media::TimeIntervals
 MP4Reader::GetBuffered()
 {
+  MOZ_ASSERT(OnTaskQueue());
   MonitorAutoLock mon(mDemuxerMonitor);
   media::TimeIntervals buffered;
   if (!mIndexReady) {
     return buffered;
   }
   UpdateIndex();
   NS_ENSURE_TRUE(mStartTime >= 0, media::TimeIntervals());
 
--- a/dom/media/gstreamer/GStreamerReader.cpp
+++ b/dom/media/gstreamer/GStreamerReader.cpp
@@ -870,16 +870,17 @@ GStreamerReader::Seek(int64_t aTarget, i
   gst_message_unref(message);
   LOG(LogLevel::Debug, "seek completed");
 
   return SeekPromise::CreateAndResolve(aTarget, __func__);
 }
 
 media::TimeIntervals GStreamerReader::GetBuffered()
 {
+  MOZ_ASSERT(OnTaskQueue());
   media::TimeIntervals buffered;
   if (!mInfo.HasValidMedia()) {
     return buffered;
   }
 
 #if GST_VERSION_MAJOR == 0
   GstFormat format = GST_FORMAT_TIME;
 #endif
--- a/dom/media/gtest/TestMP4Reader.cpp
+++ b/dom/media/gtest/TestMP4Reader.cpp
@@ -31,22 +31,19 @@ public:
   {
     EXPECT_EQ(NS_OK, Preferences::SetBool(
                        "media.fragmented-mp4.use-blank-decoder", true));
 
     EXPECT_EQ(NS_OK, resource->Open(nullptr));
     decoder->SetResource(resource);
 
     reader->Init(nullptr);
-    {
-      // This needs to be done before invoking GetBuffered. This is normally
-      // done by MediaDecoderStateMachine.
-      ReentrantMonitorAutoEnter mon(decoder->GetReentrantMonitor());
-      reader->SetStartTime(0);
-    }
+    // This needs to be done before invoking GetBuffered. This is normally
+    // done by MediaDecoderStateMachine.
+    reader->DispatchSetStartTime(0);
   }
 
   void Init() {
     nsCOMPtr<nsIThread> thread;
     nsresult rv = NS_NewThread(getter_AddRefs(thread),
                                NS_NewRunnableMethod(this, &TestBinding::ReadMetadata));
     EXPECT_EQ(NS_OK, rv);
     thread->Shutdown();
--- a/dom/media/mediasource/MediaSourceDecoder.cpp
+++ b/dom/media/mediasource/MediaSourceDecoder.cpp
@@ -82,28 +82,66 @@ MediaSourceDecoder::GetSeekable()
     return media::TimeIntervals::Invalid();
   }
 
   media::TimeIntervals seekable;
   double duration = mMediaSource->Duration();
   if (IsNaN(duration)) {
     // Return empty range.
   } else if (duration > 0 && mozilla::IsInfinite(duration)) {
-    media::TimeIntervals buffered = mReader->GetBuffered();
+    media::TimeIntervals buffered = GetBuffered();
     if (buffered.Length()) {
       seekable += media::TimeInterval(buffered.GetStart(), buffered.GetEnd());
     }
   } else {
     seekable += media::TimeInterval(media::TimeUnit::FromSeconds(0),
                                     media::TimeUnit::FromSeconds(duration));
   }
   MSE_DEBUG("ranges=%s", DumpTimeRanges(seekable).get());
   return seekable;
 }
 
+media::TimeIntervals
+MediaSourceDecoder::GetBuffered()
+{
+  MOZ_ASSERT(NS_IsMainThread());
+
+  dom::SourceBufferList* sourceBuffers = mMediaSource->ActiveSourceBuffers();
+  media::TimeUnit highestEndTime;
+  nsTArray<media::TimeIntervals> activeRanges;
+  media::TimeIntervals buffered;
+
+  for (uint32_t i = 0; i < sourceBuffers->Length(); i++) {
+    bool found;
+    dom::SourceBuffer* sb = sourceBuffers->IndexedGetter(i, found);
+    MOZ_ASSERT(found);
+
+    activeRanges.AppendElement(sb->GetTimeIntervals());
+    highestEndTime =
+      std::max(highestEndTime, activeRanges.LastElement().GetEnd());
+  }
+
+  buffered +=
+    media::TimeInterval(media::TimeUnit::FromMicroseconds(0), highestEndTime);
+
+  for (auto& range : activeRanges) {
+    if (mEnded && range.Length()) {
+      // Set the end time on the last range to highestEndTime by adding a
+      // new range spanning the current end time to highestEndTime, which
+      // Normalize() will then merge with the old last range.
+      range +=
+        media::TimeInterval(range.GetEnd(), highestEndTime);
+    }
+    buffered.Intersection(range);
+  }
+
+  MSE_DEBUG("ranges=%s", DumpTimeRanges(buffered).get());
+  return buffered;
+}
+
 void
 MediaSourceDecoder::Shutdown()
 {
   MSE_DEBUG("Shutdown");
   // Detach first so that TrackBuffers are unused on the main thread when
   // shut down on the decode task queue.
   if (mMediaSource) {
     mMediaSource->Detach();
@@ -293,43 +331,12 @@ MediaSourceDecoder::IsActiveReader(Media
 
 double
 MediaSourceDecoder::GetDuration()
 {
   ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
   return ExplicitDuration();
 }
 
-already_AddRefed<SourceBufferDecoder>
-MediaSourceDecoder::SelectDecoder(int64_t aTarget,
-                                  int64_t aTolerance,
-                                  const nsTArray<nsRefPtr<SourceBufferDecoder>>& aTrackDecoders)
-{
-  MOZ_ASSERT(!mIsUsingFormatReader);
-  ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
-
-  media::TimeUnit target{media::TimeUnit::FromMicroseconds(aTarget)};
-  media::TimeUnit tolerance{media::TimeUnit::FromMicroseconds(aTolerance + aTarget)};
-
-  // aTolerance gives a slight bias toward the start of a range only.
-  // Consider decoders in order of newest to oldest, as a newer decoder
-  // providing a given buffered range is expected to replace an older one.
-  for (int32_t i = aTrackDecoders.Length() - 1; i >= 0; --i) {
-    nsRefPtr<SourceBufferDecoder> newDecoder = aTrackDecoders[i];
-
-    media::TimeIntervals ranges = newDecoder->GetBuffered();
-    for (uint32_t j = 0; j < ranges.Length(); j++) {
-      if (target < ranges.End(j) && tolerance >= ranges.Start(j)) {
-        return newDecoder.forget();
-      }
-    }
-    MSE_DEBUGV("SelectDecoder(%lld fuzz:%lld) newDecoder=%p (%d/%d) target not in ranges=%s",
-               aTarget, aTolerance, newDecoder.get(), i+1,
-               aTrackDecoders.Length(), DumpTimeRanges(ranges).get());
-  }
-
-  return nullptr;
-}
-
 #undef MSE_DEBUG
 #undef MSE_DEBUGV
 
 } // namespace mozilla
--- a/dom/media/mediasource/MediaSourceDecoder.h
+++ b/dom/media/mediasource/MediaSourceDecoder.h
@@ -36,16 +36,17 @@ class MediaSourceDecoder : public MediaD
 {
 public:
   explicit MediaSourceDecoder(dom::HTMLMediaElement* aElement);
 
   virtual MediaDecoder* Clone() override;
   virtual MediaDecoderStateMachine* CreateStateMachine() override;
   virtual nsresult Load(nsIStreamListener**, MediaDecoder*) override;
   virtual media::TimeIntervals GetSeekable() override;
+  media::TimeIntervals GetBuffered() override;
 
   virtual void Shutdown() override;
 
   static already_AddRefed<MediaResource> CreateResource(nsIPrincipal* aPrincipal = nullptr);
 
   void AttachMediaSource(dom::MediaSource* aMediaSource);
   void DetachMediaSource();
 
@@ -86,22 +87,16 @@ public:
   {
     return mDemuxer;
   }
 
   // Returns true if aReader is a currently active audio or video
   // reader in this decoders MediaSourceReader.
   bool IsActiveReader(MediaDecoderReader* aReader);
 
-  // Return a decoder from the set available in aTrackDecoders that has data
-  // available in the range requested by aTarget.
-  already_AddRefed<SourceBufferDecoder> SelectDecoder(int64_t aTarget /* microseconds */,
-                                                      int64_t aTolerance /* microseconds */,
-                                                      const nsTArray<nsRefPtr<SourceBufferDecoder>>& aTrackDecoders);
-
   // Returns a string describing the state of the MediaSource internal
   // buffered data. Used for debugging purposes.
   void GetMozDebugReaderData(nsAString& aString);
 
 private:
   void DoSetMediaSourceDuration(double aDuration);
 
   // The owning MediaSource holds a strong reference to this decoder, and
--- a/dom/media/mediasource/MediaSourceReader.cpp
+++ b/dom/media/mediasource/MediaSourceReader.cpp
@@ -548,28 +548,50 @@ MediaSourceReader::BreakCycles()
     mShutdownTrackBuffers[i]->BreakCycles();
   }
   mShutdownTrackBuffers.Clear();
 }
 
 already_AddRefed<SourceBufferDecoder>
 MediaSourceReader::SelectDecoder(int64_t aTarget,
                                  int64_t aTolerance,
-                                 const nsTArray<nsRefPtr<SourceBufferDecoder>>& aTrackDecoders)
+                                 TrackBuffer* aTrackBuffer)
 {
-  return static_cast<MediaSourceDecoder*>(mDecoder)
-      ->SelectDecoder(aTarget, aTolerance, aTrackDecoders);
+  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
+
+  media::TimeUnit target{media::TimeUnit::FromMicroseconds(aTarget)};
+  media::TimeUnit tolerance{media::TimeUnit::FromMicroseconds(aTolerance + aTarget)};
+
+  const nsTArray<nsRefPtr<SourceBufferDecoder>>& decoders{aTrackBuffer->Decoders()};
+
+  // aTolerance gives a slight bias toward the start of a range only.
+  // Consider decoders in order of newest to oldest, as a newer decoder
+  // providing a given buffered range is expected to replace an older one.
+  for (int32_t i = decoders.Length() - 1; i >= 0; --i) {
+    nsRefPtr<SourceBufferDecoder> newDecoder = decoders[i];
+    media::TimeIntervals ranges = aTrackBuffer->GetBuffered(newDecoder);
+    for (uint32_t j = 0; j < ranges.Length(); j++) {
+      if (target < ranges.End(j) && tolerance >= ranges.Start(j)) {
+        return newDecoder.forget();
+      }
+    }
+    MSE_DEBUGV("SelectDecoder(%lld fuzz:%lld) newDecoder=%p (%d/%d) target not in ranges=%s",
+               aTarget, aTolerance, newDecoder.get(), i+1,
+               decoders.Length(), DumpTimeRanges(ranges).get());
+  }
+
+  return nullptr;
 }
 
 bool
 MediaSourceReader::HaveData(int64_t aTarget, MediaData::Type aType)
 {
   TrackBuffer* trackBuffer = aType == MediaData::AUDIO_DATA ? mAudioTrack : mVideoTrack;
   MOZ_ASSERT(trackBuffer);
-  nsRefPtr<SourceBufferDecoder> decoder = SelectDecoder(aTarget, EOS_FUZZ_US, trackBuffer->Decoders());
+  nsRefPtr<SourceBufferDecoder> decoder = SelectDecoder(aTarget, EOS_FUZZ_US, trackBuffer);
   return !!decoder;
 }
 
 MediaSourceReader::SwitchSourceResult
 MediaSourceReader::SwitchAudioSource(int64_t* aTarget)
 {
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   // XXX: Can't handle adding an audio track after ReadMetadata.
@@ -577,19 +599,19 @@ MediaSourceReader::SwitchAudioSource(int
     return SOURCE_NONE;
   }
 
   // We first search without the tolerance and then search with it, so that, in
   // the case of perfectly-aligned data, we don't prematurely jump to a new
   // reader and skip the last few samples of the current one.
   bool usedFuzz = false;
   nsRefPtr<SourceBufferDecoder> newDecoder =
-    SelectDecoder(*aTarget, /* aTolerance = */ 0, mAudioTrack->Decoders());
+    SelectDecoder(*aTarget, /* aTolerance = */ 0, mAudioTrack);
   if (!newDecoder) {
-    newDecoder = SelectDecoder(*aTarget, EOS_FUZZ_US, mAudioTrack->Decoders());
+    newDecoder = SelectDecoder(*aTarget, EOS_FUZZ_US, mAudioTrack);
     usedFuzz = true;
   }
   if (GetAudioReader() && mAudioSourceDecoder != newDecoder) {
     GetAudioReader()->SetIdle();
   }
   if (!newDecoder) {
     mAudioSourceDecoder = nullptr;
     return SOURCE_NONE;
@@ -622,19 +644,19 @@ MediaSourceReader::SwitchVideoSource(int
     return SOURCE_NONE;
   }
 
   // We first search without the tolerance and then search with it, so that, in
   // the case of perfectly-aligned data, we don't prematurely jump to a new
   // reader and skip the last few samples of the current one.
   bool usedFuzz = false;
   nsRefPtr<SourceBufferDecoder> newDecoder =
-    SelectDecoder(*aTarget, /* aTolerance = */ 0, mVideoTrack->Decoders());
+    SelectDecoder(*aTarget, /* aTolerance = */ 0, mVideoTrack);
   if (!newDecoder) {
-    newDecoder = SelectDecoder(*aTarget, EOS_FUZZ_US, mVideoTrack->Decoders());
+    newDecoder = SelectDecoder(*aTarget, EOS_FUZZ_US, mVideoTrack);
     usedFuzz = true;
   }
   if (GetVideoReader() && mVideoSourceDecoder != newDecoder) {
     GetVideoReader()->SetIdle();
   }
   if (!newDecoder) {
     mVideoSourceDecoder = nullptr;
     return SOURCE_NONE;
@@ -727,20 +749,17 @@ MediaSourceReader::CreateSubDecoder(cons
   nsRefPtr<MediaDecoderReader> reader(CreateReaderForType(aType, decoder, TaskQueue()));
   if (!reader) {
     return nullptr;
   }
 
   // MSE uses a start time of 0 everywhere. Set that immediately on the
   // subreader to make sure that it's always in a state where we can invoke
   // GetBuffered on it.
-  {
-    ReentrantMonitorAutoEnter mon(decoder->GetReentrantMonitor());
-    reader->SetStartTime(0);
-  }
+  reader->DispatchSetStartTime(0);
 
 #ifdef MOZ_FMP4
   reader->SetSharedDecoderManager(mSharedDecoderManager);
 #endif
   reader->Init(nullptr);
 
   MSE_DEBUG("subdecoder %p subreader %p",
             decoder.get(), reader.get());
@@ -1008,16 +1027,17 @@ MediaSourceReader::DoVideoSeek()
                                  &MediaSourceReader::OnVideoSeekCompleted,
                                  &MediaSourceReader::OnVideoSeekFailed));
   MSE_DEBUG("reader=%p", GetVideoReader());
 }
 
 media::TimeIntervals
 MediaSourceReader::GetBuffered()
 {
+  MOZ_ASSERT(OnTaskQueue());
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   media::TimeIntervals buffered;
 
   media::TimeUnit highestEndTime;
   nsTArray<media::TimeIntervals> activeRanges;
   // Must set the capacity of the nsTArray first: bug #1164444
   activeRanges.SetCapacity(mTrackBuffers.Length());
 
@@ -1251,35 +1271,37 @@ void
 MediaSourceReader::GetMozDebugReaderData(nsAString& aString)
 {
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   nsAutoCString result;
   result += nsPrintfCString("Dumping data for reader %p:\n", this);
   if (mAudioTrack) {
     result += nsPrintfCString("\tDumping Audio Track Decoders: - mLastAudioTime: %f\n", double(mLastAudioTime) / USECS_PER_S);
     for (int32_t i = mAudioTrack->Decoders().Length() - 1; i >= 0; --i) {
-      nsRefPtr<MediaDecoderReader> newReader = mAudioTrack->Decoders()[i]->GetReader();
-
-      media::TimeIntervals ranges = mAudioTrack->Decoders()[i]->GetBuffered();
+      const nsRefPtr<SourceBufferDecoder>& newDecoder{mAudioTrack->Decoders()[i]};
+      media::TimeIntervals ranges = mAudioTrack->GetBuffered(newDecoder);
       result += nsPrintfCString("\t\tReader %d: %p ranges=%s active=%s size=%lld\n",
-                                i, newReader.get(), DumpTimeRanges(ranges).get(),
-                                newReader.get() == GetAudioReader() ? "true" : "false",
-                                mAudioTrack->Decoders()[i]->GetResource()->GetSize());
+                                i,
+                                newDecoder->GetReader(),
+                                DumpTimeRanges(ranges).get(),
+                                newDecoder->GetReader() == GetAudioReader() ? "true" : "false",
+                                newDecoder->GetResource()->GetSize());
     }
   }
 
   if (mVideoTrack) {
     result += nsPrintfCString("\tDumping Video Track Decoders - mLastVideoTime: %f\n", double(mLastVideoTime) / USECS_PER_S);
     for (int32_t i = mVideoTrack->Decoders().Length() - 1; i >= 0; --i) {
-      nsRefPtr<MediaDecoderReader> newReader = mVideoTrack->Decoders()[i]->GetReader();
-
-      media::TimeIntervals ranges = mVideoTrack->Decoders()[i]->GetBuffered();
+      const nsRefPtr<SourceBufferDecoder>& newDecoder{mVideoTrack->Decoders()[i]};
+      media::TimeIntervals ranges = mVideoTrack->GetBuffered(newDecoder);
       result += nsPrintfCString("\t\tReader %d: %p ranges=%s active=%s size=%lld\n",
-                                i, newReader.get(), DumpTimeRanges(ranges).get(),
-                                newReader.get() == GetVideoReader() ? "true" : "false",
+                                i,
+                                newDecoder->GetReader(),
+                                DumpTimeRanges(ranges).get(),
+                                newDecoder->GetReader() == GetVideoReader() ? "true" : "false",
                                 mVideoTrack->Decoders()[i]->GetResource()->GetSize());
     }
   }
   aString += NS_ConvertUTF8toUTF16(result);
 }
 
 #ifdef MOZ_EME
 nsresult
--- a/dom/media/mediasource/MediaSourceReader.h
+++ b/dom/media/mediasource/MediaSourceReader.h
@@ -214,19 +214,20 @@ private:
   int64_t GetReaderVideoTime(int64_t aTime) const;
 
   // Will reject the MediaPromise with END_OF_STREAM if mediasource has ended
   // or with WAIT_FOR_DATA otherwise.
   void CheckForWaitOrEndOfStream(MediaData::Type aType, int64_t aTime /* microseconds */);
 
   // Return a decoder from the set available in aTrackDecoders that has data
   // available in the range requested by aTarget.
+  friend class TrackBuffer;
   already_AddRefed<SourceBufferDecoder> SelectDecoder(int64_t aTarget /* microseconds */,
                                                       int64_t aTolerance /* microseconds */,
-                                                      const nsTArray<nsRefPtr<SourceBufferDecoder>>& aTrackDecoders);
+                                                      TrackBuffer* aTrackBuffer);
   bool HaveData(int64_t aTarget, MediaData::Type aType);
   already_AddRefed<SourceBufferDecoder> FirstDecoder(MediaData::Type aType);
 
   void AttemptSeek();
   bool IsSeeking() { return mPendingSeekTime != -1; }
 
   bool IsNearEnd(MediaData::Type aType, int64_t aTime /* microseconds */);
   int64_t LastSampleTime(MediaData::Type aType);
--- a/dom/media/mediasource/SourceBuffer.cpp
+++ b/dom/media/mediasource/SourceBuffer.cpp
@@ -142,16 +142,22 @@ SourceBuffer::GetBuffered(ErrorResult& a
   }
   TimeIntervals ranges = mContentManager->Buffered();
   MSE_DEBUGV("ranges=%s", DumpTimeRanges(ranges).get());
   nsRefPtr<dom::TimeRanges> tr = new dom::TimeRanges();
   ranges.ToTimeRanges(tr);
   return tr.forget();
 }
 
+media::TimeIntervals
+SourceBuffer::GetTimeIntervals()
+{
+  return mContentManager->Buffered();
+}
+
 void
 SourceBuffer::SetAppendWindowStart(double aAppendWindowStart, ErrorResult& aRv)
 {
   MOZ_ASSERT(NS_IsMainThread());
   MSE_API("SetAppendWindowStart(aAppendWindowStart=%f)", aAppendWindowStart);
   if (!IsAttached() || mUpdating) {
     aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
     return;
--- a/dom/media/mediasource/SourceBuffer.h
+++ b/dom/media/mediasource/SourceBuffer.h
@@ -55,16 +55,17 @@ public:
   void SetMode(SourceBufferAppendMode aMode, ErrorResult& aRv);
 
   bool Updating() const
   {
     return mUpdating;
   }
 
   already_AddRefed<TimeRanges> GetBuffered(ErrorResult& aRv);
+  TimeIntervals GetTimeIntervals();
 
   double TimestampOffset() const
   {
     return mApparentTimestampOffset;
   }
 
   void SetTimestampOffset(double aTimestampOffset, ErrorResult& aRv);
 
--- a/dom/media/mediasource/SourceBufferDecoder.cpp
+++ b/dom/media/mediasource/SourceBufferDecoder.cpp
@@ -194,23 +194,16 @@ MediaDecoderOwner*
 SourceBufferDecoder::GetOwner()
 {
   return mParentDecoder->GetOwner();
 }
 
 void
 SourceBufferDecoder::NotifyDataArrived(uint32_t aLength, int64_t aOffset)
 {
-  mReader->DispatchNotifyDataArrived(aLength, aOffset);
-
-  // XXX: Params make no sense to parent decoder as it relates to a
-  // specific SourceBufferDecoder's data stream.  Pass bogus values here to
-  // force parent decoder's state machine to recompute end time for
-  // infinite length media.
-  mParentDecoder->NotifyDataArrived(0, 0);
 }
 
 media::TimeIntervals
 SourceBufferDecoder::GetBuffered()
 {
   media::TimeIntervals buffered = mReader->GetBuffered();
   if (buffered.IsInvalid()) {
     return buffered;
--- a/dom/media/mediasource/TrackBuffer.cpp
+++ b/dom/media/mediasource/TrackBuffer.cpp
@@ -31,16 +31,19 @@ extern PRLogModuleInfo* GetMediaSourceLo
 // prevent evicting the current playback point.
 #define MSE_EVICT_THRESHOLD_TIME 2.0
 
 // Time in microsecond under which a timestamp will be considered to be 0.
 #define FUZZ_TIMESTAMP_OFFSET 100000
 
 #define EOS_FUZZ_US 125000
 
+using media::TimeIntervals;
+using media::Interval;
+
 namespace mozilla {
 
 TrackBuffer::TrackBuffer(MediaSourceDecoder* aParentDecoder, const nsACString& aType)
   : mParentDecoder(aParentDecoder)
   , mType(aType)
   , mLastStartTimestamp(0)
   , mIsWaitingOnCDM(false)
   , mShutdown(false)
@@ -237,81 +240,161 @@ TrackBuffer::BufferAppend()
 
   if (gotMedia && starttu != mAdjustedTimestamp &&
       ((start < 0 && -start < FUZZ_TIMESTAMP_OFFSET && starttu < mAdjustedTimestamp) ||
        (start > 0 && (start < FUZZ_TIMESTAMP_OFFSET || starttu < mAdjustedTimestamp)))) {
     AdjustDecodersTimestampOffset(mAdjustedTimestamp - starttu);
     mAdjustedTimestamp = starttu;
   }
 
-  if (!AppendDataToCurrentResource(mInputBuffer, end - start)) {
+  int64_t offset = AppendDataToCurrentResource(mInputBuffer, end - start);
+  if (offset < 0) {
     mInitializationPromise.Reject(NS_ERROR_FAILURE, __func__);
     return p;
   }
 
+  mLastAppendRange =
+    Interval<int64_t>(offset, offset + int64_t(mInputBuffer->Length()));
+
   if (decoders.Length()) {
     // We're going to have to wait for the decoder to initialize, the promise
     // will be resolved once initialization completes.
     return p;
   }
 
-  // Tell our reader that we have more data to ensure that playback starts if
-  // required when data is appended.
-  NotifyTimeRangesChanged();
+  nsRefPtr<TrackBuffer> self = this;
 
-  mInitializationPromise.Resolve(HasInitSegment(), __func__);
+  ProxyMediaCall(mParentDecoder->GetReader()->TaskQueue(), this, __func__,
+                 &TrackBuffer::UpdateBufferedRanges,
+                 mLastAppendRange, /* aNotifyParent */ true)
+      ->Then(mParentDecoder->GetReader()->TaskQueue(), __func__,
+             [self] {
+               self->mInitializationPromise.ResolveIfExists(self->HasInitSegment(), __func__);
+             },
+             [self] (nsresult) { MOZ_CRASH("Never called."); });
+
   return p;
 }
 
-bool
+int64_t
 TrackBuffer::AppendDataToCurrentResource(MediaByteBuffer* aData, uint32_t aDuration)
 {
   MOZ_ASSERT(NS_IsMainThread());
   if (!mCurrentDecoder) {
-    return false;
+    return -1;
   }
 
   SourceBufferResource* resource = mCurrentDecoder->GetResource();
   int64_t appendOffset = resource->GetLength();
   resource->AppendData(aData);
   mCurrentDecoder->SetRealMediaDuration(mCurrentDecoder->GetRealMediaDuration() + aDuration);
-  mCurrentDecoder->NotifyDataArrived(aData->Length(), appendOffset);
-  mParentDecoder->NotifyBytesDownloaded();
+
+  return appendOffset;
+}
+
+nsRefPtr<TrackBuffer::BufferedRangesUpdatedPromise>
+TrackBuffer::UpdateBufferedRanges(Interval<int64_t> aByteRange, bool aNotifyParent)
+{
+  if (aByteRange.Length()) {
+    mCurrentDecoder->GetReader()->NotifyDataArrived(uint32_t(aByteRange.Length()),
+                                                    aByteRange.mStart);
+  }
+
+  // Recalculate and cache our new buffered range.
+  {
+    ReentrantMonitorAutoEnter mon(mParentDecoder->GetReentrantMonitor());
+    TimeIntervals buffered;
+
+    for (auto& decoder : mInitializedDecoders) {
+      TimeIntervals decoderBuffered(decoder->GetBuffered());
+      mReadersBuffered[decoder] = decoderBuffered;
+      buffered += decoderBuffered;
+    }
+    // mParser may not be initialized yet, and will only be so if we have a
+    // buffered range.
+    if (buffered.Length()) {
+      buffered.SetFuzz(TimeUnit::FromMicroseconds(mParser->GetRoundingError()));
+    }
+
+    mBufferedRanges = buffered;
+  }
+
+  if (aNotifyParent) {
+    nsRefPtr<MediaSourceDecoder> parent = mParentDecoder;
+    nsCOMPtr<nsIRunnable> task =
+      NS_NewRunnableFunction([parent] () {
+        // XXX: These params make no sense to the parent decoder, as they
+        // relate to a specific SourceBufferDecoder's data stream. Pass bogus
+        // values here to force the parent decoder's state machine to recompute
+        // the end time for infinite-length media.
+        parent->NotifyDataArrived(0, 0);
+        parent->NotifyBytesDownloaded();
+      });
+    AbstractThread::MainThread()->Dispatch(task.forget());
+  }
+
+  // Tell our reader that we have more data to ensure that playback starts if
+  // required when data is appended.
   NotifyTimeRangesChanged();
 
-  return true;
+  return BufferedRangesUpdatedPromise::CreateAndResolve(true, __func__);
 }
 
 void
 TrackBuffer::NotifyTimeRangesChanged()
 {
   RefPtr<nsIRunnable> task =
     NS_NewRunnableMethod(mParentDecoder->GetReader(),
                          &MediaSourceReader::NotifyTimeRangesChanged);
   mParentDecoder->GetReader()->TaskQueue()->Dispatch(task.forget());
 }
 
+void
+TrackBuffer::NotifyReaderDataRemoved(MediaDecoderReader* aReader)
+{
+  MOZ_ASSERT(NS_IsMainThread());
+
+  nsRefPtr<TrackBuffer> self = this;
+  nsRefPtr<MediaDecoderReader> reader = aReader;
+  RefPtr<nsIRunnable> task =
+    NS_NewRunnableFunction([self, reader] () {
+      reader->NotifyDataRemoved();
+      self->UpdateBufferedRanges(Interval<int64_t>(), /* aNotifyParent */ false);
+    });
+  aReader->TaskQueue()->Dispatch(task.forget());
+}
+
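+// Sorts decoders by the start time of their cached buffered ranges, using the
+// TrackBuffer's cache rather than querying each decoder directly.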
 class DecoderSorter
 {
 public:
+  explicit DecoderSorter(const TrackBuffer::DecoderBufferedMap& aMap)
+    : mMap(aMap)
+  {}
+
   bool LessThan(SourceBufferDecoder* aFirst, SourceBufferDecoder* aSecond) const
   {
-    TimeIntervals first = aFirst->GetBuffered();
-    TimeIntervals second = aSecond->GetBuffered();
+    MOZ_ASSERT(mMap.find(aFirst) != mMap.end());
+    MOZ_ASSERT(mMap.find(aSecond) != mMap.end());
+    const TimeIntervals& first = mMap.find(aFirst)->second;
+    const TimeIntervals& second = mMap.find(aSecond)->second;
 
     return first.GetStart() < second.GetStart();
   }
 
   bool Equals(SourceBufferDecoder* aFirst, SourceBufferDecoder* aSecond) const
   {
-    TimeIntervals first = aFirst->GetBuffered();
-    TimeIntervals second = aSecond->GetBuffered();
+    MOZ_ASSERT(mMap.find(aFirst) != mMap.end());
+    MOZ_ASSERT(mMap.find(aSecond) != mMap.end());
+    const TimeIntervals& first = mMap.find(aFirst)->second;
+    const TimeIntervals& second = mMap.find(aSecond)->second;
 
     return first.GetStart() == second.GetStart();
   }
+
+  const TrackBuffer::DecoderBufferedMap& mMap;
 };
 
 TrackBuffer::EvictDataResult
 TrackBuffer::EvictData(TimeUnit aPlaybackTime,
                        uint32_t aThreshold,
                        TimeUnit* aBufferStartTime)
 {
   MOZ_ASSERT(NS_IsMainThread());
@@ -324,24 +407,34 @@ TrackBuffer::EvictData(TimeUnit aPlaybac
   int64_t totalSize = GetSize();
 
   int64_t toEvict = totalSize - aThreshold;
   if (toEvict <= 0) {
     return EvictDataResult::NO_DATA_EVICTED;
   }
 
   // Get a list of initialized decoders.
-  nsTArray<SourceBufferDecoder*> decoders;
+  nsTArray<nsRefPtr<SourceBufferDecoder>> decoders;
   decoders.AppendElements(mInitializedDecoders);
   const TimeUnit evictThresholdTime{TimeUnit::FromSeconds(MSE_EVICT_THRESHOLD_TIME)};
 
+  // Find the decoder whose reader is currently being played.
+  SourceBufferDecoder* playingDecoder = nullptr;
+  for (const auto& decoder : decoders) {
+    if (mParentDecoder->IsActiveReader(decoder->GetReader())) {
+      playingDecoder = decoder;
+      break;
+    }
+  }
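+  // Capture the playing decoder's buffered start time up front; evictions
+  // below may leave the cached ranges stale.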
+  TimeUnit playingDecoderStartTime{GetBuffered(playingDecoder).GetStart()};
+
   // First try to evict data before the current play position, starting
   // with the oldest decoder.
   for (uint32_t i = 0; i < decoders.Length() && toEvict > 0; ++i) {
-    TimeIntervals buffered = decoders[i]->GetBuffered();
+    TimeIntervals buffered = GetBuffered(decoders[i]);
 
     MSE_DEBUG("Step1. decoder=%u/%u threshold=%u toEvict=%lld",
               i, decoders.Length(), aThreshold, toEvict);
 
     // To ensure we don't evict data past the current playback position
     // we apply a threshold of a few seconds back and evict data up to
     // that point.
     if (aPlaybackTime > evictThresholdTime) {
@@ -359,143 +452,146 @@ TrackBuffer::EvictData(TimeUnit aPlaybac
       } else {
         int64_t playbackOffset =
           decoders[i]->ConvertToByteOffset(time.ToSeconds());
         MSE_DEBUG("evicting some bufferedEnd=%f "
                   "aPlaybackTime=%f time=%f, playbackOffset=%lld size=%lld",
                   buffered.GetEnd().ToSeconds(), aPlaybackTime.ToSeconds(),
                   time, playbackOffset, decoders[i]->GetResource()->GetSize());
         if (playbackOffset > 0) {
+          if (decoders[i] == playingDecoder) {
+            // This is an approximation only, likely pessimistic.
+            playingDecoderStartTime = time;
+          }
           ErrorResult rv;
           toEvict -= decoders[i]->GetResource()->EvictData(playbackOffset,
                                                            playbackOffset,
                                                            rv);
           if (NS_WARN_IF(rv.Failed())) {
             rv.SuppressException();
             return EvictDataResult::CANT_EVICT;
           }
         }
       }
-      decoders[i]->GetReader()->NotifyDataRemoved();
+      NotifyReaderDataRemoved(decoders[i]->GetReader());
     }
   }
 
   // Evict all data from decoders we've likely already read from.
   for (uint32_t i = 0; i < decoders.Length() && toEvict > 0; ++i) {
     MSE_DEBUG("Step2. decoder=%u/%u threshold=%u toEvict=%lld",
               i, decoders.Length(), aThreshold, toEvict);
-    if (mParentDecoder->IsActiveReader(decoders[i]->GetReader())) {
+    if (decoders[i] == playingDecoder) {
       break;
     }
     if (decoders[i] == mCurrentDecoder) {
       continue;
     }
-    TimeIntervals buffered = decoders[i]->GetBuffered();
+    // The buffered value is potentially stale if eviction occurred in
+    // step 1. However, it is only used for logging here.
+    TimeIntervals buffered = GetBuffered(decoders[i]);
 
     // Remove data from older decoders than the current one.
     MSE_DEBUG("evicting all "
               "bufferedStart=%f bufferedEnd=%f aPlaybackTime=%f size=%lld",
               buffered.GetStart().ToSeconds(), buffered.GetEnd().ToSeconds(),
               aPlaybackTime, decoders[i]->GetResource()->GetSize());
     toEvict -= decoders[i]->GetResource()->EvictAll();
-    decoders[i]->GetReader()->NotifyDataRemoved();
+    NotifyReaderDataRemoved(decoders[i]->GetReader());
   }
 
   // Evict all data from future decoders, starting furthest away from
   // current playback position.
   // We will ignore the currently playing decoder and the one playing after that
   // in order to ensure we give enough time to the DASH player to re-buffer
   // as necessary.
   // TODO: This step should be done using RangeRemoval:
   // Something like: RangeRemoval(aPlaybackTime + 60s, End);
 
-  // Find the reader currently being played with.
-  SourceBufferDecoder* playingDecoder = nullptr;
-  for (uint32_t i = 0; i < decoders.Length() && toEvict > 0; ++i) {
-    if (mParentDecoder->IsActiveReader(decoders[i]->GetReader())) {
-      playingDecoder = decoders[i];
-      break;
-    }
-  }
   // Find the next decoder we're likely going to play with.
   nsRefPtr<SourceBufferDecoder> nextPlayingDecoder = nullptr;
   if (playingDecoder) {
-    TimeIntervals buffered = playingDecoder->GetBuffered();
+    // The buffered value is potentially stale if eviction occurred in
+    // step 1. However, step 1 only modified the start of the range, and we
+    // use the end value here.
+    TimeIntervals buffered = GetBuffered(playingDecoder);
     nextPlayingDecoder =
-      mParentDecoder->SelectDecoder(buffered.GetEnd().ToMicroseconds() + 1,
-                                    EOS_FUZZ_US,
-                                    mInitializedDecoders);
+      mParentDecoder->GetReader()->SelectDecoder(buffered.GetEnd().ToMicroseconds() + 1,
+                                                 EOS_FUZZ_US,
+                                                 this);
   }
 
   // Sort decoders by their start times.
-  decoders.Sort(DecoderSorter());
+  decoders.Sort(DecoderSorter{mReadersBuffered});
 
   for (int32_t i = int32_t(decoders.Length()) - 1; i >= 0 && toEvict > 0; --i) {
     MSE_DEBUG("Step3. decoder=%u/%u threshold=%u toEvict=%lld",
               i, decoders.Length(), aThreshold, toEvict);
     if (decoders[i] == playingDecoder || decoders[i] == nextPlayingDecoder ||
         decoders[i] == mCurrentDecoder) {
       continue;
     }
-    TimeIntervals buffered = decoders[i]->GetBuffered();
+    // The buffered value is potentially stale if eviction occurred in
+    // steps 1 and 2. However, step 3 is a last resort where we remove all
+    // content, and the buffered value is only used for logging.
+    TimeIntervals buffered = GetBuffered(decoders[i]);
 
     MSE_DEBUG("evicting all "
               "bufferedStart=%f bufferedEnd=%f aPlaybackTime=%f size=%lld",
               buffered.GetStart().ToSeconds(), buffered.GetEnd().ToSeconds(),
               aPlaybackTime, decoders[i]->GetResource()->GetSize());
     toEvict -= decoders[i]->GetResource()->EvictAll();
-    decoders[i]->GetReader()->NotifyDataRemoved();
+    NotifyReaderDataRemoved(decoders[i]->GetReader());
   }
 
   RemoveEmptyDecoders(decoders);
 
   bool evicted = toEvict < (totalSize - aThreshold);
   if (evicted) {
     if (playingDecoder) {
-      TimeIntervals ranges = playingDecoder->GetBuffered();
-      *aBufferStartTime = std::max(TimeUnit::FromSeconds(0), ranges.GetStart());
+      *aBufferStartTime =
+        std::max(TimeUnit::FromSeconds(0), playingDecoderStartTime);
     } else {
       // We do not currently have data to play yet.
       // Avoid evicting anymore data to minimize rebuffering time.
       *aBufferStartTime = TimeUnit::FromSeconds(0.0);
     }
   }
 
-  if (evicted) {
-    NotifyTimeRangesChanged();
-  }
-
   return evicted ?
     EvictDataResult::DATA_EVICTED :
     (HasOnlyIncompleteMedia() ? EvictDataResult::CANT_EVICT : EvictDataResult::NO_DATA_EVICTED);
 }
 
 void
-TrackBuffer::RemoveEmptyDecoders(nsTArray<mozilla::SourceBufferDecoder*>& aDecoders)
+TrackBuffer::RemoveEmptyDecoders(const nsTArray<nsRefPtr<mozilla::SourceBufferDecoder>>& aDecoders)
 {
-  ReentrantMonitorAutoEnter mon(mParentDecoder->GetReentrantMonitor());
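+  // Defer the removal to the main thread, where each decoder's cached
+  // buffered range is re-checked before the decoder is removed.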
+  nsRefPtr<TrackBuffer> self = this;
+  nsTArray<nsRefPtr<mozilla::SourceBufferDecoder>> decoders(aDecoders);
+  nsCOMPtr<nsIRunnable> task =
+    NS_NewRunnableFunction([self, decoders] () {
+      if (!self->mParentDecoder) {
+        return;
+      }
+      ReentrantMonitorAutoEnter mon(self->mParentDecoder->GetReentrantMonitor());
 
-  // Remove decoders that have no data in them
-  for (uint32_t i = 0; i < aDecoders.Length(); ++i) {
-    TimeIntervals buffered = aDecoders[i]->GetBuffered();
-    MSE_DEBUG("maybe remove empty decoders=%d "
-              "size=%lld start=%f end=%f",
-              i, aDecoders[i]->GetResource()->GetSize(),
-              buffered.GetStart().ToSeconds(), buffered.GetEnd().ToSeconds());
-    if (aDecoders[i] == mCurrentDecoder ||
-        mParentDecoder->IsActiveReader(aDecoders[i]->GetReader())) {
-      continue;
-    }
-
-    if (aDecoders[i]->GetResource()->GetSize() == 0 || !buffered.Length() ||
-        buffered[0].IsEmpty()) {
-      MSE_DEBUG("remove empty decoders=%d", i);
-      RemoveDecoder(aDecoders[i]);
-    }
-  }
+      // Remove decoders that have no data in them.
+      for (uint32_t i = 0; i < decoders.Length(); ++i) {
+        if (decoders[i] == self->mCurrentDecoder ||
+            self->mParentDecoder->IsActiveReader(decoders[i]->GetReader())) {
+          continue;
+        }
+        TimeIntervals buffered = self->GetBuffered(decoders[i]);
+        if (decoders[i]->GetResource()->GetSize() == 0 || !buffered.Length() ||
+            buffered[0].IsEmpty()) {
+          self->RemoveDecoder(decoders[i]);
+        }
+      }
+    });
+  AbstractThread::MainThread()->Dispatch(task.forget());
 }
 
 int64_t
 TrackBuffer::GetSize()
 {
   int64_t totalSize = 0;
   for (uint32_t i = 0; i < mInitializedDecoders.Length(); ++i) {
     totalSize += mInitializedDecoders[i]->GetResource()->GetSize();
@@ -504,17 +600,17 @@ TrackBuffer::GetSize()
 }
 
 bool
 TrackBuffer::HasOnlyIncompleteMedia()
 {
   if (!mCurrentDecoder) {
     return false;
   }
-  TimeIntervals buffered = mCurrentDecoder->GetBuffered();
+  TimeIntervals buffered = GetBuffered(mCurrentDecoder);
   MSE_DEBUG("mCurrentDecoder.size=%lld, start=%f end=%f",
             mCurrentDecoder->GetResource()->GetSize(),
             buffered.GetStart(), buffered.GetEnd());
   return mCurrentDecoder->GetResource()->GetSize() && !buffered.Length();
 }
 
 void
 TrackBuffer::EvictBefore(TimeUnit aTime)
@@ -527,39 +623,40 @@ TrackBuffer::EvictBefore(TimeUnit aTime)
       MSE_DEBUG("decoder=%u offset=%lld",
                 i, endOffset);
       ErrorResult rv;
       mInitializedDecoders[i]->GetResource()->EvictBefore(endOffset, rv);
       if (NS_WARN_IF(rv.Failed())) {
         rv.SuppressException();
         return;
       }
-      mInitializedDecoders[i]->GetReader()->NotifyDataRemoved();
+      NotifyReaderDataRemoved(mInitializedDecoders[i]->GetReader());
     }
   }
-  NotifyTimeRangesChanged();
 }
 
 TimeIntervals
 TrackBuffer::Buffered()
 {
   ReentrantMonitorAutoEnter mon(mParentDecoder->GetReentrantMonitor());
 
-  TimeIntervals buffered;
+  return mBufferedRanges;
+}
 
-  for (auto& decoder : mInitializedDecoders) {
-    buffered += decoder->GetBuffered();
+TimeIntervals
+TrackBuffer::GetBuffered(SourceBufferDecoder* aDecoder)
+{
+  ReentrantMonitorAutoEnter mon(mParentDecoder->GetReentrantMonitor());
+
+  DecoderBufferedMap::const_iterator val = mReadersBuffered.find(aDecoder);
+
+  if (val == mReadersBuffered.end()) {
+    return TimeIntervals::Invalid();
   }
-  // mParser may not be initialized yet, and will only be so if we have a
-  // buffered range.
-  if (buffered.Length()) {
-    buffered.SetFuzz(TimeUnit::FromMicroseconds(mParser->GetRoundingError()));
-  }
-
-  return buffered;
+  return val->second;
 }
 
 already_AddRefed<SourceBufferDecoder>
 TrackBuffer::NewDecoder(TimeUnit aTimestampOffset)
 {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(mParentDecoder);
 
@@ -660,16 +757,19 @@ TrackBuffer::InitializeDecoder(SourceBuf
 
   MOZ_ASSERT(aDecoder->GetReader()->OnTaskQueue());
 
   MediaDecoderReader* reader = aDecoder->GetReader();
 
   MSE_DEBUG("Initializing subdecoder %p reader %p",
             aDecoder, reader);
 
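+  // Inform the new reader of the most recently appended byte range before its
+  // metadata is read.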
+  reader->NotifyDataArrived(uint32_t(mLastAppendRange.Length()),
+                            mLastAppendRange.mStart);
+
   // HACK WARNING:
   // We only reach this point once we know that we have a complete init segment.
   // We don't want the demuxer to do a blocking read as no more data can be
   // appended while this routine is running. Marking the SourceBufferResource
   // as ended will cause any incomplete reads to abort.
   // As this decoder hasn't been initialized yet, the resource isn't yet in use
   // and so it is safe to do so.
   bool wasEnded = aDecoder->GetResource()->IsEnded();
@@ -816,32 +916,39 @@ TrackBuffer::CompleteInitializeDecoder(S
   if (!RegisterDecoder(aDecoder)) {
     MSE_DEBUG("Reader %p not activated",
               aDecoder->GetReader());
     RemoveDecoder(aDecoder);
     mInitializationPromise.RejectIfExists(NS_ERROR_FAILURE, __func__);
     return;
   }
 
-
   int64_t duration = mInfo.mMetadataDuration.isSome()
     ? mInfo.mMetadataDuration.ref().ToMicroseconds() : -1;
   if (!duration) {
     // Treat a duration of 0 as infinity
     duration = -1;
   }
   mParentDecoder->SetInitialDuration(duration);
 
   // Tell our reader that we have more data to ensure that playback starts if
   // required when data is appended.
   NotifyTimeRangesChanged();
 
   MSE_DEBUG("Reader %p activated",
             aDecoder->GetReader());
-  mInitializationPromise.ResolveIfExists(true, __func__);
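+  // Refresh the cached buffered ranges on the reader's task queue before
+  // resolving the initialization promise.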
+  nsRefPtr<TrackBuffer> self = this;
+  ProxyMediaCall(mParentDecoder->GetReader()->TaskQueue(), this, __func__,
+                 &TrackBuffer::UpdateBufferedRanges,
+                 Interval<int64_t>(), /* aNotifyParent */ true)
+      ->Then(mParentDecoder->GetReader()->TaskQueue(), __func__,
+             [self] {
+               self->mInitializationPromise.ResolveIfExists(self->HasInitSegment(), __func__);
+             },
+             [self] (nsresult) { MOZ_CRASH("Never called."); });
 }
 
 bool
 TrackBuffer::ValidateTrackFormats(const MediaInfo& aInfo)
 {
   if (mInfo.HasAudio() != aInfo.HasAudio() ||
       mInfo.HasVideo() != aInfo.HasVideo()) {
     MSE_DEBUG("audio/video track mismatch");
@@ -927,17 +1034,17 @@ TrackBuffer::IsWaitingOnCDMResource()
 }
 
 bool
 TrackBuffer::ContainsTime(int64_t aTime, int64_t aTolerance)
 {
   ReentrantMonitorAutoEnter mon(mParentDecoder->GetReentrantMonitor());
   TimeUnit time{TimeUnit::FromMicroseconds(aTime)};
   for (auto& decoder : mInitializedDecoders) {
-    TimeIntervals r = decoder->GetBuffered();
+    TimeIntervals r = GetBuffered(decoder);
     r.SetFuzz(TimeUnit::FromMicroseconds(aTolerance));
     if (r.Contains(time)) {
       return true;
     }
   }
 
   return false;
 }
@@ -1080,16 +1187,18 @@ TrackBuffer::RemoveDecoder(SourceBufferD
   RefPtr<nsIRunnable> task = new DelayedDispatchToMainThread(aDecoder, this);
   {
     ReentrantMonitorAutoEnter mon(mParentDecoder->GetReentrantMonitor());
     // There should be no other references to the decoder. Assert that
     // we aren't using it in the MediaSourceReader.
     MOZ_ASSERT(!mParentDecoder->IsActiveReader(aDecoder->GetReader()));
     mInitializedDecoders.RemoveElement(aDecoder);
     mDecoders.RemoveElement(aDecoder);
+    // Remove associated buffered range from our cache.
+    mReadersBuffered.erase(aDecoder);
   }
   aDecoder->GetReader()->TaskQueue()->Dispatch(task.forget());
 }
 
 nsRefPtr<TrackBuffer::RangeRemovalPromise>
 TrackBuffer::RangeRemoval(TimeUnit aStart, TimeUnit aEnd)
 {
   MOZ_ASSERT(NS_IsMainThread());
@@ -1106,23 +1215,23 @@ TrackBuffer::RangeRemoval(TimeUnit aStar
 
   if (aStart > bufferedStart && aEnd < bufferedEnd) {
     // TODO. We only handle trimming and removal from the start.
     NS_WARNING("RangeRemoval unsupported arguments. "
                "Can only handle trimming (trim left or trim right");
     return RangeRemovalPromise::CreateAndResolve(false, __func__);
   }
 
-  nsTArray<SourceBufferDecoder*> decoders;
+  nsTArray<nsRefPtr<SourceBufferDecoder>> decoders;
   decoders.AppendElements(mInitializedDecoders);
 
   if (aStart <= bufferedStart && aEnd < bufferedEnd) {
     // Evict data from beginning.
     for (size_t i = 0; i < decoders.Length(); ++i) {
-      TimeIntervals buffered = decoders[i]->GetBuffered();
+      TimeIntervals buffered = GetBuffered(decoders[i]);
       if (buffered.GetEnd() < aEnd) {
         // Can be fully removed.
         MSE_DEBUG("remove all bufferedEnd=%f size=%lld",
                   buffered.GetEnd().ToSeconds(),
                   decoders[i]->GetResource()->GetSize());
         decoders[i]->GetResource()->EvictAll();
       } else {
         int64_t offset = decoders[i]->ConvertToByteOffset(aEnd.ToSeconds());
@@ -1133,35 +1242,47 @@ TrackBuffer::RangeRemoval(TimeUnit aStar
           ErrorResult rv;
           decoders[i]->GetResource()->EvictData(offset, offset, rv);
           if (NS_WARN_IF(rv.Failed())) {
             rv.SuppressException();
             return RangeRemovalPromise::CreateAndResolve(false, __func__);
           }
         }
       }
-      decoders[i]->GetReader()->NotifyDataRemoved();
+      NotifyReaderDataRemoved(decoders[i]->GetReader());
     }
   } else {
     // Only trimming existing buffers.
     for (size_t i = 0; i < decoders.Length(); ++i) {
       if (aStart <= buffered.GetStart()) {
         // It will be entirely emptied, can clear all data.
         decoders[i]->GetResource()->EvictAll();
       } else {
         decoders[i]->Trim(aStart.ToMicroseconds());
       }
-      decoders[i]->GetReader()->NotifyDataRemoved();
+      NotifyReaderDataRemoved(decoders[i]->GetReader());
     }
   }
 
   RemoveEmptyDecoders(decoders);
 
-  NotifyTimeRangesChanged();
-  return RangeRemovalPromise::CreateAndResolve(true, __func__);
+  nsRefPtr<RangeRemovalPromise> p = mRangeRemovalPromise.Ensure(__func__);
+
+  // Make sure our buffered ranges are updated before resolving the promise.
+  nsRefPtr<TrackBuffer> self = this;
+  ProxyMediaCall(mParentDecoder->GetReader()->TaskQueue(), this, __func__,
+                 &TrackBuffer::UpdateBufferedRanges,
+                 Interval<int64_t>(), /* aNotifyParent */ false)
+    ->Then(mParentDecoder->GetReader()->TaskQueue(), __func__,
+           [self] {
+             self->mRangeRemovalPromise.ResolveIfExists(true, __func__);
+           },
+           [self] (nsresult) { MOZ_CRASH("Never called."); });
+
+  return p;
 }
 
 void
 TrackBuffer::AdjustDecodersTimestampOffset(TimeUnit aOffset)
 {
   ReentrantMonitorAutoEnter mon(mParentDecoder->GetReentrantMonitor());
   for (uint32_t i = 0; i < mDecoders.Length(); i++) {
     mDecoders[i]->SetTimestampOffset(mDecoders[i]->GetTimestampOffset() + aOffset.ToMicroseconds());
--- a/dom/media/mediasource/TrackBuffer.h
+++ b/dom/media/mediasource/TrackBuffer.h
@@ -12,16 +12,17 @@
 #include "mozilla/Assertions.h"
 #include "mozilla/Attributes.h"
 #include "mozilla/mozalloc.h"
 #include "mozilla/Maybe.h"
 #include "nsCOMPtr.h"
 #include "nsString.h"
 #include "nscore.h"
 #include "TimeUnits.h"
+#include <map>
 
 namespace mozilla {
 
 class ContainerParser;
 class MediaSourceDecoder;
 class MediaByteBuffer;
 
 class TrackBuffer final : public SourceBufferContentManager {
@@ -92,42 +93,52 @@ public:
   // to select decoders.
   // TODO: Refactor to a cleaner interface between TrackBuffer and MediaSourceReader.
   const nsTArray<nsRefPtr<SourceBufferDecoder>>& Decoders();
 
   // Return true if we have a partial media segment being appended that is
   // currently not playable.
   bool HasOnlyIncompleteMedia();
 
+  // Return the buffered ranges for given decoder.
+  media::TimeIntervals GetBuffered(SourceBufferDecoder* aDecoder);
+
 #ifdef MOZ_EME
   nsresult SetCDMProxy(CDMProxy* aProxy);
 #endif
 
 #if defined(DEBUG)
   void Dump(const char* aPath) override;
 #endif
 
+  typedef std::map<SourceBufferDecoder*, media::TimeIntervals> DecoderBufferedMap;
+
 private:
   friend class DecodersToInitialize;
   friend class MetadataRecipient;
   virtual ~TrackBuffer();
 
   // Create a new decoder, set mCurrentDecoder to the new decoder and
   // returns it. The new decoder must be queued using QueueInitializeDecoder
   // for initialization.
   // The decoder is not considered initialized until it is added to
   // mInitializedDecoders.
-  already_AddRefed<SourceBufferDecoder> NewDecoder(TimeUnit aTimestampOffset);
+  already_AddRefed<SourceBufferDecoder> NewDecoder(media::TimeUnit aTimestampOffset);
 
-  // Helper for AppendData, ensures NotifyDataArrived is called whenever
-  // data is appended to the current decoder's SourceBufferResource.
+  // Helper for AppendData. Appends the data to the current decoder's
+  // SourceBufferResource and returns the byte offset at which it was
+  // appended, or -1 on failure.
-  bool AppendDataToCurrentResource(MediaByteBuffer* aData,
+  int64_t AppendDataToCurrentResource(MediaByteBuffer* aData,
                                    uint32_t aDuration /* microseconds */);
   // Queue on the parent's decoder task queue a call to NotifyTimeRangesChanged.
   void NotifyTimeRangesChanged();
+  // Queue on the given reader's task queue a call to NotifyDataRemoved,
+  // followed by an update of our cached buffered ranges.
+  void NotifyReaderDataRemoved(MediaDecoderReader* aReader);
+
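+  // Recompute and cache the buffered ranges of all initialized decoders,
+  // optionally notifying the parent decoder once done.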
+  typedef MediaPromise<bool, nsresult, /* IsExclusive = */ true> BufferedRangesUpdatedPromise;
+  nsRefPtr<BufferedRangesUpdatedPromise> UpdateBufferedRanges(Interval<int64_t> aByteRange, bool aNotifyParent);
 
   // Queue execution of InitializeDecoder on mTaskQueue.
   bool QueueInitializeDecoder(SourceBufferDecoder* aDecoder);
 
   // Runs decoder initialization including calling ReadMetadata.  Runs as an
   // event on the decode thread pool.
   void InitializeDecoder(SourceBufferDecoder* aDecoder);
   // Once decoder has been initialized, set mediasource duration if required
@@ -148,17 +159,17 @@ private:
 
   // Remove aDecoder from mDecoders and dispatch an event to the main thread
   // to clean up the decoder.  If aDecoder was added to
   // mInitializedDecoders, it must have been removed before calling this
   // function.
   void RemoveDecoder(SourceBufferDecoder* aDecoder);
 
   // Remove all empty decoders from the provided list;
-  void RemoveEmptyDecoders(nsTArray<SourceBufferDecoder*>& aDecoders);
+  void RemoveEmptyDecoders(const nsTArray<nsRefPtr<SourceBufferDecoder>>& aDecoders);
 
   void OnMetadataRead(MetadataHolder* aMetadata,
                       SourceBufferDecoder* aDecoder,
                       bool aWasEnded);
 
   void OnMetadataNotRead(ReadMetadataFailureReason aReason,
                          SourceBufferDecoder* aDecoder);
 
@@ -192,31 +203,40 @@ private:
 
   // The last start and end timestamps added to the TrackBuffer via
   // AppendData.  Accessed on the main thread only.
   int64_t mLastStartTimestamp;
   Maybe<int64_t> mLastEndTimestamp;
   void AdjustDecodersTimestampOffset(TimeUnit aOffset);
 
   // The timestamp offset used by our current decoder.
-  TimeUnit mLastTimestampOffset;
-  TimeUnit mTimestampOffset;
-  TimeUnit mAdjustedTimestamp;
+  media::TimeUnit mLastTimestampOffset;
+  media::TimeUnit mTimestampOffset;
+  media::TimeUnit mAdjustedTimestamp;
 
   // True if at least one of our decoders has encrypted content.
   bool mIsWaitingOnCDM;
 
   // Set when the first decoder used by this TrackBuffer is initialized.
   // Protected by mParentDecoder's monitor.
   MediaInfo mInfo;
 
   void ContinueShutdown();
   MediaPromiseHolder<ShutdownPromise> mShutdownPromise;
   bool mDecoderPerSegment;
   bool mShutdown;
 
   MediaPromiseHolder<AppendPromise> mInitializationPromise;
   // Track our request for metadata from the reader.
   MediaPromiseRequestHolder<MediaDecoderReader::MetadataPromise> mMetadataRequest;
+
+  MediaPromiseHolder<RangeRemovalPromise> mRangeRemovalPromise;
+
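+  // Byte range of the most recent append, forwarded to readers via
+  // NotifyDataArrived.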
+  Interval<int64_t> mLastAppendRange;
+
+  // Protected by the parent decoder's monitor.
+  media::TimeIntervals mBufferedRanges;
+
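+  // Cached buffered ranges for each initialized decoder, updated in
+  // UpdateBufferedRanges. Protected by the parent decoder's monitor.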
+  DecoderBufferedMap mReadersBuffered;
 };
 
 } // namespace mozilla
 #endif /* MOZILLA_TRACKBUFFER_H_ */
--- a/dom/media/ogg/OggReader.cpp
+++ b/dom/media/ogg/OggReader.cpp
@@ -1847,16 +1847,17 @@ nsresult OggReader::SeekBisection(int64_
 
   SEEK_LOG(LogLevel::Debug, ("Seek complete in %d bisections.", hops));
 
   return NS_OK;
 }
 
 media::TimeIntervals OggReader::GetBuffered()
 {
+  MOZ_ASSERT(OnTaskQueue());
   NS_ENSURE_TRUE(mStartTime >= 0, media::TimeIntervals());
   {
     mozilla::ReentrantMonitorAutoEnter mon(mMonitor);
     if (mIsChained) {
       return media::TimeIntervals::Invalid();
     }
   }
 #ifdef OGG_ESTIMATE_BUFFERED
--- a/dom/media/raw/RawReader.cpp
+++ b/dom/media/raw/RawReader.cpp
@@ -280,10 +280,11 @@ nsresult RawReader::SeekInternal(int64_t
     }
   }
 
   return NS_OK;
 }
 
 media::TimeIntervals RawReader::GetBuffered()
 {
+  MOZ_ASSERT(OnTaskQueue());
   return media::TimeIntervals();
 }
--- a/dom/media/wave/WaveReader.cpp
+++ b/dom/media/wave/WaveReader.cpp
@@ -270,16 +270,17 @@ WaveReader::Seek(int64_t aTarget, int64_
     return SeekPromise::CreateAndReject(res, __func__);
   } else {
     return SeekPromise::CreateAndResolve(aTarget, __func__);
   }
 }
 
 media::TimeIntervals WaveReader::GetBuffered()
 {
+  MOZ_ASSERT(OnTaskQueue());
   if (!mInfo.HasAudio()) {
     return media::TimeIntervals();
   }
   media::TimeIntervals buffered;
   AutoPinned<MediaResource> resource(mDecoder->GetResource());
   int64_t startOffset = resource->GetNextCachedData(mWavePCMOffset);
   while (startOffset >= 0) {
     int64_t endOffset = resource->GetCachedDataEnd(startOffset);
--- a/dom/media/webm/WebMReader.cpp
+++ b/dom/media/webm/WebMReader.cpp
@@ -771,16 +771,17 @@ nsresult WebMReader::SeekInternal(int64_
       return NS_ERROR_FAILURE;
     }
   }
   return NS_OK;
 }
 
 media::TimeIntervals WebMReader::GetBuffered()
 {
+  MOZ_ASSERT(OnTaskQueue());
   NS_ENSURE_TRUE(mStartTime >= 0, media::TimeIntervals());
   AutoPinned<MediaResource> resource(mDecoder->GetResource());
 
   media::TimeIntervals buffered;
   // Special case completely cached files.  This also handles local files.
   if (mContext && resource->IsDataCachedToEndOfResource(0)) {
     uint64_t duration = 0;
     if (nestegg_duration(mContext, &duration) == 0) {