Bug 1002266 - Access MediaQueues from MediaDecoderStateMachine through accessors. r=kinetik
authorChris Pearce <cpearce@mozilla.com>
Mon, 28 Apr 2014 13:12:50 +1200
changeset 192530 61b91c85a7aacda7fa9d1583188854bc4b187d53
parent 192528 84ba8cd9ab6740304bb04818e787ec5b108590e7
child 192531 b937bd3175a068ecfed267a04b7078de69f7ec5c
push id: unknown
push user: unknown
push date: unknown
reviewers: kinetik
bugs: 1002266
milestone: 31.0a1
Bug 1002266 - Access MediaQueues from MediaDecoderStateMachine through accessors. r=kinetik
content/media/MediaDecoderStateMachine.cpp
content/media/MediaDecoderStateMachine.h
--- a/content/media/MediaDecoderStateMachine.cpp
+++ b/content/media/MediaDecoderStateMachine.cpp
@@ -253,39 +253,39 @@ MediaDecoderStateMachine::~MediaDecoderS
   mTimer = nullptr;
   mReader = nullptr;
 
 #ifdef XP_WIN
   timeEndPeriod(1);
 #endif
 }
 
-bool MediaDecoderStateMachine::HasFutureAudio() const {
+bool MediaDecoderStateMachine::HasFutureAudio() {
   AssertCurrentThreadInMonitor();
   NS_ASSERTION(HasAudio(), "Should only call HasFutureAudio() when we have audio");
   // We've got audio ready to play if:
   // 1. We've not completed playback of audio, and
   // 2. we either have more than the threshold of decoded audio available, or
   //    we've completely decoded all audio (but not finished playing it yet
   //    as per 1).
   return !mAudioCompleted &&
-         (AudioDecodedUsecs() > LOW_AUDIO_USECS * mPlaybackRate || mReader->AudioQueue().IsFinished());
+         (AudioDecodedUsecs() > LOW_AUDIO_USECS * mPlaybackRate || AudioQueue().IsFinished());
 }
 
-bool MediaDecoderStateMachine::HaveNextFrameData() const {
+bool MediaDecoderStateMachine::HaveNextFrameData() {
   AssertCurrentThreadInMonitor();
   return (!HasAudio() || HasFutureAudio()) &&
-         (!HasVideo() || mReader->VideoQueue().GetSize() > 0);
+         (!HasVideo() || VideoQueue().GetSize() > 0);
 }
 
 int64_t MediaDecoderStateMachine::GetDecodedAudioDuration() {
   NS_ASSERTION(OnDecodeThread() || OnStateMachineThread(),
                "Should be on decode thread or state machine thread");
   AssertCurrentThreadInMonitor();
-  int64_t audioDecoded = mReader->AudioQueue().Duration();
+  int64_t audioDecoded = AudioQueue().Duration();
   if (mAudioEndTime != -1) {
     audioDecoded += mAudioEndTime - GetMediaTime();
   }
   return audioDecoded;
 }
 
 void MediaDecoderStateMachine::SendStreamAudio(AudioData* aAudio,
                                                DecodedStreamData* aStream,
@@ -376,18 +376,18 @@ void MediaDecoderStateMachine::SendStrea
   // data yet since both SendStreamData and the audio thread want to be in
   // charge of popping the audio queue. We're waiting for the audio thread
   // to die before sending anything to our stream.
   if (mAudioThread)
     return;
 
   int64_t minLastAudioPacketTime = INT64_MAX;
   bool finished =
-      (!mInfo.HasAudio() || mReader->AudioQueue().IsFinished()) &&
-      (!mInfo.HasVideo() || mReader->VideoQueue().IsFinished());
+      (!mInfo.HasAudio() || AudioQueue().IsFinished()) &&
+      (!mInfo.HasVideo() || VideoQueue().IsFinished());
   if (mDecoder->IsSameOriginMedia()) {
     SourceMediaStream* mediaStream = stream->mStream;
     StreamTime endPosition = 0;
 
     if (!stream->mStreamInitialized) {
       if (mInfo.HasAudio()) {
         AudioSegment* audio = new AudioSegment();
         mediaStream->AddTrack(TRACK_AUDIO, mInfo.mAudio.mRate, 0, audio);
@@ -402,38 +402,38 @@ void MediaDecoderStateMachine::SendStrea
       }
       stream->mStreamInitialized = true;
     }
 
     if (mInfo.HasAudio()) {
       nsAutoTArray<AudioData*,10> audio;
       // It's OK to hold references to the AudioData because while audio
       // is captured, only the decoder thread pops from the queue (see below).
-      mReader->AudioQueue().GetElementsAfter(stream->mLastAudioPacketTime, &audio);
+      AudioQueue().GetElementsAfter(stream->mLastAudioPacketTime, &audio);
       AudioSegment output;
       for (uint32_t i = 0; i < audio.Length(); ++i) {
         SendStreamAudio(audio[i], stream, &output);
       }
       if (output.GetDuration() > 0) {
         mediaStream->AppendToTrack(TRACK_AUDIO, &output);
       }
-      if (mReader->AudioQueue().IsFinished() && !stream->mHaveSentFinishAudio) {
+      if (AudioQueue().IsFinished() && !stream->mHaveSentFinishAudio) {
         mediaStream->EndTrack(TRACK_AUDIO);
         stream->mHaveSentFinishAudio = true;
       }
       minLastAudioPacketTime = std::min(minLastAudioPacketTime, stream->mLastAudioPacketTime);
       endPosition = std::max(endPosition,
           TicksToTimeRoundDown(mInfo.mAudio.mRate, stream->mAudioFramesWritten));
     }
 
     if (mInfo.HasVideo()) {
       nsAutoTArray<VideoData*,10> video;
       // It's OK to hold references to the VideoData only the decoder thread
       // pops from the queue.
-      mReader->VideoQueue().GetElementsAfter(stream->mNextVideoTime, &video);
+      VideoQueue().GetElementsAfter(stream->mNextVideoTime, &video);
       VideoSegment output;
       for (uint32_t i = 0; i < video.Length(); ++i) {
         VideoData* v = video[i];
         if (stream->mNextVideoTime < v->mTime) {
           VERBOSE_LOG("writing last video to MediaStream %p for %lldus",
                       mediaStream, v->mTime - stream->mNextVideoTime);
           // Write last video frame to catch up. mLastVideoImage can be null here
           // which is fine, it just means there's no video.
@@ -454,17 +454,17 @@ void MediaDecoderStateMachine::SendStrea
         } else {
           VERBOSE_LOG("skipping writing video frame %lldus (end %lldus) to MediaStream",
                       v->mTime, v->GetEndTime());
         }
       }
       if (output.GetDuration() > 0) {
         mediaStream->AppendToTrack(TRACK_VIDEO, &output);
       }
-      if (mReader->VideoQueue().IsFinished() && !stream->mHaveSentFinishVideo) {
+      if (VideoQueue().IsFinished() && !stream->mHaveSentFinishVideo) {
         mediaStream->EndTrack(TRACK_VIDEO);
         stream->mHaveSentFinishVideo = true;
       }
       endPosition = std::max(endPosition,
           TicksToTimeRoundDown(RATE_VIDEO, stream->mNextVideoTime - stream->mInitialTime));
     }
 
     if (!stream->mHaveSentFinish) {
@@ -475,27 +475,27 @@ void MediaDecoderStateMachine::SendStrea
       stream->mHaveSentFinish = true;
       stream->mStream->Finish();
     }
   }
 
   if (mAudioCaptured) {
     // Discard audio packets that are no longer needed.
     while (true) {
-      const AudioData* a = mReader->AudioQueue().PeekFront();
+      const AudioData* a = AudioQueue().PeekFront();
       // Packet times are not 100% reliable so this may discard packets that
       // actually contain data for mCurrentFrameTime. This means if someone might
       // create a new output stream and we actually don't have the audio for the
       // very start. That's OK, we'll play silence instead for a brief moment.
       // That's OK. Seeking to this time would have a similar issue for such
       // badly muxed resources.
       if (!a || a->GetEndTime() >= minLastAudioPacketTime)
         break;
       mAudioEndTime = std::max(mAudioEndTime, a->GetEndTime());
-      delete mReader->AudioQueue().PopFront();
+      delete AudioQueue().PopFront();
     }
 
     if (finished) {
       mAudioCompleted = true;
       UpdateReadyState();
     }
   }
 }
@@ -510,17 +510,17 @@ MediaDecoderStateMachine::GetWakeDecoder
   }
   return mPendingWakeDecoder.get();
 }
 
 bool MediaDecoderStateMachine::HaveEnoughDecodedAudio(int64_t aAmpleAudioUSecs)
 {
   AssertCurrentThreadInMonitor();
 
-  if (mReader->AudioQueue().GetSize() == 0 ||
+  if (AudioQueue().GetSize() == 0 ||
       GetDecodedAudioDuration() < aAmpleAudioUSecs) {
     return false;
   }
   if (!mAudioCaptured) {
     return true;
   }
 
   DecodedStreamData* stream = mDecoder->GetDecodedStream();
@@ -534,17 +534,17 @@ bool MediaDecoderStateMachine::HaveEnoug
 
   return true;
 }
 
 bool MediaDecoderStateMachine::HaveEnoughDecodedVideo()
 {
   AssertCurrentThreadInMonitor();
 
-  if (static_cast<uint32_t>(mReader->VideoQueue().GetSize()) < mAmpleVideoFrames * mPlaybackRate) {
+  if (static_cast<uint32_t>(VideoQueue().GetSize()) < mAmpleVideoFrames * mPlaybackRate) {
     return false;
   }
 
   DecodedStreamData* stream = mDecoder->GetDecodedStream();
   if (stream && stream->mStreamInitialized && !stream->mHaveSentFinishVideo) {
     if (!stream->mStream->HaveEnoughBuffered(TRACK_VIDEO)) {
       return false;
     }
@@ -577,17 +577,17 @@ MediaDecoderStateMachine::DecodeVideo()
     return;
   }
   EnsureActive();
 
   // We don't want to consider skipping to the next keyframe if we've
   // only just started up the decode loop, so wait until we've decoded
   // some frames before enabling the keyframe skip logic on video.
   if (mIsVideoPrerolling &&
-      (static_cast<uint32_t>(mReader->VideoQueue().GetSize())
+      (static_cast<uint32_t>(VideoQueue().GetSize())
         >= mVideoPrerollFrames * mPlaybackRate))
   {
     mIsVideoPrerolling = false;
   }
 
   // We'll skip the video decode to the nearest keyframe if we're low on
   // audio, or if we're low on video, provided we're not running low on
   // data to decode. If we're running low on downloaded data to decode,
@@ -598,17 +598,17 @@ MediaDecoderStateMachine::DecodeVideo()
       !mSkipToNextKeyFrame &&
       mIsVideoDecoding &&
       ((!mIsAudioPrerolling && mIsAudioDecoding &&
         GetDecodedAudioDuration() < mLowAudioThresholdUsecs * mPlaybackRate) ||
         (!mIsVideoPrerolling && mIsVideoDecoding &&
          // don't skip frame when |clock time| <= |mVideoFrameEndTime| for
          // we are still in the safe range without underrunning video frames
          GetClock() > mVideoFrameEndTime &&
-        (static_cast<uint32_t>(mReader->VideoQueue().GetSize())
+        (static_cast<uint32_t>(VideoQueue().GetSize())
           < LOW_VIDEO_FRAMES * mPlaybackRate))) &&
       !HasLowUndecodedData())
   {
     mSkipToNextKeyFrame = true;
     DECODER_LOG(PR_LOG_DEBUG, "Skipping video decode to the next keyframe");
   }
 
   // Time the video decode, so that if it's slow, we can increase our low
@@ -619,17 +619,17 @@ MediaDecoderStateMachine::DecodeVideo()
     int64_t currentTime = GetMediaTime();
     ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
     TimeStamp start = TimeStamp::Now();
     mIsVideoDecoding = mReader->DecodeVideoFrame(mSkipToNextKeyFrame, currentTime);
     decodeTime = TimeStamp::Now() - start;
   }
   if (!mIsVideoDecoding) {
     // Playback ended for this stream, close the sample queue.
-    mReader->VideoQueue().Finish();
+    VideoQueue().Finish();
     CheckIfDecodeComplete();
   }
 
   if (THRESHOLD_FACTOR * DurationToUsecs(decodeTime) > mLowAudioThresholdUsecs &&
       !HasLowUndecodedData())
   {
     mLowAudioThresholdUsecs =
       std::min(THRESHOLD_FACTOR * DurationToUsecs(decodeTime), AMPLE_AUDIO_USECS);
@@ -681,17 +681,17 @@ MediaDecoderStateMachine::DecodeAudio()
   }
 
   {
     ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
     mIsAudioDecoding = mReader->DecodeAudioData();
   }
   if (!mIsAudioDecoding) {
     // Playback ended for this stream, close the sample queue.
-    mReader->AudioQueue().Finish();
+    AudioQueue().Finish();
     CheckIfDecodeComplete();
   }
 
   SendStreamData();
 
   // Notify to ensure that the AudioLoop() is not waiting, in case it was
   // waiting for more audio to be decoded.
   mDecoder->GetReentrantMonitor().NotifyAll();
@@ -710,18 +710,18 @@ MediaDecoderStateMachine::CheckIfDecodeC
   AssertCurrentThreadInMonitor();
   if (mState == DECODER_STATE_SHUTDOWN ||
       mState == DECODER_STATE_SEEKING ||
       mState == DECODER_STATE_COMPLETED) {
     // Don't change our state if we've already been shutdown, or we're seeking,
     // since we don't want to abort the shutdown or seek processes.
     return;
   }
-  MOZ_ASSERT(!mReader->AudioQueue().IsFinished() || !mIsAudioDecoding);
-  MOZ_ASSERT(!mReader->VideoQueue().IsFinished() || !mIsVideoDecoding);
+  MOZ_ASSERT(!AudioQueue().IsFinished() || !mIsAudioDecoding);
+  MOZ_ASSERT(!VideoQueue().IsFinished() || !mIsVideoDecoding);
   if (!mIsVideoDecoding && !mIsAudioDecoding) {
     // We've finished decoding all active streams,
     // so move to COMPLETED state.
     mState = DECODER_STATE_COMPLETED;
     DispatchDecodeTasksIfNeeded();
     ScheduleStateMachine();
   }
   DECODER_LOG(PR_LOG_DEBUG, "CheckIfDecodeComplete %scompleted",
@@ -817,30 +817,30 @@ void MediaDecoderStateMachine::AudioLoop
     {
       ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
       NS_ASSERTION(mState != DECODER_STATE_DECODING_METADATA,
                    "Should have meta data before audio started playing.");
       while (mState != DECODER_STATE_SHUTDOWN &&
              !mStopAudioThread &&
              (!IsPlaying() ||
               mState == DECODER_STATE_BUFFERING ||
-              (mReader->AudioQueue().GetSize() == 0 &&
-               !mReader->AudioQueue().AtEndOfStream())))
+              (AudioQueue().GetSize() == 0 &&
+               !AudioQueue().AtEndOfStream())))
       {
         if (!IsPlaying() && !mAudioStream->IsPaused()) {
           mAudioStream->Pause();
         }
         mon.Wait();
       }
 
       // If we're shutting down, break out and exit the audio thread.
       // Also break out if audio is being captured.
       if (mState == DECODER_STATE_SHUTDOWN ||
           mStopAudioThread ||
-          mReader->AudioQueue().AtEndOfStream())
+          AudioQueue().AtEndOfStream())
       {
         break;
       }
 
       // We only want to go to the expense of changing the volume if
       // the volume has changed.
       setVolume = volume != mVolume;
       volume = mVolume;
@@ -868,21 +868,21 @@ void MediaDecoderStateMachine::AudioLoop
         NS_WARNING("Setting the playback rate failed in AudioLoop.");
       }
     }
     if (setPreservesPitch) {
       if (mAudioStream->SetPreservesPitch(preservesPitch) != NS_OK) {
         NS_WARNING("Setting the pitch preservation failed in AudioLoop.");
       }
     }
-    NS_ASSERTION(mReader->AudioQueue().GetSize() > 0,
+    NS_ASSERTION(AudioQueue().GetSize() > 0,
                  "Should have data to play");
     // See if there's a gap in the audio. If there is, push silence into the
     // audio hardware, so we can play across the gap.
-    const AudioData* s = mReader->AudioQueue().PeekFront();
+    const AudioData* s = AudioQueue().PeekFront();
 
     // Calculate the number of frames that have been pushed onto the audio
     // hardware.
     CheckedInt64 playedFrames = UsecsToFrames(audioStartTime, rate) +
                                               audioDuration;
     // Calculate the timestamp of the next chunk of audio in numbers of
     // samples.
     CheckedInt64 sampleTime = UsecsToFrames(s->mTime, rate);
@@ -913,17 +913,17 @@ void MediaDecoderStateMachine::AudioLoop
         NS_WARNING("Int overflow calculating audio end time");
         break;
       }
       mAudioEndTime = playedUsecs.value();
     }
   }
   {
     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
-    if (mReader->AudioQueue().AtEndOfStream() &&
+    if (AudioQueue().AtEndOfStream() &&
         mState != DECODER_STATE_SHUTDOWN &&
         !mStopAudioThread)
     {
       // If the media was too short to trigger the start of the audio stream,
       // start it now.
       mAudioStream->Start();
       // Last frame pushed to audio hardware, wait for the audio to finish,
       // before the audio thread terminates.
@@ -983,17 +983,17 @@ uint32_t MediaDecoderStateMachine::PlayS
   return frames;
 }
 
 uint32_t MediaDecoderStateMachine::PlayFromAudioQueue(uint64_t aFrameOffset,
                                                       uint32_t aChannels)
 {
   NS_ASSERTION(OnAudioThread(), "Only call on audio thread.");
   NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
-  nsAutoPtr<AudioData> audio(mReader->AudioQueue().PopFront());
+  nsAutoPtr<AudioData> audio(AudioQueue().PopFront());
   {
     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
     NS_WARN_IF_FALSE(IsPlaying(), "Should be playing");
     // Awaken the decode loop if it's waiting for space to free up in the
     // audio queue.
     mDecoder->GetReentrantMonitor().NotifyAll();
   }
   int64_t offset = -1;
@@ -1320,18 +1320,18 @@ void MediaDecoderStateMachine::StartDeco
     return;
   }
   mState = DECODER_STATE_DECODING;
 
   mDecodeStartTime = TimeStamp::Now();
 
   // Reset our "stream finished decoding" flags, so we try to decode all
   // streams that we have when we start decoding.
-  mIsVideoDecoding = HasVideo() && !mReader->VideoQueue().IsFinished();
-  mIsAudioDecoding = HasAudio() && !mReader->AudioQueue().IsFinished();
+  mIsVideoDecoding = HasVideo() && !VideoQueue().IsFinished();
+  mIsAudioDecoding = HasAudio() && !AudioQueue().IsFinished();
 
   CheckIfDecodeComplete();
   if (mState == DECODER_STATE_COMPLETED) {
     return;
   }
 
   // Reset other state to pristine values before starting decode.
   mSkipToNextKeyFrame = false;
@@ -1514,17 +1514,17 @@ MediaDecoderStateMachine::EnsureActive()
 void
 MediaDecoderStateMachine::SetReaderIdle()
 {
 #ifdef PR_LOGGING
   {
     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
     DECODER_LOG(PR_LOG_DEBUG, "SetReaderIdle() audioQueue=%lld videoQueue=%lld",
                 GetDecodedAudioDuration(),
-                mReader->VideoQueue().Duration());
+                VideoQueue().Duration());
   }
 #endif
   MOZ_ASSERT(OnDecodeThread());
   mReader->SetIdle();
 }
 
 void
 MediaDecoderStateMachine::SetReaderActive()
@@ -1710,49 +1710,49 @@ MediaDecoderStateMachine::StartAudioThre
 
     nsCOMPtr<nsIRunnable> event =
       NS_NewRunnableMethod(this, &MediaDecoderStateMachine::AudioLoop);
     mAudioThread->Dispatch(event, NS_DISPATCH_NORMAL);
   }
   return NS_OK;
 }
 
-int64_t MediaDecoderStateMachine::AudioDecodedUsecs() const
+int64_t MediaDecoderStateMachine::AudioDecodedUsecs()
 {
   NS_ASSERTION(HasAudio(),
                "Should only call AudioDecodedUsecs() when we have audio");
   // The amount of audio we have decoded is the amount of audio data we've
   // already decoded and pushed to the hardware, plus the amount of audio
   // data waiting to be pushed to the hardware.
   int64_t pushed = (mAudioEndTime != -1) ? (mAudioEndTime - GetMediaTime()) : 0;
-  return pushed + mReader->AudioQueue().Duration();
+  return pushed + AudioQueue().Duration();
 }
 
-bool MediaDecoderStateMachine::HasLowDecodedData(int64_t aAudioUsecs) const
+bool MediaDecoderStateMachine::HasLowDecodedData(int64_t aAudioUsecs)
 {
   AssertCurrentThreadInMonitor();
   // We consider ourselves low on decoded data if we're low on audio,
   // provided we've not decoded to the end of the audio stream, or
   // if we're low on video frames, provided
   // we've not decoded to the end of the video stream.
   return ((HasAudio() &&
-           !mReader->AudioQueue().IsFinished() &&
+           !AudioQueue().IsFinished() &&
            AudioDecodedUsecs() < aAudioUsecs)
           ||
          (HasVideo() &&
-          !mReader->VideoQueue().IsFinished() &&
-          static_cast<uint32_t>(mReader->VideoQueue().GetSize()) < LOW_VIDEO_FRAMES));
+          !VideoQueue().IsFinished() &&
+          static_cast<uint32_t>(VideoQueue().GetSize()) < LOW_VIDEO_FRAMES));
 }
 
-bool MediaDecoderStateMachine::HasLowUndecodedData() const
+bool MediaDecoderStateMachine::HasLowUndecodedData()
 {
   return HasLowUndecodedData(mLowDataThresholdUsecs);
 }
 
-bool MediaDecoderStateMachine::HasLowUndecodedData(double aUsecs) const
+bool MediaDecoderStateMachine::HasLowUndecodedData(double aUsecs)
 {
   AssertCurrentThreadInMonitor();
   NS_ASSERTION(mState > DECODER_STATE_DECODING_METADATA,
                "Must have loaded metadata for GetBuffered() to work");
 
   bool reliable;
   double bytesPerSecond = mDecoder->ComputePlaybackRate(&reliable);
   if (!reliable) {
@@ -1881,22 +1881,22 @@ nsresult MediaDecoderStateMachine::Decod
                                  HasAudio(),
                                  HasVideo(),
                                  tags);
   NS_DispatchToMainThread(metadataLoadedEvent, NS_DISPATCH_NORMAL);
 
   if (HasAudio()) {
     RefPtr<nsIRunnable> decodeTask(
       NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DispatchAudioDecodeTaskIfNeeded));
-    mReader->AudioQueue().AddPopListener(decodeTask, mDecodeTaskQueue);
+    AudioQueue().AddPopListener(decodeTask, mDecodeTaskQueue);
   }
   if (HasVideo()) {
     RefPtr<nsIRunnable> decodeTask(
       NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DispatchVideoDecodeTaskIfNeeded));
-    mReader->VideoQueue().AddPopListener(decodeTask, mDecodeTaskQueue);
+    VideoQueue().AddPopListener(decodeTask, mDecodeTaskQueue);
   }
 
   if (mState == DECODER_STATE_DECODING_METADATA) {
     DECODER_LOG(PR_LOG_DEBUG, "Changed state from DECODING_METADATA to DECODING");
     StartDecoding();
   }
 
   // For very short media FindStartTime() can decode the entire media.
@@ -1984,17 +1984,17 @@ void MediaDecoderStateMachine::DecodeSee
         ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
         video = mReader->FindStartTime(nextSampleStartTime);
       }
 
       // Setup timestamp state.
       if (seekTime == mEndTime) {
         newCurrentTime = mAudioStartTime = seekTime;
       } else if (HasAudio()) {
-        AudioData* audio = mReader->AudioQueue().PeekFront();
+        AudioData* audio = AudioQueue().PeekFront();
         newCurrentTime = mAudioStartTime = audio ? audio->mTime : seekTime;
       } else {
         newCurrentTime = video ? video->mTime : seekTime;
       }
       mPlayDuration = newCurrentTime - mStartTime;
 
       if (HasVideo()) {
         if (video) {
@@ -2123,18 +2123,18 @@ nsresult MediaDecoderStateMachine::RunSt
       if (mAudioThread) {
         MOZ_ASSERT(mStopAudioThread);
         return NS_OK;
       }
 
       // The reader's listeners hold references to the state machine,
       // creating a cycle which keeps the state machine and its shared
       // thread pools alive. So break it here.
-      mReader->AudioQueue().ClearListeners();
-      mReader->VideoQueue().ClearListeners();
+      AudioQueue().ClearListeners();
+      VideoQueue().ClearListeners();
 
       {
         ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
         // Wait for the thread decoding to exit.
         mDecodeTaskQueue->Shutdown();
         mReader->ReleaseMediaResources();
       }
       // Now that those threads are stopped, there's no possibility of
@@ -2253,17 +2253,17 @@ nsresult MediaDecoderStateMachine::RunSt
       // Ensure we have a decode thread to perform the seek.
      return EnqueueDecodeSeekTask();
     }
 
     case DECODER_STATE_COMPLETED: {
       // Play the remaining media. We want to run AdvanceFrame() at least
       // once to ensure the current playback position is advanced to the
       // end of the media, and so that we update the readyState.
-      if (mReader->VideoQueue().GetSize() > 0 ||
+      if (VideoQueue().GetSize() > 0 ||
           (HasAudio() && !mAudioCompleted) ||
           (mDecoder->GetDecodedStream() && !mDecoder->GetDecodedStream()->IsFinished()))
       {
         AdvanceFrame();
         NS_ASSERTION(mDecoder->GetState() != MediaDecoder::PLAY_STATE_PLAYING ||
                      mPlaybackRate == 0 ||
                      IsStateMachineScheduled(),
                      "Must have timer scheduled");
@@ -2419,35 +2419,35 @@ void MediaDecoderStateMachine::AdvanceFr
   // Skip frames up to the frame at the playback position, and figure out
   // the time remaining until it's time to display the next frame.
   int64_t remainingTime = AUDIO_DURATION_USECS;
   NS_ASSERTION(clock_time >= mStartTime, "Should have positive clock time.");
   nsAutoPtr<VideoData> currentFrame;
 #ifdef PR_LOGGING
   int32_t droppedFrames = 0;
 #endif
-  if (mReader->VideoQueue().GetSize() > 0) {
-    VideoData* frame = mReader->VideoQueue().PeekFront();
+  if (VideoQueue().GetSize() > 0) {
+    VideoData* frame = VideoQueue().PeekFront();
     while (mRealTime || clock_time >= frame->mTime) {
       mVideoFrameEndTime = frame->GetEndTime();
       currentFrame = frame;
 #ifdef PR_LOGGING
       VERBOSE_LOG("discarding video frame %lld", frame->mTime);
       if (droppedFrames++) {
         VERBOSE_LOG("discarding video frame %lld (%d so far)", frame->mTime, droppedFrames-1);
       }
 #endif
-      mReader->VideoQueue().PopFront();
+      VideoQueue().PopFront();
       // Notify the decode thread that the video queue's buffers may have
       // free'd up space for more frames.
       mDecoder->GetReentrantMonitor().NotifyAll();
       mDecoder->UpdatePlaybackOffset(frame->mOffset);
-      if (mReader->VideoQueue().GetSize() == 0)
+      if (VideoQueue().GetSize() == 0)
         break;
-      frame = mReader->VideoQueue().PeekFront();
+      frame = VideoQueue().PeekFront();
     }
     // Current frame has already been presented, wait until it's time to
     // present the next frame.
     if (frame && !currentFrame) {
       int64_t now = IsPlaying() ? clock_time : mPlayDuration;
 
       remainingTime = frame->mTime - now;
     }
@@ -2458,17 +2458,17 @@ void MediaDecoderStateMachine::AdvanceFr
   MediaResource* resource = mDecoder->GetResource();
   if (mState == DECODER_STATE_DECODING &&
       mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING &&
       HasLowDecodedData(remainingTime + EXHAUSTED_DATA_MARGIN_USECS) &&
       !mDecoder->IsDataCachedToEndOfResource() &&
       !resource->IsSuspended()) {
     if (JustExitedQuickBuffering() || HasLowUndecodedData()) {
       if (currentFrame) {
-        mReader->VideoQueue().PushFront(currentFrame.forget());
+        VideoQueue().PushFront(currentFrame.forget());
       }
       StartBuffering();
       // Don't go straight back to the state machine loop since that might
       // cause us to start decoding again and we could flip-flop between
       // decoding and quick-buffering.
       ScheduleStateMachine(USECS_PER_S);
       return;
     }
--- a/content/media/MediaDecoderStateMachine.h
+++ b/content/media/MediaDecoderStateMachine.h
@@ -236,17 +236,17 @@ public:
   // This is called on the state machine thread and audio thread.
   // The decoder monitor must be obtained before calling this.
   bool HasVideo() const {
     AssertCurrentThreadInMonitor();
     return mInfo.HasVideo();
   }
 
   // Should be called by main thread.
-  bool HaveNextFrameData() const;
+  bool HaveNextFrameData();
 
   // Must be called with the decode monitor held.
   bool IsBuffering() const {
     AssertCurrentThreadInMonitor();
 
     return mState == DECODER_STATE_BUFFERING;
   }
 
@@ -392,50 +392,53 @@ protected:
     // would mean in some cases we'd have to destroy mStateMachine from this
     // object, which would be problematic since MediaDecoderStateMachine can
     // only be destroyed on the main thread whereas this object can be destroyed
     // on the media stream graph thread.
     MediaDecoderStateMachine* mStateMachine;
   };
   WakeDecoderRunnable* GetWakeDecoderRunnable();
 
+  MediaQueue<AudioData>& AudioQueue() { return mReader->AudioQueue(); }
+  MediaQueue<VideoData>& VideoQueue() { return mReader->VideoQueue(); }
+
   // True if our buffers of decoded audio are not full, and we should
   // decode more.
   bool NeedToDecodeAudio();
 
   // Decodes some audio. This should be run on the decode task queue.
   void DecodeAudio();
 
   // True if our buffers of decoded video are not full, and we should
   // decode more.
   bool NeedToDecodeVideo();
 
   // Decodes some video. This should be run on the decode task queue.
   void DecodeVideo();
 
   // Returns true if we've got less than aAudioUsecs microseconds of decoded
   // and playable data. The decoder monitor must be held.
-  bool HasLowDecodedData(int64_t aAudioUsecs) const;
+  bool HasLowDecodedData(int64_t aAudioUsecs);
 
   // Returns true if we're running low on data which is not yet decoded.
   // The decoder monitor must be held.
-  bool HasLowUndecodedData() const;
+  bool HasLowUndecodedData();
 
   // Returns true if we have less than aUsecs of undecoded data available.
-  bool HasLowUndecodedData(double aUsecs) const;
+  bool HasLowUndecodedData(double aUsecs);
 
   // Returns the number of unplayed usecs of audio we've got decoded and/or
   // pushed to the hardware waiting to play. This is how much audio we can
   // play without having to run the audio decoder. The decoder monitor
   // must be held.
-  int64_t AudioDecodedUsecs() const;
+  int64_t AudioDecodedUsecs();
 
   // Returns true when there's decoded audio waiting to play.
   // The decoder monitor must be held.
-  bool HasFutureAudio() const;
+  bool HasFutureAudio();
 
   // Returns true if we recently exited "quick buffering" mode.
   bool JustExitedQuickBuffering();
 
   // Waits on the decoder ReentrantMonitor for aUsecs microseconds. If the decoder
   // monitor is awoken by a Notify() call, we'll continue waiting, unless
   // we've moved into shutdown state. This enables us to ensure that we
   // wait for a specified time, and that the myriad of Notify()s we do on