Bug 1265634. Part 2 - rename mReaderWrapper for less wording. r=kaku.
author: JW Wang <jwwang@mozilla.com>
Wed, 20 Apr 2016 14:45:41 +0800
changeset 331865 9161ee282b677f56af7f70501bc2f471674d7998
parent 331864 913714b39c4cc64baac11515e7a9e8c21b98729c
child 331866 7ce9649c759f241dcd104da7dfa28e1f333c6c9d
push id: 6048
push user: kmoir@mozilla.com
push date: Mon, 06 Jun 2016 19:02:08 +0000
treeherder: mozilla-beta@46d72a56c57d
reviewers: kaku
bugs: 1265634
milestone: 48.0a1
Bug 1265634. Part 2 - rename mReaderWrapper for less wording. r=kaku. MozReview-Commit-ID: FRjCQMqoAlR
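In summary, this is a mechanical rename of the state machine's reader member for brevity, with every call site updated accordingly. The declaration change in MediaDecoderStateMachine.h, excerpted from the diff below, is:

-  const RefPtr<MediaDecoderReaderWrapper> mReaderWrapper;
+  const RefPtr<MediaDecoderReaderWrapper> mReader;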
dom/media/MediaDecoderStateMachine.cpp
dom/media/MediaDecoderStateMachine.h
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -214,17 +214,17 @@ MediaDecoderStateMachine::MediaDecoderSt
   mWatchManager(this, mTaskQueue),
   mRealTime(aRealTime),
   mDispatchedStateMachine(false),
   mDelayedScheduler(mTaskQueue),
   mState(DECODER_STATE_DECODING_METADATA, "MediaDecoderStateMachine::mState"),
   mCurrentFrameID(0),
   mObservedDuration(TimeUnit(), "MediaDecoderStateMachine::mObservedDuration"),
   mFragmentEndTime(-1),
-  mReaderWrapper(new MediaDecoderReaderWrapper(aRealTime, mTaskQueue, aReader)),
+  mReader(new MediaDecoderReaderWrapper(aRealTime, mTaskQueue, aReader)),
   mDecodedAudioEndTime(0),
   mDecodedVideoEndTime(0),
   mPlaybackRate(1.0),
   mLowAudioThresholdUsecs(detail::LOW_AUDIO_USECS),
   mAmpleAudioThresholdUsecs(detail::AMPLE_AUDIO_USECS),
   mQuickBufferingLowDataThresholdUsecs(detail::QUICK_BUFFERING_LOW_DATA_USECS),
   mIsAudioPrerolling(false),
   mIsVideoPrerolling(false),
@@ -317,17 +317,17 @@ MediaDecoderStateMachine::~MediaDecoderS
 }
 
 void
 MediaDecoderStateMachine::InitializationTask(MediaDecoder* aDecoder)
 {
   MOZ_ASSERT(OnTaskQueue());
 
   // Connect mirrors.
-  mBuffered.Connect(mReaderWrapper->CanonicalBuffered());
+  mBuffered.Connect(mReader->CanonicalBuffered());
   mEstimatedDuration.Connect(aDecoder->CanonicalEstimatedDuration());
   mExplicitDuration.Connect(aDecoder->CanonicalExplicitDuration());
   mPlayState.Connect(aDecoder->CanonicalPlayState());
   mNextPlayState.Connect(aDecoder->CanonicalNextPlayState());
   mLogicallySeeking.Connect(aDecoder->CanonicalLogicallySeeking());
   mVolume.Connect(aDecoder->CanonicalVolume());
   mLogicalPlaybackRate.Connect(aDecoder->CanonicalPlaybackRate());
   mPreservesPitch.Connect(aDecoder->CanonicalPreservesPitch());
@@ -516,28 +516,28 @@ MediaDecoderStateMachine::NeedToSkipToNe
   // We'll skip the video decode to the next keyframe if we're low on
   // audio, or if we're low on video, provided we're not running low on
   // data to decode. If we're running low on downloaded data to decode,
   // we won't start keyframe skipping, as we'll be pausing playback to buffer
   // soon anyway and we'll want to be able to display frames immediately
   // after buffering finishes. We ignore the low audio calculations for
   // readers that are async, as since their audio decode runs on a different
   // task queue it should never run low and skipping won't help their decode.
-  bool isLowOnDecodedAudio = !mReaderWrapper->IsAsync() &&
+  bool isLowOnDecodedAudio = !mReader->IsAsync() &&
                              !mIsAudioPrerolling && IsAudioDecoding() &&
                              (GetDecodedAudioDuration() <
                               mLowAudioThresholdUsecs * mPlaybackRate);
   bool isLowOnDecodedVideo = !mIsVideoPrerolling &&
                              ((GetClock() - mDecodedVideoEndTime) * mPlaybackRate >
                               LOW_VIDEO_THRESHOLD_USECS);
   bool lowUndecoded = HasLowUndecodedData();
 
   if ((isLowOnDecodedAudio || isLowOnDecodedVideo) && !lowUndecoded) {
     DECODER_LOG("Skipping video decode to the next keyframe lowAudio=%d lowVideo=%d lowUndecoded=%d async=%d",
-                isLowOnDecodedAudio, isLowOnDecodedVideo, lowUndecoded, mReaderWrapper->IsAsync());
+                isLowOnDecodedAudio, isLowOnDecodedVideo, lowUndecoded, mReader->IsAsync());
     return true;
   }
 
   return false;
 }
 
 bool
 MediaDecoderStateMachine::NeedToDecodeAudio()
@@ -688,21 +688,21 @@ MediaDecoderStateMachine::OnNotDecoded(M
   if (aReason == MediaDecoderReader::DECODE_ERROR) {
     DecodeError();
     return;
   }
 
   // If the decoder is waiting for data, we tell it to call us back when the
   // data arrives.
   if (aReason == MediaDecoderReader::WAITING_FOR_DATA) {
-    MOZ_ASSERT(mReaderWrapper->IsWaitForDataSupported(),
+    MOZ_ASSERT(mReader->IsWaitForDataSupported(),
                "Readers that send WAITING_FOR_DATA need to implement WaitForData");
     RefPtr<MediaDecoderStateMachine> self = this;
     WaitRequestRef(aType).Begin(
-      mReaderWrapper->WaitForData(aType)
+      mReader->WaitForData(aType)
       ->Then(OwnerThread(), __func__,
              [self] (MediaData::Type aType) -> void {
                self->WaitRequestRef(aType).Complete();
                if (aType == MediaData::AUDIO_DATA) {
                  self->EnsureAudioDecodeTaskQueued();
                } else {
                  self->EnsureVideoDecodeTaskQueued();
                }
@@ -821,17 +821,17 @@ MediaDecoderStateMachine::OnVideoDecoded
         StopPrerollingVideo();
       }
 
       // For non async readers, if the requested video sample was slow to
       // arrive, increase the amount of audio we buffer to ensure that we
       // don't run out of audio. This is unnecessary for async readers,
       // since they decode audio and video on different threads so they
       // are unlikely to run out of decoded audio.
-      if (mReaderWrapper->IsAsync()) {
+      if (mReader->IsAsync()) {
         return;
       }
       TimeDuration decodeTime = TimeStamp::Now() - aDecodeStartTime;
       if (!IsDecodingFirstFrame() &&
           THRESHOLD_FACTOR * DurationToUsecs(decodeTime) > mLowAudioThresholdUsecs &&
           !HasLowUndecodedData())
       {
         mLowAudioThresholdUsecs =
@@ -902,28 +902,28 @@ nsresult MediaDecoderStateMachine::Init(
     this, &MediaDecoderStateMachine::InitializationTask, aDecoder);
   mTaskQueue->Dispatch(r.forget());
 
   mAudioQueueListener = AudioQueue().PopEvent().Connect(
     mTaskQueue, this, &MediaDecoderStateMachine::OnAudioPopped);
   mVideoQueueListener = VideoQueue().PopEvent().Connect(
     mTaskQueue, this, &MediaDecoderStateMachine::OnVideoPopped);
 
-  mMetadataManager.Connect(mReaderWrapper->TimedMetadataEvent(), OwnerThread());
+  mMetadataManager.Connect(mReader->TimedMetadataEvent(), OwnerThread());
 
   mMediaSink = CreateMediaSink(mAudioCaptured);
 
 #ifdef MOZ_EME
   mCDMProxyPromise.Begin(aDecoder->RequestCDMProxy()->Then(
     OwnerThread(), __func__, this,
     &MediaDecoderStateMachine::OnCDMProxyReady,
     &MediaDecoderStateMachine::OnCDMProxyNotReady));
 #endif
 
-  nsresult rv = mReaderWrapper->Init();
+  nsresult rv = mReader->Init();
   NS_ENSURE_SUCCESS(rv, rv);
 
   r = NS_NewRunnableMethod(this, &MediaDecoderStateMachine::ReadMetadata);
   OwnerThread()->Dispatch(r.forget());
 
   return NS_OK;
 }
 
@@ -980,21 +980,21 @@ void
 MediaDecoderStateMachine::MaybeStartBuffering()
 {
   MOZ_ASSERT(OnTaskQueue());
 
   if (mState == DECODER_STATE_DECODING &&
       mPlayState == MediaDecoder::PLAY_STATE_PLAYING &&
       mResource->IsExpectingMoreData()) {
     bool shouldBuffer;
-    if (mReaderWrapper->UseBufferingHeuristics()) {
+    if (mReader->UseBufferingHeuristics()) {
       shouldBuffer = HasLowDecodedData(EXHAUSTED_DATA_MARGIN_USECS) &&
                      (JustExitedQuickBuffering() || HasLowUndecodedData());
     } else {
-      MOZ_ASSERT(mReaderWrapper->IsWaitForDataSupported());
+      MOZ_ASSERT(mReader->IsWaitForDataSupported());
       shouldBuffer = (OutOfDecodedAudio() && mAudioWaitRequest.Exists()) ||
                      (OutOfDecodedVideo() && mVideoWaitRequest.Exists());
     }
     if (shouldBuffer) {
       StartBuffering();
       // Don't go straight back to the state machine loop since that might
       // cause us to start decoding again and we could flip-flop between
       // decoding and quick-buffering.
@@ -1162,17 +1162,17 @@ MediaDecoderStateMachine::SetDormant(boo
     Reset();
 
     // Note that we do not wait for the decode task queue to go idle before
     // queuing the ReleaseMediaResources task - instead, we disconnect promises,
     // reset state, and put a ResetDecode in the decode task queue. Any tasks
     // that run after ResetDecode are supposed to run with a clean slate. We rely
     // on that in other places (i.e. seeking), so it seems reasonable to rely on
     // it here as well.
-    mReaderWrapper->ReleaseMediaResources();
+    mReader->ReleaseMediaResources();
   } else if ((aDormant != true) && (mState == DECODER_STATE_DORMANT)) {
     mDecodingFirstFrame = true;
     SetState(DECODER_STATE_DECODING_METADATA);
     ReadMetadata();
   }
 }
 
 RefPtr<ShutdownPromise>
@@ -1205,17 +1205,17 @@ MediaDecoderStateMachine::Shutdown()
   Reset();
 
   mMediaSink->Shutdown();
 
   DECODER_LOG("Shutdown started");
 
  // Put a task in the decode queue to shut down the reader and wait for
  // the queue to spin down.
-  return mReaderWrapper->Shutdown()
+  return mReader->Shutdown()
     ->Then(OwnerThread(), __func__, this,
            &MediaDecoderStateMachine::FinishShutdown,
            &MediaDecoderStateMachine::FinishShutdown)
     ->CompletionPromise();
 }
 
 void MediaDecoderStateMachine::StartDecoding()
 {
@@ -1335,17 +1335,17 @@ MediaDecoderStateMachine::ReadMetadata()
   MOZ_ASSERT(OnTaskQueue());
   MOZ_ASSERT(!IsShutdown());
   MOZ_ASSERT(mState == DECODER_STATE_DECODING_METADATA);
   MOZ_ASSERT(!mMetadataRequest.Exists());
 
   DECODER_LOG("Dispatching AsyncReadMetadata");
   // Set mode to METADATA since we are about to read metadata.
   mResource->SetReadMode(MediaCacheStream::MODE_METADATA);
-  mMetadataRequest.Begin(mReaderWrapper->ReadMetadata()
+  mMetadataRequest.Begin(mReader->ReadMetadata()
     ->Then(OwnerThread(), __func__, this,
            &MediaDecoderStateMachine::OnMetadataRead,
            &MediaDecoderStateMachine::OnMetadataNotRead));
 }
 
 RefPtr<MediaDecoder::SeekPromise>
 MediaDecoderStateMachine::Seek(SeekTarget aTarget)
 {
@@ -1360,17 +1360,17 @@ MediaDecoderStateMachine::Seek(SeekTarge
     DECODER_WARN("Seek() function should not be called on a non-seekable state machine");
     return MediaDecoder::SeekPromise::CreateAndReject(/* aIgnored = */ true, __func__);
   }
 
   MOZ_ASSERT(mState > DECODER_STATE_DECODING_METADATA,
                "We should have got duration already");
 
   if (mState < DECODER_STATE_DECODING ||
-      (IsDecodingFirstFrame() && !mReaderWrapper->ForceZeroStartTime())) {
+      (IsDecodingFirstFrame() && !mReader->ForceZeroStartTime())) {
     DECODER_LOG("Seek() Not Enough Data to continue at this stage, queuing seek");
     mQueuedSeek.RejectIfExists(__func__);
     mQueuedSeek.mTarget = aTarget;
     return mQueuedSeek.mPromise.Ensure(__func__);
   }
   mQueuedSeek.RejectIfExists(__func__);
 
   DECODER_LOG("Changed state to SEEKING (to %lld)", aTarget.GetTime().ToMicroseconds());
@@ -1448,17 +1448,17 @@ MediaDecoderStateMachine::DispatchDecode
   if (needToDecodeVideo) {
     EnsureVideoDecodeTaskQueued();
   }
 
   if (needIdle) {
     DECODER_LOG("Dispatching SetIdle() audioQueue=%lld videoQueue=%lld",
                 GetDecodedAudioDuration(),
                 VideoQueue().Duration());
-    mReaderWrapper->SetIdle();
+    mReader->SetIdle();
   }
 }
 
 void
 MediaDecoderStateMachine::InitiateSeek(SeekJob aSeekJob)
 {
   MOZ_ASSERT(OnTaskQueue());
 
@@ -1466,17 +1466,17 @@ MediaDecoderStateMachine::InitiateSeek(S
   if (mSeekTask) {
     mSeekTask->Discard();
   }
 
   mSeekTaskRequest.DisconnectIfExists();
 
   // Create a new SeekTask instance for the incoming seek task.
   mSeekTask = SeekTask::CreateSeekTask(mDecoderID, OwnerThread(),
-                                       mReaderWrapper.get(), Move(aSeekJob),
+                                       mReader.get(), Move(aSeekJob),
                                        mInfo, Duration(), GetMediaTime());
 
   // Stop playback now to ensure that while we're outside the monitor
  // dispatching SeekingStarted, playback doesn't advance and mess with
  // mCurrentPosition that we're setting to seekTime here.
   StopPlayback();
   UpdatePlaybackPositionInternal(mSeekTask->GetSeekJob().mTarget.GetTime().ToMicroseconds());
 
@@ -1609,20 +1609,20 @@ MediaDecoderStateMachine::EnsureAudioDec
 
 void
 MediaDecoderStateMachine::RequestAudioData()
 {
   MOZ_ASSERT(OnTaskQueue());
   MOZ_ASSERT(mState != DECODER_STATE_SEEKING);
 
   SAMPLE_LOG("Queueing audio task - queued=%i, decoder-queued=%o",
-             AudioQueue().GetSize(), mReaderWrapper->SizeOfAudioQueueInFrames());
+             AudioQueue().GetSize(), mReader->SizeOfAudioQueueInFrames());
 
   mAudioDataRequest.Begin(
-    mReaderWrapper->RequestAudioData()
+    mReader->RequestAudioData()
     ->Then(OwnerThread(), __func__, this,
            &MediaDecoderStateMachine::OnAudioDecoded,
            &MediaDecoderStateMachine::OnAudioNotDecoded));
 }
 
 nsresult
 MediaDecoderStateMachine::DispatchVideoDecodeTaskIfNeeded()
 {
@@ -1674,22 +1674,22 @@ MediaDecoderStateMachine::RequestVideoDa
   TimeStamp videoDecodeStartTime = TimeStamp::Now();
 
   bool skipToNextKeyFrame = mSentFirstFrameLoadedEvent &&
     NeedToSkipToNextKeyframe();
 
   media::TimeUnit currentTime = media::TimeUnit::FromMicroseconds(GetMediaTime());
 
   SAMPLE_LOG("Queueing video task - queued=%i, decoder-queued=%o, skip=%i, time=%lld",
-             VideoQueue().GetSize(), mReaderWrapper->SizeOfVideoQueueInFrames(), skipToNextKeyFrame,
+             VideoQueue().GetSize(), mReader->SizeOfVideoQueueInFrames(), skipToNextKeyFrame,
              currentTime.ToMicroseconds());
 
   RefPtr<MediaDecoderStateMachine> self = this;
   mVideoDataRequest.Begin(
-    mReaderWrapper->RequestVideoData(skipToNextKeyFrame, currentTime)
+    mReader->RequestVideoData(skipToNextKeyFrame, currentTime)
     ->Then(OwnerThread(), __func__,
            [self, videoDecodeStartTime] (MediaData* aVideoSample) {
              self->OnVideoDecoded(aVideoSample, videoDecodeStartTime);
            },
            [self] (MediaDecoderReader::NotDecodedReason aReason) {
              self->OnVideoNotDecoded(aReason);
            }));
 }
@@ -1718,17 +1718,17 @@ MediaDecoderStateMachine::StartMediaSink
         &MediaDecoderStateMachine::OnMediaSinkVideoError));
     }
   }
 }
 
 bool MediaDecoderStateMachine::HasLowDecodedData(int64_t aAudioUsecs)
 {
   MOZ_ASSERT(OnTaskQueue());
-  MOZ_ASSERT(mReaderWrapper->UseBufferingHeuristics());
+  MOZ_ASSERT(mReader->UseBufferingHeuristics());
   // We consider ourselves low on decoded data if we're low on audio,
   // provided we've not decoded to the end of the audio stream, or
   // if we're low on video frames, provided
   // we've not decoded to the end of the video stream.
   return ((IsAudioDecoding() && GetDecodedAudioDuration() < aAudioUsecs) ||
          (IsVideoDecoding() &&
           static_cast<uint32_t>(VideoQueue().GetSize()) < LOW_VIDEO_FRAMES));
 }
@@ -1822,31 +1822,31 @@ MediaDecoderStateMachine::OnMetadataRead
   mResource->SetReadMode(MediaCacheStream::MODE_PLAYBACK);
   mInfo = aMetadata->mInfo;
   mMetadataTags = aMetadata->mTags.forget();
   RefPtr<MediaDecoderStateMachine> self = this;
 
   if (mInfo.mMetadataDuration.isSome()) {
     RecomputeDuration();
   } else if (mInfo.mUnadjustedMetadataEndTime.isSome()) {
-    mReaderWrapper->AwaitStartTime()->Then(OwnerThread(), __func__,
+    mReader->AwaitStartTime()->Then(OwnerThread(), __func__,
       [self] () -> void {
         NS_ENSURE_TRUE_VOID(!self->IsShutdown());
         TimeUnit unadjusted = self->mInfo.mUnadjustedMetadataEndTime.ref();
-        TimeUnit adjustment = self->mReaderWrapper->StartTime();
+        TimeUnit adjustment = self->mReader->StartTime();
         self->mInfo.mMetadataDuration.emplace(unadjusted - adjustment);
         self->RecomputeDuration();
       }, [] () -> void { NS_WARNING("Adjusting metadata end time failed"); }
     );
   }
 
   if (HasVideo()) {
     DECODER_LOG("Video decode isAsync=%d HWAccel=%d videoQueueSize=%d",
-                mReaderWrapper->IsAsync(),
-                mReaderWrapper->VideoIsHardwareAccelerated(),
+                mReader->IsAsync(),
+                mReader->VideoIsHardwareAccelerated(),
                 GetAmpleVideoFrames());
   }
 
   // In general, we wait until we know the duration before notifying the decoder.
   // However, we notify  unconditionally in this case without waiting for the start
   // time, since the caller might be waiting on metadataloaded to be fired before
   // feeding in the CDM, which we need to decode the first frame (and
   // thus get the metadata). We could fix this if we could compute the start
@@ -1901,17 +1901,17 @@ void
 MediaDecoderStateMachine::EnqueueFirstFrameLoadedEvent()
 {
   MOZ_ASSERT(OnTaskQueue());
   // Track value of mSentFirstFrameLoadedEvent from before updating it
   bool firstFrameBeenLoaded = mSentFirstFrameLoadedEvent;
   mSentFirstFrameLoadedEvent = true;
   RefPtr<MediaDecoderStateMachine> self = this;
   mBufferedUpdateRequest.Begin(
-    mReaderWrapper->UpdateBufferedWithPromise()
+    mReader->UpdateBufferedWithPromise()
     ->Then(OwnerThread(),
     __func__,
     // Resolve
     [self, firstFrameBeenLoaded]() {
       self->mBufferedUpdateRequest.Complete();
       MediaDecoderEventVisibility visibility =
         firstFrameBeenLoaded ? MediaDecoderEventVisibility::Suppressed
                              : MediaDecoderEventVisibility::Observable;
@@ -1943,17 +1943,17 @@ MediaDecoderStateMachine::FinishDecodeFi
     mDuration = Some(TimeUnit::FromInfinity());
   }
 
   DECODER_LOG("Media duration %lld, "
               "transportSeekable=%d, mediaSeekable=%d",
               Duration().ToMicroseconds(), mResource->IsTransportSeekable(), mMediaSeekable.Ref());
 
   // Get potentially updated metadata
-  mReaderWrapper->ReadUpdatedMetadata(&mInfo);
+  mReader->ReadUpdatedMetadata(&mInfo);
 
   if (!mNotifyMetadataBeforeFirstFrame) {
     // If we didn't have duration and/or start time before, we should now.
     EnqueueLoadedMetadataEvent();
   }
   EnqueueFirstFrameLoadedEvent();
 
   mDecodingFirstFrame = false;
@@ -2142,33 +2142,33 @@ nsresult MediaDecoderStateMachine::RunSt
 
     case DECODER_STATE_BUFFERING: {
       TimeStamp now = TimeStamp::Now();
       NS_ASSERTION(!mBufferingStart.IsNull(), "Must know buffering start time.");
 
       // With buffering heuristics we will remain in the buffering state if
       // we've not decoded enough data to begin playback, or if we've not
       // downloaded a reasonable amount of data inside our buffering time.
-      if (mReaderWrapper->UseBufferingHeuristics()) {
+      if (mReader->UseBufferingHeuristics()) {
         TimeDuration elapsed = now - mBufferingStart;
         bool isLiveStream = resource->IsLiveStream();
         if ((isLiveStream || !CanPlayThrough()) &&
               elapsed < TimeDuration::FromSeconds(mBufferingWait * mPlaybackRate) &&
               (mQuickBuffering ? HasLowDecodedData(mQuickBufferingLowDataThresholdUsecs)
                                : HasLowUndecodedData(mBufferingWait * USECS_PER_S)) &&
               mResource->IsExpectingMoreData())
         {
           DECODER_LOG("Buffering: wait %ds, timeout in %.3lfs %s",
                       mBufferingWait, mBufferingWait - elapsed.ToSeconds(),
                       (mQuickBuffering ? "(quick exit)" : ""));
           ScheduleStateMachineIn(USECS_PER_S);
           return NS_OK;
         }
       } else if (OutOfDecodedAudio() || OutOfDecodedVideo()) {
-        MOZ_ASSERT(mReaderWrapper->IsWaitForDataSupported(),
+        MOZ_ASSERT(mReader->IsWaitForDataSupported(),
                    "Don't yet have a strategy for non-heuristic + non-WaitForData");
         DispatchDecodeTasksIfNeeded();
         MOZ_ASSERT_IF(!mMinimizePreroll && OutOfDecodedAudio(), mAudioDataRequest.Exists() || mAudioWaitRequest.Exists());
         MOZ_ASSERT_IF(!mMinimizePreroll && OutOfDecodedVideo(), mVideoDataRequest.Exists() || mVideoWaitRequest.Exists());
         DECODER_LOG("In buffering mode, waiting to be notified: outOfAudio: %d, "
                     "mAudioStatus: %s, outOfVideo: %d, mVideoStatus: %s",
                     OutOfDecodedAudio(), AudioRequestStatus(),
                     OutOfDecodedVideo(), VideoRequestStatus());
@@ -2265,17 +2265,17 @@ MediaDecoderStateMachine::Reset()
   mAudioDataRequest.DisconnectIfExists();
   mAudioWaitRequest.DisconnectIfExists();
   mVideoDataRequest.DisconnectIfExists();
   mVideoWaitRequest.DisconnectIfExists();
   mSeekTaskRequest.DisconnectIfExists();
 
   mPlaybackOffset = 0;
 
-  mReaderWrapper->ResetDecode();
+  mReader->ResetDecode();
 }
 
 int64_t
 MediaDecoderStateMachine::GetClock(TimeStamp* aTimeStamp) const
 {
   MOZ_ASSERT(OnTaskQueue());
   int64_t clockTime = mMediaSink->GetPosition(aTimeStamp);
   NS_ASSERTION(GetMediaTime() <= clockTime, "Clock should go forwards.");
@@ -2592,17 +2592,17 @@ void MediaDecoderStateMachine::OnMediaSi
 
 #ifdef MOZ_EME
 void
 MediaDecoderStateMachine::OnCDMProxyReady(RefPtr<CDMProxy> aProxy)
 {
   MOZ_ASSERT(OnTaskQueue());
   mCDMProxyPromise.Complete();
   mCDMProxy = aProxy;
-  mReaderWrapper->SetCDMProxy(aProxy);
+  mReader->SetCDMProxy(aProxy);
   if (mState == DECODER_STATE_WAIT_FOR_CDM) {
     StartDecoding();
   }
 }
 
 void
 MediaDecoderStateMachine::OnCDMProxyNotReady()
 {
@@ -2654,17 +2654,17 @@ MediaDecoderStateMachine::SetAudioCaptur
   if (mIsAudioPrerolling && DonePrerollingAudio()) {
     StopPrerollingAudio();
   }
 }
 
 uint32_t MediaDecoderStateMachine::GetAmpleVideoFrames() const
 {
   MOZ_ASSERT(OnTaskQueue());
-  return (mReaderWrapper->IsAsync() && mReaderWrapper->VideoIsHardwareAccelerated())
+  return (mReader->IsAsync() && mReader->VideoIsHardwareAccelerated())
     ? std::max<uint32_t>(sVideoQueueHWAccelSize, MIN_VIDEO_QUEUE_SIZE)
     : std::max<uint32_t>(sVideoQueueDefaultSize, MIN_VIDEO_QUEUE_SIZE);
 }
 
 void MediaDecoderStateMachine::AddOutputStream(ProcessedMediaStream* aStream,
                                                bool aFinishWhenEnded)
 {
   MOZ_ASSERT(NS_IsMainThread());
@@ -2685,35 +2685,35 @@ void MediaDecoderStateMachine::RemoveOut
       this, &MediaDecoderStateMachine::SetAudioCaptured, false);
     OwnerThread()->Dispatch(r.forget());
   }
 }
 
 size_t
 MediaDecoderStateMachine::SizeOfVideoQueue() const
 {
-  return mReaderWrapper->SizeOfVideoQueueInBytes();
+  return mReader->SizeOfVideoQueueInBytes();
 }
 
 size_t
 MediaDecoderStateMachine::SizeOfAudioQueue() const
 {
-  return mReaderWrapper->SizeOfAudioQueueInBytes();
+  return mReader->SizeOfAudioQueueInBytes();
 }
 
 AbstractCanonical<media::TimeIntervals>*
 MediaDecoderStateMachine::CanonicalBuffered()
 {
-  return mReaderWrapper->CanonicalBuffered();
+  return mReader->CanonicalBuffered();
 }
 
 MediaEventSource<void>&
 MediaDecoderStateMachine::OnMediaNotSeekable()
 {
-  return mReaderWrapper->OnMediaNotSeekable();
+  return mReader->OnMediaNotSeekable();
 }
 
 } // namespace mozilla
 
 // avoid redefined macro in unified build
 #undef LOG
 #undef DECODER_LOG
 #undef VERBOSE_LOG
--- a/dom/media/MediaDecoderStateMachine.h
+++ b/dom/media/MediaDecoderStateMachine.h
@@ -683,17 +683,17 @@ private:
   void OnSeekTaskRejected(SeekTaskRejectValue aValue);
 
   // Media Fragment end time in microseconds. Access controlled by decoder monitor.
   int64_t mFragmentEndTime;
 
   // The media sink resource.  Used on the state machine thread.
   RefPtr<media::MediaSink> mMediaSink;
 
-  const RefPtr<MediaDecoderReaderWrapper> mReaderWrapper;
+  const RefPtr<MediaDecoderReaderWrapper> mReader;
 
   // The end time of the last audio frame that's been pushed onto the media sink
   // in microseconds. This will approximately be the end time
   // of the audio stream, unless another frame is pushed to the hardware.
   int64_t AudioEndTime() const;
 
   // The end time of the last rendered video frame that's been sent to
   // compositor.