Bug 1349145. P3 - use concrete types in MDSM and its friends. Also remove unnecessary casts. r?kaku
author JW Wang <jwwang@mozilla.com>
Wed, 22 Mar 2017 11:59:54 +0800
changeset 502751 bfccb949439ea77a5bfbe58efd16503792816cae
parent 502750 08fe1b1ffa76b3a3759fc3e232c859fb2a10fae4
child 502771 a70d147b5297de35d3df5389e08b431fcab7c46c
push id 50387
push user jwwang@mozilla.com
push date Wed, 22 Mar 2017 09:35:25 +0000
reviewers kaku
bugs 1349145
milestone 55.0a1
Bug 1349145. P3 - use concrete types in MDSM and its friends. Also remove unnecessary casts. r?kaku

MozReview-Commit-ID: C2MZb01XtTC
dom/media/MediaDecoderStateMachine.cpp
dom/media/MediaDecoderStateMachine.h
dom/media/mediasink/AudioSink.h
dom/media/mediasink/DecodedAudioDataSink.cpp
dom/media/mediasink/DecodedAudioDataSink.h
dom/media/mediasink/DecodedStream.cpp
dom/media/mediasink/DecodedStream.h
dom/media/mediasink/VideoSink.cpp
dom/media/mediasink/VideoSink.h
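
In short, the patch narrows the element type of the sample queues: MediaQueue<MediaData> becomes MediaQueue<AudioData> for audio and MediaQueue<VideoData> for video, which lets MDSM, the audio sinks, DecodedStream and VideoSink drop their aSample->As<AudioData>()/As<VideoData>() downcasts. Below is a minimal, self-contained sketch of why the typed queue removes the casts; it uses std::shared_ptr and a toy SampleQueue as stand-ins for Gecko's RefPtr and MediaQueue, and its names are illustrative, not from the tree.

#include <cstdint>
#include <memory>
#include <queue>
#include <utility>

struct MediaData {             // common base: fields shared by audio and video
  int64_t mTime = 0;
  virtual ~MediaData() = default;
};

struct VideoData : MediaData { // concrete type with video-only state
  uint32_t mFrameID = 0;
};

template <typename T>          // toy stand-in for Gecko's MediaQueue<T>
class SampleQueue {
public:
  void Push(std::shared_ptr<T> aSample) { mQueue.push(std::move(aSample)); }
  std::shared_ptr<T> PopFront() {
    std::shared_ptr<T> front = std::move(mQueue.front());
    mQueue.pop();
    return front;
  }
private:
  std::queue<std::shared_ptr<T>> mQueue;
};

int main() {
  // Before this patch: a SampleQueue<MediaData> hands back MediaData*, so a
  // caller had to downcast (spelled aSample->As<VideoData>() in Gecko) to
  // reach video-only members such as mFrameID.
  // After: the concrete element type makes the downcast unnecessary.
  SampleQueue<VideoData> videoQueue;
  videoQueue.Push(std::make_shared<VideoData>());
  std::shared_ptr<VideoData> v = videoQueue.PopFront();
  v->mFrameID = 1;  // direct access; no As<VideoData>() needed
  return 0;
}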
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -191,21 +191,21 @@ public:
   virtual ~StateObject() { }
   virtual void Exit() { }   // Exit action.
   virtual void Step() { }   // Perform a 'cycle' of this state object.
   virtual State GetState() const = 0;
 
   // Event handlers for various events.
   virtual void HandleCDMProxyReady() { }
   virtual void HandleAudioCaptured() { }
-  virtual void HandleAudioDecoded(MediaData* aAudio)
+  virtual void HandleAudioDecoded(AudioData* aAudio)
   {
     Crash("Unexpected event!", __func__);
   }
-  virtual void HandleVideoDecoded(MediaData* aVideo, TimeStamp aDecodeStart)
+  virtual void HandleVideoDecoded(VideoData* aVideo, TimeStamp aDecodeStart)
   {
     Crash("Unexpected event!", __func__);
   }
   virtual void HandleAudioWaited(MediaData::Type aType)
   {
     Crash("Unexpected event!", __func__);
   }
   virtual void HandleVideoWaited(MediaData::Type aType)
@@ -278,18 +278,18 @@ protected:
   bool IsExpectingMoreData() const
   {
     // We are expecting more data if either the resource states so, or if we
     // have a waiting promise pending (such as with non-MSE EME).
     return Resource()->IsExpectingMoreData()
            || mMaster->IsWaitingAudioData()
            || mMaster->IsWaitingVideoData();
   }
-  MediaQueue<MediaData>& AudioQueue() const { return mMaster->mAudioQueue; }
-  MediaQueue<MediaData>& VideoQueue() const { return mMaster->mVideoQueue; }
+  MediaQueue<AudioData>& AudioQueue() const { return mMaster->mAudioQueue; }
+  MediaQueue<VideoData>& VideoQueue() const { return mMaster->mVideoQueue; }
 
   template <class S, typename... Args, size_t... Indexes>
   auto
   CallEnterMemberFunction(S* aS,
                           Tuple<Args...>& aTuple,
                           IndexSequence<Indexes...>)
     -> decltype(ReturnTypeHelper(&S::Enter))
   {
@@ -550,23 +550,23 @@ public:
     mPendingSeek.RejectIfExists(__func__);
   }
 
   State GetState() const override
   {
     return DECODER_STATE_DECODING_FIRSTFRAME;
   }
 
-  void HandleAudioDecoded(MediaData* aAudio) override
+  void HandleAudioDecoded(AudioData* aAudio) override
   {
     mMaster->PushAudio(aAudio);
     MaybeFinishDecodeFirstFrame();
   }
 
-  void HandleVideoDecoded(MediaData* aVideo, TimeStamp aDecodeStart) override
+  void HandleVideoDecoded(VideoData* aVideo, TimeStamp aDecodeStart) override
   {
     mMaster->PushVideo(aVideo);
     MaybeFinishDecodeFirstFrame();
   }
 
   void HandleWaitingForAudio() override
   {
     mMaster->WaitForData(MediaData::AUDIO_DATA);
@@ -697,24 +697,24 @@ public:
     MaybeStartBuffering();
   }
 
   State GetState() const override
   {
     return DECODER_STATE_DECODING;
   }
 
-  void HandleAudioDecoded(MediaData* aAudio) override
+  void HandleAudioDecoded(AudioData* aAudio) override
   {
     mMaster->PushAudio(aAudio);
     DispatchDecodeTasksIfNeeded();
     MaybeStopPrerolling();
   }
 
-  void HandleVideoDecoded(MediaData* aVideo, TimeStamp aDecodeStart) override
+  void HandleVideoDecoded(VideoData* aVideo, TimeStamp aDecodeStart) override
   {
     mMaster->PushVideo(aVideo);
     DispatchDecodeTasksIfNeeded();
     MaybeStopPrerolling();
     CheckSlowDecoding(aDecodeStart);
   }
 
   void HandleAudioCanceled() override
@@ -965,18 +965,18 @@ public:
 
   virtual void Exit() override = 0;
 
   State GetState() const override
   {
     return DECODER_STATE_SEEKING;
   }
 
-  void HandleAudioDecoded(MediaData* aAudio) override = 0;
-  void HandleVideoDecoded(MediaData* aVideo,
+  void HandleAudioDecoded(AudioData* aAudio) override = 0;
+  void HandleVideoDecoded(VideoData* aVideo,
                           TimeStamp aDecodeStart) override = 0;
   void HandleAudioWaited(MediaData::Type aType) override = 0;
   void HandleVideoWaited(MediaData::Type aType) override = 0;
 
   void HandleVideoSuspendTimeout() override
   {
     // Do nothing since we want a valid video frame to show when seek is done.
   }
@@ -1021,17 +1021,17 @@ public:
     mSeekJob.RejectIfExists(__func__);
 
     // Disconnect MediaDecoderReaderWrapper.
     mSeekRequest.DisconnectIfExists();
 
     mWaitRequest.DisconnectIfExists();
   }
 
-  void HandleAudioDecoded(MediaData* aAudio) override
+  void HandleAudioDecoded(AudioData* aAudio) override
   {
     MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking,
                "Seek shouldn't be finished");
     MOZ_ASSERT(aAudio);
 
     // Video-only seek doesn't reset audio decoder. There might be pending audio
     // requests when AccurateSeekTask::Seek() begins. We will just store the
     // data without checking |mDiscontinuity| or calling
@@ -1043,31 +1043,31 @@ public:
 
     AdjustFastSeekIfNeeded(aAudio);
 
     if (mSeekJob.mTarget->IsFast()) {
       // Non-precise seek; we can stop the seek at the first sample.
       mMaster->PushAudio(aAudio);
       mDoneAudioSeeking = true;
     } else {
-      nsresult rv = DropAudioUpToSeekTarget(aAudio->As<AudioData>());
+      nsresult rv = DropAudioUpToSeekTarget(aAudio);
       if (NS_FAILED(rv)) {
         mMaster->DecodeError(rv);
         return;
       }
     }
 
     if (!mDoneAudioSeeking) {
       RequestAudioData();
       return;
     }
     MaybeFinishSeek();
   }
 
-  void HandleVideoDecoded(MediaData* aVideo, TimeStamp aDecodeStart) override
+  void HandleVideoDecoded(VideoData* aVideo, TimeStamp aDecodeStart) override
   {
     MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking,
                "Seek shouldn't be finished");
     MOZ_ASSERT(aVideo);
 
     AdjustFastSeekIfNeeded(aVideo);
 
     if (mSeekJob.mTarget->IsFast()) {
@@ -1200,18 +1200,18 @@ private:
     // newCurrentTime and the real decoded samples' start time.
     if (mSeekJob.mTarget->IsAccurate()) {
       return seekTime;
     }
 
     // For the fast seek, we update the newCurrentTime with the decoded audio
     // and video samples, setting it to whichever is closest to the seekTime.
     if (mSeekJob.mTarget->IsFast()) {
-      RefPtr<MediaData> audio = AudioQueue().PeekFront();
-      RefPtr<MediaData> video = VideoQueue().PeekFront();
+      RefPtr<AudioData> audio = AudioQueue().PeekFront();
+      RefPtr<VideoData> video = VideoQueue().PeekFront();
 
       // A situation where both audio and video approach the end.
       if (!audio && !video) {
         return seekTime;
       }
 
       const int64_t audioStart = audio ? audio->mTime : INT64_MAX;
       const int64_t videoStart = video ? video->mTime : INT64_MAX;
@@ -1373,45 +1373,44 @@ private:
     MOZ_ASSERT(AudioQueue().GetSize() == 0,
                "Should be the 1st sample after seeking");
     mMaster->PushAudio(data);
     mDoneAudioSeeking = true;
 
     return NS_OK;
   }
 
-  nsresult DropVideoUpToSeekTarget(MediaData* aSample)
+  nsresult DropVideoUpToSeekTarget(VideoData* aVideo)
   {
-    RefPtr<VideoData> video(aSample->As<VideoData>());
-    MOZ_ASSERT(video);
+    MOZ_ASSERT(aVideo);
     SLOG("DropVideoUpToSeekTarget() frame [%" PRId64 ", %" PRId64 "]",
-         video->mTime, video->GetEndTime());
+         aVideo->mTime, aVideo->GetEndTime());
     const int64_t target = mSeekJob.mTarget->GetTime().ToMicroseconds();
 
     // If the frame end time is less than the seek target, we won't want
     // to display this frame after the seek, so discard it.
-    if (target >= video->GetEndTime()) {
+    if (target >= aVideo->GetEndTime()) {
       SLOG("DropVideoUpToSeekTarget() pop video frame [%" PRId64 ", %" PRId64 "] target=%" PRId64,
-           video->mTime, video->GetEndTime(), target);
-      mFirstVideoFrameAfterSeek = video;
+           aVideo->mTime, aVideo->GetEndTime(), target);
+      mFirstVideoFrameAfterSeek = aVideo;
     } else {
-      if (target >= video->mTime && video->GetEndTime() >= target) {
+      if (target >= aVideo->mTime && aVideo->GetEndTime() >= target) {
         // The seek target lies inside this frame's time slice. Adjust the
         // frame's start time to match the seek target.
-        video->UpdateTimestamp(target);
+        aVideo->UpdateTimestamp(target);
       }
       mFirstVideoFrameAfterSeek = nullptr;
 
       SLOG("DropVideoUpToSeekTarget() found video frame [%" PRId64 ", %" PRId64 "] "
            "containing target=%" PRId64,
-           video->mTime, video->GetEndTime(), target);
+           aVideo->mTime, aVideo->GetEndTime(), target);
 
       MOZ_ASSERT(VideoQueue().GetSize() == 0,
                  "Should be the 1st sample after seeking");
-      mMaster->PushVideo(video);
+      mMaster->PushVideo(aVideo);
       mDoneVideoSeeking = true;
     }
 
     return NS_OK;
   }
 
   void MaybeFinishSeek()
   {
@@ -1432,30 +1431,31 @@ private:
   bool mDoneAudioSeeking = false;
   bool mDoneVideoSeeking = false;
   MozPromiseRequestHolder<WaitForDataPromise> mWaitRequest;
 
   // This temporarily stores the first frame we decode after we seek.
   // This is so that if we hit end of stream while we're decoding to reach
   // the seek target, we will still have a frame that we can display as the
   // last frame in the media.
-  RefPtr<MediaData> mFirstVideoFrameAfterSeek;
+  RefPtr<VideoData> mFirstVideoFrameAfterSeek;
 };
 
 /*
  * Remove samples from the queue until aCompare() returns false.
  * aCompare A function object with the signature bool(int64_t) which returns
  *          true for samples that should be removed.
  */
-template <typename Function> static void
-DiscardFrames(MediaQueue<MediaData>& aQueue, const Function& aCompare)
+template <typename Type, typename Function>
+static void
+DiscardFrames(MediaQueue<Type>& aQueue, const Function& aCompare)
 {
   while (aQueue.GetSize() > 0) {
     if (aCompare(aQueue.PeekFront()->mTime)) {
-      RefPtr<MediaData> releaseMe = aQueue.PopFront();
+      RefPtr<Type> releaseMe = aQueue.PopFront();
       continue;
     }
     break;
   }
 }
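 // For illustration only (not part of this patch): with the queues now typed,
 // a caller can discard all frames older than a target time like so, and
 // PopFront() hands back RefPtr<VideoData> with no As<VideoData>() cast:
 //
 //   DiscardFrames(VideoQueue(), [target] (int64_t aTime) {
 //     return aTime < target;
 //   });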
 
 class MediaDecoderStateMachine::NextFrameSeekingState
   : public MediaDecoderStateMachine::SeekingState
@@ -1533,22 +1533,22 @@ private:
     // "ended" event before the seek promise is resolved.
     // An asynchronous seek operation helps to solve this issue since while the
     // seek is actually performed, the ThenValue of SeekPromise has already
     // been set so that it won't be postponed.
     RefPtr<Runnable> r = mAsyncSeekTask = new AysncNextFrameSeekTask(this);
     OwnerThread()->Dispatch(r.forget());
   }
 
-  void HandleAudioDecoded(MediaData* aAudio) override
+  void HandleAudioDecoded(AudioData* aAudio) override
   {
     mMaster->PushAudio(aAudio);
   }
 
-  void HandleVideoDecoded(MediaData* aVideo, TimeStamp aDecodeStart) override
+  void HandleVideoDecoded(VideoData* aVideo, TimeStamp aDecodeStart) override
   {
     MOZ_ASSERT(aVideo);
     MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
     MOZ_ASSERT(NeedMoreVideo());
 
     if (aVideo->mTime > mCurrentTime) {
       mMaster->PushVideo(aVideo);
       FinishSeek();
@@ -1634,17 +1634,17 @@ private:
            && !VideoQueue().IsFinished();
   }
 
   // Update the seek target's time before resolving this seek task; the updated
   // time will be used in MDSM::SeekCompleted() to update the MDSM's
   // position.
   void UpdateSeekTargetTime()
   {
-    RefPtr<MediaData> data = VideoQueue().PeekFront();
+    RefPtr<VideoData> data = VideoQueue().PeekFront();
     if (data) {
       mSeekJob.mTarget->SetTime(TimeUnit::FromMicroseconds(data->mTime));
     } else {
       MOZ_ASSERT(VideoQueue().AtEndOfStream());
       mSeekJob.mTarget->SetTime(mDuration);
     }
   }
 
@@ -1705,25 +1705,25 @@ public:
 
   void Step() override;
 
   State GetState() const override
   {
     return DECODER_STATE_BUFFERING;
   }
 
-  void HandleAudioDecoded(MediaData* aAudio) override
+  void HandleAudioDecoded(AudioData* aAudio) override
   {
     // This might be the sample we need to exit buffering.
     // Schedule Step() to check it.
     mMaster->PushAudio(aAudio);
     mMaster->ScheduleStateMachine();
   }
 
-  void HandleVideoDecoded(MediaData* aVideo, TimeStamp aDecodeStart) override
+  void HandleVideoDecoded(VideoData* aVideo, TimeStamp aDecodeStart) override
   {
     // This might be the sample we need to exit buffering.
     // Schedule Step() to check it.
     mMaster->PushVideo(aVideo);
     mMaster->ScheduleStateMachine();
   }
 
   void HandleAudioCanceled() override
@@ -2755,41 +2755,41 @@ MediaDecoderStateMachine::HaveEnoughDeco
 
 bool MediaDecoderStateMachine::HaveEnoughDecodedVideo()
 {
   MOZ_ASSERT(OnTaskQueue());
   return VideoQueue().GetSize() >= GetAmpleVideoFrames() * mPlaybackRate + 1;
 }
 
 void
-MediaDecoderStateMachine::PushAudio(MediaData* aSample)
+MediaDecoderStateMachine::PushAudio(AudioData* aSample)
 {
   MOZ_ASSERT(OnTaskQueue());
   MOZ_ASSERT(aSample);
   AudioQueue().Push(aSample);
 }
 
 void
-MediaDecoderStateMachine::PushVideo(MediaData* aSample)
+MediaDecoderStateMachine::PushVideo(VideoData* aSample)
 {
   MOZ_ASSERT(OnTaskQueue());
   MOZ_ASSERT(aSample);
-  aSample->As<VideoData>()->mFrameID = ++mCurrentFrameID;
+  aSample->mFrameID = ++mCurrentFrameID;
   VideoQueue().Push(aSample);
 }
 
 void
-MediaDecoderStateMachine::OnAudioPopped(const RefPtr<MediaData>& aSample)
+MediaDecoderStateMachine::OnAudioPopped(const RefPtr<AudioData>& aSample)
 {
   MOZ_ASSERT(OnTaskQueue());
   mPlaybackOffset = std::max(mPlaybackOffset.Ref(), aSample->mOffset);
 }
 
 void
-MediaDecoderStateMachine::OnVideoPopped(const RefPtr<MediaData>& aSample)
+MediaDecoderStateMachine::OnVideoPopped(const RefPtr<VideoData>& aSample)
 {
   MOZ_ASSERT(OnTaskQueue());
   mPlaybackOffset = std::max(mPlaybackOffset.Ref(), aSample->mOffset);
 }
 
 bool
 MediaDecoderStateMachine::IsAudioDecoding()
 {
@@ -3133,17 +3133,17 @@ MediaDecoderStateMachine::RequestAudioDa
   MOZ_ASSERT(!IsRequestingAudioData());
   MOZ_ASSERT(!IsWaitingAudioData());
   SAMPLE_LOG("Queueing audio task - queued=%" PRIuSIZE ", decoder-queued=%" PRIuSIZE,
              AudioQueue().GetSize(), mReader->SizeOfAudioQueueInFrames());
 
   RefPtr<MediaDecoderStateMachine> self = this;
   mReader->RequestAudioData()->Then(
     OwnerThread(), __func__,
-    [this, self] (MediaData* aAudio) {
+    [this, self] (AudioData* aAudio) {
       MOZ_ASSERT(aAudio);
       mAudioDataRequest.Complete();
       // aAudio->GetEndTime() is not always monotonically increasing in chained ogg.
       mDecodedAudioEndTime =
         std::max(aAudio->GetEndTime(), mDecodedAudioEndTime);
       SAMPLE_LOG("OnAudioDecoded [%" PRId64 ",%" PRId64 "]", aAudio->mTime,
                  aAudio->GetEndTime());
       mStateObj->HandleAudioDecoded(aAudio);
@@ -3180,17 +3180,17 @@ MediaDecoderStateMachine::RequestVideoDa
     ", skip=%i, time=%" PRId64,
     VideoQueue().GetSize(), mReader->SizeOfVideoQueueInFrames(),
     aSkipToNextKeyframe, aCurrentTime.ToMicroseconds());
 
   TimeStamp videoDecodeStartTime = TimeStamp::Now();
   RefPtr<MediaDecoderStateMachine> self = this;
   mReader->RequestVideoData(aSkipToNextKeyframe, aCurrentTime)->Then(
     OwnerThread(), __func__,
-    [this, self, videoDecodeStartTime] (MediaData* aVideo) {
+    [this, self, videoDecodeStartTime] (VideoData* aVideo) {
       MOZ_ASSERT(aVideo);
       mVideoDataRequest.Complete();
       // Handle abnormal or negative timestamps.
       mDecodedVideoEndTime =
         std::max(mDecodedVideoEndTime, aVideo->GetEndTime());
       SAMPLE_LOG("OnVideoDecoded [%" PRId64 ",%" PRId64 "]", aVideo->mTime,
                  aVideo->GetEndTime());
       mStateObj->HandleVideoDecoded(aVideo, videoDecodeStartTime);
--- a/dom/media/MediaDecoderStateMachine.h
+++ b/dom/media/MediaDecoderStateMachine.h
@@ -320,32 +320,32 @@ private:
 
 protected:
   virtual ~MediaDecoderStateMachine();
 
   void BufferedRangeUpdated();
 
   void ReaderSuspendedChanged();
 
-  // Inserts MediaData* samples into their respective MediaQueues.
+  // Inserts a sample into the Audio/Video queue.
   // aSample must not be null.
-  void PushAudio(MediaData* aSample);
-  void PushVideo(MediaData* aSample);
+  void PushAudio(AudioData* aSample);
+  void PushVideo(VideoData* aSample);
 
-  void OnAudioPopped(const RefPtr<MediaData>& aSample);
-  void OnVideoPopped(const RefPtr<MediaData>& aSample);
+  void OnAudioPopped(const RefPtr<AudioData>& aSample);
+  void OnVideoPopped(const RefPtr<VideoData>& aSample);
 
   void AudioAudibleChanged(bool aAudible);
 
   void VolumeChanged();
   void SetPlaybackRate(double aPlaybackRate);
   void PreservesPitchChanged();
 
-  MediaQueue<MediaData>& AudioQueue() { return mAudioQueue; }
-  MediaQueue<MediaData>& VideoQueue() { return mVideoQueue; }
+  MediaQueue<AudioData>& AudioQueue() { return mAudioQueue; }
+  MediaQueue<VideoData>& VideoQueue() { return mVideoQueue; }
 
   // True if we are low in decoded audio/video data.
   // May not be invoked when mReader->UseBufferingHeuristics() is false.
   bool HasLowDecodedData();
 
   bool HasLowDecodedAudio();
 
   bool HasLowDecodedVideo();
@@ -501,20 +501,20 @@ private:
   // yet to run.
   bool mDispatchedStateMachine;
 
   // Used to dispatch another round schedule with specific target time.
   DelayedScheduler mDelayedScheduler;
 
   // Queue of audio frames. This queue is threadsafe, and is accessed from
   // the audio, decoder, state machine, and main threads.
-  MediaQueue<MediaData> mAudioQueue;
+  MediaQueue<AudioData> mAudioQueue;
   // Queue of video frames. This queue is threadsafe, and is accessed from
   // the decoder, state machine, and main threads.
-  MediaQueue<MediaData> mVideoQueue;
+  MediaQueue<VideoData> mVideoQueue;
 
   UniquePtr<StateObject> mStateObj;
 
   media::TimeUnit Duration() const
   {
     MOZ_ASSERT(OnTaskQueue());
     return mDuration.Ref().ref();
   }
--- a/dom/media/mediasink/AudioSink.h
+++ b/dom/media/mediasink/AudioSink.h
@@ -9,29 +9,28 @@
 #include "mozilla/MozPromise.h"
 #include "mozilla/RefPtr.h"
 #include "nsISupportsImpl.h"
 
 #include "MediaSink.h"
 
 namespace mozilla {
 
-class MediaData;
 template <class T> class MediaQueue;
 
 namespace media {
 
 /*
  * Define basic APIs for derived class instance to operate or obtain
  * information from it.
  */
 class AudioSink {
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioSink)
-  AudioSink(MediaQueue<MediaData>& aAudioQueue)
+  AudioSink(MediaQueue<AudioData>& aAudioQueue)
     : mAudioQueue(aAudioQueue)
   {}
 
   typedef MediaSink::PlaybackParams PlaybackParams;
 
   // Return a promise which will be resolved when AudioSink finishes playing,
   // or rejected if any error occurs.
   virtual RefPtr<GenericPromise> Init(const PlaybackParams& aParams) = 0;
@@ -52,21 +51,19 @@ public:
   virtual void SetPreservesPitch(bool aPreservesPitch) = 0;
 
   // Change audio playback status pause/resume.
   virtual void SetPlaying(bool aPlaying) = 0;
 
 protected:
   virtual ~AudioSink() {}
 
-  virtual MediaQueue<MediaData>& AudioQueue() const {
+  virtual MediaQueue<AudioData>& AudioQueue() const {
     return mAudioQueue;
   }
 
-  // To queue audio data (no matter it's plain or encoded or encrypted, depends
-  // on the subclass)
-  MediaQueue<MediaData>& mAudioQueue;
+  MediaQueue<AudioData>& mAudioQueue;
 };
 
 } // namespace media
 } // namespace mozilla
 
 #endif
--- a/dom/media/mediasink/DecodedAudioDataSink.cpp
+++ b/dom/media/mediasink/DecodedAudioDataSink.cpp
@@ -29,17 +29,17 @@ namespace media {
 
 // The number of audio frames used to fuzz rounding errors.
 static const int64_t AUDIO_FUZZ_FRAMES = 1;
 
 // The number of audio frames we will be processing ahead of use
 static const int32_t LOW_AUDIO_USECS = 300000;
 
 DecodedAudioDataSink::DecodedAudioDataSink(AbstractThread* aThread,
-                                           MediaQueue<MediaData>& aAudioQueue,
+                                           MediaQueue<AudioData>& aAudioQueue,
                                            int64_t aStartTime,
                                            const AudioInfo& aInfo,
                                            dom::AudioChannel aChannel)
   : AudioSink(aAudioQueue)
   , mStartTime(aStartTime)
   , mLastGoodPosition(0)
   , mInfo(aInfo)
   , mChannel(aChannel)
@@ -338,42 +338,41 @@ DecodedAudioDataSink::CheckIsAudible(con
   bool isAudible = aData->IsAudible();
   if (isAudible != mIsAudioDataAudible) {
     mIsAudioDataAudible = isAudible;
     mAudibleEvent.Notify(mIsAudioDataAudible);
   }
 }
 
 void
-DecodedAudioDataSink::OnAudioPopped(const RefPtr<MediaData>& aSample)
+DecodedAudioDataSink::OnAudioPopped(const RefPtr<AudioData>& aSample)
 {
   SINK_LOG_V("AudioStream has used an audio packet.");
   NotifyAudioNeeded();
 }
 
 void
-DecodedAudioDataSink::OnAudioPushed(const RefPtr<MediaData>& aSample)
+DecodedAudioDataSink::OnAudioPushed(const RefPtr<AudioData>& aSample)
 {
   SINK_LOG_V("One new audio packet available.");
   NotifyAudioNeeded();
 }
 
 void
 DecodedAudioDataSink::NotifyAudioNeeded()
 {
   MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn(),
              "Not called from the owner's thread");
 
   // Always ensure we have two processed frames pending to allow for processing
   // latency.
   while (AudioQueue().GetSize() && (AudioQueue().IsFinished() ||
                                     mProcessedQueueLength < LOW_AUDIO_USECS ||
                                     mProcessedQueue.GetSize() < 2)) {
-    RefPtr<AudioData> data =
-      dont_AddRef(AudioQueue().PopFront().take()->As<AudioData>());
+    RefPtr<AudioData> data = AudioQueue().PopFront();
 
     // Ignore the element with 0 frames and try next.
     if (!data->mFrames) {
       continue;
     }
 
     if (!mConverter ||
         (data->mRate != mConverter->InputConfig().Rate() ||
--- a/dom/media/mediasink/DecodedAudioDataSink.h
+++ b/dom/media/mediasink/DecodedAudioDataSink.h
@@ -25,17 +25,17 @@ namespace mozilla {
 class AudioConverter;
 
 namespace media {
 
 class DecodedAudioDataSink : public AudioSink,
                              private AudioStream::DataSource {
 public:
   DecodedAudioDataSink(AbstractThread* aThread,
-                       MediaQueue<MediaData>& aAudioQueue,
+                       MediaQueue<AudioData>& aAudioQueue,
                        int64_t aStartTime,
                        const AudioInfo& aInfo,
                        dom::AudioChannel aChannel);
 
   // Return a promise which will be resolved when DecodedAudioDataSink
   // finishes playing, or rejected if any error occurs.
   RefPtr<GenericPromise> Init(const PlaybackParams& aParams) override;
 
@@ -120,18 +120,18 @@ private:
   Atomic<bool> mErrored;
 
   // Set on the callback thread of cubeb once the stream has drained.
   Atomic<bool> mPlaybackComplete;
 
   const RefPtr<AbstractThread> mOwnerThread;
 
   // Audio Processing objects and methods
-  void OnAudioPopped(const RefPtr<MediaData>& aSample);
-  void OnAudioPushed(const RefPtr<MediaData>& aSample);
+  void OnAudioPopped(const RefPtr<AudioData>& aSample);
+  void OnAudioPushed(const RefPtr<AudioData>& aSample);
   void NotifyAudioNeeded();
   // Drain the converter and add the output to the processed audio queue.
   // A maximum of aMaxFrames will be added.
   uint32_t DrainConverter(uint32_t aMaxFrames = UINT32_MAX);
   already_AddRefed<AudioData> CreateAudioFromBuffer(AlignedAudioBuffer&& aBuffer,
                                                     AudioData* aReference);
   // Add data to the processed queue, update mProcessedQueueLength and
   // return the number of frames added.
--- a/dom/media/mediasink/DecodedStream.cpp
+++ b/dom/media/mediasink/DecodedStream.cpp
@@ -235,18 +235,18 @@ DecodedStreamData::GetDebugInfo()
     " mNextAudioTime=%" PRId64 " mNextVideoTime=%" PRId64 " mHaveSentFinish=%d "
     "mHaveSentFinishAudio=%d mHaveSentFinishVideo=%d",
     this, mPlaying, mAudioFramesWritten, mNextAudioTime, mNextVideoTime,
     mHaveSentFinish, mHaveSentFinishAudio, mHaveSentFinishVideo);
 }
 
 DecodedStream::DecodedStream(AbstractThread* aOwnerThread,
                              AbstractThread* aMainThread,
-                             MediaQueue<MediaData>& aAudioQueue,
-                             MediaQueue<MediaData>& aVideoQueue,
+                             MediaQueue<AudioData>& aAudioQueue,
+                             MediaQueue<VideoData>& aVideoQueue,
                              OutputStreamManager* aOutputStreamManager,
                              const bool& aSameOrigin,
                              const PrincipalHandle& aPrincipalHandle)
   : mOwnerThread(aOwnerThread)
   , mAbstractMainThread(aMainThread)
   , mOutputStreamManager(aOutputStreamManager)
   , mPlaying(false)
   , mSameOrigin(aSameOrigin)
@@ -443,24 +443,24 @@ void
 DecodedStream::SetPreservesPitch(bool aPreservesPitch)
 {
   AssertOwnerThread();
   mParams.mPreservesPitch = aPreservesPitch;
 }
 
 static void
 SendStreamAudio(DecodedStreamData* aStream, int64_t aStartTime,
-                MediaData* aData, AudioSegment* aOutput, uint32_t aRate,
+                AudioData* aData, AudioSegment* aOutput, uint32_t aRate,
                 const PrincipalHandle& aPrincipalHandle)
 {
   // The number of audio frames used to fuzz rounding errors.
   static const int64_t AUDIO_FUZZ_FRAMES = 1;
 
   MOZ_ASSERT(aData);
-  AudioData* audio = aData->As<AudioData>();
+  AudioData* audio = aData;
   // This logic has to mimic AudioSink closely to make sure we write
   // the exact same silences
   CheckedInt64 audioWrittenOffset = aStream->mAudioFramesWritten +
                                     UsecsToFrames(aStartTime, aRate);
   CheckedInt64 frameOffset = UsecsToFrames(audio->mTime, aRate);
 
   if (!audioWrittenOffset.isValid() ||
       !frameOffset.isValid() ||
@@ -501,17 +501,17 @@ DecodedStream::SendAudio(double aVolume,
   AssertOwnerThread();
 
   if (!mInfo.HasAudio()) {
     return;
   }
 
   AudioSegment output;
   uint32_t rate = mInfo.mAudio.mRate;
-  AutoTArray<RefPtr<MediaData>,10> audio;
+  AutoTArray<RefPtr<AudioData>,10> audio;
   TrackID audioTrackId = mInfo.mAudio.mTrackId;
   SourceMediaStream* sourceStream = mData->mStream;
 
   // It's OK to hold references to the AudioData because AudioData
   // is ref-counted.
   mAudioQueue.GetElementsAfter(mData->mNextAudioTime, &audio);
   for (uint32_t i = 0; i < audio.Length(); ++i) {
     SendStreamAudio(mData.get(), mStartTime.ref(), audio[i], &output, rate,
@@ -572,32 +572,32 @@ DecodedStream::SendVideo(bool aIsSameOri
   AssertOwnerThread();
 
   if (!mInfo.HasVideo()) {
     return;
   }
 
   VideoSegment output;
   TrackID videoTrackId = mInfo.mVideo.mTrackId;
-  AutoTArray<RefPtr<MediaData>, 10> video;
+  AutoTArray<RefPtr<VideoData>, 10> video;
   SourceMediaStream* sourceStream = mData->mStream;
 
   // It's OK to hold references to the VideoData because VideoData
   // is ref-counted.
   mVideoQueue.GetElementsAfter(mData->mNextVideoTime, &video);
 
   // tracksStartTimeStamp might be null when the SourceMediaStream has not
   // yet been added to the MediaStreamGraph.
   TimeStamp tracksStartTimeStamp = sourceStream->GetStreamTracksStrartTimeStamp();
   if (tracksStartTimeStamp.IsNull()) {
     tracksStartTimeStamp = TimeStamp::Now();
   }
 
   for (uint32_t i = 0; i < video.Length(); ++i) {
-    VideoData* v = video[i]->As<VideoData>();
+    VideoData* v = video[i];
 
     if (mData->mNextVideoTime < v->mTime) {
       // Write last video frame to catch up. mLastVideoImage can be null here
       // which is fine, it just means there's no video.
 
       // TODO: |mLastVideoImage| should come from the last image rendered
       // by the state machine. This will avoid the black frame when capture
       // happens in the middle of playback (especially in the middle of a
@@ -742,19 +742,19 @@ DecodedStream::GetPosition(TimeStamp* aT
 void
 DecodedStream::NotifyOutput(int64_t aTime)
 {
   AssertOwnerThread();
   mLastOutputTime = aTime;
   int64_t currentTime = GetPosition();
 
   // Remove audio samples that the MSG has already played from the queue.
-  RefPtr<MediaData> a = mAudioQueue.PeekFront();
+  RefPtr<AudioData> a = mAudioQueue.PeekFront();
   for (; a && a->mTime < currentTime;) {
-    RefPtr<MediaData> releaseMe = mAudioQueue.PopFront();
+    RefPtr<AudioData> releaseMe = mAudioQueue.PopFront();
     a = mAudioQueue.PeekFront();
   }
 }
 
 void
 DecodedStream::ConnectListener()
 {
   AssertOwnerThread();
--- a/dom/media/mediasink/DecodedStream.h
+++ b/dom/media/mediasink/DecodedStream.h
@@ -15,33 +15,34 @@
 #include "mozilla/Maybe.h"
 #include "mozilla/MozPromise.h"
 #include "mozilla/RefPtr.h"
 #include "mozilla/UniquePtr.h"
 
 namespace mozilla {
 
 class DecodedStreamData;
-class MediaData;
+class AudioData;
+class VideoData;
 class MediaStream;
 class OutputStreamManager;
 struct PlaybackInfoInit;
 class ProcessedMediaStream;
 class TimeStamp;
 
 template <class T> class MediaQueue;
 
 class DecodedStream : public media::MediaSink {
   using media::MediaSink::PlaybackParams;
 
 public:
   DecodedStream(AbstractThread* aOwnerThread,
                 AbstractThread* aMainThread,
-                MediaQueue<MediaData>& aAudioQueue,
-                MediaQueue<MediaData>& aVideoQueue,
+                MediaQueue<AudioData>& aAudioQueue,
+                MediaQueue<VideoData>& aVideoQueue,
                 OutputStreamManager* aOutputStreamManager,
                 const bool& aSameOrigin,
                 const PrincipalHandle& aPrincipalHandle);
 
   // MediaSink functions.
   const PlaybackParams& GetPlaybackParams() const override;
   void SetPlaybackParams(const PlaybackParams& aParams) override;
 
@@ -105,18 +106,18 @@ private:
   const PrincipalHandle& mPrincipalHandle; // valid until Shutdown() is called.
 
   PlaybackParams mParams;
 
   Maybe<int64_t> mStartTime;
   int64_t mLastOutputTime = 0; // microseconds
   MediaInfo mInfo;
 
-  MediaQueue<MediaData>& mAudioQueue;
-  MediaQueue<MediaData>& mVideoQueue;
+  MediaQueue<AudioData>& mAudioQueue;
+  MediaQueue<VideoData>& mVideoQueue;
 
   MediaEventListener mAudioPushListener;
   MediaEventListener mVideoPushListener;
   MediaEventListener mAudioFinishListener;
   MediaEventListener mVideoFinishListener;
   MediaEventListener mOutputListener;
 };
 
--- a/dom/media/mediasink/VideoSink.cpp
+++ b/dom/media/mediasink/VideoSink.cpp
@@ -26,17 +26,17 @@ using namespace mozilla::layers;
 namespace media {
 
 // The minimum update interval is 1/120th of a second, i.e. half the
 // duration of a 60-fps frame.
 static const int64_t MIN_UPDATE_INTERVAL_US = 1000000 / (60 * 2);
 
 VideoSink::VideoSink(AbstractThread* aThread,
                      MediaSink* aAudioSink,
-                     MediaQueue<MediaData>& aVideoQueue,
+                     MediaQueue<VideoData>& aVideoQueue,
                      VideoFrameContainer* aContainer,
                      FrameStatistics& aFrameStats,
                      uint32_t aVQueueSentToCompositerSize)
   : mOwnerThread(aThread)
   , mAudioSink(aAudioSink)
   , mVideoQueue(aVideoQueue)
   , mContainer(aContainer)
   , mProducerID(ImageContainer::AllocateProducerID())
@@ -250,23 +250,22 @@ VideoSink::Shutdown()
   AssertOwnerThread();
   MOZ_ASSERT(!mAudioSink->IsStarted(), "must be called after playback stops.");
   VSINK_LOG("[%s]", __func__);
 
   mAudioSink->Shutdown();
 }
 
 void
-VideoSink::OnVideoQueuePushed(RefPtr<MediaData>&& aSample)
+VideoSink::OnVideoQueuePushed(RefPtr<VideoData>&& aSample)
 {
   AssertOwnerThread();
   // Listen to the push event; VideoSink should try rendering ASAP if the
   // first frame arrives but the update scheduler has not been triggered yet.
-  VideoData* v = aSample->As<VideoData>();
-  if (!v->IsSentToCompositor()) {
+  if (!aSample->IsSentToCompositor()) {
     // Since we push rendered frames back to the queue, we will receive
     // push events for them. We only need to trigger render loop
     // when this frame is not rendered yet.
     TryUpdateRenderedVideoFrames();
   }
 }
 
 void
@@ -286,19 +285,18 @@ VideoSink::Redraw(const VideoInfo& aInfo
 {
   AssertOwnerThread();
 
   // No video track, nothing to draw.
   if (!aInfo.IsValid() || !mContainer) {
     return;
   }
 
-  RefPtr<MediaData> frame = VideoQueue().PeekFront();
-  if (frame) {
-    VideoData* video = frame->As<VideoData>();
+  RefPtr<VideoData> video = VideoQueue().PeekFront();
+  if (video) {
     video->MarkSentToCompositor();
     mContainer->SetCurrentFrame(video->mDisplay, video->mImage, TimeStamp::Now());
     return;
   }
 
   // When we reach here, it means there are no frames in this video track.
   // Draw a blank frame to ensure there is something in the image container
   // to fire 'loadeddata'.
@@ -345,27 +343,27 @@ VideoSink::DisconnectListener()
 
 void
 VideoSink::RenderVideoFrames(int32_t aMaxFrames,
                              int64_t aClockTime,
                              const TimeStamp& aClockTimeStamp)
 {
   AssertOwnerThread();
 
-  AutoTArray<RefPtr<MediaData>,16> frames;
+  AutoTArray<RefPtr<VideoData>,16> frames;
   VideoQueue().GetFirstElements(aMaxFrames, &frames);
   if (frames.IsEmpty() || !mContainer) {
     return;
   }
 
   AutoTArray<ImageContainer::NonOwningImage,16> images;
   TimeStamp lastFrameTime;
   MediaSink::PlaybackParams params = mAudioSink->GetPlaybackParams();
   for (uint32_t i = 0; i < frames.Length(); ++i) {
-    VideoData* frame = frames[i]->As<VideoData>();
+    VideoData* frame = frames[i];
 
     frame->MarkSentToCompositor();
 
     if (!frame->mImage || !frame->mImage->IsValid() ||
         !frame->mImage->GetSize().width || !frame->mImage->GetSize().height) {
       continue;
     }
 
@@ -397,17 +395,17 @@ VideoSink::RenderVideoFrames(int32_t aMa
     img->mFrameID = frame->mFrameID;
     img->mProducerID = mProducerID;
 
     VSINK_LOG_V("playing video frame %" PRId64 " (id=%x) (vq-queued=%" PRIuSIZE ")",
                 frame->mTime, frame->mFrameID, VideoQueue().GetSize());
   }
 
   if (images.Length() > 0) {
-    mContainer->SetCurrentFrames(frames[0]->As<VideoData>()->mDisplay, images);
+    mContainer->SetCurrentFrames(frames[0]->mDisplay, images);
   }
 }
 
 void
 VideoSink::UpdateRenderedVideoFrames()
 {
   AssertOwnerThread();
   MOZ_ASSERT(mAudioSink->IsPlaying(), "should be called while playing.");
@@ -416,42 +414,42 @@ VideoSink::UpdateRenderedVideoFrames()
   TimeStamp nowTime;
   const int64_t clockTime = mAudioSink->GetPosition(&nowTime);
   NS_ASSERTION(clockTime >= 0, "Should have positive clock time.");
 
   // Skip frames up to the playback position.
   int64_t lastFrameEndTime = 0;
   while (VideoQueue().GetSize() > mMinVideoQueueSize &&
          clockTime >= VideoQueue().PeekFront()->GetEndTime()) {
-    RefPtr<MediaData> frame = VideoQueue().PopFront();
+    RefPtr<VideoData> frame = VideoQueue().PopFront();
     lastFrameEndTime = frame->GetEndTime();
-    if (frame->As<VideoData>()->IsSentToCompositor()) {
+    if (frame->IsSentToCompositor()) {
       mFrameStats.NotifyPresentedFrame();
     } else {
       mFrameStats.NotifyDecodedFrames({ 0, 0, 1 });
       VSINK_LOG_V("discarding video frame mTime=%" PRId64 " clock_time=%" PRId64,
                   frame->mTime, clockTime);
     }
   }
 
   // The presentation end time of the last video frame displayed is either
   // the end time of the current frame, or if we dropped all frames in the
   // queue, the end time of the last frame we removed from the queue.
-  RefPtr<MediaData> currentFrame = VideoQueue().PeekFront();
+  RefPtr<VideoData> currentFrame = VideoQueue().PeekFront();
   mVideoFrameEndTime = std::max(mVideoFrameEndTime,
     currentFrame ? currentFrame->GetEndTime() : lastFrameEndTime);
 
   MaybeResolveEndPromise();
 
   RenderVideoFrames(mVideoQueueSendToCompositorSize, clockTime, nowTime);
 
   // Get the timestamp of the next frame. Schedule the next update at
   // the start time of the next frame. If we don't have a next frame,
   // we will run the render loop again when new frames arrive.
-  nsTArray<RefPtr<MediaData>> frames;
+  nsTArray<RefPtr<VideoData>> frames;
   VideoQueue().GetFirstElements(2, &frames);
   if (frames.Length() < 2) {
     return;
   }
 
   int64_t nextFrameTime = frames[1]->mTime;
   int64_t delta = std::max<int64_t>((nextFrameTime - clockTime), MIN_UPDATE_INTERVAL_US);
   TimeStamp target = nowTime + TimeDuration::FromMicroseconds(
--- a/dom/media/mediasink/VideoSink.h
+++ b/dom/media/mediasink/VideoSink.h
@@ -26,17 +26,17 @@ template <class T> class MediaQueue;
 namespace media {
 
 class VideoSink : public MediaSink
 {
   typedef mozilla::layers::ImageContainer::ProducerID ProducerID;
 public:
   VideoSink(AbstractThread* aThread,
             MediaSink* aAudioSink,
-            MediaQueue<MediaData>& aVideoQueue,
+            MediaQueue<VideoData>& aVideoQueue,
             VideoFrameContainer* aContainer,
             FrameStatistics& aFrameStats,
             uint32_t aVQueueSentToCompositerSize);
 
   const PlaybackParams& GetPlaybackParams() const override;
 
   void SetPlaybackParams(const PlaybackParams& aParams) override;
 
@@ -69,17 +69,17 @@ public:
   void Shutdown() override;
 
   nsCString GetDebugInfo() override;
 
 private:
   virtual ~VideoSink();
 
   // VideoQueue listener related.
-  void OnVideoQueuePushed(RefPtr<MediaData>&& aSample);
+  void OnVideoQueuePushed(RefPtr<VideoData>&& aSample);
   void OnVideoQueueFinished();
   void ConnectListener();
   void DisconnectListener();
 
   // Sets VideoQueue images into the VideoFrameContainer. Called on the shared
   // state machine thread. The first aMaxFrames (at most) are set.
   // aClockTime and aClockTimeStamp are used as the baseline for deriving
   // timestamps for the frames; when omitted, aMaxFrames must be 1 and
@@ -101,23 +101,23 @@ private:
 
   void MaybeResolveEndPromise();
 
   void AssertOwnerThread() const
   {
     MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
   }
 
-  MediaQueue<MediaData>& VideoQueue() const {
+  MediaQueue<VideoData>& VideoQueue() const {
     return mVideoQueue;
   }
 
   const RefPtr<AbstractThread> mOwnerThread;
   RefPtr<MediaSink> mAudioSink;
-  MediaQueue<MediaData>& mVideoQueue;
+  MediaQueue<VideoData>& mVideoQueue;
   VideoFrameContainer* mContainer;
 
   // Producer ID to help ImageContainer distinguish different streams of
   // FrameIDs. A unique and immutable value per VideoSink.
   const ProducerID mProducerID;
 
   // Used to notify MediaDecoder's frame statistics
   FrameStatistics& mFrameStats;