Bug 1203047 - Make MediaDecoderReader know less about AudioData/VideoData by using MediaData instead. r=jya
author James Cheng <jacheng@mozilla.com>
Thu, 10 Sep 2015 03:06:00 +0200
changeset 294532 0be0207d4271031fe7a6de5b8bda797f9210386b
parent 294531 8b81e380f22900059b184573cc4dc429d6ac2f43
child 294533 04aa9165c832254e43a9b168aeb02cac3da9aed8
push id 5245
push user raliiev@mozilla.com
push date Thu, 29 Oct 2015 11:30:51 +0000
reviewers jya
bugs 1203047
milestone 43.0a1
dom/media/MediaData.h
dom/media/MediaDecoderReader.h
dom/media/MediaDecoderStateMachine.cpp
dom/media/MediaDecoderStateMachine.h
dom/media/android/AndroidMediaReader.cpp
dom/media/mediasource/MediaSourceReader.cpp
dom/media/mediasource/MediaSourceReader.h
dom/media/omx/MediaOmxReader.cpp
dom/media/webaudio/MediaBufferDecoder.cpp
--- a/dom/media/MediaData.h
+++ b/dom/media/MediaData.h
@@ -129,17 +129,17 @@ public:
     : MediaData(sType, aOffset, aTime, aDuration, aFrames)
     , mChannels(aChannels)
     , mRate(aRate)
     , mAudioData(aData) {}
 
   static const Type sType = AUDIO_DATA;
   static const char* sTypeName;
 
-  // Creates a new VideoData identical to aOther, but with a different
+  // Creates a new AudioData identical to aOther, but with a different
   // specified timestamp and duration. All data from aOther is copied
   // into the new AudioData except the audio data, which is transferred.
   // After such a call, the original aOther is unusable.
   static already_AddRefed<AudioData>
   TransferAndUpdateTimestampAndDuration(AudioData* aOther,
                                         int64_t aTimestamp,
                                         int64_t aDuration);
 
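For orientation, a minimal self-contained sketch (illustrative stand-ins, not the Gecko implementation) of the type-tag pattern this patch leans on: MediaData carries an mType discriminator, each subclass a static sType, and As<T>() performs a checked downcast at the point of use.

#include <cassert>
#include <cstdint>

// Stand-ins only; the real classes live in dom/media/MediaData.h and are
// refcounted via nsRefPtr. Bodies are deliberately minimal.
struct MediaData {
  enum Type { AUDIO_DATA = 0, VIDEO_DATA, RAW_DATA };
  explicit MediaData(Type aType) : mType(aType) {}
  virtual ~MediaData() {}

  // Checked downcast: callers that now traffic in MediaData* recover the
  // concrete sample type only where they need its extra fields.
  template <typename ReturnType>
  ReturnType* As() {
    assert(mType == ReturnType::sType);
    return static_cast<ReturnType*>(this);
  }

  const Type mType;
  int64_t mTime = 0;  // presentation time in usecs
};

struct AudioData : MediaData {
  static const Type sType = AUDIO_DATA;
  AudioData() : MediaData(sType) {}
  uint32_t mRate = 44100;
};

struct VideoData : MediaData {
  static const Type sType = VIDEO_DATA;
  VideoData() : MediaData(sType) {}
  int32_t mFrameID = 0;
};

With this shape, generic code (queues, promises, the state machine) can hold MediaData* and only leaf consumers pay for a downcast.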
--- a/dom/media/MediaDecoderReader.h
+++ b/dom/media/MediaDecoderReader.h
@@ -64,18 +64,18 @@ public:
   enum NotDecodedReason {
     END_OF_STREAM,
     DECODE_ERROR,
     WAITING_FOR_DATA,
     CANCELED
   };
 
   typedef MozPromise<nsRefPtr<MetadataHolder>, ReadMetadataFailureReason, /* IsExclusive = */ true> MetadataPromise;
-  typedef MozPromise<nsRefPtr<AudioData>, NotDecodedReason, /* IsExclusive = */ true> AudioDataPromise;
-  typedef MozPromise<nsRefPtr<VideoData>, NotDecodedReason, /* IsExclusive = */ true> VideoDataPromise;
+  typedef MozPromise<nsRefPtr<MediaData>, NotDecodedReason, /* IsExclusive = */ true> AudioDataPromise;
+  typedef MozPromise<nsRefPtr<MediaData>, NotDecodedReason, /* IsExclusive = */ true> VideoDataPromise;
   typedef MozPromise<int64_t, nsresult, /* IsExclusive = */ true> SeekPromise;
 
   // Note that, conceptually, WaitForData makes sense in a non-exclusive sense.
   // But in the current architecture it's only ever used exclusively (by MDSM),
   // so we mark it that way to verify our assumptions. If you have a use-case
   // for multiple WaitForData consumers, feel free to flip the exclusivity here.
   typedef MozPromise<MediaData::Type, WaitForDataRejectValue, /* IsExclusive = */ true> WaitForDataPromise;
 
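What the typedef change buys, as a hedged sketch with MozPromise reduced to its resolve type and a raw pointer standing in for nsRefPtr: once both promises resolve to MediaData, a single Then() target such as OnAudioDecoded(MediaData*) can serve either stream.

#include <type_traits>

struct MediaData;
enum NotDecodedReason { END_OF_STREAM, DECODE_ERROR, WAITING_FOR_DATA, CANCELED };

// Skeleton only; the real MozPromise carries threading and exclusivity
// machinery that is irrelevant to the point being made here.
template <typename ResolveT, typename RejectT, bool IsExclusive>
struct PromiseSketch {
  typedef ResolveT ResolveValueType;
};

typedef PromiseSketch<MediaData*, NotDecodedReason, true> AudioDataPromise;
typedef PromiseSketch<MediaData*, NotDecodedReason, true> VideoDataPromise;

static_assert(std::is_same<AudioDataPromise::ResolveValueType,
                           VideoDataPromise::ResolveValueType>::value,
              "after the patch, both promises resolve to the same type");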
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -557,42 +557,42 @@ MediaDecoderStateMachine::IsVideoSeekCom
   return
     !HasVideo() ||
     (mCurrentSeek.Exists() &&
      !mDropVideoUntilNextDiscontinuity &&
      (VideoQueue().IsFinished() || VideoQueue().GetSize() > 0));
 }
 
 void
-MediaDecoderStateMachine::OnAudioDecoded(AudioData* aAudioSample)
+MediaDecoderStateMachine::OnAudioDecoded(MediaData* aAudioSample)
 {
   MOZ_ASSERT(OnTaskQueue());
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
-  nsRefPtr<AudioData> audio(aAudioSample);
+  nsRefPtr<MediaData> audio(aAudioSample);
   MOZ_ASSERT(audio);
   mAudioDataRequest.Complete();
   aAudioSample->AdjustForStartTime(StartTime());
   mDecodedAudioEndTime = audio->GetEndTime();
 
   SAMPLE_LOG("OnAudioDecoded [%lld,%lld] disc=%d",
              (audio ? audio->mTime : -1),
              (audio ? audio->GetEndTime() : -1),
              (audio ? audio->mDiscontinuity : 0));
 
   switch (mState) {
     case DECODER_STATE_BUFFERING: {
       // If we're buffering, this may be the sample we need to stop buffering.
       // Save it and schedule the state machine.
-      Push(audio);
+      Push(audio, MediaData::AUDIO_DATA);
       ScheduleStateMachine();
       return;
     }
 
     case DECODER_STATE_DECODING: {
-      Push(audio);
+      Push(audio, MediaData::AUDIO_DATA);
       if (MaybeFinishDecodeFirstFrame()) {
         return;
       }
       if (mIsAudioPrerolling && DonePrerollingAudio()) {
         StopPrerollingAudio();
       }
       return;
     }
@@ -616,17 +616,17 @@ MediaDecoderStateMachine::OnAudioDecoded
           // seek and decode to the seek target. This is not conformant to the
           // spec, fastSeek should always be fast, but until we get the time to
           // change all Readers to seek to the keyframe after the currentTime
           // in this case, we'll just decode forward. Bug 1026330.
           mCurrentSeek.mTarget.mType = SeekTarget::Accurate;
         }
         if (mCurrentSeek.mTarget.mType == SeekTarget::PrevSyncPoint) {
           // Non-precise seek; we can stop the seek at the first sample.
-          Push(audio);
+          Push(audio, MediaData::AUDIO_DATA);
         } else {
           // We're doing an accurate seek. We must discard
           // MediaData up to the one containing the exact seek target.
           if (NS_FAILED(DropAudioUpToSeekTarget(audio))) {
             DecodeError();
             return;
           }
         }
@@ -637,61 +637,52 @@ MediaDecoderStateMachine::OnAudioDecoded
     default: {
       // Ignore other cases.
       return;
     }
   }
 }
 
 void
-MediaDecoderStateMachine::Push(AudioData* aSample)
+MediaDecoderStateMachine::Push(MediaData* aSample, MediaData::Type aSampleType)
 {
   MOZ_ASSERT(OnTaskQueue());
   MOZ_ASSERT(aSample);
-  // TODO: Send aSample to MSG and recalculate readystate before pushing,
-  // otherwise AdvanceFrame may pop the sample before we have a chance
-  // to reach playing.
-  AudioQueue().Push(aSample);
-  UpdateNextFrameStatus();
-  DispatchDecodeTasksIfNeeded();
-
-}
-
-void
-MediaDecoderStateMachine::PushFront(AudioData* aSample)
-{
-  MOZ_ASSERT(OnTaskQueue());
-  MOZ_ASSERT(aSample);
-
-  AudioQueue().PushFront(aSample);
-  UpdateNextFrameStatus();
-}
-
-void
-MediaDecoderStateMachine::Push(VideoData* aSample)
-{
-  MOZ_ASSERT(OnTaskQueue());
-  MOZ_ASSERT(aSample);
-  // TODO: Send aSample to MSG and recalculate readystate before pushing,
-  // otherwise AdvanceFrame may pop the sample before we have a chance
-  // to reach playing.
-  aSample->mFrameID = ++mCurrentFrameID;
-  VideoQueue().Push(aSample);
+
+  if (aSample->mType == MediaData::AUDIO_DATA) {
+    // TODO: Send aSample to MSG and recalculate readystate before pushing,
+    // otherwise AdvanceFrame may pop the sample before we have a chance
+    // to reach playing.
+    AudioQueue().Push(aSample);
+  } else if (aSample->mType == MediaData::VIDEO_DATA) {
+    // TODO: Send aSample to MSG and recalculate readystate before pushing,
+    // otherwise AdvanceFrame may pop the sample before we have a chance
+    // to reach playing.
+    aSample->As<VideoData>()->mFrameID = ++mCurrentFrameID;
+    VideoQueue().Push(aSample);
+  } else {
+    // TODO: Handle MediaRawData, determine which queue should be pushed.
+  }
   UpdateNextFrameStatus();
   DispatchDecodeTasksIfNeeded();
 }
 
 void
-MediaDecoderStateMachine::PushFront(VideoData* aSample)
+MediaDecoderStateMachine::PushFront(MediaData* aSample, MediaData::Type aSampleType)
 {
   MOZ_ASSERT(OnTaskQueue());
   MOZ_ASSERT(aSample);
-
-  aSample->mFrameID = ++mCurrentFrameID;
-  VideoQueue().PushFront(aSample);
+  if (aSample->mType == MediaData::AUDIO_DATA) {
+    AudioQueue().PushFront(aSample);
+  } else if (aSample->mType == MediaData::VIDEO_DATA) {
+    aSample->As<VideoData>()->mFrameID = ++mCurrentFrameID;
+    VideoQueue().PushFront(aSample);
+  } else {
+    // TODO: Handle MediaRawData, determine which queue should be pushed.
+  }
   UpdateNextFrameStatus();
 }
 
 void
 MediaDecoderStateMachine::OnAudioPopped(const nsRefPtr<MediaData>& aSample)
 {
   MOZ_ASSERT(OnTaskQueue());
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
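Condensed shape of the unified Push(), reusing the stand-ins from the first sketch and a std::deque in place of MediaQueue<MediaData>. Worth noting: dispatch is on the sample's own mType tag, so the aSampleType argument at call sites like Push(audio, MediaData::AUDIO_DATA) mainly documents intent.

#include <cassert>
#include <cstdint>
#include <deque>

static std::deque<MediaData*> sAudioQueue;  // stands in for AudioQueue()
static std::deque<MediaData*> sVideoQueue;  // stands in for VideoQueue()
static int32_t sCurrentFrameID = 0;

void PushSketch(MediaData* aSample) {
  assert(aSample);
  if (aSample->mType == MediaData::AUDIO_DATA) {
    sAudioQueue.push_back(aSample);
  } else if (aSample->mType == MediaData::VIDEO_DATA) {
    // Video samples are stamped with a monotonically increasing frame ID on
    // the way in, as the patched Push() does via As<VideoData>().
    aSample->As<VideoData>()->mFrameID = ++sCurrentFrameID;
    sVideoQueue.push_back(aSample);
  }
  // else: routing for MediaRawData is left as a TODO in the patch as well.
}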
@@ -767,17 +758,17 @@ MediaDecoderStateMachine::OnNotDecoded(M
   // state.
   MOZ_ASSERT(aReason == MediaDecoderReader::END_OF_STREAM);
   if (!isAudio && mState == DECODER_STATE_SEEKING &&
       mCurrentSeek.Exists() && mFirstVideoFrameAfterSeek) {
     // Null sample. Hit end of stream. If we have decoded a frame,
     // insert it into the queue so that we have something to display.
     // We make sure to do this before invoking VideoQueue().Finish()
     // below.
-    Push(mFirstVideoFrameAfterSeek);
+    Push(mFirstVideoFrameAfterSeek, MediaData::VIDEO_DATA);
     mFirstVideoFrameAfterSeek = nullptr;
   }
   if (isAudio) {
     AudioQueue().Finish();
     StopPrerollingAudio();
   } else {
     VideoQueue().Finish();
     StopPrerollingVideo();
@@ -830,42 +821,42 @@ MediaDecoderStateMachine::MaybeFinishDec
   // We can now complete the pending seek.
   mPendingSeek.Steal(mQueuedSeek);
   SetState(DECODER_STATE_SEEKING);
   ScheduleStateMachine();
   return true;
 }
 
 void
-MediaDecoderStateMachine::OnVideoDecoded(VideoData* aVideoSample)
+MediaDecoderStateMachine::OnVideoDecoded(MediaData* aVideoSample)
 {
   MOZ_ASSERT(OnTaskQueue());
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
-  nsRefPtr<VideoData> video(aVideoSample);
+  nsRefPtr<MediaData> video(aVideoSample);
   MOZ_ASSERT(video);
   mVideoDataRequest.Complete();
   aVideoSample->AdjustForStartTime(StartTime());
   mDecodedVideoEndTime = video ? video->GetEndTime() : mDecodedVideoEndTime;
 
   SAMPLE_LOG("OnVideoDecoded [%lld,%lld] disc=%d",
              (video ? video->mTime : -1),
              (video ? video->GetEndTime() : -1),
              (video ? video->mDiscontinuity : 0));
 
   switch (mState) {
     case DECODER_STATE_BUFFERING: {
       // If we're buffering, this may be the sample we need to stop buffering.
       // Save it and schedule the state machine.
-      Push(video);
+      Push(video, MediaData::VIDEO_DATA);
       ScheduleStateMachine();
       return;
     }
 
     case DECODER_STATE_DECODING: {
-      Push(video);
+      Push(video, MediaData::VIDEO_DATA);
       if (MaybeFinishDecodeFirstFrame()) {
         return;
       }
       if (mIsVideoPrerolling && DonePrerollingVideo()) {
         StopPrerollingVideo();
       }
 
       // Schedule the state machine to send stream data as soon as possible or
@@ -922,17 +913,17 @@ MediaDecoderStateMachine::OnVideoDecoded
           // seek and decode to the seek target. This is not conformant to the
           // spec, fastSeek should always be fast, but until we get the time to
           // change all Readers to seek to the keyframe after the currentTime
           // in this case, we'll just decode forward. Bug 1026330.
           mCurrentSeek.mTarget.mType = SeekTarget::Accurate;
         }
         if (mCurrentSeek.mTarget.mType == SeekTarget::PrevSyncPoint) {
           // Non-precise seek; we can stop the seek at the first sample.
-          Push(video);
+          Push(video, MediaData::VIDEO_DATA);
         } else {
           // We're doing an accurate seek. We still need to discard
           // MediaData up to the one containing the exact seek target.
           if (NS_FAILED(DropVideoUpToSeekTarget(video))) {
             DecodeError();
             return;
           }
         }
@@ -1665,18 +1656,18 @@ MediaDecoderStateMachine::RequestAudioDa
       ->Then(OwnerThread(), __func__, this,
              &MediaDecoderStateMachine::OnAudioDecoded,
              &MediaDecoderStateMachine::OnAudioNotDecoded));
   } else {
     mAudioDataRequest.Begin(
       InvokeAsync(DecodeTaskQueue(), mReader.get(), __func__,
                   &MediaDecoderReader::RequestAudioData)
       ->Then(OwnerThread(), __func__, mStartTimeRendezvous.get(),
-             &StartTimeRendezvous::ProcessFirstSample<AudioDataPromise>,
-             &StartTimeRendezvous::FirstSampleRejected<AudioData>)
+             &StartTimeRendezvous::ProcessFirstSample<AudioDataPromise, MediaData::AUDIO_DATA>,
+             &StartTimeRendezvous::FirstSampleRejected<MediaData::AUDIO_DATA>)
       ->CompletionPromise()
       ->Then(OwnerThread(), __func__, this,
              &MediaDecoderStateMachine::OnAudioDecoded,
              &MediaDecoderStateMachine::OnAudioNotDecoded)
     );
   }
 }
 
@@ -1749,18 +1740,18 @@ MediaDecoderStateMachine::RequestVideoDa
              &MediaDecoderStateMachine::OnVideoDecoded,
              &MediaDecoderStateMachine::OnVideoNotDecoded));
   } else {
     mVideoDataRequest.Begin(
       InvokeAsync(DecodeTaskQueue(), mReader.get(), __func__,
                   &MediaDecoderReader::RequestVideoData,
                   skipToNextKeyFrame, currentTime)
       ->Then(OwnerThread(), __func__, mStartTimeRendezvous.get(),
-             &StartTimeRendezvous::ProcessFirstSample<VideoDataPromise>,
-             &StartTimeRendezvous::FirstSampleRejected<VideoData>)
+             &StartTimeRendezvous::ProcessFirstSample<VideoDataPromise, MediaData::VIDEO_DATA>,
+             &StartTimeRendezvous::FirstSampleRejected<MediaData::VIDEO_DATA>)
       ->CompletionPromise()
       ->Then(OwnerThread(), __func__, this,
              &MediaDecoderStateMachine::OnVideoDecoded,
              &MediaDecoderStateMachine::OnVideoNotDecoded));
   }
 }
 
 void
@@ -2645,20 +2636,20 @@ void MediaDecoderStateMachine::UpdateRen
   // the monitor and get a stale value from GetCurrentTimeUs() which hits the
   // assertion in GetClock().
 
   int64_t delay = std::max<int64_t>(1, remainingTime / mPlaybackRate);
   ScheduleStateMachineIn(delay);
 }
 
 nsresult
-MediaDecoderStateMachine::DropVideoUpToSeekTarget(VideoData* aSample)
+MediaDecoderStateMachine::DropVideoUpToSeekTarget(MediaData* aSample)
 {
   MOZ_ASSERT(OnTaskQueue());
-  nsRefPtr<VideoData> video(aSample);
+  nsRefPtr<VideoData> video(aSample->As<VideoData>());
   MOZ_ASSERT(video);
   DECODER_LOG("DropVideoUpToSeekTarget() frame [%lld, %lld]",
               video->mTime, video->GetEndTime());
   MOZ_ASSERT(mCurrentSeek.Exists());
   const int64_t target = mCurrentSeek.mTarget.mTime;
 
   // If the frame end time is less than the seek target, we won't want
   // to display this frame after the seek, so discard it.
@@ -2674,27 +2665,27 @@ MediaDecoderStateMachine::DropVideoUpToS
       nsRefPtr<VideoData> temp = VideoData::ShallowCopyUpdateTimestamp(video, target);
       video = temp;
     }
     mFirstVideoFrameAfterSeek = nullptr;
 
     DECODER_LOG("DropVideoUpToSeekTarget() found video frame [%lld, %lld] containing target=%lld",
                 video->mTime, video->GetEndTime(), target);
 
-    PushFront(video);
+    PushFront(video, MediaData::VIDEO_DATA);
   }
 
   return NS_OK;
 }
 
 nsresult
-MediaDecoderStateMachine::DropAudioUpToSeekTarget(AudioData* aSample)
+MediaDecoderStateMachine::DropAudioUpToSeekTarget(MediaData* aSample)
 {
   MOZ_ASSERT(OnTaskQueue());
-  nsRefPtr<AudioData> audio(aSample);
+  nsRefPtr<AudioData> audio(aSample->As<AudioData>());
   MOZ_ASSERT(audio &&
              mCurrentSeek.Exists() &&
              mCurrentSeek.mTarget.mType == SeekTarget::Accurate);
 
   CheckedInt64 startFrame = UsecsToFrames(audio->mTime,
                                           mInfo.mAudio.mRate);
   CheckedInt64 targetFrame = UsecsToFrames(mCurrentSeek.mTarget.mTime,
                                            mInfo.mAudio.mRate);
@@ -2710,17 +2701,17 @@ MediaDecoderStateMachine::DropAudioUpToS
     // The seek target doesn't lie in the audio block just after the last
     // audio frames we've seen which were before the seek target. This
     // could have been the first audio data we've seen after seek, i.e. the
     // seek terminated after the seek target in the audio stream. Just
     // abort the audio decode-to-target, the state machine will play
     // silence to cover the gap. Typically this happens in poorly muxed
     // files.
     DECODER_WARN("Audio not synced after seek, maybe a poorly muxed file?");
-    Push(audio);
+    Push(audio, MediaData::AUDIO_DATA);
     return NS_OK;
   }
 
   // The seek target lies somewhere in this AudioData's frames, strip off
   // any frames which lie before the seek target, so we'll begin playback
   // exactly at the seek target.
   NS_ASSERTION(targetFrame.value() >= startFrame.value(),
                "Target must be at or after data start.");
@@ -2746,17 +2737,17 @@ MediaDecoderStateMachine::DropAudioUpToS
   }
   nsRefPtr<AudioData> data(new AudioData(audio->mOffset,
                                          mCurrentSeek.mTarget.mTime,
                                          duration.value(),
                                          frames,
                                          audioData.forget(),
                                          channels,
                                          audio->mRate));
-  PushFront(data);
+  PushFront(data, MediaData::AUDIO_DATA);
 
   return NS_OK;
 }
 
 void MediaDecoderStateMachine::UpdateNextFrameStatus()
 {
   MOZ_ASSERT(OnTaskQueue());
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
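The decode-to-target arithmetic in DropAudioUpToSeekTarget() is easier to follow with the unit conversion written out. A sketch using plain int64_t where the tree uses CheckedInt64, which detects multiplication overflow instead of wrapping:

#include <cstdint>

// usecs -> frame count at a given sample rate.
int64_t UsecsToFramesSketch(int64_t aUsecs, uint32_t aRate) {
  return aUsecs * static_cast<int64_t>(aRate) / 1000000;
}

// Example: an AudioData with mTime = 2000000 us at 48 kHz starts at frame
// 96000. An accurate seek to 2010000 us targets frame 96480, so the first
// 480 frames of the block are stripped and the trimmed AudioData is handed
// to PushFront(data, MediaData::AUDIO_DATA).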
--- a/dom/media/MediaDecoderStateMachine.h
+++ b/dom/media/MediaDecoderStateMachine.h
@@ -356,18 +356,18 @@ public:
 
       // Make sure that this arrives before playback starts, otherwise this won't
       // have the intended effect.
       MOZ_DIAGNOSTIC_ASSERT(self->mPlayState == MediaDecoder::PLAY_STATE_LOADING);
     });
     OwnerThread()->Dispatch(r.forget());
   }
 
-  void OnAudioDecoded(AudioData* aSample);
-  void OnVideoDecoded(VideoData* aSample);
+  void OnAudioDecoded(MediaData* aAudioSample);
+  void OnVideoDecoded(MediaData* aVideoSample);
   void OnNotDecoded(MediaData::Type aType, MediaDecoderReader::NotDecodedReason aReason);
   void OnAudioNotDecoded(MediaDecoderReader::NotDecodedReason aReason)
   {
     MOZ_ASSERT(OnTaskQueue());
     OnNotDecoded(MediaData::AUDIO_DATA, aReason);
   }
   void OnVideoNotDecoded(MediaDecoderReader::NotDecodedReason aReason)
   {
@@ -385,20 +385,19 @@ protected:
   void AssertCurrentThreadInMonitor() const { mDecoder->GetReentrantMonitor().AssertCurrentThreadIn(); }
 
   void SetState(State aState);
 
   void BufferedRangeUpdated();
 
   // Inserts MediaData* samples into their respective MediaQueues.
   // aSample must not be null.
-  void Push(AudioData* aSample);
-  void Push(VideoData* aSample);
-  void PushFront(AudioData* aSample);
-  void PushFront(VideoData* aSample);
+
+  void Push(MediaData* aSample, MediaData::Type aSampleType);
+  void PushFront(MediaData* aSample, MediaData::Type aSampleType);
 
   void OnAudioPopped(const nsRefPtr<MediaData>& aSample);
   void OnVideoPopped(const nsRefPtr<MediaData>& aSample);
 
   void VolumeChanged();
   void LogicalPlaybackRateChanged();
   void PreservesPitchChanged();
 
@@ -454,18 +453,18 @@ protected:
 
   // Return the current time, either the audio clock if available (if the media
   // has audio, and the playback is possible), or a clock for the video.
   // Called on the state machine thread.
   // If aTimeStamp is non-null, set *aTimeStamp to the TimeStamp corresponding
   // to the returned stream time.
   int64_t GetClock(TimeStamp* aTimeStamp = nullptr) const;
 
-  nsresult DropAudioUpToSeekTarget(AudioData* aSample);
-  nsresult DropVideoUpToSeekTarget(VideoData* aSample);
+  nsresult DropAudioUpToSeekTarget(MediaData* aSample);
+  nsresult DropVideoUpToSeekTarget(MediaData* aSample);
 
   void SetStartTime(int64_t aStartTimeUsecs);
 
   // Update only the state machine's current playback position (and duration,
   // if unknown).  Does not update the playback position on the decoder or
   // media element -- use UpdatePlaybackPosition for that.  Called on the state
   // machine thread, caller must hold the decoder lock.
   void UpdatePlaybackPositionInternal(int64_t aTime);
@@ -772,74 +771,74 @@ private:
       return mHaveStartTimePromise.Ensure(__func__);
     }
 
     template<typename PromiseType>
     struct PromiseSampleType {
       typedef typename PromiseType::ResolveValueType::element_type Type;
     };
 
-    template<typename PromiseType>
+    template<typename PromiseType, MediaData::Type SampleType>
     nsRefPtr<PromiseType> ProcessFirstSample(typename PromiseSampleType<PromiseType>::Type* aData)
     {
       typedef typename PromiseSampleType<PromiseType>::Type DataType;
       typedef typename PromiseType::Private PromisePrivate;
       MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
 
-      MaybeSetChannelStartTime<DataType>(aData->mTime);
+      MaybeSetChannelStartTime<SampleType>(aData->mTime);
 
       nsRefPtr<PromisePrivate> p = new PromisePrivate(__func__);
       nsRefPtr<DataType> data = aData;
       nsRefPtr<StartTimeRendezvous> self = this;
       AwaitStartTime()->Then(mOwnerThread, __func__,
                              [p, data, self] () -> void {
                                MOZ_ASSERT(self->mOwnerThread->IsCurrentThreadIn());
                                p->Resolve(data, __func__);
                              },
                              [p] () -> void { p->Reject(MediaDecoderReader::CANCELED, __func__); });
 
       return p.forget();
     }
 
-    template<typename SampleType>
+    template<MediaData::Type SampleType>
     void FirstSampleRejected(MediaDecoderReader::NotDecodedReason aReason)
     {
       MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
       if (aReason == MediaDecoderReader::DECODE_ERROR) {
         mHaveStartTimePromise.RejectIfExists(false, __func__);
       } else if (aReason == MediaDecoderReader::END_OF_STREAM) {
         MOZ_LOG(gMediaDecoderLog, LogLevel::Debug,
-                ("StartTimeRendezvous=%p %s Has no samples.", this, SampleType::sTypeName));
+                ("StartTimeRendezvous=%p SampleType(%d) Has no samples.", this, SampleType));
         MaybeSetChannelStartTime<SampleType>(INT64_MAX);
       }
     }
 
     bool HaveStartTime() { return mAudioStartTime.isSome() && mVideoStartTime.isSome(); }
     int64_t StartTime()
     {
       int64_t time = std::min(mAudioStartTime.ref(), mVideoStartTime.ref());
       return time == INT64_MAX ? 0 : time;
     }
   private:
     virtual ~StartTimeRendezvous() {}
 
-    template<typename SampleType>
+    template<MediaData::Type SampleType>
     void MaybeSetChannelStartTime(int64_t aStartTime)
     {
-      if (ChannelStartTime(SampleType::sType).isSome()) {
+      if (ChannelStartTime(SampleType).isSome()) {
         // If we're initialized with aForceZeroStartTime=true, the channel start
         // times are already set.
         return;
       }
 
       MOZ_LOG(gMediaDecoderLog, LogLevel::Debug,
-              ("StartTimeRendezvous=%p Setting %s start time to %lld",
-               this, SampleType::sTypeName, aStartTime));
+              ("StartTimeRendezvous=%p Setting SampleType(%d) start time to %lld",
+               this, SampleType, aStartTime));
 
-      ChannelStartTime(SampleType::sType).emplace(aStartTime);
+      ChannelStartTime(SampleType).emplace(aStartTime);
       if (HaveStartTime()) {
         mHaveStartTimePromise.ResolveIfExists(true, __func__);
       }
     }
 
     Maybe<int64_t>& ChannelStartTime(MediaData::Type aType)
     {
       return aType == MediaData::AUDIO_DATA ? mAudioStartTime : mVideoStartTime;
@@ -1078,17 +1077,17 @@ private:
       ScheduleStateMachine();
     }
   }
 
   // This temporarily stores the first frame we decode after we seek.
   // This is so that if we hit end of stream while we're decoding to reach
   // the seek target, we will still have a frame that we can display as the
   // last frame in the media.
-  nsRefPtr<VideoData> mFirstVideoFrameAfterSeek;
+  nsRefPtr<MediaData> mFirstVideoFrameAfterSeek;
 
   // When we start decoding (either for the first time, or after a pause)
   // we may be low on decoded data. We don't want our "low data" logic to
   // kick in and decide that we're low on decoded data because the download
   // can't keep up with the decode, and cause us to pause playback. So we
   // have a "preroll" stage, where we ignore the results of our "low data"
   // logic during the first few frames of our decode. This occurs during
   // playback. The flags below are true when the corresponding stream is
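The StartTimeRendezvous hunks above swap a type template parameter, whose static sType/sTypeName named the channel, for a non-type MediaData::Type parameter, since the concrete sample classes no longer flow through the promise layer. A compilable sketch of the new shape (names are illustrative):

#include <cstdint>
#include <cstdio>

struct MediaDataTag {
  enum Type { AUDIO_DATA = 0, VIDEO_DATA };
};

// Before: template <typename SampleType> with SampleType::sType/sTypeName.
// After: the enum value itself is the template argument, so no class is
// needed just to name a channel. Logging falls back to the numeric tag,
// which is why the patch prints SampleType(%d) instead of sTypeName.
template <MediaDataTag::Type SampleType>
void MaybeSetChannelStartTimeSketch(int64_t aStartTime) {
  std::printf("Setting SampleType(%d) start time to %lld\n",
              static_cast<int>(SampleType),
              static_cast<long long>(aStartTime));
}

int main() {
  MaybeSetChannelStartTimeSketch<MediaDataTag::AUDIO_DATA>(0);
  MaybeSetChannelStartTimeSketch<MediaDataTag::VIDEO_DATA>(33000);
  return 0;
}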
--- a/dom/media/android/AndroidMediaReader.cpp
+++ b/dom/media/android/AndroidMediaReader.cpp
@@ -331,17 +331,17 @@ AndroidMediaReader::Seek(int64_t aTarget
     // a sync point, whereas for video there are only keyframes once every few
     // seconds. So if we have both audio and video, we must seek the video
     // stream to the preceding keyframe first, get the stream time, and then
     // seek the audio stream to match the video stream's time. Otherwise, the
     // audio and video streams won't be in sync after the seek.
     mVideoSeekTimeUs = aTarget;
 
     nsRefPtr<AndroidMediaReader> self = this;
-    mSeekRequest.Begin(DecodeToFirstVideoData()->Then(OwnerThread(), __func__, [self] (VideoData* v) {
+    mSeekRequest.Begin(DecodeToFirstVideoData()->Then(OwnerThread(), __func__, [self] (MediaData* v) {
       self->mSeekRequest.Complete();
       self->mAudioSeekTimeUs = v->mTime;
       self->mSeekPromise.Resolve(self->mAudioSeekTimeUs, __func__);
     }, [self, aTarget] () {
       self->mSeekRequest.Complete();
       self->mAudioSeekTimeUs = aTarget;
       self->mSeekPromise.Resolve(aTarget, __func__);
     }));
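The lambda's parameter widens from VideoData* to MediaData* with no other change because the only field it reads, mTime, is declared on the base class. A tiny sketch of the two-phase seek decision (the same pattern appears in MediaOmxReader below):

#include <cstdint>

struct MediaDataStandIn { int64_t mTime = 0; };  // see the first sketch

// Video snaps to the preceding keyframe first; audio then seeks to wherever
// the first decoded video sample landed, falling back to the raw target if
// video decode fails.
int64_t PickAudioSeekTimeUs(const MediaDataStandIn* aFirstVideo,
                            int64_t aTargetUs) {
  return aFirstVideo ? aFirstVideo->mTime : aTargetUs;
}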
--- a/dom/media/mediasource/MediaSourceReader.cpp
+++ b/dom/media/mediasource/MediaSourceReader.cpp
@@ -189,18 +189,19 @@ void MediaSourceReader::DoAudioRequest()
 {
   mAudioRequest.Begin(GetAudioReader()->RequestAudioData()
                       ->Then(OwnerThread(), __func__, this,
                              &MediaSourceReader::OnAudioDecoded,
                              &MediaSourceReader::OnAudioNotDecoded));
 }
 
 void
-MediaSourceReader::OnAudioDecoded(AudioData* aSample)
+MediaSourceReader::OnAudioDecoded(MediaData* aSample)
 {
+  MOZ_ASSERT(aSample->mType == MediaData::AUDIO_DATA);
   MOZ_DIAGNOSTIC_ASSERT(!IsSeeking());
   mAudioRequest.Complete();
 
   int64_t ourTime = aSample->mTime + mAudioSourceDecoder->GetTimestampOffset();
   if (aSample->mDiscontinuity) {
     mAudioDiscontinuity = true;
   }
 
@@ -216,17 +217,17 @@ MediaSourceReader::OnAudioDecoded(AudioD
                                  &MediaSourceReader::OnAudioNotDecoded));
       return;
     }
     mDropAudioBeforeThreshold = false;
   }
 
   // Adjust the sample time into our reference.
   nsRefPtr<AudioData> newSample =
-    AudioData::TransferAndUpdateTimestampAndDuration(aSample,
+    AudioData::TransferAndUpdateTimestampAndDuration(aSample->As<AudioData>(),
                                                      ourTime,
                                                      aSample->mDuration);
   mLastAudioTime = newSample->GetEndTime();
   if (mAudioDiscontinuity) {
     newSample->mDiscontinuity = true;
     mAudioDiscontinuity = false;
   }
 
@@ -365,18 +366,19 @@ MediaSourceReader::DoVideoRequest()
   mVideoRequest.Begin(GetVideoReader()->RequestVideoData(mDropVideoBeforeThreshold,
                                                          GetReaderVideoTime(mTimeThreshold))
                       ->Then(OwnerThread(), __func__, this,
                              &MediaSourceReader::OnVideoDecoded,
                              &MediaSourceReader::OnVideoNotDecoded));
 }
 
 void
-MediaSourceReader::OnVideoDecoded(VideoData* aSample)
+MediaSourceReader::OnVideoDecoded(MediaData* aSample)
 {
+  MOZ_ASSERT(aSample->mType == MediaData::VIDEO_DATA);
   MOZ_DIAGNOSTIC_ASSERT(!IsSeeking());
   mVideoRequest.Complete();
 
   // Adjust the sample time into our reference.
   int64_t ourTime = aSample->mTime + mVideoSourceDecoder->GetTimestampOffset();
   if (aSample->mDiscontinuity) {
     mVideoDiscontinuity = true;
   }
@@ -391,17 +393,17 @@ MediaSourceReader::OnVideoDecoded(VideoD
       return;
     }
     mDropVideoBeforeThreshold = false;
     mTimeThreshold = 0;
   }
 
   // Adjust the sample time into our reference.
   nsRefPtr<VideoData> newSample =
-    VideoData::ShallowCopyUpdateTimestampAndDuration(aSample,
+    VideoData::ShallowCopyUpdateTimestampAndDuration(aSample->As<VideoData>(),
                                                      ourTime,
                                                      aSample->mDuration);
 
   mLastVideoTime = newSample->GetEndTime();
   if (mVideoDiscontinuity) {
     newSample->mDiscontinuity = true;
     mVideoDiscontinuity = false;
   }
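MediaSourceReader keeps its typed helpers by asserting the tag at the top of each handler and downcasting only where the concrete type is required. A condensed sketch of that shape, reusing the earlier stand-ins (aTimestampOffset stands in for GetTimestampOffset()):

#include <cassert>
#include <cstdint>

void OnAudioDecodedSketch(MediaData* aSample, int64_t aTimestampOffset) {
  assert(aSample->mType == MediaData::AUDIO_DATA);

  // Adjust the sample time into the MediaSource's reference, then hand the
  // concrete AudioData to the typed transfer helper, roughly:
  //   TransferAndUpdateTimestampAndDuration(aSample->As<AudioData>(),
  //                                         ourTime, aSample->mDuration);
  int64_t ourTime = aSample->mTime + aTimestampOffset;
  AudioData* audio = aSample->As<AudioData>();
  (void)audio;
  (void)ourTime;
}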
--- a/dom/media/mediasource/MediaSourceReader.h
+++ b/dom/media/mediasource/MediaSourceReader.h
@@ -51,19 +51,19 @@ public:
   nsRefPtr<VideoDataPromise>
   RequestVideoData(bool aSkipToNextKeyframe, int64_t aTimeThreshold) override;
 
   virtual size_t SizeOfVideoQueueInFrames() override;
   virtual size_t SizeOfAudioQueueInFrames() override;
 
   virtual void ReleaseMediaResources() override;
 
-  void OnAudioDecoded(AudioData* aSample);
+  void OnAudioDecoded(MediaData* aSample);
   void OnAudioNotDecoded(NotDecodedReason aReason);
-  void OnVideoDecoded(VideoData* aSample);
+  void OnVideoDecoded(MediaData* aSample);
   void OnVideoNotDecoded(NotDecodedReason aReason);
 
   void DoVideoSeek();
   void DoAudioSeek();
   void OnVideoSeekCompleted(int64_t aTime);
   void OnVideoSeekFailed(nsresult aResult);
   void OnAudioSeekCompleted(int64_t aTime);
   void OnAudioSeekFailed(nsresult aResult);
--- a/dom/media/omx/MediaOmxReader.cpp
+++ b/dom/media/omx/MediaOmxReader.cpp
@@ -537,17 +537,17 @@ MediaOmxReader::Seek(int64_t aTarget, in
     // a sync point, whereas for video there are only keyframes once every few
     // seconds. So if we have both audio and video, we must seek the video
     // stream to the preceding keyframe first, get the stream time, and then
     // seek the audio stream to match the video stream's time. Otherwise, the
     // audio and video streams won't be in sync after the seek.
     mVideoSeekTimeUs = aTarget;
 
     nsRefPtr<MediaOmxReader> self = this;
-    mSeekRequest.Begin(DecodeToFirstVideoData()->Then(OwnerThread(), __func__, [self] (VideoData* v) {
+    mSeekRequest.Begin(DecodeToFirstVideoData()->Then(OwnerThread(), __func__, [self] (MediaData* v) {
       self->mSeekRequest.Complete();
       self->mAudioSeekTimeUs = v->mTime;
       self->mSeekPromise.Resolve(self->mAudioSeekTimeUs, __func__);
     }, [self, aTarget] () {
       self->mSeekRequest.Complete();
       self->mAudioSeekTimeUs = aTarget;
       self->mSeekPromise.Resolve(aTarget, __func__);
     }));
--- a/dom/media/webaudio/MediaBufferDecoder.cpp
+++ b/dom/media/webaudio/MediaBufferDecoder.cpp
@@ -124,17 +124,17 @@ private:
       NS_DispatchToMainThread(event);
     }
   }
 
   void Decode();
   void OnMetadataRead(MetadataHolder* aMetadata);
   void OnMetadataNotRead(ReadMetadataFailureReason aReason);
   void RequestSample();
-  void SampleDecoded(AudioData* aData);
+  void SampleDecoded(MediaData* aData);
   void SampleNotDecoded(MediaDecoderReader::NotDecodedReason aReason);
   void FinishDecode();
   void AllocateBuffer();
   void CallbackTheResult();
 
   void Cleanup()
   {
     MOZ_ASSERT(NS_IsMainThread());
@@ -150,17 +150,17 @@ private:
   nsCString mContentType;
   uint8_t* mBuffer;
   uint32_t mLength;
   WebAudioDecodeJob& mDecodeJob;
   PhaseEnum mPhase;
   nsRefPtr<BufferDecoder> mBufferDecoder;
   nsRefPtr<MediaDecoderReader> mDecoderReader;
   MediaInfo mMediaInfo;
-  MediaQueue<AudioData> mAudioQueue;
+  MediaQueue<MediaData> mAudioQueue;
   bool mFirstFrameDecoded;
 };
 
 NS_IMETHODIMP
 MediaDecodeTask::Run()
 {
   MOZ_ASSERT(mBufferDecoder);
   MOZ_ASSERT(mDecoderReader);
@@ -282,17 +282,17 @@ void
 MediaDecodeTask::RequestSample()
 {
   mDecoderReader->RequestAudioData()->Then(mDecoderReader->OwnerThread(), __func__, this,
                                            &MediaDecodeTask::SampleDecoded,
                                            &MediaDecodeTask::SampleNotDecoded);
 }
 
 void
-MediaDecodeTask::SampleDecoded(AudioData* aData)
+MediaDecodeTask::SampleDecoded(MediaData* aData)
 {
   MOZ_ASSERT(!NS_IsMainThread());
   mAudioQueue.Push(aData);
   if (!mFirstFrameDecoded) {
     mDecoderReader->ReadUpdatedMetadata(&mMediaInfo);
     mFirstFrameDecoded = true;
   }
   RequestSample();
@@ -349,18 +349,19 @@ MediaDecodeTask::FinishDecode()
   // case mWriteIndex will tell us how many valid samples we have.
   mDecodeJob.mBuffer = ThreadSharedFloatArrayBufferList::
     Create(channelCount, resampledFrames, fallible);
   if (!mDecodeJob.mBuffer) {
     ReportFailureOnMainThread(WebAudioDecodeJob::UnknownError);
     return;
   }
 
-  nsRefPtr<AudioData> audioData;
-  while ((audioData = mAudioQueue.PopFront())) {
+  nsRefPtr<MediaData> mediaData;
+  while ((mediaData = mAudioQueue.PopFront())) {
+    nsRefPtr<AudioData> audioData = mediaData->As<AudioData>();
     audioData->EnsureAudioBuffer(); // could lead to a copy :(
     AudioDataValue* bufferData = static_cast<AudioDataValue*>
       (audioData->mAudioBuffer->Data());
 
     if (sampleRate != destSampleRate) {
       const uint32_t maxOutSamples = resampledFrames - mDecodeJob.mWriteIndex;
 
       for (uint32_t i = 0; i < audioData->mChannels; ++i) {
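Finally, with MediaQueue<MediaData> in MediaBufferDecoder, the drain loop pops the base type and downcasts once per sample before touching audio-only members. A sketch with std::deque standing in for MediaQueue, reusing the stand-ins from the first sketch:

#include <deque>

void DrainSketch(std::deque<MediaData*>& aQueue) {
  while (!aQueue.empty()) {
    MediaData* mediaData = aQueue.front();
    aQueue.pop_front();
    // One checked downcast per sample; EnsureAudioBuffer(), mChannels and
    // friends are then used exactly as before the patch.
    AudioData* audioData = mediaData->As<AudioData>();
    (void)audioData;
  }
}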