Bug 1172394 - Simplify MediaSink somewhat. r=padenot
☠☠ backed out by 7272d77d4e80 ☠ ☠
authorAndreas Pehrson <apehrson@mozilla.com>
Wed, 13 Nov 2019 08:55:54 +0000
changeset 501788 1c45b135318d8cb519b8d58af72ab864fe6bfd62
parent 501787 c57c41e8c39ea51e96c13af8ecdcbe1640e2a9d6
child 501789 c3bd415507e8bd658b1fe760da997015b098af3e
push id114172
push userdluca@mozilla.com
push dateTue, 19 Nov 2019 11:31:10 +0000
treeherdermozilla-inbound@b5c5ba07d3db [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewerspadenot
bugs1172394
milestone72.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1172394 - Simplify MediaSink somewhat. r=padenot This patch does several minor things: - Moves SetSink (from setSinkId) to automatic coalescing of multiple calls through a Canonical/Mirror setup instead of a manual atomic counter. - Simplifies the logic for when to update the sink in SetSink. - Removes PlaybackParams as a general MediaSink property, as it only contains audio params. - Makes PlaybackParams an internal AudioSink concept, that AudioSinkWrapper knows about. - Ensures mMediaSink is only accessed on the decoder TaskQueue, to allow accessing mirrored members when creating it. Differential Revision: https://phabricator.services.mozilla.com/D52043
dom/media/MediaDecoder.cpp
dom/media/MediaDecoder.h
dom/media/MediaDecoderStateMachine.cpp
dom/media/MediaDecoderStateMachine.h
dom/media/mediasink/AudioSink.cpp
dom/media/mediasink/AudioSink.h
dom/media/mediasink/AudioSinkWrapper.cpp
dom/media/mediasink/AudioSinkWrapper.h
dom/media/mediasink/DecodedStream.cpp
dom/media/mediasink/DecodedStream.h
dom/media/mediasink/MediaSink.h
dom/media/mediasink/VideoSink.cpp
dom/media/mediasink/VideoSink.h
--- a/dom/media/MediaDecoder.cpp
+++ b/dom/media/MediaDecoder.cpp
@@ -1,16 +1,17 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "MediaDecoder.h"
 
+#include "AudioDeviceInfo.h"
 #include "DOMMediaStream.h"
 #include "DecoderBenchmark.h"
 #include "ImageContainer.h"
 #include "Layers.h"
 #include "MediaDecoderStateMachine.h"
 #include "MediaFormatReader.h"
 #include "MediaResource.h"
 #include "MediaShutdownManager.h"
@@ -220,20 +221,21 @@ void MediaDecoder::Pause() {
 }
 
 void MediaDecoder::SetVolume(double aVolume) {
   MOZ_ASSERT(NS_IsMainThread());
   AbstractThread::AutoEnter context(AbstractMainThread());
   mVolume = aVolume;
 }
 
-RefPtr<GenericPromise> MediaDecoder::SetSink(AudioDeviceInfo* aSink) {
+RefPtr<GenericPromise> MediaDecoder::SetSink(AudioDeviceInfo* aSinkDevice) {
   MOZ_ASSERT(NS_IsMainThread());
   AbstractThread::AutoEnter context(AbstractMainThread());
-  return GetStateMachine()->InvokeSetSink(aSink);
+  mSinkDevice = aSinkDevice;
+  return GetStateMachine()->InvokeSetSink(aSinkDevice);
 }
 
 void MediaDecoder::SetOutputCaptured(bool aCaptured) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load().");
   AbstractThread::AutoEnter context(AbstractMainThread());
   mOutputCaptured = aCaptured;
 }
@@ -304,16 +306,17 @@ MediaDecoder::MediaDecoder(MediaDecoderI
       mLogicallySeeking(false, "MediaDecoder::mLogicallySeeking"),
       INIT_MIRROR(mBuffered, TimeIntervals()),
       INIT_MIRROR(mCurrentPosition, TimeUnit::Zero()),
       INIT_MIRROR(mStateMachineDuration, NullableTimeUnit()),
       INIT_MIRROR(mIsAudioDataAudible, false),
       INIT_CANONICAL(mVolume, aInit.mVolume),
       INIT_CANONICAL(mPreservesPitch, aInit.mPreservesPitch),
       INIT_CANONICAL(mLooping, aInit.mLooping),
+      INIT_CANONICAL(mSinkDevice, nullptr),
       INIT_CANONICAL(mOutputCaptured, false),
       INIT_CANONICAL(mOutputTracks, nsTArray<RefPtr<ProcessedMediaTrack>>()),
       INIT_CANONICAL(mOutputPrincipal, PRINCIPAL_HANDLE_NONE),
       INIT_CANONICAL(mPlayState, PLAY_STATE_LOADING),
       mSameOriginMedia(false),
       mVideoDecodingOberver(
           new BackgroundVideoDecodingPermissionObserver(this)),
       mIsBackgroundVideoDecodingAllowed(false),
--- a/dom/media/MediaDecoder.h
+++ b/dom/media/MediaDecoder.h
@@ -150,17 +150,17 @@ class MediaDecoder : public DecoderDocto
   // Adjust the speed of the playback, optionally with pitch correction,
   void SetVolume(double aVolume);
 
   void SetPlaybackRate(double aPlaybackRate);
   void SetPreservesPitch(bool aPreservesPitch);
   void SetLooping(bool aLooping);
 
   // Set the given device as the output device.
-  RefPtr<GenericPromise> SetSink(AudioDeviceInfo* aSink);
+  RefPtr<GenericPromise> SetSink(AudioDeviceInfo* aSinkDevice);
 
   bool GetMinimizePreroll() const { return mMinimizePreroll; }
 
   // All MediaStream-related data is protected by mReentrantMonitor.
   // We have at most one DecodedStreamData per MediaDecoder. Its stream
   // is used as the input for each ProcessedMediaTrack created by calls to
   // captureStream(UntilEnded). Seeking creates a new source stream, as does
   // replaying after the input as ended. In the latter case, the new source is
@@ -609,16 +609,20 @@ class MediaDecoder : public DecoderDocto
 
   // Volume of playback.  0.0 = muted. 1.0 = full volume.
   Canonical<double> mVolume;
 
   Canonical<bool> mPreservesPitch;
 
   Canonical<bool> mLooping;
 
+  // The device used with SetSink, or nullptr if no explicit device has been
+  // set.
+  Canonical<RefPtr<AudioDeviceInfo>> mSinkDevice;
+
   // Whether this MediaDecoder's output is captured. When captured, all decoded
   // data must be played out through mOutputTracks.
   Canonical<bool> mOutputCaptured;
 
   // Tracks that, if set, will get data routed through them.
   Canonical<nsTArray<RefPtr<ProcessedMediaTrack>>> mOutputTracks;
 
   // PrincipalHandle to be used when feeding data into mOutputTracks.
@@ -651,16 +655,19 @@ class MediaDecoder : public DecoderDocto
   bool mIsBackgroundVideoDecodingAllowed;
 
  public:
   AbstractCanonical<double>* CanonicalVolume() { return &mVolume; }
   AbstractCanonical<bool>* CanonicalPreservesPitch() {
     return &mPreservesPitch;
   }
   AbstractCanonical<bool>* CanonicalLooping() { return &mLooping; }
+  AbstractCanonical<RefPtr<AudioDeviceInfo>>* CanonicalSinkDevice() {
+    return &mSinkDevice;
+  }
   AbstractCanonical<bool>* CanonicalOutputCaptured() {
     return &mOutputCaptured;
   }
   AbstractCanonical<nsTArray<RefPtr<ProcessedMediaTrack>>>*
   CanonicalOutputTracks() {
     return &mOutputTracks;
   }
   AbstractCanonical<PrincipalHandle>* CanonicalOutputPrincipal() {
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -2586,16 +2586,17 @@ RefPtr<ShutdownPromise> MediaDecoderStat
   master->mOnMediaNotSeekable.Disconnect();
 
   // Disconnect canonicals and mirrors before shutting down our task queue.
   master->mBuffered.DisconnectIfConnected();
   master->mPlayState.DisconnectIfConnected();
   master->mVolume.DisconnectIfConnected();
   master->mPreservesPitch.DisconnectIfConnected();
   master->mLooping.DisconnectIfConnected();
+  master->mSinkDevice.DisconnectIfConnected();
   master->mOutputCaptured.DisconnectIfConnected();
   master->mOutputTracks.DisconnectIfConnected();
   master->mOutputPrincipal.DisconnectIfConnected();
 
   master->mDuration.DisconnectAll();
   master->mCurrentPosition.DisconnectAll();
   master->mIsAudioDataAudible.DisconnectAll();
 
@@ -2636,26 +2637,26 @@ MediaDecoderStateMachine::MediaDecoderSt
       mVideoDecodeMode(VideoDecodeMode::Normal),
       mIsMSE(aDecoder->IsMSE()),
       mSeamlessLoopingAllowed(false),
       INIT_MIRROR(mBuffered, TimeIntervals()),
       INIT_MIRROR(mPlayState, MediaDecoder::PLAY_STATE_LOADING),
       INIT_MIRROR(mVolume, 1.0),
       INIT_MIRROR(mPreservesPitch, true),
       INIT_MIRROR(mLooping, false),
+      INIT_MIRROR(mSinkDevice, nullptr),
       INIT_MIRROR(mOutputCaptured, false),
       INIT_MIRROR(mOutputTracks, nsTArray<RefPtr<ProcessedMediaTrack>>()),
       INIT_MIRROR(mOutputPrincipal, PRINCIPAL_HANDLE_NONE),
       INIT_CANONICAL(mCanonicalOutputTracks,
                      nsTArray<RefPtr<ProcessedMediaTrack>>()),
       INIT_CANONICAL(mCanonicalOutputPrincipal, PRINCIPAL_HANDLE_NONE),
       INIT_CANONICAL(mDuration, NullableTimeUnit()),
       INIT_CANONICAL(mCurrentPosition, TimeUnit::Zero()),
-      INIT_CANONICAL(mIsAudioDataAudible, false),
-      mSetSinkRequestsCount(0) {
+      INIT_CANONICAL(mIsAudioDataAudible, false) {
   MOZ_COUNT_CTOR(MediaDecoderStateMachine);
   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
 
   InitVideoQueuePrefs();
 
   DDLINKCHILD("reader", aReader);
 }
 
@@ -2672,16 +2673,17 @@ void MediaDecoderStateMachine::Initializ
   MOZ_ASSERT(OnTaskQueue());
 
   // Connect mirrors.
   mBuffered.Connect(mReader->CanonicalBuffered());
   mPlayState.Connect(aDecoder->CanonicalPlayState());
   mVolume.Connect(aDecoder->CanonicalVolume());
   mPreservesPitch.Connect(aDecoder->CanonicalPreservesPitch());
   mLooping.Connect(aDecoder->CanonicalLooping());
+  mSinkDevice.Connect(aDecoder->CanonicalSinkDevice());
   mOutputCaptured.Connect(aDecoder->CanonicalOutputCaptured());
   mOutputTracks.Connect(aDecoder->CanonicalOutputTracks());
   mOutputPrincipal.Connect(aDecoder->CanonicalOutputPrincipal());
 
   // Initialize watchers.
   mWatchManager.Watch(mBuffered,
                       &MediaDecoderStateMachine::BufferedRangeUpdated);
   mWatchManager.Watch(mVolume, &MediaDecoderStateMachine::VolumeChanged);
@@ -2693,47 +2695,51 @@ void MediaDecoderStateMachine::Initializ
                       &MediaDecoderStateMachine::UpdateOutputCaptured);
   mWatchManager.Watch(mOutputTracks,
                       &MediaDecoderStateMachine::UpdateOutputCaptured);
   mWatchManager.Watch(mOutputTracks,
                       &MediaDecoderStateMachine::OutputTracksChanged);
   mWatchManager.Watch(mOutputPrincipal,
                       &MediaDecoderStateMachine::OutputPrincipalChanged);
 
+  mMediaSink = CreateMediaSink();
+
   MOZ_ASSERT(!mStateObj);
   auto* s = new DecodeMetadataState(this);
   mStateObj.reset(s);
   s->Enter();
 }
 
 void MediaDecoderStateMachine::AudioAudibleChanged(bool aAudible) {
   mIsAudioDataAudible = aAudible;
 }
 
 MediaSink* MediaDecoderStateMachine::CreateAudioSink() {
   RefPtr<MediaDecoderStateMachine> self = this;
   auto audioSinkCreator = [self]() {
     MOZ_ASSERT(self->OnTaskQueue());
     AudioSink* audioSink =
         new AudioSink(self->mTaskQueue, self->mAudioQueue, self->GetMediaTime(),
-                      self->Info().mAudio);
+                      self->Info().mAudio, self->mSinkDevice.Ref());
 
     self->mAudibleListener = audioSink->AudibleEvent().Connect(
         self->mTaskQueue, self.get(),
         &MediaDecoderStateMachine::AudioAudibleChanged);
     return audioSink;
   };
-  return new AudioSinkWrapper(mTaskQueue, mAudioQueue, audioSinkCreator);
+  return new AudioSinkWrapper(mTaskQueue, mAudioQueue, audioSinkCreator,
+                              mVolume, mPlaybackRate, mPreservesPitch);
 }
 
-already_AddRefed<MediaSink> MediaDecoderStateMachine::CreateMediaSink(
-    bool aOutputCaptured) {
+already_AddRefed<MediaSink> MediaDecoderStateMachine::CreateMediaSink() {
+  MOZ_ASSERT(OnTaskQueue());
   RefPtr<MediaSink> audioSink =
-      aOutputCaptured
-          ? new DecodedStream(this, mOutputTracks, mAudioQueue, mVideoQueue)
+      mOutputCaptured
+          ? new DecodedStream(this, mOutputTracks, mVolume, mPlaybackRate,
+                              mPreservesPitch, mAudioQueue, mVideoQueue)
           : CreateAudioSink();
 
   RefPtr<MediaSink> mediaSink =
       new VideoSink(mTaskQueue, audioSink, mVideoQueue, mVideoFrameContainer,
                     *mFrameStats, sVideoQueueSendToCompositorSize);
   return mediaSink.forget();
 }
 
@@ -2814,18 +2820,16 @@ nsresult MediaDecoderStateMachine::Init(
   mVideoQueueListener = VideoQueue().PopFrontEvent().Connect(
       mTaskQueue, this, &MediaDecoderStateMachine::OnVideoPopped);
 
   mMetadataManager.Connect(mReader->TimedMetadataEvent(), OwnerThread());
 
   mOnMediaNotSeekable = mReader->OnMediaNotSeekable().Connect(
       OwnerThread(), this, &MediaDecoderStateMachine::SetMediaNotSeekable);
 
-  mMediaSink = CreateMediaSink(mOutputCaptured);
-
   nsresult rv = mReader->Init();
   NS_ENSURE_SUCCESS(rv, rv);
 
   mReader->SetCanonicalDuration(&mDuration);
 
   return NS_OK;
 }
 
@@ -3539,17 +3543,17 @@ void MediaDecoderStateMachine::UpdateOut
   mAudioCompleted = false;
   mVideoCompleted = false;
 
   // Stop and shut down the existing sink.
   StopMediaSink();
   mMediaSink->Shutdown();
 
   // Create a new sink according to whether output is captured.
-  mMediaSink = CreateMediaSink(mOutputCaptured);
+  mMediaSink = CreateMediaSink();
 
   // Don't buffer as much when audio is captured because we don't need to worry
   // about high latency audio devices.
   mAmpleAudioThreshold = mOutputCaptured ? detail::AMPLE_AUDIO_THRESHOLD / 2
                                          : detail::AMPLE_AUDIO_THRESHOLD;
 
   mStateObj->HandleAudioCaptured();
 }
@@ -3565,52 +3569,45 @@ void MediaDecoderStateMachine::OutputPri
   mCanonicalOutputPrincipal = mOutputPrincipal;
 }
 
 RefPtr<GenericPromise> MediaDecoderStateMachine::InvokeSetSink(
     RefPtr<AudioDeviceInfo> aSink) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(aSink);
 
-  Unused << ++mSetSinkRequestsCount;
   return InvokeAsync(OwnerThread(), this, __func__,
                      &MediaDecoderStateMachine::SetSink, aSink);
 }
 
 RefPtr<GenericPromise> MediaDecoderStateMachine::SetSink(
-    RefPtr<AudioDeviceInfo> aSink) {
+    RefPtr<AudioDeviceInfo> aSinkDevice) {
   MOZ_ASSERT(OnTaskQueue());
   if (mOutputCaptured) {
     // Not supported yet.
     return GenericPromise::CreateAndReject(NS_ERROR_ABORT, __func__);
   }
 
-  // Backup current playback parameters.
-  bool wasPlaying = mMediaSink->IsPlaying();
-
-  if (--mSetSinkRequestsCount > 0) {
-    MOZ_ASSERT(mSetSinkRequestsCount > 0);
-    return GenericPromise::CreateAndResolve(wasPlaying, __func__);
+  if (mSinkDevice.Ref() != aSinkDevice) {
+    // A new sink was set before this ran.
+    return GenericPromise::CreateAndResolve(IsPlaying(), __func__);
   }
 
-  MediaSink::PlaybackParams params = mMediaSink->GetPlaybackParams();
-  params.mSink = std::move(aSink);
-
-  if (!mMediaSink->IsStarted()) {
-    mMediaSink->SetPlaybackParams(params);
-    return GenericPromise::CreateAndResolve(false, __func__);
+  if (mMediaSink->AudioDevice() == aSinkDevice) {
+    // The sink has not changed.
+    return GenericPromise::CreateAndResolve(IsPlaying(), __func__);
   }
 
+  const bool wasPlaying = IsPlaying();
+
   // Stop and shutdown the existing sink.
   StopMediaSink();
   mMediaSink->Shutdown();
   // Create a new sink according to whether audio is captured.
-  mMediaSink = CreateMediaSink(false);
-  // Restore playback parameters.
-  mMediaSink->SetPlaybackParams(params);
+  mMediaSink = CreateMediaSink();
   // Start the new sink
   if (wasPlaying) {
     nsresult rv = StartMediaSink();
     if (NS_FAILED(rv)) {
       return GenericPromise::CreateAndReject(NS_ERROR_ABORT, __func__);
     }
   }
   return GenericPromise::CreateAndResolve(wasPlaying, __func__);
--- a/dom/media/MediaDecoderStateMachine.h
+++ b/dom/media/MediaDecoderStateMachine.h
@@ -419,17 +419,17 @@ class MediaDecoderStateMachine
   // Update playback position and trigger next update by default time period.
   // Called on the state machine thread.
   void UpdatePlaybackPositionPeriodically();
 
   MediaSink* CreateAudioSink();
 
   // Always create mediasink which contains an AudioSink or DecodedStream
   // inside.
-  already_AddRefed<MediaSink> CreateMediaSink(bool aOutputCaptured);
+  already_AddRefed<MediaSink> CreateMediaSink();
 
   // Stops the media sink and shut it down.
   // The decoder monitor must be held with exactly one lock count.
   // Called on the state machine thread.
   void StopMediaSink();
 
   // Create and start the media sink.
   // The decoder monitor must be held with exactly one lock count.
@@ -699,16 +699,20 @@ class MediaDecoderStateMachine
 
   // Pitch preservation for the playback rate.
   Mirror<bool> mPreservesPitch;
 
   // Whether to seek back to the start of the media resource
   // upon reaching the end.
   Mirror<bool> mLooping;
 
+  // The device used with SetSink, or nullptr if no explicit device has been
+  // set.
+  Mirror<RefPtr<AudioDeviceInfo>> mSinkDevice;
+
   // Whether all output should be captured into mOutputTracks. While true, the
   // media sink will only play if there are output tracks.
   Mirror<bool> mOutputCaptured;
 
   // Tracks to capture data into.
   Mirror<nsTArray<RefPtr<ProcessedMediaTrack>>> mOutputTracks;
 
   // PrincipalHandle to feed with data captured into mOutputTracks.
@@ -724,19 +728,16 @@ class MediaDecoderStateMachine
   // The time of the current frame, corresponding to the "current
   // playback position" in HTML5. This is referenced from 0, which is the
   // initial playback position.
   Canonical<media::TimeUnit> mCurrentPosition;
 
   // Used to distinguish whether the audio is producing sound.
   Canonical<bool> mIsAudioDataAudible;
 
-  // Used to count the number of pending requests to set a new sink.
-  Atomic<int> mSetSinkRequestsCount;
-
  public:
   AbstractCanonical<media::TimeIntervals>* CanonicalBuffered() const;
 
   AbstractCanonical<nsTArray<RefPtr<ProcessedMediaTrack>>>*
   CanonicalOutputTracks() {
     return &mCanonicalOutputTracks;
   }
   AbstractCanonical<PrincipalHandle>* CanonicalOutputPrincipal() {
--- a/dom/media/mediasink/AudioSink.cpp
+++ b/dom/media/mediasink/AudioSink.cpp
@@ -1,16 +1,17 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim:set ts=2 sw=2 sts=2 et cindent: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "AudioSink.h"
 #include "AudioConverter.h"
+#include "AudioDeviceInfo.h"
 #include "MediaQueue.h"
 #include "VideoUtils.h"
 #include "mozilla/CheckedInt.h"
 #include "mozilla/DebugOnly.h"
 #include "mozilla/IntegerPrintfMacros.h"
 #include "mozilla/StaticPrefs_media.h"
 #include "nsPrintfCString.h"
 
@@ -29,19 +30,21 @@ static const int64_t AUDIO_FUZZ_FRAMES =
 
 // Amount of audio frames we will be processing ahead of use
 static const int32_t LOW_AUDIO_USECS = 300000;
 
 using media::TimeUnit;
 
 AudioSink::AudioSink(AbstractThread* aThread,
                      MediaQueue<AudioData>& aAudioQueue,
-                     const TimeUnit& aStartTime, const AudioInfo& aInfo)
+                     const TimeUnit& aStartTime, const AudioInfo& aInfo,
+                     AudioDeviceInfo* aAudioDevice)
     : mStartTime(aStartTime),
       mInfo(aInfo),
+      mAudioDevice(aAudioDevice),
       mPlaying(true),
       mMonitor("AudioSink"),
       mWritten(0),
       mErrored(false),
       mPlaybackComplete(false),
       mOwnerThread(aThread),
       mProcessedQueueLength(0),
       mFramesParsed(0),
@@ -178,17 +181,17 @@ nsresult AudioSink::InitializeAudioStrea
   AudioConfig::ChannelLayout::ChannelMap channelMap =
       mConverter ? mConverter->OutputConfig().Layout().Map()
                  : AudioConfig::ChannelLayout(mOutputChannels).Map();
   // The layout map used here is already processed by mConverter with
   // mOutputChannels into SMPTE format, so there is no need to worry if
   // StaticPrefs::accessibility_monoaudio_enable() or
   // StaticPrefs::media_forcestereo_enabled() is applied.
   nsresult rv = mAudioStream->Init(mOutputChannels, channelMap, mOutputRate,
-                                   aParams.mSink);
+                                   mAudioDevice);
   if (NS_FAILED(rv)) {
     mAudioStream->Shutdown();
     mAudioStream = nullptr;
     return rv;
   }
 
   // Set playback params before calling Start() so they can take effect
   // as soon as the 1st DataCallback of the AudioStream fires.
--- a/dom/media/mediasink/AudioSink.h
+++ b/dom/media/mediasink/AudioSink.h
@@ -18,21 +18,30 @@
 #include "mozilla/RefPtr.h"
 #include "nsISupportsImpl.h"
 
 namespace mozilla {
 
 class AudioConverter;
 
 class AudioSink : private AudioStream::DataSource {
-  using PlaybackParams = MediaSink::PlaybackParams;
+ public:
+  struct PlaybackParams {
+    PlaybackParams(double aVolume, double aPlaybackRate, bool aPreservesPitch)
+        : mVolume(aVolume),
+          mPlaybackRate(aPlaybackRate),
+          mPreservesPitch(aPreservesPitch) {}
+    double mVolume;
+    double mPlaybackRate;
+    bool mPreservesPitch;
+  };
 
- public:
   AudioSink(AbstractThread* aThread, MediaQueue<AudioData>& aAudioQueue,
-            const media::TimeUnit& aStartTime, const AudioInfo& aInfo);
+            const media::TimeUnit& aStartTime, const AudioInfo& aInfo,
+            AudioDeviceInfo* aAudioDevice);
 
   ~AudioSink();
 
   // Return a promise which will be resolved when AudioSink
   // finishes playing, or rejected if any error.
   nsresult Init(const PlaybackParams& aParams,
                 RefPtr<MediaSink::EndedPromise>& aEndedPromise);
 
@@ -54,16 +63,18 @@ class AudioSink : private AudioStream::D
   void SetPlaybackRate(double aPlaybackRate);
   void SetPreservesPitch(bool aPreservesPitch);
   void SetPlaying(bool aPlaying);
 
   MediaEventSource<bool>& AudibleEvent() { return mAudibleEvent; }
 
   void GetDebugInfo(dom::MediaSinkDebugInfo& aInfo);
 
+  const RefPtr<AudioDeviceInfo>& AudioDevice() { return mAudioDevice; }
+
  private:
   // Allocate and initialize mAudioStream. Returns NS_OK on success.
   nsresult InitializeAudioStream(const PlaybackParams& aParams);
 
   // Interface of AudioStream::DataSource.
   // Called on the callback thread of cubeb.
   UniquePtr<AudioStream::Chunk> PopFrames(uint32_t aFrames) override;
   bool Ended() const override;
@@ -82,16 +93,20 @@ class AudioSink : private AudioStream::D
 
   // Keep the last good position returned from the audio stream. Used to ensure
   // position returned by GetPosition() is mono-increasing in spite of audio
   // stream error. Used on the task queue of MDSM only.
   media::TimeUnit mLastGoodPosition;
 
   const AudioInfo mInfo;
 
+  // The output device this AudioSink is playing data to. The system's default
+  // device is used if this is null.
+  const RefPtr<AudioDeviceInfo> mAudioDevice;
+
   // Used on the task queue of MDSM only.
   bool mPlaying;
 
   MozPromiseHolder<MediaSink::EndedPromise> mEndedPromise;
 
   /*
    * Members to implement AudioStream::DataSource.
    * Used on the callback thread of cubeb.
--- a/dom/media/mediasink/AudioSinkWrapper.cpp
+++ b/dom/media/mediasink/AudioSinkWrapper.cpp
@@ -16,31 +16,16 @@ using media::TimeUnit;
 AudioSinkWrapper::~AudioSinkWrapper() {}
 
 void AudioSinkWrapper::Shutdown() {
   AssertOwnerThread();
   MOZ_ASSERT(!mIsStarted, "Must be called after playback stopped.");
   mCreator = nullptr;
 }
 
-const MediaSink::PlaybackParams& AudioSinkWrapper::GetPlaybackParams() const {
-  AssertOwnerThread();
-  return mParams;
-}
-
-void AudioSinkWrapper::SetPlaybackParams(const PlaybackParams& aParams) {
-  AssertOwnerThread();
-  if (mAudioSink) {
-    mAudioSink->SetVolume(aParams.mVolume);
-    mAudioSink->SetPlaybackRate(aParams.mPlaybackRate);
-    mAudioSink->SetPreservesPitch(aParams.mPreservesPitch);
-  }
-  mParams = aParams;
-}
-
 RefPtr<MediaSink::EndedPromise> AudioSinkWrapper::OnEnded(TrackType aType) {
   AssertOwnerThread();
   MOZ_ASSERT(mIsStarted, "Must be called after playback starts.");
   if (aType == TrackInfo::kAudioTrack) {
     return mEndedPromise;
   }
   return nullptr;
 }
@@ -149,16 +134,21 @@ void AudioSinkWrapper::SetPlaying(bool a
     // Remember how long we've played.
     mPlayDuration = GetPosition();
     // mPlayStartTime must be updated later since GetPosition()
     // depends on the value of mPlayStartTime.
     mPlayStartTime = TimeStamp();
   }
 }
 
+double AudioSinkWrapper::PlaybackRate() const {
+  AssertOwnerThread();
+  return mParams.mPlaybackRate;
+}
+
 nsresult AudioSinkWrapper::Start(const TimeUnit& aStartTime,
                                  const MediaInfo& aInfo) {
   AssertOwnerThread();
   MOZ_ASSERT(!mIsStarted, "playback already started.");
 
   mIsStarted = true;
   mPlayDuration = aStartTime;
   mPlayStartTime = TimeStamp::Now();
--- a/dom/media/mediasink/AudioSinkWrapper.h
+++ b/dom/media/mediasink/AudioSinkWrapper.h
@@ -19,16 +19,18 @@ class AudioSink;
 class MediaData;
 template <class T>
 class MediaQueue;
 
 /**
  * A wrapper around AudioSink to provide the interface of MediaSink.
  */
 class AudioSinkWrapper : public MediaSink {
+  using PlaybackParams = AudioSink::PlaybackParams;
+
   // An AudioSink factory.
   class Creator {
    public:
     virtual ~Creator() {}
     virtual AudioSink* Create() = 0;
   };
 
   // Wrap around a function object which creates AudioSinks.
@@ -41,39 +43,40 @@ class AudioSinkWrapper : public MediaSin
    private:
     Function mFunction;
   };
 
  public:
   template <typename Function>
   AudioSinkWrapper(AbstractThread* aOwnerThread,
                    const MediaQueue<AudioData>& aAudioQueue,
-                   const Function& aFunc)
+                   const Function& aFunc, double aVolume, double aPlaybackRate,
+                   bool aPreservesPitch)
       : mOwnerThread(aOwnerThread),
         mCreator(new CreatorImpl<Function>(aFunc)),
         mIsStarted(false),
+        mParams(aVolume, aPlaybackRate, aPreservesPitch),
         // Give an invalid value to facilitate debug if used before playback
         // starts.
         mPlayDuration(media::TimeUnit::Invalid()),
         mAudioEnded(true),
         mAudioQueue(aAudioQueue) {}
 
-  const PlaybackParams& GetPlaybackParams() const override;
-  void SetPlaybackParams(const PlaybackParams& aParams) override;
-
   RefPtr<EndedPromise> OnEnded(TrackType aType) override;
   media::TimeUnit GetEndTime(TrackType aType) const override;
   media::TimeUnit GetPosition(TimeStamp* aTimeStamp = nullptr) const override;
   bool HasUnplayedFrames(TrackType aType) const override;
 
   void SetVolume(double aVolume) override;
   void SetPlaybackRate(double aPlaybackRate) override;
   void SetPreservesPitch(bool aPreservesPitch) override;
   void SetPlaying(bool aPlaying) override;
 
+  double PlaybackRate() const override;
+
   nsresult Start(const media::TimeUnit& aStartTime,
                  const MediaInfo& aInfo) override;
   void Stop() override;
   bool IsStarted() const override;
   bool IsPlaying() const override;
 
   void Shutdown() override;
 
--- a/dom/media/mediasink/DecodedStream.cpp
+++ b/dom/media/mediasink/DecodedStream.cpp
@@ -358,45 +358,39 @@ void DecodedStreamData::GetDebugInfo(dom
       mLastVideoEndTime.valueOr(TimeUnit::FromMicroseconds(-1))
           .ToMicroseconds();
   aInfo.mHaveSentFinishAudio = mHaveSentFinishAudio;
   aInfo.mHaveSentFinishVideo = mHaveSentFinishVideo;
 }
 
 DecodedStream::DecodedStream(
     MediaDecoderStateMachine* aStateMachine,
-    nsTArray<RefPtr<ProcessedMediaTrack>> aOutputTracks,
+    nsTArray<RefPtr<ProcessedMediaTrack>> aOutputTracks, double aVolume,
+    double aPlaybackRate, bool aPreservesPitch,
     MediaQueue<AudioData>& aAudioQueue, MediaQueue<VideoData>& aVideoQueue)
     : mOwnerThread(aStateMachine->OwnerThread()),
       mWatchManager(this, mOwnerThread),
       mPlaying(false, "DecodedStream::mPlaying"),
       mPrincipalHandle(aStateMachine->OwnerThread(), PRINCIPAL_HANDLE_NONE,
                        "DecodedStream::mPrincipalHandle (Mirror)"),
       mOutputTracks(std::move(aOutputTracks)),
+      mVolume(aVolume),
+      mPlaybackRate(aPlaybackRate),
+      mPreservesPitch(aPreservesPitch),
       mAudioQueue(aAudioQueue),
       mVideoQueue(aVideoQueue) {
   mPrincipalHandle.Connect(aStateMachine->CanonicalOutputPrincipal());
 
   mWatchManager.Watch(mPlaying, &DecodedStream::PlayingChanged);
 }
 
 DecodedStream::~DecodedStream() {
   MOZ_ASSERT(mStartTime.isNothing(), "playback should've ended.");
 }
 
-const MediaSink::PlaybackParams& DecodedStream::GetPlaybackParams() const {
-  AssertOwnerThread();
-  return mParams;
-}
-
-void DecodedStream::SetPlaybackParams(const PlaybackParams& aParams) {
-  AssertOwnerThread();
-  mParams = aParams;
-}
-
 RefPtr<DecodedStream::EndedPromise> DecodedStream::OnEnded(TrackType aType) {
   AssertOwnerThread();
   MOZ_ASSERT(mStartTime.isSome());
 
   if (aType == TrackInfo::kAudioTrack && mInfo.HasAudio()) {
     return mAudioEndedPromise;
   } else if (aType == TrackInfo::kVideoTrack && mInfo.HasVideo()) {
     return mVideoEndedPromise;
@@ -544,27 +538,32 @@ void DecodedStream::SetPlaying(bool aPla
     return;
   }
 
   mPlaying = aPlaying;
 }
 
 void DecodedStream::SetVolume(double aVolume) {
   AssertOwnerThread();
-  mParams.mVolume = aVolume;
+  mVolume = aVolume;
 }
 
 void DecodedStream::SetPlaybackRate(double aPlaybackRate) {
   AssertOwnerThread();
-  mParams.mPlaybackRate = aPlaybackRate;
+  mPlaybackRate = aPlaybackRate;
 }
 
 void DecodedStream::SetPreservesPitch(bool aPreservesPitch) {
   AssertOwnerThread();
-  mParams.mPreservesPitch = aPreservesPitch;
+  mPreservesPitch = aPreservesPitch;
+}
+
+double DecodedStream::PlaybackRate() const {
+  AssertOwnerThread();
+  return mPlaybackRate;
 }
 
 static void SendStreamAudio(DecodedStreamData* aStream,
                             const TimeUnit& aStartTime, AudioData* aData,
                             AudioSegment* aOutput, uint32_t aRate,
                             const PrincipalHandle& aPrincipalHandle) {
   // The amount of audio frames that is used to fuzz rounding errors.
   static const int64_t AUDIO_FUZZ_FRAMES = 1;
@@ -844,17 +843,17 @@ void DecodedStream::SendData() {
   if (!mData) {
     return;
   }
 
   if (!mPlaying) {
     return;
   }
 
-  SendAudio(mParams.mVolume, mPrincipalHandle);
+  SendAudio(mVolume, mPrincipalHandle);
   SendVideo(mPrincipalHandle);
 }
 
 TimeUnit DecodedStream::GetEndTime(TrackType aType) const {
   AssertOwnerThread();
   if (aType == TrackInfo::kAudioTrack && mInfo.HasAudio() && mData) {
     auto t = mStartTime.ref() +
              FramesToTimeUnit(mData->mAudioFramesWritten, mInfo.mAudio.mRate);
--- a/dom/media/mediasink/DecodedStream.h
+++ b/dom/media/mediasink/DecodedStream.h
@@ -28,41 +28,38 @@ class VideoData;
 struct PlaybackInfoInit;
 class ProcessedMediaTrack;
 class TimeStamp;
 
 template <class T>
 class MediaQueue;
 
 class DecodedStream : public MediaSink {
-  using MediaSink::PlaybackParams;
-
  public:
   DecodedStream(MediaDecoderStateMachine* aStateMachine,
                 nsTArray<RefPtr<ProcessedMediaTrack>> aOutputTracks,
+                double aVolume, double aPlaybackRate, bool aPreservesPitch,
                 MediaQueue<AudioData>& aAudioQueue,
                 MediaQueue<VideoData>& aVideoQueue);
 
-  // MediaSink functions.
-  const PlaybackParams& GetPlaybackParams() const override;
-  void SetPlaybackParams(const PlaybackParams& aParams) override;
-
   RefPtr<EndedPromise> OnEnded(TrackType aType) override;
   media::TimeUnit GetEndTime(TrackType aType) const override;
   media::TimeUnit GetPosition(TimeStamp* aTimeStamp = nullptr) const override;
   bool HasUnplayedFrames(TrackType aType) const override {
     // TODO: implement this.
     return false;
   }
 
   void SetVolume(double aVolume) override;
   void SetPlaybackRate(double aPlaybackRate) override;
   void SetPreservesPitch(bool aPreservesPitch) override;
   void SetPlaying(bool aPlaying) override;
 
+  double PlaybackRate() const override;
+
   nsresult Start(const media::TimeUnit& aStartTime,
                  const MediaInfo& aInfo) override;
   void Stop() override;
   bool IsStarted() const override;
   bool IsPlaying() const override;
   void Shutdown() override;
   void GetDebugInfo(dom::MediaSinkDebugInfo& aInfo) override;
 
@@ -95,17 +92,19 @@ class DecodedStream : public MediaSink {
   UniquePtr<DecodedStreamData> mData;
   RefPtr<EndedPromise> mAudioEndedPromise;
   RefPtr<EndedPromise> mVideoEndedPromise;
 
   Watchable<bool> mPlaying;
   Mirror<PrincipalHandle> mPrincipalHandle;
   const nsTArray<RefPtr<ProcessedMediaTrack>> mOutputTracks;
 
-  PlaybackParams mParams;
+  double mVolume;
+  double mPlaybackRate;
+  bool mPreservesPitch;
 
   media::NullableTimeUnit mStartTime;
   media::TimeUnit mLastOutputTime;
   MediaInfo mInfo;
 
   MediaQueue<AudioData>& mAudioQueue;
   MediaQueue<VideoData>& mVideoQueue;
 
--- a/dom/media/mediasink/MediaSink.h
+++ b/dom/media/mediasink/MediaSink.h
@@ -2,17 +2,16 @@
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef MediaSink_h_
 #define MediaSink_h_
 
-#include "AudioDeviceInfo.h"
 #include "MediaInfo.h"
 #include "mozilla/MozPromise.h"
 #include "mozilla/RefPtr.h"
 #include "mozilla/dom/MediaDebugInfoBinding.h"
 #include "nsISupportsImpl.h"
 
 namespace mozilla {
 
@@ -34,33 +33,16 @@ class VideoFrameContainer;
  * Note this class is not thread-safe and should be called from the state
  * machine thread only.
  */
 class MediaSink {
  public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaSink);
   typedef mozilla::TrackInfo::TrackType TrackType;
 
-  struct PlaybackParams {
-    PlaybackParams()
-        : mVolume(1.0), mPlaybackRate(1.0), mPreservesPitch(true) {}
-    double mVolume;
-    double mPlaybackRate;
-    bool mPreservesPitch;
-    RefPtr<AudioDeviceInfo> mSink;
-  };
-
-  // Return the playback parameters of this sink.
-  // Can be called in any state.
-  virtual const PlaybackParams& GetPlaybackParams() const = 0;
-
-  // Set the playback parameters of this sink.
-  // Can be called in any state.
-  virtual void SetPlaybackParams(const PlaybackParams& aParams) = 0;
-
   // EndedPromise needs to be a non-exclusive promise as it is shared between
   // both the AudioSink and VideoSink.
   typedef MozPromise<bool, nsresult, /* IsExclusive = */ false> EndedPromise;
 
   // Return a promise which is resolved when the track finishes
   // or null if no such track.
   // Must be called after playback starts.
   virtual RefPtr<EndedPromise> OnEnded(TrackType aType) = 0;
@@ -95,16 +77,20 @@ class MediaSink {
   // Whether to preserve pitch of the audio track.
   // Do nothing if this sink has no audio track.
   // Can be called in any state.
   virtual void SetPreservesPitch(bool aPreservesPitch) {}
 
   // Pause/resume the playback. Only work after playback starts.
   virtual void SetPlaying(bool aPlaying) = 0;
 
+  // Get the playback rate.
+  // Can be called in any state.
+  virtual double PlaybackRate() const = 0;
+
   // Single frame rendering operation may need to be done before playback
   // started (1st frame) or right after seek completed or playback stopped.
   // Do nothing if this sink has no video track. Can be called in any state.
   virtual void Redraw(const VideoInfo& aInfo){};
 
   // Begin a playback session with the provided start time and media info.
   // Must be called when playback is stopped.
   virtual nsresult Start(const media::TimeUnit& aStartTime,
@@ -117,16 +103,20 @@ class MediaSink {
   // Return true if playback has started.
   // Can be called in any state.
   virtual bool IsStarted() const = 0;
 
   // Return true if playback is started and not paused otherwise false.
   // Can be called in any state.
   virtual bool IsPlaying() const = 0;
 
+  // The audio output device this MediaSink is playing audio data to. The
+  // default device is used if this returns null.
+  virtual const AudioDeviceInfo* AudioDevice() { return nullptr; }
+
   // Called on the state machine thread to shut down the sink. All resources
   // allocated by this sink should be released.
   // Must be called after playback stopped.
   virtual void Shutdown() {}
 
   virtual void SetSecondaryVideoContainer(VideoFrameContainer* aSecondary) {}
   virtual void ClearSecondaryVideoContainer() {}
 
--- a/dom/media/mediasink/VideoSink.cpp
+++ b/dom/media/mediasink/VideoSink.cpp
@@ -151,28 +151,16 @@ VideoSink::VideoSink(AbstractThread* aTh
 }
 
 VideoSink::~VideoSink() {
 #ifdef XP_WIN
   MOZ_ASSERT(!mHiResTimersRequested);
 #endif
 }
 
-const MediaSink::PlaybackParams& VideoSink::GetPlaybackParams() const {
-  AssertOwnerThread();
-
-  return mAudioSink->GetPlaybackParams();
-}
-
-void VideoSink::SetPlaybackParams(const PlaybackParams& aParams) {
-  AssertOwnerThread();
-
-  mAudioSink->SetPlaybackParams(aParams);
-}
-
 RefPtr<VideoSink::EndedPromise> VideoSink::OnEnded(TrackType aType) {
   AssertOwnerThread();
   MOZ_ASSERT(mAudioSink->IsStarted(), "Must be called after playback starts.");
 
   if (aType == TrackInfo::kAudioTrack) {
     return mAudioSink->OnEnded(aType);
   } else if (aType == TrackInfo::kVideoTrack) {
     return mEndPromise;
@@ -218,16 +206,22 @@ void VideoSink::SetVolume(double aVolume
 }
 
 void VideoSink::SetPreservesPitch(bool aPreservesPitch) {
   AssertOwnerThread();
 
   mAudioSink->SetPreservesPitch(aPreservesPitch);
 }
 
+double VideoSink::PlaybackRate() const {
+  AssertOwnerThread();
+
+  return mAudioSink->PlaybackRate();
+}
+
 void VideoSink::EnsureHighResTimersOnOnlyIfPlaying() {
 #ifdef XP_WIN
   const bool needed = IsPlaying();
   if (needed == mHiResTimersRequested) {
     return;
   }
   if (needed) {
     // Ensure high precision timers are enabled on Windows, otherwise the
@@ -435,18 +429,18 @@ void VideoSink::TryUpdateRenderedVideoFr
     // Time to render this frame.
     UpdateRenderedVideoFrames();
     return;
   }
 
   // If we send this future frame to the compositor now, it will be rendered
   // immediately and break A/V sync. Instead, we schedule a timer to send it
   // later.
-  int64_t delta = (v->mTime - clockTime).ToMicroseconds() /
-                  mAudioSink->GetPlaybackParams().mPlaybackRate;
+  int64_t delta =
+      (v->mTime - clockTime).ToMicroseconds() / mAudioSink->PlaybackRate();
   TimeStamp target = nowTime + TimeDuration::FromMicroseconds(delta);
   RefPtr<VideoSink> self = this;
   mUpdateScheduler.Ensure(
       target, [self]() { self->UpdateRenderedVideoFramesByTimer(); },
       [self]() { self->UpdateRenderedVideoFramesByTimer(); });
 }
 
 void VideoSink::UpdateRenderedVideoFramesByTimer() {
@@ -476,17 +470,17 @@ void VideoSink::RenderVideoFrames(int32_
   AutoTArray<RefPtr<VideoData>, 16> frames;
   VideoQueue().GetFirstElements(aMaxFrames, &frames);
   if (frames.IsEmpty() || !mContainer) {
     return;
   }
 
   AutoTArray<ImageContainer::NonOwningImage, 16> images;
   TimeStamp lastFrameTime;
-  MediaSink::PlaybackParams params = mAudioSink->GetPlaybackParams();
+  double playbackRate = mAudioSink->PlaybackRate();
   for (uint32_t i = 0; i < frames.Length(); ++i) {
     VideoData* frame = frames[i];
     bool wasSent = frame->IsSentToCompositor();
     frame->MarkSentToCompositor();
 
     if (!frame->mImage || !frame->mImage->IsValid() ||
         !frame->mImage->GetSize().width || !frame->mImage->GetSize().height) {
       continue;
@@ -494,18 +488,18 @@ void VideoSink::RenderVideoFrames(int32_
 
     if (frame->mTime.IsNegative()) {
       // Frame times before the start time are invalid; drop such frames
       continue;
     }
 
     MOZ_ASSERT(!aClockTimeStamp.IsNull());
     int64_t delta = frame->mTime.ToMicroseconds() - aClockTime;
-    TimeStamp t = aClockTimeStamp +
-                  TimeDuration::FromMicroseconds(delta / params.mPlaybackRate);
+    TimeStamp t =
+        aClockTimeStamp + TimeDuration::FromMicroseconds(delta / playbackRate);
     if (!lastFrameTime.IsNull() && t <= lastFrameTime) {
       // Timestamps out of order; drop the new frame. In theory we should
       // probably replace the previous frame with the new frame if the
       // timestamps are equal, but this is a corrupt video file already so
       // never mind.
       continue;
     }
     MOZ_ASSERT(!t.IsNull());
@@ -608,19 +602,18 @@ void VideoSink::UpdateRenderedVideoFrame
   VideoQueue().GetFirstElements(2, &frames);
   if (frames.Length() < 2) {
     return;
   }
 
   int64_t nextFrameTime = frames[1]->mTime.ToMicroseconds();
   int64_t delta = std::max(nextFrameTime - clockTime.ToMicroseconds(),
                            MIN_UPDATE_INTERVAL_US);
-  TimeStamp target =
-      nowTime + TimeDuration::FromMicroseconds(
-                    delta / mAudioSink->GetPlaybackParams().mPlaybackRate);
+  TimeStamp target = nowTime + TimeDuration::FromMicroseconds(
+                                   delta / mAudioSink->PlaybackRate());
 
   RefPtr<VideoSink> self = this;
   mUpdateScheduler.Ensure(
       target, [self]() { self->UpdateRenderedVideoFramesByTimer(); },
       [self]() { self->UpdateRenderedVideoFramesByTimer(); });
 }
 
 void VideoSink::MaybeResolveEndPromise() {
@@ -642,17 +635,17 @@ void VideoSink::MaybeResolveEndPromise()
     TimeStamp nowTime;
     const auto clockTime = mAudioSink->GetPosition(&nowTime);
     if (clockTime < mVideoFrameEndTime) {
       VSINK_LOG_V(
           "Not reach video end time yet, reschedule timer to resolve "
           "end promise. clockTime=%" PRId64 ", endTime=%" PRId64,
           clockTime.ToMicroseconds(), mVideoFrameEndTime.ToMicroseconds());
       int64_t delta = (mVideoFrameEndTime - clockTime).ToMicroseconds() /
-                      mAudioSink->GetPlaybackParams().mPlaybackRate;
+                      mAudioSink->PlaybackRate();
       TimeStamp target = nowTime + TimeDuration::FromMicroseconds(delta);
       auto resolveEndPromise = [self = RefPtr<VideoSink>(this)]() {
         self->mEndPromiseHolder.ResolveIfExists(true, __func__);
         self->mUpdateScheduler.CompleteRequest();
       };
       mUpdateScheduler.Ensure(target, std::move(resolveEndPromise),
                               std::move(resolveEndPromise));
     } else {
--- a/dom/media/mediasink/VideoSink.h
+++ b/dom/media/mediasink/VideoSink.h
@@ -27,36 +27,34 @@ class MediaQueue;
 class VideoSink : public MediaSink {
   typedef mozilla::layers::ImageContainer::ProducerID ProducerID;
 
  public:
   VideoSink(AbstractThread* aThread, MediaSink* aAudioSink,
             MediaQueue<VideoData>& aVideoQueue, VideoFrameContainer* aContainer,
             FrameStatistics& aFrameStats, uint32_t aVQueueSentToCompositerSize);
 
-  const PlaybackParams& GetPlaybackParams() const override;
-
-  void SetPlaybackParams(const PlaybackParams& aParams) override;
-
   RefPtr<EndedPromise> OnEnded(TrackType aType) override;
 
   TimeUnit GetEndTime(TrackType aType) const override;
 
   TimeUnit GetPosition(TimeStamp* aTimeStamp = nullptr) const override;
 
   bool HasUnplayedFrames(TrackType aType) const override;
 
   void SetPlaybackRate(double aPlaybackRate) override;
 
   void SetVolume(double aVolume) override;
 
   void SetPreservesPitch(bool aPreservesPitch) override;
 
   void SetPlaying(bool aPlaying) override;
 
+  double PlaybackRate() const override;
+
   void Redraw(const VideoInfo& aInfo) override;
 
   nsresult Start(const TimeUnit& aStartTime, const MediaInfo& aInfo) override;
 
   void Stop() override;
 
   bool IsStarted() const override;