author | Paul Adenot <paul@paul.cx> |
date | Tue, 24 May 2022 13:09:05 +0000 |
changeset 618726 | 0b20ed64f57cf7272cf1b4e9e5e2f52a34fd5075 |
parent 618725 | 0b1dc7c2fdfdd68da8bbdc2dcd2fb62a58de11e2 |
child 618727 | acb4ae1c1e57de7130cbfb499d6c30dcf134dfbb |
push id | 163385 |
push user | padenot@mozilla.com |
push date | Tue, 24 May 2022 13:12:53 +0000 |
treeherder | autoland@3774e6ab3869 |
reviewers | alwu, media-playback-reviewers |
bugs | 1743834 |
milestone | 102.0a1 |
first release with | nightly linux32, nightly linux64, nightly mac, nightly win32, nightly win64 |
last release without | nightly linux32, nightly linux64, nightly mac, nightly win32, nightly win64 |
--- a/dom/media/AudioStream.cpp
+++ b/dom/media/AudioStream.cpp
@@ -315,43 +315,42 @@ void AudioStream::SetStreamName(const ns
   }
   MonitorAutoLock mon(mMonitor);
   if (InvokeCubeb(cubeb_stream_set_name, aRawStreamName.get()) != CUBEB_OK) {
     LOGE("Could not set cubeb stream name.");
   }
 }
 
-Result<already_AddRefed<MediaSink::EndedPromise>, nsresult>
-AudioStream::Start() {
+nsresult AudioStream::Start(
+    MozPromiseHolder<MediaSink::EndedPromise>& aEndedPromise) {
   TRACE("AudioStream::Start");
   MOZ_ASSERT(mState == INITIALIZED);
   mState = STARTED;
-  RefPtr<MediaSink::EndedPromise> promise;
   {
     MonitorAutoLock mon(mMonitor);
     // As cubeb might call audio stream's state callback very soon after we
     // start cubeb, we have to create the promise beforehand in order to handle
     // the case where we immediately get `drained`.
-    promise = mEndedPromise.Ensure(__func__);
+    mEndedPromise = std::move(aEndedPromise);
     mPlaybackComplete = false;
 
     if (InvokeCubeb(cubeb_stream_start) != CUBEB_OK) {
       mState = ERRORED;
     }
   }
 
   LOG("started, state %s", mState == STARTED   ? "STARTED"
                            : mState == DRAINED ? "DRAINED"
                                                : "ERRORED");
   if (mState == STARTED || mState == DRAINED) {
-    return promise.forget();
+    return NS_OK;
   }
-  return Err(NS_ERROR_FAILURE);
+  return NS_ERROR_FAILURE;
 }
 
 void AudioStream::Pause() {
   TRACE("AudioStream::Pause");
   MOZ_ASSERT(mState != INITIALIZED, "Must be Start()ed.");
   MOZ_ASSERT(mState != STOPPED, "Already Pause()ed.");
   MOZ_ASSERT(mState != SHUTDOWN, "Already Shutdown()ed.");
 
@@ -386,17 +385,18 @@ void AudioStream::Resume() {
     mState = ERRORED;
   } else if (mState != DRAINED && mState != ERRORED) {
     // Don't transition to other states if we are already
     // drained or errored.
     mState = STARTED;
   }
 }
 
-void AudioStream::Shutdown() {
+Maybe<MozPromiseHolder<MediaSink::EndedPromise>> AudioStream::Shutdown(
+    ShutdownCause aCause) {
   TRACE("AudioStream::Shutdown");
   LOG("Shutdown, state %d", mState.load());
 
   MonitorAutoLock mon(mMonitor);
   if (mCubebStream) {
     // Force stop to put the cubeb stream in a stable state before deletion.
     InvokeCubeb(cubeb_stream_stop);
     // Must not try to shut down cubeb from within the lock! wasapi may still
@@ -409,17 +409,25 @@ void AudioStream::Shutdown() {
   // After `cubeb_stream_stop` has been called, there is no audio thread
   // anymore. We can delete the time stretcher.
   if (mTimeStretcher) {
     soundtouch::destroySoundTouchObj(mTimeStretcher);
     mTimeStretcher = nullptr;
   }
 
   mState = SHUTDOWN;
-  mEndedPromise.ResolveIfExists(true, __func__);
+  // When shutting down, if this AudioStream is shutting down because the
+  // HTMLMediaElement is now muted, hand back the ended promise, so that it can
+  // properly be resolved if the end of the media is reached while muted (i.e.
+  // without having an AudioStream).
+  if (aCause != ShutdownCause::Muting) {
+    mEndedPromise.ResolveIfExists(true, __func__);
+    return Nothing();
+  }
+  return Some(std::move(mEndedPromise));
 }
 
 int64_t AudioStream::GetPosition() {
   TRACE("AudioStream::GetPosition");
 #ifndef XP_MACOSX
   MonitorAutoLock mon(mMonitor);
 #endif
   int64_t frames = GetPositionInFramesUnlocked();
--- a/dom/media/AudioStream.h
+++ b/dom/media/AudioStream.h
@@ -30,16 +30,23 @@ class MOZ_EXPORT SoundTouch;
 namespace mozilla {
 
 struct CubebDestroyPolicy {
   void operator()(cubeb_stream* aStream) const {
     cubeb_stream_destroy(aStream);
   }
 };
 
+enum class ShutdownCause {
+  // Regular shutdown, signal the end of the audio stream.
+  Regular,
+  // Shutdown for muting, don't signal the end of the audio stream.
+  Muting
+};
+
 class AudioStream;
 class FrameHistory;
 class AudioConfig;
 
 // A struct that contains the number of frames serviced or underrun by a
 // callback, alongside the sample-rate for this callback (in case of playback
 // rate change, it can be variable).
 struct CallbackInfo {
@@ -240,29 +247,29 @@ class AudioStream final {
   // 7.1 ). Initialize the audio stream.and aRate is the sample rate
   // (22050Hz, 44100Hz, etc).
   AudioStream(DataSource& aSource, uint32_t aInRate, uint32_t aOutputChannels,
               AudioConfig::ChannelLayout::ChannelMap aChannelMap);
 
   nsresult Init(AudioDeviceInfo* aSinkInfo);
 
   // Closes the stream. All future use of the stream is an error.
-  void Shutdown();
+  Maybe<MozPromiseHolder<MediaSink::EndedPromise>> Shutdown(
+      ShutdownCause = ShutdownCause::Regular);
 
   void Reset();
 
   // Set the current volume of the audio playback. This is a value from
   // 0 (meaning muted) to 1 (meaning full volume). Thread-safe.
   void SetVolume(double aVolume);
 
   void SetStreamName(const nsAString& aStreamName);
 
-  // Start the stream and return a promise that will be resolve when the
-  // playback completes.
-  Result<already_AddRefed<MediaSink::EndedPromise>, nsresult> Start();
+  // Start the stream.
+  nsresult Start(MozPromiseHolder<MediaSink::EndedPromise>& aEndedPromise);
 
   // Pause audio playback.
   void Pause();
 
   // Resume audio playback.
   void Resume();
 
   // Return the position in microseconds of the audio frame being played by
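For context, the caller-side contract of the new AudioStream API works like this: the caller keeps a MozPromiseHolder<MediaSink::EndedPromise>, takes a consumer reference with Ensure() before lending the holder to Start(), and can reclaim the still-unresolved holder from Shutdown(ShutdownCause::Muting) when it tears the stream down without ending playback. A minimal sketch of that call pattern, condensed from the AudioSink/AudioSinkWrapper changes below; the names mEndedHolder, mStream and HandleStartError are illustrative, not part of the patch:

    // Sketch only: driving the new AudioStream API from a hypothetical caller.
    MozPromiseHolder<MediaSink::EndedPromise> mEndedHolder;
    RefPtr<AudioStream> mStream = /* created and Init()ed elsewhere */;

    // Take a consumer-side reference *before* handing the holder over, because
    // Start() moves the holder into the stream.
    RefPtr<MediaSink::EndedPromise> ended = mEndedHolder.Ensure(__func__);
    if (NS_FAILED(mStream->Start(mEndedHolder))) {
      HandleStartError();  // illustrative error path
    }

    // On mute: tear the stream down but reclaim the unresolved holder, so the
    // "ended" promise can still be resolved when playback really finishes.
    Maybe<MozPromiseHolder<MediaSink::EndedPromise>> reclaimed =
        mStream->Shutdown(ShutdownCause::Muting);
    if (reclaimed) {
      mEndedHolder = std::move(*reclaimed);
    }
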
--- a/dom/media/mediasink/AudioSink.cpp
+++ b/dom/media/mediasink/AudioSink.cpp
@@ -61,18 +61,19 @@ AudioSink::AudioSink(AbstractThread* aTh
       MakeUnique<SPSCQueue<AudioDataValue>>(static_cast<uint32_t>(
           capacitySeconds * static_cast<float>(mOutputChannels * mOutputRate)));
   SINK_LOG("Ringbuffer has space for %u elements (%lf seconds)",
            mProcessedSPSCQueue->Capacity(), capacitySeconds);
 }
 
 AudioSink::~AudioSink() = default;
 
-Result<already_AddRefed<MediaSink::EndedPromise>, nsresult> AudioSink::Start(
-    const PlaybackParams& aParams) {
+nsresult AudioSink::Start(
+    const PlaybackParams& aParams,
+    MozPromiseHolder<MediaSink::EndedPromise>& aEndedPromise) {
   MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
 
   mAudioQueueListener = mAudioQueue.PushEvent().Connect(
       mOwnerThread, this, &AudioSink::OnAudioPushed);
   mAudioQueueFinishListener = mAudioQueue.FinishEvent().Connect(
       mOwnerThread, this, &AudioSink::NotifyAudioNeeded);
   mProcessedQueueListener =
       mAudioPopped.Connect(mOwnerThread, this, &AudioSink::OnAudioPopped);
@@ -80,17 +81,17 @@ Result<already_AddRefed<MediaSink::Ended
   // To ensure at least one audio packet will be popped from AudioQueue and
   // ready to be played.
   NotifyAudioNeeded();
 
   nsresult rv = InitializeAudioStream(aParams);
   if (NS_FAILED(rv)) {
     return Err(rv);
   }
-  return mAudioStream->Start();
+  return mAudioStream->Start(aEndedPromise);
 }
 
 TimeUnit AudioSink::GetPosition() {
   int64_t tmp;
   if (mAudioStream && (tmp = mAudioStream->GetPosition()) >= 0) {
     TimeUnit pos = TimeUnit::FromMicroseconds(tmp);
     NS_ASSERTION(pos >= mLastGoodPosition,
                  "AudioStream position shouldn't go backward");
@@ -114,28 +115,70 @@ bool AudioSink::HasUnplayedFrames() {
   return mProcessedSPSCQueue->AvailableRead() ||
          (mAudioStream && mAudioStream->GetPositionInFrames() + 1 < mWritten);
 }
 
 TimeUnit AudioSink::UnplayedDuration() const {
   return TimeUnit::FromMicroseconds(AudioQueuedInRingBufferMS());
 }
 
-void AudioSink::Shutdown() {
+void AudioSink::ReenqueueUnplayedAudioDataIfNeeded() {
+  // This is OK: the AudioStream has been shut down. Shutdown guarantees that
+  // the audio callback thread won't call back again.
+  mProcessedSPSCQueue->ResetThreadIds();
+
+  // construct an AudioData
+  int sampleCount = mProcessedSPSCQueue->AvailableRead();
+  uint32_t channelCount = mConverter->OutputConfig().Channels();
+  uint32_t rate = mConverter->OutputConfig().Rate();
+  uint32_t frameCount = sampleCount / channelCount;
+
+  auto duration = FramesToTimeUnit(frameCount, rate);
+  if (!duration.IsValid()) {
+    NS_WARNING("Int overflow in AudioSink");
+    mErrored = true;
+    return;
+  }
+
+  AlignedAudioBuffer queuedAudio(sampleCount);
+  DebugOnly<int> samplesRead =
+      mProcessedSPSCQueue->Dequeue(queuedAudio.Data(), sampleCount);
+  MOZ_ASSERT(samplesRead == sampleCount);
+
+  // Extrapolate mOffset, mTime from the front of the queue.
+  // We can't really find a good value for `mOffset`, so we take what we have
+  // at the front of the queue.
+  // For `mTime`, assume there hasn't been a discontinuity recently.
+  RefPtr<AudioData> frontPacket = mAudioQueue.PeekFront();
+  RefPtr<AudioData> data =
+      new AudioData(frontPacket->mOffset, frontPacket->mTime - duration,
+                    std::move(queuedAudio), channelCount, rate);
+  MOZ_DIAGNOSTIC_ASSERT(duration == data->mDuration, "must be equal");
+
+  mAudioQueue.PushFront(data);
+}
+
+Maybe<MozPromiseHolder<MediaSink::EndedPromise>> AudioSink::Shutdown(
+    ShutdownCause aShutdownCause) {
   MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
 
   mAudioQueueListener.Disconnect();
   mAudioQueueFinishListener.Disconnect();
   mProcessedQueueListener.Disconnect();
 
+  Maybe<MozPromiseHolder<MediaSink::EndedPromise>> rv;
+
   if (mAudioStream) {
-    mAudioStream->Shutdown();
+    rv = mAudioStream->Shutdown(aShutdownCause);
     mAudioStream = nullptr;
+    ReenqueueUnplayedAudioDataIfNeeded();
   }
   mProcessedQueueFinished = true;
+
+  return rv;
 }
 
 void AudioSink::SetVolume(double aVolume) {
   if (mAudioStream) {
     mAudioStream->SetVolume(aVolume);
   }
 }
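The start time chosen for the re-enqueued packet in ReenqueueUnplayedAudioDataIfNeeded() is simply the front packet's mTime minus the duration of whatever is still sitting in the ring buffer. A small self-contained sketch of that arithmetic, using plain integers and doubles rather than the Gecko TimeUnit/AudioData types, with made-up numbers:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Assume 4800 interleaved samples are left unplayed in the ring buffer,
      // stereo at 48 kHz.
      const int64_t sampleCount = 4800;
      const uint32_t channelCount = 2;
      const uint32_t rate = 48000;

      const int64_t frameCount = sampleCount / channelCount;  // 2400 frames
      const double durationSeconds =
          static_cast<double>(frameCount) / static_cast<double>(rate);  // 0.05 s

      // If the packet at the front of the queue starts at t = 2.000 s, the
      // reconstructed packet is stamped so it ends exactly where the front
      // packet begins (assuming no recent discontinuity).
      const double frontPacketTime = 2.0;
      const double reenqueuedTime = frontPacketTime - durationSeconds;  // 1.95 s

      std::printf("re-enqueued packet: start=%.3f s, duration=%.3f s\n",
                  reenqueuedTime, durationSeconds);
      return 0;
    }
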
--- a/dom/media/mediasink/AudioSink.h
+++ b/dom/media/mediasink/AudioSink.h
@@ -37,37 +37,38 @@ class AudioSink : private AudioStream::D
   };
 
   AudioSink(AbstractThread* aThread, MediaQueue<AudioData>& aAudioQueue,
             const media::TimeUnit& aStartTime, const AudioInfo& aInfo,
             AudioDeviceInfo* aAudioDevice);
 
   ~AudioSink();
 
-  // Start audio playback and return a promise which will be resolved when the
-  // playback finishes, or return an error result if any error occurs.
-  Result<already_AddRefed<MediaSink::EndedPromise>, nsresult> Start(
-      const PlaybackParams& aParams);
+  // Start audio playback.
+  nsresult Start(const PlaybackParams& aParams,
+                 MozPromiseHolder<MediaSink::EndedPromise>& aEndedPromise);
 
   /*
   * All public functions are not thread-safe.
   * Called on the task queue of MDSM only.
   */
   media::TimeUnit GetPosition();
   media::TimeUnit GetEndTime() const;
 
   // Check whether we've pushed more frames to the audio stream than it
   // has played.
   bool HasUnplayedFrames();
 
   // The duration of the buffered frames.
   media::TimeUnit UnplayedDuration() const;
 
-  // Shut down the AudioSink's resources.
-  void Shutdown();
+  // Shut down the AudioSink's resources. If an AudioStream existed, return the
+  // ended promise it had, if it's shutting down mid-stream because it's muting.
+  Maybe<MozPromiseHolder<MediaSink::EndedPromise>> Shutdown(
+      ShutdownCause aShutdownCause = ShutdownCause::Regular);
 
   void SetVolume(double aVolume);
   void SetStreamName(const nsAString& aStreamName);
   void SetPlaybackRate(double aPlaybackRate);
   void SetPreservesPitch(bool aPreservesPitch);
   void SetPlaying(bool aPlaying);
 
   MediaEventSource<bool>& AudibleEvent() { return mAudibleEvent; }
@@ -82,16 +83,25 @@ class AudioSink : private AudioStream::D
   // Interface of AudioStream::DataSource.
   // Called on the callback thread of cubeb. Returns the number of frames that
   // were available.
   uint32_t PopFrames(AudioDataValue* aBuffer, uint32_t aFrames,
                      bool aAudioThreadChanged) override;
   bool Ended() const override;
 
+  // When shutting down, it's important not to lose any audio data: it might
+  // still be of use, in two scenarios:
+  // - If the audio is now captured to a MediaStream, whatever is enqueued in
+  //   the ring buffer needs to be played out now;
+  // - If the AudioSink is shutting down because the audio is muted, it's
+  //   important to keep the audio around in case it's quickly unmuted,
+  //   and in general to keep A/V sync correct when unmuted.
+  void ReenqueueUnplayedAudioDataIfNeeded();
+
   void CheckIsAudible(const Span<AudioDataValue>& aInterleaved,
                       size_t aChannel);
 
   // The audio stream resource. Used on the task queue of MDSM only.
   RefPtr<AudioStream> mAudioStream;
 
   // The presentation time of the first audio frame that was played.
   // We can add this to the audio stream position to determine
--- a/dom/media/mediasink/AudioSinkWrapper.cpp
+++ b/dom/media/mediasink/AudioSinkWrapper.cpp
@@ -22,16 +22,17 @@ namespace mozilla {
 using media::TimeUnit;
 
 AudioSinkWrapper::~AudioSinkWrapper() = default;
 
 void AudioSinkWrapper::Shutdown() {
   AssertOwnerThread();
   MOZ_ASSERT(!mIsStarted, "Must be called after playback stopped.");
   mCreator = nullptr;
+  mEndedPromiseHolder.ResolveIfExists(true, __func__);
 }
 
 RefPtr<MediaSink::EndedPromise> AudioSinkWrapper::OnEnded(TrackType aType) {
   AssertOwnerThread();
   MOZ_ASSERT(mIsStarted, "Must be called after playback starts.");
   if (aType == TrackInfo::kAudioTrack) {
     return mEndedPromise;
   }
@@ -183,31 +184,42 @@ nsresult AudioSinkWrapper::Start(const T
     // Resolve promise if we start playback at the end position of the audio.
     mEndedPromise =
         aInfo.HasAudio()
            ? MediaSink::EndedPromise::CreateAndResolve(true, __func__)
            : nullptr;
     return NS_OK;
   }
 
+  nsresult rv = StartAudioSink(aStartTime);
+
+  return rv;
+}
+
+nsresult AudioSinkWrapper::StartAudioSink(const TimeUnit& aStartTime) {
+  MOZ_RELEASE_ASSERT(!mAudioSink);
+
+  RefPtr<MediaSink::EndedPromise> promise =
+      mEndedPromiseHolder.Ensure(__func__);
+
   mAudioSink.reset(mCreator->Create(aStartTime));
-  Result<already_AddRefed<MediaSink::EndedPromise>, nsresult> rv =
-      mAudioSink->Start(mParams);
-  if (rv.isErr()) {
-    mEndedPromise =
-        MediaSink::EndedPromise::CreateAndReject(rv.unwrapErr(), __func__);
+  nsresult rv = mAudioSink->Start(mParams, mEndedPromiseHolder);
+  if (NS_FAILED(rv)) {
+    mEndedPromise = MediaSink::EndedPromise::CreateAndReject(rv, __func__);
   } else {
-    mEndedPromise = rv.unwrap();
+    mEndedPromise = promise;
   }
 
+  mAudioSinkEndedPromise.DisconnectIfExists();
   mEndedPromise
       ->Then(mOwnerThread.get(), __func__, this,
             &AudioSinkWrapper::OnAudioEnded, &AudioSinkWrapper::OnAudioEnded)
      ->Track(mAudioSinkEndedPromise);
-  return rv.isErr() ? rv.unwrapErr() : NS_OK;
+
+  return rv;
 }
 
 bool AudioSinkWrapper::IsAudioSourceEnded(const MediaInfo& aInfo) const {
   // no audio or empty audio queue which won't get data anymore is equivalent to
   // audio ended
   return !aInfo.HasAudio() ||
          (mAudioQueue.IsFinished() && mAudioQueue.GetSize() == 0u);
 }
@@ -216,17 +228,19 @@ void AudioSinkWrapper::Stop() {
   AssertOwnerThread();
   MOZ_ASSERT(mIsStarted, "playback not started.");
 
   mIsStarted = false;
   mAudioEnded = true;
 
   if (mAudioSink) {
     mAudioSinkEndedPromise.DisconnectIfExists();
-    mAudioSink->Shutdown();
+    DebugOnly<Maybe<MozPromiseHolder<EndedPromise>>> rv =
+        mAudioSink->Shutdown();
+    MOZ_ASSERT(rv.inspect().isNothing());
     mAudioSink = nullptr;
     mEndedPromise = nullptr;
   }
 }
 
 bool AudioSinkWrapper::IsStarted() const {
   AssertOwnerThread();
   return mIsStarted;
--- a/dom/media/mediasink/AudioSinkWrapper.h
+++ b/dom/media/mediasink/AudioSinkWrapper.h
@@ -87,27 +87,30 @@ class AudioSinkWrapper : public MediaSin
  private:
   virtual ~AudioSinkWrapper();
 
   void AssertOwnerThread() const {
     MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
   }
 
+  nsresult StartAudioSink(const media::TimeUnit& aStartTime);
+
   media::TimeUnit GetVideoPosition(TimeStamp aNow) const;
 
   void OnAudioEnded();
 
   bool IsAudioSourceEnded(const MediaInfo& aInfo) const;
 
   const RefPtr<AbstractThread> mOwnerThread;
   UniquePtr<Creator> mCreator;
   UniquePtr<AudioSink> mAudioSink;
   // Will only exist when media has an audio track.
   RefPtr<EndedPromise> mEndedPromise;
+  MozPromiseHolder<EndedPromise> mEndedPromiseHolder;
   bool mIsStarted;
   PlaybackParams mParams;
 
   TimeStamp mPlayStartTime;
   media::TimeUnit mPlayDuration;
 
   bool mAudioEnded;
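Taken together, the patch moves ownership of the "playback ended" promise out of the AudioStream and up into AudioSinkWrapper, so the promise can outlive any individual AudioStream/AudioSink torn down while muted. Below is a simplified, standalone model of that ownership handoff, using std::promise/std::future instead of MozPromise and invented names (ToyStream, aMuting); it only illustrates the handoff, not the real threading or API:

    #include <future>
    #include <iostream>
    #include <optional>
    #include <utility>

    // Stand-in for AudioStream: it borrows the "ended" promise while alive.
    struct ToyStream {
      std::promise<bool> mEnded;
      explicit ToyStream(std::promise<bool>&& aEnded) : mEnded(std::move(aEnded)) {}

      // Regular shutdown resolves the promise; shutdown-for-muting hands it back.
      std::optional<std::promise<bool>> Shutdown(bool aMuting) {
        if (!aMuting) {
          mEnded.set_value(true);
          return std::nullopt;
        }
        return std::optional<std::promise<bool>>(std::move(mEnded));
      }
    };

    int main() {
      std::promise<bool> holder;  // owned by the "wrapper"
      std::future<bool> ended = holder.get_future();

      std::optional<ToyStream> stream;
      stream.emplace(std::move(holder));  // start playback: lend the promise

      // Mute: tear the stream down but reclaim the unresolved promise.
      std::optional<std::promise<bool>> reclaimed = stream->Shutdown(true);
      stream.reset();

      // Playback reaches its end while muted: the wrapper resolves it itself.
      if (reclaimed) {
        reclaimed->set_value(true);
      }
      std::cout << "ended: " << ended.get() << '\n';  // prints "ended: 1"
      return 0;
    }

In the patch itself, AudioSinkWrapper plays the role of the wrapper: StartAudioSink() calls mEndedPromiseHolder.Ensure() before lending the holder to AudioSink::Start(), AudioSinkWrapper::Shutdown() resolves the holder if it is still outstanding, and Stop() asserts that a regular AudioSink::Shutdown() returns Nothing().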