--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -232,19 +232,16 @@ MediaDecoderStateMachine::MediaDecoderSt
mAudioCaptured(false),
mAudioCompleted(false, "MediaDecoderStateMachine::mAudioCompleted"),
mVideoCompleted(false, "MediaDecoderStateMachine::mVideoCompleted"),
mNotifyMetadataBeforeFirstFrame(false),
mDispatchedEventToDecode(false),
mQuickBuffering(false),
mMinimizePreroll(false),
mDecodeThreadWaiting(false),
- mDropAudioUntilNextDiscontinuity(false),
- mDropVideoUntilNextDiscontinuity(false),
- mCurrentTimeBeforeSeek(0),
mDecodingFirstFrame(true),
mSentLoadedMetadataEvent(false),
mSentFirstFrameLoadedEvent(false),
mSentPlaybackEndedEvent(false),
mOutputStreamManager(new OutputStreamManager()),
mResource(aDecoder->GetResource()),
mAudioOffloading(false),
mSilentDataDuration(0),
@@ -572,46 +569,22 @@ MediaDecoderStateMachine::NeedToDecodeAu
IsAudioDecoding(), mMinimizePreroll, HaveEnoughDecodedAudio());
return IsAudioDecoding() &&
mState != DECODER_STATE_SEEKING &&
((IsDecodingFirstFrame() && AudioQueue().GetSize() == 0) ||
(!mMinimizePreroll && !HaveEnoughDecodedAudio()));
}
-bool
-MediaDecoderStateMachine::IsAudioSeekComplete()
-{
- MOZ_ASSERT(OnTaskQueue());
- SAMPLE_LOG("IsAudioSeekComplete() curTarVal=%d mAudDis=%d aqFin=%d aqSz=%d",
- mCurrentSeek.Exists(), mDropAudioUntilNextDiscontinuity, AudioQueue().IsFinished(), AudioQueue().GetSize());
- return
- !HasAudio() ||
- (mCurrentSeek.Exists() &&
- !mDropAudioUntilNextDiscontinuity &&
- (AudioQueue().IsFinished() || AudioQueue().GetSize() > 0));
-}
-
-bool
-MediaDecoderStateMachine::IsVideoSeekComplete()
-{
- MOZ_ASSERT(OnTaskQueue());
- SAMPLE_LOG("IsVideoSeekComplete() curTarVal=%d mVidDis=%d vqFin=%d vqSz=%d",
- mCurrentSeek.Exists(), mDropVideoUntilNextDiscontinuity, VideoQueue().IsFinished(), VideoQueue().GetSize());
- return
- !HasVideo() ||
- (mCurrentSeek.Exists() &&
- !mDropVideoUntilNextDiscontinuity &&
- (VideoQueue().IsFinished() || VideoQueue().GetSize() > 0));
-}
-
void
MediaDecoderStateMachine::OnAudioDecoded(MediaData* aAudioSample)
{
MOZ_ASSERT(OnTaskQueue());
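+ // While a seek is in flight, decoded samples are delivered to the SeekTask
+ // rather than to this callback, so we should never be in SEEKING state here.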
+ MOZ_ASSERT(mState != DECODER_STATE_SEEKING);
+
RefPtr<MediaData> audio(aAudioSample);
MOZ_ASSERT(audio);
mAudioDataRequest.Complete();
// audio->GetEndTime() is not always monotonically increasing in chained Ogg.
mDecodedAudioEndTime = std::max(audio->GetEndTime(), mDecodedAudioEndTime);
SAMPLE_LOG("OnAudioDecoded [%lld,%lld] disc=%d",
@@ -634,53 +607,16 @@ MediaDecoderStateMachine::OnAudioDecoded
return;
}
if (mIsAudioPrerolling && DonePrerollingAudio()) {
StopPrerollingAudio();
}
return;
}
- case DECODER_STATE_SEEKING: {
- if (!mCurrentSeek.Exists()) {
- // We've received a sample from a previous decode. Discard it.
- return;
- }
- if (audio->mDiscontinuity) {
- mDropAudioUntilNextDiscontinuity = false;
- }
- if (!mDropAudioUntilNextDiscontinuity) {
- // We must be after the discontinuity; we're receiving samples
- // at or after the seek target.
- if (mCurrentSeek.mTarget.IsFast() &&
- mCurrentSeek.mTarget.GetTime().ToMicroseconds() > mCurrentTimeBeforeSeek &&
- audio->mTime < mCurrentTimeBeforeSeek) {
- // We are doing a fastSeek, but we ended up *before* the previous
- // playback position. This is surprising UX, so switch to an accurate
- // seek and decode to the seek target. This is not conformant to the
- // spec, fastSeek should always be fast, but until we get the time to
- // change all Readers to seek to the keyframe after the currentTime
- // in this case, we'll just decode forward. Bug 1026330.
- mCurrentSeek.mTarget.SetType(SeekTarget::Accurate);
- }
- if (mCurrentSeek.mTarget.IsFast()) {
- // Non-precise seek; we can stop the seek at the first sample.
- Push(audio, MediaData::AUDIO_DATA);
- } else {
- // We're doing an accurate seek. We must discard
- // MediaData up to the one containing exact seek target.
- if (NS_FAILED(DropAudioUpToSeekTarget(audio))) {
- DecodeError();
- return;
- }
- }
- }
- CheckIfSeekComplete();
- return;
- }
default: {
// Ignore other cases.
return;
}
}
}
void
@@ -749,16 +685,18 @@ MediaDecoderStateMachine::OnVideoPopped(
MaybeStartBuffering();
}
void
MediaDecoderStateMachine::OnNotDecoded(MediaData::Type aType,
MediaDecoderReader::NotDecodedReason aReason)
{
MOZ_ASSERT(OnTaskQueue());
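+ // Decode failures that occur during a seek are reported to the SeekTask,
+ // not to this callback.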
+ MOZ_ASSERT(mState != DECODER_STATE_SEEKING);
+
SAMPLE_LOG("OnNotDecoded (aType=%u, aReason=%u)", aType, aReason);
bool isAudio = aType == MediaData::AUDIO_DATA;
MOZ_ASSERT_IF(!isAudio, aType == MediaData::VIDEO_DATA);
if (isAudio) {
mAudioDataRequest.Complete();
} else {
mVideoDataRequest.Complete();
@@ -816,25 +754,16 @@ MediaDecoderStateMachine::OnNotDecoded(M
EnsureVideoDecodeTaskQueued();
}
return;
}
// This is an EOS. Finish off the queue, and then handle things based on our
// state.
MOZ_ASSERT(aReason == MediaDecoderReader::END_OF_STREAM);
- if (!isAudio && mState == DECODER_STATE_SEEKING &&
- mCurrentSeek.Exists() && mFirstVideoFrameAfterSeek) {
- // Null sample. Hit end of stream. If we have decoded a frame,
- // insert it into the queue so that we have something to display.
- // We make sure to do this before invoking VideoQueue().Finish()
- // below.
- Push(mFirstVideoFrameAfterSeek, MediaData::VIDEO_DATA);
- mFirstVideoFrameAfterSeek = nullptr;
- }
if (isAudio) {
AudioQueue().Finish();
StopPrerollingAudio();
} else {
VideoQueue().Finish();
StopPrerollingVideo();
}
switch (mState) {
@@ -846,31 +775,16 @@ MediaDecoderStateMachine::OnNotDecoded(M
CheckIfDecodeComplete();
// Schedule next cycle to see if we can leave buffering state.
if (mState == DECODER_STATE_BUFFERING) {
ScheduleStateMachine();
}
return;
}
- case DECODER_STATE_SEEKING: {
- if (!mCurrentSeek.Exists()) {
- // We've received a sample from a previous decode. Discard it.
- return;
- }
-
- if (isAudio) {
- mDropAudioUntilNextDiscontinuity = false;
- } else {
- mDropVideoUntilNextDiscontinuity = false;
- }
-
- CheckIfSeekComplete();
- return;
- }
default: {
return;
}
}
}
bool
MediaDecoderStateMachine::MaybeFinishDecodeFirstFrame()
@@ -892,16 +806,18 @@ MediaDecoderStateMachine::MaybeFinishDec
return true;
}
void
MediaDecoderStateMachine::OnVideoDecoded(MediaData* aVideoSample,
TimeStamp aDecodeStartTime)
{
MOZ_ASSERT(OnTaskQueue());
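+ // As with audio, video samples decoded during a seek go to the SeekTask.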
+ MOZ_ASSERT(mState != DECODER_STATE_SEEKING);
+
RefPtr<MediaData> video(aVideoSample);
MOZ_ASSERT(video);
mVideoDataRequest.Complete();
// Handle abnormal or negative timestamps.
mDecodedVideoEndTime = std::max(mDecodedVideoEndTime, video->GetEndTime());
SAMPLE_LOG("OnVideoDecoded [%lld,%lld] disc=%d",
@@ -944,96 +860,23 @@ MediaDecoderStateMachine::OnVideoDecoded
std::min(THRESHOLD_FACTOR * DurationToUsecs(decodeTime), mAmpleAudioThresholdUsecs);
mAmpleAudioThresholdUsecs = std::max(THRESHOLD_FACTOR * mLowAudioThresholdUsecs,
mAmpleAudioThresholdUsecs);
DECODER_LOG("Slow video decode, set mLowAudioThresholdUsecs=%lld mAmpleAudioThresholdUsecs=%lld",
mLowAudioThresholdUsecs, mAmpleAudioThresholdUsecs);
}
return;
}
- case DECODER_STATE_SEEKING: {
- if (!mCurrentSeek.Exists()) {
- // We've received a sample from a previous decode. Discard it.
- return;
- }
- if (mDropVideoUntilNextDiscontinuity) {
- if (video->mDiscontinuity) {
- mDropVideoUntilNextDiscontinuity = false;
- }
- }
- if (!mDropVideoUntilNextDiscontinuity) {
- // We must be after the discontinuity; we're receiving samples
- // at or after the seek target.
- if (mCurrentSeek.mTarget.IsFast() &&
- mCurrentSeek.mTarget.GetTime().ToMicroseconds() > mCurrentTimeBeforeSeek &&
- video->mTime < mCurrentTimeBeforeSeek) {
- // We are doing a fastSeek, but we ended up *before* the previous
- // playback position. This is surprising UX, so switch to an accurate
- // seek and decode to the seek target. This is not conformant to the
- // spec, fastSeek should always be fast, but until we get the time to
- // change all Readers to seek to the keyframe after the currentTime
- // in this case, we'll just decode forward. Bug 1026330.
- mCurrentSeek.mTarget.SetType(SeekTarget::Accurate);
- }
- if (mCurrentSeek.mTarget.IsFast()) {
- // Non-precise seek. We can stop the seek at the first sample.
- Push(video, MediaData::VIDEO_DATA);
- } else {
- // We're doing an accurate seek. We still need to discard
- // MediaData up to the one containing exact seek target.
- if (NS_FAILED(DropVideoUpToSeekTarget(video))) {
- DecodeError();
- return;
- }
- }
- }
- CheckIfSeekComplete();
- return;
- }
default: {
// Ignore other cases.
return;
}
}
}
-void
-MediaDecoderStateMachine::CheckIfSeekComplete()
-{
- MOZ_ASSERT(OnTaskQueue());
- MOZ_ASSERT(mState == DECODER_STATE_SEEKING);
-
- const bool videoSeekComplete = IsVideoSeekComplete();
- if (HasVideo() && !videoSeekComplete) {
- // We haven't reached the target. Ensure we have requested another sample.
- if (NS_FAILED(EnsureVideoDecodeTaskQueued())) {
- DECODER_WARN("Failed to request video during seek");
- DecodeError();
- }
- }
-
- const bool audioSeekComplete = IsAudioSeekComplete();
- if (HasAudio() && !audioSeekComplete) {
- // We haven't reached the target. Ensure we have requested another sample.
- if (NS_FAILED(EnsureAudioDecodeTaskQueued())) {
- DECODER_WARN("Failed to request audio during seek");
- DecodeError();
- }
- }
-
- SAMPLE_LOG("CheckIfSeekComplete() audioSeekComplete=%d videoSeekComplete=%d",
- audioSeekComplete, videoSeekComplete);
-
- if (audioSeekComplete && videoSeekComplete) {
- NS_ASSERTION(AudioQueue().GetSize() <= 1, "Should decode at most one sample");
- NS_ASSERTION(VideoQueue().GetSize() <= 1, "Should decode at most one sample");
- SeekCompleted();
- }
-}
-
bool
MediaDecoderStateMachine::IsAudioDecoding()
{
MOZ_ASSERT(OnTaskQueue());
return HasAudio() && !AudioQueue().IsFinished();
}
bool
@@ -1281,35 +1124,42 @@ MediaDecoderStateMachine::SetDormant(boo
mPendingDormant.reset();
DECODER_LOG("SetDormant=%d", aDormant);
if (aDormant) {
if (mState == DECODER_STATE_SEEKING) {
if (mQueuedSeek.Exists()) {
// Keep latest seek target
- } else if (mCurrentSeek.Exists()) {
- mQueuedSeek = Move(mCurrentSeek);
+ } else if (mSeekTask && mSeekTask->Exists()) {
+ mQueuedSeek = Move(mSeekTask->GetSeekJob());
+ mSeekTaskRequest.DisconnectIfExists();
} else {
mQueuedSeek.mTarget = SeekTarget(mCurrentPosition,
SeekTarget::Accurate,
MediaDecoderEventVisibility::Suppressed);
// XXXbholley - Nobody is listening to this promise. Do we need to pass it
// back to MediaDecoder when we come out of dormant?
RefPtr<MediaDecoder::SeekPromise> unused = mQueuedSeek.mPromise.Ensure(__func__);
}
} else {
mQueuedSeek.mTarget = SeekTarget(mCurrentPosition,
SeekTarget::Accurate,
MediaDecoderEventVisibility::Suppressed);
// XXXbholley - Nobody is listening to this promise. Do we need to pass it
// back to MediaDecoder when we come out of dormant?
RefPtr<MediaDecoder::SeekPromise> unused = mQueuedSeek.mPromise.Ensure(__func__);
}
- mCurrentSeek.RejectIfExists(__func__);
+
+ // Discard the current seek task.
+ if (mSeekTask) {
+ mSeekTask->Discard();
+ mSeekTask = nullptr;
+ }
+
SetState(DECODER_STATE_DORMANT);
if (IsPlaying()) {
StopPlayback();
}
Reset();
// Note that we do not wait for the decode task queue to go idle before
@@ -1336,17 +1186,20 @@ MediaDecoderStateMachine::Shutdown()
// Change state before issuing shutdown request to threads so those
// threads can start exiting cleanly during the Shutdown call.
ScheduleStateMachine();
SetState(DECODER_STATE_SHUTDOWN);
mBufferedUpdateRequest.DisconnectIfExists();
mQueuedSeek.RejectIfExists(__func__);
- mCurrentSeek.RejectIfExists(__func__);
+ if (mSeekTask) {
+ mSeekTask->Discard();
+ mSeekTask = nullptr;
+ }
#ifdef MOZ_EME
mCDMProxyPromise.DisconnectIfExists();
#endif
if (IsPlaying()) {
StopPlayback();
}
@@ -1525,17 +1378,17 @@ MediaDecoderStateMachine::Seek(SeekTarge
mQueuedSeek.RejectIfExists(__func__);
DECODER_LOG("Changed state to SEEKING (to %lld)", aTarget.GetTime().ToMicroseconds());
SetState(DECODER_STATE_SEEKING);
SeekJob seekJob;
seekJob.mTarget = aTarget;
InitiateSeek(Move(seekJob));
- return mCurrentSeek.mPromise.Ensure(__func__);
+ return mSeekTask->GetSeekJob().mPromise.Ensure(__func__);
}
RefPtr<MediaDecoder::SeekPromise>
MediaDecoderStateMachine::InvokeSeek(SeekTarget aTarget)
{
return InvokeAsync(OwnerThread(), this, __func__,
&MediaDecoderStateMachine::Seek, aTarget);
}
@@ -1609,60 +1462,116 @@ MediaDecoderStateMachine::DispatchDecode
}
}
void
MediaDecoderStateMachine::InitiateSeek(SeekJob aSeekJob)
{
MOZ_ASSERT(OnTaskQueue());
- mCurrentSeek.RejectIfExists(__func__);
- mCurrentSeek = Move(aSeekJob);
-
- // Bound the seek time to be inside the media range.
- int64_t end = Duration().ToMicroseconds();
- NS_ASSERTION(end != -1, "Should know end time by now");
- int64_t seekTime = mCurrentSeek.mTarget.GetTime().ToMicroseconds();
- seekTime = std::min(seekTime, end);
- seekTime = std::max(int64_t(0), seekTime);
- NS_ASSERTION(seekTime >= 0 && seekTime <= end,
- "Can only seek in range [0,duration]");
- mCurrentSeek.mTarget.SetTime(media::TimeUnit::FromMicroseconds(seekTime));
-
- mDropAudioUntilNextDiscontinuity = HasAudio();
- mDropVideoUntilNextDiscontinuity = HasVideo();
- mCurrentTimeBeforeSeek = GetMediaTime();
+ // Discard the existing seek task.
+ if (mSeekTask) {
+ mSeekTask->Discard();
+ }
+
+ mSeekTaskRequest.DisconnectIfExists();
+
+ // Create a new SeekTask instance for the incoming seek job.
+ mSeekTask = SeekTask::CreateSeekTask(mDecoderID, OwnerThread(), mReader.get(),
+ mReaderWrapper.get(), Move(aSeekJob),
+ mInfo, Duration(), GetMediaTime());
// Stop playback now to ensure that while we're outside the monitor
// dispatching SeekingStarted, playback doesn't advance and mess with
// mCurrentPosition that we're setting to the seek target here.
StopPlayback();
- UpdatePlaybackPositionInternal(mCurrentSeek.mTarget.GetTime().ToMicroseconds());
-
- mOnSeekingStart.Notify(mCurrentSeek.mTarget.mEventVisibility);
+ UpdatePlaybackPositionInternal(mSeekTask->GetSeekJob().mTarget.GetTime().ToMicroseconds());
+
+ mOnSeekingStart.Notify(mSeekTask->GetSeekJob().mTarget.mEventVisibility);
// Reset our state machine and decoding pipeline before seeking.
- Reset();
+ if (mSeekTask->NeedToResetMDSM()) {
+ Reset();
+ }
// Do the seek.
- RefPtr<MediaDecoderStateMachine> self = this;
- mSeekRequest.Begin(
- mReaderWrapper->Seek(mCurrentSeek.mTarget, Duration())
- ->Then(OwnerThread(), __func__,
- [self] (media::TimeUnit) -> void {
- self->mSeekRequest.Complete();
- // We must decode the first samples of active streams, so we can determine
- // the new stream time. So dispatch tasks to do that.
- self->EnsureAudioDecodeTaskQueued();
- self->EnsureVideoDecodeTaskQueued();
- }, [self] (nsresult aResult) -> void {
- self->mSeekRequest.Complete();
- MOZ_ASSERT(NS_FAILED(aResult), "Cancels should also disconnect mSeekRequest");
- self->DecodeError();
- }));
+ mSeekTaskRequest.Begin(mSeekTask->Seek(Duration())
+ ->Then(OwnerThread(), __func__, this,
+ &MediaDecoderStateMachine::OnSeekTaskResolved,
+ &MediaDecoderStateMachine::OnSeekTaskRejected));
+}
+
+void
+MediaDecoderStateMachine::OnSeekTaskResolved(SeekTaskResolveValue aValue)
+{
+ MOZ_ASSERT(OnTaskQueue());
+
+ mSeekTaskRequest.Complete();
+
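+ // The SeekTask hands back the first samples it decoded at or after the seek
+ // target; push them so SeekCompleted() can peek the queues to compute the
+ // new playback position.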
+ if (aValue.mSeekedAudioData) {
+ Push(aValue.mSeekedAudioData.get(), MediaData::AUDIO_DATA);
+ mDecodedAudioEndTime =
+ std::max(aValue.mSeekedAudioData->GetEndTime(), mDecodedAudioEndTime);
+ }
+
+ if (aValue.mSeekedVideoData) {
+ Push(aValue.mSeekedVideoData.get(), MediaData::VIDEO_DATA);
+ mDecodedVideoEndTime =
+ std::max(aValue.mSeekedVideoData->GetEndTime(), mDecodedVideoEndTime);
+ }
+
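+ // The seek may have run into end of stream; mirror the usual EOS handling by
+ // finishing the affected queue and stopping its preroll.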
+ if (aValue.mIsAudioQueueFinished) {
+ AudioQueue().Finish();
+ StopPrerollingAudio();
+ }
+
+ if (aValue.mIsVideoQueueFinished) {
+ VideoQueue().Finish();
+ StopPrerollingVideo();
+ }
+
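+ // Honor the SeekTask's requests to stop prerolling.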
+ if (aValue.mNeedToStopPrerollingAudio) {
+ StopPrerollingAudio();
+ }
+
+ if (aValue.mNeedToStopPrerollingVideo) {
+ StopPrerollingVideo();
+ }
+
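+ // Everything needed to finish the seek is queued; update the playback
+ // position and resolve the seek promise.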
+ SeekCompleted();
+
+ mSeekTask = nullptr;
+}
+
+void
+MediaDecoderStateMachine::OnSeekTaskRejected(SeekTaskRejectValue aValue)
+{
+ MOZ_ASSERT(OnTaskQueue());
+
+ mSeekTaskRequest.Complete();
+
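+ // Even though the seek failed, the reader may have reached end of stream;
+ // keep the queue and preroll state consistent before reporting the error.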
+ if (aValue.mIsAudioQueueFinished) {
+ AudioQueue().Finish();
+ StopPrerollingAudio();
+ }
+
+ if (aValue.mIsVideoQueueFinished) {
+ VideoQueue().Finish();
+ StopPrerollingVideo();
+ }
+
+ if (aValue.mNeedToStopPrerollingAudio) {
+ StopPrerollingAudio();
+ }
+
+ if (aValue.mNeedToStopPrerollingVideo) {
+ StopPrerollingVideo();
+ }
+
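+ // Propagate the failure as a decode error.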
+ DecodeError();
+
+ mSeekTask = nullptr;
}
nsresult
MediaDecoderStateMachine::DispatchAudioDecodeTaskIfNeeded()
{
MOZ_ASSERT(OnTaskQueue());
if (IsShutdown()) {
@@ -1675,28 +1584,28 @@ MediaDecoderStateMachine::DispatchAudioD
return NS_OK;
}
nsresult
MediaDecoderStateMachine::EnsureAudioDecodeTaskQueued()
{
MOZ_ASSERT(OnTaskQueue());
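+ // Decode tasks are no longer scheduled from here while seeking; the SeekTask
+ // issues its own sample requests.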
+ MOZ_ASSERT(mState != DECODER_STATE_SEEKING);
SAMPLE_LOG("EnsureAudioDecodeTaskQueued isDecoding=%d status=%s",
IsAudioDecoding(), AudioRequestStatus());
if (mState != DECODER_STATE_DECODING &&
- mState != DECODER_STATE_BUFFERING &&
- mState != DECODER_STATE_SEEKING) {
+ mState != DECODER_STATE_BUFFERING) {
return NS_OK;
}
if (!IsAudioDecoding() || mAudioDataRequest.Exists() ||
- mAudioWaitRequest.Exists() || mSeekRequest.Exists()) {
+ mAudioWaitRequest.Exists()) {
return NS_OK;
}
RequestAudioData();
return NS_OK;
}
void
@@ -1729,28 +1638,28 @@ MediaDecoderStateMachine::DispatchVideoD
return NS_OK;
}
nsresult
MediaDecoderStateMachine::EnsureVideoDecodeTaskQueued()
{
MOZ_ASSERT(OnTaskQueue());
+ MOZ_ASSERT(mState != DECODER_STATE_SEEKING);
SAMPLE_LOG("EnsureVideoDecodeTaskQueued isDecoding=%d status=%s",
IsVideoDecoding(), VideoRequestStatus());
if (mState != DECODER_STATE_DECODING &&
- mState != DECODER_STATE_BUFFERING &&
- mState != DECODER_STATE_SEEKING) {
+ mState != DECODER_STATE_BUFFERING) {
return NS_OK;
}
if (!IsVideoDecoding() || mVideoDataRequest.Exists() ||
- mVideoWaitRequest.Exists() || mSeekRequest.Exists()) {
+ mVideoWaitRequest.Exists()) {
return NS_OK;
}
RequestVideoData();
return NS_OK;
}
void
@@ -2051,17 +1960,17 @@ MediaDecoderStateMachine::FinishDecodeFi
}
void
MediaDecoderStateMachine::SeekCompleted()
{
MOZ_ASSERT(OnTaskQueue());
MOZ_ASSERT(mState == DECODER_STATE_SEEKING);
- int64_t seekTime = mCurrentSeek.mTarget.GetTime().ToMicroseconds();
+ int64_t seekTime = mSeekTask->GetSeekJob().mTarget.GetTime().ToMicroseconds();
int64_t newCurrentTime = seekTime;
// Setup timestamp state.
RefPtr<MediaData> video = VideoQueue().PeekFront();
if (seekTime == Duration().ToMicroseconds()) {
newCurrentTime = seekTime;
} else if (HasAudio()) {
MediaData* audio = AudioQueue().PeekFront();
@@ -2096,17 +2005,17 @@ MediaDecoderStateMachine::SeekCompleted(
nextState = DECODER_STATE_COMPLETED;
} else {
DECODER_LOG("Changed state from SEEKING (to %lld) to DECODING", seekTime);
nextState = DECODER_STATE_DECODING;
}
// We want to resolve the seek request prior to finishing the first frame
// to ensure that the seeked event is fired before loadeddata.
- mCurrentSeek.Resolve(nextState == DECODER_STATE_COMPLETED, __func__);
+ mSeekTask->GetSeekJob().Resolve(nextState == DECODER_STATE_COMPLETED, __func__);
if (mDecodingFirstFrame) {
// We were resuming from dormant, or initiated a seek early.
// We can fire loadeddata now.
FinishDecodeFirstFrame();
}
if (nextState == DECODER_STATE_DECODING) {
@@ -2346,26 +2255,23 @@ MediaDecoderStateMachine::Reset()
StopMediaSink();
mDecodedVideoEndTime = 0;
mDecodedAudioEndTime = 0;
mAudioCompleted = false;
mVideoCompleted = false;
AudioQueue().Reset();
VideoQueue().Reset();
- mFirstVideoFrameAfterSeek = nullptr;
- mDropAudioUntilNextDiscontinuity = true;
- mDropVideoUntilNextDiscontinuity = true;
mMetadataRequest.DisconnectIfExists();
mAudioDataRequest.DisconnectIfExists();
mAudioWaitRequest.DisconnectIfExists();
mVideoDataRequest.DisconnectIfExists();
mVideoWaitRequest.DisconnectIfExists();
- mSeekRequest.DisconnectIfExists();
+ mSeekTaskRequest.DisconnectIfExists();
mPlaybackOffset = 0;
nsCOMPtr<nsIRunnable> resetTask =
NS_NewRunnableMethod(mReader, &MediaDecoderReader::ResetDecode);
DecodeTaskQueue()->Dispatch(resetTask.forget());
}
@@ -2414,129 +2320,16 @@ MediaDecoderStateMachine::UpdatePlayback
// Otherwise, MediaDecoder::AddOutputStream could kick in when we are outside
// the monitor and get a stale value from GetCurrentTimeUs() which hits the
// assertion in GetClock().
int64_t delay = std::max<int64_t>(1, AUDIO_DURATION_USECS / mPlaybackRate);
ScheduleStateMachineIn(delay);
}
-nsresult
-MediaDecoderStateMachine::DropVideoUpToSeekTarget(MediaData* aSample)
-{
- MOZ_ASSERT(OnTaskQueue());
- RefPtr<VideoData> video(aSample->As<VideoData>());
- MOZ_ASSERT(video);
- DECODER_LOG("DropVideoUpToSeekTarget() frame [%lld, %lld]",
- video->mTime, video->GetEndTime());
- MOZ_ASSERT(mCurrentSeek.Exists());
- const int64_t target = mCurrentSeek.mTarget.GetTime().ToMicroseconds();
-
- // If the frame end time is less than the seek target, we won't want
- // to display this frame after the seek, so discard it.
- if (target >= video->GetEndTime()) {
- DECODER_LOG("DropVideoUpToSeekTarget() pop video frame [%lld, %lld] target=%lld",
- video->mTime, video->GetEndTime(), target);
- mFirstVideoFrameAfterSeek = video;
- } else {
- if (target >= video->mTime && video->GetEndTime() >= target) {
- // The seek target lies inside this frame's time slice. Adjust the frame's
- // start time to match the seek target. We do this by replacing the
- // first frame with a shallow copy which has the new timestamp.
- RefPtr<VideoData> temp = VideoData::ShallowCopyUpdateTimestamp(video, target);
- video = temp;
- }
- mFirstVideoFrameAfterSeek = nullptr;
-
- DECODER_LOG("DropVideoUpToSeekTarget() found video frame [%lld, %lld] containing target=%lld",
- video->mTime, video->GetEndTime(), target);
-
- MOZ_ASSERT(VideoQueue().GetSize() == 0, "Should be the 1st sample after seeking");
- Push(video, MediaData::VIDEO_DATA);
- }
-
- return NS_OK;
-}
-
-nsresult
-MediaDecoderStateMachine::DropAudioUpToSeekTarget(MediaData* aSample)
-{
- MOZ_ASSERT(OnTaskQueue());
- RefPtr<AudioData> audio(aSample->As<AudioData>());
- MOZ_ASSERT(audio &&
- mCurrentSeek.Exists() &&
- mCurrentSeek.mTarget.IsAccurate());
-
- CheckedInt64 sampleDuration =
- FramesToUsecs(audio->mFrames, mInfo.mAudio.mRate);
- if (!sampleDuration.isValid()) {
- return NS_ERROR_FAILURE;
- }
-
- if (audio->mTime + sampleDuration.value() <= mCurrentSeek.mTarget.GetTime().ToMicroseconds()) {
- // Our seek target lies after the frames in this AudioData. Don't
- // push it onto the audio queue, and keep decoding forwards.
- return NS_OK;
- }
-
- if (audio->mTime > mCurrentSeek.mTarget.GetTime().ToMicroseconds()) {
- // The seek target doesn't lie in the audio block just after the last
- // audio frames we've seen which were before the seek target. This
- // could have been the first audio data we've seen after seek, i.e. the
- // seek terminated after the seek target in the audio stream. Just
- // abort the audio decode-to-target, the state machine will play
- // silence to cover the gap. Typically this happens in poorly muxed
- // files.
- DECODER_WARN("Audio not synced after seek, maybe a poorly muxed file?");
- Push(audio, MediaData::AUDIO_DATA);
- return NS_OK;
- }
-
- // The seek target lies somewhere in this AudioData's frames, strip off
- // any frames which lie before the seek target, so we'll begin playback
- // exactly at the seek target.
- NS_ASSERTION(mCurrentSeek.mTarget.GetTime().ToMicroseconds() >= audio->mTime,
- "Target must at or be after data start.");
- NS_ASSERTION(mCurrentSeek.mTarget.GetTime().ToMicroseconds() < audio->mTime + sampleDuration.value(),
- "Data must end after target.");
-
- CheckedInt64 framesToPrune =
- UsecsToFrames(mCurrentSeek.mTarget.GetTime().ToMicroseconds() - audio->mTime, mInfo.mAudio.mRate);
- if (!framesToPrune.isValid()) {
- return NS_ERROR_FAILURE;
- }
- if (framesToPrune.value() > audio->mFrames) {
- // We've messed up somehow. Don't try to trim frames, the |frames|
- // variable below will overflow.
- DECODER_WARN("Can't prune more frames that we have!");
- return NS_ERROR_FAILURE;
- }
- uint32_t frames = audio->mFrames - static_cast<uint32_t>(framesToPrune.value());
- uint32_t channels = audio->mChannels;
- auto audioData = MakeUnique<AudioDataValue[]>(frames * channels);
- memcpy(audioData.get(),
- audio->mAudioData.get() + (framesToPrune.value() * channels),
- frames * channels * sizeof(AudioDataValue));
- CheckedInt64 duration = FramesToUsecs(frames, mInfo.mAudio.mRate);
- if (!duration.isValid()) {
- return NS_ERROR_FAILURE;
- }
- RefPtr<AudioData> data(new AudioData(audio->mOffset,
- mCurrentSeek.mTarget.GetTime().ToMicroseconds(),
- duration.value(),
- frames,
- Move(audioData),
- channels,
- audio->mRate));
- MOZ_ASSERT(AudioQueue().GetSize() == 0, "Should be the 1st sample after seeking");
- Push(data, MediaData::AUDIO_DATA);
-
- return NS_OK;
-}
-
void MediaDecoderStateMachine::UpdateNextFrameStatus()
{
MOZ_ASSERT(OnTaskQueue());
MediaDecoderOwner::NextFrameStatus status;
const char* statusString;
if (mState <= DECODER_STATE_WAIT_FOR_CDM || IsDecodingFirstFrame()) {
status = MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE;