author | Bobby Holley <bobbyholley@gmail.com>
date | Wed, 29 Apr 2015 17:07:55 -0700
changeset 242226 | 3188222d068229029a623956ce16b9eb2fac8643
parent 242225 | a6b50156107b9c32230c7c214b68e48e92f7958f
child 242227 | 41b7be6965cd0fa5222cd39db8f2bf028486230b
push id | 59346
push user | bobbyholley@gmail.com
push date | Mon, 04 May 2015 18:15:02 +0000
treeherder | mozilla-inbound@be04da4b954d
reviewers | jww
bugs | 1159974
milestone | 40.0a1
dom/media/MediaDecoderStateMachine.cpp
dom/media/MediaDecoderStateMachine.h
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -309,30 +309,32 @@ MediaDecoderStateMachine::Initialization
   mNextPlayState.Connect(mDecoder->CanonicalNextPlayState());
 
   // Initialize watchers.
   mWatchManager.Watch(mState, &MediaDecoderStateMachine::UpdateNextFrameStatus);
   mWatchManager.Watch(mAudioCompleted, &MediaDecoderStateMachine::UpdateNextFrameStatus);
 }
 
 bool MediaDecoderStateMachine::HasFutureAudio() {
+  MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
   NS_ASSERTION(HasAudio(), "Should only call HasFutureAudio() when we have audio");
   // We've got audio ready to play if:
   // 1. We've not completed playback of audio, and
   // 2. we either have more than the threshold of decoded audio available, or
   //    we've completely decoded all audio (but not finished playing it yet
   //    as per 1).
   return !mAudioCompleted &&
          (AudioDecodedUsecs() > mLowAudioThresholdUsecs * mPlaybackRate ||
           AudioQueue().IsFinished());
 }
 
 bool MediaDecoderStateMachine::HaveNextFrameData() {
+  MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
   return (!HasAudio() || HasFutureAudio()) &&
          (!HasVideo() || VideoQueue().GetSize() > 0);
 }
 
 int64_t MediaDecoderStateMachine::GetDecodedAudioDuration()
 {
   MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
@@ -562,26 +564,28 @@ void MediaDecoderStateMachine::SendStrea
     if (finished && AudioQueue().GetSize() == 0) {
       mAudioCompleted = true;
     }
   }
 }
 
 MediaDecoderStateMachine::WakeDecoderRunnable*
 MediaDecoderStateMachine::GetWakeDecoderRunnable()
 {
+  MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
 
   if (!mPendingWakeDecoder.get()) {
     mPendingWakeDecoder = new WakeDecoderRunnable(this);
   }
   return mPendingWakeDecoder.get();
 }
 
 bool MediaDecoderStateMachine::HaveEnoughDecodedAudio(int64_t aAmpleAudioUSecs)
 {
+  MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
 
   if (AudioQueue().GetSize() == 0 ||
       GetDecodedAudioDuration() < aAmpleAudioUSecs) {
     return false;
   }
   if (!mAudioCaptured) {
     return true;
@@ -599,16 +603,17 @@ bool MediaDecoderStateMachine::HaveEnoug
       TaskQueue(), GetWakeDecoderRunnable());
   }
 
   return true;
 }
 
 bool MediaDecoderStateMachine::HaveEnoughDecodedVideo()
 {
+  MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
 
   if (static_cast<uint32_t>(VideoQueue().GetSize()) <
       GetAmpleVideoFrames() * mPlaybackRate) {
     return false;
   }
 
   DecodedStreamData* stream = mDecoder->GetDecodedStream();
@@ -623,27 +628,29 @@ bool MediaDecoderStateMachine::HaveEnoug
   }
 
   return true;
 }
 
 bool
 MediaDecoderStateMachine::NeedToDecodeVideo()
 {
+  MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
   return IsVideoDecoding() &&
          ((mState == DECODER_STATE_SEEKING && mDecodeToSeekTarget) ||
           (mState == DECODER_STATE_DECODING_FIRSTFRAME &&
            IsVideoDecoding() && VideoQueue().GetSize() == 0) ||
           (!mMinimizePreroll && !HaveEnoughDecodedVideo()));
 }
 
 bool
 MediaDecoderStateMachine::NeedToSkipToNextKeyframe()
 {
+  MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
   if (mState == DECODER_STATE_DECODING_FIRSTFRAME) {
     return false;
   }
   MOZ_ASSERT(mState == DECODER_STATE_DECODING ||
              mState == DECODER_STATE_BUFFERING ||
              mState == DECODER_STATE_SEEKING);
@@ -683,16 +690,17 @@ MediaDecoderStateMachine::NeedToSkipToNe
   }
 
   return false;
 }
 
 bool
 MediaDecoderStateMachine::NeedToDecodeAudio()
 {
+  MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
   SAMPLE_LOG("NeedToDecodeAudio() isDec=%d decToTar=%d minPrl=%d seek=%d enufAud=%d",
              IsAudioDecoding(), mDecodeToSeekTarget, mMinimizePreroll,
              mState == DECODER_STATE_SEEKING,
              HaveEnoughDecodedAudio(mAmpleAudioThresholdUsecs * mPlaybackRate));
 
   return IsAudioDecoding() &&
          ((mState == DECODER_STATE_SEEKING && mDecodeToSeekTarget) ||
@@ -701,29 +709,31 @@ MediaDecoderStateMachine::NeedToDecodeAu
           (!mMinimizePreroll &&
            !HaveEnoughDecodedAudio(mAmpleAudioThresholdUsecs * mPlaybackRate) &&
            (mState != DECODER_STATE_SEEKING || mDecodeToSeekTarget)));
 }
 
 bool
 MediaDecoderStateMachine::IsAudioSeekComplete()
 {
+  MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
   SAMPLE_LOG("IsAudioSeekComplete() curTarVal=%d mAudDis=%d aqFin=%d aqSz=%d",
     mCurrentSeek.Exists(), mDropAudioUntilNextDiscontinuity,
     AudioQueue().IsFinished(), AudioQueue().GetSize());
   return !HasAudio() ||
          (mCurrentSeek.Exists() &&
           !mDropAudioUntilNextDiscontinuity &&
           (AudioQueue().IsFinished() || AudioQueue().GetSize() > 0));
 }
 
 bool
 MediaDecoderStateMachine::IsVideoSeekComplete()
 {
+  MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
   SAMPLE_LOG("IsVideoSeekComplete() curTarVal=%d mVidDis=%d vqFin=%d vqSz=%d",
     mCurrentSeek.Exists(), mDropVideoUntilNextDiscontinuity,
     VideoQueue().IsFinished(), VideoQueue().GetSize());
   return !HasVideo() ||
          (mCurrentSeek.Exists() &&
          !mDropVideoUntilNextDiscontinuity &&
          (VideoQueue().IsFinished() || VideoQueue().GetSize() > 0));
@@ -1138,23 +1148,25 @@ MediaDecoderStateMachine::CheckIfSeekCom
     mDecodeToSeekTarget = false;
     SeekCompleted();
   }
 }
 
 bool MediaDecoderStateMachine::IsAudioDecoding()
 {
+  MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
   return HasAudio() && !AudioQueue().IsFinished();
 }
 
 bool MediaDecoderStateMachine::IsVideoDecoding()
 {
+  MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
   return HasVideo() && !VideoQueue().IsFinished();
 }
 
 void MediaDecoderStateMachine::CheckIfDecodeComplete()
 {
   MOZ_ASSERT(OnTaskQueue());
@@ -1748,16 +1760,17 @@ void MediaDecoderStateMachine::StopAudio
   }
 
   // Wake up those waiting for audio sink to finish.
   mDecoder->GetReentrantMonitor().NotifyAll();
 }
 
 nsresult MediaDecoderStateMachine::EnqueueDecodeFirstFrameTask()
 {
+  MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
   MOZ_ASSERT(mState == DECODER_STATE_DECODING_FIRSTFRAME);
 
   nsCOMPtr<nsIRunnable> task(
     NS_NewRunnableMethod(this, &MediaDecoderStateMachine::CallDecodeFirstFrame));
   TaskQueue()->Dispatch(task.forget());
   return NS_OK;
 }
@@ -2036,16 +2049,17 @@ MediaDecoderStateMachine::StartAudioThre
     mAudioSink->SetPlaybackRate(mPlaybackRate);
     mAudioSink->SetPreservesPitch(mPreservesPitch);
   }
   return NS_OK;
 }
 
 int64_t MediaDecoderStateMachine::AudioDecodedUsecs()
 {
+  MOZ_ASSERT(OnTaskQueue());
   NS_ASSERTION(HasAudio(),
                "Should only call AudioDecodedUsecs() when we have audio");
   // The amount of audio we have decoded is the amount of audio data we've
   // already decoded and pushed to the hardware, plus the amount of audio
   // data waiting to be pushed to the hardware.
   int64_t pushed = (mAudioEndTime != -1) ?
     (mAudioEndTime - GetMediaTime()) : 0;
 
   // Currently for real time streams, AudioQueue().Duration() produce
@@ -2053,41 +2067,45 @@ int64_t MediaDecoderStateMachine::AudioD
   if (IsRealTime()) {
     return pushed + FramesToUsecs(AudioQueue().FrameCount(), mInfo.mAudio.mRate).value();
   }
   return pushed + AudioQueue().Duration();
 }
 
 bool MediaDecoderStateMachine::HasLowDecodedData(int64_t aAudioUsecs)
 {
+  MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
   MOZ_ASSERT(mReader->UseBufferingHeuristics());
   // We consider ourselves low on decoded data if we're low on audio,
   // provided we've not decoded to the end of the audio stream, or
   // if we're low on video frames, provided
   // we've not decoded to the end of the video stream.
   return ((IsAudioDecoding() && AudioDecodedUsecs() < aAudioUsecs) ||
          (IsVideoDecoding() &&
           static_cast<uint32_t>(VideoQueue().GetSize()) < LOW_VIDEO_FRAMES));
 }
 
 bool MediaDecoderStateMachine::OutOfDecodedAudio()
 {
+  MOZ_ASSERT(OnTaskQueue());
   return IsAudioDecoding() && !AudioQueue().IsFinished() &&
          AudioQueue().GetSize() == 0 &&
          (!mAudioSink || !mAudioSink->HasUnplayedFrames());
 }
 
 bool MediaDecoderStateMachine::HasLowUndecodedData()
 {
+  MOZ_ASSERT(OnTaskQueue());
   return HasLowUndecodedData(mLowDataThresholdUsecs);
 }
 
 bool MediaDecoderStateMachine::HasLowUndecodedData(int64_t aUsecs)
 {
+  MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
   NS_ASSERTION(mState > DECODER_STATE_DECODING_FIRSTFRAME,
                "Must have loaded first frame for GetBuffered() to work");
 
   // If we don't have a duration, GetBuffered is probably not going to produce
   // a useful buffered range. Return false here so that we don't get stuck in
   // buffering mode for live streams.
   if (GetDuration() < 0) {
@@ -2141,19 +2159,19 @@ MediaDecoderStateMachine::DecodeError()
   nsCOMPtr<nsIRunnable> event =
     NS_NewRunnableMethod(mDecoder, &MediaDecoder::DecodeError);
   AbstractThread::MainThread()->Dispatch(event.forget());
 }
 
 void
 MediaDecoderStateMachine::OnMetadataRead(MetadataHolder* aMetadata)
 {
-  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   MOZ_ASSERT(OnTaskQueue());
   MOZ_ASSERT(mState == DECODER_STATE_DECODING_METADATA);
+  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   mMetadataRequest.Complete();
 
   mDecoder->SetMediaSeekable(mReader->IsMediaSeekable());
   mInfo = aMetadata->mInfo;
   mMetadataTags = aMetadata->mTags.forget();
 
   if (HasVideo()) {
     DECODER_LOG("Video decode isAsync=%d HWAccel=%d videoQueueSize=%d",
@@ -2181,19 +2199,19 @@ MediaDecoderStateMachine::OnMetadataRead
   SetState(DECODER_STATE_DECODING_FIRSTFRAME);
   EnqueueDecodeFirstFrameTask();
 
   ScheduleStateMachine();
 }
 
 void
 MediaDecoderStateMachine::OnMetadataNotRead(ReadMetadataFailureReason aReason)
 {
-  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   MOZ_ASSERT(OnTaskQueue());
   MOZ_ASSERT(mState == DECODER_STATE_DECODING_METADATA);
+  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   mMetadataRequest.Complete();
 
   if (aReason == ReadMetadataFailureReason::WAITING_FOR_RESOURCES) {
     SetState(DECODER_STATE_WAIT_FOR_RESOURCES);
   } else {
     MOZ_ASSERT(aReason == ReadMetadataFailureReason::METADATA_ERROR);
     DECODER_WARN("Decode metadata failed, shutting down decoder");
     DecodeError();
@@ -2294,18 +2312,18 @@ MediaDecoderStateMachine::DecodeFirstFra
   }
 
   return NS_OK;
 }
 
 nsresult
 MediaDecoderStateMachine::FinishDecodeFirstFrame()
 {
+  MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
-  MOZ_ASSERT(OnTaskQueue());
   DECODER_LOG("FinishDecodeFirstFrame");
 
   if (IsShutdown()) {
     return NS_ERROR_FAILURE;
   }
 
   if (!IsRealTime() &&
       !mSentFirstFrameLoadedEvent) {
     const VideoData* v = VideoQueue().PeekFront();
@@ -2370,32 +2388,32 @@ MediaDecoderStateMachine::FinishDecodeFi
   }
 
   return NS_OK;
 }
 
 void
 MediaDecoderStateMachine::OnSeekCompleted(int64_t aTime)
 {
+  MOZ_ASSERT(OnTaskQueue());
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
-  MOZ_ASSERT(OnTaskQueue());
   mSeekRequest.Complete();
 
   // We must decode the first samples of active streams, so we can determine
   // the new stream time. So dispatch tasks to do that.
   mDecodeToSeekTarget = true;
 
   DispatchDecodeTasksIfNeeded();
 }
 
 void
 MediaDecoderStateMachine::OnSeekFailed(nsresult aResult)
 {
+  MOZ_ASSERT(OnTaskQueue());
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
-  MOZ_ASSERT(OnTaskQueue());
   mSeekRequest.Complete();
   MOZ_ASSERT(NS_FAILED(aResult), "Cancels should also disconnect mSeekRequest");
   DecodeError();
 }
 
 void
 MediaDecoderStateMachine::SeekCompleted()
 {
@@ -2833,26 +2851,28 @@ void MediaDecoderStateMachine::RenderVid
     }
     container->SetCurrentFrame(aData->mDisplay, aData->mImage, aTarget);
     MOZ_ASSERT(container->GetFrameDelay() >= 0 || IsRealTime());
   }
 }
 
 void MediaDecoderStateMachine::ResyncAudioClock()
 {
+  MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
   if (IsPlaying()) {
     SetPlayStartTime(TimeStamp::Now());
     mPlayDuration = GetAudioClock() - mStartTime;
   }
 }
 
 int64_t MediaDecoderStateMachine::GetAudioClock() const
 {
+  MOZ_ASSERT(OnTaskQueue());
   // We must hold the decoder monitor while using the audio stream off the
   // audio sink to ensure that it doesn't get destroyed on the audio sink
   // while we're using it.
   AssertCurrentThreadInMonitor();
   MOZ_ASSERT(HasAudio() && !mAudioCompleted);
   return mAudioStartTime + (mAudioSink ? mAudioSink->GetPosition() : 0);
 }
@@ -2869,16 +2889,17 @@ int64_t MediaDecoderStateMachine::GetVid
   int64_t delta = DurationToUsecs(TimeStamp::Now() - mPlayStartTime);
   // Take playback rate into account.
   delta *= mPlaybackRate;
   return mStartTime + mPlayDuration + delta;
 }
 
 int64_t MediaDecoderStateMachine::GetClock() const
 {
+  MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
 
   // Determine the clock time. If we've got audio, and we've not reached
   // the end of the audio, use the audio clock. However if we've finished
   // audio, or don't have audio, use the system clock. If our output is being
   // fed to a MediaStream, use that stream as the source of the clock.
   int64_t clock_time = -1;
   if (!IsPlaying()) {
@@ -3048,16 +3069,17 @@ void MediaDecoderStateMachine::AdvanceFr
   } else {
     ScheduleStateMachine();
   }
 }
 
 nsresult
 MediaDecoderStateMachine::DropVideoUpToSeekTarget(VideoData* aSample)
 {
+  MOZ_ASSERT(OnTaskQueue());
   nsRefPtr<VideoData> video(aSample);
   MOZ_ASSERT(video);
   DECODER_LOG("DropVideoUpToSeekTarget() frame [%lld, %lld] dup=%d",
               video->mTime, video->GetEndTime(), video->mDuplicate);
   MOZ_ASSERT(mCurrentSeek.Exists());
   const int64_t target = mCurrentSeek.mTarget.mTime;
 
   // Duplicate handling: if we're dropping frames up the seek target, we must
@@ -3099,16 +3121,17 @@ MediaDecoderStateMachine::DropVideoUpToS
   }
 
   return NS_OK;
 }
 
 nsresult
 MediaDecoderStateMachine::DropAudioUpToSeekTarget(AudioData* aSample)
 {
+  MOZ_ASSERT(OnTaskQueue());
   nsRefPtr<AudioData> audio(aSample);
   MOZ_ASSERT(audio &&
              mCurrentSeek.Exists() &&
              mCurrentSeek.mTarget.mType == SeekTarget::Accurate);
 
   CheckedInt64 startFrame = UsecsToFrames(audio->mTime, mInfo.mAudio.mRate);
   CheckedInt64 targetFrame = UsecsToFrames(mCurrentSeek.mTarget.mTime,
@@ -3194,17 +3217,18 @@ void MediaDecoderStateMachine::SetStartT
   // Set the audio start time to be start of media. If this lies before the
   // first actual audio frame we have, we'll inject silence during playback
   // to ensure the audio starts at the correct time.
   mAudioStartTime = mStartTime;
   DECODER_LOG("Set media start time to %lld", mStartTime);
 }
 
-void MediaDecoderStateMachine::UpdateNextFrameStatus() {
+void MediaDecoderStateMachine::UpdateNextFrameStatus()
+{
   MOZ_ASSERT(OnTaskQueue());
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
 
   MediaDecoderOwner::NextFrameStatus status;
   const char* statusString;
   if (IsBuffering()) {
     status = MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_BUFFERING;
     statusString = "NEXT_FRAME_UNAVAILABLE_BUFFERING";
@@ -3223,16 +3247,17 @@ void MediaDecoderStateMachine::UpdateNex
     DECODER_LOG("Changed mNextFrameStatus to %s", statusString);
   }
   mNextFrameStatus = status;
 }
 
 bool MediaDecoderStateMachine::JustExitedQuickBuffering()
 {
+  MOZ_ASSERT(OnTaskQueue());
   return !mDecodeStartTime.IsNull() &&
     mQuickBuffering &&
     (TimeStamp::Now() - mDecodeStartTime) < TimeDuration::FromMicroseconds(QUICK_BUFFER_THRESHOLD_USECS);
 }
 
 void MediaDecoderStateMachine::StartBuffering()
 {
   MOZ_ASSERT(OnTaskQueue());
@@ -3280,17 +3305,19 @@ void MediaDecoderStateMachine::SetPlaySt
   }
   if (!mPlayStartTime.IsNull()) {
     mAudioSink->StartPlayback();
   } else {
     mAudioSink->StopPlayback();
   }
 }
 
-void MediaDecoderStateMachine::ScheduleStateMachineWithLockAndWakeDecoder() {
+void MediaDecoderStateMachine::ScheduleStateMachineWithLockAndWakeDecoder()
+{
+  MOZ_ASSERT(OnTaskQueue());
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   DispatchAudioDecodeTaskIfNeeded();
   DispatchVideoDecodeTaskIfNeeded();
 }
 
 void MediaDecoderStateMachine::ScheduleStateMachine()
 {
   AssertCurrentThreadInMonitor();
@@ -3466,16 +3493,17 @@ void MediaDecoderStateMachine::OnAudioSi
   // Otherwise notify media decoder/element about this error for it makes
   // no sense to play an audio-only file without sound output.
   DecodeError();
 }
 
 uint32_t MediaDecoderStateMachine::GetAmpleVideoFrames() const
 {
+  MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
   return (mReader->IsAsync() && mReader->VideoIsHardwareAccelerated()) ?
     std::max<uint32_t>(sVideoQueueHWAccelSize, MIN_VIDEO_QUEUE_SIZE) :
     std::max<uint32_t>(sVideoQueueDefaultSize, MIN_VIDEO_QUEUE_SIZE);
 }
 
 } // namespace mozilla
--- a/dom/media/MediaDecoderStateMachine.h
+++ b/dom/media/MediaDecoderStateMachine.h
@@ -1075,22 +1075,24 @@ protected:
   uint32_t VideoPrerollFrames() const
   {
     return IsRealTime() ? 0 : GetAmpleVideoFrames() / 2;
   }
 
   bool DonePrerollingAudio()
   {
+    MOZ_ASSERT(OnTaskQueue());
     AssertCurrentThreadInMonitor();
     return !IsAudioDecoding() ||
         GetDecodedAudioDuration() >= AudioPrerollUsecs() * mPlaybackRate;
   }
 
   bool DonePrerollingVideo()
   {
+    MOZ_ASSERT(OnTaskQueue());
     AssertCurrentThreadInMonitor();
     return !IsVideoDecoding() ||
         static_cast<uint32_t>(VideoQueue().GetSize()) >=
             VideoPrerollFrames() * mPlaybackRate;
   }
 
   void StopPrerollingAudio()
   {
     AssertCurrentThreadInMonitor();
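The change is mechanical but worth spelling out: every MediaDecoderStateMachine method that must only run on the state machine's task queue now opens with MOZ_ASSERT(OnTaskQueue()), independent of (and in a few cases reordered ahead of) the existing decoder-monitor code. MOZ_ASSERT is a debug-only check in Gecko, so release builds are unaffected. The standalone sketch below illustrates the same thread-affinity-assertion idea using only standard C++; the class and member names are illustrative and are not part of this patch or of Gecko.

// Minimal sketch of the "assert we are on the owning thread" pattern,
// using std::thread and assert() rather than Gecko's TaskQueue/MOZ_ASSERT.
#include <cassert>
#include <thread>

class ToyStateMachine {
public:
  // Remember the thread that owns this object's single-threaded state.
  ToyStateMachine() : mOwnerThread(std::this_thread::get_id()) {}

  bool OnOwnerThread() const {
    return std::this_thread::get_id() == mOwnerThread;
  }

  // Each method that touches owner-thread-only state asserts its thread
  // affinity first, so a call from the wrong thread aborts in debug builds
  // instead of racing silently.
  void AdvanceFrame() {
    assert(OnOwnerThread());
    ++mFramesAdvanced;
  }

private:
  std::thread::id mOwnerThread;
  int mFramesAdvanced = 0;
};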