author | Bobby Holley <bobbyholley@gmail.com> |
date | Fri, 22 May 2015 16:05:19 -0700 |
changeset 249255 | 0e80ea297120 |
parent 249254 | 534be03edda3 |
child 249256 | c80271e9f1c2 |
push id | 28923 |
push user | ryanvm@gmail.com |
push date | 2015-06-17 18:57 +0000 |
treeherder | mozilla-central@099d6cd6725e |
perfherder | talos, build metrics, platform microbench (compared to previous push) |
reviewers | jww |
bugs | 1163223 |
milestone | 41.0a1 |
first release with | nightly linux32, nightly linux64, nightly mac, nightly win32, nightly win64 |
last release without | nightly linux32, nightly linux64, nightly mac, nightly win32, nightly win64 |
--- a/dom/media/MediaDecoderReader.cpp
+++ b/dom/media/MediaDecoderReader.cpp
@@ -160,30 +160,16 @@ MediaDecoderReader::GetBuffered()
   int64_t durationUs = 0;
   {
     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
     durationUs = mDecoder->GetMediaDuration();
   }
   return GetEstimatedBufferedTimeRanges(stream, durationUs);
 }

-int64_t
-MediaDecoderReader::ComputeStartTime(const VideoData* aVideo, const AudioData* aAudio)
-{
-  int64_t startTime = std::min<int64_t>(aAudio ? aAudio->mTime : INT64_MAX,
-                                        aVideo ? aVideo->mTime : INT64_MAX);
-  if (startTime == INT64_MAX) {
-    startTime = 0;
-  }
-  DECODER_LOG("ComputeStartTime first video frame start %lld", aVideo ? aVideo->mTime : -1);
-  DECODER_LOG("ComputeStartTime first audio frame start %lld", aAudio ? aAudio->mTime : -1);
-  NS_ASSERTION(startTime >= 0, "Start time is negative");
-  return startTime;
-}
-
 nsRefPtr<MediaDecoderReader::MetadataPromise>
 MediaDecoderReader::AsyncReadMetadata()
 {
   typedef ReadMetadataFailureReason Reason;

   MOZ_ASSERT(OnTaskQueue());
   mDecoder->GetReentrantMonitor().AssertNotCurrentThreadIn();
   DECODER_LOG("MediaDecoderReader::AsyncReadMetadata");
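For context while reading the removal above: the deleted MediaDecoderReader::ComputeStartTime() picked the earliest timestamp of the first decoded audio and video samples and fell back to 0 when neither was available. Below is a minimal standalone C++ sketch of that heuristic; the free function name and the std::optional-based signature are illustrative, not the Gecko API.

#include <algorithm>
#include <cstdint>
#include <optional>

// Sketch of the removed heuristic: take the smaller of the first audio and
// first video timestamps (in microseconds); if neither stream produced a
// sample, treat the start time as 0.
int64_t ComputeStartTimeSketch(std::optional<int64_t> aFirstVideoUs,
                               std::optional<int64_t> aFirstAudioUs)
{
  const int64_t startTime = std::min(aFirstAudioUs.value_or(INT64_MAX),
                                     aFirstVideoUs.value_or(INT64_MAX));
  return startTime == INT64_MAX ? 0 : startTime;
}

After this patch that guess is no longer made in the reader; readers either report a zero start time via ForceZeroStartTime(), or the start time is handled by the mStartTimeRendezvous machinery visible in the MediaDecoderStateMachine changes below.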
--- a/dom/media/MediaDecoderReader.h
+++ b/dom/media/MediaDecoderReader.h
@@ -211,18 +211,16 @@ public:
   // The OggReader relies on this base implementation not performing I/O,
   // since in FirefoxOS we can't do I/O on the main thread, where this is
   // called.
   virtual media::TimeIntervals GetBuffered();

   // MediaSourceReader opts out of the start-time-guessing mechanism.
   virtual bool ForceZeroStartTime() const { return false; }

-  virtual int64_t ComputeStartTime(const VideoData* aVideo, const AudioData* aAudio);
-
   // The MediaDecoderStateMachine uses various heuristics that assume that
   // raw media data is arriving sequentially from a network channel. This
   // makes sense in the <video src="foo"> case, but not for more advanced use
   // cases like MSE.
   virtual bool UseBufferingHeuristics() { return true; }

   // Returns the number of bytes of memory allocated by structures/frames in
   // the video queue.
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -182,17 +182,16 @@ MediaDecoderStateMachine::MediaDecoderSt
   mTaskQueue(new MediaTaskQueue(GetMediaThreadPool(MediaThreadType::PLAYBACK),
                                 /* aSupportsTailDispatch = */ true)),
   mWatchManager(this, mTaskQueue),
   mRealTime(aRealTime),
   mDispatchedStateMachine(false),
   mDelayedScheduler(this),
   mState(DECODER_STATE_DECODING_NONE, "MediaDecoderStateMachine::mState"),
   mPlayDuration(0),
-  mStartTime(-1),
   mEndTime(-1),
   mDurationSet(false),
   mEstimatedDuration(mTaskQueue, NullableTimeUnit(),
                      "MediaDecoderStateMachine::EstimatedDuration (Mirror)"),
   mExplicitDuration(mTaskQueue, Maybe<double>(),
                     "MediaDecoderStateMachine::mExplicitDuration (Mirror)"),
   mObservedDuration(TimeUnit(), "MediaDecoderStateMachine::mObservedDuration"),
   mPlayState(mTaskQueue, MediaDecoder::PLAY_STATE_LOADING,
@@ -201,18 +200,18 @@
              "MediaDecoderStateMachine::mNextPlayState (Mirror)"),
   mLogicallySeeking(mTaskQueue, false,
                     "MediaDecoderStateMachine::mLogicallySeeking (Mirror)"),
   mNextFrameStatus(mTaskQueue, MediaDecoderOwner::NEXT_FRAME_UNINITIALIZED,
                    "MediaDecoderStateMachine::mNextFrameStatus (Canonical)"),
   mFragmentEndTime(-1),
   mReader(aReader),
   mCurrentPosition(mTaskQueue, 0,
                    "MediaDecoderStateMachine::mCurrentPosition (Canonical)"),
-  mStreamStartTime(-1),
-  mAudioStartTime(-1),
+  mStreamStartTime(0),
+  mAudioStartTime(0),
   mAudioEndTime(-1),
   mDecodedAudioEndTime(-1),
   mVideoFrameEndTime(-1),
   mDecodedVideoEndTime(-1),
   mVolume(mTaskQueue, 1.0, "MediaDecoderStateMachine::mVolume (Mirror)"),
   mPlaybackRate(1.0),
   mLogicalPlaybackRate(mTaskQueue, 1.0,
                        "MediaDecoderStateMachine::mLogicalPlaybackRate (Mirror)"),
   mPreservesPitch(mTaskQueue, true,
                   "MediaDecoderStateMachine::mPreservesPitch (Mirror)"),
@@ -1252,17 +1251,17 @@ void MediaDecoderStateMachine::StopPlayb
   MOZ_ASSERT(OnTaskQueue());
   DECODER_LOG("StopPlayback()");
   AssertCurrentThreadInMonitor();

   mDecoder->NotifyPlaybackStopped();

   if (IsPlaying()) {
-    mPlayDuration = GetClock() - mStartTime;
+    mPlayDuration = GetClock();
     SetPlayStartTime(TimeStamp());
   }

   // Notify the audio sink, so that it notices that we've stopped playing,
   // so it can pause audio playback.
   mDecoder->GetReentrantMonitor().NotifyAll();

   NS_ASSERTION(!IsPlaying(), "Should report not playing at end of StopPlayback()");
   DispatchDecodeTasksIfNeeded();
@@ -1302,21 +1301,20 @@ void MediaDecoderStateMachine::MaybeStar
   mDecoder->GetReentrantMonitor().NotifyAll();
   DispatchDecodeTasksIfNeeded();
 }

 void MediaDecoderStateMachine::UpdatePlaybackPositionInternal(int64_t aTime)
 {
   MOZ_ASSERT(OnTaskQueue());
-  SAMPLE_LOG("UpdatePlaybackPositionInternal(%lld) (mStartTime=%lld)", aTime, mStartTime);
+  SAMPLE_LOG("UpdatePlaybackPositionInternal(%lld)", aTime);
   AssertCurrentThreadInMonitor();

-  NS_ASSERTION(mStartTime >= 0, "Should have positive mStartTime");
-  mCurrentPosition = aTime - mStartTime;
+  mCurrentPosition = aTime;
   NS_ASSERTION(mCurrentPosition >= 0, "CurrentTime should be positive!");
   mObservedDuration = std::max(mObservedDuration.Ref(),
                                TimeUnit::FromMicroseconds(mCurrentPosition.Ref()));
 }

 void MediaDecoderStateMachine::UpdatePlaybackPosition(int64_t aTime)
 {
   MOZ_ASSERT(OnTaskQueue());
@@ -1382,19 +1380,19 @@ bool MediaDecoderStateMachine::IsRealTim
 {
   return mRealTime;
 }

 int64_t MediaDecoderStateMachine::GetDuration()
 {
   AssertCurrentThreadInMonitor();

-  if (mEndTime == -1 || mStartTime == -1)
+  if (mEndTime == -1)
     return -1;
-  return mEndTime - mStartTime;
+  return mEndTime;
 }

 int64_t MediaDecoderStateMachine::GetEndTime()
 {
   if (mEndTime == -1 && mDurationSet) {
     return INT64_MAX;
   }
   return mEndTime;
@@ -1429,50 +1427,34 @@ void MediaDecoderStateMachine::Recompute
   } else {
     return;
   }

   if (duration < mObservedDuration.Ref()) {
     duration = mObservedDuration;
     fireDurationChanged = true;
   }
-  fireDurationChanged = fireDurationChanged && duration.ToMicroseconds() != GetDuration();
-  SetDuration(duration);
+
+  MOZ_ASSERT(duration.ToMicroseconds() >= 0);
+  mEndTime = duration.IsInfinite() ? -1 : duration.ToMicroseconds();
+  mDurationSet = true;

   if (fireDurationChanged) {
     nsCOMPtr<nsIRunnable> event =
       NS_NewRunnableMethodWithArg<TimeUnit>(mDecoder, &MediaDecoder::DurationChanged, duration);
     AbstractThread::MainThread()->Dispatch(event.forget());
   }
 }

-void MediaDecoderStateMachine::SetDuration(TimeUnit aDuration)
-{
-  MOZ_ASSERT(OnTaskQueue());
-  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
-  MOZ_ASSERT(aDuration.ToMicroseconds() >= 0);
-  if (mStartTime == -1) {
-    SetStartTime(0);
-  }
-  mDurationSet = true;
-
-  if (aDuration.IsInfinite()) {
-    mEndTime = -1;
-    return;
-  }
-
-  mEndTime = mStartTime + aDuration.ToMicroseconds();
-}
-
 void MediaDecoderStateMachine::SetFragmentEndTime(int64_t aEndTime)
 {
   AssertCurrentThreadInMonitor();

-  mFragmentEndTime = aEndTime < 0 ? aEndTime : aEndTime + mStartTime;
+  mFragmentEndTime = aEndTime < 0 ? aEndTime : aEndTime;
 }

 bool MediaDecoderStateMachine::IsDormantNeeded()
 {
   return mReader->IsDormantNeeded();
 }

 void MediaDecoderStateMachine::SetDormant(bool aDormant)
@@ -1689,19 +1671,21 @@ void MediaDecoderStateMachine::NotifyDat
   // While playing an unseekable stream of unknown duration, mEndTime is
   // updated (in AdvanceFrame()) as we play. But if data is being downloaded
   // faster than played, mEndTime won't reflect the end of playable data
   // since we haven't played the frame at the end of buffered data. So update
   // mEndTime here as new data is downloaded to prevent such a lag.
   //
   // Make sure to only do this if we have a start time, otherwise the reader
   // doesn't know how to compute GetBuffered.
-  if (!mDecoder->IsInfinite() || mStartTime == -1) {
+  if (!mDecoder->IsInfinite() || !HaveStartTime())
+  {
     return;
   }
+
   media::TimeIntervals buffered{mDecoder->GetBuffered()};
   if (!buffered.IsInvalid()) {
     bool exists;
     media::TimeUnit end{buffered.GetEnd(&exists)};
     if (exists) {
       ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
       mEndTime = std::max<int64_t>(mEndTime, end.ToMicroseconds());
     }
@@ -1847,22 +1831,21 @@ MediaDecoderStateMachine::InitiateSeek()
   MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();

   mCurrentSeek.RejectIfExists(__func__);
   mCurrentSeek.Steal(mPendingSeek);

   // Bound the seek time to be inside the media range.
   int64_t end = GetEndTime();
-  NS_ASSERTION(mStartTime != -1, "Should know start time by now");
   NS_ASSERTION(end != -1, "Should know end time by now");
-  int64_t seekTime = mCurrentSeek.mTarget.mTime + mStartTime;
+  int64_t seekTime = mCurrentSeek.mTarget.mTime;
   seekTime = std::min(seekTime, end);
-  seekTime = std::max(mStartTime, seekTime);
-  NS_ASSERTION(seekTime >= mStartTime && seekTime <= end,
+  seekTime = std::max(int64_t(0), seekTime);
+  NS_ASSERTION(seekTime >= 0 && seekTime <= end,
                "Can only seek in range [0,duration]");
   mCurrentSeek.mTarget.mTime = seekTime;

   if (mAudioCaptured) {
     nsCOMPtr<nsIRunnable> r = NS_NewRunnableMethodWithArgs<MediaStreamGraph*>(
       this, &MediaDecoderStateMachine::RecreateDecodedStream, nullptr);
     AbstractThread::MainThread()->Dispatch(r.forget());
   }
@@ -2301,24 +2284,22 @@ nsresult
 MediaDecoderStateMachine::DecodeFirstFrame()
 {
   MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
   MOZ_ASSERT(mState == DECODER_STATE_DECODING_FIRSTFRAME);
   DECODER_LOG("DecodeFirstFrame started");

   if (IsRealTime()) {
-    SetStartTime(0);
     nsresult res = FinishDecodeFirstFrame();
     NS_ENSURE_SUCCESS(res, res);
   } else if (mSentFirstFrameLoadedEvent) {
     // We're resuming from dormant state, so we don't need to request
     // the first samples in order to determine the media start time,
     // we have the start time from last time we loaded.
-    SetStartTime(mStartTime);
     nsresult res = FinishDecodeFirstFrame();
     NS_ENSURE_SUCCESS(res, res);
   } else {
     if (HasAudio()) {
       mAudioDataRequest.Begin(
         ProxyMediaCall(DecodeTaskQueue(), mReader.get(), __func__,
                        &MediaDecoderReader::RequestAudioData)
         ->Then(TaskQueue(), __func__, mStartTimeRendezvous.get(),
@@ -2355,32 +2336,28 @@ MediaDecoderStateMachine::FinishDecodeFi
   AssertCurrentThreadInMonitor();
   DECODER_LOG("FinishDecodeFirstFrame");

   if (IsShutdown()) {
     return NS_ERROR_FAILURE;
   }

   if (!IsRealTime() && !mSentFirstFrameLoadedEvent) {
-    const VideoData* v = VideoQueue().PeekFront();
-    const AudioData* a = AudioQueue().PeekFront();
-    SetStartTime(mReader->ComputeStartTime(v, a));
     if (VideoQueue().GetSize()) {
       ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
       RenderVideoFrame(VideoQueue().PeekFront(), TimeStamp::Now());
     }
   }

-  NS_ASSERTION(mStartTime != -1, "Must have start time");
   MOZ_ASSERT(!(mDecoder->IsMediaSeekable() && mDecoder->IsTransportSeekable()) ||
               (GetDuration() != -1) || mDurationSet,
              "Seekable media should have duration");
   DECODER_LOG("Media goes from %lld to %lld (duration %lld) "
               "transportSeekable=%d, mediaSeekable=%d",
-              mStartTime, mEndTime, GetDuration(),
+              0, mEndTime, GetDuration(),
               mDecoder->IsTransportSeekable(), mDecoder->IsMediaSeekable());

   if (HasAudio() && !HasVideo()) {
     // We're playing audio only. We don't need to worry about slow video
     // decodes causing audio underruns, so don't buffer so much audio in
     // order to reduce memory usage.
     mAmpleAudioThresholdUsecs /= NO_VIDEO_AMPLE_AUDIO_DIVISOR;
     mLowAudioThresholdUsecs /= NO_VIDEO_AMPLE_AUDIO_DIVISOR;
@@ -2444,17 +2421,17 @@ MediaDecoderStateMachine::SeekCompleted(
     // seekTime is bounded in suitable duration. See Bug 1112438.
     int64_t videoStart = video ? video->mTime : seekTime;
     int64_t audioStart = audio ? audio->mTime : seekTime;
     newCurrentTime = mAudioStartTime = std::min(audioStart, videoStart);
   } else {
     newCurrentTime = video ? video->mTime : seekTime;
   }
   mStreamStartTime = newCurrentTime;
-  mPlayDuration = newCurrentTime - mStartTime;
+  mPlayDuration = newCurrentTime;

   mDecoder->StartProgressUpdates();

   // Change state to DECODING or COMPLETED now. SeekingStopped will
   // call MediaDecoderStateMachine::Seek to reset our state to SEEKING
   // if we need to seek again.

   bool isLiveStream = mDecoder->GetResource()->IsLiveStream();
@@ -2761,18 +2738,18 @@ MediaDecoderStateMachine::Reset()
   // Stop the audio thread. Otherwise, AudioSink might be accessing AudioQueue
   // outside of the decoder monitor while we are clearing the queue and causes
   // crash for no samples to be popped.
   StopAudioThread();

   mVideoFrameEndTime = -1;
   mDecodedVideoEndTime = -1;
-  mStreamStartTime = -1;
-  mAudioStartTime = -1;
+  mStreamStartTime = 0;
+  mAudioStartTime = 0;
   mAudioEndTime = -1;
   mDecodedAudioEndTime = -1;
   mAudioCompleted = false;
   AudioQueue().Reset();
   VideoQueue().Reset();
   mFirstVideoFrameAfterSeek = nullptr;
   mDropAudioUntilNextDiscontinuity = true;
   mDropVideoUntilNextDiscontinuity = true;
@@ -2831,17 +2808,17 @@ void MediaDecoderStateMachine::RenderVid
 }

 void MediaDecoderStateMachine::ResyncAudioClock()
 {
   MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
   if (IsPlaying()) {
     SetPlayStartTime(TimeStamp::Now());
-    mPlayDuration = GetAudioClock() - mStartTime;
+    mPlayDuration = GetAudioClock();
   }
 }

 int64_t
 MediaDecoderStateMachine::GetAudioClock() const
 {
   MOZ_ASSERT(OnTaskQueue());
   // We must hold the decoder monitor while using the audio stream off the
@@ -2861,38 +2838,38 @@ int64_t MediaDecoderStateMachine::GetStr
   return mStreamStartTime + GetDecodedStream()->GetPosition();
 }

 int64_t MediaDecoderStateMachine::GetVideoStreamPosition() const
 {
   AssertCurrentThreadInMonitor();

   if (!IsPlaying()) {
-    return mPlayDuration + mStartTime;
+    return mPlayDuration;
   }

   // Time elapsed since we started playing.
   int64_t delta = DurationToUsecs(TimeStamp::Now() - mPlayStartTime);
   // Take playback rate into account.
   delta *= mPlaybackRate;
-  return mStartTime + mPlayDuration + delta;
+  return mPlayDuration + delta;
 }

 int64_t MediaDecoderStateMachine::GetClock() const
 {
   MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();

   // Determine the clock time. If we've got audio, and we've not reached
   // the end of the audio, use the audio clock. However if we've finished
   // audio, or don't have audio, use the system clock. If our output is being
   // fed to a MediaStream, use that stream as the source of the clock.
   int64_t clock_time = -1;
   if (!IsPlaying()) {
-    clock_time = mPlayDuration + mStartTime;
+    clock_time = mPlayDuration;
   } else {
     if (mAudioCaptured) {
       clock_time = GetStreamClock();
     } else if (HasAudio() && !mAudioCompleted) {
       clock_time = GetAudioClock();
     } else {
       // Audio is disabled on this system. Sync to the system clock.
       clock_time = GetVideoStreamPosition();
@@ -2925,17 +2902,17 @@ void MediaDecoderStateMachine::AdvanceFr
     SendStreamData();
   }

   const int64_t clock_time = GetClock();
   TimeStamp nowTime = TimeStamp::Now();
   // Skip frames up to the frame at the playback position, and figure out
   // the time remaining until it's time to display the next frame.
   int64_t remainingTime = AUDIO_DURATION_USECS;
-  NS_ASSERTION(clock_time >= mStartTime, "Should have positive clock time.");
+  NS_ASSERTION(clock_time >= 0, "Should have positive clock time.");
   nsRefPtr<VideoData> currentFrame;
   if (VideoQueue().GetSize() > 0) {
     VideoData* frame = VideoQueue().PeekFront();
     int32_t droppedFrames = 0;
     while (IsRealTime() || clock_time >= frame->mTime) {
       mVideoFrameEndTime = frame->GetEndTime();
       if (currentFrame) {
         mDecoder->NotifyDecodedFrames(0, 0, 1);
@@ -3005,17 +2982,17 @@ void MediaDecoderStateMachine::AdvanceFr
   // Otherwise, MediaDecoder::AddOutputStream could kick in when we are outside
   // the monitor and get a staled value from GetCurrentTimeUs() which hits the
   // assertion in GetClock().
   if (currentFrame) {
     // Decode one frame and display it.
     int64_t delta = currentFrame->mTime - clock_time;
     TimeStamp presTime = nowTime + TimeDuration::FromMicroseconds(
       delta / mPlaybackRate);
-    NS_ASSERTION(currentFrame->mTime >= mStartTime, "Should have positive frame time");
+    NS_ASSERTION(currentFrame->mTime >= 0, "Should have positive frame time");
     {
       ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
       // If we have video, we want to increment the clock in steps of the frame
       // duration.
       RenderVideoFrame(currentFrame, presTime);
     }
     MOZ_ASSERT(IsPlaying());
     MediaDecoder::FrameStatistics& frameStats = mDecoder->GetFrameStatistics();
@@ -3166,42 +3143,16 @@ MediaDecoderStateMachine::DropAudioUpToS
                                          audioData.forget(),
                                          channels,
                                          audio->mRate));
   PushFront(data);
   return NS_OK;
 }

-void MediaDecoderStateMachine::SetStartTime(int64_t aStartTimeUsecs)
-{
-  AssertCurrentThreadInMonitor();
-  DECODER_LOG("SetStartTime(%lld)", aStartTimeUsecs);
-  mStartTime = 0;
-  if (aStartTimeUsecs != 0) {
-    mStartTime = aStartTimeUsecs;
-    // XXXbholley - this whole method goes away in the upcoming patches.
-    if (mDurationSet && GetEndTime() != INT64_MAX) {
-      NS_ASSERTION(mEndTime != -1,
-                   "We should have mEndTime as supplied duration here");
-      // We were specified a duration from a Content-Duration HTTP header.
-      // Adjust mEndTime so that mEndTime-mStartTime matches the specified
-      // duration.
-      mEndTime = mStartTime + mEndTime;
-    }
-  }
-
-  // Set the audio start time to be start of media. If this lies before the
-  // first actual audio frame we have, we'll inject silence during playback
-  // to ensure the audio starts at the correct time.
-  mAudioStartTime = mStartTime;
-  mStreamStartTime = mStartTime;
-  DECODER_LOG("Set media start time to %lld", mStartTime);
-}
-
 void MediaDecoderStateMachine::UpdateNextFrameStatus()
 {
   MOZ_ASSERT(OnTaskQueue());
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());

   MediaDecoderOwner::NextFrameStatus status;
   const char* statusString;

   if (mState <= DECODER_STATE_DECODING_FIRSTFRAME) {
@@ -3359,17 +3310,17 @@ MediaDecoderStateMachine::LogicalPlaybac
   }

   // AudioStream will handle playback rate change when we have audio.
   // Do nothing while we are not playing. Change in playback rate will
   // take effect next time we start playing again.
   if (!HasAudio() && IsPlaying()) {
     // Remember how much time we've spent in playing the media
     // for playback rate will change from now on.
-    mPlayDuration = GetVideoStreamPosition() - mStartTime;
+    mPlayDuration = GetVideoStreamPosition();
     SetPlayStartTime(TimeStamp::Now());
   }

   mPlaybackRate = mLogicalPlaybackRate;
   if (mAudioSink) {
     mAudioSink->SetPlaybackRate(mPlaybackRate);
   }
 }
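Taken together, the MediaDecoderStateMachine changes above move the state machine onto a zero-based media timeline: every spot that previously added or subtracted mStartTime (GetClock, GetVideoStreamPosition, UpdatePlaybackPositionInternal, InitiateSeek, SetFragmentEndTime, and so on) now uses the time value directly. A simplified, hypothetical before/after sketch of the video-position arithmetic, using standalone functions with invented names rather than the real member functions:

#include <cstdint>

// Before the patch: playback positions were offset by the demuxed start time.
int64_t VideoPositionWithStartTime(int64_t aStartTimeUs, int64_t aPlayDurationUs,
                                   int64_t aElapsedUs, double aPlaybackRate)
{
  const int64_t delta = static_cast<int64_t>(aElapsedUs * aPlaybackRate);
  return aStartTimeUs + aPlayDurationUs + delta;
}

// After the patch: media time and playback position share one zero-based
// timeline, so the offset term disappears.
int64_t VideoPositionZeroBased(int64_t aPlayDurationUs,
                               int64_t aElapsedUs, double aPlaybackRate)
{
  const int64_t delta = static_cast<int64_t>(aElapsedUs * aPlaybackRate);
  return aPlayDurationUs + delta;
}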
--- a/dom/media/MediaDecoderStateMachine.h
+++ b/dom/media/MediaDecoderStateMachine.h
@@ -191,24 +191,16 @@ public:
   int64_t GetDuration();

   // Time of the last frame in the media, in microseconds or INT64_MAX if
   // media has an infinite duration.
   // Accessed on state machine, decode, and main threads.
   // Access controlled by decoder monitor.
   int64_t GetEndTime();

-  // Called from the main thread to set the duration of the media resource
-  // if it is able to be obtained via HTTP headers. Called from the
-  // state machine thread to set the duration if it is obtained from the
-  // media metadata. The decoder monitor must be obtained before calling this.
-  // aDuration is in microseconds.
-  // A value of INT64_MAX will be treated as infinity.
-  void SetDuration(media::TimeUnit aDuration);
-
   // Functions used by assertions to ensure we're calling things
   // on the appropriate threads.
   bool OnDecodeTaskQueue() const;
   bool OnTaskQueue() const;

   // Seeks to the decoder to aTarget asynchronously.
   // Must be called on the state machine thread.
   nsRefPtr<MediaDecoder::SeekPromise> Seek(SeekTarget aTarget);
@@ -601,17 +593,17 @@ protected:

   // Returns the "media time". This is the absolute time which the media
   // playback has reached. i.e. this returns values in the range
   // [mStartTime, mEndTime], and mStartTime will not be 0 if the media does
   // not start at 0. Note this is different than the "current playback position",
   // which is in the range [0,duration].
   int64_t GetMediaTime() const {
     AssertCurrentThreadInMonitor();
-    return mStartTime + mCurrentPosition;
+    return mCurrentPosition;
   }

   // Returns an upper bound on the number of microseconds of audio that is
   // decoded and playable. This is the sum of the number of usecs of audio which
   // is decoded and in the reader's audio queue, and the usecs of unplayed audio
   // which has been pushed to the audio hardware for playback. Note that after
   // calling this, the audio hardware may play some of the audio pushed to
   // hardware, so this can only be used as a upper bound. The decoder monitor
@@ -950,22 +942,16 @@ public:
   // by decoder monitor.
   int64_t mPlayDuration;

   // Time that buffering started. Used for buffering timeout and only
   // accessed on the state machine thread. This is null while we're not
   // buffering.
   TimeStamp mBufferingStart;

-  // Start time of the media, in microseconds. This is the presentation
-  // time of the first frame decoded from the media, and is used to calculate
-  // duration and as a bounds for seeking. Accessed on state machine, decode,
-  // and main threads. Access controlled by decoder monitor.
-  int64_t mStartTime;
-
   // Time of the last frame in the media, in microseconds. This is the
   // end time of the last frame in the media. Accessed on state
   // machine, decode, and main threads. Access controlled by decoder monitor.
   // It will be set to -1 if the duration is infinite
   int64_t mEndTime;

   // Recomputes the canonical duration from various sources.
   void RecomputeDuration();
--- a/dom/media/MediaFormatReader.cpp
+++ b/dom/media/MediaFormatReader.cpp
@@ -1475,18 +1475,9 @@ MediaFormatReader::NotifyDataRemoved()
 }

 bool
 MediaFormatReader::ForceZeroStartTime() const
 {
   return !mDemuxer->ShouldComputeStartTime();
 }

-int64_t
-MediaFormatReader::ComputeStartTime(const VideoData* aVideo, const AudioData* aAudio)
-{
-  if (mDemuxer->ShouldComputeStartTime()) {
-    return MediaDecoderReader::ComputeStartTime(aVideo, aAudio);
-  }
-  return 0;
-}
-
 } // namespace mozilla
--- a/dom/media/MediaFormatReader.h
+++ b/dom/media/MediaFormatReader.h
@@ -94,18 +94,16 @@ public:
   void DisableHardwareAcceleration() override;

   bool IsWaitForDataSupported() override { return true; }
   nsRefPtr<WaitForDataPromise> WaitForData(MediaData::Type aType) override;

   bool IsWaitingOnCDMResource() override;

-  int64_t ComputeStartTime(const VideoData* aVideo, const AudioData* aAudio) override;
-
   bool UseBufferingHeuristics() override
   {
     return mTrackDemuxersMayBlock;
   }

 private:
   bool InitDemuxer();

   // Notify the demuxer that new data has been received.
--- a/dom/media/mediasource/MediaSourceReader.h
+++ b/dom/media/mediasource/MediaSourceReader.h
@@ -90,17 +90,16 @@ public:
       GetVideoReader()->DisableHardwareAcceleration();
     }
   }

   // We can't compute a proper start time since we won't necessarily
   // have the first frame of the resource available. This does the same
   // as chrome/blink and assumes that we always start at t=0.
   virtual bool ForceZeroStartTime() const override { return true; }
-  virtual int64_t ComputeStartTime(const VideoData* aVideo, const AudioData* aAudio) override { return 0; }

   // Buffering heuristics don't make sense for MSE, because the arrival of data
   // is at least partly controlled by javascript, and javascript does not expect
   // us to sit on unplayed data just because it may not be enough to play
   // through.
   bool UseBufferingHeuristics() override { return false; }

   bool IsMediaSeekable() override { return true; }
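With ComputeStartTime() gone from every reader, the only remaining per-reader hook in this area is ForceZeroStartTime(): MediaSourceReader returns true unconditionally, and MediaFormatReader returns true whenever its demuxer opts out of start-time computation. A self-contained sketch of that pattern follows; the class names are illustrative, not Gecko code.

#include <cstdio>

// Models the single hook that now controls start-time handling.
class ReaderSketch
{
public:
  virtual ~ReaderSketch() = default;
  // Mirrors MediaDecoderReader::ForceZeroStartTime(): the default is false,
  // meaning the state machine derives the start time from the first samples.
  virtual bool ForceZeroStartTime() const { return false; }
};

// A reader that always starts at t=0, as MediaSourceReader does
// unconditionally and MediaFormatReader does when its demuxer asks for it.
class ZeroStartTimeReaderSketch : public ReaderSketch
{
public:
  bool ForceZeroStartTime() const override { return true; }
};

int main()
{
  ZeroStartTimeReaderSketch reader;
  std::printf("forces zero start time: %d\n", reader.ForceZeroStartTime());
  return 0;
}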