Bug 1172264 - Track the MDSM's duration as a TimeUnit and eliminate the separate concept of 'end time'. r=jww
author Bobby Holley <bobbyholley@gmail.com>
date Mon, 08 Jun 2015 11:30:59 -0700
changeset 249382 394c85b4a06b268d66a2af4d8dd23a5d2cd6e23c
parent 249381 f4f44192009b8227e71aaf653c0e884a283e0af5
child 249383 3ead3466f84a2f9b7f6b2ddd47ebd93b425b3691
push id 61234
push user bobbyholley@gmail.com
push date Wed, 17 Jun 2015 16:49:23 +0000
reviewers jww
bugs 1172264
milestone 41.0a1
dom/media/MediaDecoderStateMachine.cpp
dom/media/MediaDecoderStateMachine.h
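
Editor's note: before reading the hunks, it may help to see the data-model change in isolation. The pair int64_t mEndTime (with -1 as a sentinel) and bool mDurationSet collapses into a single nullable duration. The sketch below uses stand-in types (std::optional and a toy TimeUnit) rather than the real mozilla::Maybe<media::TimeUnit>/media::NullableTimeUnit; it only illustrates the state space, assuming INT64_MAX as the toy representation of infinity.

// Minimal stand-ins for media::TimeUnit and media::NullableTimeUnit;
// not the Gecko implementation.
#include <cstdint>
#include <limits>
#include <optional>

struct TimeUnit {
  int64_t mUs;  // microseconds; INT64_MAX stands in for an infinite duration
  static TimeUnit FromMicroseconds(int64_t aUs) { return {aUs}; }
  static TimeUnit FromInfinity() { return {std::numeric_limits<int64_t>::max()}; }
  bool IsInfinite() const { return mUs == std::numeric_limits<int64_t>::max(); }
  int64_t ToMicroseconds() const { return mUs; }
};

// Before the patch, two members encoded three states:
//   mEndTime == -1 && !mDurationSet  ->  duration unknown
//   mEndTime == -1 &&  mDurationSet  ->  duration infinite
//   mEndTime >=  0                   ->  finite duration in microseconds
//
// After the patch, one nullable member covers the same states:
//   std::nullopt (Nothing)          ->  unknown
//   a TimeUnit with IsInfinite()    ->  infinite
//   a finite TimeUnit               ->  finite
std::optional<TimeUnit> mDuration;
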
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -182,18 +182,16 @@ MediaDecoderStateMachine::MediaDecoderSt
   mTaskQueue(new MediaTaskQueue(GetMediaThreadPool(MediaThreadType::PLAYBACK),
                                 /* aSupportsTailDispatch = */ true)),
   mWatchManager(this, mTaskQueue),
   mRealTime(aRealTime),
   mDispatchedStateMachine(false),
   mDelayedScheduler(this),
   mState(DECODER_STATE_DECODING_NONE, "MediaDecoderStateMachine::mState"),
   mPlayDuration(0),
-  mEndTime(-1),
-  mDurationSet(false),
   mEstimatedDuration(mTaskQueue, NullableTimeUnit(),
                     "MediaDecoderStateMachine::EstimatedDuration (Mirror)"),
   mExplicitDuration(mTaskQueue, Maybe<double>(),
                     "MediaDecoderStateMachine::mExplicitDuration (Mirror)"),
   mObservedDuration(TimeUnit(), "MediaDecoderStateMachine::mObservedDuration"),
   mPlayState(mTaskQueue, MediaDecoder::PLAY_STATE_LOADING,
              "MediaDecoderStateMachine::mPlayState (Mirror)"),
   mNextPlayState(mTaskQueue, MediaDecoder::PLAY_STATE_PAUSED,
@@ -1379,28 +1377,21 @@ void MediaDecoderStateMachine::VolumeCha
 bool MediaDecoderStateMachine::IsRealTime() const
 {
   return mRealTime;
 }
 
 int64_t MediaDecoderStateMachine::GetDuration()
 {
   AssertCurrentThreadInMonitor();
-
-  if (mEndTime == -1)
+  if (mDuration.isNothing() || mDuration.ref().IsInfinite()) {
     return -1;
-  return mEndTime;
-}
-
-int64_t MediaDecoderStateMachine::GetEndTime()
-{
-  if (mEndTime == -1 && mDurationSet) {
-    return INT64_MAX;
   }
-  return mEndTime;
+
+  return mDuration.ref().ToMicroseconds();
 }
 
 void MediaDecoderStateMachine::RecomputeDuration()
 {
   MOZ_ASSERT(OnTaskQueue());
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
 
   // We dispatch DurationChanged to the MediaDecoder when the duration changes
@@ -1427,21 +1418,20 @@ void MediaDecoderStateMachine::Recompute
   } else {
     return;
   }
 
   if (duration < mObservedDuration.Ref()) {
     duration = mObservedDuration;
     fireDurationChanged = true;
   }
-  fireDurationChanged = fireDurationChanged && duration.ToMicroseconds() != GetDuration();
+  fireDurationChanged = fireDurationChanged && duration != mDuration.ref();
 
   MOZ_ASSERT(duration.ToMicroseconds() >= 0);
-  mEndTime = duration.IsInfinite() ? -1 : duration.ToMicroseconds();
-  mDurationSet = true;
+  mDuration = Some(duration);
 
   if (fireDurationChanged) {
     nsCOMPtr<nsIRunnable> event =
       NS_NewRunnableMethodWithArg<TimeUnit>(mDecoder, &MediaDecoder::DurationChanged, duration);
     AbstractThread::MainThread()->Dispatch(event.forget());
   }
 }
 
@@ -1663,36 +1653,36 @@ void MediaDecoderStateMachine::Logically
 
 void MediaDecoderStateMachine::NotifyDataArrived(const char* aBuffer,
                                                      uint32_t aLength,
                                                      int64_t aOffset)
 {
   NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
   mReader->NotifyDataArrived(aBuffer, aLength, aOffset);
 
-  // While playing an unseekable stream of unknown duration, mEndTime is
+  // While playing an unseekable stream of unknown duration, mDuration is
   // updated (in AdvanceFrame()) as we play. But if data is being downloaded
-  // faster than played, mEndTime won't reflect the end of playable data
+  // faster than played, mDuration won't reflect the end of playable data
   // since we haven't played the frame at the end of buffered data. So update
-  // mEndTime here as new data is downloaded to prevent such a lag.
+  // mDuration here as new data is downloaded to prevent such a lag.
   //
   // Make sure to only do this if we have a start time, otherwise the reader
   // doesn't know how to compute GetBuffered.
   if (!mDecoder->IsInfinite() || !HaveStartTime())
   {
     return;
   }
 
   media::TimeIntervals buffered{mDecoder->GetBuffered()};
   if (!buffered.IsInvalid()) {
     bool exists;
     media::TimeUnit end{buffered.GetEnd(&exists)};
     if (exists) {
       ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
-      mEndTime = std::max<int64_t>(mEndTime, end.ToMicroseconds());
+      mDuration = Some(std::max<TimeUnit>(mDuration.ref(), end));
     }
   }
 }
 
 nsRefPtr<MediaDecoder::SeekPromise>
 MediaDecoderStateMachine::Seek(SeekTarget aTarget)
 {
   MOZ_ASSERT(OnTaskQueue());
@@ -1830,17 +1820,17 @@ MediaDecoderStateMachine::InitiateSeek()
 {
   MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
 
   mCurrentSeek.RejectIfExists(__func__);
   mCurrentSeek.Steal(mPendingSeek);
 
   // Bound the seek time to be inside the media range.
-  int64_t end = GetEndTime();
+  int64_t end = Duration().ToMicroseconds();
   NS_ASSERTION(end != -1, "Should know end time by now");
   int64_t seekTime = mCurrentSeek.mTarget.mTime;
   seekTime = std::min(seekTime, end);
   seekTime = std::max(int64_t(0), seekTime);
   NS_ASSERTION(seekTime >= 0 && seekTime <= end,
                "Can only seek in range [0,duration]");
   mCurrentSeek.mTarget.mTime = seekTime;
 
@@ -1871,17 +1861,17 @@ MediaDecoderStateMachine::InitiateSeek()
 
   // Reset our state machine and decoding pipeline before seeking.
   Reset();
 
   // Do the seek.
   nsRefPtr<MediaDecoderStateMachine> self = this;
   mSeekRequest.Begin(ProxyMediaCall(DecodeTaskQueue(), mReader.get(), __func__,
                                     &MediaDecoderReader::Seek, mCurrentSeek.mTarget.mTime,
-                                    GetEndTime())
+                                    Duration().ToMicroseconds())
     ->Then(TaskQueue(), __func__,
            [self] (int64_t) -> void {
              ReentrantMonitorAutoEnter mon(self->mDecoder->GetReentrantMonitor());
              self->mSeekRequest.Complete();
              // We must decode the first samples of active streams, so we can determine
              // the new stream time. So dispatch tasks to do that.
              self->mDecodeToSeekTarget = true;
              self->DispatchDecodeTasksIfNeeded();
@@ -2082,17 +2072,17 @@ bool MediaDecoderStateMachine::HasLowUnd
   MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
   NS_ASSERTION(mState > DECODER_STATE_DECODING_FIRSTFRAME,
                "Must have loaded first frame for GetBuffered() to work");
 
   // If we don't have a duration, GetBuffered is probably not going to produce
   // a useful buffered range. Return false here so that we don't get stuck in
   // buffering mode for live streams.
-  if (GetDuration() < 0) {
+  if (Duration().IsInfinite()) {
     return false;
   }
 
   media::TimeIntervals buffered{mReader->GetBuffered()};
   if (buffered.IsInvalid()) {
     return false;
   }
 
@@ -2103,22 +2093,22 @@ bool MediaDecoderStateMachine::HasLowUnd
   int64_t endOfDecodedAudioData = INT64_MAX;
   if (HasAudio() && !AudioQueue().AtEndOfStream()) {
     // mDecodedAudioEndTime could be -1 when no audio samples are decoded.
     // But that is fine since we consider ourself as low in decoded data when
     // we don't have any decoded audio samples at all.
     endOfDecodedAudioData = mDecodedAudioEndTime;
   }
   int64_t endOfDecodedData = std::min(endOfDecodedVideoData, endOfDecodedAudioData);
-  if (GetDuration() < endOfDecodedData) {
+  if (Duration().ToMicroseconds() < endOfDecodedData) {
     // Our duration is not up to date. No point buffering.
     return false;
   }
   media::TimeInterval interval(media::TimeUnit::FromMicroseconds(endOfDecodedData),
-                               media::TimeUnit::FromMicroseconds(std::min(endOfDecodedData + aUsecs, GetDuration())));
+                               media::TimeUnit::FromMicroseconds(std::min(endOfDecodedData + aUsecs, Duration().ToMicroseconds())));
   return endOfDecodedData != INT64_MAX && !buffered.Contains(interval);
 }
 
 void
 MediaDecoderStateMachine::DecodeError()
 {
   MOZ_ASSERT(OnTaskQueue());
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
@@ -2197,17 +2187,17 @@ MediaDecoderStateMachine::OnMetadataRead
   mDecoder->StartProgressUpdates();
 
   // In general, we wait until we know the duration before notifying the decoder.
   // However, we notify  unconditionally in this case without waiting for the start
   // time, since the caller might be waiting on metadataloaded to be fired before
   // feeding in the CDM, which we need to decode the first frame (and
   // thus get the metadata). We could fix this if we could compute the start
   // time by demuxing without necessaring decoding.
-  mNotifyMetadataBeforeFirstFrame = mDurationSet || mReader->IsWaitingOnCDMResource();
+  mNotifyMetadataBeforeFirstFrame = mDuration.isSome() || mReader->IsWaitingOnCDMResource();
   if (mNotifyMetadataBeforeFirstFrame) {
     EnqueueLoadedMetadataEvent();
   }
 
   if (mReader->IsWaitingOnCDMResource()) {
     // Metadata parsing was successful but we're still waiting for CDM caps
     // to become available so that we can build the correct decryptor/decoder.
     SetState(DECODER_STATE_WAIT_FOR_CDM);
@@ -2342,23 +2332,24 @@ MediaDecoderStateMachine::FinishDecodeFi
 
   if (!IsRealTime() && !mSentFirstFrameLoadedEvent) {
     if (VideoQueue().GetSize()) {
       ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
       RenderVideoFrame(VideoQueue().PeekFront(), TimeStamp::Now());
     }
   }
 
-  MOZ_ASSERT(!(mDecoder->IsMediaSeekable() && mDecoder->IsTransportSeekable()) ||
-               (GetDuration() != -1) || mDurationSet,
-             "Seekable media should have duration");
-  DECODER_LOG("Media goes from %lld to %lld (duration %lld) "
+  // If we don't know the duration by this point, we assume infinity, per spec.
+  if (mDuration.isNothing()) {
+    mDuration.emplace(TimeUnit::FromInfinity());
+  }
+
+  DECODER_LOG("Media duration %lld, "
               "transportSeekable=%d, mediaSeekable=%d",
-              0, mEndTime, GetDuration(),
-              mDecoder->IsTransportSeekable(), mDecoder->IsMediaSeekable());
+              Duration().ToMicroseconds(), mDecoder->IsTransportSeekable(), mDecoder->IsMediaSeekable());
 
   if (HasAudio() && !HasVideo()) {
     // We're playing audio only. We don't need to worry about slow video
     // decodes causing audio underruns, so don't buffer so much audio in
     // order to reduce memory usage.
     mAmpleAudioThresholdUsecs /= NO_VIDEO_AMPLE_AUDIO_DIVISOR;
     mLowAudioThresholdUsecs /= NO_VIDEO_AMPLE_AUDIO_DIVISOR;
     mQuickBufferingLowDataThresholdUsecs /= NO_VIDEO_AMPLE_AUDIO_DIVISOR;
@@ -2404,17 +2395,17 @@ MediaDecoderStateMachine::SeekCompleted(
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   MOZ_ASSERT(mState == DECODER_STATE_SEEKING);
 
   int64_t seekTime = mCurrentSeek.mTarget.mTime;
   int64_t newCurrentTime = seekTime;
 
   // Setup timestamp state.
   nsRefPtr<VideoData> video = VideoQueue().PeekFront();
-  if (seekTime == mEndTime) {
+  if (seekTime == Duration().ToMicroseconds()) {
     newCurrentTime = mAudioStartTime = seekTime;
   } else if (HasAudio()) {
     AudioData* audio = AudioQueue().PeekFront();
     // Though we adjust the newCurrentTime in audio-based, and supplemented
     // by video. For better UX, should NOT bind the slide position to
     // the first audio data timestamp directly.
     // While seeking to a position where there's only either audio or video, or
     // seeking to a position lies before audio or video, we need to check if
@@ -2435,17 +2426,17 @@ MediaDecoderStateMachine::SeekCompleted(
   // if we need to seek again.
 
   bool isLiveStream = mDecoder->GetResource()->IsLiveStream();
   if (mPendingSeek.Exists()) {
     // A new seek target came in while we were processing the old one. No rest
     // for the seeking.
     DECODER_LOG("A new seek came along while we were finishing the old one - staying in SEEKING");
     SetState(DECODER_STATE_SEEKING);
-  } else if (GetMediaTime() == mEndTime && !isLiveStream) {
+  } else if (GetMediaTime() == Duration().ToMicroseconds() && !isLiveStream) {
     // Seeked to end of media, move to COMPLETED state. Note we don't do
     // this if we're playing a live stream, since the end of media will advance
     // once we download more data!
     DECODER_LOG("Changed state from SEEKING (to %lld) to COMPLETED", seekTime);
     // Explicitly set our state so we don't decode further, and so
     // we report playback ended to the media element.
     SetState(DECODER_STATE_COMPLETED);
     DispatchDecodeTasksIfNeeded();
@@ -2699,17 +2690,17 @@ nsresult MediaDecoderStateMachine::RunSt
       }
 
       StopAudioThread();
 
       if (mPlayState == MediaDecoder::PLAY_STATE_PLAYING &&
           !mSentPlaybackEndedEvent)
       {
         int64_t clockTime = std::max(mAudioEndTime, mVideoFrameEndTime);
-        clockTime = std::max(int64_t(0), std::max(clockTime, mEndTime));
+        clockTime = std::max(int64_t(0), std::max(clockTime, Duration().ToMicroseconds()));
         UpdatePlaybackPosition(clockTime);
 
         nsCOMPtr<nsIRunnable> event =
           NS_NewRunnableMethod(mDecoder, &MediaDecoder::PlaybackEnded);
         AbstractThread::MainThread()->Dispatch(event.forget());
 
         mSentPlaybackEndedEvent = true;
       }
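
Editor's note: for code outside the state machine, GetDuration() keeps its legacy microsecond contract (-1 when no finite duration is known), while internal callers drop GetEndTime() in favour of Duration(). A rough sketch of that mapping, reusing the stand-in TimeUnit from the note above; the free-function form and name are illustrative, not the Gecko signature.

#include <cstdint>
#include <optional>

// Legacy mapping performed by the new GetDuration(): both "unknown" and
// "infinite" surface as -1 to old callers.
int64_t LegacyGetDuration(const std::optional<TimeUnit>& aDuration) {
  if (!aDuration || aDuration->IsInfinite()) {
    return -1;  // old sentinel for "no usable finite duration"
  }
  return aDuration->ToMicroseconds();
}

// Internal call sites migrate roughly like this (illustrative only):
//   before:  int64_t end = GetEndTime();                // INT64_MAX if infinite
//   after:   int64_t end = Duration().ToMicroseconds(); // infinity kept inside TimeUnit
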
--- a/dom/media/MediaDecoderStateMachine.h
+++ b/dom/media/MediaDecoderStateMachine.h
@@ -185,22 +185,16 @@ public:
   void FinishShutdown();
 
   bool IsRealTime() const;
 
   // Called from the main thread to get the duration. The decoder monitor
   // must be obtained before calling this. It is in units of microseconds.
   int64_t GetDuration();
 
-  // Time of the last frame in the media, in microseconds or INT64_MAX if
-  // media has an infinite duration.
-  // Accessed on state machine, decode, and main threads.
-  // Access controlled by decoder monitor.
-  int64_t GetEndTime();
-
   // Functions used by assertions to ensure we're calling things
   // on the appropriate threads.
   bool OnDecodeTaskQueue() const;
   bool OnTaskQueue() const;
 
   // Seeks to the decoder to aTarget asynchronously.
   // Must be called on the state machine thread.
   nsRefPtr<MediaDecoder::SeekPromise> Seek(SeekTarget aTarget);
@@ -942,30 +936,24 @@ public:
   // by decoder monitor.
   int64_t mPlayDuration;
 
   // Time that buffering started. Used for buffering timeout and only
   // accessed on the state machine thread. This is null while we're not
   // buffering.
   TimeStamp mBufferingStart;
 
-  // Time of the last frame in the media, in microseconds. This is the
-  // end time of the last frame in the media. Accessed on state
-  // machine, decode, and main threads. Access controlled by decoder monitor.
-  // It will be set to -1 if the duration is infinite
-  int64_t mEndTime;
+  // Duration of the media. This is guaranteed to be non-null after we finish
+  // decoding the first frame.
+  media::NullableTimeUnit mDuration;
+  media::TimeUnit Duration() const { MOZ_ASSERT(OnTaskQueue()); return mDuration.ref(); }
 
   // Recomputes the canonical duration from various sources.
   void RecomputeDuration();
 
-  // Will be set when SetDuration has been called with a value != -1
-  // mDurationSet false doesn't indicate that we do not have a valid duration
-  // as mStartTime and mEndTime could have been set separately.
-  bool mDurationSet;
-
   // The duration according to the demuxer's current estimate, mirrored from the main thread.
   Mirror<media::NullableTimeUnit> mEstimatedDuration;
 
   // The duration explicitly set by JS, mirrored from the main thread.
   Mirror<Maybe<double>> mExplicitDuration;
 
   // The highest timestamp that our position has reached. Monotonically
   // increasing.
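
Editor's note: the .h hunk also tightens the contract around the new member: Duration() asserts it runs on the state-machine task queue and assumes mDuration is set, and FinishDecodeFirstFrame() guarantees that by falling back to infinity when the duration is still unknown, per spec. Restated with the stand-in types from the first note; the wrapper name and trivial thread check are hypothetical.

#include <cassert>
#include <optional>

// Hypothetical wrapper, only to restate the contract of the new accessor.
struct DurationState {
  std::optional<TimeUnit> mDuration;          // Nothing until the duration is computed
  bool OnTaskQueue() const { return true; }   // stands in for the MDSM thread check

  TimeUnit Duration() const {
    assert(OnTaskQueue());                    // real code: MOZ_ASSERT(OnTaskQueue())
    return *mDuration;                        // real code: mDuration.ref()
  }

  void FinishDecodeFirstFrame() {
    if (!mDuration) {
      mDuration = TimeUnit::FromInfinity();   // unknown duration => infinity, per spec
    }
  }
};
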