Backed out 8 changesets (bug 1163223) for getting in the way of me backing out f46a712edf7e
author Wes Kocher <wkocher@mozilla.com>
Tue, 16 Jun 2015 14:47:50 -0700
changeset 249186 3fe20c75349b9918ed0f80bd51fd034fabad2dea
parent 249185 df2676f8ee8b5e07f8a6d58bd1de4a2023c36a9e
child 249187 bbeaa22dfafe54b1d15a89092c151d9424c0679f
push id 61165
push user kwierso@gmail.com
push date Tue, 16 Jun 2015 21:48:24 +0000
treeherder mozilla-inbound@bbeaa22dfafe [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
bugs 1163223
milestone 41.0a1
backs out c1b33c43f0c5debb3968d236c32b80a86d79c13a
a7ee6eb45f625048027957010c4b952685ff31c0
b2e10f194455405977e0eebf0c067440c2681681
9e7651567cad4cfd136462bac75de2d4db27962e
20e25e93ed5f4db31b37114d35290dd465ce6233
5193508738f8406b8998f59e702326034b745b76
aea6b8d1531852ac1a978bafb4107a218b8415d8
7b6804398fc3aeeea8b61146a88748ce432afc50
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Backed out 8 changesets (bug 1163223) for getting in the way of me backing out f46a712edf7e Backed out changeset c1b33c43f0c5 (bug 1163223) Backed out changeset a7ee6eb45f62 (bug 1163223) Backed out changeset b2e10f194455 (bug 1163223) Backed out changeset 9e7651567cad (bug 1163223) Backed out changeset 20e25e93ed5f (bug 1163223) Backed out changeset 5193508738f8 (bug 1163223) Backed out changeset aea6b8d15318 (bug 1163223) Backed out changeset 7b6804398fc3 (bug 1163223)
dom/media/MediaData.cpp
dom/media/MediaData.h
dom/media/MediaDecoderReader.cpp
dom/media/MediaDecoderReader.h
dom/media/MediaDecoderStateMachine.cpp
dom/media/MediaDecoderStateMachine.h
dom/media/MediaFormatReader.cpp
dom/media/MediaFormatReader.h
dom/media/MediaInfo.h
dom/media/fmp4/MP4Reader.cpp
dom/media/mediasource/MediaSourceReader.h
dom/media/ogg/OggReader.cpp
dom/media/test/test_buffered.html
dom/media/webm/WebMReader.cpp
--- a/dom/media/MediaData.cpp
+++ b/dom/media/MediaData.cpp
@@ -20,19 +20,16 @@
 
 namespace mozilla {
 
 using namespace mozilla::gfx;
 using layers::ImageContainer;
 using layers::PlanarYCbCrImage;
 using layers::PlanarYCbCrData;
 
-const char* AudioData::sTypeName = "audio";
-const char* VideoData::sTypeName = "video";
-
 void
 AudioData::EnsureAudioBuffer()
 {
   if (mAudioBuffer)
     return;
   mAudioBuffer = SharedBuffer::Create(mFrames*mChannels*sizeof(AudioDataValue));
 
   AudioDataValue* data = static_cast<AudioDataValue*>(mAudioBuffer->Data());
@@ -107,30 +104,30 @@ IsInEmulator()
 }
 
 #endif
 
 VideoData::VideoData(int64_t aOffset,
                      int64_t aTime,
                      int64_t aDuration,
                      int64_t aTimecode)
-  : MediaData(sType, aOffset, aTime, aDuration)
+  : MediaData(VIDEO_DATA, aOffset, aTime, aDuration)
   , mDuplicate(true)
 {
   NS_ASSERTION(mDuration >= 0, "Frame must have non-negative duration.");
   mTimecode = aTimecode;
 }
 
 VideoData::VideoData(int64_t aOffset,
                      int64_t aTime,
                      int64_t aDuration,
                      bool aKeyframe,
                      int64_t aTimecode,
                      IntSize aDisplay)
-  : MediaData(sType, aOffset, aTime, aDuration)
+  : MediaData(VIDEO_DATA, aOffset, aTime, aDuration)
   , mDisplay(aDisplay)
   , mDuplicate(false)
 {
   NS_ASSERTION(mDuration >= 0, "Frame must have non-negative duration.");
   mKeyframe = aKeyframe;
   mTimecode = aTimecode;
 }
 
--- a/dom/media/MediaData.h
+++ b/dom/media/MediaData.h
@@ -68,21 +68,16 @@ public:
   bool mKeyframe;
 
   // True if this is the first sample after a gap or discontinuity in
   // the stream. This is true for the first sample in a stream after a seek.
   bool mDiscontinuity;
 
   int64_t GetEndTime() const { return mTime + mDuration; }
 
-  bool AdjustForStartTime(int64_t aStartTime)
-  {
-    mTime = mTime - aStartTime;
-    return mTime >= 0;
-  }
 protected:
   explicit MediaData(Type aType)
     : mType(aType)
     , mOffset(0)
     , mTime(0)
     , mTimecode(0)
     , mDuration(0)
     , mKeyframe(false)
@@ -99,25 +94,22 @@ public:
 
   AudioData(int64_t aOffset,
             int64_t aTime,
             int64_t aDuration,
             uint32_t aFrames,
             AudioDataValue* aData,
             uint32_t aChannels,
             uint32_t aRate)
-    : MediaData(sType, aOffset, aTime, aDuration)
+    : MediaData(AUDIO_DATA, aOffset, aTime, aDuration)
     , mFrames(aFrames)
     , mChannels(aChannels)
     , mRate(aRate)
     , mAudioData(aData) {}
 
-  static const Type sType = AUDIO_DATA;
-  static const char* sTypeName;
-
   // Creates a new VideoData identical to aOther, but with a different
   // specified timestamp and duration. All data from aOther is copied
   // into the new AudioData but the audio data which is transferred.
   // After such call, the original aOther is unusable.
   static already_AddRefed<AudioData>
   TransferAndUpdateTimestampAndDuration(AudioData* aOther,
                                         int64_t aTimestamp,
                                         int64_t aDuration);
@@ -151,19 +143,16 @@ class VideoInfo;
 class VideoData : public MediaData {
 public:
   typedef gfx::IntRect IntRect;
   typedef gfx::IntSize IntSize;
   typedef layers::ImageContainer ImageContainer;
   typedef layers::Image Image;
   typedef layers::PlanarYCbCrImage PlanarYCbCrImage;
 
-  static const Type sType = VIDEO_DATA;
-  static const char* sTypeName;
-
   // YCbCr data obtained from decoding the video. The index's are:
   //   0 = Y
   //   1 = Cb
   //   2 = Cr
   struct YCbCrBuffer {
     struct Plane {
       uint8_t* mData;
       uint32_t mWidth;
--- a/dom/media/MediaDecoderReader.cpp
+++ b/dom/media/MediaDecoderReader.cpp
@@ -143,33 +143,45 @@ VideoData* MediaDecoderReader::DecodeToF
   VideoData* d = nullptr;
   return (d = VideoQueue().PeekFront()) ? d : nullptr;
 }
 
 void
 MediaDecoderReader::SetStartTime(int64_t aStartTime)
 {
   mDecoder->GetReentrantMonitor().AssertCurrentThreadIn();
-  MOZ_ASSERT(mStartTime == -1);
   mStartTime = aStartTime;
 }
 
 media::TimeIntervals
 MediaDecoderReader::GetBuffered()
 {
-  NS_ENSURE_TRUE(mStartTime >= 0, media::TimeIntervals());
   AutoPinned<MediaResource> stream(mDecoder->GetResource());
   int64_t durationUs = 0;
   {
     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
     durationUs = mDecoder->GetMediaDuration();
   }
   return GetEstimatedBufferedTimeRanges(stream, durationUs);
 }
 
+int64_t
+MediaDecoderReader::ComputeStartTime(const VideoData* aVideo, const AudioData* aAudio)
+{
+  int64_t startTime = std::min<int64_t>(aAudio ? aAudio->mTime : INT64_MAX,
+                                        aVideo ? aVideo->mTime : INT64_MAX);
+  if (startTime == INT64_MAX) {
+    startTime = 0;
+  }
+  DECODER_LOG("ComputeStartTime first video frame start %lld", aVideo ? aVideo->mTime : -1);
+  DECODER_LOG("ComputeStartTime first audio frame start %lld", aAudio ? aAudio->mTime : -1);
+  NS_ASSERTION(startTime >= 0, "Start time is negative");
+  return startTime;
+}
+
 nsRefPtr<MediaDecoderReader::MetadataPromise>
 MediaDecoderReader::AsyncReadMetadata()
 {
   typedef ReadMetadataFailureReason Reason;
 
   MOZ_ASSERT(OnTaskQueue());
   mDecoder->GetReentrantMonitor().AssertNotCurrentThreadIn();
   DECODER_LOG("MediaDecoderReader::AsyncReadMetadata");
--- a/dom/media/MediaDecoderReader.h
+++ b/dom/media/MediaDecoderReader.h
@@ -208,18 +208,17 @@ public:
   // The primary advantage of this implementation in the reader base class
   // is that it's a fast approximation, which does not perform any I/O.
   //
   // The OggReader relies on this base implementation not performing I/O,
   // since in FirefoxOS we can't do I/O on the main thread, where this is
   // called.
   virtual media::TimeIntervals GetBuffered();
 
-  // MediaSourceReader opts out of the start-time-guessing mechanism.
-  virtual bool ForceZeroStartTime() const { return false; }
+  virtual int64_t ComputeStartTime(const VideoData* aVideo, const AudioData* aAudio);
 
   // The MediaDecoderStateMachine uses various heuristics that assume that
   // raw media data is arriving sequentially from a network channel. This
   // makes sense in the <video src="foo"> case, but not for more advanced use
   // cases like MSE.
   virtual bool UseBufferingHeuristics() { return true; }
 
   // Returns the number of bytes of memory allocated by structures/frames in
@@ -321,23 +320,17 @@ protected:
   // Whether we should accept media that we know we can't play
   // directly, because they have a number of channel higher than
   // what we support.
   bool mIgnoreAudioOutputFormat;
 
   // The start time of the media, in microseconds. This is the presentation
   // time of the first frame decoded from the media. This is initialized to -1,
   // and then set to a value >= by MediaDecoderStateMachine::SetStartTime(),
-  // after which point it never changes (though SetStartTime may be called
-  // multiple times with the same value).
-  //
-  // This is an ugly breach of abstractions - it's currently necessary for the
-  // readers to return the correct value of GetBuffered. We should refactor
-  // things such that all GetBuffered calls go through the MDSM, which would
-  // offset the range accordingly.
+  // after which point it never changes.
   int64_t mStartTime;
 
   // This is a quick-and-dirty way for DecodeAudioData implementations to
   // communicate the presence of a decoding error to RequestAudioData. We should
   // replace this with a promise-y mechanism as we make this stuff properly
   // async.
   bool mHitAudioDecodeError;
   bool mShutdown;
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -51,16 +51,18 @@ using namespace mozilla::media;
 
 #define NS_DispatchToMainThread(...) CompileError_UseAbstractThreadDispatchInstead
 
 // avoid redefined macro in unified build
 #undef LOG
 #undef DECODER_LOG
 #undef VERBOSE_LOG
 
+extern PRLogModuleInfo* gMediaDecoderLog;
+extern PRLogModuleInfo* gMediaSampleLog;
 #define LOG(m, l, x, ...) \
   MOZ_LOG(m, l, ("Decoder=%p " x, mDecoder.get(), ##__VA_ARGS__))
 #define DECODER_LOG(x, ...) \
   LOG(gMediaDecoderLog, LogLevel::Debug, x, ##__VA_ARGS__)
 #define VERBOSE_LOG(x, ...) \
   LOG(gMediaDecoderLog, LogLevel::Verbose, x, ##__VA_ARGS__)
 #define SAMPLE_LOG(x, ...) \
   LOG(gMediaSampleLog, LogLevel::Debug, x, ##__VA_ARGS__)
@@ -182,16 +184,17 @@ MediaDecoderStateMachine::MediaDecoderSt
   mTaskQueue(new MediaTaskQueue(GetMediaThreadPool(MediaThreadType::PLAYBACK),
                                 /* aSupportsTailDispatch = */ true)),
   mWatchManager(this, mTaskQueue),
   mRealTime(aRealTime),
   mDispatchedStateMachine(false),
   mDelayedScheduler(this),
   mState(DECODER_STATE_DECODING_NONE, "MediaDecoderStateMachine::mState"),
   mPlayDuration(0),
+  mStartTime(-1),
   mEndTime(-1),
   mDurationSet(false),
   mEstimatedDuration(mTaskQueue, NullableTimeUnit(),
                     "MediaDecoderStateMachine::EstimatedDuration (Mirror)"),
   mExplicitDuration(mTaskQueue, Maybe<double>(),
                     "MediaDecoderStateMachine::mExplicitDuration (Mirror)"),
   mObservedDuration(TimeUnit(), "MediaDecoderStateMachine::mObservedDuration"),
   mPlayState(mTaskQueue, MediaDecoder::PLAY_STATE_LOADING,
@@ -200,35 +203,35 @@ MediaDecoderStateMachine::MediaDecoderSt
                  "MediaDecoderStateMachine::mNextPlayState (Mirror)"),
   mLogicallySeeking(mTaskQueue, false,
              "MediaDecoderStateMachine::mLogicallySeeking (Mirror)"),
   mNextFrameStatus(mTaskQueue, MediaDecoderOwner::NEXT_FRAME_UNINITIALIZED,
                    "MediaDecoderStateMachine::mNextFrameStatus (Canonical)"),
   mFragmentEndTime(-1),
   mReader(aReader),
   mCurrentPosition(mTaskQueue, 0, "MediaDecoderStateMachine::mCurrentPosition (Canonical)"),
-  mStreamStartTime(0),
-  mAudioStartTime(0),
+  mStreamStartTime(-1),
+  mAudioStartTime(-1),
   mAudioEndTime(-1),
   mDecodedAudioEndTime(-1),
   mVideoFrameEndTime(-1),
   mDecodedVideoEndTime(-1),
   mVolume(mTaskQueue, 1.0, "MediaDecoderStateMachine::mVolume (Mirror)"),
   mPlaybackRate(1.0),
   mLogicalPlaybackRate(mTaskQueue, 1.0, "MediaDecoderStateMachine::mLogicalPlaybackRate (Mirror)"),
   mPreservesPitch(mTaskQueue, true, "MediaDecoderStateMachine::mPreservesPitch (Mirror)"),
   mLowAudioThresholdUsecs(detail::LOW_AUDIO_USECS),
   mAmpleAudioThresholdUsecs(detail::AMPLE_AUDIO_USECS),
   mQuickBufferingLowDataThresholdUsecs(detail::QUICK_BUFFERING_LOW_DATA_USECS),
   mIsAudioPrerolling(false),
   mIsVideoPrerolling(false),
   mAudioCaptured(false),
   mPositionChangeQueued(false),
   mAudioCompleted(false, "MediaDecoderStateMachine::mAudioCompleted"),
-  mNotifyMetadataBeforeFirstFrame(false),
+  mGotDurationFromMetaData(false),
   mDispatchedEventToDecode(false),
   mQuickBuffering(false),
   mMinimizePreroll(false),
   mDecodeThreadWaiting(false),
   mDropAudioUntilNextDiscontinuity(false),
   mDropVideoUntilNextDiscontinuity(false),
   mDecodeToSeekTarget(false),
   mCurrentTimeBeforeSeek(0),
@@ -757,17 +760,16 @@ MediaDecoderStateMachine::IsVideoSeekCom
 void
 MediaDecoderStateMachine::OnAudioDecoded(AudioData* aAudioSample)
 {
   MOZ_ASSERT(OnTaskQueue());
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   nsRefPtr<AudioData> audio(aAudioSample);
   MOZ_ASSERT(audio);
   mAudioDataRequest.Complete();
-  aAudioSample->AdjustForStartTime(StartTime());
   mDecodedAudioEndTime = audio->GetEndTime();
 
   SAMPLE_LOG("OnAudioDecoded [%lld,%lld] disc=%d",
              (audio ? audio->mTime : -1),
              (audio ? audio->GetEndTime() : -1),
              (audio ? audio->mDiscontinuity : 0));
 
   switch (mState) {
@@ -1038,19 +1040,17 @@ MediaDecoderStateMachine::MaybeFinishDec
 }
 
 void
 MediaDecoderStateMachine::OnVideoDecoded(VideoData* aVideoSample)
 {
   MOZ_ASSERT(OnTaskQueue());
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   nsRefPtr<VideoData> video(aVideoSample);
-  MOZ_ASSERT(video);
   mVideoDataRequest.Complete();
-  aVideoSample->AdjustForStartTime(StartTime());
   mDecodedVideoEndTime = video ? video->GetEndTime() : mDecodedVideoEndTime;
 
   SAMPLE_LOG("OnVideoDecoded [%lld,%lld] disc=%d",
              (video ? video->mTime : -1),
              (video ? video->GetEndTime() : -1),
              (video ? video->mDiscontinuity : 0));
 
   switch (mState) {
@@ -1251,17 +1251,17 @@ void MediaDecoderStateMachine::StopPlayb
   MOZ_ASSERT(OnTaskQueue());
   DECODER_LOG("StopPlayback()");
 
   AssertCurrentThreadInMonitor();
 
   mDecoder->NotifyPlaybackStopped();
 
   if (IsPlaying()) {
-    mPlayDuration = GetClock();
+    mPlayDuration = GetClock() - mStartTime;
     SetPlayStartTime(TimeStamp());
   }
   // Notify the audio sink, so that it notices that we've stopped playing,
   // so it can pause audio playback.
   mDecoder->GetReentrantMonitor().NotifyAll();
   NS_ASSERTION(!IsPlaying(), "Should report not playing at end of StopPlayback()");
 
   DispatchDecodeTasksIfNeeded();
@@ -1301,20 +1301,21 @@ void MediaDecoderStateMachine::MaybeStar
 
   mDecoder->GetReentrantMonitor().NotifyAll();
   DispatchDecodeTasksIfNeeded();
 }
 
 void MediaDecoderStateMachine::UpdatePlaybackPositionInternal(int64_t aTime)
 {
   MOZ_ASSERT(OnTaskQueue());
-  SAMPLE_LOG("UpdatePlaybackPositionInternal(%lld)", aTime);
+  SAMPLE_LOG("UpdatePlaybackPositionInternal(%lld) (mStartTime=%lld)", aTime, mStartTime);
   AssertCurrentThreadInMonitor();
 
-  mCurrentPosition = aTime;
+  NS_ASSERTION(mStartTime >= 0, "Should have positive mStartTime");
+  mCurrentPosition = aTime - mStartTime;
   NS_ASSERTION(mCurrentPosition >= 0, "CurrentTime should be positive!");
   mObservedDuration = std::max(mObservedDuration.Ref(),
                                TimeUnit::FromMicroseconds(mCurrentPosition.Ref()));
 }
 
 void MediaDecoderStateMachine::UpdatePlaybackPosition(int64_t aTime)
 {
   MOZ_ASSERT(OnTaskQueue());
@@ -1380,19 +1381,19 @@ bool MediaDecoderStateMachine::IsRealTim
 {
   return mRealTime;
 }
 
 int64_t MediaDecoderStateMachine::GetDuration()
 {
   AssertCurrentThreadInMonitor();
 
-  if (mEndTime == -1)
+  if (mEndTime == -1 || mStartTime == -1)
     return -1;
-  return mEndTime;
+  return mEndTime - mStartTime;
 }
 
 int64_t MediaDecoderStateMachine::GetEndTime()
 {
   if (mEndTime == -1 && mDurationSet) {
     return INT64_MAX;
   }
   return mEndTime;
@@ -1419,42 +1420,61 @@ void MediaDecoderStateMachine::Recompute
     // We don't fire duration changed for this case because it should have
     // already been fired on the main thread when the explicit duration was set.
     duration = TimeUnit::FromSeconds(d);
   } else if (mEstimatedDuration.Ref().isSome()) {
     duration = mEstimatedDuration.Ref().ref();
     fireDurationChanged = true;
   } else if (mInfo.mMetadataDuration.isSome()) {
     duration = mInfo.mMetadataDuration.ref();
+  } else if (mInfo.mMetadataEndTime.isSome() && mStartTime >= 0) {
+    duration = mInfo.mMetadataEndTime.ref() - TimeUnit::FromMicroseconds(mStartTime);
   } else {
     return;
   }
 
   if (duration < mObservedDuration.Ref()) {
     duration = mObservedDuration;
     fireDurationChanged = true;
   }
+
   fireDurationChanged = fireDurationChanged && duration.ToMicroseconds() != GetDuration();
-
-  MOZ_ASSERT(duration.ToMicroseconds() >= 0);
-  mEndTime = duration.IsInfinite() ? -1 : duration.ToMicroseconds();
-  mDurationSet = true;
+  SetDuration(duration);
 
   if (fireDurationChanged) {
     nsCOMPtr<nsIRunnable> event =
       NS_NewRunnableMethodWithArg<TimeUnit>(mDecoder, &MediaDecoder::DurationChanged, duration);
     AbstractThread::MainThread()->Dispatch(event.forget());
   }
 }
 
+void MediaDecoderStateMachine::SetDuration(TimeUnit aDuration)
+{
+  MOZ_ASSERT(OnTaskQueue());
+  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
+  MOZ_ASSERT(aDuration.ToMicroseconds() >= 0);
+  mDurationSet = true;
+
+  if (mStartTime == -1) {
+    SetStartTime(0);
+  }
+
+  if (aDuration.IsInfinite()) {
+    mEndTime = -1;
+    return;
+  }
+
+  mEndTime = mStartTime + aDuration.ToMicroseconds();
+}
+
 void MediaDecoderStateMachine::SetFragmentEndTime(int64_t aEndTime)
 {
   AssertCurrentThreadInMonitor();
 
-  mFragmentEndTime = aEndTime < 0 ? aEndTime : aEndTime;
+  mFragmentEndTime = aEndTime < 0 ? aEndTime : aEndTime + mStartTime;
 }
 
 bool MediaDecoderStateMachine::IsDormantNeeded()
 {
   return mReader->IsDormantNeeded();
 }
 
 void MediaDecoderStateMachine::SetDormant(bool aDormant)
@@ -1543,21 +1563,16 @@ void MediaDecoderStateMachine::Shutdown(
   mCurrentSeek.RejectIfExists(__func__);
 
   if (IsPlaying()) {
     StopPlayback();
   }
 
   Reset();
 
-  // Shut down our start time rendezvous.
-  if (mStartTimeRendezvous) {
-    mStartTimeRendezvous->Destroy();
-  }
-
   // Put a task in the decode queue to shutdown the reader.
   // the queue to spin down.
   ProxyMediaCall(DecodeTaskQueue(), mReader.get(), __func__, &MediaDecoderReader::Shutdown)
     ->Then(TaskQueue(), __func__, this,
            &MediaDecoderStateMachine::FinishShutdown,
            &MediaDecoderStateMachine::FinishShutdown);
   DECODER_LOG("Shutdown started");
 }
@@ -1671,21 +1686,19 @@ void MediaDecoderStateMachine::NotifyDat
   // While playing an unseekable stream of unknown duration, mEndTime is
   // updated (in AdvanceFrame()) as we play. But if data is being downloaded
   // faster than played, mEndTime won't reflect the end of playable data
   // since we haven't played the frame at the end of buffered data. So update
   // mEndTime here as new data is downloaded to prevent such a lag.
   //
   // Make sure to only do this if we have a start time, otherwise the reader
   // doesn't know how to compute GetBuffered.
-  if (!mDecoder->IsInfinite() || !HaveStartTime())
-  {
+  if (!mDecoder->IsInfinite() || mStartTime == -1) {
     return;
   }
-
   media::TimeIntervals buffered{mDecoder->GetBuffered()};
   if (!buffered.IsInvalid()) {
     bool exists;
     media::TimeUnit end{buffered.GetEnd(&exists)};
     if (exists) {
       ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
       mEndTime = std::max<int64_t>(mEndTime, end.ToMicroseconds());
     }
@@ -1831,21 +1844,22 @@ MediaDecoderStateMachine::InitiateSeek()
   MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
 
   mCurrentSeek.RejectIfExists(__func__);
   mCurrentSeek.Steal(mPendingSeek);
 
   // Bound the seek time to be inside the media range.
   int64_t end = GetEndTime();
+  NS_ASSERTION(mStartTime != -1, "Should know start time by now");
   NS_ASSERTION(end != -1, "Should know end time by now");
-  int64_t seekTime = mCurrentSeek.mTarget.mTime;
+  int64_t seekTime = mCurrentSeek.mTarget.mTime + mStartTime;
   seekTime = std::min(seekTime, end);
-  seekTime = std::max(int64_t(0), seekTime);
-  NS_ASSERTION(seekTime >= 0 && seekTime <= end,
+  seekTime = std::max(mStartTime, seekTime);
+  NS_ASSERTION(seekTime >= mStartTime && seekTime <= end,
                "Can only seek in range [0,duration]");
   mCurrentSeek.mTarget.mTime = seekTime;
 
   if (mAudioCaptured) {
     nsCOMPtr<nsIRunnable> r = NS_NewRunnableMethodWithArgs<MediaStreamGraph*>(
       this, &MediaDecoderStateMachine::RecreateDecodedStream, nullptr);
     AbstractThread::MainThread()->Dispatch(r.forget());
   }
@@ -2150,65 +2164,33 @@ MediaDecoderStateMachine::OnMetadataRead
   MOZ_ASSERT(OnTaskQueue());
   MOZ_ASSERT(mState == DECODER_STATE_DECODING_METADATA);
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   mMetadataRequest.Complete();
 
   mDecoder->SetMediaSeekable(mReader->IsMediaSeekable());
   mInfo = aMetadata->mInfo;
   mMetadataTags = aMetadata->mTags.forget();
-  nsRefPtr<MediaDecoderStateMachine> self = this;
-
-  // Set up the start time rendezvous if it doesn't already exist (which is
-  // generally the case, unless we're coming out of dormant mode).
-  if (!mStartTimeRendezvous) {
-    mStartTimeRendezvous = new StartTimeRendezvous(TaskQueue(), HasAudio(), HasVideo(),
-                                                   mReader->ForceZeroStartTime() || IsRealTime());
-
-    mStartTimeRendezvous->AwaitStartTime()->Then(TaskQueue(), __func__,
-      [self] () -> void {
-        NS_ENSURE_TRUE_VOID(!self->IsShutdown());
-        ReentrantMonitorAutoEnter mon(self->mDecoder->GetReentrantMonitor());
-        self->mReader->SetStartTime(self->StartTime());
-      },
-      [] () -> void { NS_WARNING("Setting start time on reader failed"); }
-    );
-  }
-
-  if (mInfo.mMetadataDuration.isSome()) {
+
+  if (mInfo.mMetadataDuration.isSome() || mInfo.mMetadataEndTime.isSome()) {
     RecomputeDuration();
-  } else if (mInfo.mUnadjustedMetadataEndTime.isSome()) {
-    mStartTimeRendezvous->AwaitStartTime()->Then(TaskQueue(), __func__,
-      [self] () -> void {
-        NS_ENSURE_TRUE_VOID(!self->IsShutdown());
-        TimeUnit unadjusted = self->mInfo.mUnadjustedMetadataEndTime.ref();
-        TimeUnit adjustment = TimeUnit::FromMicroseconds(self->StartTime());
-        self->mInfo.mMetadataDuration.emplace(unadjusted - adjustment);
-        self->RecomputeDuration();
-      }, [] () -> void { NS_WARNING("Adjusting metadata end time failed"); }
-    );
   }
 
   if (HasVideo()) {
     DECODER_LOG("Video decode isAsync=%d HWAccel=%d videoQueueSize=%d",
                 mReader->IsAsync(),
                 mReader->VideoIsHardwareAccelerated(),
                 GetAmpleVideoFrames());
   }
 
   mDecoder->StartProgressUpdates();
-
-  // In general, we wait until we know the duration before notifying the decoder.
-  // However, we notify  unconditionally in this case without waiting for the start
-  // time, since the caller might be waiting on metadataloaded to be fired before
-  // feeding in the CDM, which we need to decode the first frame (and
-  // thus get the metadata). We could fix this if we could compute the start
-  // time by demuxing without necessaring decoding.
-  mNotifyMetadataBeforeFirstFrame = mDurationSet || mReader->IsWaitingOnCDMResource();
-  if (mNotifyMetadataBeforeFirstFrame) {
+  mGotDurationFromMetaData = (GetDuration() != -1) || mDurationSet;
+  if (mGotDurationFromMetaData) {
+    // We have all the information required: duration and size
+    // Inform the element that we've loaded the metadata.
     EnqueueLoadedMetadataEvent();
   }
 
   if (mReader->IsWaitingOnCDMResource()) {
     // Metadata parsing was successful but we're still waiting for CDM caps
     // to become available so that we can build the correct decryptor/decoder.
     SetState(DECODER_STATE_WAIT_FOR_CDM);
     return;
@@ -2284,47 +2266,39 @@ nsresult
 MediaDecoderStateMachine::DecodeFirstFrame()
 {
   MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
   MOZ_ASSERT(mState == DECODER_STATE_DECODING_FIRSTFRAME);
   DECODER_LOG("DecodeFirstFrame started");
 
   if (IsRealTime()) {
+    SetStartTime(0);
     nsresult res = FinishDecodeFirstFrame();
     NS_ENSURE_SUCCESS(res, res);
   } else if (mSentFirstFrameLoadedEvent) {
     // We're resuming from dormant state, so we don't need to request
     // the first samples in order to determine the media start time,
     // we have the start time from last time we loaded.
+    SetStartTime(mStartTime);
     nsresult res = FinishDecodeFirstFrame();
     NS_ENSURE_SUCCESS(res, res);
   } else {
     if (HasAudio()) {
-      mAudioDataRequest.Begin(
-        ProxyMediaCall(DecodeTaskQueue(), mReader.get(), __func__,
-                       &MediaDecoderReader::RequestAudioData)
-        ->Then(TaskQueue(), __func__, mStartTimeRendezvous.get(),
-               &StartTimeRendezvous::ProcessFirstSample<AudioDataPromise>,
-               &StartTimeRendezvous::FirstSampleRejected<AudioData>)
-        ->CompletionPromise()
+      mAudioDataRequest.Begin(ProxyMediaCall(DecodeTaskQueue(), mReader.get(),
+                                             __func__, &MediaDecoderReader::RequestAudioData)
         ->Then(TaskQueue(), __func__, this,
                &MediaDecoderStateMachine::OnAudioDecoded,
-               &MediaDecoderStateMachine::OnAudioNotDecoded)
-      );
+               &MediaDecoderStateMachine::OnAudioNotDecoded));
     }
     if (HasVideo()) {
       mVideoDecodeStartTime = TimeStamp::Now();
-      mVideoDataRequest.Begin(
-        ProxyMediaCall(DecodeTaskQueue(), mReader.get(), __func__,
-                       &MediaDecoderReader::RequestVideoData, false, int64_t(0))
-        ->Then(TaskQueue(), __func__, mStartTimeRendezvous.get(),
-               &StartTimeRendezvous::ProcessFirstSample<VideoDataPromise>,
-               &StartTimeRendezvous::FirstSampleRejected<VideoData>)
-        ->CompletionPromise()
+      mVideoDataRequest.Begin(ProxyMediaCall(DecodeTaskQueue(), mReader.get(),
+                                             __func__, &MediaDecoderReader::RequestVideoData, false,
+                                             int64_t(0))
         ->Then(TaskQueue(), __func__, this,
                &MediaDecoderStateMachine::OnVideoDecoded,
                &MediaDecoderStateMachine::OnVideoNotDecoded));
     }
   }
 
   return NS_OK;
 }
@@ -2336,28 +2310,32 @@ MediaDecoderStateMachine::FinishDecodeFi
   AssertCurrentThreadInMonitor();
   DECODER_LOG("FinishDecodeFirstFrame");
 
   if (IsShutdown()) {
     return NS_ERROR_FAILURE;
   }
 
   if (!IsRealTime() && !mSentFirstFrameLoadedEvent) {
+    const VideoData* v = VideoQueue().PeekFront();
+    const AudioData* a = AudioQueue().PeekFront();
+    SetStartTime(mReader->ComputeStartTime(v, a));
     if (VideoQueue().GetSize()) {
       ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
       RenderVideoFrame(VideoQueue().PeekFront(), TimeStamp::Now());
     }
   }
 
+  NS_ASSERTION(mStartTime != -1, "Must have start time");
   MOZ_ASSERT(!(mDecoder->IsMediaSeekable() && mDecoder->IsTransportSeekable()) ||
                (GetDuration() != -1) || mDurationSet,
              "Seekable media should have duration");
   DECODER_LOG("Media goes from %lld to %lld (duration %lld) "
               "transportSeekable=%d, mediaSeekable=%d",
-              0, mEndTime, GetDuration(),
+              mStartTime, mEndTime, GetDuration(),
               mDecoder->IsTransportSeekable(), mDecoder->IsMediaSeekable());
 
   if (HasAudio() && !HasVideo()) {
     // We're playing audio only. We don't need to worry about slow video
     // decodes causing audio underruns, so don't buffer so much audio in
     // order to reduce memory usage.
     mAmpleAudioThresholdUsecs /= NO_VIDEO_AMPLE_AUDIO_DIVISOR;
     mLowAudioThresholdUsecs /= NO_VIDEO_AMPLE_AUDIO_DIVISOR;
@@ -2367,21 +2345,25 @@ MediaDecoderStateMachine::FinishDecodeFi
   // Get potentially updated metadata
   {
     ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
     mReader->ReadUpdatedMetadata(&mInfo);
   }
 
   nsAutoPtr<MediaInfo> info(new MediaInfo());
   *info = mInfo;
-  if (!mNotifyMetadataBeforeFirstFrame) {
-    // If we didn't have duration and/or start time before, we should now.
+  if (!mGotDurationFromMetaData) {
+    // We now have a duration, we can fire the LoadedMetadata and
+    // FirstFrame event.
     EnqueueLoadedMetadataEvent();
+    EnqueueFirstFrameLoadedEvent();
+  } else {
+    // Inform the element that we've loaded the first frame.
+    EnqueueFirstFrameLoadedEvent();
   }
-  EnqueueFirstFrameLoadedEvent();
 
   if (mState == DECODER_STATE_DECODING_FIRSTFRAME) {
     StartDecoding();
   }
 
   // For very short media the first frame decode can decode the entire media.
   // So we need to check if this has occurred, else our decode pipeline won't
   // run (since it doesn't need to) and we won't detect end of stream.
@@ -2421,17 +2403,17 @@ MediaDecoderStateMachine::SeekCompleted(
     // seekTime is bounded in suitable duration. See Bug 1112438.
     int64_t videoStart = video ? video->mTime : seekTime;
     int64_t audioStart = audio ? audio->mTime : seekTime;
     newCurrentTime = mAudioStartTime = std::min(audioStart, videoStart);
   } else {
     newCurrentTime = video ? video->mTime : seekTime;
   }
   mStreamStartTime = newCurrentTime;
-  mPlayDuration = newCurrentTime;
+  mPlayDuration = newCurrentTime - mStartTime;
 
   mDecoder->StartProgressUpdates();
 
   // Change state to DECODING or COMPLETED now. SeekingStopped will
   // call MediaDecoderStateMachine::Seek to reset our state to SEEKING
   // if we need to seek again.
 
   bool isLiveStream = mDecoder->GetResource()->IsLiveStream();
@@ -2738,18 +2720,18 @@ MediaDecoderStateMachine::Reset()
 
   // Stop the audio thread. Otherwise, AudioSink might be accessing AudioQueue
   // outside of the decoder monitor while we are clearing the queue and causes
   // crash for no samples to be popped.
   StopAudioThread();
 
   mVideoFrameEndTime = -1;
   mDecodedVideoEndTime = -1;
-  mStreamStartTime = 0;
-  mAudioStartTime = 0;
+  mStreamStartTime = -1;
+  mAudioStartTime = -1;
   mAudioEndTime = -1;
   mDecodedAudioEndTime = -1;
   mAudioCompleted = false;
   AudioQueue().Reset();
   VideoQueue().Reset();
   mFirstVideoFrameAfterSeek = nullptr;
   mDropAudioUntilNextDiscontinuity = true;
   mDropVideoUntilNextDiscontinuity = true;
@@ -2808,17 +2790,17 @@ void MediaDecoderStateMachine::RenderVid
 }
 
 void MediaDecoderStateMachine::ResyncAudioClock()
 {
   MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
   if (IsPlaying()) {
     SetPlayStartTime(TimeStamp::Now());
-    mPlayDuration = GetAudioClock();
+    mPlayDuration = GetAudioClock() - mStartTime;
   }
 }
 
 int64_t
 MediaDecoderStateMachine::GetAudioClock() const
 {
   MOZ_ASSERT(OnTaskQueue());
   // We must hold the decoder monitor while using the audio stream off the
@@ -2838,38 +2820,38 @@ int64_t MediaDecoderStateMachine::GetStr
   return mStreamStartTime + GetDecodedStream()->GetPosition();
 }
 
 int64_t MediaDecoderStateMachine::GetVideoStreamPosition() const
 {
   AssertCurrentThreadInMonitor();
 
   if (!IsPlaying()) {
-    return mPlayDuration;
+    return mPlayDuration + mStartTime;
   }
 
   // Time elapsed since we started playing.
   int64_t delta = DurationToUsecs(TimeStamp::Now() - mPlayStartTime);
   // Take playback rate into account.
   delta *= mPlaybackRate;
-  return mPlayDuration + delta;
+  return mStartTime + mPlayDuration + delta;
 }
 
 int64_t MediaDecoderStateMachine::GetClock() const
 {
   MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
 
   // Determine the clock time. If we've got audio, and we've not reached
   // the end of the audio, use the audio clock. However if we've finished
   // audio, or don't have audio, use the system clock. If our output is being
   // fed to a MediaStream, use that stream as the source of the clock.
   int64_t clock_time = -1;
   if (!IsPlaying()) {
-    clock_time = mPlayDuration;
+    clock_time = mPlayDuration + mStartTime;
   } else {
     if (mAudioCaptured) {
       clock_time = GetStreamClock();
     } else if (HasAudio() && !mAudioCompleted) {
       clock_time = GetAudioClock();
     } else {
       // Audio is disabled on this system. Sync to the system clock.
       clock_time = GetVideoStreamPosition();
@@ -2902,17 +2884,17 @@ void MediaDecoderStateMachine::AdvanceFr
     SendStreamData();
   }
 
   const int64_t clock_time = GetClock();
   TimeStamp nowTime = TimeStamp::Now();
   // Skip frames up to the frame at the playback position, and figure out
   // the time remaining until it's time to display the next frame.
   int64_t remainingTime = AUDIO_DURATION_USECS;
-  NS_ASSERTION(clock_time >= 0, "Should have positive clock time.");
+  NS_ASSERTION(clock_time >= mStartTime, "Should have positive clock time.");
   nsRefPtr<VideoData> currentFrame;
   if (VideoQueue().GetSize() > 0) {
     VideoData* frame = VideoQueue().PeekFront();
     int32_t droppedFrames = 0;
     while (IsRealTime() || clock_time >= frame->mTime) {
       mVideoFrameEndTime = frame->GetEndTime();
       if (currentFrame) {
         mDecoder->NotifyDecodedFrames(0, 0, 1);
@@ -2982,17 +2964,17 @@ void MediaDecoderStateMachine::AdvanceFr
   // Otherwise, MediaDecoder::AddOutputStream could kick in when we are outside
   // the monitor and get a staled value from GetCurrentTimeUs() which hits the
   // assertion in GetClock().
 
   if (currentFrame) {
     // Decode one frame and display it.
     int64_t delta = currentFrame->mTime - clock_time;
     TimeStamp presTime = nowTime + TimeDuration::FromMicroseconds(delta / mPlaybackRate);
-    NS_ASSERTION(currentFrame->mTime >= 0, "Should have positive frame time");
+    NS_ASSERTION(currentFrame->mTime >= mStartTime, "Should have positive frame time");
     {
       ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
       // If we have video, we want to increment the clock in steps of the frame
       // duration.
       RenderVideoFrame(currentFrame, presTime);
     }
     MOZ_ASSERT(IsPlaying());
     MediaDecoder::FrameStatistics& frameStats = mDecoder->GetFrameStatistics();
@@ -3143,16 +3125,47 @@ MediaDecoderStateMachine::DropAudioUpToS
                                          audioData.forget(),
                                          channels,
                                          audio->mRate));
   PushFront(data);
 
   return NS_OK;
 }
 
+void MediaDecoderStateMachine::SetStartTime(int64_t aStartTimeUsecs)
+{
+  AssertCurrentThreadInMonitor();
+  DECODER_LOG("SetStartTime(%lld)", aStartTimeUsecs);
+  mStartTime = 0;
+  if (aStartTimeUsecs != 0) {
+    mStartTime = aStartTimeUsecs;
+    if (mGotDurationFromMetaData && GetEndTime() != INT64_MAX) {
+      NS_ASSERTION(mEndTime != -1,
+                   "We should have mEndTime as supplied duration here");
+      // We were specified a duration from a Content-Duration HTTP header.
+      // Adjust mEndTime so that mEndTime-mStartTime matches the specified
+      // duration.
+      mEndTime = mStartTime + mEndTime;
+    }
+  }
+
+  // Pass along this immutable value to the reader so that it can make
+  // calculations independently of the state machine.
+  mReader->SetStartTime(mStartTime);
+
+  // Set the audio start time to be start of media. If this lies before the
+  // first actual audio frame we have, we'll inject silence during playback
+  // to ensure the audio starts at the correct time.
+  mAudioStartTime = mStartTime;
+  mStreamStartTime = mStartTime;
+  DECODER_LOG("Set media start time to %lld", mStartTime);
+
+  RecomputeDuration();
+}
+
 void MediaDecoderStateMachine::UpdateNextFrameStatus()
 {
   MOZ_ASSERT(OnTaskQueue());
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
 
   MediaDecoderOwner::NextFrameStatus status;
   const char* statusString;
   if (mState <= DECODER_STATE_DECODING_FIRSTFRAME) {
@@ -3310,17 +3323,17 @@ MediaDecoderStateMachine::LogicalPlaybac
   }
 
   // AudioStream will handle playback rate change when we have audio.
   // Do nothing while we are not playing. Change in playback rate will
   // take effect next time we start playing again.
   if (!HasAudio() && IsPlaying()) {
     // Remember how much time we've spent in playing the media
     // for playback rate will change from now on.
-    mPlayDuration = GetVideoStreamPosition();
+    mPlayDuration = GetVideoStreamPosition() - mStartTime;
     SetPlayStartTime(TimeStamp::Now());
   }
 
   mPlaybackRate = mLogicalPlaybackRate;
   if (mAudioSink) {
     mAudioSink->SetPlaybackRate(mPlaybackRate);
   }
 }
@@ -3332,16 +3345,17 @@ void MediaDecoderStateMachine::Preserves
 
   if (mAudioSink) {
     mAudioSink->SetPreservesPitch(mPreservesPitch);
   }
 }
 
 bool MediaDecoderStateMachine::IsShutdown()
 {
+  AssertCurrentThreadInMonitor();
   return mState == DECODER_STATE_ERROR ||
          mState == DECODER_STATE_SHUTDOWN;
 }
 
 void MediaDecoderStateMachine::QueueMetadata(int64_t aPublishTime,
                                              nsAutoPtr<MediaInfo> aInfo,
                                              nsAutoPtr<MetadataTags> aTags)
 {
--- a/dom/media/MediaDecoderStateMachine.h
+++ b/dom/media/MediaDecoderStateMachine.h
@@ -95,19 +95,16 @@ hardware (via AudioStream).
 #include "DecodedStream.h"
 
 namespace mozilla {
 
 class AudioSegment;
 class MediaTaskQueue;
 class AudioSink;
 
-extern PRLogModuleInfo* gMediaDecoderLog;
-extern PRLogModuleInfo* gMediaSampleLog;
-
 /*
   The state machine class. This manages the decoding and seeking in the
   MediaDecoderReader on the decode task queue, and A/V sync on the shared
   state machine thread, and controls the audio "push" thread.
 
   All internal state is synchronised via the decoder monitor. State changes
   are either propagated by NotifyAll on the monitor (typically when state
   changes need to be propagated to non-state machine threads) or by scheduling
@@ -115,18 +112,16 @@ extern PRLogModuleInfo* gMediaSampleLog;
 
   See MediaDecoder.h for more details.
 */
 class MediaDecoderStateMachine
 {
   friend class AudioSink;
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaDecoderStateMachine)
 public:
-  typedef MediaDecoderReader::AudioDataPromise AudioDataPromise;
-  typedef MediaDecoderReader::VideoDataPromise VideoDataPromise;
   typedef MediaDecoderOwner::NextFrameStatus NextFrameStatus;
   MediaDecoderStateMachine(MediaDecoder* aDecoder,
                            MediaDecoderReader* aReader,
                            bool aRealTime = false);
 
   nsresult Init(MediaDecoderStateMachine* aCloneDonor);
 
   // Enumeration for the valid decoding states
@@ -191,16 +186,24 @@ public:
   int64_t GetDuration();
 
   // Time of the last frame in the media, in microseconds or INT64_MAX if
   // media has an infinite duration.
   // Accessed on state machine, decode, and main threads.
   // Access controlled by decoder monitor.
   int64_t GetEndTime();
 
+  // Called from the main thread to set the duration of the media resource
+  // if it is able to be obtained via HTTP headers. Called from the
+  // state machine thread to set the duration if it is obtained from the
+  // media metadata. The decoder monitor must be obtained before calling this.
+  // aDuration is in microseconds.
+  // A value of INT64_MAX will be treated as infinity.
+  void SetDuration(media::TimeUnit aDuration);
+
   // Functions used by assertions to ensure we're calling things
   // on the appropriate threads.
   bool OnDecodeTaskQueue() const;
   bool OnTaskQueue() const;
 
   // Seeks to the decoder to aTarget asynchronously.
   // Must be called on the state machine thread.
   nsRefPtr<MediaDecoder::SeekPromise> Seek(SeekTarget aTarget);
@@ -258,17 +261,24 @@ public:
   // Must be called with the decode monitor held.
   bool IsSeeking() const {
     MOZ_ASSERT(OnTaskQueue());
     AssertCurrentThreadInMonitor();
     return mState == DECODER_STATE_SEEKING;
   }
 
   media::TimeIntervals GetBuffered() {
+    // It's possible for JS to query .buffered before we've determined the start
+    // time from metadata, in which case the reader isn't ready to be asked this
+    // question.
     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
+    if (mStartTime < 0) {
+      return media::TimeIntervals();
+    }
+
     return mReader->GetBuffered();
   }
 
   size_t SizeOfVideoQueue() {
     if (mReader) {
       return mReader->SizeOfVideoQueueInBytes();
     }
     return 0;
@@ -593,17 +603,17 @@ protected:
 
   // Returns the "media time". This is the absolute time which the media
   // playback has reached. i.e. this returns values in the range
   // [mStartTime, mEndTime], and mStartTime will not be 0 if the media does
   // not start at 0. Note this is different than the "current playback position",
   // which is in the range [0,duration].
   int64_t GetMediaTime() const {
     AssertCurrentThreadInMonitor();
-    return mCurrentPosition;
+    return mStartTime + mCurrentPosition;
   }
 
   // Returns an upper bound on the number of microseconds of audio that is
   // decoded and playable. This is the sum of the number of usecs of audio which
   // is decoded and in the reader's audio queue, and the usecs of unplayed audio
   // which has been pushed to the audio hardware for playback. Note that after
   // calling this, the audio hardware may play some of the audio pushed to
   // hardware, so this can only be used as a upper bound. The decoder monitor
@@ -774,143 +784,16 @@ public:
   private:
     MediaDecoderStateMachine* mSelf;
     nsRefPtr<MediaTimer> mMediaTimer;
     MediaPromiseRequestHolder<mozilla::MediaTimerPromise> mRequest;
     TimeStamp mTarget;
 
   } mDelayedScheduler;
 
-  // StartTimeRendezvous is a helper class that quarantines the first sample
-  // until it gets a sample from both channels, such that we can be guaranteed
-  // to know the start time by the time On{Audio,Video}Decoded is called.
-  class StartTimeRendezvous {
-  public:
-    typedef MediaDecoderReader::AudioDataPromise AudioDataPromise;
-    typedef MediaDecoderReader::VideoDataPromise VideoDataPromise;
-    typedef MediaPromise<bool, bool, /* isExclusive = */ false> HaveStartTimePromise;
-
-    NS_INLINE_DECL_THREADSAFE_REFCOUNTING(StartTimeRendezvous);
-    StartTimeRendezvous(AbstractThread* aOwnerThread, bool aHasAudio, bool aHasVideo,
-                        bool aForceZeroStartTime)
-      : mOwnerThread(aOwnerThread)
-    {
-      if (aForceZeroStartTime) {
-        mAudioStartTime.emplace(0);
-        mVideoStartTime.emplace(0);
-        return;
-      }
-
-      if (!aHasAudio) {
-        mAudioStartTime.emplace(INT64_MAX);
-      }
-
-      if (!aHasVideo) {
-        mVideoStartTime.emplace(INT64_MAX);
-      }
-    }
-
-    void Destroy()
-    {
-      mAudioStartTime = Some(mAudioStartTime.refOr(INT64_MAX));
-      mVideoStartTime = Some(mVideoStartTime.refOr(INT64_MAX));
-      mHaveStartTimePromise.RejectIfExists(false, __func__);
-    }
-
-    nsRefPtr<HaveStartTimePromise> AwaitStartTime()
-    {
-      if (HaveStartTime()) {
-        return HaveStartTimePromise::CreateAndResolve(true, __func__);
-      }
-      return mHaveStartTimePromise.Ensure(__func__);
-    }
-
-    template<typename PromiseType>
-    struct PromiseSampleType {
-      typedef typename PromiseType::ResolveValueType::element_type Type;
-    };
-
-    template<typename PromiseType>
-    nsRefPtr<PromiseType> ProcessFirstSample(typename PromiseSampleType<PromiseType>::Type* aData)
-    {
-      typedef typename PromiseSampleType<PromiseType>::Type DataType;
-      typedef typename PromiseType::Private PromisePrivate;
-      MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
-
-      MaybeSetChannelStartTime<DataType>(aData->mTime);
-
-      nsRefPtr<PromisePrivate> p = new PromisePrivate(__func__);
-      nsRefPtr<DataType> data = aData;
-      nsRefPtr<StartTimeRendezvous> self = this;
-      AwaitStartTime()->Then(mOwnerThread, __func__,
-                             [p, data, self] () -> void {
-                               MOZ_ASSERT(self->mOwnerThread->IsCurrentThreadIn());
-                               p->Resolve(data, __func__);
-                             },
-                             [p] () -> void { p->Reject(MediaDecoderReader::CANCELED, __func__); });
-
-      return p.forget();
-    }
-
-    template<typename SampleType>
-    void FirstSampleRejected(MediaDecoderReader::NotDecodedReason aReason)
-    {
-      MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
-      if (aReason == MediaDecoderReader::DECODE_ERROR) {
-        mHaveStartTimePromise.RejectIfExists(false, __func__);
-      } else if (aReason == MediaDecoderReader::END_OF_STREAM) {
-        MOZ_LOG(gMediaDecoderLog, LogLevel::Debug,
-                ("StartTimeRendezvous=%p %s Has no samples.", this, SampleType::sTypeName));
-        MaybeSetChannelStartTime<SampleType>(INT64_MAX);
-      }
-    }
-
-    bool HaveStartTime() { return mAudioStartTime.isSome() && mVideoStartTime.isSome(); }
-    int64_t StartTime()
-    {
-      int64_t time = std::min(mAudioStartTime.ref(), mVideoStartTime.ref());
-      return time == INT64_MAX ? 0 : time;
-    }
-  private:
-    virtual ~StartTimeRendezvous() {}
-
-    template<typename SampleType>
-    void MaybeSetChannelStartTime(int64_t aStartTime)
-    {
-      if (ChannelStartTime(SampleType::sType).isSome()) {
-        // If we're initialized with aForceZeroStartTime=true, the channel start
-        // times are already set.
-        return;
-      }
-
-      MOZ_LOG(gMediaDecoderLog, LogLevel::Debug,
-              ("StartTimeRendezvous=%p Setting %s start time to %lld",
-               this, SampleType::sTypeName, aStartTime));
-
-      ChannelStartTime(SampleType::sType).emplace(aStartTime);
-      if (HaveStartTime()) {
-        mHaveStartTimePromise.ResolveIfExists(true, __func__);
-      }
-    }
-
-    Maybe<int64_t>& ChannelStartTime(MediaData::Type aType)
-    {
-      return aType == MediaData::AUDIO_DATA ? mAudioStartTime : mVideoStartTime;
-    }
-
-    MediaPromiseHolder<HaveStartTimePromise> mHaveStartTimePromise;
-    nsRefPtr<AbstractThread> mOwnerThread;
-    Maybe<int64_t> mAudioStartTime;
-    Maybe<int64_t> mVideoStartTime;
-  };
-  nsRefPtr<StartTimeRendezvous> mStartTimeRendezvous;
-
-  bool HaveStartTime() { return mStartTimeRendezvous && mStartTimeRendezvous->HaveStartTime(); }
-  int64_t StartTime() { return mStartTimeRendezvous->StartTime(); }
-
   // Time at which the last video sample was requested. If it takes too long
   // before the sample arrives, we will increase the amount of audio we buffer.
   // This is necessary for legacy synchronous decoders to prevent underruns.
   TimeStamp mVideoDecodeStartTime;
 
   // Queue of audio frames. This queue is threadsafe, and is accessed from
   // the audio, decoder, state machine, and main threads.
   MediaQueue<AudioData> mAudioQueue;
@@ -942,16 +825,22 @@ public:
   // by decoder monitor.
   int64_t mPlayDuration;
 
   // Time that buffering started. Used for buffering timeout and only
   // accessed on the state machine thread. This is null while we're not
   // buffering.
   TimeStamp mBufferingStart;
 
+  // Start time of the media, in microseconds. This is the presentation
+  // time of the first frame decoded from the media, and is used to calculate
+  // duration and as a bounds for seeking. Accessed on state machine, decode,
+  // and main threads. Access controlled by decoder monitor.
+  int64_t mStartTime;
+
   // Time of the last frame in the media, in microseconds. This is the
   // end time of the last frame in the media. Accessed on state
   // machine, decode, and main threads. Access controlled by decoder monitor.
   // It will be set to -1 if the duration is infinite
   int64_t mEndTime;
 
   // Recomputes the canonical duration from various sources.
   void RecomputeDuration();
@@ -1264,23 +1153,19 @@ protected:
   // when either all the audio frames have completed playing, or we've moved
   // into shutdown state, and the threads are to be
   // destroyed. Written by the audio playback thread and read and written by
   // the state machine thread. Synchronised via decoder monitor.
   // When data is being sent to a MediaStream, this is true when all data has
   // been written to the MediaStream.
   Watchable<bool> mAudioCompleted;
 
-  // Flag whether we notify metadata before decoding the first frame or after.
-  //
-  // Note that the odd semantics here are designed to replicate the current
-  // behavior where we notify the decoder each time we come out of dormant, but
-  // send suppressed event visibility for those cases. This code can probably be
-  // simplified.
-  bool mNotifyMetadataBeforeFirstFrame;
+  // True if mDuration has a value obtained from an HTTP header, or from
+  // the media index/metadata. Accessed on the state machine thread.
+  bool mGotDurationFromMetaData;
 
   // True if we've dispatched an event to the decode task queue to call
   // DecodeThreadRun(). We use this flag to prevent us from dispatching
   // unneccessary runnables, since the decode thread runs in a loop.
   bool mDispatchedEventToDecode;
 
   // If this is true while we're in buffering mode, we can exit early,
   // as it's likely we may be able to playback. This happens when we enter
--- a/dom/media/MediaFormatReader.cpp
+++ b/dom/media/MediaFormatReader.cpp
@@ -1295,17 +1295,17 @@ MediaFormatReader::GetBuffered()
   media::TimeIntervals intervals;
 
   if (!mInitDone) {
     return intervals;
   }
   int64_t startTime;
   {
     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
-    NS_ENSURE_TRUE(mStartTime >= 0, media::TimeIntervals());
+    MOZ_ASSERT(mStartTime != -1, "Need to finish metadata decode first");
     startTime = mStartTime;
   }
   if (NS_IsMainThread()) {
     if (mCachedTimeRangesStale) {
       MOZ_ASSERT(mMainThreadDemuxer);
       if (!mDataRange.IsEmpty()) {
         mMainThreadDemuxer->NotifyDataArrived(mDataRange.Length(), mDataRange.mStart);
       }
@@ -1469,15 +1469,18 @@ MediaFormatReader::NotifyDataRemoved()
   // Queue a task to notify our main demuxer.
   RefPtr<nsIRunnable> task =
     NS_NewRunnableMethodWithArgs<int32_t, uint64_t>(
       this, &MediaFormatReader::NotifyDemuxer,
       0, 0);
   TaskQueue()->Dispatch(task.forget());
 }
 
-bool
-MediaFormatReader::ForceZeroStartTime() const
+int64_t
+MediaFormatReader::ComputeStartTime(const VideoData* aVideo, const AudioData* aAudio)
 {
-  return !mDemuxer->ShouldComputeStartTime();
+  if (mDemuxer->ShouldComputeStartTime()) {
+    return MediaDecoderReader::ComputeStartTime(aVideo, aAudio);
+  }
+  return 0;
 }
 
 } // namespace mozilla
--- a/dom/media/MediaFormatReader.h
+++ b/dom/media/MediaFormatReader.h
@@ -70,18 +70,16 @@ public:
   int64_t GetEvictionOffset(double aTime) override;
   void NotifyDataArrived(const char* aBuffer,
                                  uint32_t aLength,
                                  int64_t aOffset) override;
   void NotifyDataRemoved() override;
 
   media::TimeIntervals GetBuffered() override;
 
-  virtual bool ForceZeroStartTime() const override;
-
   // For Media Resource Management
   void SetIdle() override;
   bool IsDormantNeeded() override;
   void ReleaseMediaResources() override;
   void SetSharedDecoderManager(SharedDecoderManager* aManager)
     override;
 
   nsresult ResetDecode() override;
@@ -94,16 +92,18 @@ public:
 
   void DisableHardwareAcceleration() override;
 
   bool IsWaitForDataSupported() override { return true; }
   nsRefPtr<WaitForDataPromise> WaitForData(MediaData::Type aType) override;
 
   bool IsWaitingOnCDMResource() override;
 
+  int64_t ComputeStartTime(const VideoData* aVideo, const AudioData* aAudio) override;
+
   bool UseBufferingHeuristics() override
   {
     return mTrackDemuxersMayBlock;
   }
 
 private:
   bool InitDemuxer();
   // Notify the demuxer that new data has been received.
--- a/dom/media/MediaInfo.h
+++ b/dom/media/MediaInfo.h
@@ -350,16 +350,16 @@ public:
   AudioInfo mAudio;
 
   // If the metadata includes a duration, we store it here.
   media::NullableTimeUnit mMetadataDuration;
 
   // The Ogg reader tries to kinda-sorta compute the duration by seeking to the
   // end and determining the timestamp of the last frame. This isn't useful as
   // a duration until we know the start time, so we need to track it separately.
-  media::NullableTimeUnit mUnadjustedMetadataEndTime;
+  media::NullableTimeUnit mMetadataEndTime;
 
   EncryptionInfo mCrypto;
 };
 
 } // namespace mozilla
 
 #endif // MediaInfo_h
--- a/dom/media/fmp4/MP4Reader.cpp
+++ b/dom/media/fmp4/MP4Reader.cpp
@@ -1076,17 +1076,17 @@ media::TimeIntervals
 MP4Reader::GetBuffered()
 {
   MonitorAutoLock mon(mDemuxerMonitor);
   media::TimeIntervals buffered;
   if (!mIndexReady) {
     return buffered;
   }
   UpdateIndex();
-  NS_ENSURE_TRUE(mStartTime >= 0, media::TimeIntervals());
+  MOZ_ASSERT(mStartTime != -1, "Need to finish metadata decode first");
 
   AutoPinned<MediaResource> resource(mDecoder->GetResource());
   nsTArray<MediaByteRange> ranges;
   nsresult rv = resource->GetCachedRanges(ranges);
 
   if (NS_SUCCEEDED(rv)) {
     nsTArray<Interval<Microseconds>> timeRanges;
     mDemuxer->ConvertByteRangesToTime(ranges, &timeRanges);
--- a/dom/media/mediasource/MediaSourceReader.h
+++ b/dom/media/mediasource/MediaSourceReader.h
@@ -89,17 +89,17 @@ public:
     if (GetVideoReader()) {
       GetVideoReader()->DisableHardwareAcceleration();
     }
   }
 
   // We can't compute a proper start time since we won't necessarily
   // have the first frame of the resource available. This does the same
   // as chrome/blink and assumes that we always start at t=0.
-  virtual bool ForceZeroStartTime() const override { return true; }
+  virtual int64_t ComputeStartTime(const VideoData* aVideo, const AudioData* aAudio) override { return 0; }
 
   // Buffering heuristics don't make sense for MSE, because the arrival of data
   // is at least partly controlled by javascript, and javascript does not expect
   // us to sit on unplayed data just because it may not be enough to play
   // through.
   bool UseBufferingHeuristics() override { return false; }
 
   bool IsMediaSeekable() override { return true; }
--- a/dom/media/ogg/OggReader.cpp
+++ b/dom/media/ogg/OggReader.cpp
@@ -484,17 +484,17 @@ nsresult OggReader::ReadMetadata(MediaIn
       NS_ASSERTION(length > 0, "Must have a content length to get end time");
 
       int64_t endTime = 0;
       {
         ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
         endTime = RangeEndTime(length);
       }
       if (endTime != -1) {
-        mInfo.mUnadjustedMetadataEndTime.emplace(TimeUnit::FromMicroseconds(endTime));
+        mInfo.mMetadataEndTime.emplace(TimeUnit::FromMicroseconds(endTime));
         LOG(LogLevel::Debug, ("Got Ogg duration from seeking to end %lld", endTime));
       }
     }
   } else {
     return NS_ERROR_FAILURE;
   }
   *aInfo = mInfo;
 
@@ -1847,17 +1847,17 @@ nsresult OggReader::SeekBisection(int64_
 
   SEEK_LOG(LogLevel::Debug, ("Seek complete in %d bisections.", hops));
 
   return NS_OK;
 }
 
 media::TimeIntervals OggReader::GetBuffered()
 {
-  NS_ENSURE_TRUE(mStartTime >= 0, media::TimeIntervals());
+  MOZ_ASSERT(mStartTime != -1, "Need to finish metadata decode first");
   {
     mozilla::ReentrantMonitorAutoEnter mon(mMonitor);
     if (mIsChained) {
       return media::TimeIntervals::Invalid();
     }
   }
 #ifdef OGG_ESTIMATE_BUFFERED
   return MediaDecoderReader::GetBuffered();
--- a/dom/media/test/test_buffered.html
+++ b/dom/media/test/test_buffered.html
@@ -20,17 +20,16 @@ https://bugzilla.mozilla.org/show_bug.cg
 <script type="application/javascript">
 
 // Test for Bug 462957; HTMLMediaElement.buffered.
 
 var manager = new MediaTestManager;
 
 function testBuffered(e) {
   var v = e.target;
-  v.removeEventListener('timeupdate', testBuffered);
   
   // The whole media should be buffered...
   var b = v.buffered;
   is(b.length, 1, v._name + ": Should be buffered in one range");
   is(b.start(0), 0, v._name + ": First range start should be media start");
   ok(Math.abs(b.end(0) - v.duration) < 0.1, v._name + ": First range end should be media end");
 
   // Ensure INDEX_SIZE_ERR is thrown when we access outside the range
@@ -90,22 +89,22 @@ function fetch(url, fetched_callback) {
 }
 
 function startTest(test, token) {
   // Fetch the media resource using XHR so we can be sure the entire
   // resource is loaded before we test buffered ranges. This ensures
   // we have deterministic behaviour.
   var onfetched = function(uri) {
     var v = document.createElement('video');
-    v.autoplay = true;
+    v.preload = "metadata";
     v._token = token;
     v.src = uri;
     v._name = test.name;
     v._test = test;
-    v.addEventListener("timeupdate", testBuffered, false);
+    v.addEventListener("loadedmetadata", testBuffered, false);
     document.body.appendChild(v);
   };
 
   manager.started(token);
   fetch(test.name, onfetched);
 }
 
 // Note: No need to set media test prefs, since we're using XHR to fetch
--- a/dom/media/webm/WebMReader.cpp
+++ b/dom/media/webm/WebMReader.cpp
@@ -1088,17 +1088,17 @@ nsresult WebMReader::SeekInternal(int64_
       return NS_ERROR_FAILURE;
     }
   }
   return NS_OK;
 }
 
 media::TimeIntervals WebMReader::GetBuffered()
 {
-  NS_ENSURE_TRUE(mStartTime >= 0, media::TimeIntervals());
+  MOZ_ASSERT(mStartTime != -1, "Need to finish metadata decode first");
   AutoPinned<MediaResource> resource(mDecoder->GetResource());
 
   media::TimeIntervals buffered;
   // Special case completely cached files.  This also handles local files.
   if (mContext && resource->IsDataCachedToEndOfResource(0)) {
     uint64_t duration = 0;
     if (nestegg_duration(mContext, &duration) == 0) {
       buffered +=