Bug 1129732: Part1. Dynamically adjust calculations using timestampoffset. r=mattwoodrow
authorJean-Yves Avenard <jyavenard@mozilla.com>
Mon, 09 Feb 2015 23:28:59 +1100
changeset 228085 3068d82cb1a8f8ce7ee52b2b9ed97c41d39a9a1f
parent 228084 42dd185fb447931bba640b207e34be6914de2996
child 228086 a190ece892cc72a7c08488df1c3b68ab952bbad6
push id55310
push userjyavenard@mozilla.com
push dateMon, 09 Feb 2015 12:30:55 +0000
treeherdermozilla-inbound@35aa35129ceb [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersmattwoodrow
bugs1129732
milestone38.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1129732: Part1. Dynamically adjust calculations using timestampoffset. r=mattwoodrow Timestamp offset calculations are now done exclusively by the Media Source components, which allows recalculating them on the fly. Abstracting those offsets removes the need for the sub-decoders to handle them (which makes it possible to add WebM support).
dom/html/TimeRanges.cpp
dom/html/TimeRanges.h
dom/media/AbstractMediaDecoder.h
dom/media/MediaData.cpp
dom/media/MediaData.h
dom/media/fmp4/MP4Reader.cpp
dom/media/fmp4/MP4Reader.h
dom/media/gtest/TestMP4Demuxer.cpp
dom/media/mediasource/ContainerParser.cpp
dom/media/mediasource/MediaSourceReader.cpp
dom/media/mediasource/MediaSourceReader.h
dom/media/mediasource/SourceBufferDecoder.cpp
dom/media/mediasource/SourceBufferDecoder.h
dom/media/mediasource/TrackBuffer.cpp
media/libstagefright/binding/DecoderData.cpp
media/libstagefright/binding/Index.cpp
media/libstagefright/binding/MoofParser.cpp
media/libstagefright/binding/include/mp4_demuxer/DecoderData.h
media/libstagefright/binding/include/mp4_demuxer/Index.h
media/libstagefright/binding/include/mp4_demuxer/MoofParser.h
media/libstagefright/binding/include/mp4_demuxer/mp4_demuxer.h
media/libstagefright/binding/mp4_demuxer.cpp
--- a/dom/html/TimeRanges.cpp
+++ b/dom/html/TimeRanges.cpp
@@ -167,10 +167,19 @@ TimeRanges::Find(double aTime, double aT
 }
 
 bool
 TimeRanges::WrapObject(JSContext* aCx, JS::MutableHandle<JSObject*> aReflector)
 {
   return TimeRangesBinding::Wrap(aCx, this, aReflector);
 }
 
+void
+TimeRanges::Shift(double aOffset)
+{
+  for (index_type i = 0; i < mRanges.Length(); ++i) {
+    mRanges[i].mStart += aOffset;
+    mRanges[i].mEnd += aOffset;
+  }
+}
+
 } // namespace dom
 } // namespace mozilla
--- a/dom/html/TimeRanges.h
+++ b/dom/html/TimeRanges.h
@@ -56,16 +56,19 @@ public:
   {
     return mRanges.Length();
   }
 
   virtual double Start(uint32_t aIndex, ErrorResult& aRv);
 
   virtual double End(uint32_t aIndex, ErrorResult& aRv);
 
+  // Shift all values by aOffset seconds.
+  void Shift(double aOffset);
+
 private:
   ~TimeRanges();
 
   // Comparator which orders TimeRanges by start time. Used by Normalize().
   struct TimeRange
   {
     TimeRange(double aStart, double aEnd)
       : mStart(aStart),
--- a/dom/media/AbstractMediaDecoder.h
+++ b/dom/media/AbstractMediaDecoder.h
@@ -60,19 +60,16 @@ public:
   // Called by the decode thread to keep track of the number of bytes read
   // from the resource.
   virtual void NotifyBytesConsumed(int64_t aBytes, int64_t aOffset) = 0;
 
   // Increments the parsed and decoded frame counters by the passed in counts.
   // Can be called on any thread.
   virtual void NotifyDecodedFrames(uint32_t aParsed, uint32_t aDecoded) = 0;
 
-  // For decoders with a notion of timestamp offset, returns the value in microseconds.
-  virtual int64_t GetTimestampOffset() const { return 0; }
-
   // Return the duration of the media in microseconds.
   virtual int64_t GetMediaDuration() = 0;
 
   // Set the duration of the media in microseconds.
   virtual void SetMediaDuration(int64_t aDuration) = 0;
 
   // Sets the duration of the media in microseconds. The MediaDecoder
   // fires a durationchange event to its owner (e.g., an HTML audio
--- a/dom/media/MediaData.cpp
+++ b/dom/media/MediaData.cpp
@@ -43,16 +43,37 @@ AudioData::SizeOfIncludingThis(MallocSiz
 {
   size_t size = aMallocSizeOf(this) + aMallocSizeOf(mAudioData);
   if (mAudioBuffer) {
     size += mAudioBuffer->SizeOfIncludingThis(aMallocSizeOf);
   }
   return size;
 }
 
+/* static */
+already_AddRefed<AudioData>
+AudioData::TransferAndUpdateTimestampAndDuration(AudioData* aOther,
+                                                  int64_t aTimestamp,
+                                                  int64_t aDuration)
+{
+  NS_ENSURE_TRUE(aOther, nullptr);
+  nsRefPtr<AudioData> v = new AudioData(aOther->mOffset,
+                                        aTimestamp,
+                                        aDuration,
+                                        aOther->mFrames,
+                                        aOther->mAudioData,
+                                        aOther->mChannels,
+                                        aOther->mRate);
+  v->mDiscontinuity = aOther->mDiscontinuity;
+  // Remove aOther's AudioData as it can't be shared across two targets.
+  aOther->mAudioData.forget();
+
+  return v.forget();
+}
+
 static bool
 ValidatePlane(const VideoData::YCbCrBuffer::Plane& aPlane)
 {
   return aPlane.mWidth <= PlanarYCbCrImage::MAX_DIMENSION &&
          aPlane.mHeight <= PlanarYCbCrImage::MAX_DIMENSION &&
          aPlane.mWidth * aPlane.mHeight < MAX_VIDEO_WIDTH * MAX_VIDEO_HEIGHT &&
          aPlane.mStride > 0;
 }
--- a/dom/media/MediaData.h
+++ b/dom/media/MediaData.h
@@ -79,16 +79,25 @@ public:
             uint32_t aChannels,
             uint32_t aRate)
     : MediaData(AUDIO_DATA, aOffset, aTime, aDuration)
     , mFrames(aFrames)
     , mChannels(aChannels)
     , mRate(aRate)
     , mAudioData(aData) {}
 
+  // Creates a new AudioData identical to aOther, but with a different
+  // specified timestamp and duration. All data from aOther is copied
+  // into the new AudioData except for the audio data, which is
+  // transferred. After this call, the original aOther is unusable.
+  static already_AddRefed<AudioData>
+  TransferAndUpdateTimestampAndDuration(AudioData* aOther,
+                                        int64_t aTimestamp,
+                                        int64_t aDuration);
+
   size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const;
 
   // If mAudioBuffer is null, creates it from mAudioData.
   void EnsureAudioBuffer();
 
   const uint32_t mFrames;
   const uint32_t mChannels;
   const uint32_t mRate;
--- a/dom/media/fmp4/MP4Reader.cpp
+++ b/dom/media/fmp4/MP4Reader.cpp
@@ -201,17 +201,16 @@ static bool sIsEMEEnabled = false;
 static bool sDemuxSkipToNextKeyframe = true;
 
 nsresult
 MP4Reader::Init(MediaDecoderReader* aCloneDonor)
 {
   MOZ_ASSERT(NS_IsMainThread(), "Must be on main thread.");
   PlatformDecoderModule::Init();
   mStream = new MP4Stream(mDecoder->GetResource());
-  mTimestampOffset = GetDecoder()->GetTimestampOffset();
 
   InitLayersBackendType();
 
   mAudio.mTaskQueue = new MediaTaskQueue(GetMediaDecodeThreadPool());
   NS_ENSURE_TRUE(mAudio.mTaskQueue, NS_ERROR_FAILURE);
 
   mVideo.mTaskQueue = new MediaTaskQueue(GetMediaDecodeThreadPool());
   NS_ENSURE_TRUE(mVideo.mTaskQueue, NS_ERROR_FAILURE);
@@ -337,17 +336,17 @@ MP4Reader::PreReadMetadata()
   if (mPlatform) {
     RequestCodecResource();
   }
 }
 
 bool
 MP4Reader::InitDemuxer()
 {
-  mDemuxer = new MP4Demuxer(mStream, mTimestampOffset, &mDemuxerMonitor);
+  mDemuxer = new MP4Demuxer(mStream, &mDemuxerMonitor);
   return mDemuxer->Init();
 }
 
 nsresult
 MP4Reader::ReadMetadata(MediaInfo* aInfo,
                         MetadataTags** aTags)
 {
   if (!mDemuxerInitialized) {
--- a/dom/media/fmp4/MP4Reader.h
+++ b/dom/media/fmp4/MP4Reader.h
@@ -122,17 +122,16 @@ private:
   virtual bool IsWaitingOnCDMResource() MOZ_OVERRIDE;
 
   Microseconds GetNextKeyframeTime();
   bool ShouldSkip(bool aSkipToNextKeyframe, int64_t aTimeThreshold);
 
   size_t SizeOfQueue(TrackType aTrack);
 
   nsRefPtr<MP4Stream> mStream;
-  int64_t mTimestampOffset;
   nsAutoPtr<mp4_demuxer::MP4Demuxer> mDemuxer;
   nsRefPtr<PlatformDecoderModule> mPlatform;
 
   class DecoderCallback : public MediaDataDecoderCallback {
   public:
     DecoderCallback(MP4Reader* aReader,
                     mp4_demuxer::TrackType aType)
       : mReader(aReader)
--- a/dom/media/gtest/TestMP4Demuxer.cpp
+++ b/dom/media/gtest/TestMP4Demuxer.cpp
@@ -19,17 +19,17 @@ public:
 
   nsRefPtr<MockMediaResource> resource;
   Monitor mMonitor;
   nsAutoPtr<MP4Demuxer> demuxer;
 
   explicit MP4DemuxerBinding(const char* aFileName = "dash_dashinit.mp4")
     : resource(new MockMediaResource(aFileName))
     , mMonitor("TestMP4Demuxer monitor")
-    , demuxer(new MP4Demuxer(new MP4Stream(resource), 0, &mMonitor))
+    , demuxer(new MP4Demuxer(new MP4Stream(resource), &mMonitor))
   {
     EXPECT_EQ(NS_OK, resource->Open(nullptr));
   }
 
 private:
   virtual ~MP4DemuxerBinding()
   {
   }
--- a/dom/media/mediasource/ContainerParser.cpp
+++ b/dom/media/mediasource/ContainerParser.cpp
@@ -280,17 +280,17 @@ public:
     bool initSegment = IsInitSegmentPresent(aData);
     if (initSegment) {
       mResource = new SourceBufferResource(NS_LITERAL_CSTRING("video/mp4"));
       mStream = new MP4Stream(mResource);
       // We use a timestampOffset of 0 for ContainerParser, and require
       // consumers of ParseStartAndEndTimestamps to add their timestamp offset
       // manually. This allows the ContainerParser to be shared across different
       // timestampOffsets.
-      mParser = new mp4_demuxer::MoofParser(mStream, 0, 0, &mMonitor);
+      mParser = new mp4_demuxer::MoofParser(mStream, 0, &mMonitor);
       mInitData = new LargeDataBuffer();
     } else if (!mStream || !mParser) {
       return false;
     }
 
     mResource->AppendData(aData);
     nsTArray<MediaByteRange> byteRanges;
     MediaByteRange mbr =
--- a/dom/media/mediasource/MediaSourceReader.cpp
+++ b/dom/media/mediasource/MediaSourceReader.cpp
@@ -86,126 +86,134 @@ MediaSourceReader::IsWaitingMediaResourc
   }
 
   return !mHasEssentialTrackBuffers;
 }
 
 size_t
 MediaSourceReader::SizeOfVideoQueueInFrames()
 {
-  if (!mVideoReader) {
+  if (!GetVideoReader()) {
     MSE_DEBUG("MediaSourceReader(%p)::SizeOfVideoQueue called with no video reader", this);
     return 0;
   }
-  return mVideoReader->SizeOfVideoQueueInFrames();
+  return GetVideoReader()->SizeOfVideoQueueInFrames();
 }
 
 size_t
 MediaSourceReader::SizeOfAudioQueueInFrames()
 {
-  if (!mAudioReader) {
+  if (!GetAudioReader()) {
     MSE_DEBUG("MediaSourceReader(%p)::SizeOfAudioQueue called with no audio reader", this);
     return 0;
   }
-  return mAudioReader->SizeOfAudioQueueInFrames();
+  return GetAudioReader()->SizeOfAudioQueueInFrames();
 }
 
 nsRefPtr<MediaDecoderReader::AudioDataPromise>
 MediaSourceReader::RequestAudioData()
 {
   nsRefPtr<AudioDataPromise> p = mAudioPromise.Ensure(__func__);
   MSE_DEBUGV("MediaSourceReader(%p)::RequestAudioData", this);
-  if (!mAudioReader) {
+  if (!GetAudioReader()) {
     MSE_DEBUG("MediaSourceReader(%p)::RequestAudioData called with no audio reader", this);
     mAudioPromise.Reject(DECODE_ERROR, __func__);
     return p;
   }
   if (IsSeeking()) {
     MSE_DEBUG("MediaSourceReader(%p)::RequestAudioData called mid-seek. Rejecting.", this);
     mAudioPromise.Reject(CANCELED, __func__);
     return p;
   }
   MOZ_DIAGNOSTIC_ASSERT(!mAudioSeekRequest.Exists());
 
-  SwitchReaderResult ret = SwitchAudioReader(mLastAudioTime);
+  SwitchSourceResult ret = SwitchAudioSource(mLastAudioTime);
   switch (ret) {
-    case READER_NEW:
-      mAudioSeekRequest.Begin(mAudioReader->Seek(mLastAudioTime, 0)
+    case SOURCE_NEW:
+      mAudioSeekRequest.Begin(GetAudioReader()->Seek(GetReaderAudioTime(mLastAudioTime), 0)
                               ->RefableThen(GetTaskQueue(), __func__, this,
                                             &MediaSourceReader::CompleteAudioSeekAndDoRequest,
                                             &MediaSourceReader::CompleteAudioSeekAndRejectPromise));
       break;
-    case READER_ERROR:
+    case SOURCE_ERROR:
       if (mLastAudioTime) {
         CheckForWaitOrEndOfStream(MediaData::AUDIO_DATA, mLastAudioTime);
         break;
       }
       // Fallback to using current reader
     default:
       DoAudioRequest();
       break;
   }
   return p;
 }
 
 void MediaSourceReader::DoAudioRequest()
 {
-  mAudioRequest.Begin(mAudioReader->RequestAudioData()
+  mAudioRequest.Begin(GetAudioReader()->RequestAudioData()
                       ->RefableThen(GetTaskQueue(), __func__, this,
                                     &MediaSourceReader::OnAudioDecoded,
                                     &MediaSourceReader::OnAudioNotDecoded));
 }
 
 void
 MediaSourceReader::OnAudioDecoded(AudioData* aSample)
 {
   MOZ_DIAGNOSTIC_ASSERT(!IsSeeking());
   mAudioRequest.Complete();
 
+  int64_t ourTime = aSample->mTime + mAudioSourceDecoder->GetTimestampOffset();
+
   MSE_DEBUGV("MediaSourceReader(%p)::OnAudioDecoded [mTime=%lld mDuration=%lld mDiscontinuity=%d]",
-             this, aSample->mTime, aSample->mDuration, aSample->mDiscontinuity);
+             this, ourTime, aSample->mDuration, aSample->mDiscontinuity);
   if (mDropAudioBeforeThreshold) {
-    if (aSample->mTime < mTimeThreshold) {
+    if (ourTime < mTimeThreshold) {
       MSE_DEBUG("MediaSourceReader(%p)::OnAudioDecoded mTime=%lld < mTimeThreshold=%lld",
-                this, aSample->mTime, mTimeThreshold);
-      mAudioRequest.Begin(mAudioReader->RequestAudioData()
+                this, ourTime, mTimeThreshold);
+      mAudioRequest.Begin(GetAudioReader()->RequestAudioData()
                           ->RefableThen(GetTaskQueue(), __func__, this,
                                         &MediaSourceReader::OnAudioDecoded,
                                         &MediaSourceReader::OnAudioNotDecoded));
       return;
     }
     mDropAudioBeforeThreshold = false;
   }
 
-  mLastAudioTime = aSample->mTime + aSample->mDuration;
+  // Adjust the sample time into our reference.
+  nsRefPtr<AudioData> newSample =
+    AudioData::TransferAndUpdateTimestampAndDuration(aSample,
+                                                     ourTime,
+                                                     aSample->mDuration);
+  mLastAudioTime = newSample->GetEndTime();
 
-  mAudioPromise.Resolve(aSample, __func__);
+  mAudioPromise.Resolve(newSample, __func__);
 }
 
 // Find the closest approximation to the end time for this stream.
 // mLast{Audio,Video}Time differs from the actual end time because of
 // Bug 1065207 - the duration of a WebM fragment is an estimate not the
 // actual duration. In the case of audio time an example of where they
 // differ would be the actual sample duration being small but the
 // previous sample being large. The buffered end time uses that last
 // sample duration as an estimate of the end time duration giving an end
 // time that is greater than mLastAudioTime, which is the actual sample
 // end time.
 // Reader switching is based on the buffered end time though so they can be
 // quite different. By using the EOS_FUZZ_US and the buffered end time we
 // attempt to account for this difference.
 static void
-AdjustEndTime(int64_t* aEndTime, MediaDecoderReader* aReader)
+AdjustEndTime(int64_t* aEndTime, SourceBufferDecoder* aDecoder)
 {
-  if (aReader) {
+  if (aDecoder && aDecoder->GetReader()) {
     nsRefPtr<dom::TimeRanges> ranges = new dom::TimeRanges();
-    aReader->GetBuffered(ranges);
+    aDecoder->GetReader()->GetBuffered(ranges);
     if (ranges->Length() > 0) {
       // End time is a double so we convert to nearest by adding 0.5.
-      int64_t end = ranges->GetEndTime() * USECS_PER_S + 0.5;
+      int64_t end =
+        (ranges->GetEndTime() + aDecoder->GetTimestampOffset()) * USECS_PER_S + 0.5;
       *aEndTime = std::max(*aEndTime, end);
     }
   }
 }
 
 void
 MediaSourceReader::OnAudioNotDecoded(NotDecodedReason aReason)
 {
@@ -216,109 +224,117 @@ MediaSourceReader::OnAudioNotDecoded(Not
   if (aReason == DECODE_ERROR || aReason == CANCELED) {
     mAudioPromise.Reject(aReason, __func__);
     return;
   }
 
   // End of stream. Force switching past this stream to another reader by
   // switching to the end of the buffered range.
   MOZ_ASSERT(aReason == END_OF_STREAM);
-  if (mAudioReader) {
-    AdjustEndTime(&mLastAudioTime, mAudioReader);
+  if (mAudioSourceDecoder) {
+    AdjustEndTime(&mLastAudioTime, mAudioSourceDecoder);
   }
 
-  // See if we can find a different reader that can pick up where we left off.
-  if (SwitchAudioReader(mLastAudioTime) == READER_NEW) {
-    mAudioSeekRequest.Begin(mAudioReader->Seek(mLastAudioTime, 0)
+  // See if we can find a different source that can pick up where we left off.
+  if (SwitchAudioSource(mLastAudioTime) == SOURCE_NEW) {
+    mAudioSeekRequest.Begin(GetAudioReader()->Seek(GetReaderAudioTime(mLastAudioTime), 0)
                             ->RefableThen(GetTaskQueue(), __func__, this,
                                           &MediaSourceReader::CompleteAudioSeekAndDoRequest,
                                           &MediaSourceReader::CompleteAudioSeekAndRejectPromise));
     return;
   }
 
   CheckForWaitOrEndOfStream(MediaData::AUDIO_DATA, mLastAudioTime);
 }
 
-
 nsRefPtr<MediaDecoderReader::VideoDataPromise>
 MediaSourceReader::RequestVideoData(bool aSkipToNextKeyframe, int64_t aTimeThreshold)
 {
   nsRefPtr<VideoDataPromise> p = mVideoPromise.Ensure(__func__);
   MSE_DEBUGV("MediaSourceReader(%p)::RequestVideoData(%d, %lld)",
              this, aSkipToNextKeyframe, aTimeThreshold);
-  if (!mVideoReader) {
+  if (!GetVideoReader()) {
     MSE_DEBUG("MediaSourceReader(%p)::RequestVideoData called with no video reader", this);
     mVideoPromise.Reject(DECODE_ERROR, __func__);
     return p;
   }
   if (aSkipToNextKeyframe) {
     mTimeThreshold = aTimeThreshold;
     mDropAudioBeforeThreshold = true;
     mDropVideoBeforeThreshold = true;
   }
   if (IsSeeking()) {
     MSE_DEBUG("MediaSourceReader(%p)::RequestVideoData called mid-seek. Rejecting.", this);
     mVideoPromise.Reject(CANCELED, __func__);
     return p;
   }
   MOZ_DIAGNOSTIC_ASSERT(!mVideoSeekRequest.Exists());
 
-  SwitchReaderResult ret = SwitchVideoReader(mLastVideoTime);
+  SwitchSourceResult ret = SwitchVideoSource(mLastVideoTime);
   switch (ret) {
-    case READER_NEW:
-      mVideoSeekRequest.Begin(mVideoReader->Seek(mLastVideoTime, 0)
+    case SOURCE_NEW:
+      mVideoSeekRequest.Begin(GetVideoReader()->Seek(GetReaderVideoTime(mLastVideoTime), 0)
                              ->RefableThen(GetTaskQueue(), __func__, this,
                                            &MediaSourceReader::CompleteVideoSeekAndDoRequest,
                                            &MediaSourceReader::CompleteVideoSeekAndRejectPromise));
       break;
-    case READER_ERROR:
+    case SOURCE_ERROR:
       if (mLastVideoTime) {
         CheckForWaitOrEndOfStream(MediaData::VIDEO_DATA, mLastVideoTime);
         break;
       }
       // Fallback to using current reader.
     default:
       DoVideoRequest();
       break;
   }
 
   return p;
 }
 
 void
 MediaSourceReader::DoVideoRequest()
 {
-  mVideoRequest.Begin(mVideoReader->RequestVideoData(mDropVideoBeforeThreshold, mTimeThreshold)
+  mVideoRequest.Begin(GetVideoReader()->RequestVideoData(mDropVideoBeforeThreshold, GetReaderVideoTime(mTimeThreshold))
                       ->RefableThen(GetTaskQueue(), __func__, this,
                                     &MediaSourceReader::OnVideoDecoded,
                                     &MediaSourceReader::OnVideoNotDecoded));
 }
 
 void
 MediaSourceReader::OnVideoDecoded(VideoData* aSample)
 {
   MOZ_DIAGNOSTIC_ASSERT(!IsSeeking());
   mVideoRequest.Complete();
 
+  // Adjust the sample time into our reference.
+  int64_t ourTime = aSample->mTime + mVideoSourceDecoder->GetTimestampOffset();
+
   MSE_DEBUGV("MediaSourceReader(%p)::OnVideoDecoded [mTime=%lld mDuration=%lld mDiscontinuity=%d]",
-             this, aSample->mTime, aSample->mDuration, aSample->mDiscontinuity);
+             this, ourTime, aSample->mDuration, aSample->mDiscontinuity);
   if (mDropVideoBeforeThreshold) {
-    if (aSample->mTime < mTimeThreshold) {
+    if (ourTime < mTimeThreshold) {
       MSE_DEBUG("MediaSourceReader(%p)::OnVideoDecoded mTime=%lld < mTimeThreshold=%lld",
-                this, aSample->mTime, mTimeThreshold);
+                this, ourTime, mTimeThreshold);
       DoVideoRequest();
       return;
     }
     mDropVideoBeforeThreshold = false;
     mTimeThreshold = 0;
   }
 
-  mLastVideoTime = aSample->mTime + aSample->mDuration;
+  // Create a copy of the sample carrying the adjusted timestamp.
+  nsRefPtr<VideoData> newSample =
+    VideoData::ShallowCopyUpdateTimestampAndDuration(aSample,
+                                                     ourTime,
+                                                     aSample->mDuration);
 
-  mVideoPromise.Resolve(aSample, __func__);
+  mLastVideoTime = newSample->GetEndTime();
+
+  mVideoPromise.Resolve(newSample, __func__);
 }
 
 void
 MediaSourceReader::OnVideoNotDecoded(NotDecodedReason aReason)
 {
   MOZ_DIAGNOSTIC_ASSERT(!IsSeeking());
   mVideoRequest.Complete();
 
@@ -326,23 +342,23 @@ MediaSourceReader::OnVideoNotDecoded(Not
   if (aReason == DECODE_ERROR || aReason == CANCELED) {
     mVideoPromise.Reject(aReason, __func__);
     return;
   }
 
   // End of stream. Force switching past this stream to another reader by
   // switching to the end of the buffered range.
   MOZ_ASSERT(aReason == END_OF_STREAM);
-  if (mVideoReader) {
-    AdjustEndTime(&mLastVideoTime, mVideoReader);
+  if (mVideoSourceDecoder) {
+    AdjustEndTime(&mLastVideoTime, mVideoSourceDecoder);
   }
 
   // See if we can find a different reader that can pick up where we left off.
-  if (SwitchVideoReader(mLastVideoTime) == READER_NEW) {
-    mVideoSeekRequest.Begin(mVideoReader->Seek(mLastVideoTime, 0)
+  if (SwitchVideoSource(mLastVideoTime) == SOURCE_NEW) {
+    mVideoSeekRequest.Begin(GetVideoReader()->Seek(GetReaderVideoTime(mLastVideoTime), 0)
                            ->RefableThen(GetTaskQueue(), __func__, this,
                                          &MediaSourceReader::CompleteVideoSeekAndDoRequest,
                                          &MediaSourceReader::CompleteVideoSeekAndRejectPromise));
     return;
   }
 
   CheckForWaitOrEndOfStream(MediaData::VIDEO_DATA, mLastVideoTime);
 }
@@ -390,19 +406,19 @@ MediaSourceReader::ContinueShutdown()
                                        &MediaSourceReader::ContinueShutdown,
                                        &MediaSourceReader::ContinueShutdown);
     mShutdownTrackBuffers.AppendElement(mTrackBuffers[0]);
     mTrackBuffers.RemoveElementAt(0);
     return;
   }
 
   mAudioTrack = nullptr;
-  mAudioReader = nullptr;
+  mAudioSourceDecoder = nullptr;
   mVideoTrack = nullptr;
-  mVideoReader = nullptr;
+  mVideoSourceDecoder = nullptr;
 
 #ifdef MOZ_FMP4
   if (mSharedDecoderManager) {
     mSharedDecoderManager->Shutdown();
     mSharedDecoderManager = nullptr;
   }
 #endif
 
@@ -417,130 +433,134 @@ MediaSourceReader::ContinueShutdown()
 
 void
 MediaSourceReader::BreakCycles()
 {
   MediaDecoderReader::BreakCycles();
 
   // These were cleared in Shutdown().
   MOZ_ASSERT(!mAudioTrack);
-  MOZ_ASSERT(!mAudioReader);
+  MOZ_ASSERT(!mAudioSourceDecoder);
   MOZ_ASSERT(!mVideoTrack);
-  MOZ_ASSERT(!mVideoReader);
+  MOZ_ASSERT(!mVideoSourceDecoder);
   MOZ_ASSERT(!mTrackBuffers.Length());
 
   for (uint32_t i = 0; i < mShutdownTrackBuffers.Length(); ++i) {
     mShutdownTrackBuffers[i]->BreakCycles();
   }
   mShutdownTrackBuffers.Clear();
 }
 
-already_AddRefed<MediaDecoderReader>
-MediaSourceReader::SelectReader(int64_t aTarget,
-                                int64_t aTolerance,
-                                const nsTArray<nsRefPtr<SourceBufferDecoder>>& aTrackDecoders)
+already_AddRefed<SourceBufferDecoder>
+MediaSourceReader::SelectDecoder(int64_t aTarget,
+                                 int64_t aTolerance,
+                                 const nsTArray<nsRefPtr<SourceBufferDecoder>>& aTrackDecoders)
 {
   mDecoder->GetReentrantMonitor().AssertCurrentThreadIn();
 
   // Consider decoders in order of newest to oldest, as a newer decoder
   // providing a given buffered range is expected to replace an older one.
   for (int32_t i = aTrackDecoders.Length() - 1; i >= 0; --i) {
-    nsRefPtr<MediaDecoderReader> newReader = aTrackDecoders[i]->GetReader();
+    nsRefPtr<SourceBufferDecoder> newDecoder = aTrackDecoders[i];
 
     nsRefPtr<dom::TimeRanges> ranges = new dom::TimeRanges();
-    aTrackDecoders[i]->GetBuffered(ranges);
+    newDecoder->GetBuffered(ranges);
     if (ranges->Find(double(aTarget) / USECS_PER_S,
                      double(aTolerance) / USECS_PER_S) == dom::TimeRanges::NoIndex) {
-      MSE_DEBUGV("MediaSourceReader(%p)::SelectReader(%lld) newReader=%p target not in ranges=%s",
-                 this, aTarget, newReader.get(), DumpTimeRanges(ranges).get());
+      MSE_DEBUGV("MediaSourceReader(%p)::SelectDecoder(%lld) newDecoder=%p target not in ranges=%s",
+                 this, aTarget, newDecoder.get(), DumpTimeRanges(ranges).get());
       continue;
     }
 
-    return newReader.forget();
+    return newDecoder.forget();
   }
 
   return nullptr;
 }
 
 bool
 MediaSourceReader::HaveData(int64_t aTarget, MediaData::Type aType)
 {
   TrackBuffer* trackBuffer = aType == MediaData::AUDIO_DATA ? mAudioTrack : mVideoTrack;
   MOZ_ASSERT(trackBuffer);
-  nsRefPtr<MediaDecoderReader> reader = SelectReader(aTarget, EOS_FUZZ_US, trackBuffer->Decoders());
-  return !!reader;
+  nsRefPtr<SourceBufferDecoder> decoder = SelectDecoder(aTarget, EOS_FUZZ_US, trackBuffer->Decoders());
+  return !!decoder;
 }
 
-MediaSourceReader::SwitchReaderResult
-MediaSourceReader::SwitchAudioReader(int64_t aTarget)
+MediaSourceReader::SwitchSourceResult
+MediaSourceReader::SwitchAudioSource(int64_t aTarget)
 {
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   // XXX: Can't handle adding an audio track after ReadMetadata.
   if (!mAudioTrack) {
-    return READER_ERROR;
+    return SOURCE_ERROR;
   }
 
   // We first search without the tolerance and then search with it, so that, in
   // the case of perfectly-aligned data, we don't prematurely jump to a new
   // reader and skip the last few samples of the current one.
-  nsRefPtr<MediaDecoderReader> newReader = SelectReader(aTarget, /* aTolerance = */ 0, mAudioTrack->Decoders());
-  if (!newReader) {
-    newReader = SelectReader(aTarget, EOS_FUZZ_US, mAudioTrack->Decoders());
+  nsRefPtr<SourceBufferDecoder> newDecoder =
+    SelectDecoder(aTarget, /* aTolerance = */ 0, mAudioTrack->Decoders());
+  if (!newDecoder) {
+    newDecoder = SelectDecoder(aTarget, EOS_FUZZ_US, mAudioTrack->Decoders());
   }
-  if (newReader && newReader != mAudioReader) {
-    mAudioReader->SetIdle();
-    mAudioReader = newReader;
-    MSE_DEBUGV("MediaSourceReader(%p)::SwitchAudioReader switched reader to %p", this, mAudioReader.get());
-    return READER_NEW;
+  if (newDecoder && newDecoder != mAudioSourceDecoder) {
+    GetAudioReader()->SetIdle();
+    mAudioSourceDecoder = newDecoder;
+    MSE_DEBUGV("MediaSourceReader(%p)::SwitchAudioSource switched decoder to %p",
+               this, mAudioSourceDecoder.get());
+    return SOURCE_NEW;
   }
-  return newReader ? READER_EXISTING : READER_ERROR;
+  return newDecoder ? SOURCE_EXISTING : SOURCE_ERROR;
 }
 
-MediaSourceReader::SwitchReaderResult
-MediaSourceReader::SwitchVideoReader(int64_t aTarget)
+MediaSourceReader::SwitchSourceResult
+MediaSourceReader::SwitchVideoSource(int64_t aTarget)
 {
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   // XXX: Can't handle adding a video track after ReadMetadata.
   if (!mVideoTrack) {
-    return READER_ERROR;
+    return SOURCE_ERROR;
   }
 
   // We first search without the tolerance and then search with it, so that, in
   // the case of perfectly-aligned data, we don't prematurely jump to a new
   // reader and skip the last few samples of the current one.
-  nsRefPtr<MediaDecoderReader> newReader = SelectReader(aTarget, /* aTolerance = */ 0, mVideoTrack->Decoders());
-  if (!newReader) {
-    newReader = SelectReader(aTarget, EOS_FUZZ_US, mVideoTrack->Decoders());
+  nsRefPtr<SourceBufferDecoder> newDecoder =
+    SelectDecoder(aTarget, /* aTolerance = */ 0, mVideoTrack->Decoders());
+  if (!newDecoder) {
+    newDecoder = SelectDecoder(aTarget, EOS_FUZZ_US, mVideoTrack->Decoders());
   }
-  if (newReader && newReader != mVideoReader) {
-    mVideoReader->SetIdle();
-    mVideoReader = newReader;
-    MSE_DEBUGV("MediaSourceReader(%p)::SwitchVideoReader switched reader to %p", this, mVideoReader.get());
-    return READER_NEW;
+  if (newDecoder && newDecoder != mVideoSourceDecoder) {
+    GetVideoReader()->SetIdle();
+    mVideoSourceDecoder = newDecoder;
+    MSE_DEBUGV("MediaSourceReader(%p)::SwitchVideoSource switched decoder to %p",
+               this, mVideoSourceDecoder.get());
+    return SOURCE_NEW;
   }
-  return newReader ? READER_EXISTING : READER_ERROR;
+  return newDecoder ? SOURCE_EXISTING : SOURCE_ERROR;
 }
 
 bool
 MediaSourceReader::IsDormantNeeded()
 {
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
-  if (mVideoReader) {
-    return mVideoReader->IsDormantNeeded();
+  if (GetVideoReader()) {
+    return GetVideoReader()->IsDormantNeeded();
   }
 
   return false;
 }
 
 void
 MediaSourceReader::ReleaseMediaResources()
 {
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
-  if (mVideoReader) {
-    mVideoReader->ReleaseMediaResources();
+  if (GetVideoReader()) {
+    GetVideoReader()->ReleaseMediaResources();
   }
 }
 
 MediaDecoderReader*
 CreateReaderForType(const nsACString& aType, AbstractMediaDecoder* aDecoder)
 {
 #ifdef MOZ_FMP4
   // The MP4Reader that supports fragmented MP4 and uses
@@ -712,66 +732,71 @@ MediaSourceReader::Seek(int64_t aTime, i
 
 void
 MediaSourceReader::CancelSeek()
 {
   MOZ_ASSERT(OnDecodeThread());
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   mWaitingForSeekData = false;
   mPendingSeekTime = -1;
-  if (mAudioReader) {
+  if (GetAudioReader()) {
     mAudioSeekRequest.DisconnectIfExists();
-    mAudioReader->CancelSeek();
+    GetAudioReader()->CancelSeek();
   }
-  if (mVideoReader) {
+  if (GetVideoReader()) {
     mVideoSeekRequest.DisconnectIfExists();
-    mVideoReader->CancelSeek();
+    GetVideoReader()->CancelSeek();
   }
   mSeekPromise.RejectIfExists(NS_OK, __func__);
 }
 
 void
 MediaSourceReader::OnVideoSeekCompleted(int64_t aTime)
 {
   mVideoSeekRequest.Complete();
 
+  // The aTime we receive is in the sub-reader's reference.
+  int64_t ourTime = aTime + mVideoSourceDecoder->GetTimestampOffset();
+
   if (mAudioTrack) {
-    mPendingSeekTime = aTime;
+    mPendingSeekTime = ourTime;
     DoAudioSeek();
   } else {
     mPendingSeekTime = -1;
-    mSeekPromise.Resolve(aTime, __func__);
+    mSeekPromise.Resolve(ourTime, __func__);
   }
 }
 
 void
 MediaSourceReader::OnVideoSeekFailed(nsresult aResult)
 {
   mVideoSeekRequest.Complete();
   mPendingSeekTime = -1;
   mSeekPromise.Reject(aResult, __func__);
 }
 
 void
 MediaSourceReader::DoAudioSeek()
 {
-    SwitchAudioReader(mPendingSeekTime);
-    mAudioSeekRequest.Begin(mAudioReader->Seek(mPendingSeekTime, 0)
-                           ->RefableThen(GetTaskQueue(), __func__, this,
-                                         &MediaSourceReader::OnAudioSeekCompleted,
-                                         &MediaSourceReader::OnAudioSeekFailed));
-    MSE_DEBUG("MediaSourceReader(%p)::DoAudioSeek reader=%p", this, mAudioReader.get());
+  SwitchAudioSource(mPendingSeekTime);
+  mAudioSeekRequest.Begin(GetAudioReader()->Seek(GetReaderAudioTime(mPendingSeekTime), 0)
+                         ->RefableThen(GetTaskQueue(), __func__, this,
+                                       &MediaSourceReader::OnAudioSeekCompleted,
+                                       &MediaSourceReader::OnAudioSeekFailed));
+  MSE_DEBUG("MediaSourceReader(%p)::DoAudioSeek reader=%p", this, GetAudioReader());
 }
 
 void
 MediaSourceReader::OnAudioSeekCompleted(int64_t aTime)
 {
   mAudioSeekRequest.Complete();
   mPendingSeekTime = -1;
-  mSeekPromise.Resolve(aTime, __func__);
+  // The aTime we receive is in the sub-reader's reference.
+  mSeekPromise.Resolve(aTime + mAudioSourceDecoder->GetTimestampOffset(),
+                       __func__);
 }
 
 void
 MediaSourceReader::OnAudioSeekFailed(nsresult aResult)
 {
   mAudioSeekRequest.Complete();
   mPendingSeekTime = -1;
   mSeekPromise.Reject(aResult, __func__);
@@ -806,22 +831,22 @@ MediaSourceReader::AttemptSeek()
   } else {
     MOZ_CRASH();
   }
 }
 
 void
 MediaSourceReader::DoVideoSeek()
 {
-  SwitchVideoReader(mPendingSeekTime);
-  mVideoSeekRequest.Begin(mVideoReader->Seek(mPendingSeekTime, 0)
+  SwitchVideoSource(mPendingSeekTime);
+  mVideoSeekRequest.Begin(GetVideoReader()->Seek(GetReaderVideoTime(mPendingSeekTime), 0)
                           ->RefableThen(GetTaskQueue(), __func__, this,
                                         &MediaSourceReader::OnVideoSeekCompleted,
                                         &MediaSourceReader::OnVideoSeekFailed));
-  MSE_DEBUG("MediaSourceReader(%p)::DoVideoSeek reader=%p", this, mVideoReader.get());
+  MSE_DEBUG("MediaSourceReader(%p)::DoVideoSeek reader=%p", this, GetVideoReader());
 }
 
 nsresult
 MediaSourceReader::GetBuffered(dom::TimeRanges* aBuffered)
 {
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   MOZ_ASSERT(aBuffered->Length() == 0);
   if (mTrackBuffers.IsEmpty()) {
@@ -898,63 +923,63 @@ MediaSourceReader::ReadMetadata(MediaInf
   if (!mAudioTrack && !mVideoTrack) {
     MSE_DEBUG("MediaSourceReader(%p)::ReadMetadata missing track: mAudioTrack=%p mVideoTrack=%p",
               this, mAudioTrack.get(), mVideoTrack.get());
     return NS_ERROR_FAILURE;
   }
 
   if (mAudioTrack) {
     MOZ_ASSERT(mAudioTrack->IsReady());
-    mAudioReader = mAudioTrack->Decoders()[0]->GetReader();
+    mAudioSourceDecoder = mAudioTrack->Decoders()[0];
 
-    const MediaInfo& info = mAudioReader->GetMediaInfo();
+    const MediaInfo& info = GetAudioReader()->GetMediaInfo();
     MOZ_ASSERT(info.HasAudio());
     mInfo.mAudio = info.mAudio;
     mInfo.mIsEncrypted = mInfo.mIsEncrypted || info.mIsEncrypted;
     MSE_DEBUG("MediaSourceReader(%p)::ReadMetadata audio reader=%p duration=%lld",
-              this, mAudioReader.get(),
-              mAudioReader->GetDecoder()->GetMediaDuration());
+              this, mAudioSourceDecoder.get(),
+              mAudioSourceDecoder->GetReader()->GetDecoder()->GetMediaDuration());
   }
 
   if (mVideoTrack) {
     MOZ_ASSERT(mVideoTrack->IsReady());
-    mVideoReader = mVideoTrack->Decoders()[0]->GetReader();
+    mVideoSourceDecoder = mVideoTrack->Decoders()[0];
 
-    const MediaInfo& info = mVideoReader->GetMediaInfo();
+    const MediaInfo& info = GetVideoReader()->GetMediaInfo();
     MOZ_ASSERT(info.HasVideo());
     mInfo.mVideo = info.mVideo;
     mInfo.mIsEncrypted = mInfo.mIsEncrypted || info.mIsEncrypted;
     MSE_DEBUG("MediaSourceReader(%p)::ReadMetadata video reader=%p duration=%lld",
-              this, mVideoReader.get(),
-              mVideoReader->GetDecoder()->GetMediaDuration());
+              this, GetVideoReader(),
+              GetVideoReader()->GetDecoder()->GetMediaDuration());
   }
 
   *aInfo = mInfo;
   *aTags = nullptr; // TODO: Handle metadata.
 
   return NS_OK;
 }
 
 void
 MediaSourceReader::ReadUpdatedMetadata(MediaInfo* aInfo)
 {
   if (mAudioTrack) {
     MOZ_ASSERT(mAudioTrack->IsReady());
-    mAudioReader = mAudioTrack->Decoders()[0]->GetReader();
+    mAudioSourceDecoder = mAudioTrack->Decoders()[0];
 
-    const MediaInfo& info = mAudioReader->GetMediaInfo();
+    const MediaInfo& info = GetAudioReader()->GetMediaInfo();
     MOZ_ASSERT(info.HasAudio());
     mInfo.mAudio = info.mAudio;
   }
 
   if (mVideoTrack) {
     MOZ_ASSERT(mVideoTrack->IsReady());
-    mVideoReader = mVideoTrack->Decoders()[0]->GetReader();
+    mVideoSourceDecoder = mVideoTrack->Decoders()[0];
 
-    const MediaInfo& info = mVideoReader->GetMediaInfo();
+    const MediaInfo& info = GetVideoReader()->GetMediaInfo();
     MOZ_ASSERT(info.HasVideo());
     mInfo.mVideo = info.mVideo;
   }
   *aInfo = mInfo;
 }
 
 void
 MediaSourceReader::Ended()
@@ -994,31 +1019,31 @@ MediaSourceReader::GetMozDebugReaderData
     result += nsPrintfCString("\tDumping Audio Track Decoders: - mLastAudioTime: %f\n", double(mLastAudioTime) / USECS_PER_S);
     for (int32_t i = mAudioTrack->Decoders().Length() - 1; i >= 0; --i) {
       nsRefPtr<MediaDecoderReader> newReader = mAudioTrack->Decoders()[i]->GetReader();
 
       nsRefPtr<dom::TimeRanges> ranges = new dom::TimeRanges();
       mAudioTrack->Decoders()[i]->GetBuffered(ranges);
       result += nsPrintfCString("\t\tReader %d: %p ranges=%s active=%s size=%lld\n",
                                 i, newReader.get(), DumpTimeRanges(ranges).get(),
-                                newReader.get() == mAudioReader.get() ? "true" : "false",
+                                newReader.get() == GetAudioReader() ? "true" : "false",
                                 mAudioTrack->Decoders()[i]->GetResource()->GetSize());
     }
   }
 
   if (mVideoTrack) {
     result += nsPrintfCString("\tDumping Video Track Decoders - mLastVideoTime: %f\n", double(mLastVideoTime) / USECS_PER_S);
     for (int32_t i = mVideoTrack->Decoders().Length() - 1; i >= 0; --i) {
       nsRefPtr<MediaDecoderReader> newReader = mVideoTrack->Decoders()[i]->GetReader();
 
       nsRefPtr<dom::TimeRanges> ranges = new dom::TimeRanges();
       mVideoTrack->Decoders()[i]->GetBuffered(ranges);
       result += nsPrintfCString("\t\tReader %d: %p ranges=%s active=%s size=%lld\n",
                                 i, newReader.get(), DumpTimeRanges(ranges).get(),
-                                newReader.get() == mVideoReader.get() ? "true" : "false",
+                                newReader.get() == GetVideoReader() ? "true" : "false",
                                 mVideoTrack->Decoders()[i]->GetResource()->GetSize());
     }
   }
   aString += NS_ConvertUTF8toUTF16(result);
 }
 
 #ifdef MOZ_EME
 nsresult
@@ -1035,12 +1060,36 @@ MediaSourceReader::SetCDMProxy(CDMProxy*
   return NS_OK;
 }
 #endif
 
 bool
 MediaSourceReader::IsActiveReader(MediaDecoderReader* aReader)
 {
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
-  return aReader == mVideoReader.get() || aReader == mAudioReader.get();
+  return aReader == GetVideoReader() || aReader == GetAudioReader();
+}
+
+MediaDecoderReader*
+MediaSourceReader::GetAudioReader() const
+{
+  return mAudioSourceDecoder ? mAudioSourceDecoder->GetReader() : nullptr;
+}
+
+MediaDecoderReader*
+MediaSourceReader::GetVideoReader() const
+{
+  return mVideoSourceDecoder ? mVideoSourceDecoder->GetReader() : nullptr;
+}
+
+int64_t
+MediaSourceReader::GetReaderAudioTime(int64_t aTime) const
+{
+  return aTime - mAudioSourceDecoder->GetTimestampOffset();
+}
+
+int64_t
+MediaSourceReader::GetReaderVideoTime(int64_t aTime) const
+{
+  return aTime - mVideoSourceDecoder->GetTimestampOffset();
 }
 
 } // namespace mozilla
--- a/dom/media/mediasource/MediaSourceReader.h
+++ b/dom/media/mediasource/MediaSourceReader.h
@@ -137,39 +137,39 @@ public:
   // Set the duration of the attached mediasource element.
   void SetMediaSourceDuration(double aDuration /* seconds */);
 
 #ifdef MOZ_EME
   nsresult SetCDMProxy(CDMProxy* aProxy);
 #endif
 
   virtual bool IsAsync() const MOZ_OVERRIDE {
-    return (!mAudioReader || mAudioReader->IsAsync()) &&
-           (!mVideoReader || mVideoReader->IsAsync());
+    return (!GetAudioReader() || GetAudioReader()->IsAsync()) &&
+           (!GetVideoReader() || GetVideoReader()->IsAsync());
   }
 
   // Returns true if aReader is a currently active audio or video
   bool IsActiveReader(MediaDecoderReader* aReader);
 
   // Returns a string describing the state of the MediaSource internal
   // buffered data. Used for debugging purposes.
   void GetMozDebugReaderData(nsAString& aString);
 
 private:
-  // Switch the current audio/video reader to the reader that
+  // Switch the current audio/video source to the source that
   // contains aTarget (or up to aTolerance after target). Both
   // aTarget and aTolerance are in microseconds.
-  enum SwitchReaderResult {
-    READER_ERROR = -1,
-    READER_EXISTING = 0,
-    READER_NEW = 1,
+  enum SwitchSourceResult {
+    SOURCE_ERROR = -1,
+    SOURCE_EXISTING = 0,
+    SOURCE_NEW = 1,
   };
 
-  SwitchReaderResult SwitchAudioReader(int64_t aTarget);
-  SwitchReaderResult SwitchVideoReader(int64_t aTarget);
+  SwitchSourceResult SwitchAudioSource(int64_t aTarget);
+  SwitchSourceResult SwitchVideoSource(int64_t aTarget);
 
   void DoAudioRequest();
   void DoVideoRequest();
 
   void CompleteAudioSeekAndDoRequest()
   {
     mAudioSeekRequest.Complete();
     DoAudioRequest();
@@ -188,32 +188,37 @@ private:
   }
 
   void CompleteVideoSeekAndRejectPromise()
   {
     mVideoSeekRequest.Complete();
     mVideoPromise.Reject(DECODE_ERROR, __func__);
   }
 
+  MediaDecoderReader* GetAudioReader() const;
+  MediaDecoderReader* GetVideoReader() const;
+  int64_t GetReaderAudioTime(int64_t aTime) const;
+  int64_t GetReaderVideoTime(int64_t aTime) const;
+
   // Will reject the MediaPromise with END_OF_STREAM if mediasource has ended
   // or with WAIT_FOR_DATA otherwise.
   void CheckForWaitOrEndOfStream(MediaData::Type aType, int64_t aTime /* microseconds */);
 
-  // Return a reader from the set available in aTrackDecoders that has data
+  // Return a decoder from the set available in aTrackDecoders that has data
   // available in the range requested by aTarget.
-  already_AddRefed<MediaDecoderReader> SelectReader(int64_t aTarget,
-                                                    int64_t aTolerance,
-                                                    const nsTArray<nsRefPtr<SourceBufferDecoder>>& aTrackDecoders);
+  already_AddRefed<SourceBufferDecoder> SelectDecoder(int64_t aTarget,
+                                                      int64_t aTolerance,
+                                                      const nsTArray<nsRefPtr<SourceBufferDecoder>>& aTrackDecoders);
   bool HaveData(int64_t aTarget, MediaData::Type aType);
 
   void AttemptSeek();
   bool IsSeeking() { return mPendingSeekTime != -1; }
 
-  nsRefPtr<MediaDecoderReader> mAudioReader;
-  nsRefPtr<MediaDecoderReader> mVideoReader;
+  nsRefPtr<SourceBufferDecoder> mAudioSourceDecoder;
+  nsRefPtr<SourceBufferDecoder> mVideoSourceDecoder;
 
   nsTArray<nsRefPtr<TrackBuffer>> mTrackBuffers;
   nsTArray<nsRefPtr<TrackBuffer>> mShutdownTrackBuffers;
   nsTArray<nsRefPtr<TrackBuffer>> mEssentialTrackBuffers;
   nsRefPtr<TrackBuffer> mAudioTrack;
   nsRefPtr<TrackBuffer> mVideoTrack;
 
   MediaPromiseConsumerHolder<AudioDataPromise> mAudioRequest;
--- a/dom/media/mediasource/SourceBufferDecoder.cpp
+++ b/dom/media/mediasource/SourceBufferDecoder.cpp
@@ -243,39 +243,46 @@ SourceBufferDecoder::NotifyDataArrived(c
 
 nsresult
 SourceBufferDecoder::GetBuffered(dom::TimeRanges* aBuffered)
 {
   nsresult rv = mReader->GetBuffered(aBuffered);
   if (NS_FAILED(rv)) {
     return rv;
   }
+
+  // Adjust buffered range according to timestamp offset.
+  aBuffered->Shift((double)mTimestampOffset / USECS_PER_S);
+
   if (!WasTrimmed()) {
     return NS_OK;
   }
   nsRefPtr<dom::TimeRanges> tr = new dom::TimeRanges();
   tr->Add(0, mTrimmedOffset);
   aBuffered->Intersection(tr);
   return NS_OK;
 }
 
 int64_t
 SourceBufferDecoder::ConvertToByteOffset(double aTime)
 {
-  int64_t readerOffset = mReader->GetEvictionOffset(aTime);
+  int64_t readerOffset =
+    mReader->GetEvictionOffset(aTime - double(mTimestampOffset) / USECS_PER_S);
   if (readerOffset >= 0) {
     return readerOffset;
   }
 
   // Uses a conversion based on (aTime/duration) * length.  For the
   // purposes of eviction this should be adequate since we have the
   // byte threshold as well to ensure data actually gets evicted and
   // we ensure we don't evict before the current playable point.
   if (mRealMediaDuration <= 0) {
     return -1;
   }
   int64_t length = GetResource()->GetLength();
   MOZ_ASSERT(length > 0);
-  int64_t offset = (aTime / (double(mRealMediaDuration) / USECS_PER_S)) * length;
+  int64_t offset =
+    ((aTime - double(mTimestampOffset) / USECS_PER_S) /
+      (double(mRealMediaDuration) / USECS_PER_S)) * length;
   return offset;
 }
 
 } // namespace mozilla
--- a/dom/media/mediasource/SourceBufferDecoder.h
+++ b/dom/media/mediasource/SourceBufferDecoder.h
@@ -37,17 +37,16 @@ public:
 
   NS_DECL_THREADSAFE_ISUPPORTS
 
   virtual bool IsMediaSeekable() MOZ_FINAL MOZ_OVERRIDE;
   virtual bool IsShutdown() const MOZ_FINAL MOZ_OVERRIDE;
   virtual bool IsTransportSeekable() MOZ_FINAL MOZ_OVERRIDE;
   virtual bool OnDecodeThread() const MOZ_FINAL MOZ_OVERRIDE;
   virtual bool OnStateMachineThread() const MOZ_FINAL MOZ_OVERRIDE;
-  virtual int64_t GetTimestampOffset() const MOZ_FINAL MOZ_OVERRIDE { return mTimestampOffset; }
   virtual int64_t GetMediaDuration() MOZ_FINAL MOZ_OVERRIDE;
   virtual layers::ImageContainer* GetImageContainer() MOZ_FINAL MOZ_OVERRIDE;
   virtual MediaDecoderOwner* GetOwner() MOZ_FINAL MOZ_OVERRIDE;
   virtual SourceBufferResource* GetResource() const MOZ_FINAL MOZ_OVERRIDE;
   virtual ReentrantMonitor& GetReentrantMonitor() MOZ_FINAL MOZ_OVERRIDE;
   virtual VideoFrameContainer* GetVideoFrameContainer() MOZ_FINAL MOZ_OVERRIDE;
   virtual void MetadataLoaded(nsAutoPtr<MediaInfo> aInfo, nsAutoPtr<MetadataTags> aTags, bool aRestoredFromDormant) MOZ_FINAL MOZ_OVERRIDE;
   virtual void FirstFrameLoaded(nsAutoPtr<MediaInfo> aInfo, bool aRestoredFromDormant) MOZ_FINAL MOZ_OVERRIDE;
@@ -61,28 +60,30 @@ public:
   virtual void SetMediaDuration(int64_t aDuration) MOZ_FINAL MOZ_OVERRIDE;
   virtual void SetMediaEndTime(int64_t aTime) MOZ_FINAL MOZ_OVERRIDE;
   virtual void SetMediaSeekable(bool aMediaSeekable) MOZ_FINAL MOZ_OVERRIDE;
   virtual void UpdateEstimatedMediaDuration(int64_t aDuration) MOZ_FINAL MOZ_OVERRIDE;
   virtual void UpdatePlaybackPosition(int64_t aTime) MOZ_FINAL MOZ_OVERRIDE;
   virtual bool HasInitializationData() MOZ_FINAL MOZ_OVERRIDE;
 
   // SourceBufferResource specific interface below.
+  int64_t GetTimestampOffset() const { return mTimestampOffset; }
+  void SetTimestampOffset(int64_t aOffset)  { mTimestampOffset = aOffset; }
 
   // Warning: this mirrors GetBuffered in MediaDecoder, but this class's base is
   // AbstractMediaDecoder, which does not supply this interface.
   nsresult GetBuffered(dom::TimeRanges* aBuffered);
 
   void SetReader(MediaDecoderReader* aReader)
   {
     MOZ_ASSERT(!mReader);
     mReader = aReader;
   }
 
-  MediaDecoderReader* GetReader()
+  MediaDecoderReader* GetReader() const
   {
     return mReader;
   }
 
   void SetTaskQueue(MediaTaskQueue* aTaskQueue)
   {
     MOZ_ASSERT((!mTaskQueue && aTaskQueue) || (mTaskQueue && !aTaskQueue));
     mTaskQueue = aTaskQueue;
--- a/dom/media/mediasource/TrackBuffer.cpp
+++ b/dom/media/mediasource/TrackBuffer.cpp
@@ -201,18 +201,16 @@ TrackBuffer::AppendData(LargeDataBuffer*
   } else if (!hadCompleteInitData && gotInit) {
     MOZ_ASSERT(mCurrentDecoder);
     // Queue pending decoder for initialization now that we have a full
     // init segment.
     decoders.AppendElement(mCurrentDecoder);
   }
 
   if (gotMedia) {
-    start += aTimestampOffset;
-    end += aTimestampOffset;
     if (mLastEndTimestamp &&
         (!mParser->TimestampsFuzzyEqual(start, mLastEndTimestamp.value()) ||
          mLastTimestampOffset != aTimestampOffset ||
          mDecoderPerSegment ||
          (mCurrentDecoder && mCurrentDecoder->WasTrimmed()))) {
       MSE_DEBUG("TrackBuffer(%p)::AppendData: Data last=[%lld, %lld] overlaps [%lld, %lld]",
                 this, mLastStartTimestamp, mLastEndTimestamp.value(), start, end);
 
--- a/media/libstagefright/binding/DecoderData.cpp
+++ b/media/libstagefright/binding/DecoderData.cpp
@@ -228,23 +228,23 @@ MP4Sample::MP4Sample(const MP4Sample& co
 MP4Sample::~MP4Sample()
 {
   if (mMediaBuffer) {
     mMediaBuffer->release();
   }
 }
 
 void
-MP4Sample::Update(int64_t& aMediaTime, int64_t& aTimestampOffset)
+MP4Sample::Update(int64_t& aMediaTime)
 {
   sp<MetaData> m = mMediaBuffer->meta_data();
   // XXXbholley - Why don't we adjust decode_timestamp for aMediaTime?
   // According to k17e, this code path is no longer used - we should probably remove it.
-  decode_timestamp = FindInt64(m, kKeyDecodingTime) + aTimestampOffset;
-  composition_timestamp = FindInt64(m, kKeyTime) - aMediaTime + aTimestampOffset;
+  decode_timestamp = FindInt64(m, kKeyDecodingTime);
+  composition_timestamp = FindInt64(m, kKeyTime) - aMediaTime;
   duration = FindInt64(m, kKeyDuration);
   byte_offset = FindInt64(m, kKey64BitFileOffset);
   is_sync_point = FindInt32(m, kKeyIsSyncFrame);
   data = reinterpret_cast<uint8_t*>(mMediaBuffer->data());
   size = mMediaBuffer->range_length();
 
   crypto.Update(m);
 }
--- a/media/libstagefright/binding/Index.cpp
+++ b/media/libstagefright/binding/Index.cpp
@@ -224,23 +224,22 @@ SampleIterator::GetNextKeyframeTime()
       return moofs[moof].mIndex[sample].mDecodeTime;
     }
     ++sample;
   }
   MOZ_ASSERT(false); // should not be reached.
 }
 
 Index::Index(const stagefright::Vector<MediaSource::Indice>& aIndex,
-             Stream* aSource, uint32_t aTrackId, Microseconds aTimestampOffset,
-             Monitor* aMonitor)
+             Stream* aSource, uint32_t aTrackId, Monitor* aMonitor)
   : mSource(aSource)
   , mMonitor(aMonitor)
 {
   if (aIndex.isEmpty()) {
-    mMoofParser = new MoofParser(aSource, aTrackId, aTimestampOffset, aMonitor);
+    mMoofParser = new MoofParser(aSource, aTrackId, aMonitor);
   } else {
     for (size_t i = 0; i < aIndex.size(); i++) {
       const MediaSource::Indice& indice = aIndex[i];
       Sample sample;
       sample.mByteRange = MediaByteRange(indice.start_offset,
                                          indice.end_offset);
       sample.mCompositionRange = Interval<Microseconds>(indice.start_composition,
                                                         indice.end_composition);
--- a/media/libstagefright/binding/MoofParser.cpp
+++ b/media/libstagefright/binding/MoofParser.cpp
@@ -24,17 +24,17 @@ MoofParser::RebuildFragmentedIndex(
 void
 MoofParser::RebuildFragmentedIndex(BoxContext& aContext)
 {
   for (Box box(&aContext, mOffset); box.IsAvailable(); box = box.Next()) {
     if (box.IsType("moov")) {
       mInitRange = MediaByteRange(0, box.Range().mEnd);
       ParseMoov(box);
     } else if (box.IsType("moof")) {
-      Moof moof(box, mTrex, mMdhd, mEdts, mSinf, mTimestampOffset);
+      Moof moof(box, mTrex, mMdhd, mEdts, mSinf);
 
       if (!mMoofs.IsEmpty()) {
         // Stitch time ranges together in the case of a (hopefully small) time
         // range gap between moofs.
         mMoofs.LastElement().FixRounding(moof);
       }
 
       mMoofs.AppendElement(moof);
@@ -209,18 +209,19 @@ MoofParser::ParseEncrypted(Box& aBox)
 
       if (mSinf.IsValid()) {
         break;
       }
     }
   }
 }
 
-Moof::Moof(Box& aBox, Trex& aTrex, Mdhd& aMdhd, Edts& aEdts, Sinf& aSinf, Microseconds aTimestampOffset) :
-    mRange(aBox.Range()), mTimestampOffset(aTimestampOffset), mMaxRoundingError(0)
+Moof::Moof(Box& aBox, Trex& aTrex, Mdhd& aMdhd, Edts& aEdts, Sinf& aSinf)
+  : mRange(aBox.Range())
+  , mMaxRoundingError(0)
 {
   for (Box box = aBox.FirstChild(); box.IsAvailable(); box = box.Next()) {
     if (box.IsType("traf")) {
       ParseTraf(box, aTrex, aMdhd, aEdts, aSinf);
     }
   }
   ProcessCenc();
 }
@@ -394,20 +395,20 @@ Moof::ParseTrun(Box& aBox, Tfhd& aTfhd, 
     if (flags & 0x800) {
       ctsOffset = reader->Read32();
     }
 
     Sample sample;
     sample.mByteRange = MediaByteRange(offset, offset + sampleSize);
     offset += sampleSize;
 
-    sample.mDecodeTime = aMdhd.ToMicroseconds(decodeTime) + mTimestampOffset;
+    sample.mDecodeTime = aMdhd.ToMicroseconds(decodeTime);
     sample.mCompositionRange = Interval<Microseconds>(
-      aMdhd.ToMicroseconds((int64_t)decodeTime + ctsOffset - aEdts.mMediaStart) + mTimestampOffset,
-      aMdhd.ToMicroseconds((int64_t)decodeTime + ctsOffset + sampleDuration - aEdts.mMediaStart) + mTimestampOffset);
+      aMdhd.ToMicroseconds((int64_t)decodeTime + ctsOffset - aEdts.mMediaStart),
+      aMdhd.ToMicroseconds((int64_t)decodeTime + ctsOffset + sampleDuration - aEdts.mMediaStart));
     decodeTime += sampleDuration;
 
     sample.mSync = !(sampleFlags & 0x1010000);
 
     mIndex.AppendElement(sample);
 
     mMdatRange = mMdatRange.Extents(sample.mByteRange);
   }
@@ -504,17 +505,18 @@ Trex::Trex(Box& aBox)
   mTrackId = reader->ReadU32();
   mDefaultSampleDescriptionIndex = reader->ReadU32();
   mDefaultSampleDuration = reader->ReadU32();
   mDefaultSampleSize = reader->ReadU32();
   mDefaultSampleFlags = reader->ReadU32();
   mValid = true;
 }
 
-Tfhd::Tfhd(Box& aBox, Trex& aTrex) : Trex(aTrex)
+Tfhd::Tfhd(Box& aBox, Trex& aTrex)
+  : Trex(aTrex)
 {
   MOZ_ASSERT(aBox.IsType("tfhd"));
   MOZ_ASSERT(aBox.Parent()->IsType("traf"));
   MOZ_ASSERT(aBox.Parent()->Parent()->IsType("moof"));
 
   BoxReader reader(aBox);
   if (!reader->CanReadType<uint32_t>()) {
     return;
--- a/media/libstagefright/binding/include/mp4_demuxer/DecoderData.h
+++ b/media/libstagefright/binding/include/mp4_demuxer/DecoderData.h
@@ -152,17 +152,17 @@ public:
 typedef int64_t Microseconds;
 
 class MP4Sample
 {
 public:
   MP4Sample();
   MP4Sample(const MP4Sample& copy);
   virtual ~MP4Sample();
-  void Update(int64_t& aMediaTime, int64_t& aTimestampOffset);
+  void Update(int64_t& aMediaTime);
   void Pad(size_t aPaddingBytes);
 
   stagefright::MediaBuffer* mMediaBuffer;
 
   Microseconds decode_timestamp;
   Microseconds composition_timestamp;
   Microseconds duration;
   int64_t byte_offset;
--- a/media/libstagefright/binding/include/mp4_demuxer/Index.h
+++ b/media/libstagefright/binding/include/mp4_demuxer/Index.h
@@ -33,18 +33,17 @@ private:
 };
 
 class Index
 {
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(Index)
 
   Index(const stagefright::Vector<stagefright::MediaSource::Indice>& aIndex,
-        Stream* aSource, uint32_t aTrackId, Microseconds aTimestampOffset,
-        Monitor* aMonitor);
+        Stream* aSource, uint32_t aTrackId, Monitor* aMonitor);
 
   void UpdateMoofIndex(const nsTArray<mozilla::MediaByteRange>& aByteRanges);
   Microseconds GetEndCompositionIfBuffered(
     const nsTArray<mozilla::MediaByteRange>& aByteRanges);
   void ConvertByteRangesToTimeRanges(
     const nsTArray<mozilla::MediaByteRange>& aByteRanges,
     nsTArray<Interval<Microseconds>>* aTimeRanges);
   uint64_t GetEvictionOffset(Microseconds aTime);
--- a/media/libstagefright/binding/include/mp4_demuxer/MoofParser.h
+++ b/media/libstagefright/binding/include/mp4_demuxer/MoofParser.h
@@ -163,17 +163,17 @@ private:
   int64_t mMoofOffset;
   Saiz& mSaiz;
   Saio& mSaio;
 };
 
 class Moof : public Atom
 {
 public:
-  Moof(Box& aBox, Trex& aTrex, Mdhd& aMdhd, Edts& aEdts, Sinf& aSinf, Microseconds aTimestampOffset);
+  Moof(Box& aBox, Trex& aTrex, Mdhd& aMdhd, Edts& aEdts, Sinf& aSinf);
   bool GetAuxInfo(AtomType aType, nsTArray<MediaByteRange>* aByteRanges);
   void FixRounding(const Moof& aMoof);
 
   mozilla::MediaByteRange mRange;
   mozilla::MediaByteRange mMdatRange;
   Interval<Microseconds> mTimeRange;
   nsTArray<Sample> mIndex;
 
@@ -181,27 +181,27 @@ public:
   nsTArray<Saio> mSaios;
 
 private:
   void ParseTraf(Box& aBox, Trex& aTrex, Mdhd& aMdhd, Edts& aEdts, Sinf& aSinf);
   void ParseTrun(Box& aBox, Tfhd& aTfhd, Tfdt& aTfdt, Mdhd& aMdhd, Edts& aEdts);
   void ParseSaiz(Box& aBox);
   void ParseSaio(Box& aBox);
   bool ProcessCenc();
-  Microseconds mTimestampOffset;
   uint64_t mMaxRoundingError;
 };
 
 class MoofParser
 {
 public:
-  MoofParser(Stream* aSource, uint32_t aTrackId,
-             Microseconds aTimestampOffset, Monitor* aMonitor)
-    : mSource(aSource), mOffset(0), mTimestampOffset(aTimestampOffset),
-      mTrex(aTrackId), mMonitor(aMonitor)
+  MoofParser(Stream* aSource, uint32_t aTrackId, Monitor* aMonitor)
+    : mSource(aSource)
+    , mOffset(0)
+    , mTrex(aTrackId)
+    , mMonitor(aMonitor)
   {
     // Setting the mTrex.mTrackId to 0 is a nasty work around for calculating
     // the composition range for MSE. We need an array of tracks.
   }
   void RebuildFragmentedIndex(
     const nsTArray<mozilla::MediaByteRange>& aByteRanges);
   void RebuildFragmentedIndex(BoxContext& aContext);
   Interval<Microseconds> GetCompositionRange(
@@ -218,17 +218,16 @@ public:
   void ParseEncrypted(Box& aBox);
   void ParseSinf(Box& aBox);
 
   bool BlockingReadNextMoof();
 
   mozilla::MediaByteRange mInitRange;
   nsRefPtr<Stream> mSource;
   uint64_t mOffset;
-  Microseconds mTimestampOffset;
   nsTArray<uint64_t> mMoofOffsets;
   Mdhd mMdhd;
   Trex mTrex;
   Tfdt mTfdt;
   Edts mEdts;
   Sinf mSinf;
   Monitor* mMonitor;
   nsTArray<Moof>& Moofs() { mMonitor->AssertCurrentThreadOwns(); return mMoofs; }
--- a/media/libstagefright/binding/include/mp4_demuxer/mp4_demuxer.h
+++ b/media/libstagefright/binding/include/mp4_demuxer/mp4_demuxer.h
@@ -38,17 +38,17 @@ protected:
   virtual ~Stream() {}
 };
 
 enum TrackType { kVideo = 1, kAudio };
 
 class MP4Demuxer
 {
 public:
-  explicit MP4Demuxer(Stream* aSource, Microseconds aTimestampOffset, Monitor* aMonitor);
+  explicit MP4Demuxer(Stream* aSource, Monitor* aMonitor);
   ~MP4Demuxer();
 
   bool Init();
   Microseconds Duration();
   bool CanSeek();
 
   bool HasValidAudio();
   bool HasValidVideo();
@@ -81,16 +81,15 @@ private:
   AudioDecoderConfig mAudioConfig;
   VideoDecoderConfig mVideoConfig;
   CryptoFile mCrypto;
 
   nsAutoPtr<StageFrightPrivate> mPrivate;
   nsRefPtr<Stream> mSource;
   nsTArray<mozilla::MediaByteRange> mCachedByteRanges;
   nsTArray<Interval<Microseconds>> mCachedTimeRanges;
-  Microseconds mTimestampOffset;
   Monitor* mMonitor;
   Microseconds mNextKeyframeTime;
 };
 
 } // namespace mozilla
 
 #endif // MP4_DEMUXER_H_
--- a/media/libstagefright/binding/mp4_demuxer.cpp
+++ b/media/libstagefright/binding/mp4_demuxer.cpp
@@ -68,20 +68,21 @@ public:
   virtual uint32_t flags() { return kWantsPrefetching | kIsHTTPBasedSource; }
 
   virtual status_t reconnectAtOffset(off64_t offset) { return NO_ERROR; }
 
 private:
   nsRefPtr<Stream> mSource;
 };
 
-MP4Demuxer::MP4Demuxer(Stream* source, Microseconds aTimestampOffset, Monitor* aMonitor)
-  : mPrivate(new StageFrightPrivate()), mSource(source),
-    mTimestampOffset(aTimestampOffset), mMonitor(aMonitor),
-    mNextKeyframeTime(-1)
+MP4Demuxer::MP4Demuxer(Stream* source, Monitor* aMonitor)
+  : mPrivate(new StageFrightPrivate())
+  , mSource(source)
+  , mMonitor(aMonitor)
+  , mNextKeyframeTime(-1)
 {
   mPrivate->mExtractor = new MPEG4Extractor(new DataSourceAdapter(source));
 }
 
 MP4Demuxer::~MP4Demuxer()
 {
   if (mPrivate->mAudio.get()) {
     mPrivate->mAudio->stop();
@@ -116,31 +117,31 @@ MP4Demuxer::Init()
       sp<MediaSource> track = e->getTrack(i);
       if (track->start() != OK) {
         return false;
       }
       mPrivate->mAudio = track;
       mAudioConfig.Update(metaData, mimeType);
       nsRefPtr<Index> index = new Index(mPrivate->mAudio->exportIndex(),
                                         mSource, mAudioConfig.mTrackId,
-                                        mTimestampOffset, mMonitor);
+                                        mMonitor);
       mPrivate->mIndexes.AppendElement(index);
       if (index->IsFragmented()) {
         mPrivate->mAudioIterator = new SampleIterator(index);
       }
     } else if (!mPrivate->mVideo.get() && !strncmp(mimeType, "video/", 6)) {
       sp<MediaSource> track = e->getTrack(i);
       if (track->start() != OK) {
         return false;
       }
       mPrivate->mVideo = track;
       mVideoConfig.Update(metaData, mimeType);
       nsRefPtr<Index> index = new Index(mPrivate->mVideo->exportIndex(),
                                         mSource, mVideoConfig.mTrackId,
-                                        mTimestampOffset, mMonitor);
+                                        mMonitor);
       mPrivate->mIndexes.AppendElement(index);
       if (index->IsFragmented()) {
         mPrivate->mVideoIterator = new SampleIterator(index);
       }
     }
   }
   sp<MetaData> metaData = e->getMetaData();
   mCrypto.Update(metaData);
@@ -227,17 +228,17 @@ MP4Demuxer::DemuxAudioSample()
   status_t status =
     mPrivate->mAudio->read(&sample->mMediaBuffer, &mPrivate->mAudioOptions);
   mPrivate->mAudioOptions.clearSeekTo();
 
   if (status < 0) {
     return nullptr;
   }
 
-  sample->Update(mAudioConfig.media_time, mTimestampOffset);
+  sample->Update(mAudioConfig.media_time);
 
   return sample.forget();
 }
 
 MP4Sample*
 MP4Demuxer::DemuxVideoSample()
 {
   mMonitor->AssertCurrentThreadOwns();
@@ -260,17 +261,17 @@ MP4Demuxer::DemuxVideoSample()
   status_t status =
     mPrivate->mVideo->read(&sample->mMediaBuffer, &mPrivate->mVideoOptions);
   mPrivate->mVideoOptions.clearSeekTo();
 
   if (status < 0) {
     return nullptr;
   }
 
-  sample->Update(mVideoConfig.media_time, mTimestampOffset);
+  sample->Update(mVideoConfig.media_time);
   sample->extra_data = mVideoConfig.extra_data;
 
   return sample.forget();
 }
 
 void
 MP4Demuxer::UpdateIndex(const nsTArray<mozilla::MediaByteRange>& aByteRanges)
 {