Backed out 6 changesets (bug 1129732, bug 1118597) for linux32 debug bustage.
author Ryan VanderMeulen <ryanvm@gmail.com>
Fri, 20 Feb 2015 18:16:12 -0500
changeset 249861 77d5e3a304359aae428d3a445dd7b95eb89e0597
parent 249860 e2c205d7cea5d589cd870e66512c144a17fc8bfc
child 249862 06fb0d5fe0d7b712cefb18536f55452ef02dc934
push id 4489
push user raliiev@mozilla.com
push date Mon, 23 Feb 2015 15:17:55 +0000
treeherder mozilla-beta@fd7c3dc24146
bugs 1129732, 1118597
milestone 37.0a2
backs out f24f0b0532f983d6ec010d1e09a8aaadf41b38c3
7f5d120245af74677a9fd7b4bd0d4ab5c5bafee0
81b4d96a95f5288010b011f6cb7e3967d493c2c4
5808eff15019262a435eb0876a309ad8e90acf74
eea43cfc67c72b5cc054de2637d8b44a17f89984
e9584e7c65b275ab15a676b5bdd2818f129acd92
Backed out 6 changesets (bug 1129732, bug 1118597) for linux32 debug bustage. Backed out changeset f24f0b0532f9 (bug 1118597) Backed out changeset 7f5d120245af (bug 1129732) Backed out changeset 81b4d96a95f5 (bug 1129732) Backed out changeset 5808eff15019 (bug 1129732) Backed out changeset eea43cfc67c7 (bug 1129732) Backed out changeset e9584e7c65b2 (bug 1118597)
dom/html/TimeRanges.cpp
dom/html/TimeRanges.h
dom/media/AbstractMediaDecoder.h
dom/media/MediaData.cpp
dom/media/MediaData.h
dom/media/fmp4/MP4Reader.cpp
dom/media/fmp4/MP4Reader.h
dom/media/gtest/TestMP4Demuxer.cpp
dom/media/mediasource/ContainerParser.cpp
dom/media/mediasource/MediaSourceReader.cpp
dom/media/mediasource/MediaSourceReader.h
dom/media/mediasource/SourceBufferDecoder.cpp
dom/media/mediasource/SourceBufferDecoder.h
dom/media/mediasource/TrackBuffer.cpp
dom/media/mediasource/TrackBuffer.h
media/libstagefright/binding/Box.cpp
media/libstagefright/binding/DecoderData.cpp
media/libstagefright/binding/Index.cpp
media/libstagefright/binding/MoofParser.cpp
media/libstagefright/binding/SinfParser.cpp
media/libstagefright/binding/include/mp4_demuxer/Atom.h
media/libstagefright/binding/include/mp4_demuxer/AtomType.h
media/libstagefright/binding/include/mp4_demuxer/Box.h
media/libstagefright/binding/include/mp4_demuxer/DecoderData.h
media/libstagefright/binding/include/mp4_demuxer/Index.h
media/libstagefright/binding/include/mp4_demuxer/MoofParser.h
media/libstagefright/binding/include/mp4_demuxer/SinfParser.h
media/libstagefright/binding/include/mp4_demuxer/mp4_demuxer.h
media/libstagefright/binding/mp4_demuxer.cpp
media/libstagefright/moz.build
--- a/dom/html/TimeRanges.cpp
+++ b/dom/html/TimeRanges.cpp
@@ -167,19 +167,10 @@ TimeRanges::Find(double aTime, double aT
 }
 
 JSObject*
 TimeRanges::WrapObject(JSContext* aCx)
 {
   return TimeRangesBinding::Wrap(aCx, this);
 }
 
-void
-TimeRanges::Shift(double aOffset)
-{
-  for (index_type i = 0; i < mRanges.Length(); ++i) {
-    mRanges[i].mStart += aOffset;
-    mRanges[i].mEnd += aOffset;
-  }
-}
-
 } // namespace dom
 } // namespace mozilla
--- a/dom/html/TimeRanges.h
+++ b/dom/html/TimeRanges.h
@@ -56,19 +56,16 @@ public:
   {
     return mRanges.Length();
   }
 
   virtual double Start(uint32_t aIndex, ErrorResult& aRv);
 
   virtual double End(uint32_t aIndex, ErrorResult& aRv);
 
-  // Shift all values by aOffset seconds.
-  void Shift(double aOffset);
-
 private:
   ~TimeRanges();
 
   // Comparator which orders TimeRanges by start time. Used by Normalize().
   struct TimeRange
   {
     TimeRange(double aStart, double aEnd)
       : mStart(aStart),
--- a/dom/media/AbstractMediaDecoder.h
+++ b/dom/media/AbstractMediaDecoder.h
@@ -60,16 +60,19 @@ public:
   // Called by the decode thread to keep track of the number of bytes read
   // from the resource.
   virtual void NotifyBytesConsumed(int64_t aBytes, int64_t aOffset) = 0;
 
   // Increments the parsed and decoded frame counters by the passed in counts.
   // Can be called on any thread.
   virtual void NotifyDecodedFrames(uint32_t aParsed, uint32_t aDecoded) = 0;
 
+  // For decoders with a notion of timestamp offset, returns the value in microseconds.
+  virtual int64_t GetTimestampOffset() const { return 0; }
+
   // Return the duration of the media in microseconds.
   virtual int64_t GetMediaDuration() = 0;
 
   // Set the duration of the media in microseconds.
   virtual void SetMediaDuration(int64_t aDuration) = 0;
 
   // Sets the duration of the media in microseconds. The MediaDecoder
   // fires a durationchange event to its owner (e.g., an HTML audio
--- a/dom/media/MediaData.cpp
+++ b/dom/media/MediaData.cpp
@@ -43,37 +43,16 @@ AudioData::SizeOfIncludingThis(MallocSiz
 {
   size_t size = aMallocSizeOf(this) + aMallocSizeOf(mAudioData);
   if (mAudioBuffer) {
     size += mAudioBuffer->SizeOfIncludingThis(aMallocSizeOf);
   }
   return size;
 }
 
-/* static */
-already_AddRefed<AudioData>
-AudioData::TransferAndUpdateTimestampAndDuration(AudioData* aOther,
-                                                  int64_t aTimestamp,
-                                                  int64_t aDuration)
-{
-  NS_ENSURE_TRUE(aOther, nullptr);
-  nsRefPtr<AudioData> v = new AudioData(aOther->mOffset,
-                                        aTimestamp,
-                                        aDuration,
-                                        aOther->mFrames,
-                                        aOther->mAudioData,
-                                        aOther->mChannels,
-                                        aOther->mRate);
-  v->mDiscontinuity = aOther->mDiscontinuity;
-  // Remove aOther's AudioData as it can't be shared across two targets.
-  aOther->mAudioData.forget();
-
-  return v.forget();
-}
-
 static bool
 ValidatePlane(const VideoData::YCbCrBuffer::Plane& aPlane)
 {
   return aPlane.mWidth <= PlanarYCbCrImage::MAX_DIMENSION &&
          aPlane.mHeight <= PlanarYCbCrImage::MAX_DIMENSION &&
          aPlane.mWidth * aPlane.mHeight < MAX_VIDEO_WIDTH * MAX_VIDEO_HEIGHT &&
          aPlane.mStride > 0;
 }
@@ -153,34 +132,32 @@ VideoData::ShallowCopyUpdateDuration(Vid
                                      int64_t aDuration)
 {
   nsRefPtr<VideoData> v = new VideoData(aOther->mOffset,
                                         aOther->mTime,
                                         aDuration,
                                         aOther->mKeyframe,
                                         aOther->mTimecode,
                                         aOther->mDisplay);
-  v->mDiscontinuity = aOther->mDiscontinuity;
   v->mImage = aOther->mImage;
   return v.forget();
 }
 
 /* static */
 already_AddRefed<VideoData>
 VideoData::ShallowCopyUpdateTimestamp(VideoData* aOther,
                                       int64_t aTimestamp)
 {
   NS_ENSURE_TRUE(aOther, nullptr);
   nsRefPtr<VideoData> v = new VideoData(aOther->mOffset,
                                         aTimestamp,
                                         aOther->GetEndTime() - aTimestamp,
                                         aOther->mKeyframe,
                                         aOther->mTimecode,
                                         aOther->mDisplay);
-  v->mDiscontinuity = aOther->mDiscontinuity;
   v->mImage = aOther->mImage;
   return v.forget();
 }
 
 /* static */
 already_AddRefed<VideoData>
 VideoData::ShallowCopyUpdateTimestampAndDuration(VideoData* aOther,
                                                  int64_t aTimestamp,
@@ -188,17 +165,16 @@ VideoData::ShallowCopyUpdateTimestampAnd
 {
   NS_ENSURE_TRUE(aOther, nullptr);
   nsRefPtr<VideoData> v = new VideoData(aOther->mOffset,
                                         aTimestamp,
                                         aDuration,
                                         aOther->mKeyframe,
                                         aOther->mTimecode,
                                         aOther->mDisplay);
-  v->mDiscontinuity = aOther->mDiscontinuity;
   v->mImage = aOther->mImage;
   return v.forget();
 }
 
 /* static */
 void VideoData::SetVideoDataToImage(PlanarYCbCrImage* aVideoImage,
                                     VideoInfo& aInfo,
                                     const YCbCrBuffer &aBuffer,
--- a/dom/media/MediaData.h
+++ b/dom/media/MediaData.h
@@ -79,25 +79,16 @@ public:
             uint32_t aChannels,
             uint32_t aRate)
     : MediaData(AUDIO_DATA, aOffset, aTime, aDuration)
     , mFrames(aFrames)
     , mChannels(aChannels)
     , mRate(aRate)
     , mAudioData(aData) {}
 
-  // Creates a new VideoData identical to aOther, but with a different
-  // specified timestamp and duration. All data from aOther is copied
-  // into the new AudioData but the audio data which is transferred.
-  // After such call, the original aOther is unusable.
-  static already_AddRefed<AudioData>
-  TransferAndUpdateTimestampAndDuration(AudioData* aOther,
-                                        int64_t aTimestamp,
-                                        int64_t aDuration);
-
   size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const;
 
   // If mAudioBuffer is null, creates it from mAudioData.
   void EnsureAudioBuffer();
 
   const uint32_t mFrames;
   const uint32_t mChannels;
   const uint32_t mRate;
--- a/dom/media/fmp4/MP4Reader.cpp
+++ b/dom/media/fmp4/MP4Reader.cpp
@@ -201,16 +201,17 @@ static bool sIsEMEEnabled = false;
 static bool sDemuxSkipToNextKeyframe = true;
 
 nsresult
 MP4Reader::Init(MediaDecoderReader* aCloneDonor)
 {
   MOZ_ASSERT(NS_IsMainThread(), "Must be on main thread.");
   PlatformDecoderModule::Init();
   mStream = new MP4Stream(mDecoder->GetResource());
+  mTimestampOffset = GetDecoder()->GetTimestampOffset();
 
   InitLayersBackendType();
 
   mAudio.mTaskQueue = new MediaTaskQueue(GetMediaDecodeThreadPool());
   NS_ENSURE_TRUE(mAudio.mTaskQueue, NS_ERROR_FAILURE);
 
   mVideo.mTaskQueue = new MediaTaskQueue(GetMediaDecodeThreadPool());
   NS_ENSURE_TRUE(mVideo.mTaskQueue, NS_ERROR_FAILURE);
@@ -336,17 +337,17 @@ MP4Reader::PreReadMetadata()
   if (mPlatform) {
     RequestCodecResource();
   }
 }
 
 bool
 MP4Reader::InitDemuxer()
 {
-  mDemuxer = new MP4Demuxer(mStream, &mDemuxerMonitor);
+  mDemuxer = new MP4Demuxer(mStream, mTimestampOffset, &mDemuxerMonitor);
   return mDemuxer->Init();
 }
 
 nsresult
 MP4Reader::ReadMetadata(MediaInfo* aInfo,
                         MetadataTags** aTags)
 {
   if (!mDemuxerInitialized) {
--- a/dom/media/fmp4/MP4Reader.h
+++ b/dom/media/fmp4/MP4Reader.h
@@ -122,16 +122,17 @@ private:
   virtual bool IsWaitingOnCDMResource() MOZ_OVERRIDE;
 
   Microseconds GetNextKeyframeTime();
   bool ShouldSkip(bool aSkipToNextKeyframe, int64_t aTimeThreshold);
 
   size_t SizeOfQueue(TrackType aTrack);
 
   nsRefPtr<MP4Stream> mStream;
+  int64_t mTimestampOffset;
   nsAutoPtr<mp4_demuxer::MP4Demuxer> mDemuxer;
   nsRefPtr<PlatformDecoderModule> mPlatform;
 
   class DecoderCallback : public MediaDataDecoderCallback {
   public:
     DecoderCallback(MP4Reader* aReader,
                     mp4_demuxer::TrackType aType)
       : mReader(aReader)
--- a/dom/media/gtest/TestMP4Demuxer.cpp
+++ b/dom/media/gtest/TestMP4Demuxer.cpp
@@ -19,17 +19,17 @@ public:
 
   nsRefPtr<MockMediaResource> resource;
   Monitor mMonitor;
   nsAutoPtr<MP4Demuxer> demuxer;
 
   explicit MP4DemuxerBinding(const char* aFileName = "dash_dashinit.mp4")
     : resource(new MockMediaResource(aFileName))
     , mMonitor("TestMP4Demuxer monitor")
-    , demuxer(new MP4Demuxer(new MP4Stream(resource), &mMonitor))
+    , demuxer(new MP4Demuxer(new MP4Stream(resource), 0, &mMonitor))
   {
     EXPECT_EQ(NS_OK, resource->Open(nullptr));
   }
 
 private:
   virtual ~MP4DemuxerBinding()
   {
   }
--- a/dom/media/mediasource/ContainerParser.cpp
+++ b/dom/media/mediasource/ContainerParser.cpp
@@ -277,17 +277,17 @@ public:
     bool initSegment = IsInitSegmentPresent(aData);
     if (initSegment) {
       mResource = new SourceBufferResource(NS_LITERAL_CSTRING("video/mp4"));
       mStream = new MP4Stream(mResource);
       // We use a timestampOffset of 0 for ContainerParser, and require
       // consumers of ParseStartAndEndTimestamps to add their timestamp offset
       // manually. This allows the ContainerParser to be shared across different
       // timestampOffsets.
-      mParser = new mp4_demuxer::MoofParser(mStream, 0, &mMonitor);
+      mParser = new mp4_demuxer::MoofParser(mStream, 0, 0, &mMonitor);
       mInitData = new LargeDataBuffer();
     } else if (!mStream || !mParser) {
       return false;
     }
 
     mResource->AppendData(aData);
     nsTArray<MediaByteRange> byteRanges;
     MediaByteRange mbr =
--- a/dom/media/mediasource/MediaSourceReader.cpp
+++ b/dom/media/mediasource/MediaSourceReader.cpp
@@ -86,130 +86,123 @@ MediaSourceReader::IsWaitingMediaResourc
   }
 
   return !mHasEssentialTrackBuffers;
 }
 
 size_t
 MediaSourceReader::SizeOfVideoQueueInFrames()
 {
-  if (!GetVideoReader()) {
+  if (!mVideoReader) {
     MSE_DEBUG("MediaSourceReader(%p)::SizeOfVideoQueue called with no video reader", this);
     return 0;
   }
-  return GetVideoReader()->SizeOfVideoQueueInFrames();
+  return mVideoReader->SizeOfVideoQueueInFrames();
 }
 
 size_t
 MediaSourceReader::SizeOfAudioQueueInFrames()
 {
-  if (!GetAudioReader()) {
+  if (!mAudioReader) {
     MSE_DEBUG("MediaSourceReader(%p)::SizeOfAudioQueue called with no audio reader", this);
     return 0;
   }
-  return GetAudioReader()->SizeOfAudioQueueInFrames();
+  return mAudioReader->SizeOfAudioQueueInFrames();
 }
 
 nsRefPtr<MediaDecoderReader::AudioDataPromise>
 MediaSourceReader::RequestAudioData()
 {
   nsRefPtr<AudioDataPromise> p = mAudioPromise.Ensure(__func__);
   MSE_DEBUGV("MediaSourceReader(%p)::RequestAudioData", this);
-  if (!GetAudioReader()) {
+  if (!mAudioReader) {
     MSE_DEBUG("MediaSourceReader(%p)::RequestAudioData called with no audio reader", this);
     mAudioPromise.Reject(DECODE_ERROR, __func__);
     return p;
   }
   if (IsSeeking()) {
     MSE_DEBUG("MediaSourceReader(%p)::RequestAudioData called mid-seek. Rejecting.", this);
     mAudioPromise.Reject(CANCELED, __func__);
     return p;
   }
   MOZ_DIAGNOSTIC_ASSERT(!mAudioSeekRequest.Exists());
 
-  SwitchSourceResult ret = SwitchAudioSource(mLastAudioTime);
+  SwitchReaderResult ret = SwitchAudioReader(mLastAudioTime);
   switch (ret) {
-    case SOURCE_NEW:
-      mAudioSeekRequest.Begin(GetAudioReader()->Seek(GetReaderAudioTime(mLastAudioTime), 0)
+    case READER_NEW:
+      mAudioSeekRequest.Begin(mAudioReader->Seek(mLastAudioTime, 0)
                               ->RefableThen(GetTaskQueue(), __func__, this,
                                             &MediaSourceReader::CompleteAudioSeekAndDoRequest,
                                             &MediaSourceReader::CompleteAudioSeekAndRejectPromise));
       break;
-    case SOURCE_ERROR:
+    case READER_ERROR:
       if (mLastAudioTime) {
         CheckForWaitOrEndOfStream(MediaData::AUDIO_DATA, mLastAudioTime);
         break;
       }
       // Fallback to using current reader
     default:
       DoAudioRequest();
       break;
   }
   return p;
 }
 
 void MediaSourceReader::DoAudioRequest()
 {
-  mAudioRequest.Begin(GetAudioReader()->RequestAudioData()
+  mAudioRequest.Begin(mAudioReader->RequestAudioData()
                       ->RefableThen(GetTaskQueue(), __func__, this,
                                     &MediaSourceReader::OnAudioDecoded,
                                     &MediaSourceReader::OnAudioNotDecoded));
 }
 
 void
 MediaSourceReader::OnAudioDecoded(AudioData* aSample)
 {
   MOZ_DIAGNOSTIC_ASSERT(!IsSeeking());
   mAudioRequest.Complete();
 
-  int64_t ourTime = aSample->mTime + mAudioSourceDecoder->GetTimestampOffset();
-
   MSE_DEBUGV("MediaSourceReader(%p)::OnAudioDecoded [mTime=%lld mDuration=%lld mDiscontinuity=%d]",
-             this, ourTime, aSample->mDuration, aSample->mDiscontinuity);
+             this, aSample->mTime, aSample->mDuration, aSample->mDiscontinuity);
   if (mDropAudioBeforeThreshold) {
-    if (ourTime < mTimeThreshold) {
+    if (aSample->mTime < mTimeThreshold) {
       MSE_DEBUG("MediaSourceReader(%p)::OnAudioDecoded mTime=%lld < mTimeThreshold=%lld",
-                this, ourTime, mTimeThreshold);
-      mAudioRequest.Begin(GetAudioReader()->RequestAudioData()
+                this, aSample->mTime, mTimeThreshold);
+      mAudioRequest.Begin(mAudioReader->RequestAudioData()
                           ->RefableThen(GetTaskQueue(), __func__, this,
                                         &MediaSourceReader::OnAudioDecoded,
                                         &MediaSourceReader::OnAudioNotDecoded));
       return;
     }
     mDropAudioBeforeThreshold = false;
   }
 
-  // Adjust the sample time into our reference.
-  nsRefPtr<AudioData> newSample =
-    AudioData::TransferAndUpdateTimestampAndDuration(aSample,
-                                                     ourTime,
-                                                     aSample->mDuration);
-  mLastAudioTime = newSample->GetEndTime();
+  mLastAudioTime = aSample->mTime + aSample->mDuration;
 
-  mAudioPromise.Resolve(newSample, __func__);
+  mAudioPromise.Resolve(aSample, __func__);
 }
 
 // Find the closest approximation to the end time for this stream.
 // mLast{Audio,Video}Time differs from the actual end time because of
 // Bug 1065207 - the duration of a WebM fragment is an estimate not the
 // actual duration. In the case of audio time an example of where they
 // differ would be the actual sample duration being small but the
 // previous sample being large. The buffered end time uses that last
 // sample duration as an estimate of the end time duration giving an end
 // time that is greater than mLastAudioTime, which is the actual sample
 // end time.
 // Reader switching is based on the buffered end time though so they can be
 // quite different. By using the EOS_FUZZ_US and the buffered end time we
 // attempt to account for this difference.
 static void
-AdjustEndTime(int64_t* aEndTime, SourceBufferDecoder* aDecoder)
+AdjustEndTime(int64_t* aEndTime, MediaDecoderReader* aReader)
 {
-  if (aDecoder) {
+  if (aReader) {
     nsRefPtr<dom::TimeRanges> ranges = new dom::TimeRanges();
-    aDecoder->GetBuffered(ranges);
+    aReader->GetBuffered(ranges);
     if (ranges->Length() > 0) {
       // End time is a double so we convert to nearest by adding 0.5.
       int64_t end = ranges->GetEndTime() * USECS_PER_S + 0.5;
       *aEndTime = std::max(*aEndTime, end);
     }
   }
 }
 
@@ -223,117 +216,109 @@ MediaSourceReader::OnAudioNotDecoded(Not
   if (aReason == DECODE_ERROR || aReason == CANCELED) {
     mAudioPromise.Reject(aReason, __func__);
     return;
   }
 
   // End of stream. Force switching past this stream to another reader by
   // switching to the end of the buffered range.
   MOZ_ASSERT(aReason == END_OF_STREAM);
-  if (mAudioSourceDecoder) {
-    AdjustEndTime(&mLastAudioTime, mAudioSourceDecoder);
+  if (mAudioReader) {
+    AdjustEndTime(&mLastAudioTime, mAudioReader);
   }
 
-  // See if we can find a different source that can pick up where we left off.
-  if (SwitchAudioSource(mLastAudioTime) == SOURCE_NEW) {
-    mAudioSeekRequest.Begin(GetAudioReader()->Seek(GetReaderAudioTime(mLastAudioTime), 0)
+  // See if we can find a different reader that can pick up where we left off.
+  if (SwitchAudioReader(mLastAudioTime) == READER_NEW) {
+    mAudioSeekRequest.Begin(mAudioReader->Seek(mLastAudioTime, 0)
                             ->RefableThen(GetTaskQueue(), __func__, this,
                                           &MediaSourceReader::CompleteAudioSeekAndDoRequest,
                                           &MediaSourceReader::CompleteAudioSeekAndRejectPromise));
     return;
   }
 
   CheckForWaitOrEndOfStream(MediaData::AUDIO_DATA, mLastAudioTime);
 }
 
+
 nsRefPtr<MediaDecoderReader::VideoDataPromise>
 MediaSourceReader::RequestVideoData(bool aSkipToNextKeyframe, int64_t aTimeThreshold)
 {
   nsRefPtr<VideoDataPromise> p = mVideoPromise.Ensure(__func__);
   MSE_DEBUGV("MediaSourceReader(%p)::RequestVideoData(%d, %lld)",
              this, aSkipToNextKeyframe, aTimeThreshold);
-  if (!GetVideoReader()) {
+  if (!mVideoReader) {
     MSE_DEBUG("MediaSourceReader(%p)::RequestVideoData called with no video reader", this);
     mVideoPromise.Reject(DECODE_ERROR, __func__);
     return p;
   }
   if (aSkipToNextKeyframe) {
     mTimeThreshold = aTimeThreshold;
     mDropAudioBeforeThreshold = true;
     mDropVideoBeforeThreshold = true;
   }
   if (IsSeeking()) {
     MSE_DEBUG("MediaSourceReader(%p)::RequestVideoData called mid-seek. Rejecting.", this);
     mVideoPromise.Reject(CANCELED, __func__);
     return p;
   }
   MOZ_DIAGNOSTIC_ASSERT(!mVideoSeekRequest.Exists());
 
-  SwitchSourceResult ret = SwitchVideoSource(mLastVideoTime);
+  SwitchReaderResult ret = SwitchVideoReader(mLastVideoTime);
   switch (ret) {
-    case SOURCE_NEW:
-      mVideoSeekRequest.Begin(GetVideoReader()->Seek(GetReaderVideoTime(mLastVideoTime), 0)
+    case READER_NEW:
+      mVideoSeekRequest.Begin(mVideoReader->Seek(mLastVideoTime, 0)
                              ->RefableThen(GetTaskQueue(), __func__, this,
                                            &MediaSourceReader::CompleteVideoSeekAndDoRequest,
                                            &MediaSourceReader::CompleteVideoSeekAndRejectPromise));
       break;
-    case SOURCE_ERROR:
+    case READER_ERROR:
       if (mLastVideoTime) {
         CheckForWaitOrEndOfStream(MediaData::VIDEO_DATA, mLastVideoTime);
         break;
       }
       // Fallback to using current reader.
     default:
       DoVideoRequest();
       break;
   }
 
   return p;
 }
 
 void
 MediaSourceReader::DoVideoRequest()
 {
-  mVideoRequest.Begin(GetVideoReader()->RequestVideoData(mDropVideoBeforeThreshold, GetReaderVideoTime(mTimeThreshold))
+  mVideoRequest.Begin(mVideoReader->RequestVideoData(mDropVideoBeforeThreshold, mTimeThreshold)
                       ->RefableThen(GetTaskQueue(), __func__, this,
                                     &MediaSourceReader::OnVideoDecoded,
                                     &MediaSourceReader::OnVideoNotDecoded));
 }
 
 void
 MediaSourceReader::OnVideoDecoded(VideoData* aSample)
 {
   MOZ_DIAGNOSTIC_ASSERT(!IsSeeking());
   mVideoRequest.Complete();
 
-  // Adjust the sample time into our reference.
-  int64_t ourTime = aSample->mTime + mVideoSourceDecoder->GetTimestampOffset();
-
   MSE_DEBUGV("MediaSourceReader(%p)::OnVideoDecoded [mTime=%lld mDuration=%lld mDiscontinuity=%d]",
-             this, ourTime, aSample->mDuration, aSample->mDiscontinuity);
+             this, aSample->mTime, aSample->mDuration, aSample->mDiscontinuity);
   if (mDropVideoBeforeThreshold) {
-    if (ourTime < mTimeThreshold) {
+    if (aSample->mTime < mTimeThreshold) {
       MSE_DEBUG("MediaSourceReader(%p)::OnVideoDecoded mTime=%lld < mTimeThreshold=%lld",
-                this, ourTime, mTimeThreshold);
+                this, aSample->mTime, mTimeThreshold);
       DoVideoRequest();
       return;
     }
     mDropVideoBeforeThreshold = false;
     mTimeThreshold = 0;
   }
 
-  // Adjust the sample time into our reference.
-  nsRefPtr<VideoData> newSample =
-    VideoData::ShallowCopyUpdateTimestampAndDuration(aSample,
-                                                     ourTime,
-                                                     aSample->mDuration);
+  mLastVideoTime = aSample->mTime + aSample->mDuration;
 
-  mLastVideoTime = newSample->GetEndTime();
-
-  mVideoPromise.Resolve(newSample, __func__);
+  mVideoPromise.Resolve(aSample, __func__);
 }
 
 void
 MediaSourceReader::OnVideoNotDecoded(NotDecodedReason aReason)
 {
   MOZ_DIAGNOSTIC_ASSERT(!IsSeeking());
   mVideoRequest.Complete();
 
@@ -341,23 +326,23 @@ MediaSourceReader::OnVideoNotDecoded(Not
   if (aReason == DECODE_ERROR || aReason == CANCELED) {
     mVideoPromise.Reject(aReason, __func__);
     return;
   }
 
   // End of stream. Force switching past this stream to another reader by
   // switching to the end of the buffered range.
   MOZ_ASSERT(aReason == END_OF_STREAM);
-  if (mVideoSourceDecoder) {
-    AdjustEndTime(&mLastVideoTime, mVideoSourceDecoder);
+  if (mVideoReader) {
+    AdjustEndTime(&mLastVideoTime, mVideoReader);
   }
 
   // See if we can find a different reader that can pick up where we left off.
-  if (SwitchVideoSource(mLastVideoTime) == SOURCE_NEW) {
-    mVideoSeekRequest.Begin(GetVideoReader()->Seek(GetReaderVideoTime(mLastVideoTime), 0)
+  if (SwitchVideoReader(mLastVideoTime) == READER_NEW) {
+    mVideoSeekRequest.Begin(mVideoReader->Seek(mLastVideoTime, 0)
                            ->RefableThen(GetTaskQueue(), __func__, this,
                                          &MediaSourceReader::CompleteVideoSeekAndDoRequest,
                                          &MediaSourceReader::CompleteVideoSeekAndRejectPromise));
     return;
   }
 
   CheckForWaitOrEndOfStream(MediaData::VIDEO_DATA, mLastVideoTime);
 }
@@ -405,19 +390,19 @@ MediaSourceReader::ContinueShutdown()
                                        &MediaSourceReader::ContinueShutdown,
                                        &MediaSourceReader::ContinueShutdown);
     mShutdownTrackBuffers.AppendElement(mTrackBuffers[0]);
     mTrackBuffers.RemoveElementAt(0);
     return;
   }
 
   mAudioTrack = nullptr;
-  mAudioSourceDecoder = nullptr;
+  mAudioReader = nullptr;
   mVideoTrack = nullptr;
-  mVideoSourceDecoder = nullptr;
+  mVideoReader = nullptr;
 
 #ifdef MOZ_FMP4
   if (mSharedDecoderManager) {
     mSharedDecoderManager->Shutdown();
     mSharedDecoderManager = nullptr;
   }
 #endif
 
@@ -432,134 +417,130 @@ MediaSourceReader::ContinueShutdown()
 
 void
 MediaSourceReader::BreakCycles()
 {
   MediaDecoderReader::BreakCycles();
 
   // These were cleared in Shutdown().
   MOZ_ASSERT(!mAudioTrack);
-  MOZ_ASSERT(!mAudioSourceDecoder);
+  MOZ_ASSERT(!mAudioReader);
   MOZ_ASSERT(!mVideoTrack);
-  MOZ_ASSERT(!mVideoSourceDecoder);
+  MOZ_ASSERT(!mVideoReader);
   MOZ_ASSERT(!mTrackBuffers.Length());
 
   for (uint32_t i = 0; i < mShutdownTrackBuffers.Length(); ++i) {
     mShutdownTrackBuffers[i]->BreakCycles();
   }
   mShutdownTrackBuffers.Clear();
 }
 
-already_AddRefed<SourceBufferDecoder>
-MediaSourceReader::SelectDecoder(int64_t aTarget,
-                                 int64_t aTolerance,
-                                 const nsTArray<nsRefPtr<SourceBufferDecoder>>& aTrackDecoders)
+already_AddRefed<MediaDecoderReader>
+MediaSourceReader::SelectReader(int64_t aTarget,
+                                int64_t aTolerance,
+                                const nsTArray<nsRefPtr<SourceBufferDecoder>>& aTrackDecoders)
 {
   mDecoder->GetReentrantMonitor().AssertCurrentThreadIn();
 
   // Consider decoders in order of newest to oldest, as a newer decoder
   // providing a given buffered range is expected to replace an older one.
   for (int32_t i = aTrackDecoders.Length() - 1; i >= 0; --i) {
-    nsRefPtr<SourceBufferDecoder> newDecoder = aTrackDecoders[i];
+    nsRefPtr<MediaDecoderReader> newReader = aTrackDecoders[i]->GetReader();
 
     nsRefPtr<dom::TimeRanges> ranges = new dom::TimeRanges();
-    newDecoder->GetBuffered(ranges);
+    aTrackDecoders[i]->GetBuffered(ranges);
     if (ranges->Find(double(aTarget) / USECS_PER_S,
                      double(aTolerance) / USECS_PER_S) == dom::TimeRanges::NoIndex) {
-      MSE_DEBUGV("MediaSourceReader(%p)::SelectDecoder(%lld) newDecoder=%p target not in ranges=%s",
-                 this, aTarget, newDecoder.get(), DumpTimeRanges(ranges).get());
+      MSE_DEBUGV("MediaSourceReader(%p)::SelectReader(%lld) newReader=%p target not in ranges=%s",
+                 this, aTarget, newReader.get(), DumpTimeRanges(ranges).get());
       continue;
     }
 
-    return newDecoder.forget();
+    return newReader.forget();
   }
 
   return nullptr;
 }
 
 bool
 MediaSourceReader::HaveData(int64_t aTarget, MediaData::Type aType)
 {
   TrackBuffer* trackBuffer = aType == MediaData::AUDIO_DATA ? mAudioTrack : mVideoTrack;
   MOZ_ASSERT(trackBuffer);
-  nsRefPtr<SourceBufferDecoder> decoder = SelectDecoder(aTarget, EOS_FUZZ_US, trackBuffer->Decoders());
-  return !!decoder;
+  nsRefPtr<MediaDecoderReader> reader = SelectReader(aTarget, EOS_FUZZ_US, trackBuffer->Decoders());
+  return !!reader;
 }
 
-MediaSourceReader::SwitchSourceResult
-MediaSourceReader::SwitchAudioSource(int64_t aTarget)
+MediaSourceReader::SwitchReaderResult
+MediaSourceReader::SwitchAudioReader(int64_t aTarget)
 {
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   // XXX: Can't handle adding an audio track after ReadMetadata.
   if (!mAudioTrack) {
-    return SOURCE_ERROR;
+    return READER_ERROR;
   }
 
   // We first search without the tolerance and then search with it, so that, in
   // the case of perfectly-aligned data, we don't prematurely jump to a new
   // reader and skip the last few samples of the current one.
-  nsRefPtr<SourceBufferDecoder> newDecoder =
-    SelectDecoder(aTarget, /* aTolerance = */ 0, mAudioTrack->Decoders());
-  if (!newDecoder) {
-    newDecoder = SelectDecoder(aTarget, EOS_FUZZ_US, mAudioTrack->Decoders());
+  nsRefPtr<MediaDecoderReader> newReader = SelectReader(aTarget, /* aTolerance = */ 0, mAudioTrack->Decoders());
+  if (!newReader) {
+    newReader = SelectReader(aTarget, EOS_FUZZ_US, mAudioTrack->Decoders());
   }
-  if (newDecoder && newDecoder != mAudioSourceDecoder) {
-    GetAudioReader()->SetIdle();
-    mAudioSourceDecoder = newDecoder;
-    MSE_DEBUGV("MediaSourceReader(%p)::SwitchAudioSource switched decoder to %p",
-               this, mAudioSourceDecoder.get());
-    return SOURCE_NEW;
+  if (newReader && newReader != mAudioReader) {
+    mAudioReader->SetIdle();
+    mAudioReader = newReader;
+    MSE_DEBUGV("MediaSourceReader(%p)::SwitchAudioReader switched reader to %p", this, mAudioReader.get());
+    return READER_NEW;
   }
-  return newDecoder ? SOURCE_EXISTING : SOURCE_ERROR;
+  return newReader ? READER_EXISTING : READER_ERROR;
 }
 
-MediaSourceReader::SwitchSourceResult
-MediaSourceReader::SwitchVideoSource(int64_t aTarget)
+MediaSourceReader::SwitchReaderResult
+MediaSourceReader::SwitchVideoReader(int64_t aTarget)
 {
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   // XXX: Can't handle adding a video track after ReadMetadata.
   if (!mVideoTrack) {
-    return SOURCE_ERROR;
+    return READER_ERROR;
   }
 
   // We first search without the tolerance and then search with it, so that, in
   // the case of perfectly-aligned data, we don't prematurely jump to a new
   // reader and skip the last few samples of the current one.
-  nsRefPtr<SourceBufferDecoder> newDecoder =
-    SelectDecoder(aTarget, /* aTolerance = */ 0, mVideoTrack->Decoders());
-  if (!newDecoder) {
-    newDecoder = SelectDecoder(aTarget, EOS_FUZZ_US, mVideoTrack->Decoders());
+  nsRefPtr<MediaDecoderReader> newReader = SelectReader(aTarget, /* aTolerance = */ 0, mVideoTrack->Decoders());
+  if (!newReader) {
+    newReader = SelectReader(aTarget, EOS_FUZZ_US, mVideoTrack->Decoders());
   }
-  if (newDecoder && newDecoder != mVideoSourceDecoder) {
-    GetVideoReader()->SetIdle();
-    mVideoSourceDecoder = newDecoder;
-    MSE_DEBUGV("MediaSourceReader(%p)::SwitchVideoSource switched decoder to %p",
-               this, mVideoSourceDecoder.get());
-    return SOURCE_NEW;
+  if (newReader && newReader != mVideoReader) {
+    mVideoReader->SetIdle();
+    mVideoReader = newReader;
+    MSE_DEBUGV("MediaSourceReader(%p)::SwitchVideoReader switched reader to %p", this, mVideoReader.get());
+    return READER_NEW;
   }
-  return newDecoder ? SOURCE_EXISTING : SOURCE_ERROR;
+  return newReader ? READER_EXISTING : READER_ERROR;
 }
 
 bool
 MediaSourceReader::IsDormantNeeded()
 {
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
-  if (GetVideoReader()) {
-    return GetVideoReader()->IsDormantNeeded();
+  if (mVideoReader) {
+    return mVideoReader->IsDormantNeeded();
   }
 
   return false;
 }
 
 void
 MediaSourceReader::ReleaseMediaResources()
 {
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
-  if (GetVideoReader()) {
-    GetVideoReader()->ReleaseMediaResources();
+  if (mVideoReader) {
+    mVideoReader->ReleaseMediaResources();
   }
 }
 
 MediaDecoderReader*
 CreateReaderForType(const nsACString& aType, AbstractMediaDecoder* aDecoder)
 {
 #ifdef MOZ_FMP4
   // The MP4Reader that supports fragmented MP4 and uses
@@ -731,71 +712,66 @@ MediaSourceReader::Seek(int64_t aTime, i
 
 void
 MediaSourceReader::CancelSeek()
 {
   MOZ_ASSERT(OnDecodeThread());
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   mWaitingForSeekData = false;
   mPendingSeekTime = -1;
-  if (GetAudioReader()) {
+  if (mAudioReader) {
     mAudioSeekRequest.DisconnectIfExists();
-    GetAudioReader()->CancelSeek();
+    mAudioReader->CancelSeek();
   }
-  if (GetVideoReader()) {
+  if (mVideoReader) {
     mVideoSeekRequest.DisconnectIfExists();
-    GetVideoReader()->CancelSeek();
+    mVideoReader->CancelSeek();
   }
   mSeekPromise.RejectIfExists(NS_OK, __func__);
 }
 
 void
 MediaSourceReader::OnVideoSeekCompleted(int64_t aTime)
 {
   mVideoSeekRequest.Complete();
 
-  // The aTime we receive is in the sub-reader's reference.
-  int64_t ourTime = aTime + mVideoSourceDecoder->GetTimestampOffset();
-
   if (mAudioTrack) {
-    mPendingSeekTime = ourTime;
+    mPendingSeekTime = aTime;
     DoAudioSeek();
   } else {
     mPendingSeekTime = -1;
-    mSeekPromise.Resolve(ourTime, __func__);
+    mSeekPromise.Resolve(aTime, __func__);
   }
 }
 
 void
 MediaSourceReader::OnVideoSeekFailed(nsresult aResult)
 {
   mVideoSeekRequest.Complete();
   mPendingSeekTime = -1;
   mSeekPromise.Reject(aResult, __func__);
 }
 
 void
 MediaSourceReader::DoAudioSeek()
 {
-  SwitchAudioSource(mPendingSeekTime);
-  mAudioSeekRequest.Begin(GetAudioReader()->Seek(GetReaderAudioTime(mPendingSeekTime), 0)
-                         ->RefableThen(GetTaskQueue(), __func__, this,
-                                       &MediaSourceReader::OnAudioSeekCompleted,
-                                       &MediaSourceReader::OnAudioSeekFailed));
-  MSE_DEBUG("MediaSourceReader(%p)::DoAudioSeek reader=%p", this, GetAudioReader());
+    SwitchAudioReader(mPendingSeekTime);
+    mAudioSeekRequest.Begin(mAudioReader->Seek(mPendingSeekTime, 0)
+                           ->RefableThen(GetTaskQueue(), __func__, this,
+                                         &MediaSourceReader::OnAudioSeekCompleted,
+                                         &MediaSourceReader::OnAudioSeekFailed));
+    MSE_DEBUG("MediaSourceReader(%p)::DoAudioSeek reader=%p", this, mAudioReader.get());
 }
 
 void
 MediaSourceReader::OnAudioSeekCompleted(int64_t aTime)
 {
   mAudioSeekRequest.Complete();
   mPendingSeekTime = -1;
-  // The aTime we receive is in the sub-reader's reference.
-  mSeekPromise.Resolve(aTime + mAudioSourceDecoder->GetTimestampOffset(),
-                       __func__);
+  mSeekPromise.Resolve(aTime, __func__);
 }
 
 void
 MediaSourceReader::OnAudioSeekFailed(nsresult aResult)
 {
   mAudioSeekRequest.Complete();
   mPendingSeekTime = -1;
   mSeekPromise.Reject(aResult, __func__);
@@ -830,22 +806,22 @@ MediaSourceReader::AttemptSeek()
   } else {
     MOZ_CRASH();
   }
 }
 
 void
 MediaSourceReader::DoVideoSeek()
 {
-  SwitchVideoSource(mPendingSeekTime);
-  mVideoSeekRequest.Begin(GetVideoReader()->Seek(GetReaderVideoTime(mPendingSeekTime), 0)
+  SwitchVideoReader(mPendingSeekTime);
+  mVideoSeekRequest.Begin(mVideoReader->Seek(mPendingSeekTime, 0)
                           ->RefableThen(GetTaskQueue(), __func__, this,
                                         &MediaSourceReader::OnVideoSeekCompleted,
                                         &MediaSourceReader::OnVideoSeekFailed));
-  MSE_DEBUG("MediaSourceReader(%p)::DoVideoSeek reader=%p", this, GetVideoReader());
+  MSE_DEBUG("MediaSourceReader(%p)::DoVideoSeek reader=%p", this, mVideoReader.get());
 }
 
 nsresult
 MediaSourceReader::GetBuffered(dom::TimeRanges* aBuffered)
 {
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   MOZ_ASSERT(aBuffered->Length() == 0);
   if (mTrackBuffers.IsEmpty()) {
@@ -922,61 +898,61 @@ MediaSourceReader::ReadMetadata(MediaInf
   if (!mAudioTrack && !mVideoTrack) {
     MSE_DEBUG("MediaSourceReader(%p)::ReadMetadata missing track: mAudioTrack=%p mVideoTrack=%p",
               this, mAudioTrack.get(), mVideoTrack.get());
     return NS_ERROR_FAILURE;
   }
 
   if (mAudioTrack) {
     MOZ_ASSERT(mAudioTrack->IsReady());
-    mAudioSourceDecoder = mAudioTrack->Decoders()[0];
+    mAudioReader = mAudioTrack->Decoders()[0]->GetReader();
 
-    const MediaInfo& info = GetAudioReader()->GetMediaInfo();
+    const MediaInfo& info = mAudioReader->GetMediaInfo();
     MOZ_ASSERT(info.HasAudio());
     mInfo.mAudio = info.mAudio;
     MSE_DEBUG("MediaSourceReader(%p)::ReadMetadata audio reader=%p duration=%lld",
-              this, mAudioSourceDecoder.get(),
-              mAudioSourceDecoder->GetReader()->GetDecoder()->GetMediaDuration());
+              this, mAudioReader.get(),
+              mAudioReader->GetDecoder()->GetMediaDuration());
   }
 
   if (mVideoTrack) {
     MOZ_ASSERT(mVideoTrack->IsReady());
-    mVideoSourceDecoder = mVideoTrack->Decoders()[0];
+    mVideoReader = mVideoTrack->Decoders()[0]->GetReader();
 
-    const MediaInfo& info = GetVideoReader()->GetMediaInfo();
+    const MediaInfo& info = mVideoReader->GetMediaInfo();
     MOZ_ASSERT(info.HasVideo());
     mInfo.mVideo = info.mVideo;
     MSE_DEBUG("MediaSourceReader(%p)::ReadMetadata video reader=%p duration=%lld",
-              this, GetVideoReader(),
-              GetVideoReader()->GetDecoder()->GetMediaDuration());
+              this, mVideoReader.get(),
+              mVideoReader->GetDecoder()->GetMediaDuration());
   }
 
   *aInfo = mInfo;
   *aTags = nullptr; // TODO: Handle metadata.
 
   return NS_OK;
 }
 
 void
 MediaSourceReader::ReadUpdatedMetadata(MediaInfo* aInfo)
 {
   if (mAudioTrack) {
     MOZ_ASSERT(mAudioTrack->IsReady());
-    mAudioSourceDecoder = mAudioTrack->Decoders()[0];
+    mAudioReader = mAudioTrack->Decoders()[0]->GetReader();
 
-    const MediaInfo& info = GetAudioReader()->GetMediaInfo();
+    const MediaInfo& info = mAudioReader->GetMediaInfo();
     MOZ_ASSERT(info.HasAudio());
     mInfo.mAudio = info.mAudio;
   }
 
   if (mVideoTrack) {
     MOZ_ASSERT(mVideoTrack->IsReady());
-    mVideoSourceDecoder = mVideoTrack->Decoders()[0];
+    mVideoReader = mVideoTrack->Decoders()[0]->GetReader();
 
-    const MediaInfo& info = GetVideoReader()->GetMediaInfo();
+    const MediaInfo& info = mVideoReader->GetMediaInfo();
     MOZ_ASSERT(info.HasVideo());
     mInfo.mVideo = info.mVideo;
   }
   *aInfo = mInfo;
 }
 
 void
 MediaSourceReader::Ended()
@@ -1016,31 +992,31 @@ MediaSourceReader::GetMozDebugReaderData
     result += nsPrintfCString("\tDumping Audio Track Decoders: - mLastAudioTime: %f\n", double(mLastAudioTime) / USECS_PER_S);
     for (int32_t i = mAudioTrack->Decoders().Length() - 1; i >= 0; --i) {
       nsRefPtr<MediaDecoderReader> newReader = mAudioTrack->Decoders()[i]->GetReader();
 
       nsRefPtr<dom::TimeRanges> ranges = new dom::TimeRanges();
       mAudioTrack->Decoders()[i]->GetBuffered(ranges);
       result += nsPrintfCString("\t\tReader %d: %p ranges=%s active=%s size=%lld\n",
                                 i, newReader.get(), DumpTimeRanges(ranges).get(),
-                                newReader.get() == GetAudioReader() ? "true" : "false",
+                                newReader.get() == mAudioReader.get() ? "true" : "false",
                                 mAudioTrack->Decoders()[i]->GetResource()->GetSize());
     }
   }
 
   if (mVideoTrack) {
     result += nsPrintfCString("\tDumping Video Track Decoders - mLastVideoTime: %f\n", double(mLastVideoTime) / USECS_PER_S);
     for (int32_t i = mVideoTrack->Decoders().Length() - 1; i >= 0; --i) {
       nsRefPtr<MediaDecoderReader> newReader = mVideoTrack->Decoders()[i]->GetReader();
 
       nsRefPtr<dom::TimeRanges> ranges = new dom::TimeRanges();
       mVideoTrack->Decoders()[i]->GetBuffered(ranges);
       result += nsPrintfCString("\t\tReader %d: %p ranges=%s active=%s size=%lld\n",
                                 i, newReader.get(), DumpTimeRanges(ranges).get(),
-                                newReader.get() == GetVideoReader() ? "true" : "false",
+                                newReader.get() == mVideoReader.get() ? "true" : "false",
                                 mVideoTrack->Decoders()[i]->GetResource()->GetSize());
     }
   }
   aString += NS_ConvertUTF8toUTF16(result);
 }
 
 #ifdef MOZ_EME
 nsresult
@@ -1057,36 +1033,12 @@ MediaSourceReader::SetCDMProxy(CDMProxy*
   return NS_OK;
 }
 #endif
 
 bool
 MediaSourceReader::IsActiveReader(MediaDecoderReader* aReader)
 {
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
-  return aReader == GetVideoReader() || aReader == GetAudioReader();
-}
-
-MediaDecoderReader*
-MediaSourceReader::GetAudioReader() const
-{
-  return mAudioSourceDecoder ? mAudioSourceDecoder->GetReader() : nullptr;
-}
-
-MediaDecoderReader*
-MediaSourceReader::GetVideoReader() const
-{
-  return mVideoSourceDecoder ? mVideoSourceDecoder->GetReader() : nullptr;
-}
-
-int64_t
-MediaSourceReader::GetReaderAudioTime(int64_t aTime) const
-{
-  return aTime - mAudioSourceDecoder->GetTimestampOffset();
-}
-
-int64_t
-MediaSourceReader::GetReaderVideoTime(int64_t aTime) const
-{
-  return aTime - mVideoSourceDecoder->GetTimestampOffset();
+  return aReader == mVideoReader.get() || aReader == mAudioReader.get();
 }
 
 } // namespace mozilla
--- a/dom/media/mediasource/MediaSourceReader.h
+++ b/dom/media/mediasource/MediaSourceReader.h
@@ -137,39 +137,39 @@ public:
   // Set the duration of the attached mediasource element.
   void SetMediaSourceDuration(double aDuration /* seconds */);
 
 #ifdef MOZ_EME
   nsresult SetCDMProxy(CDMProxy* aProxy);
 #endif
 
   virtual bool IsAsync() const MOZ_OVERRIDE {
-    return (!GetAudioReader() || GetAudioReader()->IsAsync()) &&
-           (!GetVideoReader() || GetVideoReader()->IsAsync());
+    return (!mAudioReader || mAudioReader->IsAsync()) &&
+           (!mVideoReader || mVideoReader->IsAsync());
   }
 
   // Returns true if aReader is a currently active audio or video
   bool IsActiveReader(MediaDecoderReader* aReader);
 
   // Returns a string describing the state of the MediaSource internal
   // buffered data. Used for debugging purposes.
   void GetMozDebugReaderData(nsAString& aString);
 
 private:
-  // Switch the current audio/video source to the source that
+  // Switch the current audio/video reader to the reader that
   // contains aTarget (or up to aTolerance after target). Both
   // aTarget and aTolerance are in microseconds.
-  enum SwitchSourceResult {
-    SOURCE_ERROR = -1,
-    SOURCE_EXISTING = 0,
-    SOURCE_NEW = 1,
+  enum SwitchReaderResult {
+    READER_ERROR = -1,
+    READER_EXISTING = 0,
+    READER_NEW = 1,
   };
 
-  SwitchSourceResult SwitchAudioSource(int64_t aTarget);
-  SwitchSourceResult SwitchVideoSource(int64_t aTarget);
+  SwitchReaderResult SwitchAudioReader(int64_t aTarget);
+  SwitchReaderResult SwitchVideoReader(int64_t aTarget);
 
   void DoAudioRequest();
   void DoVideoRequest();
 
   void CompleteAudioSeekAndDoRequest()
   {
     mAudioSeekRequest.Complete();
     DoAudioRequest();
@@ -188,37 +188,32 @@ private:
   }
 
   void CompleteVideoSeekAndRejectPromise()
   {
     mVideoSeekRequest.Complete();
     mVideoPromise.Reject(DECODE_ERROR, __func__);
   }
 
-  MediaDecoderReader* GetAudioReader() const;
-  MediaDecoderReader* GetVideoReader() const;
-  int64_t GetReaderAudioTime(int64_t aTime) const;
-  int64_t GetReaderVideoTime(int64_t aTime) const;
-
   // Will reject the MediaPromise with END_OF_STREAM if mediasource has ended
   // or with WAIT_FOR_DATA otherwise.
   void CheckForWaitOrEndOfStream(MediaData::Type aType, int64_t aTime /* microseconds */);
 
-  // Return a decoder from the set available in aTrackDecoders that has data
+  // Return a reader from the set available in aTrackDecoders that has data
   // available in the range requested by aTarget.
-  already_AddRefed<SourceBufferDecoder> SelectDecoder(int64_t aTarget,
-                                                      int64_t aTolerance,
-                                                      const nsTArray<nsRefPtr<SourceBufferDecoder>>& aTrackDecoders);
+  already_AddRefed<MediaDecoderReader> SelectReader(int64_t aTarget,
+                                                    int64_t aTolerance,
+                                                    const nsTArray<nsRefPtr<SourceBufferDecoder>>& aTrackDecoders);
   bool HaveData(int64_t aTarget, MediaData::Type aType);
 
   void AttemptSeek();
   bool IsSeeking() { return mPendingSeekTime != -1; }
 
-  nsRefPtr<SourceBufferDecoder> mAudioSourceDecoder;
-  nsRefPtr<SourceBufferDecoder> mVideoSourceDecoder;
+  nsRefPtr<MediaDecoderReader> mAudioReader;
+  nsRefPtr<MediaDecoderReader> mVideoReader;
 
   nsTArray<nsRefPtr<TrackBuffer>> mTrackBuffers;
   nsTArray<nsRefPtr<TrackBuffer>> mShutdownTrackBuffers;
   nsTArray<nsRefPtr<TrackBuffer>> mEssentialTrackBuffers;
   nsRefPtr<TrackBuffer> mAudioTrack;
   nsRefPtr<TrackBuffer> mVideoTrack;
 
   MediaPromiseConsumerHolder<AudioDataPromise> mAudioRequest;
--- a/dom/media/mediasource/SourceBufferDecoder.cpp
+++ b/dom/media/mediasource/SourceBufferDecoder.cpp
@@ -243,46 +243,39 @@ SourceBufferDecoder::NotifyDataArrived(c
 
 nsresult
 SourceBufferDecoder::GetBuffered(dom::TimeRanges* aBuffered)
 {
   nsresult rv = mReader->GetBuffered(aBuffered);
   if (NS_FAILED(rv)) {
     return rv;
   }
-
-  // Adjust buffered range according to timestamp offset.
-  aBuffered->Shift((double)mTimestampOffset / USECS_PER_S);
-
   if (!WasTrimmed()) {
     return NS_OK;
   }
   nsRefPtr<dom::TimeRanges> tr = new dom::TimeRanges();
   tr->Add(0, mTrimmedOffset);
   aBuffered->Intersection(tr);
   return NS_OK;
 }
 
 int64_t
 SourceBufferDecoder::ConvertToByteOffset(double aTime)
 {
-  int64_t readerOffset =
-    mReader->GetEvictionOffset(aTime - double(mTimestampOffset) / USECS_PER_S);
+  int64_t readerOffset = mReader->GetEvictionOffset(aTime);
   if (readerOffset >= 0) {
     return readerOffset;
   }
 
   // Uses a conversion based on (aTime/duration) * length.  For the
   // purposes of eviction this should be adequate since we have the
   // byte threshold as well to ensure data actually gets evicted and
   // we ensure we don't evict before the current playable point.
   if (mRealMediaDuration <= 0) {
     return -1;
   }
   int64_t length = GetResource()->GetLength();
   MOZ_ASSERT(length > 0);
-  int64_t offset =
-    ((aTime - double(mTimestampOffset) / USECS_PER_S) /
-      (double(mRealMediaDuration) / USECS_PER_S)) * length;
+  int64_t offset = (aTime / (double(mRealMediaDuration) / USECS_PER_S)) * length;
   return offset;
 }
 
 } // namespace mozilla
--- a/dom/media/mediasource/SourceBufferDecoder.h
+++ b/dom/media/mediasource/SourceBufferDecoder.h
@@ -37,16 +37,17 @@ public:
 
   NS_DECL_THREADSAFE_ISUPPORTS
 
   virtual bool IsMediaSeekable() MOZ_FINAL MOZ_OVERRIDE;
   virtual bool IsShutdown() const MOZ_FINAL MOZ_OVERRIDE;
   virtual bool IsTransportSeekable() MOZ_FINAL MOZ_OVERRIDE;
   virtual bool OnDecodeThread() const MOZ_FINAL MOZ_OVERRIDE;
   virtual bool OnStateMachineThread() const MOZ_FINAL MOZ_OVERRIDE;
+  virtual int64_t GetTimestampOffset() const MOZ_FINAL MOZ_OVERRIDE { return mTimestampOffset; }
   virtual int64_t GetMediaDuration() MOZ_FINAL MOZ_OVERRIDE;
   virtual layers::ImageContainer* GetImageContainer() MOZ_FINAL MOZ_OVERRIDE;
   virtual MediaDecoderOwner* GetOwner() MOZ_FINAL MOZ_OVERRIDE;
   virtual SourceBufferResource* GetResource() const MOZ_FINAL MOZ_OVERRIDE;
   virtual ReentrantMonitor& GetReentrantMonitor() MOZ_FINAL MOZ_OVERRIDE;
   virtual VideoFrameContainer* GetVideoFrameContainer() MOZ_FINAL MOZ_OVERRIDE;
   virtual void MetadataLoaded(nsAutoPtr<MediaInfo> aInfo, nsAutoPtr<MetadataTags> aTags, bool aRestoredFromDromant) MOZ_FINAL MOZ_OVERRIDE;
   virtual void FirstFrameLoaded(nsAutoPtr<MediaInfo> aInfo, bool aRestoredFromDromant) MOZ_FINAL MOZ_OVERRIDE;
@@ -60,30 +61,28 @@ public:
   virtual void SetMediaDuration(int64_t aDuration) MOZ_FINAL MOZ_OVERRIDE;
   virtual void SetMediaEndTime(int64_t aTime) MOZ_FINAL MOZ_OVERRIDE;
   virtual void SetMediaSeekable(bool aMediaSeekable) MOZ_FINAL MOZ_OVERRIDE;
   virtual void UpdateEstimatedMediaDuration(int64_t aDuration) MOZ_FINAL MOZ_OVERRIDE;
   virtual void UpdatePlaybackPosition(int64_t aTime) MOZ_FINAL MOZ_OVERRIDE;
   virtual bool HasInitializationData() MOZ_FINAL MOZ_OVERRIDE;
 
   // SourceBufferResource specific interface below.
-  int64_t GetTimestampOffset() const { return mTimestampOffset; }
-  void SetTimestampOffset(int64_t aOffset)  { mTimestampOffset = aOffset; }
 
   // Warning: this mirrors GetBuffered in MediaDecoder, but this class's base is
   // AbstractMediaDecoder, which does not supply this interface.
   nsresult GetBuffered(dom::TimeRanges* aBuffered);
 
   void SetReader(MediaDecoderReader* aReader)
   {
     MOZ_ASSERT(!mReader);
     mReader = aReader;
   }
 
-  MediaDecoderReader* GetReader() const
+  MediaDecoderReader* GetReader()
   {
     return mReader;
   }
 
   void SetTaskQueue(MediaTaskQueue* aTaskQueue)
   {
     MOZ_ASSERT((!mTaskQueue && aTaskQueue) || (mTaskQueue && !aTaskQueue));
     mTaskQueue = aTaskQueue;
--- a/dom/media/mediasource/TrackBuffer.cpp
+++ b/dom/media/mediasource/TrackBuffer.cpp
@@ -34,27 +34,23 @@ extern PRLogModuleInfo* GetMediaSourceAP
 #define MSE_API(...)
 #endif
 
 // Time in seconds to substract from the current time when deciding the
 // time point to evict data before in a decoder. This is used to help
 // prevent evicting the current playback point.
 #define MSE_EVICT_THRESHOLD_TIME 2.0
 
-// Time in microsecond under which a timestamp will be considered to be 0.
-#define FUZZ_TIMESTAMP_OFFSET 100000
-
 namespace mozilla {
 
 TrackBuffer::TrackBuffer(MediaSourceDecoder* aParentDecoder, const nsACString& aType)
   : mParentDecoder(aParentDecoder)
   , mType(aType)
   , mLastStartTimestamp(0)
   , mLastTimestampOffset(0)
-  , mAdjustedTimestamp(0)
   , mShutdown(false)
 {
   MOZ_COUNT_CTOR(TrackBuffer);
   mParser = ContainerParser::CreateForMIMEType(aType);
   mTaskQueue = new MediaTaskQueue(GetMediaDecodeThreadPool());
   aParentDecoder->AddTrackBuffer(this);
   mDecoderPerSegment = Preferences::GetBool("media.mediasource.decoder-per-segment", false);
   MSE_DEBUG("TrackBuffer(%p) created for parent decoder %p", this, aParentDecoder);
@@ -184,73 +180,67 @@ TrackBuffer::AppendData(LargeDataBuffer*
 
   int64_t start = 0, end = 0;
   bool gotMedia = mParser->ParseStartAndEndTimestamps(aData, start, end);
   bool gotInit = mParser->HasCompleteInitData();
 
   if (newInitData) {
     if (!gotInit) {
       // We need a new decoder, but we can't initialize it yet.
-      nsRefPtr<SourceBufferDecoder> decoder =
-        NewDecoder(aTimestampOffset - mAdjustedTimestamp);
+      nsRefPtr<SourceBufferDecoder> decoder = NewDecoder(aTimestampOffset);
       // The new decoder is stored in mDecoders/mCurrentDecoder, so we
       // don't need to do anything with 'decoder'. It's only a placeholder.
       if (!decoder) {
         mInitializationPromise.Reject(NS_ERROR_FAILURE, __func__);
         return p;
       }
     } else {
-      if (!decoders.NewDecoder(aTimestampOffset - mAdjustedTimestamp)) {
+      if (!decoders.NewDecoder(aTimestampOffset)) {
         mInitializationPromise.Reject(NS_ERROR_FAILURE, __func__);
         return p;
       }
     }
   } else if (!hadCompleteInitData && gotInit) {
     MOZ_ASSERT(mCurrentDecoder);
     // Queue pending decoder for initialization now that we have a full
     // init segment.
     decoders.AppendElement(mCurrentDecoder);
   }
 
   if (gotMedia) {
+    start += aTimestampOffset;
+    end += aTimestampOffset;
     if (mLastEndTimestamp &&
         (!mParser->TimestampsFuzzyEqual(start, mLastEndTimestamp.value()) ||
          mLastTimestampOffset != aTimestampOffset ||
          mDecoderPerSegment ||
          (mCurrentDecoder && mCurrentDecoder->WasTrimmed()))) {
       MSE_DEBUG("TrackBuffer(%p)::AppendData: Data last=[%lld, %lld] overlaps [%lld, %lld]",
                 this, mLastStartTimestamp, mLastEndTimestamp.value(), start, end);
 
       if (!newInitData) {
         // This data is earlier in the timeline than data we have already
         // processed or not continuous, so we must create a new decoder
         // to handle the decoding.
-        if (!hadCompleteInitData ||
-            !decoders.NewDecoder(aTimestampOffset - mAdjustedTimestamp)) {
+        if (!hadCompleteInitData || !decoders.NewDecoder(aTimestampOffset)) {
           mInitializationPromise.Reject(NS_ERROR_FAILURE, __func__);
           return p;
         }
         MSE_DEBUG("TrackBuffer(%p)::AppendData: Decoder marked as initialized.", this);
         AppendDataToCurrentResource(oldInit, 0);
       }
       mLastStartTimestamp = start;
     } else {
       MSE_DEBUG("TrackBuffer(%p)::AppendData: Segment last=[%lld, %lld] [%lld, %lld]",
                 this, mLastStartTimestamp, mLastEndTimestamp ? mLastEndTimestamp.value() : 0, start, end);
     }
     mLastEndTimestamp.reset();
     mLastEndTimestamp.emplace(end);
   }
 
-  if (gotMedia && start > 0 &&
-      (start < FUZZ_TIMESTAMP_OFFSET || start < mAdjustedTimestamp)) {
-    AdjustDecodersTimestampOffset(mAdjustedTimestamp - start);
-    mAdjustedTimestamp = start;
-  }
-
   if (!AppendDataToCurrentResource(aData, end - start)) {
     mInitializationPromise.Reject(NS_ERROR_FAILURE, __func__);
     return p;
   }
 
   if (decoders.Length()) {
     // We're going to have to wait for the decoder to initialize, the promise
     // will be resolved once initialization completes.
@@ -920,18 +910,9 @@ TrackBuffer::RangeRemoval(int64_t aStart
       }
       MSE_DEBUG("TrackBuffer(%p):RangeRemoval remove empty decoders=%d", this, i);
       RemoveDecoder(decoders[i]);
     }
   }
   return true;
 }
 
-void
-TrackBuffer::AdjustDecodersTimestampOffset(int32_t aOffset)
-{
-  ReentrantMonitorAutoEnter mon(mParentDecoder->GetReentrantMonitor());
-  for (uint32_t i = 0; i < mDecoders.Length(); i++) {
-    mDecoders[i]->SetTimestampOffset(mDecoders[i]->GetTimestampOffset() + aOffset);
-  }
-}
-
 } // namespace mozilla
--- a/dom/media/mediasource/TrackBuffer.h
+++ b/dom/media/mediasource/TrackBuffer.h
@@ -185,21 +185,19 @@ private:
 
   nsRefPtr<MediaSourceDecoder> mParentDecoder;
   const nsCString mType;
 
   // The last start and end timestamps added to the TrackBuffer via
   // AppendData.  Accessed on the main thread only.
   int64_t mLastStartTimestamp;
   Maybe<int64_t> mLastEndTimestamp;
-  void AdjustDecodersTimestampOffset(int32_t aOffset);
 
   // The timestamp offset used by our current decoder, in microseconds.
   int64_t mLastTimestampOffset;
-  int64_t mAdjustedTimestamp;
 
   // Set when the first decoder used by this TrackBuffer is initialized.
   // Protected by mParentDecoder's monitor.
   MediaInfo mInfo;
 
   void ContinueShutdown();
   MediaPromiseHolder<ShutdownPromise> mShutdownPromise;
   bool mDecoderPerSegment;
--- a/media/libstagefright/binding/Box.cpp
+++ b/media/libstagefright/binding/Box.cpp
@@ -7,39 +7,16 @@
 #include "mp4_demuxer/Box.h"
 #include "mp4_demuxer/mp4_demuxer.h"
 #include "mozilla/Endian.h"
 
 using namespace mozilla;
 
 namespace mp4_demuxer {
 
-// Returns the offset from the start of the body of a box of type |aType|
-// to the start of its first child.
-static uint32_t
-BoxOffset(AtomType aType)
-{
-  const uint32_t FULLBOX_OFFSET = 4;
-
-  if (aType == AtomType("mp4a") || aType == AtomType("enca")) {
-    // AudioSampleEntry; ISO 14496-12, section 8.16
-    return 28;
-  } else if (aType == AtomType("mp4v") || aType == AtomType("encv")) {
-    // VideoSampleEntry; ISO 14496-12, section 8.16
-    return 78;
-  } else if (aType == AtomType("stsd")) {
-    // SampleDescriptionBox; ISO 14496-12, section 8.16
-    // This is a FullBox, and contains a |count| member before its child
-    // boxes.
-    return FULLBOX_OFFSET + 4;
-  }
-
-  return 0;
-}
-
 Box::Box(BoxContext* aContext, uint64_t aOffset, const Box* aParent)
   : mContext(aContext), mParent(aParent)
 {
   uint8_t header[8];
   MediaByteRange headerRange(aOffset, aOffset + sizeof(header));
   if (mParent && !mParent->mRange.Contains(headerRange)) {
     return;
   }
@@ -71,31 +48,29 @@ Box::Box(BoxContext* aContext, uint64_t 
     if ((mParent && !mParent->mRange.Contains(bigLengthRange)) ||
         !byteRange->Contains(bigLengthRange) ||
         !mContext->mSource->CachedReadAt(aOffset, bigLength,
                                          sizeof(bigLength), &bytes) ||
         bytes != sizeof(bigLength)) {
       return;
     }
     size = BigEndian::readUint64(bigLength);
-    mBodyOffset = bigLengthRange.mEnd;
+    mChildOffset = bigLengthRange.mEnd;
   } else {
-    mBodyOffset = headerRange.mEnd;
+    mChildOffset = headerRange.mEnd;
   }
 
-  mType = BigEndian::readUint32(&header[4]);
-  mChildOffset = mBodyOffset + BoxOffset(mType);
-
   MediaByteRange boxRange(aOffset, aOffset + size);
   if (mChildOffset > boxRange.mEnd ||
       (mParent && !mParent->mRange.Contains(boxRange)) ||
       !byteRange->Contains(boxRange)) {
     return;
   }
   mRange = boxRange;
+  mType = BigEndian::readUint32(&header[4]);
 }
 
 Box::Box()
   : mContext(nullptr)
 {}
 
 Box
 Box::Next() const
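
On the Box.cpp hunks above: the backed-out version kept a separate mBodyOffset and used the removed BoxOffset() helper to skip the fixed fields of sample-entry boxes before their first child. The 28- and 78-byte constants fall out of the fixed-size fields of AudioSampleEntry and VisualSampleEntry; a sketch of that accounting follows. The field list is an assumption based on ISO 14496-12 and the constant names exist only for the sketch; only the totals 28 and 78 come from this patch.

    // Rough decomposition of the removed BoxOffset() constants (bytes).
    constexpr unsigned kSampleEntryHeader =
        6  /* reserved */ + 2 /* data_reference_index */;
    constexpr unsigned kAudioSampleEntryFields =
        8  /* reserved[2] */ + 2 /* channelcount */ + 2 /* samplesize */ +
        2  /* pre_defined */ + 2 /* reserved */ + 4 /* samplerate */;
    constexpr unsigned kVisualSampleEntryFields =
        2  /* pre_defined */ + 2 /* reserved */ + 12 /* pre_defined[3] */ +
        2  /* width */ + 2 /* height */ + 4 /* horizresolution */ +
        4  /* vertresolution */ + 4 /* reserved */ + 2 /* frame_count */ +
        32 /* compressorname */ + 2 /* depth */ + 2 /* pre_defined */;
    static_assert(kSampleEntryHeader + kAudioSampleEntryFields == 28,
                  "matches BoxOffset() for mp4a/enca");
    static_assert(kSampleEntryHeader + kVisualSampleEntryFields == 78,
                  "matches BoxOffset() for mp4v/encv");
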
--- a/media/libstagefright/binding/DecoderData.cpp
+++ b/media/libstagefright/binding/DecoderData.cpp
@@ -228,23 +228,23 @@ MP4Sample::MP4Sample(const MP4Sample& co
 MP4Sample::~MP4Sample()
 {
   if (mMediaBuffer) {
     mMediaBuffer->release();
   }
 }
 
 void
-MP4Sample::Update(int64_t& aMediaTime)
+MP4Sample::Update(int64_t& aMediaTime, int64_t& aTimestampOffset)
 {
   sp<MetaData> m = mMediaBuffer->meta_data();
   // XXXbholley - Why don't we adjust decode_timestamp for aMediaTime?
   // According to k17e, this code path is no longer used - we should probably remove it.
-  decode_timestamp = FindInt64(m, kKeyDecodingTime);
-  composition_timestamp = FindInt64(m, kKeyTime) - aMediaTime;
+  decode_timestamp = FindInt64(m, kKeyDecodingTime) + aTimestampOffset;
+  composition_timestamp = FindInt64(m, kKeyTime) - aMediaTime + aTimestampOffset;
   duration = FindInt64(m, kKeyDuration);
   byte_offset = FindInt64(m, kKey64BitFileOffset);
   is_sync_point = FindInt32(m, kKeyIsSyncFrame);
   data = reinterpret_cast<uint8_t*>(mMediaBuffer->data());
   size = mMediaBuffer->range_length();
 
   crypto.Update(m);
 }
--- a/media/libstagefright/binding/Index.cpp
+++ b/media/libstagefright/binding/Index.cpp
@@ -1,17 +1,16 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "mp4_demuxer/ByteReader.h"
 #include "mp4_demuxer/Index.h"
 #include "mp4_demuxer/Interval.h"
 #include "mp4_demuxer/MoofParser.h"
-#include "mp4_demuxer/SinfParser.h"
 #include "media/stagefright/MediaSource.h"
 #include "MediaResource.h"
 
 #include <algorithm>
 #include <limits>
 
 using namespace stagefright;
 using namespace mozilla;
@@ -104,50 +103,34 @@ MP4Sample* SampleIterator::GetNext()
 
   size_t bytesRead;
   if (!mIndex->mSource->ReadAt(sample->byte_offset, sample->data, sample->size,
                                &bytesRead) || bytesRead != sample->size) {
     return nullptr;
   }
 
   if (!s->mCencRange.IsNull()) {
-    MoofParser* parser = mIndex->mMoofParser.get();
-
-    if (!parser || !parser->mSinf.IsValid()) {
-      return nullptr;
-    }
-
-    uint8_t ivSize = parser->mSinf.mDefaultIVSize;
-
     // The size comes from an 8 bit field
     nsAutoTArray<uint8_t, 256> cenc;
     cenc.SetLength(s->mCencRange.Length());
-    if (!mIndex->mSource->ReadAt(s->mCencRange.mStart, cenc.Elements(), cenc.Length(),
+    if (!mIndex->mSource->ReadAt(s->mCencRange.mStart, &cenc[0], cenc.Length(),
                                  &bytesRead) || bytesRead != cenc.Length()) {
       return nullptr;
     }
     ByteReader reader(cenc);
     sample->crypto.valid = true;
-    sample->crypto.iv_size = ivSize;
-
-    if (!reader.ReadArray(sample->crypto.iv, ivSize)) {
-      return nullptr;
-    }
-
-    if (reader.CanRead16()) {
+    reader.ReadArray(sample->crypto.iv, 16);
+    if (reader.Remaining()) {
       uint16_t count = reader.ReadU16();
-
-      if (reader.Remaining() < count * 6) {
-        return nullptr;
-      }
-
       for (size_t i = 0; i < count; i++) {
         sample->crypto.plain_sizes.AppendElement(reader.ReadU16());
         sample->crypto.encrypted_sizes.AppendElement(reader.ReadU32());
       }
+      reader.ReadArray(sample->crypto.iv, 16);
+      sample->crypto.iv_size = 16;
     }
   }
 
   Next();
 
   return sample.forget();
 }
 
@@ -224,22 +207,23 @@ SampleIterator::GetNextKeyframeTime()
       return moofs[moof].mIndex[sample].mDecodeTime;
     }
     ++sample;
   }
   MOZ_ASSERT(false); // should not be reached.
 }
 
 Index::Index(const stagefright::Vector<MediaSource::Indice>& aIndex,
-             Stream* aSource, uint32_t aTrackId, Monitor* aMonitor)
+             Stream* aSource, uint32_t aTrackId, Microseconds aTimestampOffset,
+             Monitor* aMonitor)
   : mSource(aSource)
   , mMonitor(aMonitor)
 {
   if (aIndex.isEmpty()) {
-    mMoofParser = new MoofParser(aSource, aTrackId, aMonitor);
+    mMoofParser = new MoofParser(aSource, aTrackId, aTimestampOffset, aMonitor);
   } else {
     for (size_t i = 0; i < aIndex.size(); i++) {
       const MediaSource::Indice& indice = aIndex[i];
       Sample sample;
       sample.mByteRange = MediaByteRange(indice.start_offset,
                                          indice.end_offset);
       sample.mCompositionRange = Interval<Microseconds>(indice.start_composition,
                                                         indice.end_composition);
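
Both versions of SampleIterator::GetNext() above decode the same per-sample 'cenc' auxiliary data: an IV (whose length the removed code took from the tenc box and the restored code fixes at 16 bytes), optionally followed by a subsample table of {clear, encrypted} byte counts. A condensed sketch of that parse, reusing the ByteReader calls visible above; ParseCencAux and CryptoSample are names made up for the sketch, with CryptoSample standing in for the type of sample->crypto:

    // Sketch of the aux-info parse, following the removed (bounds-checked)
    // variant of GetNext().
    static bool
    ParseCencAux(ByteReader& aReader, uint8_t aIVSize, CryptoSample& aCrypto)
    {
      if (!aReader.ReadArray(aCrypto.iv, aIVSize)) {
        return false;
      }
      aCrypto.iv_size = aIVSize;
      if (!aReader.CanRead16()) {
        return true;                           // sample has no subsample table
      }
      uint16_t count = aReader.ReadU16();
      if (aReader.Remaining() < count * 6u) {  // u16 clear + u32 encrypted per entry
        return false;
      }
      for (uint16_t i = 0; i < count; i++) {
        aCrypto.plain_sizes.AppendElement(aReader.ReadU16());
        aCrypto.encrypted_sizes.AppendElement(aReader.ReadU32());
      }
      return true;
    }
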
--- a/media/libstagefright/binding/MoofParser.cpp
+++ b/media/libstagefright/binding/MoofParser.cpp
@@ -1,15 +1,14 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "mp4_demuxer/MoofParser.h"
 #include "mp4_demuxer/Box.h"
-#include "mp4_demuxer/SinfParser.h"
 #include <limits>
 
 namespace mp4_demuxer
 {
 
 using namespace stagefright;
 using namespace mozilla;
 
@@ -24,17 +23,17 @@ MoofParser::RebuildFragmentedIndex(
 void
 MoofParser::RebuildFragmentedIndex(BoxContext& aContext)
 {
   for (Box box(&aContext, mOffset); box.IsAvailable(); box = box.Next()) {
     if (box.IsType("moov")) {
       mInitRange = MediaByteRange(0, box.Range().mEnd);
       ParseMoov(box);
     } else if (box.IsType("moof")) {
-      Moof moof(box, mTrex, mMdhd, mEdts, mSinf);
+      Moof moof(box, mTrex, mMdhd, mEdts, mTimestampOffset);
 
       if (!mMoofs.IsEmpty()) {
         // Stitch time ranges together in the case of a (hopefully small) time
         // range gap between moofs.
         mMoofs.LastElement().FixRounding(moof);
       }
 
       mMoofs.AppendElement(moof);
@@ -143,18 +142,16 @@ MoofParser::ParseTrak(Box& aBox)
 }
 
 void
 MoofParser::ParseMdia(Box& aBox, Tkhd& aTkhd)
 {
   for (Box box = aBox.FirstChild(); box.IsAvailable(); box = box.Next()) {
     if (box.IsType("mdhd")) {
       mMdhd = Mdhd(box);
-    } else if (box.IsType("minf")) {
-      ParseMinf(box);
     }
   }
 }
 
 void
 MoofParser::ParseMvex(Box& aBox)
 {
   for (Box box = aBox.FirstChild(); box.IsAvailable(); box = box.Next()) {
@@ -162,70 +159,22 @@ MoofParser::ParseMvex(Box& aBox)
       Trex trex = Trex(box);
       if (!mTrex.mTrackId || trex.mTrackId == mTrex.mTrackId) {
         mTrex = trex;
       }
     }
   }
 }
 
-void
-MoofParser::ParseMinf(Box& aBox)
-{
-  for (Box box = aBox.FirstChild(); box.IsAvailable(); box = box.Next()) {
-    if (box.IsType("stbl")) {
-      ParseStbl(box);
-    }
-  }
-}
-
-void
-MoofParser::ParseStbl(Box& aBox)
-{
-  for (Box box = aBox.FirstChild(); box.IsAvailable(); box = box.Next()) {
-    if (box.IsType("stsd")) {
-      ParseStsd(box);
-    }
-  }
-}
-
-void
-MoofParser::ParseStsd(Box& aBox)
-{
-  for (Box box = aBox.FirstChild(); box.IsAvailable(); box = box.Next()) {
-    if (box.IsType("encv") || box.IsType("enca")) {
-      ParseEncrypted(box);
-    }
-  }
-}
-
-void
-MoofParser::ParseEncrypted(Box& aBox)
-{
-  for (Box box = aBox.FirstChild(); box.IsAvailable(); box = box.Next()) {
-    // Some MP4 files have been found to have multiple sinf boxes in the same
-    // enc* box. This does not match spec anyway, so just choose the first
-    // one that parses properly.
-    if (box.IsType("sinf")) {
-      mSinf = Sinf(box);
-
-      if (mSinf.IsValid()) {
-        break;
-      }
-    }
-  }
-}
-
-Moof::Moof(Box& aBox, Trex& aTrex, Mdhd& aMdhd, Edts& aEdts, Sinf& aSinf)
-  : mRange(aBox.Range())
-  , mMaxRoundingError(0)
+Moof::Moof(Box& aBox, Trex& aTrex, Mdhd& aMdhd, Edts& aEdts, Microseconds aTimestampOffset) :
+    mRange(aBox.Range()), mTimestampOffset(aTimestampOffset), mMaxRoundingError(0)
 {
   for (Box box = aBox.FirstChild(); box.IsAvailable(); box = box.Next()) {
     if (box.IsType("traf")) {
-      ParseTraf(box, aTrex, aMdhd, aEdts, aSinf);
+      ParseTraf(box, aTrex, aMdhd, aEdts);
     }
   }
   ProcessCenc();
 }
 
 bool
 Moof::GetAuxInfo(AtomType aType, nsTArray<MediaByteRange>* aByteRanges)
 {
@@ -286,32 +235,32 @@ Moof::ProcessCenc()
   }
   for (int i = 0; i < cencRanges.Length(); i++) {
     mIndex[i].mCencRange = cencRanges[i];
   }
   return true;
 }
 
 void
-Moof::ParseTraf(Box& aBox, Trex& aTrex, Mdhd& aMdhd, Edts& aEdts, Sinf& aSinf)
+Moof::ParseTraf(Box& aBox, Trex& aTrex, Mdhd& aMdhd, Edts& aEdts)
 {
   Tfhd tfhd(aTrex);
   Tfdt tfdt;
   for (Box box = aBox.FirstChild(); box.IsAvailable(); box = box.Next()) {
     if (box.IsType("tfhd")) {
       tfhd = Tfhd(box, aTrex);
     } else if (!aTrex.mTrackId || tfhd.mTrackId == aTrex.mTrackId) {
       if (box.IsType("tfdt")) {
         tfdt = Tfdt(box);
       } else if (box.IsType("trun")) {
         ParseTrun(box, tfhd, tfdt, aMdhd, aEdts);
       } else if (box.IsType("saiz")) {
-        mSaizs.AppendElement(Saiz(box, aSinf.mDefaultEncryptionType));
+        mSaizs.AppendElement(Saiz(box));
       } else if (box.IsType("saio")) {
-        mSaios.AppendElement(Saio(box, aSinf.mDefaultEncryptionType));
+        mSaios.AppendElement(Saio(box));
       }
     }
   }
 }
 
 void
 Moof::FixRounding(const Moof& aMoof) {
   Microseconds gap = aMoof.mTimeRange.start - mTimeRange.end;
@@ -395,20 +344,20 @@ Moof::ParseTrun(Box& aBox, Tfhd& aTfhd, 
     if (flags & 0x800) {
       ctsOffset = reader->Read32();
     }
 
     Sample sample;
     sample.mByteRange = MediaByteRange(offset, offset + sampleSize);
     offset += sampleSize;
 
-    sample.mDecodeTime = aMdhd.ToMicroseconds(decodeTime);
+    sample.mDecodeTime = aMdhd.ToMicroseconds(decodeTime) + mTimestampOffset;
     sample.mCompositionRange = Interval<Microseconds>(
-      aMdhd.ToMicroseconds((int64_t)decodeTime + ctsOffset - aEdts.mMediaStart),
-      aMdhd.ToMicroseconds((int64_t)decodeTime + ctsOffset + sampleDuration - aEdts.mMediaStart));
+      aMdhd.ToMicroseconds((int64_t)decodeTime + ctsOffset - aEdts.mMediaStart) + mTimestampOffset,
+      aMdhd.ToMicroseconds((int64_t)decodeTime + ctsOffset + sampleDuration - aEdts.mMediaStart) + mTimestampOffset);
     decodeTime += sampleDuration;
 
     sample.mSync = !(sampleFlags & 0x1010000);
 
     mIndex.AppendElement(sample);
 
     mMdatRange = mMdatRange.Extents(sample.mByteRange);
   }
@@ -505,18 +454,17 @@ Trex::Trex(Box& aBox)
   mTrackId = reader->ReadU32();
   mDefaultSampleDescriptionIndex = reader->ReadU32();
   mDefaultSampleDuration = reader->ReadU32();
   mDefaultSampleSize = reader->ReadU32();
   mDefaultSampleFlags = reader->ReadU32();
   mValid = true;
 }
 
-Tfhd::Tfhd(Box& aBox, Trex& aTrex)
-  : Trex(aTrex)
+Tfhd::Tfhd(Box& aBox, Trex& aTrex) : Trex(aTrex)
 {
   MOZ_ASSERT(aBox.IsType("tfhd"));
   MOZ_ASSERT(aBox.Parent()->IsType("traf"));
   MOZ_ASSERT(aBox.Parent()->Parent()->IsType("moof"));
 
   BoxReader reader(aBox);
   if (!reader->CanReadType<uint32_t>()) {
     return;
@@ -604,19 +552,17 @@ Edts::Edts(Box& aBox)
   } else {
     segment_duration = reader->ReadU32();
     mMediaStart = reader->Read32();
   }
   NS_ASSERTION(segment_duration == 0, "Can't handle edits with fixed durations");
   reader->DiscardRemaining();
 }
 
-Saiz::Saiz(Box& aBox, AtomType aDefaultType)
-  : mAuxInfoType(aDefaultType)
-  , mAuxInfoTypeParameter(0)
+Saiz::Saiz(Box& aBox) : mAuxInfoType("sinf"), mAuxInfoTypeParameter(0)
 {
   BoxReader reader(aBox);
   if (!reader->CanReadType<uint32_t>()) {
     return;
   }
   uint32_t flags = reader->ReadU32();
   uint8_t version = flags >> 24;
   size_t need =
@@ -637,19 +583,17 @@ Saiz::Saiz(Box& aBox, AtomType aDefaultT
   } else {
     if (!reader->ReadArray(mSampleInfoSize, count)) {
       return;
     }
   }
   mValid = true;
 }
 
-Saio::Saio(Box& aBox, AtomType aDefaultType)
-  : mAuxInfoType(aDefaultType)
-  , mAuxInfoTypeParameter(0)
+Saio::Saio(Box& aBox) : mAuxInfoType("sinf"), mAuxInfoTypeParameter(0)
 {
   BoxReader reader(aBox);
   if (!reader->CanReadType<uint32_t>()) {
     return;
   }
   uint32_t flags = reader->ReadU32();
   uint8_t version = flags >> 24;
   size_t need = ((flags & 1) ? (2*sizeof(uint32_t)) : 0) + sizeof(uint32_t);
deleted file mode 100644
--- a/media/libstagefright/binding/SinfParser.cpp
+++ /dev/null
@@ -1,74 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "mozilla/unused.h"
-#include "mp4_demuxer/SinfParser.h"
-#include "mp4_demuxer/AtomType.h"
-#include "mp4_demuxer/Box.h"
-
-namespace mp4_demuxer {
-
-Sinf::Sinf(Box& aBox)
-  : mDefaultIVSize(0)
-  , mDefaultEncryptionType()
-{
-  SinfParser parser(aBox);
-  if (parser.GetSinf().IsValid()) {
-    *this = parser.GetSinf();
-  }
-}
-
-SinfParser::SinfParser(Box& aBox)
-{
-  for (Box box = aBox.FirstChild(); box.IsAvailable(); box = box.Next()) {
-    if (box.IsType("schm")) {
-      ParseSchm(box);
-    } else if (box.IsType("schi")) {
-      ParseSchi(box);
-    }
-  }
-}
-
-void
-SinfParser::ParseSchm(Box& aBox)
-{
-  BoxReader reader(aBox);
-
-  if (reader->Remaining() < 8) {
-    return;
-  }
-
-  mozilla::unused << reader->ReadU32(); // flags -- ignore
-  mSinf.mDefaultEncryptionType = reader->ReadU32();
-
-  reader->DiscardRemaining();
-}
-
-void
-SinfParser::ParseSchi(Box& aBox)
-{
-  for (Box box = aBox.FirstChild(); box.IsAvailable(); box = box.Next()) {
-    if (box.IsType("tenc")) {
-      ParseTenc(box);
-    }
-  }
-}
-
-void
-SinfParser::ParseTenc(Box& aBox)
-{
-  BoxReader reader(aBox);
-
-  if (reader->Remaining() < 24) {
-    return;
-  }
-
-  mozilla::unused << reader->ReadU32(); // flags -- ignore
-
-  uint32_t isEncrypted = reader->ReadU24();
-  mSinf.mDefaultIVSize = reader->ReadU8();
-  memcpy(mSinf.mDefaultKeyID, reader->Read(16), 16);
-}
-
-}
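
The deleted SinfParser walked the protection-scheme hierarchy to pull the track's encryption defaults out of the init segment. The layout it expected, reconstructed from the deleted ParseSchm()/ParseTenc() reads (byte counts come from the code above; the box nesting follows the Common Encryption scheme and should be treated as an assumption here):

    //   sinf                          protection scheme information box
    //    +- schm                      scheme type box (>= 8 bytes read)
    //    |    u32 version/flags       ignored
    //    |    u32 scheme_type         -> Sinf::mDefaultEncryptionType
    //    +- schi                      scheme information box
    //        +- tenc                  track encryption defaults (>= 24 bytes)
    //             u32 version/flags   ignored
    //             u24 IsEncrypted     read and discarded
    //             u8  default_IV_size -> Sinf::mDefaultIVSize
    //             u8  default_KID[16] -> Sinf::mDefaultKeyID
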
deleted file mode 100644
--- a/media/libstagefright/binding/include/mp4_demuxer/Atom.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef ATOM_H_
-#define ATOM_H_
-
-namespace mp4_demuxer {
-
-class Atom
-{
-public:
-  Atom()
-    : mValid(false)
-  {
-  }
-  virtual bool IsValid()
-  {
-    return mValid;
-  }
-protected:
-  bool mValid;
-};
-
-}
-
-#endif // ATOM_H_
--- a/media/libstagefright/binding/include/mp4_demuxer/AtomType.h
+++ b/media/libstagefright/binding/include/mp4_demuxer/AtomType.h
@@ -16,16 +16,15 @@ namespace mp4_demuxer {
 
 class AtomType
 {
 public:
   AtomType() : mType(0) { }
   MOZ_IMPLICIT AtomType(uint32_t aType) : mType(aType) { }
   MOZ_IMPLICIT AtomType(const char* aType) : mType(BigEndian::readUint32(aType)) { }
   bool operator==(const AtomType& aType) const { return mType == aType.mType; }
-  bool operator!() const { return !mType; }
 
 private:
   uint32_t mType;
 };
 }
 
 #endif
--- a/media/libstagefright/binding/include/mp4_demuxer/Box.h
+++ b/media/libstagefright/binding/include/mp4_demuxer/Box.h
@@ -49,17 +49,16 @@ public:
   Box Next() const;
   Box FirstChild() const;
   void Read(nsTArray<uint8_t>* aDest);
 
 private:
   bool Contains(MediaByteRange aRange) const;
   BoxContext* mContext;
   mozilla::MediaByteRange mRange;
-  uint64_t mBodyOffset;
   uint64_t mChildOffset;
   AtomType mType;
   const Box* mParent;
 };
 
 class BoxReader
 {
 public:
--- a/media/libstagefright/binding/include/mp4_demuxer/DecoderData.h
+++ b/media/libstagefright/binding/include/mp4_demuxer/DecoderData.h
@@ -152,17 +152,17 @@ public:
 typedef int64_t Microseconds;
 
 class MP4Sample
 {
 public:
   MP4Sample();
   MP4Sample(const MP4Sample& copy);
   virtual ~MP4Sample();
-  void Update(int64_t& aMediaTime);
+  void Update(int64_t& aMediaTime, int64_t& aTimestampOffset);
   void Pad(size_t aPaddingBytes);
 
   stagefright::MediaBuffer* mMediaBuffer;
 
   Microseconds decode_timestamp;
   Microseconds composition_timestamp;
   Microseconds duration;
   int64_t byte_offset;
--- a/media/libstagefright/binding/include/mp4_demuxer/Index.h
+++ b/media/libstagefright/binding/include/mp4_demuxer/Index.h
@@ -33,17 +33,18 @@ private:
 };
 
 class Index
 {
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(Index)
 
   Index(const stagefright::Vector<stagefright::MediaSource::Indice>& aIndex,
-        Stream* aSource, uint32_t aTrackId, Monitor* aMonitor);
+        Stream* aSource, uint32_t aTrackId, Microseconds aTimestampOffset,
+        Monitor* aMonitor);
 
   void UpdateMoofIndex(const nsTArray<mozilla::MediaByteRange>& aByteRanges);
   Microseconds GetEndCompositionIfBuffered(
     const nsTArray<mozilla::MediaByteRange>& aByteRanges);
   void ConvertByteRangesToTimeRanges(
     const nsTArray<mozilla::MediaByteRange>& aByteRanges,
     nsTArray<Interval<Microseconds>>* aTimeRanges);
   uint64_t GetEvictionOffset(Microseconds aTime);
--- a/media/libstagefright/binding/include/mp4_demuxer/MoofParser.h
+++ b/media/libstagefright/binding/include/mp4_demuxer/MoofParser.h
@@ -1,28 +1,41 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef MOOF_PARSER_H_
 #define MOOF_PARSER_H_
 
-#include "mp4_demuxer/Atom.h"
 #include "mp4_demuxer/AtomType.h"
 #include "mp4_demuxer/mp4_demuxer.h"
-#include "mp4_demuxer/SinfParser.h"
 #include "MediaResource.h"
 
 namespace mp4_demuxer {
 
 class Stream;
 class Box;
 class BoxContext;
 class Moof;
 
+class Atom
+{
+public:
+  Atom()
+    : mValid(false)
+  {
+  }
+  virtual bool IsValid()
+  {
+    return mValid;
+  }
+protected:
+  bool mValid;
+};
+
 class Tkhd : public Atom
 {
 public:
   Tkhd()
     : mCreationTime(0)
     , mModificationTime(0)
     , mTrackId(0)
     , mDuration(0)
@@ -132,27 +145,27 @@ struct Sample
   Microseconds mDecodeTime;
   Interval<Microseconds> mCompositionRange;
   bool mSync;
 };
 
 class Saiz : public Atom
 {
 public:
-  Saiz(Box& aBox, AtomType aDefaultType);
+  explicit Saiz(Box& aBox);
 
   AtomType mAuxInfoType;
   uint32_t mAuxInfoTypeParameter;
   nsTArray<uint8_t> mSampleInfoSize;
 };
 
 class Saio : public Atom
 {
 public:
-  Saio(Box& aBox, AtomType aDefaultType);
+  explicit Saio(Box& aBox);
 
   AtomType mAuxInfoType;
   uint32_t mAuxInfoTypeParameter;
   nsTArray<uint64_t> mOffsets;
 };
 
 class AuxInfo {
 public:
@@ -163,77 +176,71 @@ private:
   int64_t mMoofOffset;
   Saiz& mSaiz;
   Saio& mSaio;
 };
 
 class Moof : public Atom
 {
 public:
-  Moof(Box& aBox, Trex& aTrex, Mdhd& aMdhd, Edts& aEdts, Sinf& aSinf);
+  Moof(Box& aBox, Trex& aTrex, Mdhd& aMdhd, Edts& aEdts, Microseconds aTimestampOffset);
   bool GetAuxInfo(AtomType aType, nsTArray<MediaByteRange>* aByteRanges);
   void FixRounding(const Moof& aMoof);
 
   mozilla::MediaByteRange mRange;
   mozilla::MediaByteRange mMdatRange;
   Interval<Microseconds> mTimeRange;
   nsTArray<Sample> mIndex;
 
   nsTArray<Saiz> mSaizs;
   nsTArray<Saio> mSaios;
 
 private:
-  void ParseTraf(Box& aBox, Trex& aTrex, Mdhd& aMdhd, Edts& aEdts, Sinf& aSinf);
+  void ParseTraf(Box& aBox, Trex& aTrex, Mdhd& aMdhd, Edts& aEdts);
   void ParseTrun(Box& aBox, Tfhd& aTfhd, Tfdt& aTfdt, Mdhd& aMdhd, Edts& aEdts);
   void ParseSaiz(Box& aBox);
   void ParseSaio(Box& aBox);
   bool ProcessCenc();
+  Microseconds mTimestampOffset;
   uint64_t mMaxRoundingError;
 };
 
 class MoofParser
 {
 public:
-  MoofParser(Stream* aSource, uint32_t aTrackId, Monitor* aMonitor)
-    : mSource(aSource)
-    , mOffset(0)
-    , mTrex(aTrackId)
-    , mMonitor(aMonitor)
+  MoofParser(Stream* aSource, uint32_t aTrackId,
+             Microseconds aTimestampOffset, Monitor* aMonitor)
+    : mSource(aSource), mOffset(0), mTimestampOffset(aTimestampOffset),
+      mTrex(aTrackId), mMonitor(aMonitor)
   {
     // Setting the mTrex.mTrackId to 0 is a nasty work around for calculating
     // the composition range for MSE. We need an array of tracks.
   }
   void RebuildFragmentedIndex(
     const nsTArray<mozilla::MediaByteRange>& aByteRanges);
   void RebuildFragmentedIndex(BoxContext& aContext);
   Interval<Microseconds> GetCompositionRange(
     const nsTArray<mozilla::MediaByteRange>& aByteRanges);
   bool ReachedEnd();
   void ParseMoov(Box& aBox);
   void ParseTrak(Box& aBox);
   void ParseMdia(Box& aBox, Tkhd& aTkhd);
   void ParseMvex(Box& aBox);
 
-  void ParseMinf(Box& aBox);
-  void ParseStbl(Box& aBox);
-  void ParseStsd(Box& aBox);
-  void ParseEncrypted(Box& aBox);
-  void ParseSinf(Box& aBox);
-
   bool BlockingReadNextMoof();
 
   mozilla::MediaByteRange mInitRange;
   nsRefPtr<Stream> mSource;
   uint64_t mOffset;
+  Microseconds mTimestampOffset;
   nsTArray<uint64_t> mMoofOffsets;
   Mdhd mMdhd;
   Trex mTrex;
   Tfdt mTfdt;
   Edts mEdts;
-  Sinf mSinf;
   Monitor* mMonitor;
   nsTArray<Moof>& Moofs() { mMonitor->AssertCurrentThreadOwns(); return mMoofs; }
 private:
   nsTArray<Moof> mMoofs;
 };
 }
 
 #endif
deleted file mode 100644
--- a/media/libstagefright/binding/include/mp4_demuxer/SinfParser.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-
-#ifndef SINF_PARSER_H_
-#define SINF_PARSER_H_
-
-#include "mp4_demuxer/Atom.h"
-#include "mp4_demuxer/AtomType.h"
-
-namespace mp4_demuxer {
-
-class Box;
-
-class Sinf : public Atom
-{
-public:
-  Sinf()
-    : mDefaultIVSize(0)
-    , mDefaultEncryptionType()
-  {}
-  explicit Sinf(Box& aBox);
-
-  virtual bool IsValid() MOZ_OVERRIDE
-  {
-    return !!mDefaultIVSize && !!mDefaultEncryptionType;
-  }
-
-  uint8_t mDefaultIVSize;
-  AtomType mDefaultEncryptionType;
-  uint8_t mDefaultKeyID[16];
-};
-
-class SinfParser
-{
-public:
-  explicit SinfParser(Box& aBox);
-
-  Sinf& GetSinf() { return mSinf; }
-private:
-  void ParseSchm(Box& aBox);
-  void ParseSchi(Box& aBox);
-  void ParseTenc(Box& aBox);
-
-  Sinf mSinf;
-};
-
-}
-
-#endif // SINF_PARSER_H_
--- a/media/libstagefright/binding/include/mp4_demuxer/mp4_demuxer.h
+++ b/media/libstagefright/binding/include/mp4_demuxer/mp4_demuxer.h
@@ -38,17 +38,17 @@ protected:
   virtual ~Stream() {}
 };
 
 enum TrackType { kVideo = 1, kAudio };
 
 class MP4Demuxer
 {
 public:
-  explicit MP4Demuxer(Stream* aSource, Monitor* aMonitor);
+  explicit MP4Demuxer(Stream* aSource, Microseconds aTimestampOffset, Monitor* aMonitor);
   ~MP4Demuxer();
 
   bool Init();
   Microseconds Duration();
   bool CanSeek();
 
   bool HasValidAudio();
   bool HasValidVideo();
@@ -81,15 +81,16 @@ private:
   AudioDecoderConfig mAudioConfig;
   VideoDecoderConfig mVideoConfig;
   CryptoFile mCrypto;
 
   nsAutoPtr<StageFrightPrivate> mPrivate;
   nsRefPtr<Stream> mSource;
   nsTArray<mozilla::MediaByteRange> mCachedByteRanges;
   nsTArray<Interval<Microseconds>> mCachedTimeRanges;
+  Microseconds mTimestampOffset;
   Monitor* mMonitor;
   Microseconds mNextKeyframeTime;
 };
 
 } // namespace mozilla
 
 #endif // MP4_DEMUXER_H_
--- a/media/libstagefright/binding/mp4_demuxer.cpp
+++ b/media/libstagefright/binding/mp4_demuxer.cpp
@@ -68,21 +68,20 @@ public:
   virtual uint32_t flags() { return kWantsPrefetching | kIsHTTPBasedSource; }
 
   virtual status_t reconnectAtOffset(off64_t offset) { return NO_ERROR; }
 
 private:
   nsRefPtr<Stream> mSource;
 };
 
-MP4Demuxer::MP4Demuxer(Stream* source, Monitor* aMonitor)
-  : mPrivate(new StageFrightPrivate())
-  , mSource(source)
-  , mMonitor(aMonitor)
-  , mNextKeyframeTime(-1)
+MP4Demuxer::MP4Demuxer(Stream* source, Microseconds aTimestampOffset, Monitor* aMonitor)
+  : mPrivate(new StageFrightPrivate()), mSource(source),
+    mTimestampOffset(aTimestampOffset), mMonitor(aMonitor),
+    mNextKeyframeTime(-1)
 {
   mPrivate->mExtractor = new MPEG4Extractor(new DataSourceAdapter(source));
 }
 
 MP4Demuxer::~MP4Demuxer()
 {
   if (mPrivate->mAudio.get()) {
     mPrivate->mAudio->stop();
@@ -117,33 +116,33 @@ MP4Demuxer::Init()
       sp<MediaSource> track = e->getTrack(i);
       if (track->start() != OK) {
         return false;
       }
       mPrivate->mAudio = track;
       mAudioConfig.Update(metaData, mimeType);
       nsRefPtr<Index> index = new Index(mPrivate->mAudio->exportIndex(),
                                         mSource, mAudioConfig.mTrackId,
-                                        mMonitor);
+                                        mTimestampOffset, mMonitor);
       mPrivate->mIndexes.AppendElement(index);
-      if (index->IsFragmented()) {
+      if (index->IsFragmented() && !mAudioConfig.crypto.valid) {
         mPrivate->mAudioIterator = new SampleIterator(index);
       }
     } else if (!mPrivate->mVideo.get() && !strncmp(mimeType, "video/", 6)) {
       sp<MediaSource> track = e->getTrack(i);
       if (track->start() != OK) {
         return false;
       }
       mPrivate->mVideo = track;
       mVideoConfig.Update(metaData, mimeType);
       nsRefPtr<Index> index = new Index(mPrivate->mVideo->exportIndex(),
                                         mSource, mVideoConfig.mTrackId,
-                                        mMonitor);
+                                        mTimestampOffset, mMonitor);
       mPrivate->mIndexes.AppendElement(index);
-      if (index->IsFragmented()) {
+      if (index->IsFragmented() && !mVideoConfig.crypto.valid) {
         mPrivate->mVideoIterator = new SampleIterator(index);
       }
     }
   }
   sp<MetaData> metaData = e->getMetaData();
   mCrypto.Update(metaData);
 
   int64_t movieDuration;
@@ -228,17 +227,17 @@ MP4Demuxer::DemuxAudioSample()
   status_t status =
     mPrivate->mAudio->read(&sample->mMediaBuffer, &mPrivate->mAudioOptions);
   mPrivate->mAudioOptions.clearSeekTo();
 
   if (status < 0) {
     return nullptr;
   }
 
-  sample->Update(mAudioConfig.media_time);
+  sample->Update(mAudioConfig.media_time, mTimestampOffset);
 
   return sample.forget();
 }
 
 MP4Sample*
 MP4Demuxer::DemuxVideoSample()
 {
   mMonitor->AssertCurrentThreadOwns();
@@ -261,17 +260,17 @@ MP4Demuxer::DemuxVideoSample()
   status_t status =
     mPrivate->mVideo->read(&sample->mMediaBuffer, &mPrivate->mVideoOptions);
   mPrivate->mVideoOptions.clearSeekTo();
 
   if (status < 0) {
     return nullptr;
   }
 
-  sample->Update(mVideoConfig.media_time);
+  sample->Update(mVideoConfig.media_time, mTimestampOffset);
   sample->extra_data = mVideoConfig.extra_data;
 
   return sample.forget();
 }
 
 void
 MP4Demuxer::UpdateIndex(const nsTArray<mozilla::MediaByteRange>& aByteRanges)
 {
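
Putting the mp4_demuxer.cpp hunks together with the MoofParser and DecoderData hunks earlier in this patch, the restored plumbing threads a single Microseconds offset from the demuxer down to every demuxed sample's timestamps. A condensed, non-compilable trace of that flow; every call named below appears in a hunk above:

    // MP4Demuxer(stream, aTimestampOffset, monitor)          stores mTimestampOffset
    //   Init():
    //     new Index(indices, mSource, trackId, mTimestampOffset, mMonitor)
    //       new MoofParser(source, trackId, offset, monitor)  // fragmented files
    //         Moof::ParseTrun(): mDecodeTime and mCompositionRange += mTimestampOffset
    //   DemuxAudioSample() / DemuxVideoSample():
    //     sample->Update(config.media_time, mTimestampOffset)  // stagefright MediaSource path
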
--- a/media/libstagefright/moz.build
+++ b/media/libstagefright/moz.build
@@ -45,27 +45,25 @@ if CONFIG['OS_TARGET'] != 'Android':
         'system/core/libcutils/strdup16to8.c',
         'system/core/liblog/logd_write.c',
         'system/core/liblog/logprint.c',
     ]
 
 EXPORTS.mp4_demuxer += [
     'binding/include/mp4_demuxer/Adts.h',
     'binding/include/mp4_demuxer/AnnexB.h',
-    'binding/include/mp4_demuxer/Atom.h',
     'binding/include/mp4_demuxer/AtomType.h',
     'binding/include/mp4_demuxer/BufferStream.h',
     'binding/include/mp4_demuxer/ByteReader.h',
     'binding/include/mp4_demuxer/ByteWriter.h',
     'binding/include/mp4_demuxer/DecoderData.h',
     'binding/include/mp4_demuxer/H264.h',
     'binding/include/mp4_demuxer/Interval.h',
     'binding/include/mp4_demuxer/MoofParser.h',
     'binding/include/mp4_demuxer/mp4_demuxer.h',
-    'binding/include/mp4_demuxer/SinfParser.h',
 ]
 
 SOURCES += [
     'frameworks/av/media/libstagefright/foundation/hexdump.cpp',
     'frameworks/av/media/libstagefright/MetaData.cpp',
     'system/core/libutils/RefBase.cpp',
     'system/core/libutils/String16.cpp',
     'system/core/libutils/String8.cpp',
@@ -77,17 +75,16 @@ UNIFIED_SOURCES += [
     'binding/AnnexB.cpp',
     'binding/Box.cpp',
     'binding/BufferStream.cpp',
     'binding/DecoderData.cpp',
     'binding/H264.cpp',
     'binding/Index.cpp',
     'binding/MoofParser.cpp',
     'binding/mp4_demuxer.cpp',
-    'binding/SinfParser.cpp',
     'frameworks/av/media/libstagefright/DataSource.cpp',
     'frameworks/av/media/libstagefright/ESDS.cpp',
     'frameworks/av/media/libstagefright/foundation/AAtomizer.cpp',
     'frameworks/av/media/libstagefright/foundation/ABitReader.cpp',
     'frameworks/av/media/libstagefright/foundation/ABuffer.cpp',
     'frameworks/av/media/libstagefright/foundation/AString.cpp',
     'frameworks/av/media/libstagefright/id3/ID3.cpp',
     'frameworks/av/media/libstagefright/MediaBuffer.cpp',