Bug 1171330: P11. Add eviction support. r=cajbir.
author Jean-Yves Avenard <jyavenard@mozilla.com>
Thu, 11 Jun 2015 16:26:57 +1000
changeset 248275 b4d64177e02feb19194eaf99abc9b6ffffad96a8
parent 248274 34ec7493164c7a7b10cd7e6ad263e41c662dce43
child 248276 d62627bbe3b24fa92aef8aa9b3fa86a39bbcfb32
push id 28893
push user kwierso@gmail.com
push date Fri, 12 Jun 2015 00:02:58 +0000
treeherder autoland@8cf9d3e497f9
reviewers cajbir
bugs 1171330
milestone 41.0a1
Bug 1171330: P11. Add eviction support. r=cajbir. We evict data in two steps: first data already played, up to the playback time, then tail data.
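For orientation, here is a minimal, self-contained sketch of the two-step strategy that DoEvictData implements below. The Frame struct and TwoStepEvict helper are illustrative stand-ins only (the real code walks MediaRawData samples, works in TimeUnit, and performs the actual removal through CodedFrameRemoval); in both steps eviction only happens at keyframe boundaries so decoding dependencies are preserved.

    #include <cstdint>
    #include <vector>

    // Hypothetical stand-in for MediaRawData; illustrative only.
    struct Frame {
      int64_t timeUs;      // presentation time in microseconds
      uint32_t sizeBytes;  // frame size in bytes
      bool keyframe;
    };

    // Evict roughly aToEvict bytes: first whole GOPs from the front, keeping
    // the last 5s behind the playback position, then whole GOPs from the
    // tail, keeping everything up to 5s ahead of it.
    static void TwoStepEvict(std::vector<Frame>& aBuffer,
                             int64_t aPlaybackUs, int64_t aToEvict)
    {
      const int64_t kMarginUs = 5 * 1000000;

      // Step 1: scan forward, committing the GOP accumulated so far each
      // time the next keyframe is reached.
      size_t firstKept = 0;  // index of the first frame to keep
      int64_t partial = 0;   // bytes accumulated since the last keyframe
      for (size_t i = 0; i < aBuffer.size(); i++) {
        if (aBuffer[i].keyframe) {
          aToEvict -= partial;
          partial = 0;
          firstKept = i;
          if (aToEvict < 0) {
            break;
          }
        }
        if (aBuffer[i].timeUs >= aPlaybackUs - kMarginUs) {
          break;  // reached the protected window around playback
        }
        partial += aBuffer[i].sizeBytes;
      }
      aBuffer.erase(aBuffer.begin(), aBuffer.begin() + firstKept);
      if (aToEvict <= 0) {
        return;
      }

      // Step 2: still over budget; scan backward and drop trailing GOPs
      // lying more than 5s ahead of the playback position.
      size_t firstDropped = aBuffer.size();
      partial = 0;
      for (size_t i = aBuffer.size(); i-- > 0;) {
        if (aBuffer[i].keyframe) {
          aToEvict -= partial;
          partial = 0;
          firstDropped = i;
          if (aToEvict < 0) {
            break;
          }
        }
        if (aBuffer[i].timeUs <= aPlaybackUs + kMarginUs) {
          break;
        }
        partial += aBuffer[i].sizeBytes;
      }
      aBuffer.erase(aBuffer.begin() + firstDropped, aBuffer.end());
    }

Compared with this sketch, the patch additionally keeps a running per-track byte count (mSizeBuffer, summed into mSizeSourceBuffer) so that GetSize() is O(1), and it dispatches the eviction work to the task queue rather than running it synchronously on the main thread.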
dom/media/mediasource/TrackBuffersManager.cpp
dom/media/mediasource/TrackBuffersManager.h
--- a/dom/media/mediasource/TrackBuffersManager.cpp
+++ b/dom/media/mediasource/TrackBuffersManager.cpp
@@ -124,17 +124,30 @@ TrackBuffersManager::RangeRemoval(TimeUn
   return p;
 }
 
 TrackBuffersManager::EvictDataResult
 TrackBuffersManager::EvictData(TimeUnit aPlaybackTime,
                                uint32_t aThreshold,
                                TimeUnit* aBufferStartTime)
 {
-  // TODO.
+  MOZ_ASSERT(NS_IsMainThread());
+
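+  // Nothing to do until the total buffered size exceeds aThreshold.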
+  int64_t toEvict = GetSize() - aThreshold;
+  if (toEvict <= 0) {
+    return EvictDataResult::NO_DATA_EVICTED;
+  }
+  MSE_DEBUG("Reaching our size limit, schedule eviction of %lld bytes", toEvict);
+
+  nsCOMPtr<nsIRunnable> task =
+    NS_NewRunnableMethodWithArgs<TimeUnit, uint32_t>(
+      this, &TrackBuffersManager::DoEvictData,
+      aPlaybackTime, toEvict);
+  GetTaskQueue()->Dispatch(task.forget());
+
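+  // Eviction runs asynchronously on the task queue, so we can't yet report
+  // what will actually be removed.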
   return EvictDataResult::NO_DATA_EVICTED;
 }
 
 void
 TrackBuffersManager::EvictBefore(TimeUnit aTime)
 {
   MOZ_ASSERT(NS_IsMainThread());
   nsCOMPtr<nsIRunnable> task =
@@ -177,18 +190,17 @@ TrackBuffersManager::Buffered()
     intersection.Intersection(*trackRanges);
   }
   return intersection;
 }
 
 int64_t
 TrackBuffersManager::GetSize()
 {
-  // TODO
-  return 0;
+  return mSizeSourceBuffer;
 }
 
 void
 TrackBuffersManager::Ended()
 {
   mEnded = true;
 }
 
@@ -276,16 +288,80 @@ TrackBuffersManager::CompleteResetParser
   // 7. Set append state to WAITING_FOR_SEGMENT.
   SetAppendState(AppendState::WAITING_FOR_SEGMENT);
 
   // We're done.
   mAbort = false;
 }
 
 void
+TrackBuffersManager::DoEvictData(const TimeUnit& aPlaybackTime,
+                                 uint32_t aSizeToEvict)
+{
+  MOZ_ASSERT(OnTaskQueue());
+
+  // Remove data we've already played, keeping the last 5s behind the
+  // playback position.
+  TimeUnit lowerLimit = aPlaybackTime - TimeUnit::FromSeconds(5);
+  TimeUnit to;
+  // Video takes the most space, so evict from the video track when we have one.
+  const auto& track = HasVideo() ? mVideoTracks : mAudioTracks;
+  const auto& buffer = track.mBuffers.LastElement();
+  uint32_t lastKeyFrameIndex = 0;
+  int64_t toEvict = aSizeToEvict;
+  uint32_t partialEvict = 0;
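+  // Walk forward from the start of the buffer. partialEvict accumulates the
+  // size of the frames seen since the last keyframe; a GOP is only committed
+  // for eviction once the next keyframe is reached, so removal never cuts
+  // decoding dependencies.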
+  for (uint32_t i = 0; i < buffer.Length(); i++) {
+    const auto& frame = buffer[i];
+    if (frame->mKeyframe) {
+      lastKeyFrameIndex = i;
+      toEvict -= partialEvict;
+      if (toEvict < 0) {
+        break;
+      }
+      partialEvict = 0;
+    }
+    if (frame->mTime >= lowerLimit.ToMicroseconds()) {
+      break;
+    }
+    partialEvict += sizeof(*frame) + frame->mSize;
+  }
+  if (lastKeyFrameIndex > 0) {
+    CodedFrameRemoval(
+      TimeInterval(TimeUnit::FromMicroseconds(0),
+                   TimeUnit::FromMicroseconds(buffer[lastKeyFrameIndex-1]->mTime)));
+  }
+  if (toEvict <= 0) {
+    return;
+  }
+
+  // Still more to evict. Remove data starting from the end, keeping
+  // everything up to 5s ahead of our playback time.
+  TimeUnit upperLimit = aPlaybackTime + TimeUnit::FromSeconds(5);
+  for (int32_t i = buffer.Length() - 1; i >= 0; i--) {
+    const auto& frame = buffer[i];
+    if (frame->mKeyframe) {
+      lastKeyFrameIndex = i;
+      toEvict -= partialEvict;
+      if (toEvict < 0) {
+        break;
+      }
+      partialEvict = 0;
+    }
+    if (frame->mTime <= upperLimit.ToMicroseconds()) {
+      break;
+    }
+    partialEvict += sizeof(*frame) + frame->mSize;
+  }
+  if (lastKeyFrameIndex + 1 < buffer.Length()) {
+    CodedFrameRemoval(
+      TimeInterval(TimeUnit::FromMicroseconds(buffer[lastKeyFrameIndex+1]->mTime),
+                   TimeUnit::FromInfinity()));
+  }
+}
+
+void
 TrackBuffersManager::CodedFrameRemoval(TimeInterval aInterval)
 {
   MSE_DEBUG("From %.2fs to %.2f",
             aInterval.mStart.ToSeconds(), aInterval.mEnd.ToSeconds());
   TimeUnit duration{TimeUnit::FromSeconds(mParentDecoder->GetMediaSourceDuration())};
 
   MSE_DEBUG("duration:%.2f", duration.ToSeconds());
   MSE_DEBUG("before video ranges=%s", DumpTimeRanges(mVideoTracks.mBufferedRanges).get());
@@ -327,30 +403,32 @@ TrackBuffersManager::CodedFrameRemoval(T
             TimeInterval(TimeUnit::FromMicroseconds(frame->mTime),
                          TimeUnit::FromMicroseconds(frame->mTime + frame->mDuration));
           firstRemovedIndex = i;
         } else {
           removedInterval = removedInterval.Span(
             TimeInterval(TimeUnit::FromMicroseconds(frame->mTime),
                          TimeUnit::FromMicroseconds(frame->mTime + frame->mDuration)));
         }
+        track->mSizeBuffer -= sizeof(*frame) + frame->mSize;
         data.RemoveElementAt(i);
       }
     }
     // 4. Remove decoding dependencies of the coded frames removed in the previous step:
     // Remove all coded frames between the coded frames removed in the previous step and the next random access point after those removed frames.
     if (firstRemovedIndex >= 0) {
       for (uint32_t i = firstRemovedIndex; i < data.Length(); i++) {
         const auto& frame = data[i];
         if (frame->mKeyframe) {
           break;
         }
         removedInterval = removedInterval.Span(
           TimeInterval(TimeUnit::FromMicroseconds(frame->mTime),
                        TimeUnit::FromMicroseconds(frame->mTime + frame->mDuration)));
+        track->mSizeBuffer -= sizeof(*frame) + frame->mSize;
         data.RemoveElementAt(i);
       }
     }
     track->mBufferedRanges -= removedInterval;
 
     // 5. If this object is in activeSourceBuffers, the current playback position
     // is greater than or equal to start and less than the remove end timestamp,
     // and HTMLMediaElement.readyState is greater than HAVE_METADATA, then set the
@@ -364,16 +442,19 @@ TrackBuffersManager::CodedFrameRemoval(T
   {
     MonitorAutoLock mon(mMonitor);
     mVideoBufferedRanges = mVideoTracks.mBufferedRanges;
     mAudioBufferedRanges = mAudioTracks.mBufferedRanges;
   }
   MSE_DEBUG("after video ranges=%s", DumpTimeRanges(mVideoTracks.mBufferedRanges).get());
   MSE_DEBUG("after audio ranges=%s", DumpTimeRanges(mAudioTracks.mBufferedRanges).get());
 
+  // Update our reported total size.
+  mSizeSourceBuffer = mVideoTracks.mSizeBuffer + mAudioTracks.mSizeBuffer;
+
   mRangeRemovalPromise.ResolveIfExists(true, __func__);
 }
 
 void
 TrackBuffersManager::InitSegmentParserLoop()
 {
   AppendIncomingBuffers();
   SegmentParserLoop();
@@ -886,16 +967,19 @@ TrackBuffersManager::CompleteCodedFrameP
   {
     MonitorAutoLock mon(mMonitor);
 
     // Save our final tracks buffered ranges.
     mVideoBufferedRanges = mVideoTracks.mBufferedRanges;
     mAudioBufferedRanges = mAudioTracks.mBufferedRanges;
   }
 
+  // Update our reported total size.
+  mSizeSourceBuffer = mVideoTracks.mSizeBuffer + mAudioTracks.mSizeBuffer;
+
   // Return to step 6.4 of Segment Parser Loop algorithm
   // 4. If this SourceBuffer is full and cannot accept more media data, then set the buffer full flag to true.
   // TODO
   mBufferFull = false;
 
   // 5. If the input buffer does not contain a complete media segment, then jump to the need more data step below.
   if (mParser->MediaSegmentRange().IsNull()) {
     mProcessingPromise.ResolveIfExists(true, __func__);
@@ -1041,16 +1125,17 @@ TrackBuffersManager::ProcessFrame(MediaR
               TimeInterval(TimeUnit::FromMicroseconds(sample->mTime),
                            TimeUnit::FromMicroseconds(sample->mTime + sample->mDuration));
             firstRemovedIndex = i;
           } else {
             removedInterval = removedInterval.Span(
               TimeInterval(TimeUnit::FromMicroseconds(sample->mTime),
                            TimeUnit::FromMicroseconds(sample->mTime + sample->mDuration)));
           }
+          trackBuffer.mSizeBuffer -= sizeof(*sample) + sample->mSize;
           data.RemoveElementAt(i);
         }
       }
     } else if (trackBuffer.mHighestEndTimestamp.ref() <= presentationTimestamp) {
       for (uint32_t i = 0; i < data.Length(); i++) {
         MediaRawData* sample = data[i].get();
         if (sample->mTime >= trackBuffer.mHighestEndTimestamp.ref().ToMicroseconds() &&
             sample->mTime < frameEndTimestamp.ToMicroseconds()) {
@@ -1059,46 +1144,50 @@ TrackBuffersManager::ProcessFrame(MediaR
               TimeInterval(TimeUnit::FromMicroseconds(sample->mTime),
                            TimeUnit::FromMicroseconds(sample->mTime + sample->mDuration));
             firstRemovedIndex = i;
           } else {
             removedInterval = removedInterval.Span(
               TimeInterval(TimeUnit::FromMicroseconds(sample->mTime),
                            TimeUnit::FromMicroseconds(sample->mTime + sample->mDuration)));
           }
+          trackBuffer.mSizeBuffer -= sizeof(*sample) + sample->mSize;
           data.RemoveElementAt(i);
         }
       }
     }
   }
   // 15. Remove decoding dependencies of the coded frames removed in the previous step:
   // Remove all coded frames between the coded frames removed in the previous step and the next random access point after those removed frames.
   if (firstRemovedIndex >= 0) {
     for (uint32_t i = firstRemovedIndex; i < data.Length(); i++) {
       MediaRawData* sample = data[i].get();
       if (sample->mKeyframe) {
         break;
       }
       removedInterval = removedInterval.Span(
         TimeInterval(TimeUnit::FromMicroseconds(sample->mTime),
                      TimeUnit::FromMicroseconds(sample->mTime + sample->mDuration)));
+      trackBuffer.mSizeBuffer -= sizeof(*sample) + sample->mSize;
       data.RemoveElementAt(i);
     }
     // Update our buffered range to exclude the range just removed.
     trackBuffer.mBufferedRanges -= removedInterval;
   }
 
   // 16. Add the coded frame with the presentation timestamp, decode timestamp, and frame duration to the track buffer.
   aSample->mTime = presentationTimestamp.ToMicroseconds();
   aSample->mTimecode = decodeTimestamp.ToMicroseconds();
   if (firstRemovedIndex >= 0) {
     data.InsertElementAt(firstRemovedIndex, aSample);
   } else {
     data.AppendElement(aSample);
   }
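+  // Account for the newly added frame in this track's byte count.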
+  trackBuffer.mSizeBuffer += sizeof(*aSample) + aSample->mSize;
+
   // 17. Set last decode timestamp for track buffer to decode timestamp.
   trackBuffer.mLastDecodeTimestamp = Some(decodeTimestamp);
   // 18. Set last frame duration for track buffer to frame duration.
   trackBuffer.mLastFrameDuration = Some(TimeUnit::FromMicroseconds(aSample->mDuration));
   // 19. If highest end timestamp for track buffer is unset or frame end timestamp is greater than highest end timestamp, then set highest end timestamp for track buffer to frame end timestamp.
   if (trackBuffer.mHighestEndTimestamp.isNothing() ||
       frameEndTimestamp > trackBuffer.mHighestEndTimestamp.ref()) {
     trackBuffer.mHighestEndTimestamp = Some(frameEndTimestamp);
--- a/dom/media/mediasource/TrackBuffersManager.h
+++ b/dom/media/mediasource/TrackBuffersManager.h
@@ -154,33 +154,37 @@ private:
   void DoDemuxAudio();
   void OnAudioDemuxCompleted(nsRefPtr<MediaTrackDemuxer::SamplesHolder> aSamples);
   void OnAudioDemuxFailed(DemuxerFailureReason aFailure)
   {
     mAudioTracks.mDemuxRequest.Complete();
     OnDemuxFailed(TrackType::kAudioTrack, aFailure);
   }
 
+  void DoEvictData(const TimeUnit& aPlaybackTime, uint32_t aSizeToEvict);
+
   struct TrackData {
     TrackData()
       : mNumTracks(0)
       , mNeedRandomAccessPoint(true)
+      , mSizeBuffer(0)
     {}
     uint32_t mNumTracks;
     Maybe<TimeUnit> mLastDecodeTimestamp;
     Maybe<TimeUnit> mLastFrameDuration;
     Maybe<TimeUnit> mHighestEndTimestamp;
     bool mNeedRandomAccessPoint;
     nsRefPtr<MediaTrackDemuxer> mDemuxer;
     TrackBuffer mQueuedSamples;
     MediaPromiseRequestHolder<MediaTrackDemuxer::SamplesPromise> mDemuxRequest;
     UniquePtr<TrackInfo> mInfo;
     // We only manage a single track of each type at this time.
     nsTArray<TrackBuffer> mBuffers;
     TimeIntervals mBufferedRanges;
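+    // Total size in bytes of the frames stored in mBuffers.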
+    uint32_t mSizeBuffer;
   };
   bool ProcessFrame(MediaRawData* aSample, TrackData& aTrackData);
   MediaPromiseRequestHolder<CodedFrameProcessingPromise> mProcessingRequest;
   MediaPromiseHolder<CodedFrameProcessingPromise> mProcessingPromise;
 
   // SourceBuffer media promise (resolved on the main thread)
   MediaPromiseHolder<AppendPromise> mAppendPromise;
   MediaPromiseHolder<RangeRemovalPromise> mRangeRemovalPromise;
@@ -218,16 +222,19 @@ private:
   nsMainThreadPtrHandle<dom::SourceBuffer> mParent;
   nsMainThreadPtrHandle<MediaSourceDecoder> mParentDecoder;
 
   // Set to true if abort is called.
   Atomic<bool> mAbort;
   // Set to true if mediasource state changed to ended.
   Atomic<bool> mEnded;
 
+  // Total size in bytes of this source buffer's content.
+  Atomic<int64_t> mSizeSourceBuffer;
+
  // Monitor to protect the following objects accessed across multiple threads.
   mutable Monitor mMonitor;
  // Set by the main thread, but only when all our tasks are complete
   // (e.g. when SourceBuffer.updating is false). So the monitor isn't
   // technically required for mIncomingBuffer.
   typedef Pair<nsRefPtr<MediaLargeByteBuffer>, TimeUnit> IncomingBuffer;
   nsTArray<IncomingBuffer> mIncomingBuffers;
   TimeIntervals mVideoBufferedRanges;