Bug 1198094: P1. Limit rate at which InputExhausted could be called by mac decoder. r=rillian a=ritu
☠☠ backed out by 5bb661db5c6c ☠ ☠
author: Jean-Yves Avenard <jyavenard@mozilla.com>
Tue, 25 Aug 2015 15:33:23 +1000
changeset 288971 eef6993d896ed4ae6e19c3e1079b689edd6499cb
parent 288970 744ce5c31af5b6121dd450ec25d32999d80b0090
child 288972 43c9dbedf7dec89862ffbfd98cd4a51becd32952
push id: 5067
push user: raliiev@mozilla.com
push date: Mon, 21 Sep 2015 14:04:52 +0000
treeherder: mozilla-beta@14221ffe5b2f [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: rillian, ritu
bugs: 1198094
milestone: 42.0a2
Bug 1198094: P1. Limit rate at which InputExhausted could be called by mac decoder. r=rillian a=ritu
dom/media/platforms/apple/AppleVDADecoder.cpp
dom/media/platforms/apple/AppleVDADecoder.h
dom/media/platforms/apple/AppleVTDecoder.cpp
--- a/dom/media/platforms/apple/AppleVDADecoder.cpp
+++ b/dom/media/platforms/apple/AppleVDADecoder.cpp
@@ -39,16 +39,17 @@ AppleVDADecoder::AppleVDADecoder(const V
   , mPictureWidth(aConfig.mImage.width)
   , mPictureHeight(aConfig.mImage.height)
   , mDisplayWidth(aConfig.mDisplay.width)
   , mDisplayHeight(aConfig.mDisplay.height)
   , mInputIncoming(0)
   , mIsShutDown(false)
   , mUseSoftwareImages(true)
   , mIs106(!nsCocoaFeatures::OnLionOrLater())
+  , mQueuedSamples(0)
   , mMonitor("AppleVideoDecoder")
   , mIsFlushing(false)
   , mDecoder(nullptr)
 {
   MOZ_COUNT_CTOR(AppleVDADecoder);
   // TODO: Verify aConfig.mime_type.
 
   mExtraData = aConfig.mExtraData;
@@ -217,25 +218,23 @@ PlatformCallback(void* decompressionOutp
 
   // Validate our arguments.
   // According to Apple's TN2267
   // The output callback is still called for all flushed frames,
   // but no image buffers will be returned.
   // FIXME: Distinguish between errors and empty flushed frames.
   if (status != noErr || !image) {
     NS_WARNING("AppleVDADecoder decoder returned no data");
-    return;
-  }
-  MOZ_ASSERT(CFGetTypeID(image) == CVPixelBufferGetTypeID(),
-             "AppleVDADecoder returned an unexpected image type");
-
-  if (infoFlags & kVDADecodeInfo_FrameDropped)
-  {
+    image = nullptr;
+  } else if (infoFlags & kVDADecodeInfo_FrameDropped) {
     NS_WARNING("  ...frame dropped...");
-    return;
+    image = nullptr;
+  } else {
+    MOZ_ASSERT(CFGetTypeID(image) == CVPixelBufferGetTypeID(),
+               "AppleVDADecoder returned an unexpected image type");
   }
 
   AppleVDADecoder* decoder =
     static_cast<AppleVDADecoder*>(decompressionOutputRefCon);
 
   AutoCFRelease<CFNumberRef> ptsref =
     (CFNumberRef)CFDictionaryGetValue(frameInfo, CFSTR("FRAME_PTS"));
   AutoCFRelease<CFNumberRef> dtsref =
@@ -282,24 +281,26 @@ AppleVDADecoder::CreateAppleFrameRef(con
 }
 
 void
 AppleVDADecoder::DrainReorderedFrames()
 {
   while (!mReorderQueue.IsEmpty()) {
     mCallback->Output(mReorderQueue.Pop());
   }
+  mQueuedSamples = 0;
 }
 
 void
 AppleVDADecoder::ClearReorderedFrames()
 {
   while (!mReorderQueue.IsEmpty()) {
     mReorderQueue.Pop();
   }
+  mQueuedSamples = 0;
 }
 
 // Copy and return a decoded frame.
 nsresult
 AppleVDADecoder::OutputFrame(CFRefPtr<CVPixelBufferRef> aImage,
                              AppleVDADecoder::AppleFrameRef aFrameRef)
 {
   MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
@@ -312,16 +313,29 @@ AppleVDADecoder::OutputFrame(CFRefPtr<CV
   LOG("mp4 output frame %lld dts %lld pts %lld duration %lld us%s",
       aFrameRef.byte_offset,
       aFrameRef.decode_timestamp.ToMicroseconds(),
       aFrameRef.composition_timestamp.ToMicroseconds(),
       aFrameRef.duration.ToMicroseconds(),
       aFrameRef.is_sync_point ? " keyframe" : ""
   );
 
+  if (mQueuedSamples > mMaxRefFrames) {
+    // We had stopped requesting more input because we had received too much at
+    // the time. We can ask for more once again.
+    mCallback->InputExhausted();
+  }
+  MOZ_ASSERT(mQueuedSamples);
+  mQueuedSamples--;
+
+  if (!aImage) {
+    // Image was dropped by decoder.
+    return NS_OK;
+  }
+
   // Where our resulting image will end up.
   nsRefPtr<VideoData> data;
   // Bounds.
   VideoInfo info;
   info.mDisplay = nsIntSize(mDisplayWidth, mDisplayHeight);
   gfx::IntRect visible = gfx::IntRect(0,
                                       0,
                                       mPictureWidth,
@@ -475,16 +489,18 @@ AppleVDADecoder::SubmitFrame(MediaRawDat
   AutoCFRelease<CFDictionaryRef> frameInfo =
     CFDictionaryCreate(kCFAllocatorDefault,
                        keys,
                        values,
                        ArrayLength(keys),
                        &kCFTypeDictionaryKeyCallBacks,
                        &kCFTypeDictionaryValueCallBacks);
 
+  mQueuedSamples++;
+
   OSStatus rv = VDADecoderDecode(mDecoder,
                                  0,
                                  block,
                                  frameInfo);
 
   if (rv != noErr) {
     NS_WARNING("AppleVDADecoder: Couldn't pass frame to decoder");
     mCallback->Error();
@@ -498,17 +514,17 @@ AppleVDADecoder::SubmitFrame(MediaRawDat
     // This dictionary can contain client provided information associated with
     // the frame being decoded, for example presentation time.
     // The CFDictionaryRef will be retained by the framework.
     // In 10.6, it is released one too many. So retain it.
     CFRetain(frameInfo);
   }
 
   // Ask for more data.
-  if (!mInputIncoming) {
+  if (!mInputIncoming && mQueuedSamples <= mMaxRefFrames) {
     LOG("AppleVDADecoder task queue empty; requesting more data");
     mCallback->InputExhausted();
   }
 
   return NS_OK;
 }
 
 nsresult
--- a/dom/media/platforms/apple/AppleVDADecoder.h
+++ b/dom/media/platforms/apple/AppleVDADecoder.h
@@ -117,16 +117,21 @@ protected:
   // Increased when Input is called, and decreased when ProcessFrame runs.
   // Reaching 0 indicates that there's no pending Input.
   Atomic<uint32_t> mInputIncoming;
   Atomic<bool> mIsShutDown;
 
   bool mUseSoftwareImages;
   bool mIs106;
 
+  // Number of times a sample was queued via Input(). Will be decreased upon
+  // the decoder's callback being invoked.
+  // This is used to calculate how many frames has been buffered by the decoder.
+  uint32_t mQueuedSamples;
+
   // For wait on mIsFlushing during Shutdown() process.
   Monitor mMonitor;
   // Set on reader/decode thread calling Flush() to indicate that output is
   // not required and so input samples on mTaskQueue need not be processed.
   // Cleared on mTaskQueue in ProcessDrain().
   Atomic<bool> mIsFlushing;
 
 private:
--- a/dom/media/platforms/apple/AppleVTDecoder.cpp
+++ b/dom/media/platforms/apple/AppleVTDecoder.cpp
@@ -157,24 +157,23 @@ PlatformCallback(void* decompressionOutp
   AppleVTDecoder* decoder =
     static_cast<AppleVTDecoder*>(decompressionOutputRefCon);
   nsAutoPtr<AppleVTDecoder::AppleFrameRef> frameRef(
     static_cast<AppleVTDecoder::AppleFrameRef*>(sourceFrameRefCon));
 
   // Validate our arguments.
   if (status != noErr || !image) {
     NS_WARNING("VideoToolbox decoder returned no data");
-    return;
-  }
-  if (flags & kVTDecodeInfo_FrameDropped) {
+    image = nullptr;
+  } else if (flags & kVTDecodeInfo_FrameDropped) {
     NS_WARNING("  ...frame tagged as dropped...");
+  } else {
+    MOZ_ASSERT(CFGetTypeID(image) == CVPixelBufferGetTypeID(),
+      "VideoToolbox returned an unexpected image type");
   }
-  MOZ_ASSERT(CFGetTypeID(image) == CVPixelBufferGetTypeID(),
-    "VideoToolbox returned an unexpected image type");
-
   nsCOMPtr<nsIRunnable> task =
     NS_NewRunnableMethodWithArgs<CFRefPtr<CVPixelBufferRef>, AppleVTDecoder::AppleFrameRef>(
       decoder, &AppleVTDecoder::OutputFrame, image, *frameRef);
   decoder->DispatchOutputTask(task.forget());
 }
 
 nsresult
 AppleVTDecoder::WaitForAsynchronousFrames()
@@ -232,32 +231,34 @@ AppleVTDecoder::SubmitFrame(MediaRawData
   }
   CMSampleTimingInfo timestamp = TimingInfoFromSample(aSample);
   rv = CMSampleBufferCreate(kCFAllocatorDefault, block, true, 0, 0, mFormat, 1, 1, &timestamp, 0, NULL, sample.receive());
   if (rv != noErr) {
     NS_ERROR("Couldn't create CMSampleBuffer");
     return NS_ERROR_FAILURE;
   }
 
+  mQueuedSamples++;
+
   VTDecodeFrameFlags decodeFlags =
     kVTDecodeFrame_EnableAsynchronousDecompression;
   rv = VTDecompressionSessionDecodeFrame(mSession,
                                          sample,
                                          decodeFlags,
                                          CreateAppleFrameRef(aSample),
                                          &infoFlags);
   if (rv != noErr && !(infoFlags & kVTDecodeInfo_FrameDropped)) {
     LOG("AppleVTDecoder: Error %d VTDecompressionSessionDecodeFrame", rv);
     NS_WARNING("Couldn't pass frame to decoder");
     mCallback->Error();
     return NS_ERROR_FAILURE;
   }
 
   // Ask for more data.
-  if (!mInputIncoming) {
+  if (!mInputIncoming && mQueuedSamples <= mMaxRefFrames) {
     LOG("AppleVTDecoder task queue empty; requesting more data");
     mCallback->InputExhausted();
   }
 
   return NS_OK;
 }
 
 nsresult