Bug 984698 - Reduce the amount of audio prerolled when we are not also decoding video in the same file. r=padenot
author Chris Pearce <cpearce@mozilla.com>
Fri, 21 Mar 2014 11:47:17 +1300
changeset 186684 e076f1fb825bf12cd0a71f15134293668b3ef8be
parent 186683 3f80f6fa65b6fa5203ad3c43360e79a08a1926b6
child 186685 de535cd27ee74fbeb5d8c54ed9fdabc260e8b435
push id unknown
push user unknown
push date unknown
Bug 984698 - Reduce the amount of audio prerolled when we are not also decoding video in the same file. r=padenot
--- a/content/media/MediaDecoderStateMachine.cpp
+++ b/content/media/MediaDecoderStateMachine.cpp
@@ -69,16 +69,23 @@ static const uint32_t BUFFERING_WAIT_S =
 static const uint32_t LOW_AUDIO_USECS = 300000;
 // If more than this many usecs of decoded audio is queued, we'll hold off
 // decoding more audio. If we increase the low audio threshold (see
 // LOW_AUDIO_USECS above) we'll also increase this value to ensure it's not
 // less than the low audio threshold.
 const int64_t AMPLE_AUDIO_USECS = 1000000;
+// When we're only playing audio and we don't have a video stream, we divide
+// AMPLE_AUDIO_USECS and LOW_AUDIO_USECS by the following value. This reduces
+// the amount of decoded audio we buffer, reducing our memory usage. We only
+// need to decode far ahead when we're decoding video using software decoding,
+// as otherwise a long video decode could cause an audio underrun.
+const int64_t NO_VIDEO_AMPLE_AUDIO_DIVISOR = 8;
 // Maximum number of bytes we'll allocate and write at once to the audio
 // hardware when the audio stream contains missing frames and we're
 // writing silence in order to fill the gap. We limit our silence-writes
 // to 32KB in order to avoid allocating an impossibly large chunk of
 // memory if we encounter a large chunk of silence.
 const uint32_t SILENCE_BYTES_CHUNK = 32 * 1024;
 // If we have fewer than LOW_VIDEO_FRAMES decoded frames, and
@@ -1851,16 +1858,24 @@ nsresult MediaDecoderStateMachine::Decod
               "Active seekable media should have end time");
   MOZ_ASSERT(!(mMediaSeekable && mTransportSeekable) ||
              GetDuration() != -1, "Seekable media should have duration");
   DECODER_LOG(PR_LOG_DEBUG, ("%p Media goes from %lld to %lld (duration %lld)"
                              " transportSeekable=%d, mediaSeekable=%d",
                              mDecoder.get(), mStartTime, mEndTime, GetDuration(),
                              mTransportSeekable, mMediaSeekable));
+  if (HasAudio() && !HasVideo()) {
+    // We're playing audio only. We don't need to worry about slow video
+    // decodes causing audio underruns, so don't buffer so much audio in
+    // order to reduce memory usage.
+    mAmpleAudioThresholdUsecs /= NO_VIDEO_AMPLE_AUDIO_DIVISOR;
+    mLowAudioThresholdUsecs /= NO_VIDEO_AMPLE_AUDIO_DIVISOR;
+  }
   // Inform the element that we've loaded the metadata and the first frame,
   // setting the default framebuffer size for audioavailable events.  Also,
   // if there is audio, let the MozAudioAvailable event manager know about
   // the metadata.
   if (HasAudio()) {
     mEventManager.Init(mInfo.mAudio.mChannels, mInfo.mAudio.mRate);
     // Set the buffer length at the decoder level to be able, to be able
     // to retrive the value via media element method. The RequestFrameBufferLength