Bug 1726061 - Make mLiveBufferingAppended optional r=padenot,pehrsons
author: Chun-Min Chang <chun.m.chang@gmail.com>
Wed, 18 Aug 2021 19:22:04 +0000
changeset 589247 eb3bb3f300086ebac66d221be986bb04413f9e47
parent 589246 a3c678c2c608afddeec401b0a975d2dcdc90828d
child 589248 5f2bcfe6f959b997c7a28982e4e01ca9ddb50016
push id: 148243
push user: cchang@mozilla.com
push date: Wed, 18 Aug 2021 19:24:29 +0000
treeherder: autoland@eb3bb3f30008 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: padenot, pehrsons
bugs: 1726061
milestone: 93.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1726061 - Make mLiveBufferingAppended optional r=padenot,pehrsons `mLiveBufferingAppended` [1] can naturally be wrapped in a `Maybe`, since it should only be used when `mLiveFramesAppended` [2] is `true`. Doing so makes it easier to ensure `mLiveBufferingAppended` is only used at the appropriate time, instead of having to check `mLiveFramesAppended` every time before using `mLiveBufferingAppended`. [1] https://searchfox.org/mozilla-central/rev/d3683dbb252506400c71256ef3994cdbdfb71ada/dom/media/webrtc/MediaEngineWebRTCAudio.h#236 [2] https://searchfox.org/mozilla-central/rev/d3683dbb252506400c71256ef3994cdbdfb71ada/dom/media/webrtc/MediaEngineWebRTCAudio.h#230 Differential Revision: https://phabricator.services.mozilla.com/D122797
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
dom/media/webrtc/MediaEngineWebRTCAudio.h
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -616,18 +616,17 @@ void MediaEngineWebRTCMicrophoneSource::
 }
 
 AudioInputProcessing::AudioInputProcessing(
     uint32_t aMaxChannelCount, const PrincipalHandle& aPrincipalHandle)
     : mAudioProcessing(AudioProcessing::Create()),
       mRequestedInputChannelCount(aMaxChannelCount),
       mSkipProcessing(false),
       mInputDownmixBuffer(MAX_SAMPLING_FREQ * MAX_CHANNELS / 100),
-      mLiveFramesAppended(false),
-      mLiveBufferingAppended(0),
+      mLiveBufferingAppended(Nothing()),
       mPrincipal(aPrincipalHandle),
       mEnabled(false),
       mEnded(false) {}
 
 void AudioInputProcessing::Disconnect(MediaTrackGraphImpl* aGraph) {
   // This method is just for asserts.
   MOZ_ASSERT(aGraph->OnGraphThread());
 }
@@ -758,17 +757,17 @@ void AudioInputProcessing::UpdateAPMExtr
       new webrtc::ExtendedFilter(aExtendedFilter));
   config.Set<webrtc::DelayAgnostic>(new webrtc::DelayAgnostic(aDelayAgnostic));
 
   mAudioProcessing->SetExtraOptions(config);
 }
 
 void AudioInputProcessing::Start() {
   mEnabled = true;
-  mLiveFramesAppended = false;
+  mLiveBufferingAppended = Nothing();
 }
 
 void AudioInputProcessing::Stop() { mEnabled = false; }
 
 void AudioInputProcessing::Pull(MediaTrackGraphImpl* aGraph, GraphTime aFrom,
                                 GraphTime aTo, GraphTime aTrackEnd,
                                 AudioSegment* aSegment,
                                 bool aLastPullThisIteration, bool* aEnded) {
@@ -800,73 +799,73 @@ void AudioInputProcessing::Pull(MediaTra
     // accomodate the buffering worst-case.
     buffering += mPacketizerInput->mPacketSize;
   }
 
   if (delta <= 0) {
     return;
   }
 
-  if (MOZ_LIKELY(mLiveFramesAppended)) {
-    if (MOZ_UNLIKELY(buffering > mLiveBufferingAppended)) {
+  if (MOZ_LIKELY(mLiveBufferingAppended)) {
+    if (MOZ_UNLIKELY(buffering > *mLiveBufferingAppended)) {
       // We need to buffer more data. This could happen the first time we pull
       // input data, or the first iteration after starting to use the
       // packetizer.
+      TrackTime silence = buffering - *mLiveBufferingAppended;
       LOG_FRAME("AudioInputProcessing %p Inserting %" PRId64
                 " frames of silence due to buffer increase",
-                this, buffering - mLiveBufferingAppended);
-      mSegment.InsertNullDataAtStart(buffering - mLiveBufferingAppended);
-      mLiveBufferingAppended = buffering;
-    } else if (MOZ_UNLIKELY(buffering < mLiveBufferingAppended)) {
+                this, silence);
+      mSegment.InsertNullDataAtStart(silence);
+      mLiveBufferingAppended = Some(buffering);
+    } else if (MOZ_UNLIKELY(buffering < *mLiveBufferingAppended)) {
       // We need to clear some buffered data to reduce latency now that the
       // packetizer is no longer used.
       MOZ_ASSERT(PassThrough(aGraph), "Must have turned on passthrough");
-      MOZ_ASSERT(mSegment.GetDuration() >=
-                 (mLiveBufferingAppended - buffering));
-      TrackTime frames =
-          std::min(mSegment.GetDuration(), mLiveBufferingAppended - buffering);
+      TrackTime removal = *mLiveBufferingAppended - buffering;
+      MOZ_ASSERT(mSegment.GetDuration() >= removal);
+      TrackTime frames = std::min(mSegment.GetDuration(), removal);
       LOG_FRAME("AudioInputProcessing %p Removing %" PRId64
                 " frames of silence due to buffer decrease",
                 this, frames);
-      mLiveBufferingAppended -= frames;
+      *mLiveBufferingAppended -= frames;
       mSegment.RemoveLeading(frames);
     }
   }
 
   if (mSegment.GetDuration() > 0) {
-    MOZ_ASSERT(buffering == mLiveBufferingAppended);
+    MOZ_ASSERT(buffering == *mLiveBufferingAppended);
     TrackTime frames = std::min(mSegment.GetDuration(), delta);
     LOG_FRAME("AudioInputProcessing %p Appending %" PRId64
               " frames of real data for %u channels.",
               this, frames, mRequestedInputChannelCount);
     aSegment->AppendSlice(mSegment, 0, frames);
     mSegment.RemoveLeading(frames);
     delta -= frames;
 
     // Assert that the amount of data buffered doesn't grow unboundedly.
     MOZ_ASSERT_IF(aLastPullThisIteration, mSegment.GetDuration() <= buffering);
   }
 
   if (delta <= 0) {
     if (mSegment.GetDuration() == 0) {
-      mLiveBufferingAppended = -delta;
+      mLiveBufferingAppended = Some(-delta);
     }
     return;
   }
 
   LOG_FRAME("AudioInputProcessing %p Pulling %" PRId64
             " frames of silence for %u channels.",
             this, delta, mRequestedInputChannelCount);
 
   // This assertion fails if we append silence here after having appended live
   // frames. Before appending live frames we should add sufficient buffering to
   // not have to glitch (aka append silence). Failing this meant the buffering
   // was not sufficient.
-  MOZ_ASSERT_IF(mEnabled, !mLiveFramesAppended);
-  mLiveBufferingAppended = 0;
+  MOZ_ASSERT_IF(mEnabled, !mLiveBufferingAppended);
+  mLiveBufferingAppended = Nothing();
 
   aSegment->AppendNullData(delta);
 }
 
 void AudioInputProcessing::NotifyOutputData(MediaTrackGraphImpl* aGraph,
                                             BufferInfo aInfo) {
   MOZ_ASSERT(aGraph->OnGraphThread());
   MOZ_ASSERT(mEnabled);
@@ -1078,17 +1077,17 @@ void AudioInputProcessing::PacketizeAndP
   }
 }
 
 void AudioInputProcessing::ProcessInput(MediaTrackGraphImpl* aGraph,
                                         const AudioSegment* aSegment) {
   MOZ_ASSERT(aGraph);
   MOZ_ASSERT(aGraph->OnGraphThread());
 
-  if (mEnded || !mEnabled || !mLiveFramesAppended || !mInputData) {
+  if (mEnded || !mEnabled || !mLiveBufferingAppended || !mInputData) {
     return;
   }
 
   // One NotifyInputData might have multiple following ProcessInput calls, but
   // we only process one input per NotifyInputData call.
   BufferInfo inputInfo = mInputData.extract();
 
   // If some processing is necessary, packetize and insert in the WebRTC.org
@@ -1109,17 +1108,17 @@ void AudioInputProcessing::ProcessInput(
 }
 
 void AudioInputProcessing::NotifyInputStopped(MediaTrackGraphImpl* aGraph) {
   MOZ_ASSERT(aGraph->OnGraphThread());
   // This is called when an AudioCallbackDriver switch has happened for any
   // reason, including other reasons than starting this audio input stream. We
   // reset state when this happens, as a fallback driver may have fiddled with
   // the amount of buffered silence during the switch.
-  mLiveFramesAppended = false;
+  mLiveBufferingAppended = Nothing();
   mSegment.Clear();
   if (mPacketizerInput) {
     mPacketizerInput->Clear();
   }
   mInputData.take();
 }
 
 // Called back on GraphDriver thread!
@@ -1127,21 +1126,20 @@ void AudioInputProcessing::NotifyInputSt
 void AudioInputProcessing::NotifyInputData(MediaTrackGraphImpl* aGraph,
                                            const BufferInfo aInfo,
                                            uint32_t aAlreadyBuffered) {
   MOZ_ASSERT(aGraph->OnGraphThread());
   TRACE();
 
   MOZ_ASSERT(mEnabled);
 
-  if (!mLiveFramesAppended) {
+  if (!mLiveBufferingAppended) {
     // First time we see live frames getting added. Use what's already buffered
     // in the driver's scratch buffer as a starting point.
-    mLiveFramesAppended = true;
-    mLiveBufferingAppended = aAlreadyBuffered;
+    mLiveBufferingAppended = Some(aAlreadyBuffered);
   }
 
   mInputData = Some(aInfo);
 }
 
 #define ResetProcessingIfNeeded(_processing)                         \
   do {                                                               \
     bool enabled = mAudioProcessing->_processing()->is_enabled();    \
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.h
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.h
@@ -220,25 +220,23 @@ class AudioInputProcessing : public Audi
   // Stores the input audio, to be processed by the APM.
   AlignedFloatBuffer mInputBuffer;
   // Stores the deinterleaved microphone audio
   AlignedFloatBuffer mDeinterleavedBuffer;
   // Stores the mixed down input audio
   AlignedFloatBuffer mInputDownmixBuffer;
   // Stores data waiting to be pulled.
   AudioSegment mSegment;
-  // Set to false by Start(). Becomes true after the first time we append real
-  // audio frames from the audio callback.
-  bool mLiveFramesAppended;
-  // Once live frames have been appended, this is the number of frames appended
-  // as pre-buffer for that data, to avoid underruns. Buffering in the track
-  // might be needed because of the AUDIO_BLOCK interval at which we run the
-  // graph, the packetizer keeping some input data. Care must be taken when
-  // turning on and off the packetizer.
-  TrackTime mLiveBufferingAppended;
+  // Set to Nothing() by Start(). Once live frames have been appended from the
+  // audio callback, this is the number of frames appended as pre-buffer for
+  // that data, to avoid underruns. Buffering in the track might be needed
+  // because of the AUDIO_BLOCK interval at which we run the graph, the
+  // packetizer keeping some input data. Care must be taken when turning on and
+  // off the packetizer.
+  Maybe<TrackTime> mLiveBufferingAppended;
   // Principal for the data that flows through this class.
   const PrincipalHandle mPrincipal;
   // Whether or not this MediaEngine is enabled. If it's not enabled, it
   // operates in "pull" mode, and we append silence only, releasing the audio
   // input track.
   bool mEnabled;
   // Whether or not we've ended and removed the AudioInputTrack.
   bool mEnded;