Bug 1499426 - Align the lifetime of AudioInputProcessing with the lifetime of MediaEngineWebRTCAudio. r=achronop
author Paul Adenot <paul@paul.cx>
Mon, 14 Jan 2019 15:09:34 +0000
changeset 453748 2f4288c49610e004e668f6065a54ca591ac7d215
parent 453747 4a87188c418ffdb7f339203fa17c9abbe9191915
child 453749 ff87d87996fda94c59817084578291d2e3ce16c9
push id 35372
push user cbrindusan@mozilla.com
push date Mon, 14 Jan 2019 21:49:33 +0000
treeherder mozilla-central@50b3268954b1
reviewers achronop
bugs 1499426
milestone 66.0a1
Bug 1499426 - Align the lifetime of AudioInputProcessing with the lifetime of MediaEngineWebRTCAudio. r=achronop

Differential Revision: https://phabricator.services.mozilla.com/D16201
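A minimal sketch of the lifetime change, using simplified stand-in types (MicrophoneSource here is a hypothetical stand-in, not the real MediaEngineWebRTCMicrophoneSource): the AudioInputProcessing instance is now created in SetTrack(), when the source is bound to its track, rather than in Start(), so one instance survives across Start()/Stop() cycles and Start() re-arms the per-session state itself:

#include <cstdio>
#include <memory>

struct AudioInputProcessing {
  bool mEnabled = false;
  bool mLiveFramesAppended = false;
  bool mLiveSilenceAppended = false;

  void Start() {
    mEnabled = true;
    // As in the patch: a restart clears the per-session append state,
    // so a reused instance behaves like a freshly created one.
    mLiveFramesAppended = false;
    mLiveSilenceAppended = false;
  }
  void Stop() { mEnabled = false; }
};

struct MicrophoneSource {
  std::unique_ptr<AudioInputProcessing> mInputProcessing;

  void SetTrack() {
    // After the patch: allocated here, alongside track registration,
    // so its lifetime matches the source rather than one capture session.
    mInputProcessing = std::make_unique<AudioInputProcessing>();
  }
  void Start() { mInputProcessing->Start(); }
  void Stop() { mInputProcessing->Stop(); }
};

int main() {
  MicrophoneSource source;
  source.SetTrack();   // processing object exists from here on
  source.Start();
  source.Stop();
  source.Start();      // restart reuses the same object; Start() re-arms it
  std::printf("enabled=%d\n", source.mInputProcessing->mEnabled ? 1 : 0);
  return 0;
}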
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -515,16 +515,19 @@ void MediaEngineWebRTCMicrophoneSource::
   mTrackID = aTrackID;
   mPrincipal = aPrincipal;
 
   AudioSegment* segment = new AudioSegment();
 
   aStream->AddAudioTrack(aTrackID, aStream->GraphRate(), segment,
                          SourceMediaStream::ADDTRACK_QUEUED);
 
+  mInputProcessing = new AudioInputProcessing(mDeviceMaxChannelCount, mStream,
+                                              mTrackID, mPrincipal);
+
   LOG("Stream %p registered for microphone capture", aStream.get());
 }
 
 class StartStopMessage : public ControlMessage {
  public:
   enum StartStop { Start, Stop };
 
   StartStopMessage(AudioInputProcessing* aInputProcessing, StartStop aAction)
@@ -561,19 +564,16 @@ nsresult MediaEngineWebRTCMicrophoneSour
   CubebUtils::AudioDeviceID deviceID = mDeviceInfo->DeviceID();
   if (mStream->GraphImpl()->InputDeviceID() &&
       mStream->GraphImpl()->InputDeviceID() != deviceID) {
     // For now, we only allow opening a single audio input device per document,
     // because we can only have one MSG per document.
     return NS_ERROR_FAILURE;
   }
 
-  mInputProcessing = new AudioInputProcessing(mDeviceMaxChannelCount, mStream,
-                                              mTrackID, mPrincipal);
-
   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
   NS_DispatchToMainThread(media::NewRunnableFrom(
       [that, deviceID, stream = mStream, track = mTrackID]() {
         if (stream->IsDestroyed()) {
           return NS_OK;
         }
 
         stream->GraphImpl()->AppendMessage(MakeUnique<StartStopMessage>(
@@ -771,17 +771,24 @@ void AudioInputProcessing::UpdateAPMExtr
   webrtc::Config config;
   config.Set<webrtc::ExtendedFilter>(
       new webrtc::ExtendedFilter(aExtendedFilter));
   config.Set<webrtc::DelayAgnostic>(new webrtc::DelayAgnostic(aDelayAgnostic));
 
   mAudioProcessing->SetExtraOptions(config);
 }
 
-void AudioInputProcessing::Start() { mEnabled = true; }
+void AudioInputProcessing::Start() {
+  mEnabled = true;
+  mLiveFramesAppended = false;
+  mLiveSilenceAppended = false;
+#ifdef DEBUG
+  mLastCallbackAppendTime = 0;
+#endif
+}
 
 void AudioInputProcessing::Stop() { mEnabled = false; }
 
 void AudioInputProcessing::Pull(const RefPtr<SourceMediaStream>& aStream,
                                 TrackID aTrackID, StreamTime aEndOfAppendedData,
                                 StreamTime aDesiredTime,
                                 const PrincipalHandle& aPrincipalHandle) {
   TRACE_AUDIO_CALLBACK_COMMENT("SourceMediaStream %p track %i", aStream.get(),