Bug 1487057 - Part 8 - Split MediaEngineWebRTCMicrophoneSource into two classes, one for control, one for processing. r=pehrsons
author Paul Adenot <paul@paul.cx>
Fri, 07 Sep 2018 16:53:23 +0200
changeset 496739 45d2c462dc92c43eed9582c1b309f8df0c66b37f
parent 496738 273c92182c3cccd9ad6ef2abaf30d11fd42b6aea
child 496740 e1a790218e20b97d32957965379423a67196f54b
push id 9984
push user ffxbld-merge
push date Mon, 15 Oct 2018 21:07:35 +0000
treeherder mozilla-beta@183d27ea8570
reviewers pehrsons
bugs 1487057
milestone 64.0a1
Bug 1487057 - Part 8 - Split MediaEngineWebRTCMicrophoneSource into two classes, one for control, one for processing. r=pehrsons

Big but not complex:
- Remove the mutex.
- Move all MSG-thread code to a new class (AudioInputProcessing).
- Remove the WebRTCAudioDataListener class; AudioInputProcessing is now the listener itself.
- Use message passing for all modifications to the AudioInputProcessing instance.

Differential Revision: https://phabricator.services.mozilla.com/D5442
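The same dispatch pattern recurs throughout this patch: the control class asserts it is on its owning (MediaManager) thread, hops to the main thread, and from there appends a ControlMessage that the MSG thread runs against the AudioInputProcessing instance. Below is a minimal sketch of that pattern, condensed from the setters in this patch; SetSomeSetting and the boolean it carries are hypothetical stand-ins for the real AEC/AGC/NS/APM updates.

    // Sketch only: SetSomeSetting is a hypothetical setter, not part of this
    // patch; the shape matches UpdateAECSettingsIfNeeded and friends below.
    void
    MediaEngineWebRTCMicrophoneSource::SetSomeSetting(bool aValue)
    {
      AssertIsOnOwningThread();

      RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
      RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
      // Hop to the main thread, which is where graph control messages are
      // appended.
      NS_DispatchToMainThread(media::NewRunnableFrom(
        [ that, graph = std::move(gripGraph), aValue ]() mutable {
          class Message : public ControlMessage
          {
          public:
            Message(AudioInputProcessing* aInputProcessing, bool aValue)
              : ControlMessage(nullptr)
              , mInputProcessing(aInputProcessing)
              , mValue(aValue)
            {
            }

            void Run() override
            {
              // Runs on the MSG thread, the only thread that may touch
              // AudioInputProcessing state.
              mInputProcessing->SetSomeSetting(mValue);
            }

          protected:
            RefPtr<AudioInputProcessing> mInputProcessing;
            bool mValue;
          };

          if (graph) {
            graph->AppendMessage(
              MakeUnique<Message>(that->mInputProcessing, aValue));
          }

          return NS_OK;
        }));
    }

This indirection is what lets the patch drop the mutex: every mutation of AudioInputProcessing runs on the MSG thread, ordered by the graph's message queue.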
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
dom/media/webrtc/MediaEngineWebRTCAudio.h
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -45,101 +45,43 @@ namespace mozilla {
 #ifdef LOG
 #undef LOG
 #endif
 
 LogModule* GetMediaManagerLog();
 #define LOG(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Debug, msg)
 #define LOG_FRAMES(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Verbose, msg)
 
-void
-WebRTCAudioDataListener::NotifyOutputData(MediaStreamGraphImpl* aGraph,
-                                          AudioDataValue* aBuffer,
-                                          size_t aFrames,
-                                          TrackRate aRate,
-                                          uint32_t aChannels)
-{
-  MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
-  if (mAudioSource) {
-    mAudioSource->NotifyOutputData(aGraph, aBuffer, aFrames, aRate, aChannels);
-  }
-}
-
-void
-WebRTCAudioDataListener::NotifyInputData(MediaStreamGraphImpl* aGraph,
-                                         const AudioDataValue* aBuffer,
-                                         size_t aFrames,
-                                         TrackRate aRate,
-                                         uint32_t aChannels)
-{
-  MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
-  if (mAudioSource) {
-    mAudioSource->NotifyInputData(aGraph, aBuffer, aFrames, aRate, aChannels);
-  }
-}
-
-void
-WebRTCAudioDataListener::DeviceChanged(MediaStreamGraphImpl* aGraph)
-{
-  MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
-  if (mAudioSource) {
-    mAudioSource->DeviceChanged(aGraph);
-  }
-}
-
-uint32_t
-WebRTCAudioDataListener::RequestedInputChannelCount(MediaStreamGraphImpl* aGraph)
-{
-  MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
-  if (mAudioSource) {
-    return mAudioSource->RequestedInputChannelCount(aGraph);
-  }
-  return 0;
-}
-
-void
-WebRTCAudioDataListener::Disconnect(MediaStreamGraphImpl* aGraph)
-{
-  MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
-  if (mAudioSource) {
-    mAudioSource->Disconnect(aGraph);
-    mAudioSource = nullptr;
-  }
-}
-
 /**
  * WebRTC Microphone MediaEngineSource.
  */
 
 MediaEngineWebRTCMicrophoneSource::MediaEngineWebRTCMicrophoneSource(
-    RefPtr<AudioDeviceInfo> aInfo,
-    const nsString& aDeviceName,
-    const nsCString& aDeviceUUID,
-    uint32_t aMaxChannelCount,
-    bool aDelayAgnostic,
-    bool aExtendedFilter)
+  RefPtr<AudioDeviceInfo> aInfo,
+  const nsString& aDeviceName,
+  const nsCString& aDeviceUUID,
+  uint32_t aMaxChannelCount,
+  bool aDelayAgnostic,
+  bool aExtendedFilter)
   : mTrackID(TRACK_NONE)
   , mPrincipal(PRINCIPAL_HANDLE_NONE)
+  , mEnabled(false)
   , mDeviceInfo(std::move(aInfo))
   , mDelayAgnostic(aDelayAgnostic)
   , mExtendedFilter(aExtendedFilter)
   , mDeviceName(aDeviceName)
   , mDeviceUUID(aDeviceUUID)
+  , mDeviceMaxChannelCount(aMaxChannelCount)
   , mSettings(
       new nsMainThreadPtrHolder<media::Refcountable<dom::MediaTrackSettings>>(
         "MediaEngineWebRTCMicrophoneSource::mSettings",
         new media::Refcountable<dom::MediaTrackSettings>(),
         // Non-strict means it won't assert main thread for us.
         // It would be great if it did but we're already on the media thread.
         /* aStrict = */ false))
-  , mMutex("WebRTCMic::Mutex")
-  , mAudioProcessing(AudioProcessing::Create())
-  , mRequestedInputChannelCount(aMaxChannelCount)
-  , mSkipProcessing(false)
-  , mInputDownmixBuffer(MAX_SAMPLING_FREQ * MAX_CHANNELS / 100)
 {
 #ifndef ANDROID
   MOZ_ASSERT(mDeviceInfo->DeviceID());
 #endif
 
   // We'll init lazily as needed
   mSettings->mEchoCancellation.Construct(0);
   mSettings->mAutoGainControl.Construct(0);
@@ -255,161 +197,33 @@ MediaEngineWebRTCMicrophoneSource::Recon
     return NS_ERROR_UNEXPECTED;
   }
 
   ApplySettings(mNetPrefs, mStream->GraphImpl());
 
   return NS_OK;
 }
 
+void MediaEngineWebRTCMicrophoneSource::Pull(const RefPtr<const AllocationHandle>& aHandle,
+                                             const RefPtr<SourceMediaStream>& aStream,
+                                             TrackID aTrackID,
+                                             StreamTime aDesiredTime,
+                                             const PrincipalHandle& aPrincipalHandle)
+{
+  // If pull is enabled, the audio input is not open, and we should fill the
+  // track with silence. This is the only method of this class that is called
+  // on the MSG thread.
+  mInputProcessing->Pull(aHandle, aStream, aTrackID, aDesiredTime, aPrincipalHandle);
+}
+
 bool operator == (const MediaEnginePrefs& a, const MediaEnginePrefs& b)
 {
   return !memcmp(&a, &b, sizeof(MediaEnginePrefs));
 };
 
-// This does an early return in case of error.
-#define HANDLE_APM_ERROR(fn)                                \
-do {                                                        \
-  int rv = fn;                                              \
-  if (rv != AudioProcessing::kNoError) {                    \
-    MOZ_ASSERT_UNREACHABLE("APM error in " #fn);            \
-    return;                                                 \
-  }                                                         \
-} while(0);
-
-void MediaEngineWebRTCMicrophoneSource::UpdateAECSettingsIfNeeded(bool aEnable, EcModes aMode)
-{
-  AssertIsOnOwningThread();
-
-  using webrtc::EcModes;
-
-  EchoCancellation::SuppressionLevel level;
-
-  switch(aMode) {
-    case EcModes::kEcUnchanged:
-      level = mAudioProcessing->echo_cancellation()->suppression_level();
-      break;
-    case EcModes::kEcConference:
-      level = EchoCancellation::kHighSuppression;
-      break;
-    case EcModes::kEcDefault:
-      level = EchoCancellation::kModerateSuppression;
-      break;
-    case EcModes::kEcAec:
-      level = EchoCancellation::kModerateSuppression;
-      break;
-    case EcModes::kEcAecm:
-      // No suppression level to set for the mobile echo canceller
-      break;
-    default:
-      MOZ_LOG(GetMediaManagerLog(), LogLevel::Error, ("Bad EcMode value"));
-      MOZ_ASSERT_UNREACHABLE("Bad pref set in all.js or in about:config"
-                             " for the echo cancelation mode.");
-      // fall back to something sensible in release
-      level = EchoCancellation::kModerateSuppression;
-      break;
-  }
-
-  // AECm and AEC are mutually exclusive.
-  if (aMode == EcModes::kEcAecm) {
-    HANDLE_APM_ERROR(mAudioProcessing->echo_cancellation()->Enable(false));
-    HANDLE_APM_ERROR(mAudioProcessing->echo_control_mobile()->Enable(aEnable));
-  } else {
-    HANDLE_APM_ERROR(mAudioProcessing->echo_control_mobile()->Enable(false));
-    HANDLE_APM_ERROR(mAudioProcessing->echo_cancellation()->Enable(aEnable));
-    HANDLE_APM_ERROR(mAudioProcessing->echo_cancellation()->set_suppression_level(level));
-  }
-}
-
-void
-MediaEngineWebRTCMicrophoneSource::UpdateAGCSettingsIfNeeded(bool aEnable, AgcModes aMode)
-{
-  AssertIsOnOwningThread();
-
-#if defined(WEBRTC_IOS) || defined(ATA) || defined(WEBRTC_ANDROID)
-  if (aMode == kAgcAdaptiveAnalog) {
-    MOZ_LOG(GetMediaManagerLog(),
-            LogLevel::Error,
-            ("Invalid AGC mode kAgcAdaptiveAnalog on mobile"));
-    MOZ_ASSERT_UNREACHABLE("Bad pref set in all.js or in about:config"
-                           " for the auto gain, on mobile.");
-    aMode = kAgcDefault;
-  }
-#endif
-  GainControl::Mode mode = kDefaultAgcMode;
-
-  switch (aMode) {
-    case AgcModes::kAgcDefault:
-      mode = kDefaultAgcMode;
-      break;
-    case AgcModes::kAgcUnchanged:
-      mode = mAudioProcessing->gain_control()->mode();
-      break;
-    case AgcModes::kAgcFixedDigital:
-      mode = GainControl::Mode::kFixedDigital;
-      break;
-    case AgcModes::kAgcAdaptiveAnalog:
-      mode = GainControl::Mode::kAdaptiveAnalog;
-      break;
-    case AgcModes::kAgcAdaptiveDigital:
-      mode = GainControl::Mode::kAdaptiveDigital;
-      break;
-    default:
-      MOZ_ASSERT_UNREACHABLE("Bad pref set in all.js or in about:config"
-                             " for the auto gain.");
-      // This is a good fallback, it works regardless of the platform.
-      mode = GainControl::Mode::kAdaptiveDigital;
-      break;
-  }
-
-  HANDLE_APM_ERROR(mAudioProcessing->gain_control()->set_mode(mode));
-  HANDLE_APM_ERROR(mAudioProcessing->gain_control()->Enable(aEnable));
-}
-
-void
-MediaEngineWebRTCMicrophoneSource::UpdateNSSettingsIfNeeded(bool aEnable, NsModes aMode)
-{
-  AssertIsOnOwningThread();
-
-  NoiseSuppression::Level nsLevel;
-
-  switch (aMode) {
-    case NsModes::kNsDefault:
-      nsLevel = kDefaultNsMode;
-      break;
-    case NsModes::kNsUnchanged:
-      nsLevel = mAudioProcessing->noise_suppression()->level();
-      break;
-    case NsModes::kNsConference:
-      nsLevel = NoiseSuppression::kHigh;
-      break;
-    case NsModes::kNsLowSuppression:
-      nsLevel = NoiseSuppression::kLow;
-      break;
-    case NsModes::kNsModerateSuppression:
-      nsLevel = NoiseSuppression::kModerate;
-      break;
-    case NsModes::kNsHighSuppression:
-      nsLevel = NoiseSuppression::kHigh;
-      break;
-    case NsModes::kNsVeryHighSuppression:
-      nsLevel = NoiseSuppression::kVeryHigh;
-      break;
-    default:
-      MOZ_ASSERT_UNREACHABLE("Bad pref set in all.js or in about:config"
-                             " for the noise suppression.");
-      // Pick something sensible as a faillback in release.
-      nsLevel = NoiseSuppression::kModerate;
-  }
-  HANDLE_APM_ERROR(mAudioProcessing->noise_suppression()->set_level(nsLevel));
-  HANDLE_APM_ERROR(mAudioProcessing->noise_suppression()->Enable(aEnable));
-}
-
-#undef HANDLE_APM_ERROR
-
 nsresult
 MediaEngineWebRTCMicrophoneSource::UpdateSingleSource(
     const RefPtr<const AllocationHandle>& aHandle,
     const NormalizedConstraints& aNetConstraints,
     const MediaEnginePrefs& aPrefs,
     const nsString& aDeviceId,
     const char** aOutBadConstraint)
 {
@@ -449,20 +263,17 @@ MediaEngineWebRTCMicrophoneSource::Updat
       prefs.mAecOn ? prefs.mAec : -1,
       prefs.mAgcOn ? prefs.mAgc : -1,
       prefs.mNoiseOn ? prefs.mNoise : -1,
       prefs.mChannels));
 
   switch (mState) {
     case kReleased:
       MOZ_ASSERT(aHandle);
-      {
-        MutexAutoLock lock(mMutex);
-        mState = kAllocated;
-      }
+      mState = kAllocated;
       LOG(("Audio device %s allocated", NS_ConvertUTF16toUTF8(mDeviceInfo->Name()).get()));
       break;
 
     case kStarted:
     case kStopped:
       if (prefs == mNetPrefs) {
         LOG(("UpdateSingleSource: new prefs for %s are the same as the current prefs, returning.",
              NS_ConvertUTF16toUTF8(mDeviceName).get()));
@@ -470,78 +281,205 @@ MediaEngineWebRTCMicrophoneSource::Updat
       }
       break;
 
     default:
       LOG(("Audio device %s in ignored state %d", NS_ConvertUTF16toUTF8(mDeviceInfo->Name()).get(), MediaEngineSourceState(mState)));
       break;
   }
 
-  if (mState != kReleased) {
+  if (mStream) {
     UpdateAGCSettingsIfNeeded(prefs.mAgcOn, static_cast<AgcModes>(prefs.mAgc));
     UpdateNSSettingsIfNeeded(prefs.mNoiseOn, static_cast<NsModes>(prefs.mNoise));
     UpdateAECSettingsIfNeeded(prefs.mAecOn, static_cast<EcModes>(prefs.mAec));
 
-    webrtc::Config config;
-    config.Set<webrtc::ExtendedFilter>(new webrtc::ExtendedFilter(mExtendedFilter));
-    config.Set<webrtc::DelayAgnostic>(new webrtc::DelayAgnostic(mDelayAgnostic));
-    mAudioProcessing->SetExtraOptions(config);
+    UpdateAPMExtraOptions(mExtendedFilter, mDelayAgnostic);
   }
   mNetPrefs = prefs;
   return NS_OK;
 }
 
-#undef HANDLE_APM_ERROR
-
-bool
-MediaEngineWebRTCMicrophoneSource::PassThrough(MediaStreamGraphImpl* aGraph) const
+void
+MediaEngineWebRTCMicrophoneSource::UpdateAECSettingsIfNeeded(
+  bool aEnable,
+  webrtc::EcModes aMode)
 {
-  MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
-  return mSkipProcessing;
-}
-void
-MediaEngineWebRTCMicrophoneSource::SetPassThrough(bool aPassThrough)
-{
-  {
-    MutexAutoLock lock(mMutex);
-    // mStream is always valid because it's set right before ::Start is called.
-    // SetPassThrough cannot be called before that, because it's running on the
-    // graph thread, and this cannot happen before the source has been started.
-    MOZ_ASSERT(mStream->GraphImpl()->CurrentDriver()->OnThread(),
-               "Wrong calling pattern, don't call this before ::SetTrack.");
-  }
-  mSkipProcessing = aPassThrough;
+  AssertIsOnOwningThread();
+
+  RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
+  RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
+  NS_DispatchToMainThread(media::NewRunnableFrom(
+    [ that, graph = std::move(gripGraph), aEnable, aMode ]() mutable {
+      class Message : public ControlMessage
+      {
+      public:
+        Message(AudioInputProcessing* aInputProcessing,
+                bool aEnable,
+                webrtc::EcModes aMode)
+          : ControlMessage(nullptr)
+          , mInputProcessing(aInputProcessing)
+          , mEnable(aEnable)
+          , mMode(aMode)
+        {
+        }
+
+        void Run() override
+        {
+          mInputProcessing->UpdateAECSettingsIfNeeded(mEnable, mMode);
+        }
+
+      protected:
+        RefPtr<AudioInputProcessing> mInputProcessing;
+        bool mEnable;
+        webrtc::EcModes mMode;
+      };
+
+      if (graph) {
+        graph->AppendMessage(
+          MakeUnique<Message>(that->mInputProcessing, aEnable, aMode));
+      }
+
+      return NS_OK;
+    }));
 }
 
-uint32_t
-MediaEngineWebRTCMicrophoneSource::GetRequestedInputChannelCount(MediaStreamGraphImpl* aGraphImpl)
+void
+MediaEngineWebRTCMicrophoneSource::UpdateAGCSettingsIfNeeded(
+  bool aEnable,
+  webrtc::AgcModes aMode)
 {
-  MOZ_ASSERT(aGraphImpl->CurrentDriver()->OnThread(),
-             "Wrong calling pattern, don't call this before ::SetTrack.");
+  AssertIsOnOwningThread();
 
-  if (mState == kReleased) {
-    // This source has been released, and is waiting for collection. Simply
-    // return 0, this source won't contribute to the channel count decision.
-    // Again, this is temporary.
-    return 0;
-  }
+  RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
+  RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
+  NS_DispatchToMainThread(media::NewRunnableFrom(
+    [ that, graph = std::move(gripGraph), aEnable, aMode ]() mutable {
+      class Message : public ControlMessage
+      {
+      public:
+        Message(AudioInputProcessing* aInputProcessing,
+                bool aEnable,
+                webrtc::AgcModes aMode)
+          : ControlMessage(nullptr)
+          , mInputProcessing(aInputProcessing)
+          , mEnable(aEnable)
+          , mMode(aMode)
+        {
+        }
 
-  return mRequestedInputChannelCount;
+        void Run() override
+        {
+          mInputProcessing->UpdateAGCSettingsIfNeeded(mEnable, mMode);
+        }
+
+      protected:
+        RefPtr<AudioInputProcessing> mInputProcessing;
+        bool mEnable;
+        webrtc::AgcModes mMode;
+      };
+
+      if (graph) {
+        graph->AppendMessage(
+          MakeUnique<Message>(that->mInputProcessing, aEnable, aMode));
+      }
+
+      return NS_OK;
+    }));
 }
 
 void
-MediaEngineWebRTCMicrophoneSource::SetRequestedInputChannelCount(
-  uint32_t aRequestedInputChannelCount)
+MediaEngineWebRTCMicrophoneSource::UpdateNSSettingsIfNeeded(
+  bool aEnable,
+  webrtc::NsModes aMode)
 {
-  MutexAutoLock lock(mMutex);
+  AssertIsOnOwningThread();
+
+  RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
+  RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
+  NS_DispatchToMainThread(media::NewRunnableFrom(
+    [ that, graph = std::move(gripGraph), aEnable, aMode ]() mutable {
+      class Message : public ControlMessage
+      {
+      public:
+        Message(AudioInputProcessing* aInputProcessing,
+                bool aEnable,
+                webrtc::NsModes aMode)
+          : ControlMessage(nullptr)
+          , mInputProcessing(aInputProcessing)
+          , mEnable(aEnable)
+          , mMode(aMode)
+        {
+        }
+
+        void Run() override
+        {
+          mInputProcessing->UpdateNSSettingsIfNeeded(mEnable, mMode);
+        }
+
+      protected:
+        RefPtr<AudioInputProcessing> mInputProcessing;
+        bool mEnable;
+        webrtc::NsModes mMode;
+      };
+
+      if (graph) {
+        graph->AppendMessage(
+          MakeUnique<Message>(that->mInputProcessing, aEnable, aMode));
+      }
+
+      return NS_OK;
+    }));
+}
 
-  MOZ_ASSERT(mStream->GraphImpl()->CurrentDriver()->OnThread(),
-      "Wrong calling pattern, don't call this before ::SetTrack.");
-  mRequestedInputChannelCount = aRequestedInputChannelCount;
-  mStream->GraphImpl()->ReevaluateInputDevice();
+void
+MediaEngineWebRTCMicrophoneSource::UpdateAPMExtraOptions(bool aExtendedFilter,
+                                                         bool aDelayAgnostic)
+{
+  AssertIsOnOwningThread();
+
+  RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
+  RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
+  NS_DispatchToMainThread(media::NewRunnableFrom([
+    that,
+    graph = std::move(gripGraph),
+    aExtendedFilter,
+    aDelayAgnostic
+  ]() mutable {
+    class Message : public ControlMessage
+    {
+    public:
+      Message(AudioInputProcessing* aInputProcessing,
+              bool aExtendedFilter,
+              bool aDelayAgnostic)
+        : ControlMessage(nullptr)
+        , mInputProcessing(aInputProcessing)
+        , mExtendedFilter(aExtendedFilter)
+        , mDelayAgnostic(aDelayAgnostic)
+      {
+      }
+
+      void Run() override
+      {
+        mInputProcessing->UpdateAPMExtraOptions(mExtendedFilter,
+                                                mDelayAgnostic);
+      }
+
+    protected:
+      RefPtr<AudioInputProcessing> mInputProcessing;
+      bool mExtendedFilter;
+      bool mDelayAgnostic;
+    };
+
+    if (graph) {
+      graph->AppendMessage(MakeUnique<Message>(
+        that->mInputProcessing, aExtendedFilter, aDelayAgnostic));
+    }
+
+    return NS_OK;
+  }));
 }
 
 void
 MediaEngineWebRTCMicrophoneSource::ApplySettings(const MediaEnginePrefs& aPrefs,
                                                  RefPtr<MediaStreamGraphImpl> aGraph)
 {
   AssertIsOnOwningThread();
   MOZ_DIAGNOSTIC_ASSERT(aGraph);
@@ -550,42 +488,42 @@ MediaEngineWebRTCMicrophoneSource::Apply
   NS_DispatchToMainThread(media::NewRunnableFrom([that, graph = std::move(aGraph), aPrefs]() mutable {
     that->mSettings->mEchoCancellation.Value() = aPrefs.mAecOn;
     that->mSettings->mAutoGainControl.Value() = aPrefs.mAgcOn;
     that->mSettings->mNoiseSuppression.Value() = aPrefs.mNoiseOn;
     that->mSettings->mChannelCount.Value() = aPrefs.mChannels;
 
     class Message : public ControlMessage {
     public:
-      Message(MediaEngineWebRTCMicrophoneSource* aSource,
+      Message(AudioInputProcessing* aInputProcessing,
               bool aPassThrough,
               uint32_t aRequestedInputChannelCount)
         : ControlMessage(nullptr)
-        , mMicrophoneSource(aSource)
+        , mInputProcessing(aInputProcessing)
         , mPassThrough(aPassThrough)
         , mRequestedInputChannelCount(aRequestedInputChannelCount)
-        {}
+      {}
 
       void Run() override
       {
-        mMicrophoneSource->SetPassThrough(mPassThrough);
-        mMicrophoneSource->SetRequestedInputChannelCount(mRequestedInputChannelCount);
+        mInputProcessing->SetPassThrough(mPassThrough);
+        mInputProcessing->SetRequestedInputChannelCount(
+          mRequestedInputChannelCount);
       }
 
     protected:
-      RefPtr<MediaEngineWebRTCMicrophoneSource> mMicrophoneSource;
+      RefPtr<AudioInputProcessing> mInputProcessing;
       bool mPassThrough;
       uint32_t mRequestedInputChannelCount;
     };
 
     bool passThrough = !(aPrefs.mAecOn || aPrefs.mAgcOn || aPrefs.mNoiseOn);
     if (graph) {
-      graph->AppendMessage(MakeUnique<Message>(that,
-                                               passThrough,
-                                               aPrefs.mChannels));
+      graph->AppendMessage(MakeUnique<Message>(
+        that->mInputProcessing, passThrough, aPrefs.mChannels));
     }
 
     return NS_OK;
   }));
 }
 
 nsresult
 MediaEngineWebRTCMicrophoneSource::Allocate(const dom::MediaTrackConstraints &aConstraints,
@@ -602,21 +540,18 @@ MediaEngineWebRTCMicrophoneSource::Alloc
   LOG(("Mic source %p allocation %p Allocate()", this, handle.get()));
 
   nsresult rv = ReevaluateAllocation(handle, nullptr, aPrefs, aDeviceId,
                                      aOutBadConstraint);
   if (NS_FAILED(rv)) {
     return rv;
   }
 
-  {
-    MutexAutoLock lock(mMutex);
-    MOZ_ASSERT(!mHandle, "Only allocate once.");
-    mHandle = handle;
-  }
+  MOZ_ASSERT(!mHandle, "Only allocate once.");
+  mHandle = handle;
 
   handle.forget(aOutHandle);
   return NS_OK;
 }
 
 nsresult
 MediaEngineWebRTCMicrophoneSource::Deallocate(const RefPtr<const AllocationHandle>& aHandle)
 {
@@ -626,26 +561,20 @@ MediaEngineWebRTCMicrophoneSource::Deall
 
   MOZ_DIAGNOSTIC_ASSERT(!mEnabled,
                         "Source should be stopped for the track before removing");
 
   if (mStream && IsTrackIDExplicit(mTrackID)) {
     mStream->EndTrack(mTrackID);
   }
 
-  MutexAutoLock lock(mMutex);
   MOZ_ASSERT(mHandle, "Only deallocate once");
 
   // Reset all state. This is not strictly necessary, this instance will get
   // destroyed soon.
-#ifdef DEBUG
-  mLastCallbackAppendTime = 0;
-#endif
-  mLiveFramesAppended = false;
-  mLiveSilenceAppended = false;
   mHandle = nullptr;
   mStream = nullptr;
   mTrackID = TRACK_NONE;
   mPrincipal = PRINCIPAL_HANDLE_NONE;
   mEnabled = false;
 
   // If empty, no callbacks to deliver data should be occurring
   MOZ_ASSERT(mState != kReleased, "Source not allocated");
@@ -662,19 +591,16 @@ MediaEngineWebRTCMicrophoneSource::SetTr
                                             const RefPtr<SourceMediaStream>& aStream,
                                             TrackID aTrackID,
                                             const PrincipalHandle& aPrincipal)
 {
   AssertIsOnOwningThread();
   MOZ_ASSERT(aStream);
   MOZ_ASSERT(IsTrackIDExplicit(aTrackID));
 
-
-  MutexAutoLock lock(mMutex);
-
   if (mStream &&
       mStream->Graph() != aStream->Graph()) {
     return NS_ERROR_NOT_AVAILABLE;
   }
 
   MOZ_ASSERT(!mStream);
   MOZ_ASSERT(mTrackID == TRACK_NONE);
   MOZ_ASSERT(mPrincipal == PRINCIPAL_HANDLE_NONE);
@@ -689,22 +615,46 @@ MediaEngineWebRTCMicrophoneSource::SetTr
                          0,
                          segment,
                          SourceMediaStream::ADDTRACK_QUEUED);
 
   LOG(("Stream %p registered for microphone capture", aStream.get()));
   return NS_OK;
 }
 
+class StartStopMessage : public ControlMessage
+{
+public:
+  StartStopMessage(AudioInputProcessing* aInputProcessing, bool aStart)
+    : ControlMessage(nullptr)
+    , mInputProcessing(aInputProcessing)
+    , mStart(aStart)
+  {
+  }
+
+  void Run() override
+  {
+    if (mStart) {
+      mInputProcessing->Start();
+    } else {
+      mInputProcessing->Stop();
+    }
+  }
+
+protected:
+  RefPtr<AudioInputProcessing> mInputProcessing;
+  bool mStart;
+};
+
 nsresult
 MediaEngineWebRTCMicrophoneSource::Start(const RefPtr<const AllocationHandle>& aHandle)
 {
   AssertIsOnOwningThread();
 
-  MutexAutoLock lock(mMutex);
   // This spans setting both the enabled state and mState.
   if (mState == kStarted) {
     return NS_OK;
   }
 
   MOZ_ASSERT(mState == kAllocated || mState == kStopped);
 
   CubebUtils::AudioDeviceID deviceID = mDeviceInfo->DeviceID();
@@ -729,28 +679,35 @@ MediaEngineWebRTCMicrophoneSource::Start
   }
 
   sInputStreamsOpen++;
 #endif
 
   MOZ_ASSERT(!mEnabled, "Source already started");
   mEnabled = true;
 
-#ifdef DEBUG
-  // Ensure that callback-tracking state is reset when callbacks start coming.
-  mLastCallbackAppendTime = 0;
-#endif
-  mLiveFramesAppended = false;
-  mLiveSilenceAppended = false;
+  mInputProcessing = new AudioInputProcessing(
+    mDeviceMaxChannelCount, mStream, mTrackID, mPrincipal);
+
+  RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
+  RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
+  NS_DispatchToMainThread(media::NewRunnableFrom(
+    [ that, graph = std::move(gripGraph), enabled = that->mEnabled ]() mutable {
 
-  if (!mListener) {
-    mListener = new WebRTCAudioDataListener(this);
-  }
+      if (graph) {
+        graph->AppendMessage(
+          MakeUnique<StartStopMessage>(that->mInputProcessing, enabled));
+      }
 
-  mStream->OpenAudioInput(deviceID, mListener);
+      return NS_OK;
+    }));
+
+  mStream->OpenAudioInput(deviceID, mInputProcessing);
 
   MOZ_ASSERT(mState != kReleased);
   mState = kStarted;
 
   ApplySettings(mNetPrefs, mStream->GraphImpl());
 
   return NS_OK;
 }
@@ -759,140 +716,376 @@ nsresult
 MediaEngineWebRTCMicrophoneSource::Stop(const RefPtr<const AllocationHandle>& aHandle)
 {
   AssertIsOnOwningThread();
 
   LOG(("Mic source %p allocation %p Stop()", this, aHandle.get()));
 
   MOZ_ASSERT(mStream, "SetTrack must have been called before ::Stop");
 
-  {
-    // This spans setting both the enabled state and mState.
-    MutexAutoLock lock(mMutex);
-    if (!mEnabled) {
-      // Already stopped - this is allowed
-      return NS_OK;
-    }
+  // This spans setting both the enabled state and mState.
+  if (!mEnabled) {
+    // Already stopped - this is allowed
+    return NS_OK;
+  }
 
-    mEnabled = false;
+  mEnabled = false;
 
-    CubebUtils::AudioDeviceID deviceID = mDeviceInfo->DeviceID();
-    Maybe<CubebUtils::AudioDeviceID> id = Some(deviceID);
-    mStream->CloseAudioInput(id, mListener);
-    mListener = nullptr;
 #ifdef MOZ_PULSEAUDIO
     MOZ_ASSERT(sInputStreamsOpen > 0);
     sInputStreamsOpen--;
 #endif
+  RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
+  RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
+  NS_DispatchToMainThread(media::NewRunnableFrom(
+    [ that, graph = std::move(gripGraph), enabled = that->mEnabled ]() mutable {
 
-    MOZ_ASSERT(mState == kStarted, "Should be started when stopping");
-    mState = kStopped;
-  }
+      if (graph) {
+        graph->AppendMessage(
+          MakeUnique<StartStopMessage>(that->mInputProcessing, enabled));
+      }
+
+      return NS_OK;
+    }));
+
+  CubebUtils::AudioDeviceID deviceID = mDeviceInfo->DeviceID();
+  Maybe<CubebUtils::AudioDeviceID> id = Some(deviceID);
+  mStream->CloseAudioInput(id, mInputProcessing);
+
+  MOZ_ASSERT(mState == kStarted, "Should be started when stopping");
+  mState = kStopped;
 
   return NS_OK;
 }
 
 void
 MediaEngineWebRTCMicrophoneSource::GetSettings(dom::MediaTrackSettings& aOutSettings) const
 {
   MOZ_ASSERT(NS_IsMainThread());
   aOutSettings = *mSettings;
 }
 
+AudioInputProcessing::AudioInputProcessing(uint32_t aMaxChannelCount,
+                                           RefPtr<SourceMediaStream> aStream,
+                                           TrackID aTrackID,
+                                           const PrincipalHandle& aPrincipalHandle)
+  : mStream(std::move(aStream))
+  , mAudioProcessing(AudioProcessing::Create())
+  , mRequestedInputChannelCount(aMaxChannelCount)
+  , mSkipProcessing(false)
+  , mInputDownmixBuffer(MAX_SAMPLING_FREQ * MAX_CHANNELS / 100)
+#ifdef DEBUG
+  , mLastCallbackAppendTime(0)
+#endif
+  , mLiveFramesAppended(false)
+  , mLiveSilenceAppended(false)
+  , mTrackID(aTrackID)
+  , mPrincipal(aPrincipalHandle)
+{
+}
+
 void
-MediaEngineWebRTCMicrophoneSource::Pull(const RefPtr<const AllocationHandle>& aHandle,
-                                        const RefPtr<SourceMediaStream>& aStream,
-                                        TrackID aTrackID,
-                                        StreamTime aDesiredTime,
-                                        const PrincipalHandle& aPrincipalHandle)
+AudioInputProcessing::Disconnect(MediaStreamGraphImpl* aGraph)
+{
+  // This method is just for asserts.
+  MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
+}
+
+void
+MediaEngineWebRTCMicrophoneSource::Shutdown()
+{
+  AssertIsOnOwningThread();
+
+  if (mState == kStarted) {
+    if (mEnabled) {
+      Stop(mHandle);
+    }
+    MOZ_ASSERT(mState == kStopped);
+  }
+
+  MOZ_ASSERT(mState == kAllocated || mState == kStopped);
+  Deallocate(mHandle);
+  MOZ_ASSERT(mState == kReleased);
+}
+
+bool
+AudioInputProcessing::PassThrough(MediaStreamGraphImpl* aGraph) const
+{
+  MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
+  return mSkipProcessing;
+}
+
+void
+AudioInputProcessing::SetPassThrough(bool aPassThrough)
+{
+  mSkipProcessing = aPassThrough;
+}
+
+uint32_t
+AudioInputProcessing::GetRequestedInputChannelCount(
+  MediaStreamGraphImpl* aGraphImpl)
+{
+  return mRequestedInputChannelCount;
+}
+
+void
+AudioInputProcessing::SetRequestedInputChannelCount(
+  uint32_t aRequestedInputChannelCount)
+{
+  mRequestedInputChannelCount = aRequestedInputChannelCount;
+
+  mStream->GraphImpl()->ReevaluateInputDevice();
+}
+
+// This does an early return in case of error.
+#define HANDLE_APM_ERROR(fn)                                                   \
+  do {                                                                         \
+    int rv = fn;                                                               \
+    if (rv != AudioProcessing::kNoError) {                                     \
+      MOZ_ASSERT_UNREACHABLE("APM error in " #fn);                             \
+      return;                                                                  \
+    }                                                                          \
+  } while (0);
+
+void
+AudioInputProcessing::UpdateAECSettingsIfNeeded(bool aEnable, EcModes aMode)
+{
+  using webrtc::EcModes;
+
+  EchoCancellation::SuppressionLevel level;
+
+  switch (aMode) {
+    case EcModes::kEcUnchanged:
+      level = mAudioProcessing->echo_cancellation()->suppression_level();
+      break;
+    case EcModes::kEcConference:
+      level = EchoCancellation::kHighSuppression;
+      break;
+    case EcModes::kEcDefault:
+      level = EchoCancellation::kModerateSuppression;
+      break;
+    case EcModes::kEcAec:
+      level = EchoCancellation::kModerateSuppression;
+      break;
+    case EcModes::kEcAecm:
+      // No suppression level to set for the mobile echo canceller
+      break;
+    default:
+      MOZ_LOG(GetMediaManagerLog(), LogLevel::Error, ("Bad EcMode value"));
+      MOZ_ASSERT_UNREACHABLE("Bad pref set in all.js or in about:config"
+                             " for the echo cancellation mode.");
+      // fall back to something sensible in release
+      level = EchoCancellation::kModerateSuppression;
+      break;
+  }
+
+  // AECm and AEC are mutually exclusive.
+  if (aMode == EcModes::kEcAecm) {
+    HANDLE_APM_ERROR(mAudioProcessing->echo_cancellation()->Enable(false));
+    HANDLE_APM_ERROR(mAudioProcessing->echo_control_mobile()->Enable(aEnable));
+  } else {
+    HANDLE_APM_ERROR(mAudioProcessing->echo_control_mobile()->Enable(false));
+    HANDLE_APM_ERROR(mAudioProcessing->echo_cancellation()->Enable(aEnable));
+    HANDLE_APM_ERROR(
+      mAudioProcessing->echo_cancellation()->set_suppression_level(level));
+  }
+}
+
+void
+AudioInputProcessing::UpdateAGCSettingsIfNeeded(bool aEnable, AgcModes aMode)
+{
+#if defined(WEBRTC_IOS) || defined(ATA) || defined(WEBRTC_ANDROID)
+  if (aMode == kAgcAdaptiveAnalog) {
+    MOZ_LOG(GetMediaManagerLog(),
+            LogLevel::Error,
+            ("Invalid AGC mode kAgcAdaptiveAnalog on mobile"));
+    MOZ_ASSERT_UNREACHABLE("Bad pref set in all.js or in about:config"
+                           " for the auto gain, on mobile.");
+    aMode = kAgcDefault;
+  }
+#endif
+  GainControl::Mode mode = kDefaultAgcMode;
+
+  switch (aMode) {
+    case AgcModes::kAgcDefault:
+      mode = kDefaultAgcMode;
+      break;
+    case AgcModes::kAgcUnchanged:
+      mode = mAudioProcessing->gain_control()->mode();
+      break;
+    case AgcModes::kAgcFixedDigital:
+      mode = GainControl::Mode::kFixedDigital;
+      break;
+    case AgcModes::kAgcAdaptiveAnalog:
+      mode = GainControl::Mode::kAdaptiveAnalog;
+      break;
+    case AgcModes::kAgcAdaptiveDigital:
+      mode = GainControl::Mode::kAdaptiveDigital;
+      break;
+    default:
+      MOZ_ASSERT_UNREACHABLE("Bad pref set in all.js or in about:config"
+                             " for the auto gain.");
+      // This is a good fallback, it works regardless of the platform.
+      mode = GainControl::Mode::kAdaptiveDigital;
+      break;
+  }
+
+  HANDLE_APM_ERROR(mAudioProcessing->gain_control()->set_mode(mode));
+  HANDLE_APM_ERROR(mAudioProcessing->gain_control()->Enable(aEnable));
+}
+
+void
+AudioInputProcessing::UpdateNSSettingsIfNeeded(bool aEnable, NsModes aMode)
+{
+  NoiseSuppression::Level nsLevel;
+
+  switch (aMode) {
+    case NsModes::kNsDefault:
+      nsLevel = kDefaultNsMode;
+      break;
+    case NsModes::kNsUnchanged:
+      nsLevel = mAudioProcessing->noise_suppression()->level();
+      break;
+    case NsModes::kNsConference:
+      nsLevel = NoiseSuppression::kHigh;
+      break;
+    case NsModes::kNsLowSuppression:
+      nsLevel = NoiseSuppression::kLow;
+      break;
+    case NsModes::kNsModerateSuppression:
+      nsLevel = NoiseSuppression::kModerate;
+      break;
+    case NsModes::kNsHighSuppression:
+      nsLevel = NoiseSuppression::kHigh;
+      break;
+    case NsModes::kNsVeryHighSuppression:
+      nsLevel = NoiseSuppression::kVeryHigh;
+      break;
+    default:
+      MOZ_ASSERT_UNREACHABLE("Bad pref set in all.js or in about:config"
+                             " for the noise suppression.");
+      // Pick something sensible as a fallback in release.
+      nsLevel = NoiseSuppression::kModerate;
+  }
+  HANDLE_APM_ERROR(mAudioProcessing->noise_suppression()->set_level(nsLevel));
+  HANDLE_APM_ERROR(mAudioProcessing->noise_suppression()->Enable(aEnable));
+}
+
+#undef HANDLE_APM_ERROR
+
+void
+AudioInputProcessing::UpdateAPMExtraOptions(bool aExtendedFilter,
+                                            bool aDelayAgnostic)
+{
+  webrtc::Config config;
+  config.Set<webrtc::ExtendedFilter>(
+    new webrtc::ExtendedFilter(aExtendedFilter));
+  config.Set<webrtc::DelayAgnostic>(new webrtc::DelayAgnostic(aDelayAgnostic));
+
+  mAudioProcessing->SetExtraOptions(config);
+}
+
+void
+AudioInputProcessing::Start()
+{
+  mEnabled = true;
+}
+
+void
+AudioInputProcessing::Stop()
+{
+  mEnabled = false;
+}
+
+void
+AudioInputProcessing::Pull(const RefPtr<const AllocationHandle>& aHandle,
+                           const RefPtr<SourceMediaStream>& aStream,
+                           TrackID aTrackID,
+                           StreamTime aDesiredTime,
+                           const PrincipalHandle& aPrincipalHandle)
 {
   TRACE_AUDIO_CALLBACK_COMMENT("SourceMediaStream %p track %i",
                                aStream.get(), aTrackID);
   StreamTime delta;
 
-  {
-    MutexAutoLock lock(mMutex);
-    if (!mHandle) {
-      // Deallocation already happened. Just return.
-      return;
-    }
+  delta = aDesiredTime - aStream->GetEndOfAppendedData(aTrackID);
 
-    // We don't want to GetEndOfAppendedData() above at the declaration if the
-    // allocation was removed and the track non-existant. An assert will fail.
-    delta = aDesiredTime - aStream->GetEndOfAppendedData(aTrackID);
+  if (delta < 0) {
+    LOG_FRAMES(
+      ("Not appending silence; %" PRId64 " frames already buffered", -delta));
+    return;
+  }
 
-    if (delta < 0) {
-      LOG_FRAMES(("Not appending silence for allocation %p; %" PRId64 " frames already buffered",
-                  mHandle.get(), -delta));
-      return;
-    }
+  if (!mLiveFramesAppended ||
+      !mLiveSilenceAppended) {
+    // These are the iterations after starting or resuming audio capture.
+    // Make sure there's at least one extra block buffered until audio
+    // callbacks come in. We also allow appending silence one time after
+    // audio callbacks have started, to cover the case where audio callbacks
+    // start appending data immediately and there is no extra data buffered.
+    delta += WEBAUDIO_BLOCK_SIZE;
 
-    if (!mLiveFramesAppended ||
-        !mLiveSilenceAppended) {
-      // These are the iterations after starting or resuming audio capture.
-      // Make sure there's at least one extra block buffered until audio
-      // callbacks come in. We also allow appending silence one time after
-      // audio callbacks have started, to cover the case where audio callbacks
-      // start appending data immediately and there is no extra data buffered.
-      delta += WEBAUDIO_BLOCK_SIZE;
+    // If we're supposed to be packetizing but there's no packetizer yet,
+    // there must not have been any live frames appended yet.
+    // If there were live frames appended and we haven't appended the
+    // right amount of silence, we'll have to append silence once more,
+    // failing the other assert below.
+    MOZ_ASSERT_IF(!PassThrough(aStream->GraphImpl()) && !mPacketizerInput,
+                  !mLiveFramesAppended);
 
-      // If we're supposed to be packetizing but there's no packetizer yet,
-      // there must not have been any live frames appended yet.
-      // If there were live frames appended and we haven't appended the
-      // right amount of silence, we'll have to append silence once more,
-      // failing the other assert below.
-      MOZ_ASSERT_IF(!PassThrough(aStream->GraphImpl()) && !mPacketizerInput,
-                    !mLiveFramesAppended);
+    if (!PassThrough(aStream->GraphImpl()) && mPacketizerInput) {
+      // Processing is active, and input is processed in chunks of 10ms
+      // through the input packetizer. We allow for 10ms of silence on the
+      // track to accommodate the buffering worst-case.
+      delta += mPacketizerInput->PacketSize();
+    }
+  }
 
-      if (!PassThrough(aStream->GraphImpl()) && mPacketizerInput) {
-        // Processing is active and is processed in chunks of 10ms through the
-        // input packetizer. We allow for 10ms of silence on the track to
-        // accomodate the buffering worst-case.
-        delta += mPacketizerInput->PacketSize();
-      }
-    }
-
-    LOG_FRAMES(("Pulling %" PRId64 " frames of silence for allocation %p",
-                delta, mHandle.get()));
+  LOG_FRAMES(("Pulling %" PRId64 " frames of silence for allocation %p",
+              delta,
+              aHandle.get()));
 
-    // This assertion fails when we append silence here in the same iteration
-    // as there were real audio samples already appended by the audio callback.
-    // Note that this is exempted until live samples and a subsequent chunk of
-    // silence have been appended to the track. This will cover cases like:
-    // - After Start(), there is silence (maybe multiple times) appended before
-    //   the first audio callback.
-    // - After Start(), there is real data (maybe multiple times) appended
-    //   before the first graph iteration.
-    // And other combinations of order of audio sample sources.
-    MOZ_ASSERT_IF(
-      mEnabled &&
-      mLiveFramesAppended &&
-      mLiveSilenceAppended,
-      aStream->GraphImpl()->IterationEnd() >
-      mLastCallbackAppendTime);
+  // This assertion fails when we append silence here in the same iteration
+  // as there were real audio samples already appended by the audio callback.
+  // Note that this is exempted until live samples and a subsequent chunk of
+  // silence have been appended to the track. This will cover cases like:
+  // - After Start(), there is silence (maybe multiple times) appended before
+  //   the first audio callback.
+  // - After Start(), there is real data (maybe multiple times) appended
+  //   before the first graph iteration.
+  // And other combinations of order of audio sample sources.
+  MOZ_ASSERT_IF(
+    mEnabled &&
+    mLiveFramesAppended &&
+    mLiveSilenceAppended,
+    aStream->GraphImpl()->IterationEnd() >
+    mLastCallbackAppendTime);
 
-    if (mLiveFramesAppended) {
-      mLiveSilenceAppended = true;
-    }
+  if (mLiveFramesAppended) {
+    mLiveSilenceAppended = true;
   }
 
   AudioSegment audio;
   audio.AppendNullData(delta);
   aStream->AppendToTrack(aTrackID, &audio);
 }
 
 void
-MediaEngineWebRTCMicrophoneSource::NotifyOutputData(MediaStreamGraphImpl* aGraph,
-                                                    AudioDataValue* aBuffer,
-                                                    size_t aFrames,
-                                                    TrackRate aRate,
-                                                    uint32_t aChannels)
+AudioInputProcessing::NotifyOutputData(MediaStreamGraphImpl* aGraph,
+                                       AudioDataValue* aBuffer,
+                                       size_t aFrames,
+                                       TrackRate aRate,
+                                       uint32_t aChannels)
 {
   MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
 
+  if (!mEnabled) {
+    return;
+  }
+
   if (!mPacketizerOutput ||
       mPacketizerOutput->PacketSize() != aRate/100u ||
       mPacketizerOutput->Channels() != aChannels) {
     // It's ok to drop the audio still in the packetizer here: if this changes,
     // we changed devices or something.
     mPacketizerOutput =
       new AudioPacketizer<AudioDataValue, float>(aRate/100, aChannels);
   }
@@ -970,23 +1163,24 @@ MediaEngineWebRTCMicrophoneSource::Notif
                                              deinterleavedPacketDataChannelPointers.Elements());
 
     MOZ_ASSERT(!err, "Could not process the reverse stream.");
   }
 }
 
 // Only called if we're not in passthrough mode
 void
-MediaEngineWebRTCMicrophoneSource::PacketizeAndProcess(MediaStreamGraphImpl* aGraph,
-                                                       const AudioDataValue* aBuffer,
-                                                       size_t aFrames,
-                                                       TrackRate aRate,
-                                                       uint32_t aChannels)
+AudioInputProcessing::PacketizeAndProcess(MediaStreamGraphImpl* aGraph,
+                                          const AudioDataValue* aBuffer,
+                                          size_t aFrames,
+                                          TrackRate aRate,
+                                          uint32_t aChannels)
 {
   MOZ_ASSERT(!PassThrough(aGraph), "This should be bypassed when in PassThrough mode.");
+  MOZ_ASSERT(mEnabled);
   size_t offset = 0;
 
   if (!mPacketizerInput ||
       mPacketizerInput->PacketSize() != aRate/100u ||
       mPacketizerInput->Channels() != aChannels) {
     // It's ok to drop the audio still in the packetizer here.
     mPacketizerInput =
       new AudioPacketizer<AudioDataValue, float>(aRate/100, aChannels);
@@ -1046,36 +1240,29 @@ MediaEngineWebRTCMicrophoneSource::Packe
       processedOutputChannelPointersConst[i] = static_cast<float*>(buffer->Data()) + offset;
       offset += mPacketizerInput->PacketSize();
     }
 
     mAudioProcessing->ProcessStream(deinterleavedPacketizedInputDataChannelPointers.Elements(),
                                     inputConfig,
                                     outputConfig,
                                     processedOutputChannelPointers.Elements());
-    MutexAutoLock lock(mMutex);
-    if (mState != kStarted) {
-      return;
-    }
 
     AudioSegment segment;
     if (!mStream->GraphImpl()) {
       // The DOMMediaStream that owns mStream has been cleaned up
       // and MediaStream::DestroyImpl() has run in the MSG. This is fine and
       // can happen before the MediaManager thread gets to stop capture for
       // this MediaStream.
       continue;
     }
 
-    if (!mEnabled) {
-      continue;
-    }
-
-    LOG_FRAMES(("Appending %" PRIu32 " frames of packetized audio for allocation %p",
-                mPacketizerInput->PacketSize(), mHandle.get()));
+    LOG_FRAMES(("Appending %" PRIu32 " frames of packetized audio",
+                mPacketizerInput->PacketSize()));
 
 #ifdef DEBUG
     mLastCallbackAppendTime = mStream->GraphImpl()->IterationEnd();
 #endif
     mLiveFramesAppended = true;
 
     // We already have planar audio data of the right format. Insert into the
     // MSG.
@@ -1086,48 +1273,33 @@ MediaEngineWebRTCMicrophoneSource::Packe
                          mPacketizerInput->PacketSize(),
                          mPrincipal);
     mStream->AppendToTrack(mTrackID, &segment);
   }
 }
 
 template<typename T>
 void
-MediaEngineWebRTCMicrophoneSource::InsertInGraph(const T* aBuffer,
-                                                 size_t aFrames,
-                                                 uint32_t aChannels)
+AudioInputProcessing::InsertInGraph(const T* aBuffer,
+                                    size_t aFrames,
+                                    uint32_t aChannels)
 {
-  MutexAutoLock lock(mMutex);
-
-  if (mState != kStarted) {
-    return;
-  }
-
-  if (!mStream) {
-    return;
-  }
-
   if (!mStream->GraphImpl()) {
     // The DOMMediaStream that owns mStream has been cleaned up
     // and MediaStream::DestroyImpl() has run in the MSG. This is fine and
     // can happen before the MediaManager thread gets to stop capture for
     // this MediaStream.
     return;
   }
 
-  if (!mEnabled) {
-    return;
-  }
-
 #ifdef DEBUG
   mLastCallbackAppendTime = mStream->GraphImpl()->IterationEnd();
 #endif
   mLiveFramesAppended = true;
 
-  // Bug 971528 - Support stereo capture in gUM
   MOZ_ASSERT(aChannels >= 1 && aChannels <= 8, "Support up to 8 channels");
 
   AudioSegment segment;
   RefPtr<SharedBuffer> buffer =
     SharedBuffer::Create(aFrames * aChannels * sizeof(T));
   AutoTArray<const T*, 8> channels;
   if (aChannels == 1) {
     PodCopy(static_cast<T*>(buffer->Data()), aBuffer, aFrames);
@@ -1145,52 +1317,42 @@ MediaEngineWebRTCMicrophoneSource::Inser
     }
 
     DeinterleaveAndConvertBuffer(aBuffer,
         aFrames,
         aChannels,
         write_channels.Elements());
   }
 
-  LOG_FRAMES(("Appending %zu frames of raw audio for allocation %p",
-        aFrames, mHandle.get()));
+  LOG_FRAMES(("Appending %zu frames of raw audio", aFrames));
 
   MOZ_ASSERT(aChannels == channels.Length());
   segment.AppendFrames(buffer.forget(), channels, aFrames,
       mPrincipal);
 
   mStream->AppendToTrack(mTrackID, &segment);
 }
 
 // Called back on GraphDriver thread!
 // Note this can be called back after ::Shutdown()
 void
-MediaEngineWebRTCMicrophoneSource::NotifyInputData(MediaStreamGraphImpl* aGraph,
-                                                   const AudioDataValue* aBuffer,
-                                                   size_t aFrames,
-                                                   TrackRate aRate,
-                                                   uint32_t aChannels)
+AudioInputProcessing::NotifyInputData(MediaStreamGraphImpl* aGraph,
+                                      const AudioDataValue* aBuffer,
+                                      size_t aFrames,
+                                      TrackRate aRate,
+                                      uint32_t aChannels)
 {
   MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
   TRACE_AUDIO_CALLBACK();
 
-  {
-    MutexAutoLock lock(mMutex);
-    if (!mHandle) {
-      // This can happen because this class is not yet using message passing,
-      // and is accessed both on the media manager thread and the MSG thread.
-      // This is to be fixed soon.
-      // When deallocating, the listener is removed via message passing, while
-      // the allocation is removed immediately, so there can be a few iterations
-      // where we need to return early here.
-      return;
-    }
-  }
+  MOZ_ASSERT(mEnabled);
+
   // If some processing is necessary, packetize and insert in the WebRTC.org
-  // code. Otherwise, directly insert the mic data in the MSG, bypassing all processing.
+  // code. Otherwise, directly insert the mic data in the MSG, bypassing all
+  // processing.
   if (PassThrough(aGraph)) {
     InsertInGraph<AudioDataValue>(aBuffer, aFrames, aChannels);
   } else {
     PacketizeAndProcess(aGraph, aBuffer, aFrames, aRate, aChannels);
   }
 }
 
 #define ResetProcessingIfNeeded(_processing)                        \
@@ -1210,50 +1372,25 @@ do {                                    
       #_processing " on device change.");                           \
       return;                                                       \
     }                                                               \
                                                                     \
   }                                                                 \
 }  while(0)
 
 void
-MediaEngineWebRTCMicrophoneSource::DeviceChanged(MediaStreamGraphImpl* aGraph)
+AudioInputProcessing::DeviceChanged(MediaStreamGraphImpl* aGraph)
 {
   MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
   // Reset some processing
   ResetProcessingIfNeeded(gain_control);
   ResetProcessingIfNeeded(echo_cancellation);
   ResetProcessingIfNeeded(noise_suppression);
 }
 
-void
-MediaEngineWebRTCMicrophoneSource::Disconnect(MediaStreamGraphImpl* aGraph)
-{
-  // This method is just for asserts.
-  MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
-  MOZ_ASSERT(!mListener);
-}
-
-void
-MediaEngineWebRTCMicrophoneSource::Shutdown()
-{
-  AssertIsOnOwningThread();
-
-  if (mState == kStarted) {
-    if (mEnabled) {
-      Stop(mHandle);
-    }
-    MOZ_ASSERT(mState == kStopped);
-  }
-
-  MOZ_ASSERT(mState == kAllocated || mState == kStopped);
-  Deallocate(mHandle);
-  MOZ_ASSERT(mState == kReleased);
-}
-
 nsString
 MediaEngineWebRTCAudioCaptureSource::GetName() const
 {
   return NS_LITERAL_STRING(u"AudioCapture");
 }
 
 nsCString
 MediaEngineWebRTCAudioCaptureSource::GetUUID() const
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.h
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.h
@@ -9,56 +9,28 @@
 #include "MediaEngineWebRTC.h"
 #include "AudioPacketizer.h"
 #include "AudioSegment.h"
 #include "AudioDeviceInfo.h"
 #include "webrtc/modules/audio_processing/include/audio_processing.h"
 
 namespace mozilla {
 
-class MediaEngineWebRTCMicrophoneSource;
-
-// This class is instantiated on the MediaManager thread, and is then sent and
-// only ever access again on the MediaStreamGraph.
-class WebRTCAudioDataListener : public AudioDataListener
-{
-protected:
-  // Protected destructor, to discourage deletion outside of Release():
-  virtual ~WebRTCAudioDataListener() {}
-
-public:
-  explicit WebRTCAudioDataListener(MediaEngineWebRTCMicrophoneSource* aAudioSource)
-    : mAudioSource(aAudioSource)
-  {}
+class AudioInputProcessing;
 
-  // AudioDataListenerInterface methods
-  void NotifyOutputData(MediaStreamGraphImpl* aGraph,
-                        AudioDataValue* aBuffer,
-                        size_t aFrames,
-                        TrackRate aRate,
-                        uint32_t aChannels) override;
-
-  void NotifyInputData(MediaStreamGraphImpl* aGraph,
-                       const AudioDataValue* aBuffer,
-                       size_t aFrames,
-                       TrackRate aRate,
-                       uint32_t aChannels) override;
-
-  uint32_t RequestedInputChannelCount(MediaStreamGraphImpl* aGraph) override;
-
-  void DeviceChanged(MediaStreamGraphImpl* aGraph) override;
-
-  void Disconnect(MediaStreamGraphImpl* aGraph) override;
-
-private:
-  RefPtr<MediaEngineWebRTCMicrophoneSource> mAudioSource;
-};
-
-class MediaEngineWebRTCMicrophoneSource : public MediaEngineSource,
-                                          public AudioDataListenerInterface
+// This class is created and used exclusively on the MediaManager thread, with
+// exactly two exceptions:
+// - Pull is always called on the MSG thread. It only ever uses
+//   mInputProcessing. mInputProcessing is set, and then a message is sent
+//   first to the main thread and then to the MSG thread, so that it can be
+//   used as part of the graph processing. On destruction, similarly, a
+//   message is sent to the graph so that it stops using it, and then it is
+//   deleted.
+// - mSettings is created on the MediaManager thread and is only ever accessed
+//   on the main thread afterwards. It is const.
+class MediaEngineWebRTCMicrophoneSource : public MediaEngineSource
 {
 public:
   MediaEngineWebRTCMicrophoneSource(RefPtr<AudioDeviceInfo> aInfo,
                                     const nsString& name,
                                     const nsCString& uuid,
                                     uint32_t maxChannelCount,
                                     bool aDelayAgnostic,
                                     bool aExtendedFilter);
@@ -85,44 +57,27 @@ public:
   nsresult Start(const RefPtr<const AllocationHandle>& aHandle) override;
   nsresult Stop(const RefPtr<const AllocationHandle>& aHandle) override;
   nsresult Reconfigure(const RefPtr<AllocationHandle>& aHandle,
                        const dom::MediaTrackConstraints& aConstraints,
                        const MediaEnginePrefs& aPrefs,
                        const nsString& aDeviceId,
                        const char** aOutBadConstraint) override;
 
-  /**
-   * Assigns the current settings of the capture to aOutSettings.
-   * Main thread only.
-   */
-  void GetSettings(dom::MediaTrackSettings& aOutSettings) const override;
-
   void Pull(const RefPtr<const AllocationHandle>& aHandle,
             const RefPtr<SourceMediaStream>& aStream,
             TrackID aTrackID,
             StreamTime aDesiredTime,
             const PrincipalHandle& aPrincipalHandle) override;
 
-  // AudioDataListenerInterface methods
-  void NotifyOutputData(MediaStreamGraphImpl* aGraph,
-                        AudioDataValue* aBuffer, size_t aFrames,
-                        TrackRate aRate, uint32_t aChannels) override;
-  void NotifyInputData(MediaStreamGraphImpl* aGraph,
-                       const AudioDataValue* aBuffer, size_t aFrames,
-                       TrackRate aRate, uint32_t aChannels) override;
-
-  void DeviceChanged(MediaStreamGraphImpl* aGraph) override;
-
-  uint32_t RequestedInputChannelCount(MediaStreamGraphImpl* aGraph) override
-  {
-    return GetRequestedInputChannelCount(aGraph);
-  }
-
-  void Disconnect(MediaStreamGraphImpl* aGraph) override;
+  /**
+   * Assigns the current settings of the capture to aOutSettings.
+   * Main thread only.
+   */
+  void GetSettings(dom::MediaTrackSettings& aOutSettings) const override;
 
   dom::MediaSourceEnum GetMediaSource() const override
   {
     return dom::MediaSourceEnum::Microphone;
   }
 
   nsresult TakePhoto(MediaEnginePhotoCallback* aCallback) override
   {
@@ -131,17 +86,17 @@ public:
 
   uint32_t GetBestFitnessDistance(
     const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
     const nsString& aDeviceId) const override;
 
   void Shutdown() override;
 
 protected:
-  ~MediaEngineWebRTCMicrophoneSource() {}
+  ~MediaEngineWebRTCMicrophoneSource() = default;
 
 private:
   /**
    * Reevaluates the aggregated constraints of all allocations and restarts the
    * underlying device if necessary.
    *
    * If the given AllocationHandle was already registered, its constraints will
    * be updated before reevaluation. If not, they will be added before
@@ -159,127 +114,174 @@ private:
    * these new constraints, and capture is active, the device will be restarted.
    */
   nsresult UpdateSingleSource(const RefPtr<const AllocationHandle>& aHandle,
                               const NormalizedConstraints& aNetConstraints,
                               const MediaEnginePrefs& aPrefs,
                               const nsString& aDeviceId,
                               const char** aOutBadConstraint);
 
-
+  // These methods send a message to the AudioInputProcessing instance.
   void UpdateAECSettingsIfNeeded(bool aEnable, webrtc::EcModes aMode);
   void UpdateAGCSettingsIfNeeded(bool aEnable, webrtc::AgcModes aMode);
   void UpdateNSSettingsIfNeeded(bool aEnable, webrtc::NsModes aMode);
-
+  void UpdateAPMExtraOptions(bool aExtendedFilter, bool aDelayAgnostic);
   void ApplySettings(const MediaEnginePrefs& aPrefs,
                      RefPtr<MediaStreamGraphImpl> aGraph);
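+  // For instance, a setting update can be dispatched to the graph so that it
+  // is applied on the MSG thread (a sketch; `Message` stands in for a
+  // lambda-wrapping ControlMessage subclass, not shown here):
+  //
+  //   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
+  //   mStream->GraphImpl()->AppendMessage(MakeUnique<Message>(
+  //     [that, aEnable, aMode]() {
+  //       that->mInputProcessing->UpdateAGCSettingsIfNeeded(aEnable, aMode);
+  //     }));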
 
   bool HasEnabledTrack() const;
 
+  RefPtr<AllocationHandle> mHandle;
+
+  TrackID mTrackID = TRACK_NONE;
+  PrincipalHandle mPrincipal = PRINCIPAL_HANDLE_NONE;
+  bool mEnabled = false;
+
+  const RefPtr<AudioDeviceInfo> mDeviceInfo;
+  const bool mDelayAgnostic;
+  const bool mExtendedFilter;
+  const nsString mDeviceName;
+  const nsCString mDeviceUUID;
+
+  // The maximum number of channels that this device supports.
+  const uint32_t mDeviceMaxChannelCount;
+  // The current settings for the underlying device.
+  // Constructed on the MediaManager thread, and then only ever accessed on the
+  // main thread.
+  const nsMainThreadPtrHandle<media::Refcountable<dom::MediaTrackSettings>> mSettings;
+  // To only update microphone when needed, we keep track of the prefs
+  // representing the currently applied settings for this source. This is the
+  // net result of the prefs across all allocations.
+  MediaEnginePrefs mNetPrefs;
+
+  // Current state of the resource for this source.
+  MediaEngineSourceState mState;
+
+  // The SourceMediaStream on which to append data for this microphone. Set in
+  // SetTrack as part of the initialization, and nulled in ::Deallocate.
+  RefPtr<SourceMediaStream> mStream;
+
+  // See note at the top of this class.
+  RefPtr<AudioInputProcessing> mInputProcessing;
+};
+
+// This class is created on the MediaManager thread, and then used exclusively
+// on the MSG thread.
+// All communication is done via message passing using MSG ControlMessages.
+class AudioInputProcessing : public AudioDataListener
+{
+public:
+  AudioInputProcessing(uint32_t aMaxChannelCount,
+                       RefPtr<SourceMediaStream> aStream,
+                       TrackID aTrackID,
+                       const PrincipalHandle& aPrincipalHandle);
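+  // Typically constructed on the MediaManager thread and handed over to the
+  // graph, e.g. (a sketch; member names as in
+  // MediaEngineWebRTCMicrophoneSource above):
+  //
+  //   mInputProcessing = new AudioInputProcessing(
+  //     mDeviceMaxChannelCount, mStream, mTrackID, mPrincipal);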
+
+  void Pull(const RefPtr<const AllocationHandle>& aHandle,
+            const RefPtr<SourceMediaStream>& aStream,
+            TrackID aTrackID,
+            StreamTime aDesiredTime,
+            const PrincipalHandle& aPrincipalHandle);
+
+  void NotifyOutputData(MediaStreamGraphImpl* aGraph,
+                        AudioDataValue* aBuffer,
+                        size_t aFrames,
+                        TrackRate aRate,
+                        uint32_t aChannels) override;
+  void NotifyInputData(MediaStreamGraphImpl* aGraph,
+                       const AudioDataValue* aBuffer,
+                       size_t aFrames,
+                       TrackRate aRate,
+                       uint32_t aChannels) override;
+
+  void Start();
+  void Stop();
+
+  void DeviceChanged(MediaStreamGraphImpl* aGraph) override;
+
+  uint32_t RequestedInputChannelCount(MediaStreamGraphImpl* aGraph) override
+  {
+    return GetRequestedInputChannelCount(aGraph);
+  }
+
+  void Disconnect(MediaStreamGraphImpl* aGraph) override;
+
   template<typename T>
-  void InsertInGraph(const T* aBuffer,
-                     size_t aFrames,
-                     uint32_t aChannels);
+  void InsertInGraph(const T* aBuffer, size_t aFrames, uint32_t aChannels);
 
   void PacketizeAndProcess(MediaStreamGraphImpl* aGraph,
                            const AudioDataValue* aBuffer,
                            size_t aFrames,
                            TrackRate aRate,
                            uint32_t aChannels);
 
-
-  // Graph thread only.
   void SetPassThrough(bool aPassThrough);
-  // Graph thread only.
   uint32_t GetRequestedInputChannelCount(MediaStreamGraphImpl* aGraphImpl);
-  // Graph thread only.
   void SetRequestedInputChannelCount(uint32_t aRequestedInputChannelCount);
   // This is true when all processing is disabled, in which case we can skip
   // packetization, resampling, and other processing passes.
-  // Graph thread only.
   bool PassThrough(MediaStreamGraphImpl* aGraphImpl) const;
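+  // When PassThrough() is true, NotifyInputData can hand the input samples
+  // straight to InsertInGraph instead of going through PacketizeAndProcess;
+  // a simplification of the actual code path:
+  //
+  //   if (PassThrough(aGraph)) {
+  //     InsertInGraph<AudioDataValue>(aBuffer, aFrames, aChannels);
+  //   } else {
+  //     PacketizeAndProcess(aGraph, aBuffer, aFrames, aRate, aChannels);
+  //   }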
 
-  // Those are written on the MediaManager thread, read on either the
-  // MediaManager thread or the MSG thread. Guarded by mMutex.
-  RefPtr<AllocationHandle> mHandle;
-  RefPtr<SourceMediaStream> mStream;
-  TrackID mTrackID = TRACK_NONE;
-  PrincipalHandle mPrincipal = PRINCIPAL_HANDLE_NONE;
-  bool mEnabled = false;
+  // These methods allow changing the APM options, enabling or disabling
+  // processing steps.
+  void UpdateAECSettingsIfNeeded(bool aEnable, webrtc::EcModes aMode);
+  void UpdateAGCSettingsIfNeeded(bool aEnable, webrtc::AgcModes aMode);
+  void UpdateNSSettingsIfNeeded(bool aEnable, webrtc::NsModes aMode);
+  void UpdateAPMExtraOptions(bool aExtendedFilter, bool aDelayAgnostic);
 
-  // Set on construction and then immutable. Used on the MediaManager thread.
-  const RefPtr<AudioDeviceInfo> mDeviceInfo;
-  // Those four members are set on construction, on the MediaManager thread.
-  const bool mDelayAgnostic;
-  const bool mExtendedFilter;
-  const nsString mDeviceName;
-  const nsCString mDeviceUUID;
-  // The current settings for the underlying device.
-  // Constructed on the MediaManager thread, and then only ever accessed on the
-  // main thread.
-  const nsMainThreadPtrHandle<media::Refcountable<dom::MediaTrackSettings>> mSettings;
-  // To only update microphone when needed, we keep track of the prefs
-  // representing the currently applied settings for this source. This is the
-  // net result of the prefs across all allocations.
-  // Owning thread only.
-  MediaEnginePrefs mNetPrefs;
-
-  // Current state of the shared resource for this source. Written on the
-  // owning thread, read on either the owning thread or the MSG thread.
-  Atomic<MediaEngineSourceState> mState;
-  // This mutex must be held to access mAllocation (and its members) and
-  // modifying mListener.
-  Mutex mMutex;
-  // mListener is created on the MediaManager thread, and then sent to the MSG
-  // thread. On shutdown, we send this pointer to the MSG thread again, telling
-  // it to clean up.
-  RefPtr<WebRTCAudioDataListener> mListener;
-  // Created on the MediaManager thread, then used on the graph thread for
-  // processing, and on the MediaManager thread when setting parameters (this is
-  // thread safe).
+private:
+  ~AudioInputProcessing() = default;
+  RefPtr<SourceMediaStream> mStream;
+  // This implements the processing algorithm to apply to the input (e.g. a
+  // microphone). If all algorithms are disabled, this class is not used. This
+  // class only accepts audio chunks of 10ms. It has two inputs and one output:
+  // it is fed the speaker data and the microphone data, and it outputs
+  // processed input data.
   const UniquePtr<webrtc::AudioProcessing> mAudioProcessing;
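+  // Per 10ms packet, the two sides are fed roughly like this (a sketch; see
+  // the webrtc.org AudioProcessing interface for the exact signatures):
+  //
+  //   mAudioProcessing->ProcessReverseStream(...); // speaker (far-end) data
+  //   mAudioProcessing->ProcessStream(...);        // microphone data, in/out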
-  // Accessed from the GraphDriver thread except for deletion, at which point
-  // the GraphDriver thread does not touch those values.
+  // Packetizer used to feed 10ms packets to the input side of
+  // mAudioProcessing. Not used if the processing is bypassed.
   nsAutoPtr<AudioPacketizer<AudioDataValue, float>> mPacketizerInput;
+  // Packetizer used to feed 10ms packets to the output side of
+  // mAudioProcessing. Not used if the processing is bypassed.
   nsAutoPtr<AudioPacketizer<AudioDataValue, float>> mPacketizerOutput;
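+  // The 10ms re-chunking works roughly as follows (simplified; the real code
+  // also converts and deinterleaves the samples, and `packet` is an
+  // illustrative buffer name):
+  //
+  //   mPacketizerInput->Input(aBuffer, aFrames);
+  //   while (mPacketizerInput->PacketsAvailable()) {
+  //     mPacketizerInput->Output(packet); // exactly 10ms of audio
+  //     // ... feed the packet to mAudioProcessing ...
+  //   }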
   // The number of channels asked for by content, after clamping to the range
   // of legal channel counts for this particular device. This is the number of
   // channels of the input buffer passed as parameter in NotifyInputData.
-  // Initially set on MediaManger thread in the ctor, then only ever accessed on
-  // the MSG thread.
   uint32_t mRequestedInputChannelCount;
   // mSkipProcessing is true if none of the processing passes are enabled,
   // because of prefs or constraints. This allows simply copying the audio into
   // the MSG, skipping resampling and the whole webrtc.org code.
-  // This is read and written to only on the MSG thread.
   bool mSkipProcessing;
-  // All these are only used on the MSG thread.
-  // Stores the mixed audio output for the reverse-stream of the AEC.
+  // Stores the mixed audio output for the reverse-stream of the AEC (the
+  // speaker data).
   AlignedFloatBuffer mOutputBuffer;
-  // Stores the microphone audio, to be processed by the APM.
+  // Stores the input audio, to be processed by the APM.
   AlignedFloatBuffer mInputBuffer;
   // Stores the deinterleaved microphone audio.
   AlignedFloatBuffer mDeinterleavedBuffer;
   // Stores the mixed-down input audio.
   AlignedFloatBuffer mInputDownmixBuffer;
 #ifdef DEBUG
   // The MSGImpl::IterationEnd() of the last time we appended data from an
   // audio callback.
-  // Guarded by MediaEngineWebRTCMicrophoneSource::mMutex.
-  GraphTime mLastCallbackAppendTime = 0;
+  GraphTime mLastCallbackAppendTime;
 #endif
   // Set to false by Start(). Becomes true after the first time we append real
   // audio frames from the audio callback.
-  // Guarded by MediaEngineWebRTCMicrophoneSource::mMutex.
-  bool mLiveFramesAppended = false;
-
+  bool mLiveFramesAppended;
   // Set to false by Start(). Becomes true after the first time we append
   // silence *after* the first audio callback has appended real frames.
-  // Guarded by MediaEngineWebRTCMicrophoneSource::mMutex.
-  bool mLiveSilenceAppended = false;
+  bool mLiveSilenceAppended;
+  // Track ID on which the data is to be appended after processing.
+  TrackID mTrackID;
+  // Principal for the data that flows through this class.
+  PrincipalHandle mPrincipal;
+  // Whether or not this MediaEngine is enabled. If it's not enabled, it
+  // operates in "pull" mode, and we append silence only, releasing the audio
+  // input stream.
+  bool mEnabled;
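+  // When not enabled, Pull() only pads the track with silence, roughly
+  // (simplified):
+  //
+  //   AudioSegment silence;
+  //   silence.AppendNullData(delta);
+  //   mStream->AppendToTrack(mTrackID, &silence);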
 };
 
 
 class MediaEngineWebRTCAudioCaptureSource : public MediaEngineSource
 {
 public:
   explicit MediaEngineWebRTCAudioCaptureSource(const char* aUuid)
   {
@@ -315,17 +317,18 @@ public:
                        const nsString& aDeviceId,
                        const char** aOutBadConstraint) override;
 
   void Pull(const RefPtr<const AllocationHandle>& aHandle,
             const RefPtr<SourceMediaStream>& aStream,
             TrackID aTrackID,
             StreamTime aDesiredTime,
             const PrincipalHandle& aPrincipalHandle) override
-  {}
+  {
+  }
 
   dom::MediaSourceEnum GetMediaSource() const override
   {
     return dom::MediaSourceEnum::AudioCapture;
   }
 
   nsresult TakePhoto(MediaEnginePhotoCallback* aCallback) override
   {