Bug 1497254 - clang-format MediaEngineWebRTCAudio.{cpp,h} and MediaEnginePrefs.h. r=pehrsons
author Paul Adenot <paul@paul.cx>
Wed, 17 Oct 2018 13:05:59 +0000
changeset 500125 b4d7770b8661338c878166beca941eeb830f6041
parent 500124 233b7314da9d100272e895a8ce74b4490e557ac7
child 500127 00562394d243152bc7a729be27ee97efd47b7183
push id 1864
push user ffxbld-merge
push date Mon, 03 Dec 2018 15:51:40 +0000
reviewers pehrsons
bugs 1497254
milestone 64.0a1
Bug 1497254 - clang-format MediaEngineWebRTCAudio.{cpp,h} and MediaEnginePrefs.h. r=pehrsons

Differential Revision: https://phabricator.services.mozilla.com/D8959
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
dom/media/webrtc/MediaEngineWebRTCAudio.h
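
This changeset is a pure reformatting pass: the files named in the commit message were run through clang-format with the tree's Mozilla-style .clang-format, so the diff below only changes whitespace, line breaks, and brace/argument placement (80-column wrapping, return types on their own line, two-space continuation indents). As a minimal sketch, assuming clang-format is installed and the in-tree .clang-format at the repository root is picked up automatically, an equivalent local run would look like the following; in practice Mozilla developers would usually go through the ./mach clang-format wrapper, and the exact tool revision used for this commit is not recorded here:

  clang-format -i dom/media/webrtc/MediaEngineWebRTCAudio.cpp \
                  dom/media/webrtc/MediaEngineWebRTCAudio.h \
                  dom/media/webrtc/MediaEnginePrefs.h
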
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -37,19 +37,21 @@ static uint32_t sInputStreamsOpen = 0;
 #endif
 
 namespace mozilla {
 
 #ifdef LOG
 #undef LOG
 #endif
 
-LogModule* GetMediaManagerLog();
+LogModule*
+GetMediaManagerLog();
 #define LOG(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Debug, msg)
-#define LOG_FRAMES(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Verbose, msg)
+#define LOG_FRAMES(msg)                                                        \
+  MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Verbose, msg)
 
 /**
  * WebRTC Microphone MediaEngineSource.
  */
 
 MediaEngineWebRTCMicrophoneSource::MediaEngineWebRTCMicrophoneSource(
   RefPtr<AudioDeviceInfo> aInfo,
   const nsString& aDeviceName,
@@ -101,39 +103,40 @@ MediaEngineWebRTCMicrophoneSource::GetUU
 // GetBestFitnessDistance returns the best distance the capture device can offer
 // as a whole, given an accumulated number of ConstraintSets.
 // Ideal values are considered in the first ConstraintSet only.
 // Plain values are treated as Ideal in the first ConstraintSet.
 // Plain values are treated as Exact in subsequent ConstraintSets.
 // Infinity = UINT32_MAX e.g. device cannot satisfy accumulated ConstraintSets.
 // A finite result may be used to calculate this device's ranking as a choice.
 
-uint32_t MediaEngineWebRTCMicrophoneSource::GetBestFitnessDistance(
-    const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
-    const nsString& aDeviceId) const
+uint32_t
+MediaEngineWebRTCMicrophoneSource::GetBestFitnessDistance(
+  const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
+  const nsString& aDeviceId) const
 {
   uint32_t distance = 0;
 
   for (const auto* cs : aConstraintSets) {
-    distance = MediaConstraintsHelper::GetMinimumFitnessDistance(*cs, aDeviceId);
+    distance =
+      MediaConstraintsHelper::GetMinimumFitnessDistance(*cs, aDeviceId);
     break; // distance is read from first entry only
   }
   return distance;
 }
 
 nsresult
 MediaEngineWebRTCMicrophoneSource::EvaluateSettings(
-    const NormalizedConstraints& aConstraintsUpdate,
-    const MediaEnginePrefs& aInPrefs,
-    MediaEnginePrefs* aOutPrefs,
-    const char** aOutBadConstraint)
+  const NormalizedConstraints& aConstraintsUpdate,
+  const MediaEnginePrefs& aInPrefs,
+  MediaEnginePrefs* aOutPrefs,
+  const char** aOutBadConstraint)
 {
   AssertIsOnOwningThread();
 
-
   MediaEnginePrefs prefs = aInPrefs;
 
   FlattenedConstraints c(aConstraintsUpdate);
 
   prefs.mAecOn = c.mEchoCancellation.Get(aInPrefs.mAecOn);
   prefs.mAgcOn = c.mAutoGainControl.Get(aInPrefs.mAgcOn);
   prefs.mNoiseOn = c.mNoiseSuppression.Get(aInPrefs.mNoiseOn);
 
@@ -151,70 +154,74 @@ MediaEngineWebRTCMicrophoneSource::Evalu
   // A pref can force the channel count to use. If the pref has a value of zero
   // or lower, it has no effect.
   if (aInPrefs.mChannels <= 0) {
     prefs.mChannels = maxChannels;
   }
 
   // Get the number of channels asked for by content, and clamp it between the
   // pref and the maximum number of channels that the device supports.
-  prefs.mChannels = c.mChannelCount.Get(std::min(aInPrefs.mChannels,
-                                                     maxChannels));
+  prefs.mChannels =
+    c.mChannelCount.Get(std::min(aInPrefs.mChannels, maxChannels));
   prefs.mChannels = std::max(1, std::min(prefs.mChannels, maxChannels));
 
   LOG(("Audio config: aec: %d, agc: %d, noise: %d, channels: %d",
        prefs.mAecOn ? prefs.mAec : -1,
        prefs.mAgcOn ? prefs.mAgc : -1,
        prefs.mNoiseOn ? prefs.mNoise : -1,
        prefs.mChannels));
 
   *aOutPrefs = prefs;
 
   return NS_OK;
 }
 
 nsresult
-MediaEngineWebRTCMicrophoneSource::Reconfigure(const RefPtr<AllocationHandle>&,
-                                               const dom::MediaTrackConstraints& aConstraints,
-                                               const MediaEnginePrefs& aPrefs,
-                                               const nsString& /* aDeviceId */,
-                                               const char** aOutBadConstraint)
+MediaEngineWebRTCMicrophoneSource::Reconfigure(
+  const RefPtr<AllocationHandle>&,
+  const dom::MediaTrackConstraints& aConstraints,
+  const MediaEnginePrefs& aPrefs,
+  const nsString& /* aDeviceId */,
+  const char** aOutBadConstraint)
 {
   AssertIsOnOwningThread();
   MOZ_ASSERT(mStream);
 
   LOG(("Mic source %p Reconfigure ", this));
 
   NormalizedConstraints constraints(aConstraints);
   MediaEnginePrefs outputPrefs;
-  nsresult rv = EvaluateSettings(constraints, aPrefs, &outputPrefs,
-                                 aOutBadConstraint);
+  nsresult rv =
+    EvaluateSettings(constraints, aPrefs, &outputPrefs, aOutBadConstraint);
   if (NS_FAILED(rv)) {
     if (aOutBadConstraint) {
       return NS_ERROR_INVALID_ARG;
     }
 
     nsAutoCString name;
     GetErrorName(rv, name);
     LOG(("Mic source %p Reconfigure() failed unexpectedly. rv=%s",
-         this, name.Data()));
+         this,
+         name.Data()));
     Stop(nullptr);
     return NS_ERROR_UNEXPECTED;
   }
 
   ApplySettings(outputPrefs);
 
   return NS_OK;
 }
 
-void MediaEngineWebRTCMicrophoneSource::Pull(const RefPtr<const AllocationHandle>&,
-                                             const RefPtr<SourceMediaStream>& aStream,
-                                             TrackID aTrackID,
-                                             StreamTime aDesiredTime,
-                                             const PrincipalHandle& aPrincipalHandle)
+void
+MediaEngineWebRTCMicrophoneSource::Pull(
+  const RefPtr<const AllocationHandle>&,
+  const RefPtr<SourceMediaStream>& aStream,
+  TrackID aTrackID,
+  StreamTime aDesiredTime,
+  const PrincipalHandle& aPrincipalHandle)
 {
   // If pull is enabled, it means that the audio input is not open, and we
   // should fill it out with silence. This is the only method called on the
   // MSG thread.
   mInputProcessing->Pull(aStream, aTrackID, aDesiredTime, aPrincipalHandle);
 }
 
 void
@@ -222,17 +229,17 @@ MediaEngineWebRTCMicrophoneSource::Updat
   bool aEnable,
   webrtc::EcModes aMode)
 {
   AssertIsOnOwningThread();
 
   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
   RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
   NS_DispatchToMainThread(media::NewRunnableFrom(
-    [ that, graph = std::move(gripGraph), aEnable, aMode ]() mutable {
+    [that, graph = std::move(gripGraph), aEnable, aMode]() mutable {
       class Message : public ControlMessage
       {
       public:
         Message(AudioInputProcessing* aInputProcessing,
                 bool aEnable,
                 webrtc::EcModes aMode)
           : ControlMessage(nullptr)
           , mInputProcessing(aInputProcessing)
@@ -266,17 +273,17 @@ MediaEngineWebRTCMicrophoneSource::Updat
   bool aEnable,
   webrtc::AgcModes aMode)
 {
   AssertIsOnOwningThread();
 
   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
   RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
   NS_DispatchToMainThread(media::NewRunnableFrom(
-    [ that, graph = std::move(gripGraph), aEnable, aMode ]() mutable {
+    [that, graph = std::move(gripGraph), aEnable, aMode]() mutable {
       class Message : public ControlMessage
       {
       public:
         Message(AudioInputProcessing* aInputProcessing,
                 bool aEnable,
                 webrtc::AgcModes aMode)
           : ControlMessage(nullptr)
           , mInputProcessing(aInputProcessing)
@@ -310,17 +317,17 @@ MediaEngineWebRTCMicrophoneSource::Updat
   bool aEnable,
   webrtc::NsModes aMode)
 {
   AssertIsOnOwningThread();
 
   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
   RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
   NS_DispatchToMainThread(media::NewRunnableFrom(
-    [ that, graph = std::move(gripGraph), aEnable, aMode ]() mutable {
+    [that, graph = std::move(gripGraph), aEnable, aMode]() mutable {
       class Message : public ControlMessage
       {
       public:
         Message(AudioInputProcessing* aInputProcessing,
                 bool aEnable,
                 webrtc::NsModes aMode)
           : ControlMessage(nullptr)
           , mInputProcessing(aInputProcessing)
@@ -352,22 +359,20 @@ MediaEngineWebRTCMicrophoneSource::Updat
 void
 MediaEngineWebRTCMicrophoneSource::UpdateAPMExtraOptions(bool aExtendedFilter,
                                                          bool aDelayAgnostic)
 {
   AssertIsOnOwningThread();
 
   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
   RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
-  NS_DispatchToMainThread(media::NewRunnableFrom([
-    that,
-    graph = std::move(gripGraph),
-    aExtendedFilter,
-    aDelayAgnostic
-  ]() mutable {
+  NS_DispatchToMainThread(media::NewRunnableFrom([that,
+                                                  graph = std::move(gripGraph),
+                                                  aExtendedFilter,
+                                                  aDelayAgnostic]() mutable {
     class Message : public ControlMessage
     {
     public:
       Message(AudioInputProcessing* aInputProcessing,
               bool aExtendedFilter,
               bool aDelayAgnostic)
         : ControlMessage(nullptr)
         , mInputProcessing(aInputProcessing)
@@ -397,117 +402,126 @@ MediaEngineWebRTCMicrophoneSource::Updat
   }));
 }
 
 void
 MediaEngineWebRTCMicrophoneSource::ApplySettings(const MediaEnginePrefs& aPrefs)
 {
   AssertIsOnOwningThread();
 
-  MOZ_ASSERT(mStream,
+  MOZ_ASSERT(
+    mStream,
     "ApplySetting is to be called only after SetTrack has been called");
 
   if (mStream) {
-    UpdateAGCSettingsIfNeeded(aPrefs.mAgcOn, static_cast<AgcModes>(aPrefs.mAgc));
-    UpdateNSSettingsIfNeeded(aPrefs.mNoiseOn, static_cast<NsModes>(aPrefs.mNoise));
+    UpdateAGCSettingsIfNeeded(aPrefs.mAgcOn,
+                              static_cast<AgcModes>(aPrefs.mAgc));
+    UpdateNSSettingsIfNeeded(aPrefs.mNoiseOn,
+                             static_cast<NsModes>(aPrefs.mNoise));
     UpdateAECSettingsIfNeeded(aPrefs.mAecOn, static_cast<EcModes>(aPrefs.mAec));
 
     UpdateAPMExtraOptions(mExtendedFilter, mDelayAgnostic);
   }
 
   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
   RefPtr<MediaStreamGraphImpl> graphImpl = mStream->GraphImpl();
-  NS_DispatchToMainThread(media::NewRunnableFrom([that, graph = std::move(graphImpl), prefs = aPrefs]() mutable {
-    that->mSettings->mEchoCancellation.Value() = prefs.mAecOn;
-    that->mSettings->mAutoGainControl.Value() = prefs.mAgcOn;
-    that->mSettings->mNoiseSuppression.Value() = prefs.mNoiseOn;
-    that->mSettings->mChannelCount.Value() = prefs.mChannels;
+  NS_DispatchToMainThread(media::NewRunnableFrom(
+    [that, graph = std::move(graphImpl), prefs = aPrefs]() mutable {
+      that->mSettings->mEchoCancellation.Value() = prefs.mAecOn;
+      that->mSettings->mAutoGainControl.Value() = prefs.mAgcOn;
+      that->mSettings->mNoiseSuppression.Value() = prefs.mNoiseOn;
+      that->mSettings->mChannelCount.Value() = prefs.mChannels;
 
-    class Message : public ControlMessage {
-    public:
-      Message(AudioInputProcessing* aInputProcessing,
-              bool aPassThrough,
-              uint32_t aRequestedInputChannelCount)
-        : ControlMessage(nullptr)
-        , mInputProcessing(aInputProcessing)
-        , mPassThrough(aPassThrough)
-        , mRequestedInputChannelCount(aRequestedInputChannelCount)
-      {}
+      class Message : public ControlMessage
+      {
+      public:
+        Message(AudioInputProcessing* aInputProcessing,
+                bool aPassThrough,
+                uint32_t aRequestedInputChannelCount)
+          : ControlMessage(nullptr)
+          , mInputProcessing(aInputProcessing)
+          , mPassThrough(aPassThrough)
+          , mRequestedInputChannelCount(aRequestedInputChannelCount)
+        {
+        }
 
-      void Run() override
-      {
-        mInputProcessing->SetPassThrough(mPassThrough);
-        mInputProcessing->SetRequestedInputChannelCount(
-          mRequestedInputChannelCount);
+        void Run() override
+        {
+          mInputProcessing->SetPassThrough(mPassThrough);
+          mInputProcessing->SetRequestedInputChannelCount(
+            mRequestedInputChannelCount);
+        }
+
+      protected:
+        RefPtr<AudioInputProcessing> mInputProcessing;
+        bool mPassThrough;
+        uint32_t mRequestedInputChannelCount;
+      };
+
+      bool passThrough = !(prefs.mAecOn || prefs.mAgcOn || prefs.mNoiseOn);
+      if (graph) {
+        graph->AppendMessage(MakeUnique<Message>(
+          that->mInputProcessing, passThrough, prefs.mChannels));
       }
 
-    protected:
-      RefPtr<AudioInputProcessing> mInputProcessing;
-      bool mPassThrough;
-      uint32_t mRequestedInputChannelCount;
-    };
-
-    bool passThrough = !(prefs.mAecOn || prefs.mAgcOn || prefs.mNoiseOn);
-    if (graph) {
-      graph->AppendMessage(MakeUnique<Message>(
-        that->mInputProcessing, passThrough, prefs.mChannels));
-    }
-
-    return NS_OK;
-  }));
+      return NS_OK;
+    }));
 }
 
 nsresult
-MediaEngineWebRTCMicrophoneSource::Allocate(const dom::MediaTrackConstraints &aConstraints,
-                                            const MediaEnginePrefs& aPrefs,
-                                            const nsString& aDeviceId,
-                                            const ipc::PrincipalInfo& aPrincipalInfo,
-                                            AllocationHandle** aOutHandle,
-                                            const char** aOutBadConstraint)
+MediaEngineWebRTCMicrophoneSource::Allocate(
+  const dom::MediaTrackConstraints& aConstraints,
+  const MediaEnginePrefs& aPrefs,
+  const nsString& aDeviceId,
+  const ipc::PrincipalInfo& aPrincipalInfo,
+  AllocationHandle** aOutHandle,
+  const char** aOutBadConstraint)
 {
   AssertIsOnOwningThread();
   MOZ_ASSERT(aOutHandle);
 
   *aOutHandle = nullptr;
 
   mState = kAllocated;
 
   NormalizedConstraints normalized(aConstraints);
   MediaEnginePrefs outputPrefs;
-  nsresult rv = EvaluateSettings(normalized, aPrefs, &outputPrefs, aOutBadConstraint);
+  nsresult rv =
+    EvaluateSettings(normalized, aPrefs, &outputPrefs, aOutBadConstraint);
   if (NS_FAILED(rv)) {
     return rv;
   }
 
   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
-  NS_DispatchToMainThread(media::NewRunnableFrom([that, prefs = outputPrefs]() mutable {
-    that->mSettings->mEchoCancellation.Value() = prefs.mAecOn;
-    that->mSettings->mAutoGainControl.Value() = prefs.mAgcOn;
-    that->mSettings->mNoiseSuppression.Value() = prefs.mNoiseOn;
-    that->mSettings->mChannelCount.Value() = prefs.mChannels;
-    return NS_OK;
-  }));
+  NS_DispatchToMainThread(
+    media::NewRunnableFrom([that, prefs = outputPrefs]() mutable {
+      that->mSettings->mEchoCancellation.Value() = prefs.mAecOn;
+      that->mSettings->mAutoGainControl.Value() = prefs.mAgcOn;
+      that->mSettings->mNoiseSuppression.Value() = prefs.mNoiseOn;
+      that->mSettings->mChannelCount.Value() = prefs.mChannels;
+      return NS_OK;
+    }));
 
-  return  rv;
+  return rv;
 }
 
-
 nsresult
-MediaEngineWebRTCMicrophoneSource::Deallocate(const RefPtr<const AllocationHandle>&)
+MediaEngineWebRTCMicrophoneSource::Deallocate(
+  const RefPtr<const AllocationHandle>&)
 {
   AssertIsOnOwningThread();
 
   MOZ_ASSERT(mState == kStopped);
 
   class EndTrackMessage : public ControlMessage
   {
-    public:
-      EndTrackMessage(MediaStream* aStream,
-                      AudioInputProcessing* aAudioInputProcessing,
-                      TrackID aTrackID)
+  public:
+    EndTrackMessage(MediaStream* aStream,
+                    AudioInputProcessing* aAudioInputProcessing,
+                    TrackID aTrackID)
       : ControlMessage(aStream)
       , mInputProcessing(aAudioInputProcessing)
       , mTrackID(aTrackID)
     {
     }
 
     void Run() override
     {
@@ -519,60 +533,60 @@ MediaEngineWebRTCMicrophoneSource::Deall
     RefPtr<AudioInputProcessing> mInputProcessing;
     TrackID mTrackID;
   };
 
   if (mStream && IsTrackIDExplicit(mTrackID)) {
     RefPtr<MediaStream> sourceStream = mStream;
     RefPtr<MediaStreamGraphImpl> graphImpl = mStream->GraphImpl();
     RefPtr<AudioInputProcessing> inputProcessing = mInputProcessing;
-    NS_DispatchToMainThread(media::NewRunnableFrom(
-      [ graph = std::move(graphImpl),
-        stream = std::move(sourceStream),
-        audioInputProcessing = std::move(inputProcessing),
-        trackID = mTrackID]() mutable {
+    NS_DispatchToMainThread(
+      media::NewRunnableFrom([graph = std::move(graphImpl),
+                              stream = std::move(sourceStream),
+                              audioInputProcessing = std::move(inputProcessing),
+                              trackID = mTrackID]() mutable {
         if (graph) {
           graph->AppendMessage(
-              MakeUnique<EndTrackMessage>(stream, audioInputProcessing, trackID));
+            MakeUnique<EndTrackMessage>(stream, audioInputProcessing, trackID));
         }
         return NS_OK;
-      }
-    ));
+      }));
   }
 
   MOZ_ASSERT(mTrackID != TRACK_NONE, "Only deallocate once");
 
   // Reset all state. This is not strictly necessary, this instance will get
   // destroyed soon.
   mStream = nullptr;
   mTrackID = TRACK_NONE;
   mPrincipal = PRINCIPAL_HANDLE_NONE;
 
   // If empty, no callbacks to deliver data should be occuring
   MOZ_ASSERT(mState != kReleased, "Source not allocated");
   MOZ_ASSERT(mState != kStarted, "Source not stopped");
 
   mState = kReleased;
-  LOG(("Audio device %s deallocated", NS_ConvertUTF16toUTF8(mDeviceName).get()));
+  LOG(
+    ("Audio device %s deallocated", NS_ConvertUTF16toUTF8(mDeviceName).get()));
 
   return NS_OK;
 }
 
 nsresult
-MediaEngineWebRTCMicrophoneSource::SetTrack(const RefPtr<const AllocationHandle>&,
-                                            const RefPtr<SourceMediaStream>& aStream,
-                                            TrackID aTrackID,
-                                            const PrincipalHandle& aPrincipal)
+MediaEngineWebRTCMicrophoneSource::SetTrack(
+  const RefPtr<const AllocationHandle>&,
+  const RefPtr<SourceMediaStream>& aStream,
+  TrackID aTrackID,
+  const PrincipalHandle& aPrincipal)
 {
   AssertIsOnOwningThread();
   MOZ_ASSERT(aStream);
   MOZ_ASSERT(IsTrackIDExplicit(aTrackID));
 
-  if (mStream &&
-      mStream->Graph() != aStream->Graph()) {
+  if (mStream && mStream->Graph() != aStream->Graph()) {
     return NS_ERROR_NOT_AVAILABLE;
   }
 
   MOZ_ASSERT(!mStream);
   MOZ_ASSERT(mTrackID == TRACK_NONE);
   MOZ_ASSERT(mPrincipal == PRINCIPAL_HANDLE_NONE);
   mStream = aStream;
   mTrackID = aTrackID;
@@ -587,35 +601,35 @@ MediaEngineWebRTCMicrophoneSource::SetTr
                          SourceMediaStream::ADDTRACK_QUEUED);
 
   LOG(("Stream %p registered for microphone capture", aStream.get()));
   return NS_OK;
 }
 
 class StartStopMessage : public ControlMessage
 {
-  public:
-    enum StartStop
-    {
-      Start,
-      Stop
-    };
+public:
+  enum StartStop
+  {
+    Start,
+    Stop
+  };
 
-    StartStopMessage(AudioInputProcessing* aInputProcessing, StartStop aAction)
-      : ControlMessage(nullptr)
-      , mInputProcessing(aInputProcessing)
-      , mAction(aAction)
-    {
+  StartStopMessage(AudioInputProcessing* aInputProcessing, StartStop aAction)
+    : ControlMessage(nullptr)
+    , mInputProcessing(aInputProcessing)
+    , mAction(aAction)
+  {
   }
 
   void Run() override
   {
     if (mAction == StartStopMessage::Start) {
       mInputProcessing->Start();
-    } else if (mAction == StartStopMessage::Stop){
+    } else if (mAction == StartStopMessage::Stop) {
       mInputProcessing->Stop();
     } else {
       MOZ_CRASH("Invalid enum value");
     }
   }
 
 protected:
   RefPtr<AudioInputProcessing> mInputProcessing;
@@ -661,18 +675,17 @@ MediaEngineWebRTCMicrophoneSource::Start
   AssertIsOnOwningThread();
 
   mInputProcessing = new AudioInputProcessing(
     mDeviceMaxChannelCount, mStream, mTrackID, mPrincipal);
 
   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
   RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
   NS_DispatchToMainThread(media::NewRunnableFrom(
-    [ that, graph = std::move(gripGraph), deviceID ]() mutable {
-
+    [that, graph = std::move(gripGraph), deviceID]() mutable {
       if (graph) {
         graph->AppendMessage(MakeUnique<StartStopMessage>(
           that->mInputProcessing, StartStopMessage::Start));
       }
 
       that->mStream->OpenAudioInput(deviceID, that->mInputProcessing);
 
       return NS_OK;
@@ -694,24 +707,23 @@ MediaEngineWebRTCMicrophoneSource::Stop(
   MOZ_ASSERT(mStream, "SetTrack must have been called before ::Stop");
 
   if (mState == kStopped) {
     // Already stopped - this is allowed
     return NS_OK;
   }
 
 #ifdef MOZ_PULSEAUDIO
-    MOZ_ASSERT(sInputStreamsOpen > 0);
-    sInputStreamsOpen--;
+  MOZ_ASSERT(sInputStreamsOpen > 0);
+  sInputStreamsOpen--;
 #endif
   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
   RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
   NS_DispatchToMainThread(media::NewRunnableFrom(
-    [ that, graph = std::move(gripGraph), stream = mStream ]() mutable {
-
+    [that, graph = std::move(gripGraph), stream = mStream]() mutable {
       if (graph) {
         graph->AppendMessage(MakeUnique<StartStopMessage>(
           that->mInputProcessing, StartStopMessage::Stop));
       }
 
       CubebUtils::AudioDeviceID deviceID = that->mDeviceInfo->DeviceID();
       Maybe<CubebUtils::AudioDeviceID> id = Some(deviceID);
       stream->CloseAudioInput(id, that->mInputProcessing);
@@ -721,26 +733,28 @@ MediaEngineWebRTCMicrophoneSource::Stop(
 
   MOZ_ASSERT(mState == kStarted, "Should be started when stopping");
   mState = kStopped;
 
   return NS_OK;
 }
 
 void
-MediaEngineWebRTCMicrophoneSource::GetSettings(dom::MediaTrackSettings& aOutSettings) const
+MediaEngineWebRTCMicrophoneSource::GetSettings(
+  dom::MediaTrackSettings& aOutSettings) const
 {
   MOZ_ASSERT(NS_IsMainThread());
   aOutSettings = *mSettings;
 }
 
-AudioInputProcessing::AudioInputProcessing(uint32_t aMaxChannelCount,
-                                           RefPtr<SourceMediaStream> aStream,
-                                           TrackID aTrackID,
-                                           const PrincipalHandle& aPrincipalHandle)
+AudioInputProcessing::AudioInputProcessing(
+  uint32_t aMaxChannelCount,
+  RefPtr<SourceMediaStream> aStream,
+  TrackID aTrackID,
+  const PrincipalHandle& aPrincipalHandle)
   : mStream(std::move(aStream))
   , mAudioProcessing(AudioProcessing::Create())
   , mRequestedInputChannelCount(aMaxChannelCount)
   , mSkipProcessing(false)
   , mInputDownmixBuffer(MAX_SAMPLING_FREQ * MAX_CHANNELS / 100)
 #ifdef DEBUG
   , mLastCallbackAppendTime(0)
 #endif
@@ -965,34 +979,33 @@ AudioInputProcessing::Stop()
 }
 
 void
 AudioInputProcessing::Pull(const RefPtr<SourceMediaStream>& aStream,
                            TrackID aTrackID,
                            StreamTime aDesiredTime,
                            const PrincipalHandle& aPrincipalHandle)
 {
-  TRACE_AUDIO_CALLBACK_COMMENT("SourceMediaStream %p track %i",
-                               aStream.get(), aTrackID);
+  TRACE_AUDIO_CALLBACK_COMMENT(
+    "SourceMediaStream %p track %i", aStream.get(), aTrackID);
   StreamTime delta;
 
   if (mEnded) {
     return;
   }
 
   delta = aDesiredTime - aStream->GetEndOfAppendedData(aTrackID);
 
   if (delta < 0) {
     LOG_FRAMES(
       ("Not appending silence; %" PRId64 " frames already buffered", -delta));
     return;
   }
 
-  if (!mLiveFramesAppended ||
-      !mLiveSilenceAppended) {
+  if (!mLiveFramesAppended || !mLiveSilenceAppended) {
     // These are the iterations after starting or resuming audio capture.
     // Make sure there's at least one extra block buffered until audio
     // callbacks come in. We also allow appending silence one time after
     // audio callbacks have started, to cover the case where audio callbacks
     // start appending data immediately and there is no extra data buffered.
     delta += WEBAUDIO_BLOCK_SIZE;
 
     // If we're supposed to be packetizing but there's no packetizer yet,
@@ -1006,34 +1019,29 @@ AudioInputProcessing::Pull(const RefPtr<
     if (!PassThrough(aStream->GraphImpl()) && mPacketizerInput) {
       // Processing is active and is processed in chunks of 10ms through the
       // input packetizer. We allow for 10ms of silence on the track to
       // accomodate the buffering worst-case.
       delta += mPacketizerInput->PacketSize();
     }
   }
 
-  LOG_FRAMES(("Pulling %" PRId64 " frames of silence.",
-              delta));
+  LOG_FRAMES(("Pulling %" PRId64 " frames of silence.", delta));
 
   // This assertion fails when we append silence here in the same iteration
   // as there were real audio samples already appended by the audio callback.
   // Note that this is exempted until live samples and a subsequent chunk of
   // silence have been appended to the track. This will cover cases like:
   // - After Start(), there is silence (maybe multiple times) appended before
   //   the first audio callback.
   // - After Start(), there is real data (maybe multiple times) appended
   //   before the first graph iteration.
   // And other combinations of order of audio sample sources.
-  MOZ_ASSERT_IF(
-    mEnabled &&
-    mLiveFramesAppended &&
-    mLiveSilenceAppended,
-    aStream->GraphImpl()->IterationEnd() >
-    mLastCallbackAppendTime);
+  MOZ_ASSERT_IF(mEnabled && mLiveFramesAppended && mLiveSilenceAppended,
+                aStream->GraphImpl()->IterationEnd() > mLastCallbackAppendTime);
 
   if (mLiveFramesAppended) {
     mLiveSilenceAppended = true;
   }
 
   AudioSegment audio;
   audio.AppendNullData(delta);
   aStream->AppendToTrack(aTrackID, &audio);
@@ -1044,30 +1052,29 @@ AudioInputProcessing::NotifyOutputData(M
                                        AudioDataValue* aBuffer,
                                        size_t aFrames,
                                        TrackRate aRate,
                                        uint32_t aChannels)
 {
   MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
   MOZ_ASSERT(mEnabled);
 
-  if (!mPacketizerOutput ||
-      mPacketizerOutput->PacketSize() != aRate/100u ||
+  if (!mPacketizerOutput || mPacketizerOutput->PacketSize() != aRate / 100u ||
       mPacketizerOutput->Channels() != aChannels) {
     // It's ok to drop the audio still in the packetizer here: if this changes,
     // we changed devices or something.
     mPacketizerOutput =
-      new AudioPacketizer<AudioDataValue, float>(aRate/100, aChannels);
+      new AudioPacketizer<AudioDataValue, float>(aRate / 100, aChannels);
   }
 
   mPacketizerOutput->Input(aBuffer, aFrames);
 
   while (mPacketizerOutput->PacketsAvailable()) {
-    uint32_t samplesPerPacket = mPacketizerOutput->PacketSize() *
-                                mPacketizerOutput->Channels();
+    uint32_t samplesPerPacket =
+      mPacketizerOutput->PacketSize() * mPacketizerOutput->Channels();
     if (mOutputBuffer.Length() < samplesPerPacket) {
       mOutputBuffer.SetLength(samplesPerPacket);
     }
     if (mDeinterleavedBuffer.Length() < samplesPerPacket) {
       mDeinterleavedBuffer.SetLength(samplesPerPacket);
     }
     float* packet = mOutputBuffer.Data();
     mPacketizerOutput->Output(packet);
@@ -1075,23 +1082,22 @@ AudioInputProcessing::NotifyOutputData(M
     AutoTArray<float*, MAX_CHANNELS> deinterleavedPacketDataChannelPointers;
     float* interleavedFarend = nullptr;
     uint32_t channelCountFarend = 0;
     uint32_t framesPerPacketFarend = 0;
 
     // Downmix from aChannels to MAX_CHANNELS if needed. We always have floats
     // here, the packetized performed the conversion.
     if (aChannels > MAX_CHANNELS) {
-      AudioConverter converter(AudioConfig(aChannels, 0, AudioConfig::FORMAT_FLT),
-                               AudioConfig(MAX_CHANNELS, 0, AudioConfig::FORMAT_FLT));
+      AudioConverter converter(
+        AudioConfig(aChannels, 0, AudioConfig::FORMAT_FLT),
+        AudioConfig(MAX_CHANNELS, 0, AudioConfig::FORMAT_FLT));
       framesPerPacketFarend = mPacketizerOutput->PacketSize();
       framesPerPacketFarend =
-        converter.Process(mInputDownmixBuffer,
-                          packet,
-                          framesPerPacketFarend);
+        converter.Process(mInputDownmixBuffer, packet, framesPerPacketFarend);
       interleavedFarend = mInputDownmixBuffer.Data();
       channelCountFarend = MAX_CHANNELS;
       deinterleavedPacketDataChannelPointers.SetLength(MAX_CHANNELS);
     } else {
       interleavedFarend = packet;
       channelCountFarend = aChannels;
       framesPerPacketFarend = mPacketizerOutput->PacketSize();
       deinterleavedPacketDataChannelPointers.SetLength(aChannels);
@@ -1101,124 +1107,132 @@ AudioInputProcessing::NotifyOutputData(M
                (channelCountFarend == 1 || channelCountFarend == 2) &&
                framesPerPacketFarend);
 
     if (mInputBuffer.Length() < framesPerPacketFarend * channelCountFarend) {
       mInputBuffer.SetLength(framesPerPacketFarend * channelCountFarend);
     }
 
     size_t offset = 0;
-    for (size_t i = 0; i < deinterleavedPacketDataChannelPointers.Length(); ++i) {
+    for (size_t i = 0; i < deinterleavedPacketDataChannelPointers.Length();
+         ++i) {
       deinterleavedPacketDataChannelPointers[i] = mInputBuffer.Data() + offset;
       offset += framesPerPacketFarend;
     }
 
     // Deinterleave, prepare a channel pointers array, with enough storage for
     // the frames.
-    DeinterleaveAndConvertBuffer(interleavedFarend,
-                                 framesPerPacketFarend,
-                                 channelCountFarend,
-                                 deinterleavedPacketDataChannelPointers.Elements());
+    DeinterleaveAndConvertBuffer(
+      interleavedFarend,
+      framesPerPacketFarend,
+      channelCountFarend,
+      deinterleavedPacketDataChannelPointers.Elements());
 
     // Having the same config for input and output means we potentially save
     // some CPU.
     StreamConfig inputConfig(aRate, channelCountFarend, false);
     StreamConfig outputConfig = inputConfig;
 
     // Passing the same pointers here saves a copy inside this function.
-    DebugOnly<int> err =
-      mAudioProcessing->ProcessReverseStream(deinterleavedPacketDataChannelPointers.Elements(),
-                                             inputConfig,
-                                             outputConfig,
-                                             deinterleavedPacketDataChannelPointers.Elements());
+    DebugOnly<int> err = mAudioProcessing->ProcessReverseStream(
+      deinterleavedPacketDataChannelPointers.Elements(),
+      inputConfig,
+      outputConfig,
+      deinterleavedPacketDataChannelPointers.Elements());
 
     MOZ_ASSERT(!err, "Could not process the reverse stream.");
   }
 }
 
 // Only called if we're not in passthrough mode
 void
 AudioInputProcessing::PacketizeAndProcess(MediaStreamGraphImpl* aGraph,
                                           const AudioDataValue* aBuffer,
                                           size_t aFrames,
                                           TrackRate aRate,
                                           uint32_t aChannels)
 {
-  MOZ_ASSERT(!PassThrough(aGraph), "This should be bypassed when in PassThrough mode.");
+  MOZ_ASSERT(!PassThrough(aGraph),
+             "This should be bypassed when in PassThrough mode.");
   MOZ_ASSERT(mEnabled);
   size_t offset = 0;
 
-  if (!mPacketizerInput ||
-      mPacketizerInput->PacketSize() != aRate/100u ||
+  if (!mPacketizerInput || mPacketizerInput->PacketSize() != aRate / 100u ||
       mPacketizerInput->Channels() != aChannels) {
     // It's ok to drop the audio still in the packetizer here.
     mPacketizerInput =
-      new AudioPacketizer<AudioDataValue, float>(aRate/100, aChannels);
+      new AudioPacketizer<AudioDataValue, float>(aRate / 100, aChannels);
   }
 
   // Packetize our input data into 10ms chunks, deinterleave into planar channel
   // buffers, process, and append to the right MediaStreamTrack.
   mPacketizerInput->Input(aBuffer, static_cast<uint32_t>(aFrames));
 
   while (mPacketizerInput->PacketsAvailable()) {
-    uint32_t samplesPerPacket = mPacketizerInput->PacketSize() *
-      mPacketizerInput->Channels();
+    uint32_t samplesPerPacket =
+      mPacketizerInput->PacketSize() * mPacketizerInput->Channels();
     if (mInputBuffer.Length() < samplesPerPacket) {
       mInputBuffer.SetLength(samplesPerPacket);
     }
     if (mDeinterleavedBuffer.Length() < samplesPerPacket) {
       mDeinterleavedBuffer.SetLength(samplesPerPacket);
     }
     float* packet = mInputBuffer.Data();
     mPacketizerInput->Output(packet);
 
     // Deinterleave the input data
     // Prepare an array pointing to deinterleaved channels.
     AutoTArray<float*, 8> deinterleavedPacketizedInputDataChannelPointers;
     deinterleavedPacketizedInputDataChannelPointers.SetLength(aChannels);
     offset = 0;
-    for (size_t i = 0; i < deinterleavedPacketizedInputDataChannelPointers.Length(); ++i) {
-      deinterleavedPacketizedInputDataChannelPointers[i] = mDeinterleavedBuffer.Data() + offset;
+    for (size_t i = 0;
+         i < deinterleavedPacketizedInputDataChannelPointers.Length();
+         ++i) {
+      deinterleavedPacketizedInputDataChannelPointers[i] =
+        mDeinterleavedBuffer.Data() + offset;
       offset += mPacketizerInput->PacketSize();
     }
 
     // Deinterleave to mInputBuffer, pointed to by inputBufferChannelPointers.
-    Deinterleave(packet, mPacketizerInput->PacketSize(), aChannels,
-        deinterleavedPacketizedInputDataChannelPointers.Elements());
+    Deinterleave(packet,
+                 mPacketizerInput->PacketSize(),
+                 aChannels,
+                 deinterleavedPacketizedInputDataChannelPointers.Elements());
 
-    StreamConfig inputConfig(aRate,
-                             aChannels,
-                             false /* we don't use typing detection*/);
+    StreamConfig inputConfig(
+      aRate, aChannels, false /* we don't use typing detection*/);
     StreamConfig outputConfig = inputConfig;
 
     // Bug 1404965: Get the right delay here, it saves some work down the line.
     mAudioProcessing->set_stream_delay_ms(0);
 
     // Bug 1414837: find a way to not allocate here.
-    RefPtr<SharedBuffer> buffer =
-      SharedBuffer::Create(mPacketizerInput->PacketSize() * aChannels * sizeof(float));
+    RefPtr<SharedBuffer> buffer = SharedBuffer::Create(
+      mPacketizerInput->PacketSize() * aChannels * sizeof(float));
 
     // Prepare channel pointers to the SharedBuffer created above.
     AutoTArray<float*, 8> processedOutputChannelPointers;
     AutoTArray<const float*, 8> processedOutputChannelPointersConst;
     processedOutputChannelPointers.SetLength(aChannels);
     processedOutputChannelPointersConst.SetLength(aChannels);
 
     offset = 0;
     for (size_t i = 0; i < processedOutputChannelPointers.Length(); ++i) {
-      processedOutputChannelPointers[i] = static_cast<float*>(buffer->Data()) + offset;
-      processedOutputChannelPointersConst[i] = static_cast<float*>(buffer->Data()) + offset;
+      processedOutputChannelPointers[i] =
+        static_cast<float*>(buffer->Data()) + offset;
+      processedOutputChannelPointersConst[i] =
+        static_cast<float*>(buffer->Data()) + offset;
       offset += mPacketizerInput->PacketSize();
     }
 
-    mAudioProcessing->ProcessStream(deinterleavedPacketizedInputDataChannelPointers.Elements(),
-                                    inputConfig,
-                                    outputConfig,
-                                    processedOutputChannelPointers.Elements());
-
+    mAudioProcessing->ProcessStream(
+      deinterleavedPacketizedInputDataChannelPointers.Elements(),
+      inputConfig,
+      outputConfig,
+      processedOutputChannelPointers.Elements());
 
     AudioSegment segment;
     if (!mStream->GraphImpl()) {
       // The DOMMediaStream that owns mStream has been cleaned up
       // and MediaStream::DestroyImpl() has run in the MSG. This is fine and
       // can happen before the MediaManager thread gets to stop capture for
       // this MediaStream.
       continue;
@@ -1271,35 +1285,32 @@ AudioInputProcessing::InsertInGraph(cons
   AutoTArray<const T*, 8> channels;
   if (aChannels == 1) {
     PodCopy(static_cast<T*>(buffer->Data()), aBuffer, aFrames);
     channels.AppendElement(static_cast<T*>(buffer->Data()));
   } else {
     channels.SetLength(aChannels);
     AutoTArray<T*, 8> write_channels;
     write_channels.SetLength(aChannels);
-    T * samples = static_cast<T*>(buffer->Data());
+    T* samples = static_cast<T*>(buffer->Data());
 
     size_t offset = 0;
-    for(uint32_t i = 0; i < aChannels; ++i) {
+    for (uint32_t i = 0; i < aChannels; ++i) {
       channels[i] = write_channels[i] = samples + offset;
       offset += aFrames;
     }
 
-    DeinterleaveAndConvertBuffer(aBuffer,
-        aFrames,
-        aChannels,
-        write_channels.Elements());
+    DeinterleaveAndConvertBuffer(
+      aBuffer, aFrames, aChannels, write_channels.Elements());
   }
 
   LOG_FRAMES(("Appending %zu frames of raw audio", aFrames));
 
   MOZ_ASSERT(aChannels == channels.Length());
-  segment.AppendFrames(buffer.forget(), channels, aFrames,
-      mPrincipal);
+  segment.AppendFrames(buffer.forget(), channels, aFrames, mPrincipal);
 
   mStream->AppendToTrack(mTrackID, &segment);
 }
 
 // Called back on GraphDriver thread!
 // Note this can be called back after ::Shutdown()
 void
 AudioInputProcessing::NotifyInputData(MediaStreamGraphImpl* aGraph,
@@ -1318,36 +1329,35 @@ AudioInputProcessing::NotifyInputData(Me
   // processing.
   if (PassThrough(aGraph)) {
     InsertInGraph<AudioDataValue>(aBuffer, aFrames, aChannels);
   } else {
     PacketizeAndProcess(aGraph, aBuffer, aFrames, aRate, aChannels);
   }
 }
 
-#define ResetProcessingIfNeeded(_processing)                        \
-do {                                                                \
-  bool enabled = mAudioProcessing->_processing()->is_enabled();     \
-                                                                    \
-  if (enabled) {                                                    \
-    int rv = mAudioProcessing->_processing()->Enable(!enabled);     \
-    if (rv) {                                                       \
-      NS_WARNING("Could not reset the status of the "               \
-      #_processing " on device change.");                           \
-      return;                                                       \
-    }                                                               \
-    rv = mAudioProcessing->_processing()->Enable(enabled);          \
-    if (rv) {                                                       \
-      NS_WARNING("Could not reset the status of the "               \
-      #_processing " on device change.");                           \
-      return;                                                       \
-    }                                                               \
-                                                                    \
-  }                                                                 \
-}  while(0)
+#define ResetProcessingIfNeeded(_processing)                                   \
+  do {                                                                         \
+    bool enabled = mAudioProcessing->_processing()->is_enabled();              \
+                                                                               \
+    if (enabled) {                                                             \
+      int rv = mAudioProcessing->_processing()->Enable(!enabled);              \
+      if (rv) {                                                                \
+        NS_WARNING("Could not reset the status of the " #_processing           \
+                   " on device change.");                                      \
+        return;                                                                \
+      }                                                                        \
+      rv = mAudioProcessing->_processing()->Enable(enabled);                   \
+      if (rv) {                                                                \
+        NS_WARNING("Could not reset the status of the " #_processing           \
+                   " on device change.");                                      \
+        return;                                                                \
+      }                                                                        \
+    }                                                                          \
+  } while (0)
 
 void
 AudioInputProcessing::DeviceChanged(MediaStreamGraphImpl* aGraph)
 {
   MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
   // Reset some processing
   ResetProcessingIfNeeded(gain_control);
   ResetProcessingIfNeeded(echo_cancellation);
@@ -1382,53 +1392,55 @@ MediaEngineWebRTCAudioCaptureSource::Get
   uuid.ToProvidedString(uuidBuffer);
   asciiString.AssignASCII(uuidBuffer);
 
   // Remove {} and the null terminator
   return nsCString(Substring(asciiString, 1, NSID_LENGTH - 3));
 }
 
 nsresult
-MediaEngineWebRTCAudioCaptureSource::SetTrack(const RefPtr<const AllocationHandle>&,
-                                              const RefPtr<SourceMediaStream>& aStream,
-                                              TrackID aTrackID,
-                                              const PrincipalHandle& aPrincipalHandle)
+MediaEngineWebRTCAudioCaptureSource::SetTrack(
+  const RefPtr<const AllocationHandle>&,
+  const RefPtr<SourceMediaStream>& aStream,
+  TrackID aTrackID,
+  const PrincipalHandle& aPrincipalHandle)
 {
   AssertIsOnOwningThread();
   // Nothing to do here. aStream is a placeholder dummy and not exposed.
   return NS_OK;
 }
 
 nsresult
-MediaEngineWebRTCAudioCaptureSource::Start(const RefPtr<const AllocationHandle>&)
+MediaEngineWebRTCAudioCaptureSource::Start(
+  const RefPtr<const AllocationHandle>&)
 {
   AssertIsOnOwningThread();
   return NS_OK;
 }
 
 nsresult
 MediaEngineWebRTCAudioCaptureSource::Stop(const RefPtr<const AllocationHandle>&)
 {
   AssertIsOnOwningThread();
   return NS_OK;
 }
 
 nsresult
 MediaEngineWebRTCAudioCaptureSource::Reconfigure(
-    const RefPtr<AllocationHandle>&,
-    const dom::MediaTrackConstraints& aConstraints,
-    const MediaEnginePrefs &aPrefs,
-    const nsString& aDeviceId,
-    const char** aOutBadConstraint)
+  const RefPtr<AllocationHandle>&,
+  const dom::MediaTrackConstraints& aConstraints,
+  const MediaEnginePrefs& aPrefs,
+  const nsString& aDeviceId,
+  const char** aOutBadConstraint)
 {
   return NS_OK;
 }
 
 uint32_t
 MediaEngineWebRTCAudioCaptureSource::GetBestFitnessDistance(
-    const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
-    const nsString& aDeviceId) const
+  const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
+  const nsString& aDeviceId) const
 {
   // There is only one way of capturing audio for now, and it's always adequate.
   return 0;
 }
 
 }
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.h
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.h
@@ -30,25 +30,22 @@ class MediaEngineWebRTCMicrophoneSource 
 public:
   MediaEngineWebRTCMicrophoneSource(RefPtr<AudioDeviceInfo> aInfo,
                                     const nsString& name,
                                     const nsCString& uuid,
                                     uint32_t maxChannelCount,
                                     bool aDelayAgnostic,
                                     bool aExtendedFilter);
 
-  bool RequiresSharing() const override
-  {
-    return false;
-  }
+  bool RequiresSharing() const override { return false; }
 
   nsString GetName() const override;
   nsCString GetUUID() const override;
 
-  nsresult Allocate(const dom::MediaTrackConstraints &aConstraints,
+  nsresult Allocate(const dom::MediaTrackConstraints& aConstraints,
                     const MediaEnginePrefs& aPrefs,
                     const nsString& aDeviceId,
                     const ipc::PrincipalInfo& aPrincipalInfo,
                     AllocationHandle** aOutHandle,
                     const char** aOutBadConstraint) override;
   nsresult Deallocate(const RefPtr<const AllocationHandle>& aHandle) override;
   nsresult SetTrack(const RefPtr<const AllocationHandle>& aHandle,
                     const RefPtr<SourceMediaStream>& aStream,
@@ -128,17 +125,18 @@ private:
   const nsString mDeviceName;
   const nsCString mDeviceUUID;
 
   // The maximum number of channels that this device supports.
   const uint32_t mDeviceMaxChannelCount;
   // The current settings for the underlying device.
   // Constructed on the MediaManager thread, and then only ever accessed on the
   // main thread.
-  const nsMainThreadPtrHandle<media::Refcountable<dom::MediaTrackSettings>> mSettings;
+  const nsMainThreadPtrHandle<media::Refcountable<dom::MediaTrackSettings>>
+    mSettings;
 
   // Current state of the resource for this source.
   MediaEngineSourceState mState;
 
   // The SourecMediaStream on which to append data for this microphone. Set in
   // SetTrack as part of the initialization, and nulled in ::Deallocate.
   RefPtr<SourceMediaStream> mStream;
 
@@ -260,27 +258,24 @@ private:
   // Whether or not this MediaEngine is enabled. If it's not enabled, it
   // operates in "pull" mode, and we append silence only, releasing the audio
   // input stream.
   bool mEnabled;
   // Whether or not we've ended and removed the track in the SourceMediaStream
   bool mEnded;
 };
 
-
 class MediaEngineWebRTCAudioCaptureSource : public MediaEngineSource
 {
 public:
-  explicit MediaEngineWebRTCAudioCaptureSource(const char* aUuid)
-  {
-  }
+  explicit MediaEngineWebRTCAudioCaptureSource(const char* aUuid) {}
   nsString GetName() const override;
   nsCString GetUUID() const override;
-  nsresult Allocate(const dom::MediaTrackConstraints &aConstraints,
-                    const MediaEnginePrefs &aPrefs,
+  nsresult Allocate(const dom::MediaTrackConstraints& aConstraints,
+                    const MediaEnginePrefs& aPrefs,
                     const nsString& aDeviceId,
                     const ipc::PrincipalInfo& aPrincipalInfo,
                     AllocationHandle** aOutHandle,
                     const char** aOutBadConstraint) override
   {
     // Nothing to do here, everything is managed in MediaManager.cpp
     *aOutHandle = nullptr;
     return NS_OK;