Bug 1286096 - Wire up audio getSettings(). r=padenot,smaug
author Jan-Ivar Bruaroey <jib@mozilla.com>
Sat, 16 Jul 2016 15:33:54 -0400
changeset 347086 26085bd0d4289dd674d2da6cb52ec6a8fe37eef6
parent 347085 274a3c105eb5925b05251990c519f78b6d46f436
child 347087 4c84eea22241b1ab95b64fee3596484ebdf78d24
push id 6389
push user raliiev@mozilla.com
push date Mon, 19 Sep 2016 13:38:22 +0000
treeherder mozilla-beta@01d67bfe6c81
reviewers padenot, smaug
bugs 1286096
milestone 50.0a1
Bug 1286096 - Wire up audio getSettings(). r=padenot,smaug MozReview-Commit-ID: KH6xcAnd3DX
dom/media/MediaManager.cpp
dom/media/webrtc/MediaEngineWebRTC.h
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
dom/webidl/MediaTrackSettings.webidl
--- a/dom/media/MediaManager.cpp
+++ b/dom/media/MediaManager.cpp
@@ -356,16 +356,19 @@ public:
            mVideoDevice->GetMediaSource() == dom::MediaSourceEnum::Browser;
   }
 
   void GetSettings(dom::MediaTrackSettings& aOutSettings)
   {
     if (mVideoDevice) {
       mVideoDevice->GetSource()->GetSettings(aOutSettings);
     }
+    if (mAudioDevice) {
+      mAudioDevice->GetSource()->GetSettings(aOutSettings);
+    }
   }
 
   // implement in .cpp to avoid circular dependency with MediaOperationTask
   // Can be invoked from EITHER MainThread or MSG thread
   void Stop();
 
   void
   AudioConfig(bool aEchoOn, uint32_t aEcho,
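
Note: the two branches above fill disjoint members of the same MediaTrackSettings dictionary (video supplies width/height/frameRate, audio supplies the new boolean members added below), so the order of the calls does not matter. Because the new members are declared without defaults in the WebIDL, they come out as Optional<> fields in the generated C++ dictionary; a hypothetical consumer-side sketch, not part of this patch (ReportAec is an invented helper name):

  // Hypothetical sketch: audio-only members are Optional<> in the generated
  // dictionary, so check WasPassed() before reading Value().
  void ReportAec(const dom::MediaTrackSettings& aSettings)
  {
    if (aSettings.mEchoCancellation.WasPassed()) {
      bool aec = aSettings.mEchoCancellation.Value();
      (void)aec;  // e.g. hand the effective value to logging
    }
  }
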
--- a/dom/media/webrtc/MediaEngineWebRTC.h
+++ b/dom/media/webrtc/MediaEngineWebRTC.h
@@ -419,37 +419,17 @@ class MediaEngineWebRTCMicrophoneSource 
 {
   typedef MediaEngineAudioSource Super;
 public:
   MediaEngineWebRTCMicrophoneSource(nsIThread* aThread,
                                     webrtc::VoiceEngine* aVoiceEnginePtr,
                                     mozilla::AudioInput* aAudioInput,
                                     int aIndex,
                                     const char* name,
-                                    const char* uuid)
-    : MediaEngineAudioSource(kReleased)
-    , mVoiceEngine(aVoiceEnginePtr)
-    , mAudioInput(aAudioInput)
-    , mMonitor("WebRTCMic.Monitor")
-    , mThread(aThread)
-    , mCapIndex(aIndex)
-    , mChannel(-1)
-    , mStarted(false)
-    , mSampleFrequency(MediaEngine::DEFAULT_SAMPLE_RATE)
-    , mPlayoutDelay(0)
-    , mNullTransport(nullptr)
-    , mSkipProcessing(false)
-  {
-    MOZ_ASSERT(aVoiceEnginePtr);
-    MOZ_ASSERT(aAudioInput);
-    mDeviceName.Assign(NS_ConvertUTF8toUTF16(name));
-    mDeviceUUID.Assign(uuid);
-    mListener = new mozilla::WebRTCAudioDataListener(this);
-    // We'll init lazily as needed
-  }
+                                    const char* uuid);
 
   void GetName(nsAString& aName) const override;
   void GetUUID(nsACString& aUUID) const override;
 
   nsresult Deallocate(AllocationHandle* aHandle) override;
   nsresult Start(SourceMediaStream* aStream,
                  TrackID aID,
                  const PrincipalHandle& aPrincipalHandle) override;
@@ -509,16 +489,18 @@ protected:
 private:
   nsresult
   UpdateSingleSource(const AllocationHandle* aHandle,
                      const NormalizedConstraints& aNetConstraints,
                      const MediaEnginePrefs& aPrefs,
                      const nsString& aDeviceId,
                      const char** aOutBadConstraint) override;
 
+  void SetLastPrefs(const MediaEnginePrefs& aPrefs);
+
   // These allocate/configure and release the channel
   bool AllocChannel();
   void FreeChannel();
   // These start/stop VoEBase and associated interfaces
   bool InitEngine();
   void DeInitEngine();
 
   // This is true when all processing is disabled, we can skip
@@ -575,16 +557,19 @@ private:
 
   NullTransport *mNullTransport;
 
   nsTArray<int16_t> mInputBuffer;
   // mSkipProcessing is true if none of the processing passes are enabled,
   // because of prefs or constraints. This allows simply copying the audio into
   // the MSG, skipping resampling and the whole webrtc.org code.
   bool mSkipProcessing;
+
+  // To only update microphone when needed, we keep track of previous settings.
+  MediaEnginePrefs mLastPrefs;
 };
 
 class MediaEngineWebRTC : public MediaEngine
 {
   typedef MediaEngine Super;
 public:
   explicit MediaEngineWebRTC(MediaEnginePrefs& aPrefs);
 
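
Not visible in this excerpt are the microphone source's GetSettings() override and the dom::MediaTrackSettings member it copies from; the constructor changes below Construct() that member's audio fields. A minimal sketch of what such an override could look like under that assumption (mSettings kept main-thread-only):

  // Hypothetical sketch, assuming a cached `dom::MediaTrackSettings mSettings`
  // member that is only touched on the main thread (see SetLastPrefs below).
  void GetSettings(dom::MediaTrackSettings& aOutSettings) override
  {
    MOZ_ASSERT(NS_IsMainThread());
    aOutSettings = mSettings;
  }
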
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -178,16 +178,47 @@ AudioOutputObserver::InsertFarEnd(const 
         mPlayoutFifo->Push((int8_t *) mSaved); // takes ownership
         mSaved = nullptr;
         mSamplesSaved = 0;
       }
     }
   }
 }
 
+MediaEngineWebRTCMicrophoneSource::MediaEngineWebRTCMicrophoneSource(
+    nsIThread* aThread,
+    webrtc::VoiceEngine* aVoiceEnginePtr,
+    mozilla::AudioInput* aAudioInput,
+    int aIndex,
+    const char* name,
+    const char* uuid)
+  : MediaEngineAudioSource(kReleased)
+  , mVoiceEngine(aVoiceEnginePtr)
+  , mAudioInput(aAudioInput)
+  , mMonitor("WebRTCMic.Monitor")
+  , mThread(aThread)
+  , mCapIndex(aIndex)
+  , mChannel(-1)
+  , mStarted(false)
+  , mSampleFrequency(MediaEngine::DEFAULT_SAMPLE_RATE)
+  , mPlayoutDelay(0)
+  , mNullTransport(nullptr)
+  , mSkipProcessing(false)
+{
+  MOZ_ASSERT(aVoiceEnginePtr);
+  MOZ_ASSERT(aAudioInput);
+  mDeviceName.Assign(NS_ConvertUTF8toUTF16(name));
+  mDeviceUUID.Assign(uuid);
+  mListener = new mozilla::WebRTCAudioDataListener(this);
+  mSettings.mEchoCancellation.Construct(0);
+  mSettings.mMozAutoGainControl.Construct(0);
+  mSettings.mMozNoiseSuppression.Construct(0);
+  // We'll init lazily as needed
+}
+
 void
 MediaEngineWebRTCMicrophoneSource::GetName(nsAString& aName) const
 {
   aName.Assign(mDeviceName);
   return;
 }
 
 void
@@ -227,37 +258,43 @@ MediaEngineWebRTCMicrophoneSource::Resta
 {
   AssertIsOnOwningThread();
   MOZ_ASSERT(aHandle);
   NormalizedConstraints constraints(aConstraints);
   return ReevaluateAllocation(aHandle, &constraints, aPrefs, aDeviceId,
                               aOutBadConstraint);
 }
 
+bool operator == (const MediaEnginePrefs& a, const MediaEnginePrefs& b)
+{
+  return !memcmp(&a, &b, sizeof(MediaEnginePrefs));
+};
+
 nsresult
 MediaEngineWebRTCMicrophoneSource::UpdateSingleSource(
     const AllocationHandle* aHandle,
     const NormalizedConstraints& aNetConstraints,
     const MediaEnginePrefs& aPrefs,
     const nsString& aDeviceId,
     const char** aOutBadConstraint)
 {
   FlattenedConstraints c(aNetConstraints);
 
-  bool aec_on = c.mEchoCancellation.Get(aPrefs.mAecOn);
-  bool agc_on = c.mMozAutoGainControl.Get(aPrefs.mAgcOn);
-  bool noise_on = c.mMozNoiseSuppression.Get(aPrefs.mNoiseOn);
+  MediaEnginePrefs prefs = aPrefs;
+  prefs.mAecOn = c.mEchoCancellation.Get(prefs.mAecOn);
+  prefs.mAgcOn = c.mMozAutoGainControl.Get(prefs.mAgcOn);
+  prefs.mNoiseOn = c.mMozNoiseSuppression.Get(prefs.mNoiseOn);
 
   LOG(("Audio config: aec: %d, agc: %d, noise: %d, delay: %d",
-       aec_on ? aPrefs.mAec : -1,
-       agc_on ? aPrefs.mAgc : -1,
-       noise_on ? aPrefs.mNoise : -1,
-       aPrefs.mPlayoutDelay));
+       prefs.mAecOn ? prefs.mAec : -1,
+       prefs.mAgcOn ? prefs.mAgc : -1,
+       prefs.mNoiseOn ? prefs.mNoise : -1,
+       prefs.mPlayoutDelay));
 
-  mPlayoutDelay = aPrefs.mPlayoutDelay;
+  mPlayoutDelay = prefs.mPlayoutDelay;
 
   switch (mState) {
     case kReleased:
       MOZ_ASSERT(aHandle);
       if (sChannelsOpen == 0) {
         if (!InitEngine()) {
           LOG(("Audio engine is not initalized"));
           return NS_ERROR_FAILURE;
@@ -278,16 +315,19 @@ MediaEngineWebRTCMicrophoneSource::Updat
         return NS_ERROR_FAILURE;
       }
       sChannelsOpen++;
       mState = kAllocated;
       LOG(("Audio device %d allocated", mCapIndex));
       break;
 
     case kStarted:
+      if (prefs == mLastPrefs) {
+        return NS_OK;
+      }
       if (MOZ_LOG_TEST(GetMediaManagerLog(), LogLevel::Debug)) {
         MonitorAutoLock lock(mMonitor);
         if (mSources.IsEmpty()) {
           LOG(("Audio device %d reallocated", mCapIndex));
         } else {
           LOG(("Audio device %d allocated shared", mCapIndex));
         }
       }
@@ -297,40 +337,62 @@ MediaEngineWebRTCMicrophoneSource::Updat
       LOG(("Audio device %d %s in ignored state %d", mCapIndex,
            (aHandle? aHandle->mOrigin.get() : ""), mState));
       break;
   }
 
   if (sChannelsOpen > 0) {
     int error;
 
-    if (0 != (error = mVoEProcessing->SetEcStatus(aec_on, (webrtc::EcModes) aPrefs.mAec))) {
+    error = mVoEProcessing->SetEcStatus(prefs.mAecOn, (webrtc::EcModes)prefs.mAec);
+    if (error) {
       LOG(("%s Error setting Echo Status: %d ",__FUNCTION__, error));
       // Overhead of capturing all the time is very low (<0.1% of an audio only call)
-      if (aec_on) {
-        if (0 != (error = mVoEProcessing->SetEcMetricsStatus(true))) {
+      if (prefs.mAecOn) {
+        error = mVoEProcessing->SetEcMetricsStatus(true);
+        if (error) {
           LOG(("%s Error setting Echo Metrics: %d ",__FUNCTION__, error));
         }
       }
     }
-    if (0 != (error = mVoEProcessing->SetAgcStatus(agc_on, (webrtc::AgcModes) aPrefs.mAgc))) {
+    error = mVoEProcessing->SetAgcStatus(prefs.mAgcOn, (webrtc::AgcModes)prefs.mAgc);
+    if (error) {
       LOG(("%s Error setting AGC Status: %d ",__FUNCTION__, error));
     }
-    if (0 != (error = mVoEProcessing->SetNsStatus(noise_on, (webrtc::NsModes) aPrefs.mNoise))) {
+    error = mVoEProcessing->SetNsStatus(prefs.mNoiseOn, (webrtc::NsModes)prefs.mNoise);
+    if (error) {
       LOG(("%s Error setting NoiseSuppression Status: %d ",__FUNCTION__, error));
     }
   }
 
-  mSkipProcessing = !(aec_on || agc_on || noise_on);
+  mSkipProcessing = !(prefs.mAecOn || prefs.mAgcOn || prefs.mNoiseOn);
   if (mSkipProcessing) {
     mSampleFrequency = MediaEngine::USE_GRAPH_RATE;
   }
+  SetLastPrefs(prefs);
   return NS_OK;
 }
 
+void
+MediaEngineWebRTCMicrophoneSource::SetLastPrefs(
+    const MediaEnginePrefs& aPrefs)
+{
+  mLastPrefs = aPrefs;
+
+  RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
+
+  NS_DispatchToMainThread(media::NewRunnableFrom([this, that, aPrefs]() mutable {
+    mSettings.mEchoCancellation.Value() = aPrefs.mAecOn;
+    mSettings.mMozAutoGainControl.Value() = aPrefs.mAgcOn;
+    mSettings.mMozNoiseSuppression.Value() = aPrefs.mNoiseOn;
+    return NS_OK;
+  }));
+}
+
+
 nsresult
 MediaEngineWebRTCMicrophoneSource::Deallocate(AllocationHandle* aHandle)
 {
   AssertIsOnOwningThread();
 
   Super::Deallocate(aHandle);
 
   if (!mRegisteredHandles.Length()) {
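
Two notes on the hunks above. The memcmp-based operator== only gives memberwise-equality semantics if MediaEnginePrefs is trivially copyable and its bytes (including padding) are initialized consistently on both sides; a guard one could place next to it, purely as a hypothetical addition:

  #include <type_traits>

  static_assert(std::is_trivially_copyable<MediaEnginePrefs>::value,
                "memcmp-based operator== assumes a trivially copyable type");

SetLastPrefs() records the prefs on the calling thread and then dispatches a runnable to the main thread to update mSettings, since getSettings() is answered from main-thread JS; capturing `that` (a RefPtr) keeps the source alive until the runnable has run.
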
--- a/dom/webidl/MediaTrackSettings.webidl
+++ b/dom/webidl/MediaTrackSettings.webidl
@@ -8,16 +8,19 @@
  */
 
 dictionary MediaTrackSettings {
     long      width;
     long      height;
     double    frameRate;
     DOMString facingMode;
     DOMString deviceId;
+    boolean echoCancellation;
+    boolean mozNoiseSuppression;
+    boolean mozAutoGainControl;
 
     // Mozilla-specific extensions:
 
     // http://fluffy.github.io/w3c-screen-share/#screen-based-video-constraints
     // OBE by http://w3c.github.io/mediacapture-screen-share
 
     DOMString mediaSource;
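
Because the three new members are declared without default values, they are absent from a settings object unless explicitly Construct()ed, which is why the microphone source constructor pre-constructs them. On the C++ side the WebIDL compiler generates m-prefixed Optional<bool> fields; an illustrative writer-side sketch (not part of this patch):

  dom::MediaTrackSettings settings;
  settings.mEchoCancellation.Construct(true);      // boolean echoCancellation
  settings.mMozNoiseSuppression.Construct(false);  // boolean mozNoiseSuppression
  settings.mMozAutoGainControl.Construct(false);   // boolean mozAutoGainControl
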