Bug 1273206: Shut down all getUserMedia VoEBase channels when not in use r=padenot
author: Randell Jesup <rjesup@jesup.org>
Mon, 23 May 2016 10:22:47 -0400
changeset 339552 91c13e5235077d947cea0e262f13485247750029
parent 339551 c2b51f5ad84348a107cbf5e728f236e9e787b8ea
child 339553 16a18faa536392f4a263a1470f177bbeb338c324
push id: 1183
push user: raliiev@mozilla.com
push date: Mon, 05 Sep 2016 20:01:49 +0000
treeherder: mozilla-release@3148731bed45
reviewers: padenot
bugs: 1273206
milestone: 49.0a1
Bug 1273206: Shut down all getUserMedia VoEBase channels when not in use r=padenot MozReview-Commit-ID: KLGKNvwJpKg
dom/media/webrtc/MediaEngineWebRTC.h
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
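
The patch splits the old per-source Init() into two layers: a shared engine (VoEBase, VoEExternalMedia, VoENetwork, VoEAudioProcessing, now static members refcounted through sChannelsOpen) that is brought up on the first Allocate() and terminated when the last channel closes, and a per-source channel (AllocChannel()/FreeChannel()) that lives only while that microphone source is allocated. As a rough, standalone illustration of the refcounting pattern (not the actual Mozilla code; FakeEngine, MicSource and sEngine are hypothetical stand-ins for the VoE interfaces and the source class):

#include <cassert>
#include <cstdio>

// Stand-in for the shared webrtc::VoEBase/VoENetwork/... interfaces.
struct FakeEngine {
  FakeEngine()  { std::puts("engine started"); }
  ~FakeEngine() { std::puts("engine terminated"); }
};

class MicSource {
public:
  bool Allocate() {
    if (sChannelsOpen == 0 && !InitEngine()) {  // lazy, shared engine init
      return false;
    }
    if (!AllocChannel()) {                      // per-source channel
      if (sChannelsOpen == 0) {
        DeInitEngine();                         // roll back a half-started engine
      }
      return false;
    }
    ++sChannelsOpen;
    return true;
  }

  void Deallocate() {
    FreeChannel();
    assert(sChannelsOpen > 0);
    if (--sChannelsOpen == 0) {
      DeInitEngine();                           // last user shuts the engine down
    }
  }

private:
  bool InitEngine()   { sEngine = new FakeEngine(); return true; }
  void DeInitEngine() { delete sEngine; sEngine = nullptr; }
  bool AllocChannel() { mChannel = 1; return true; }
  void FreeChannel()  { mChannel = -1; }

  int mChannel = -1;
  static int sChannelsOpen;   // shared across every MicSource instance
  static FakeEngine* sEngine; // shared engine, alive while sChannelsOpen > 0
};

int MicSource::sChannelsOpen = 0;
FakeEngine* MicSource::sEngine = nullptr;

int main() {
  MicSource a, b;
  a.Allocate();   // starts the engine, opens a channel for a
  b.Allocate();   // reuses the running engine
  a.Deallocate(); // engine stays up: b still has a channel
  b.Deallocate(); // last channel closed, engine terminated
}

The key invariant is that DeInitEngine() only runs when sChannelsOpen drops to zero, so one getUserMedia capture ending no longer terminates VoEBase out from under another active capture.
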
--- a/dom/media/webrtc/MediaEngineWebRTC.h
+++ b/dom/media/webrtc/MediaEngineWebRTC.h
@@ -425,32 +425,31 @@ public:
     : MediaEngineAudioSource(kReleased)
     , mVoiceEngine(aVoiceEnginePtr)
     , mAudioInput(aAudioInput)
     , mMonitor("WebRTCMic.Monitor")
     , mThread(aThread)
     , mCapIndex(aIndex)
     , mChannel(-1)
     , mNrAllocations(0)
-    , mInitDone(false)
     , mStarted(false)
     , mSampleFrequency(MediaEngine::DEFAULT_SAMPLE_RATE)
     , mEchoOn(false), mAgcOn(false), mNoiseOn(false)
     , mEchoCancel(webrtc::kEcDefault)
     , mAGC(webrtc::kAgcDefault)
     , mNoiseSuppress(webrtc::kNsDefault)
     , mPlayoutDelay(0)
     , mNullTransport(nullptr)
     , mInputBufferLen(0) {
     MOZ_ASSERT(aVoiceEnginePtr);
     MOZ_ASSERT(aAudioInput);
     mDeviceName.Assign(NS_ConvertUTF8toUTF16(name));
     mDeviceUUID.Assign(uuid);
     mListener = new mozilla::WebRTCAudioDataListener(this);
-    Init();
+    // We'll init lazily as needed
   }
 
   void GetName(nsAString& aName) override;
   void GetUUID(nsACString& aUUID) override;
 
   nsresult Allocate(const dom::MediaTrackConstraints& aConstraints,
                     const MediaEnginePrefs& aPrefs,
                     const nsString& aDeviceId,
@@ -508,44 +507,52 @@ public:
   NS_DECL_THREADSAFE_ISUPPORTS
 
 protected:
   ~MediaEngineWebRTCMicrophoneSource() {
     Shutdown();
   }
 
 private:
-  void Init();
+  // These allocate/configure and release the channel
+  bool AllocChannel();
+  void FreeChannel();
+  // These start/stop VoEBase and associated interfaces
+  bool InitEngine();
+  void DeInitEngine();
 
   webrtc::VoiceEngine* mVoiceEngine;
   RefPtr<mozilla::AudioInput> mAudioInput;
   RefPtr<WebRTCAudioDataListener> mListener;
 
-  ScopedCustomReleasePtr<webrtc::VoEBase> mVoEBase;
-  ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mVoERender;
-  ScopedCustomReleasePtr<webrtc::VoENetwork> mVoENetwork;
-  ScopedCustomReleasePtr<webrtc::VoEAudioProcessing> mVoEProcessing;
+  // Note: shared across all microphone sources - we don't want to Terminate()
+  // the VoEBase until there are no active captures
+  static int sChannelsOpen;
+  static ScopedCustomReleasePtr<webrtc::VoEBase> mVoEBase;
+  static ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mVoERender;
+  static ScopedCustomReleasePtr<webrtc::VoENetwork> mVoENetwork;
+  static ScopedCustomReleasePtr<webrtc::VoEAudioProcessing> mVoEProcessing;
 
   // accessed from the GraphDriver thread except for deletion
   nsAutoPtr<AudioPacketizer<AudioDataValue, int16_t>> mPacketizer;
   ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mVoERenderListener;
 
   // mMonitor protects mSources[] and mPrinicpalIds[] access/changes, and
   // transitions of mState from kStarted to kStopped (which are combined with
   // EndTrack()). mSources[] and mPrincipalHandles[] are accessed from webrtc
   // threads.
   Monitor mMonitor;
   nsTArray<RefPtr<SourceMediaStream>> mSources;
   nsTArray<PrincipalHandle> mPrincipalHandles; // Maps to mSources.
+
   nsCOMPtr<nsIThread> mThread;
   int mCapIndex;
   int mChannel;
-  int mNrAllocations; // When this becomes 0, we shut down HW
+  int mNrAllocations; // Per-channel; when this becomes 0, we shut down HW for the channel
   TrackID mTrackID;
-  bool mInitDone;
   bool mStarted;
 
   nsString mDeviceName;
   nsCString mDeviceUUID;
 
   uint32_t mSampleFrequency;
   bool mEchoOn, mAgcOn, mNoiseOn;
   webrtc::EcModes  mEchoCancel;
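
Because the VoE interface pointers and sChannelsOpen above are now static data members, the .cpp changes that follow begin by giving each of them a single out-of-class definition. A minimal sketch of that C++ rule, with hypothetical names (Widget, sCount) rather than the patch's own:

// Hypothetical example: a non-inline static data member is only declared in the
// class body and needs exactly one out-of-class definition in some .cpp file
// (a C++17 'inline' static member would remove the need for this).
struct Widget {
  static int sCount;      // declaration (e.g. in the header)
};

int Widget::sCount = 0;   // the single definition (e.g. in the .cpp)

int main() { return Widget::sCount; }
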
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -43,16 +43,22 @@ extern LogModule* GetMediaManagerLog();
  * Webrtc microphone source source.
  */
 NS_IMPL_ISUPPORTS0(MediaEngineWebRTCMicrophoneSource)
 NS_IMPL_ISUPPORTS0(MediaEngineWebRTCAudioCaptureSource)
 
 // XXX temp until MSG supports registration
 StaticRefPtr<AudioOutputObserver> gFarendObserver;
 
+int MediaEngineWebRTCMicrophoneSource::sChannelsOpen = 0;
+ScopedCustomReleasePtr<webrtc::VoEBase> MediaEngineWebRTCMicrophoneSource::mVoEBase;
+ScopedCustomReleasePtr<webrtc::VoEExternalMedia> MediaEngineWebRTCMicrophoneSource::mVoERender;
+ScopedCustomReleasePtr<webrtc::VoENetwork> MediaEngineWebRTCMicrophoneSource::mVoENetwork;
+ScopedCustomReleasePtr<webrtc::VoEAudioProcessing> MediaEngineWebRTCMicrophoneSource::mVoEProcessing;
+
 AudioOutputObserver::AudioOutputObserver()
   : mPlayoutFreq(0)
   , mPlayoutChannels(0)
   , mChunkSize(0)
   , mSaved(nullptr)
   , mSamplesSaved(0)
 {
   // Buffers of 10ms chunks
@@ -174,30 +180,24 @@ AudioOutputObserver::InsertFarEnd(const 
       }
     }
   }
 }
 
 void
 MediaEngineWebRTCMicrophoneSource::GetName(nsAString& aName)
 {
-  if (mInitDone) {
-    aName.Assign(mDeviceName);
-  }
-
+  aName.Assign(mDeviceName);
   return;
 }
 
 void
 MediaEngineWebRTCMicrophoneSource::GetUUID(nsACString& aUUID)
 {
-  if (mInitDone) {
-    aUUID.Assign(mDeviceUUID);
-  }
-
+  aUUID.Assign(mDeviceUUID);
   return;
 }
 
 // GetBestFitnessDistance returns the best distance the capture device can offer
 // as a whole, given an accumulated number of ConstraintSets.
 // Ideal values are considered in the first ConstraintSet only.
 // Plain values are treated as Ideal in the first ConstraintSet.
 // Plain values are treated as Exact in subsequent ConstraintSets.
@@ -220,26 +220,39 @@ uint32_t MediaEngineWebRTCMicrophoneSour
 nsresult
 MediaEngineWebRTCMicrophoneSource::Allocate(const dom::MediaTrackConstraints &aConstraints,
                                             const MediaEnginePrefs &aPrefs,
                                             const nsString& aDeviceId,
                                             const nsACString& aOrigin)
 {
   AssertIsOnOwningThread();
   if (mState == kReleased) {
-    if (mInitDone) {
-      if (mAudioInput->SetRecordingDevice(mCapIndex)) {
+    if (sChannelsOpen == 0) {
+      if (!InitEngine()) {
+        LOG(("Audio engine is not initialized"));
         return NS_ERROR_FAILURE;
       }
-      mState = kAllocated;
-      LOG(("Audio device %d allocated", mCapIndex));
-    } else {
+    }
+    if (!AllocChannel()) {
+      if (sChannelsOpen == 0) {
+        DeInitEngine();
+      }
       LOG(("Audio device is not initalized"));
       return NS_ERROR_FAILURE;
     }
+    if (mAudioInput->SetRecordingDevice(mCapIndex)) {
+      FreeChannel();
+      if (sChannelsOpen == 0) {
+        DeInitEngine();
+      }
+      return NS_ERROR_FAILURE;
+    }
+    sChannelsOpen++;
+    mState = kAllocated;
+    LOG(("Audio device %d allocated", mCapIndex));
   } else if (MOZ_LOG_TEST(GetMediaManagerLog(), LogLevel::Debug)) {
     MonitorAutoLock lock(mMonitor);
     if (mSources.IsEmpty()) {
       LOG(("Audio device %d reallocated", mCapIndex));
     } else {
       LOG(("Audio device %d allocated shared", mCapIndex));
     }
   }
@@ -286,17 +299,17 @@ MediaEngineWebRTCMicrophoneSource::Resta
   }
   if ((webrtc::NsModes) aPrefs.mNoise != webrtc::kNsUnchanged) {
     if (mNoiseSuppress != (webrtc::NsModes) aPrefs.mNoise) {
       update_noise = true;
       mNoiseSuppress = (webrtc::NsModes) aPrefs.mNoise;
     }
   }
 
-  if (mInitDone) {
+  if (sChannelsOpen > 0) {
     int error;
 
     if (update_echo &&
       0 != (error = mVoEProcessing->SetEcStatus(mEchoOn, (webrtc::EcModes) aPrefs.mAec))) {
       LOG(("%s Error setting Echo Status: %d ",__FUNCTION__, error));
       // Overhead of capturing all the time is very low (<0.1% of an audio only call)
       if (mEchoOn) {
         if (0 != (error = mVoEProcessing->SetEcMetricsStatus(true))) {
@@ -323,31 +336,36 @@ MediaEngineWebRTCMicrophoneSource::Deall
   --mNrAllocations;
   MOZ_ASSERT(mNrAllocations >= 0, "Double-deallocations are prohibited");
   if (mNrAllocations == 0) {
     // If empty, no callbacks to deliver data should be occuring
     if (mState != kStopped && mState != kAllocated) {
       return NS_ERROR_FAILURE;
     }
 
+    FreeChannel();
     mState = kReleased;
     LOG(("Audio device %d deallocated", mCapIndex));
+    MOZ_ASSERT(sChannelsOpen > 0);
+    if (--sChannelsOpen == 0) {
+      DeInitEngine();
+    }
   } else {
     LOG(("Audio device %d deallocated but still in use", mCapIndex));
   }
   return NS_OK;
 }
 
 nsresult
 MediaEngineWebRTCMicrophoneSource::Start(SourceMediaStream *aStream,
                                          TrackID aID,
                                          const PrincipalHandle& aPrincipalHandle)
 {
   AssertIsOnOwningThread();
-  if (!mInitDone || !aStream) {
+  if (sChannelsOpen == 0 || !aStream) {
     return NS_ERROR_FAILURE;
   }
 
   {
     MonitorAutoLock lock(mMonitor);
     mSources.AppendElement(aStream);
     mPrincipalHandles.AppendElement(aPrincipalHandle);
     MOZ_ASSERT(mSources.Length() == mPrincipalHandles.Length());
@@ -534,105 +552,133 @@ void
 MediaEngineWebRTCMicrophoneSource::DeviceChanged() {
   // Reset some processing
   bool enabled;
   ResetProcessingIfNeeded(Agc);
   ResetProcessingIfNeeded(Ec);
   ResetProcessingIfNeeded(Ns);
 }
 
-void
-MediaEngineWebRTCMicrophoneSource::Init()
+bool
+MediaEngineWebRTCMicrophoneSource::InitEngine()
 {
+  MOZ_ASSERT(!mVoEBase);
   mVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);
 
   mVoEBase->Init();
 
   mVoERender = webrtc::VoEExternalMedia::GetInterface(mVoiceEngine);
   if (!mVoERender) {
-    return;
+    return false;
   }
   mVoENetwork = webrtc::VoENetwork::GetInterface(mVoiceEngine);
   if (!mVoENetwork) {
-    return;
+    return false;
   }
 
   mVoEProcessing = webrtc::VoEAudioProcessing::GetInterface(mVoiceEngine);
   if (!mVoEProcessing) {
-    return;
+    return false;
   }
+  mNullTransport = new NullTransport();
+  return true;
+}
+
+bool
+MediaEngineWebRTCMicrophoneSource::AllocChannel()
+{
+  MOZ_ASSERT(mVoEBase);
 
   mChannel = mVoEBase->CreateChannel();
   if (mChannel < 0) {
-    return;
+    return false;
   }
-  mNullTransport = new NullTransport();
   if (mVoENetwork->RegisterExternalTransport(mChannel, *mNullTransport)) {
-    return;
+    return false;
   }
 
   mSampleFrequency = MediaEngine::DEFAULT_SAMPLE_RATE;
   LOG(("%s: sampling rate %u", __FUNCTION__, mSampleFrequency));
 
   // Check for availability.
   if (mAudioInput->SetRecordingDevice(mCapIndex)) {
-    return;
+    return false;
   }
 
 #ifndef MOZ_B2G
   // Because of the permission mechanism of B2G, we need to skip the status
   // check here.
   bool avail = false;
   mAudioInput->GetRecordingDeviceStatus(avail);
   if (!avail) {
-    return;
+    return false;
   }
 #endif // MOZ_B2G
 
   // Set "codec" to PCM, 32kHz on 1 channel
   ScopedCustomReleasePtr<webrtc::VoECodec> ptrVoECodec(webrtc::VoECodec::GetInterface(mVoiceEngine));
   if (!ptrVoECodec) {
-    return;
+    return false;
   }
 
   webrtc::CodecInst codec;
   strcpy(codec.plname, ENCODING);
   codec.channels = CHANNELS;
   MOZ_ASSERT(mSampleFrequency == 16000 || mSampleFrequency == 32000);
   codec.rate = SAMPLE_RATE(mSampleFrequency);
   codec.plfreq = mSampleFrequency;
   codec.pacsize = SAMPLE_LENGTH(mSampleFrequency);
   codec.pltype = 0; // Default payload type
 
   if (!ptrVoECodec->SetSendCodec(mChannel, codec)) {
-    mInitDone = true;
+    return true;
   }
+  return false;
+}
+
+// This shuts down the engine when no channel is open
+void
+MediaEngineWebRTCMicrophoneSource::DeInitEngine()
+{
+  if (mVoEBase) {
+    mVoEBase->Terminate();
+    delete mNullTransport;
+    mNullTransport = nullptr;
+
+    mVoEProcessing = nullptr;
+    mVoENetwork = nullptr;
+    mVoERender = nullptr;
+    mVoEBase = nullptr;
+  }
+}
+
+// This frees the per-source channel (if any) and marks the source as released
+void
+MediaEngineWebRTCMicrophoneSource::FreeChannel()
+{
+  if (mChannel != -1) {
+    if (mVoENetwork) {
+      mVoENetwork->DeRegisterExternalTransport(mChannel);
+    }
+    mVoEBase->DeleteChannel(mChannel);
+    mChannel = -1;
+  }
+  mState = kReleased;
 }
 
 void
 MediaEngineWebRTCMicrophoneSource::Shutdown()
 {
   if (mListener) {
     // breaks a cycle, since the WebRTCAudioDataListener has a RefPtr to us
     mListener->Shutdown();
     // Don't release the webrtc.org pointers yet until the Listener is (async) shutdown
     mListener = nullptr;
   }
 
-  if (!mInitDone) {
-    // duplicate these here in case we failed during Init()
-    if (mChannel != -1 && mVoENetwork) {
-      mVoENetwork->DeRegisterExternalTransport(mChannel);
-    }
-
-    delete mNullTransport;
-    mNullTransport = nullptr;
-    return;
-  }
-
   if (mState == kStarted) {
     SourceMediaStream *source;
     bool empty;
 
     while (1) {
       {
         MonitorAutoLock lock(mMonitor);
         empty = mSources.IsEmpty();
@@ -645,33 +691,20 @@ MediaEngineWebRTCMicrophoneSource::Shutd
     }
     MOZ_ASSERT(mState == kStopped);
   }
 
   if (mState == kAllocated || mState == kStopped) {
     Deallocate();
   }
 
-  mVoEBase->Terminate();
-  if (mChannel != -1) {
-    mVoENetwork->DeRegisterExternalTransport(mChannel);
-  }
-
-  delete mNullTransport;
-  mNullTransport = nullptr;
-
-  mVoEProcessing = nullptr;
-  mVoENetwork = nullptr;
-  mVoERender = nullptr;
-  mVoEBase = nullptr;
+  FreeChannel();
+  DeInitEngine();
 
   mAudioInput = nullptr;
-
-  mState = kReleased;
-  mInitDone = false;
 }
 
 typedef int16_t sample;
 
 void
 MediaEngineWebRTCMicrophoneSource::Process(int channel,
                                            webrtc::ProcessingTypes type,
                                            sample *audio10ms, int length,