Bug 1156472 - Part 2 - Rename MediaEngineWebRTCAudioSource to MediaEngineWebRTCMicrophoneSource. r=jesup
authorPaul Adenot <paul@paul.cx>
Fri, 24 Jul 2015 14:28:16 +0200
changeset 286416 778457f7bae7eef97125f8931c0a63f29612f4cc
parent 286415 90696b162030bcbe7761a293b561e3f930849cb6
child 286417 865d6aa9cc6732e074d656872ed85bd50fc7d716
push id5067
push userraliiev@mozilla.com
push dateMon, 21 Sep 2015 14:04:52 +0000
treeherdermozilla-beta@14221ffe5b2f [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersjesup
bugs1156472
milestone42.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1156472 - Part 2 - Rename MediaEngineWebRTCAudioSource to MediaEngineWebRTCMicrophoneSource. r=jesup There are now two different possible audio sources, so this was getting confusing.
dom/media/webrtc/MediaEngineWebRTC.cpp
dom/media/webrtc/MediaEngineWebRTC.h
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
--- a/dom/media/webrtc/MediaEngineWebRTC.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTC.cpp
@@ -353,25 +353,24 @@ MediaEngineWebRTC::EnumerateAudioDevices
     }
 
     if (uniqueId[0] == '\0') {
       // Mac and Linux don't set uniqueId!
       MOZ_ASSERT(sizeof(deviceName) == sizeof(uniqueId)); // total paranoia
       strcpy(uniqueId,deviceName); // safe given assert and initialization/error-check
     }
 
-    nsRefPtr<MediaEngineWebRTCAudioSource> aSource;
+    nsRefPtr<MediaEngineWebRTCMicrophoneSource> aSource;
     NS_ConvertUTF8toUTF16 uuid(uniqueId);
     if (mAudioSources.Get(uuid, getter_AddRefs(aSource))) {
       // We've already seen this device, just append.
       aASources->AppendElement(aSource.get());
     } else {
-      aSource = new MediaEngineWebRTCAudioSource(
-        mThread, mVoiceEngine, i, deviceName, uniqueId
-      );
+      aSource = new MediaEngineWebRTCMicrophoneSource(mThread, mVoiceEngine, i,
+                                                      deviceName, uniqueId);
       mAudioSources.Put(uuid, aSource); // Hashtable takes ownership.
       aASources->AppendElement(aSource);
     }
   }
 }
 
 static PLDHashOperator
 ClearVideoSource (const nsAString&, // unused
--- a/dom/media/webrtc/MediaEngineWebRTC.h
+++ b/dom/media/webrtc/MediaEngineWebRTC.h
@@ -128,23 +128,26 @@ private:
 
   int mMinFps; // Min rate we want to accept
   dom::MediaSourceEnum mMediaSource; // source of media (camera | application | screen)
 
   size_t NumCapabilities() override;
   void GetCapability(size_t aIndex, webrtc::CaptureCapability& aOut) override;
 };
 
-class MediaEngineWebRTCAudioSource : public MediaEngineAudioSource,
-                                     public webrtc::VoEMediaProcess,
-                                     private MediaConstraintsHelper
+class MediaEngineWebRTCMicrophoneSource : public MediaEngineAudioSource,
+                                          public webrtc::VoEMediaProcess,
+                                          private MediaConstraintsHelper
 {
 public:
-  MediaEngineWebRTCAudioSource(nsIThread* aThread, webrtc::VoiceEngine* aVoiceEnginePtr,
-                               int aIndex, const char* name, const char* uuid)
+  MediaEngineWebRTCMicrophoneSource(nsIThread* aThread,
+                                    webrtc::VoiceEngine* aVoiceEnginePtr,
+                                    int aIndex,
+                                    const char* name,
+                                    const char* uuid)
     : MediaEngineAudioSource(kReleased)
     , mVoiceEngine(aVoiceEnginePtr)
     , mMonitor("WebRTCMic.Monitor")
     , mThread(aThread)
     , mCapIndex(aIndex)
     , mChannel(-1)
     , mInitDone(false)
     , mStarted(false)
@@ -202,17 +205,17 @@ public:
                int16_t audio10ms[], int length,
                int samplingFreq, bool isStereo) override;
 
   NS_DECL_THREADSAFE_ISUPPORTS
 
   virtual void Shutdown() override;
 
 protected:
-  ~MediaEngineWebRTCAudioSource() { Shutdown(); }
+  ~MediaEngineWebRTCMicrophoneSource() { Shutdown(); }
 
 private:
   void Init();
 
   webrtc::VoiceEngine* mVoiceEngine;
   ScopedCustomReleasePtr<webrtc::VoEBase> mVoEBase;
   ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mVoERender;
   ScopedCustomReleasePtr<webrtc::VoENetwork> mVoENetwork;
@@ -289,14 +292,15 @@ private:
   bool mBrowserEngineInit;
   bool mWinEngineInit;
   bool mAppEngineInit;
   bool mHasTabVideoSource;
 
   // Store devices we've already seen in a hashtable for quick return.
   // Maps UUID to MediaEngineSource (one set for audio, one for video).
   nsRefPtrHashtable<nsStringHashKey, MediaEngineVideoSource> mVideoSources;
-  nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCAudioSource> mAudioSources;
+  nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCMicrophoneSource>
+  mAudioSources;
 };
 
 }
 
 #endif /* NSMEDIAENGINEWEBRTC_H_ */
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -36,19 +36,19 @@ namespace mozilla {
 #undef LOG
 #endif
 
 extern PRLogModuleInfo* GetMediaManagerLog();
 #define LOG(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Debug, msg)
 #define LOG_FRAMES(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Verbose, msg)
 
 /**
- * Webrtc audio source.
+ * Webrtc microphone source.
  */
-NS_IMPL_ISUPPORTS0(MediaEngineWebRTCAudioSource)
+NS_IMPL_ISUPPORTS0(MediaEngineWebRTCMicrophoneSource)
 
 // XXX temp until MSG supports registration
 StaticRefPtr<AudioOutputObserver> gFarendObserver;
 
 AudioOutputObserver::AudioOutputObserver()
   : mPlayoutFreq(0)
   , mPlayoutChannels(0)
   , mChunkSize(0)
@@ -172,40 +172,40 @@ AudioOutputObserver::InsertFarEnd(const 
         mSaved = nullptr;
         mSamplesSaved = 0;
       }
     }
   }
 }
 
 void
-MediaEngineWebRTCAudioSource::GetName(nsAString& aName)
+MediaEngineWebRTCMicrophoneSource::GetName(nsAString& aName)
 {
   if (mInitDone) {
     aName.Assign(mDeviceName);
   }
 
   return;
 }
 
 void
-MediaEngineWebRTCAudioSource::GetUUID(nsACString& aUUID)
+MediaEngineWebRTCMicrophoneSource::GetUUID(nsACString& aUUID)
 {
   if (mInitDone) {
     aUUID.Assign(mDeviceUUID);
   }
 
   return;
 }
 
 nsresult
-MediaEngineWebRTCAudioSource::Config(bool aEchoOn, uint32_t aEcho,
-                                     bool aAgcOn, uint32_t aAGC,
-                                     bool aNoiseOn, uint32_t aNoise,
-                                     int32_t aPlayoutDelay)
+MediaEngineWebRTCMicrophoneSource::Config(bool aEchoOn, uint32_t aEcho,
+                                          bool aAgcOn, uint32_t aAGC,
+                                          bool aNoiseOn, uint32_t aNoise,
+                                          int32_t aPlayoutDelay)
 {
   LOG(("Audio config: aec: %d, agc: %d, noise: %d",
        aEchoOn ? aEcho : -1,
        aAgcOn ? aAGC : -1,
        aNoiseOn ? aNoise : -1));
 
   bool update_echo = (mEchoOn != aEchoOn);
   bool update_agc = (mAgcOn != aAgcOn);
@@ -276,19 +276,19 @@ uint32_t MediaEngineWebRTCAudioSource::G
   for (const MediaTrackConstraintSet* cs : aConstraintSets) {
     distance = GetMinimumFitnessDistance(*cs, false, aDeviceId);
     break; // distance is read from first entry only
   }
   return distance;
 }
 
 nsresult
-MediaEngineWebRTCAudioSource::Allocate(const dom::MediaTrackConstraints &aConstraints,
-                                       const MediaEnginePrefs &aPrefs,
-                                       const nsString& aDeviceId)
+MediaEngineWebRTCMicrophoneSource::Allocate(const dom::MediaTrackConstraints &aConstraints,
+                                            const MediaEnginePrefs &aPrefs,
+                                            const nsString& aDeviceId)
 {
   if (mState == kReleased) {
     if (mInitDone) {
       ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw(webrtc::VoEHardware::GetInterface(mVoiceEngine));
       if (!ptrVoEHw || ptrVoEHw->SetRecordingDevice(mCapIndex)) {
         return NS_ERROR_FAILURE;
       }
       mState = kAllocated;
@@ -304,17 +304,17 @@ MediaEngineWebRTCAudioSource::Allocate(c
     } else {
       LOG(("Audio device %d allocated shared", mCapIndex));
     }
   }
   return NS_OK;
 }
 
 nsresult
-MediaEngineWebRTCAudioSource::Deallocate()
+MediaEngineWebRTCMicrophoneSource::Deallocate()
 {
   bool empty;
   {
     MonitorAutoLock lock(mMonitor);
     empty = mSources.IsEmpty();
   }
   if (empty) {
     // If empty, no callbacks to deliver data should be occuring
@@ -326,17 +326,18 @@ MediaEngineWebRTCAudioSource::Deallocate
     LOG(("Audio device %d deallocated", mCapIndex));
   } else {
     LOG(("Audio device %d deallocated but still in use", mCapIndex));
   }
   return NS_OK;
 }
 
 nsresult
-MediaEngineWebRTCAudioSource::Start(SourceMediaStream* aStream, TrackID aID)
+MediaEngineWebRTCMicrophoneSource::Start(SourceMediaStream *aStream,
+                                         TrackID aID)
 {
   if (!mInitDone || !aStream) {
     return NS_ERROR_FAILURE;
   }
 
   {
     MonitorAutoLock lock(mMonitor);
     mSources.AppendElement(aStream);
@@ -379,17 +380,17 @@ MediaEngineWebRTCAudioSource::Start(Sour
 
   // Attach external media processor, so this::Process will be called.
   mVoERender->RegisterExternalMediaProcessing(mChannel, webrtc::kRecordingPerChannel, *this);
 
   return NS_OK;
 }
 
 nsresult
-MediaEngineWebRTCAudioSource::Stop(SourceMediaStream *aSource, TrackID aID)
+MediaEngineWebRTCMicrophoneSource::Stop(SourceMediaStream *aSource, TrackID aID)
 {
   {
     MonitorAutoLock lock(mMonitor);
 
     if (!mSources.RemoveElement(aSource)) {
       // Already stopped - this is allowed
       return NS_OK;
     }
@@ -416,27 +417,27 @@ MediaEngineWebRTCAudioSource::Stop(Sourc
   }
   if (mVoEBase->StopReceive(mChannel)) {
     return NS_ERROR_FAILURE;
   }
   return NS_OK;
 }
 
 void
-MediaEngineWebRTCAudioSource::NotifyPull(MediaStreamGraph* aGraph,
-                                         SourceMediaStream *aSource,
-                                         TrackID aID,
-                                         StreamTime aDesiredTime)
+MediaEngineWebRTCMicrophoneSource::NotifyPull(MediaStreamGraph *aGraph,
+                                              SourceMediaStream *aSource,
+                                              TrackID aID,
+                                              StreamTime aDesiredTime)
 {
   // Ignore - we push audio data
   LOG_FRAMES(("NotifyPull, desired = %ld", (int64_t) aDesiredTime));
 }
 
 void
-MediaEngineWebRTCAudioSource::Init()
+MediaEngineWebRTCMicrophoneSource::Init()
 {
   mVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);
 
   mVoEBase->Init();
 
   mVoERender = webrtc::VoEExternalMedia::GetInterface(mVoiceEngine);
   if (!mVoERender) {
     return;
@@ -491,17 +492,17 @@ MediaEngineWebRTCAudioSource::Init()
   codec.pltype = 0; // Default payload type
 
   if (!ptrVoECodec->SetSendCodec(mChannel, codec)) {
     mInitDone = true;
   }
 }
 
 void
-MediaEngineWebRTCAudioSource::Shutdown()
+MediaEngineWebRTCMicrophoneSource::Shutdown()
 {
   if (!mInitDone) {
     // duplicate these here in case we failed during Init()
     if (mChannel != -1 && mVoENetwork) {
       mVoENetwork->DeRegisterExternalTransport(mChannel);
     }
 
     delete mNullTransport;
@@ -546,19 +547,20 @@ MediaEngineWebRTCAudioSource::Shutdown()
 
   mState = kReleased;
   mInitDone = false;
 }
 
 typedef int16_t sample;
 
 void
-MediaEngineWebRTCAudioSource::Process(int channel,
-  webrtc::ProcessingTypes type, sample* audio10ms,
-  int length, int samplingFreq, bool isStereo)
+MediaEngineWebRTCMicrophoneSource::Process(int channel,
+                                           webrtc::ProcessingTypes type,
+                                           sample *audio10ms, int length,
+                                           int samplingFreq, bool isStereo)
 {
   // On initial capture, throw away all far-end data except the most recent sample
   // since it's already irrelevant and we want to keep avoid confusing the AEC far-end
   // input code with "old" audio.
   if (!mStarted) {
     mStarted  = true;
     while (gFarendObserver->Size() > 1) {
       free(gFarendObserver->Pop()); // only call if size() > 0