Bug 971528 - Allocate given number of channels for WebRTC mic source. r=jesup
author: Alex Chronopoulos <achronop@gmail.com>
Fri, 02 Jun 2017 09:12:21 +0300
changeset 410080 73260f7c6dae11039cc4c738ac8714671d8cd1df
parent 410079 f655c216934b0cf02a2f4fe833a20071470b0623
child 410081 324ef6c1803078225fba97edf9ca56ae18e146d4
push id: 7391
push user: mtabara@mozilla.com
push date: Mon, 12 Jun 2017 13:08:53 +0000
treeherder: mozilla-beta@2191d7f87e2e [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: jesup
bugs: 971528
milestone: 55.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 971528 - Allocate given number of channels for WebRTC mic source. r=jesup MozReview-Commit-ID: 6o28Bn6AJ1A
dom/media/webrtc/MediaEngineWebRTC.h
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
--- a/dom/media/webrtc/MediaEngineWebRTC.h
+++ b/dom/media/webrtc/MediaEngineWebRTC.h
@@ -148,16 +148,17 @@ public:
   // Threadsafe because it's referenced from an MicrophoneSource, which can
   // had references to it on other threads.
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioInput)
 
   virtual int GetNumOfRecordingDevices(int& aDevices) = 0;
   virtual int GetRecordingDeviceName(int aIndex, char (&aStrNameUTF8)[128],
                                      char aStrGuidUTF8[128]) = 0;
   virtual int GetRecordingDeviceStatus(bool& aIsAvailable) = 0;
+  virtual int GetChannelCount(int aDeviceIndex, uint32_t& aChannels) = 0;
   virtual void StartRecording(SourceMediaStream *aStream, AudioDataListener *aListener) = 0;
   virtual void StopRecording(SourceMediaStream *aStream) = 0;
   virtual int SetRecordingDevice(int aIndex) = 0;
 
 protected:
   // Protected destructor, to discourage deletion outside of Release():
   virtual ~AudioInput() {}
 
@@ -259,16 +260,21 @@ public:
   int GetRecordingDeviceStatus(bool& aIsAvailable)
   {
     // With cubeb, we only expose devices of type CUBEB_DEVICE_TYPE_INPUT,
     // so unless it was removed, say it's available
     aIsAvailable = true;
     return 0;
   }
 
+  int GetChannelCount(int aDeviceIndex, uint32_t& aChannels)
+  {
+    return GetDeviceMaxChannels(aDeviceIndex, aChannels);
+  }
+
   static int GetDeviceMaxChannels(int aDeviceIndex, uint32_t& aChannels)
   {
 #ifdef MOZ_WIDGET_ANDROID
     aChannels = 1;
 #else
     int32_t devindex = DeviceIndex(aDeviceIndex);
     if (mDevices.count == 0 || devindex < 0) {
       return 1;
@@ -374,16 +380,22 @@ public:
     ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
     if (!ptrVoEHw)  {
       return 1;
     }
     ptrVoEHw->GetRecordingDeviceStatus(aIsAvailable);
     return 0;
   }
 
+  int GetChannelCount(int aDeviceIndex, uint32_t& aChannels)
+  {
+    aChannels = 1; // default to mono
+    return 0;
+  }
+
   void StartRecording(SourceMediaStream *aStream, AudioDataListener *aListener) {}
   void StopRecording(SourceMediaStream *aStream) {}
 
   int SetRecordingDevice(int aIndex)
   {
     ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
     ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
     if (!ptrVoEHw)  {
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -611,37 +611,38 @@ MediaEngineWebRTCMicrophoneSource::Inser
     // Bug 971528 - Support stereo capture in gUM
     MOZ_ASSERT(aChannels == 1 || aChannels == 2,
         "GraphDriver only supports mono and stereo audio for now");
 
     nsAutoPtr<AudioSegment> segment(new AudioSegment());
     RefPtr<SharedBuffer> buffer =
       SharedBuffer::Create(aFrames * aChannels * sizeof(T));
     AutoTArray<const T*, 8> channels;
-    channels.SetLength(aChannels);
     if (aChannels == 1) {
       PodCopy(static_cast<T*>(buffer->Data()), aBuffer, aFrames);
       channels.AppendElement(static_cast<T*>(buffer->Data()));
     } else {
+      channels.SetLength(aChannels);
       AutoTArray<T*, 8> write_channels;
       write_channels.SetLength(aChannels);
       T * samples = static_cast<T*>(buffer->Data());
 
       size_t offset = 0;
       for(uint32_t i = 0; i < aChannels; ++i) {
         channels[i] = write_channels[i] = samples + offset;
         offset += aFrames;
       }
 
       DeinterleaveAndConvertBuffer(aBuffer,
                                    aFrames,
                                    aChannels,
                                    write_channels.Elements());
     }
 
+    MOZ_ASSERT(aChannels == channels.Length());
     segment->AppendFrames(buffer.forget(), channels, aFrames,
                          mPrincipalHandles[i]);
     segment->GetStartTime(insertTime);
 
     mSources[i]->AppendToTrack(mTrackID, segment);
   }
 }
 
@@ -784,22 +785,26 @@ MediaEngineWebRTCMicrophoneSource::Alloc
         if (!avail) {
           if (sChannelsOpen == 0) {
             DeInitEngine();
           }
           return false;
         }
 #endif // MOZ_B2G
 
-        // Set "codec" to PCM, 32kHz on 1 channel
+        // Set "codec" to PCM, 32 kHz, using the device's channel count
         ScopedCustomReleasePtr<webrtc::VoECodec> ptrVoECodec(webrtc::VoECodec::GetInterface(mVoiceEngine));
         if (ptrVoECodec) {
           webrtc::CodecInst codec;
           strcpy(codec.plname, ENCODING);
           codec.channels = CHANNELS;
+          uint32_t channels = 0;
+          if (mAudioInput->GetChannelCount(mCapIndex, channels) == 0) {
+            codec.channels = channels;
+          }
           MOZ_ASSERT(mSampleFrequency == 16000 || mSampleFrequency == 32000);
           codec.rate = SAMPLE_RATE(mSampleFrequency);
           codec.plfreq = mSampleFrequency;
           codec.pacsize = SAMPLE_LENGTH(mSampleFrequency);
           codec.pltype = 0; // Default payload type
 
           if (!ptrVoECodec->SetSendCodec(mChannel, codec)) {
             mState = kAllocated;
@@ -891,18 +896,18 @@ MediaEngineWebRTCMicrophoneSource::Proce
       }
     }
   }
 
   MonitorAutoLock lock(mMonitor);
   if (mState != kStarted)
     return;
 
-  MOZ_ASSERT(!isStereo);
-  InsertInGraph<int16_t>(audio10ms, length, 1);
+  uint32_t channels = isStereo ? 2 : 1;
+  InsertInGraph<int16_t>(audio10ms, length, channels);
   return;
 }
 
 void
 MediaEngineWebRTCAudioCaptureSource::GetName(nsAString &aName) const
 {
   aName.AssignLiteral("AudioCapture");
 }