Bug 971528 - Allocate given number of channels for WebRTC mic source. r=jesup
☠☠ backed out by 9e3fba916ae2 ☠☠
author Alex Chronopoulos <achronop@gmail.com>
date Mon, 29 May 2017 13:48:06 +0300
changeset 409956 08a538f6350be53424c64b7d36f2be2c332dafbb
parent 409955 c37d2cdef062707ea3cecff26c3efea43317d336
child 409957 9e3fba916ae2f3773c49d32673dee1ddff60a33c
push id 7391
push user mtabara@mozilla.com
push date Mon, 12 Jun 2017 13:08:53 +0000
treeherder mozilla-beta@2191d7f87e2e
reviewers jesup
bugs 971528
milestone 55.0a1
Bug 971528 - Allocate given number of channels for WebRTC mic source. r=jesup

MozReview-Commit-ID: EVii9ACkjBu
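In short, the patch lets the microphone source ask the audio backend how many channels the selected capture device supports instead of hard-coding mono. A minimal sketch of the calling pattern (illustrative only; QueryInputChannels and kFallbackChannels are hypothetical helpers, not part of the patch):

    // Ask the AudioInput backend (from MediaEngineWebRTC.h) for the device's
    // channel count; fall back to mono when the query fails. GetChannelCount()
    // returns 0 on success, matching the error-code convention used in the patch.
    static const uint32_t kFallbackChannels = 1;

    uint32_t QueryInputChannels(AudioInput* aInput, int aDeviceIndex)
    {
      uint32_t channels = 0;
      if (aInput && aInput->GetChannelCount(aDeviceIndex, channels) == 0 &&
          channels > 0) {
        return channels;
      }
      return kFallbackChannels;
    }

Allocate() in the .cpp hunk below follows the same pattern: it keeps the CHANNELS default in codec.channels and only overwrites it when GetChannelCount() succeeds.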
dom/media/webrtc/MediaEngineWebRTC.h
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
--- a/dom/media/webrtc/MediaEngineWebRTC.h
+++ b/dom/media/webrtc/MediaEngineWebRTC.h
@@ -148,16 +148,17 @@ public:
   // Threadsafe because it's referenced from a MicrophoneSource, which can
   // hold references to it on other threads.
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioInput)
 
   virtual int GetNumOfRecordingDevices(int& aDevices) = 0;
   virtual int GetRecordingDeviceName(int aIndex, char (&aStrNameUTF8)[128],
                                      char aStrGuidUTF8[128]) = 0;
   virtual int GetRecordingDeviceStatus(bool& aIsAvailable) = 0;
+  virtual int GetChannelCount(int aDeviceIndex, uint32_t& aChannels) = 0;
   virtual void StartRecording(SourceMediaStream *aStream, AudioDataListener *aListener) = 0;
   virtual void StopRecording(SourceMediaStream *aStream) = 0;
   virtual int SetRecordingDevice(int aIndex) = 0;
 
 protected:
   // Protected destructor, to discourage deletion outside of Release():
   virtual ~AudioInput() {}
 
@@ -259,16 +260,21 @@ public:
   int GetRecordingDeviceStatus(bool& aIsAvailable)
   {
     // With cubeb, we only expose devices of type CUBEB_DEVICE_TYPE_INPUT,
     // so unless it was removed, say it's available
     aIsAvailable = true;
     return 0;
   }
 
+  int GetChannelCount(int aDeviceIndex, uint32_t& aChannels)
+  {
+    return GetDeviceMaxChannels(aDeviceIndex, aChannels);
+  }
+
   static int GetDeviceMaxChannels(int aDeviceIndex, uint32_t& aChannels)
   {
 #ifdef MOZ_WIDGET_ANDROID
     aChannels = 1;
 #else
     int32_t devindex = DeviceIndex(aDeviceIndex);
     if (!mDevices || devindex < 0) {
       return 1;
@@ -374,16 +380,22 @@ public:
     ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
     if (!ptrVoEHw)  {
       return 1;
     }
     ptrVoEHw->GetRecordingDeviceStatus(aIsAvailable);
     return 0;
   }
 
+  int GetChannelCount(int aDeviceIndex, uint32_t& aChannels)
+  {
+    aChannels = 1; // default to mono
+    return 0;
+  }
+
   void StartRecording(SourceMediaStream *aStream, AudioDataListener *aListener) {}
   void StopRecording(SourceMediaStream *aStream) {}
 
   int SetRecordingDevice(int aIndex)
   {
     ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
     ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
     if (!ptrVoEHw)  {
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -611,37 +611,38 @@ MediaEngineWebRTCMicrophoneSource::Inser
     // Bug 971528 - Support stereo capture in gUM
     MOZ_ASSERT(aChannels == 1 || aChannels == 2,
         "GraphDriver only supports mono and stereo audio for now");
 
     nsAutoPtr<AudioSegment> segment(new AudioSegment());
     RefPtr<SharedBuffer> buffer =
       SharedBuffer::Create(aFrames * aChannels * sizeof(T));
     AutoTArray<const T*, 8> channels;
-    channels.SetLength(aChannels);
     if (aChannels == 1) {
       PodCopy(static_cast<T*>(buffer->Data()), aBuffer, aFrames);
       channels.AppendElement(static_cast<T*>(buffer->Data()));
     } else {
+      channels.SetLength(aChannels);
       AutoTArray<T*, 8> write_channels;
       write_channels.SetLength(aChannels);
       T * samples = static_cast<T*>(buffer->Data());
 
       size_t offset = 0;
       for(uint32_t i = 0; i < aChannels; ++i) {
         channels[i] = write_channels[i] = samples + offset;
         offset += aFrames;
       }
 
       DeinterleaveAndConvertBuffer(aBuffer,
                                    aFrames,
                                    aChannels,
                                    write_channels.Elements());
     }
 
+    MOZ_ASSERT(aChannels == channels.Length());
     segment->AppendFrames(buffer.forget(), channels, aFrames,
                          mPrincipalHandles[i]);
     segment->GetStartTime(insertTime);
 
     mSources[i]->AppendToTrack(mTrackID, segment);
   }
 }
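Note on the InsertInGraph change above: the mono path fills the channel array with AppendElement, so pre-sizing it with SetLength(aChannels) in every case left the array one entry longer than the single plane actually written; sizing it only in the stereo branch keeps channels.Length() equal to aChannels, which the new MOZ_ASSERT verifies. The stereo branch lays the planes out back to back inside one shared buffer (channel i starts at samples + i * aFrames) and deinterleaves the input into them. A plain deinterleave over that layout looks roughly like this (DeinterleaveExample is illustrative; the patch relies on the existing DeinterleaveAndConvertBuffer):

    // Copy interleaved samples (L R L R ...) into per-channel planes.
    // aPlanes[c] must point at a buffer of at least aFrames samples.
    template <typename T>
    void DeinterleaveExample(const T* aInterleaved, size_t aFrames,
                             uint32_t aChannels, T** aPlanes)
    {
      for (size_t frame = 0; frame < aFrames; ++frame) {
        for (uint32_t c = 0; c < aChannels; ++c) {
          aPlanes[c][frame] = aInterleaved[frame * aChannels + c];
        }
      }
    }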
 
@@ -784,22 +785,26 @@ MediaEngineWebRTCMicrophoneSource::Alloc
         if (!avail) {
           if (sChannelsOpen == 0) {
             DeInitEngine();
           }
           return false;
         }
 #endif // MOZ_B2G
 
-        // Set "codec" to PCM, 32kHz on 1 channel
+        // Set "codec" to PCM, 32kHz on device's channels
         ScopedCustomReleasePtr<webrtc::VoECodec> ptrVoECodec(webrtc::VoECodec::GetInterface(mVoiceEngine));
         if (ptrVoECodec) {
           webrtc::CodecInst codec;
           strcpy(codec.plname, ENCODING);
           codec.channels = CHANNELS;
+          uint32_t channels = 0;
+          if (mAudioInput->GetChannelCount(mCapIndex, channels) == 0) {
+            codec.channels = channels;
+          }
           MOZ_ASSERT(mSampleFrequency == 16000 || mSampleFrequency == 32000);
           codec.rate = SAMPLE_RATE(mSampleFrequency);
           codec.plfreq = mSampleFrequency;
           codec.pacsize = SAMPLE_LENGTH(mSampleFrequency);
           codec.pltype = 0; // Default payload type
 
           if (!ptrVoECodec->SetSendCodec(mChannel, codec)) {
             mState = kAllocated;
@@ -891,18 +896,18 @@ MediaEngineWebRTCMicrophoneSource::Proce
       }
     }
   }
 
   MonitorAutoLock lock(mMonitor);
   if (mState != kStarted)
     return;
 
-  MOZ_ASSERT(!isStereo);
-  InsertInGraph<int16_t>(audio10ms, length, 1);
+  uint32_t channels = isStereo ? 2 : 1;
+  InsertInGraph<int16_t>(audio10ms, length, channels);
   return;
 }
 
 void
 MediaEngineWebRTCAudioCaptureSource::GetName(nsAString &aName) const
 {
   aName.AssignLiteral("AudioCapture");
 }