Bug 1396107: switch to using the same audio input method upstream webrtc.org does r=dminor,padenot
authorRandell Jesup <rjesup@jesup.org>
Tue, 12 Sep 2017 10:14:41 -0400
changeset 429793 f4561375608bbbca3f3882bb6007ec7582e90ac5
parent 429792 ba4c92f6f9f17f1b85e6d7a8ef439aa68ba9f934
child 429794 0cbbb59b5e61c5e1af0d42df72f56a9b736a454b
push id 7761
push user jlund@mozilla.com
push date Fri, 15 Sep 2017 00:19:52 +0000
treeherder mozilla-beta@c38455951db4 [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers dminor, padenot
bugs 1396107
milestone 57.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1396107: switch to using the same audio input method upstream webrtc.org does r=dminor,padenot
media/webrtc/signaling/gtest/mediaconduit_unittests.cpp
media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
media/webrtc/signaling/src/media-conduit/AudioConduit.h
media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
--- a/media/webrtc/signaling/gtest/mediaconduit_unittests.cpp
+++ b/media/webrtc/signaling/gtest/mediaconduit_unittests.cpp
@@ -238,17 +238,18 @@ void AudioSendAndReceive::GenerateAndRea
       fclose(outFile);
       return;
     }
 
     numSamplesReadFromInput += PLAYOUT_SAMPLE_LENGTH;
 
     mSession->SendAudioFrame(audioInput.get(),
                              PLAYOUT_SAMPLE_LENGTH,
-                             PLAYOUT_SAMPLE_FREQUENCY,10);
+                             PLAYOUT_SAMPLE_FREQUENCY,
+                             1, 10);
 
     PR_Sleep(PR_MillisecondsToInterval(10));
     mOtherSession->GetAudioFrame(audioOutput.get(), PLAYOUT_SAMPLE_FREQUENCY,
                                  10, sampleLengthDecoded);
     if(sampleLengthDecoded == 0)
     {
       cerr << " Zero length Sample " << endl;
     }
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
@@ -596,19 +596,20 @@ WebrtcAudioConduit::EnableAudioLevelExte
     return kMediaConduitUnknownError;
   }
 
   return kMediaConduitNoError;
 }
 
 MediaConduitErrorCode
 WebrtcAudioConduit::SendAudioFrame(const int16_t audio_data[],
-                                    int32_t lengthSamples,
-                                    int32_t samplingFreqHz,
-                                    int32_t capture_delay)
+                                   int32_t lengthSamples, // per channel
+                                   int32_t samplingFreqHz,
+                                   uint32_t channels,
+                                   int32_t capture_delay)
 {
   CSFLogDebug(logTag,  "%s ", __FUNCTION__);
   // Following checks need to be performed
   // 1. Non null audio buffer pointer,
   // 2. invalid sampling frequency -  less than 0 or unsupported ones
   // 3. Appropriate Sample Length for 10 ms audio-frame. This represents
   //    block size the VoiceEngine feeds into encoder for passed in audio-frame
   //    Ex: for 16000 sampling rate , valid block-length is 160
@@ -641,29 +642,21 @@ WebrtcAudioConduit::SendAudioFrame(const
 
   if (MOZ_LOG_TEST(GetLatencyLog(), LogLevel::Debug)) {
     struct Processing insert = { TimeStamp::Now(), 0 };
     mProcessing.AppendElement(insert);
   }
 
   capture_delay = mCaptureDelay;
   //Insert the samples
-  if(mPtrVoEXmedia->ExternalRecordingInsertData(audio_data,
-                                                lengthSamples,
-                                                samplingFreqHz,
-                                                capture_delay) == -1)
-  {
-    int error = mPtrVoEBase->LastError();
-    CSFLogError(logTag,  "%s Inserting audio data Failed %d", __FUNCTION__, error);
-    if(error == VE_RUNTIME_REC_ERROR)
-    {
-      return kMediaConduitRecordingError;
-    }
-    return kMediaConduitUnknownError;
-  }
+  mPtrVoEBase->audio_transport()->PushCaptureData(mChannel, audio_data,
+                                                  sizeof(audio_data[0])*8, // bits
+                                                  samplingFreqHz,
+                                                  channels,
+                                                  lengthSamples);
   // we should be good here
   return kMediaConduitNoError;
 }
 
 MediaConduitErrorCode
 WebrtcAudioConduit::GetAudioFrame(int16_t speechData[],
                                    int32_t samplingFreqHz,
                                    int32_t capture_delay,
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.h
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.h
@@ -120,16 +120,17 @@ public:
                                  in milliseconds.
    * NOTE: ConfigureSendMediaCodec() SHOULD be called before this function can be invoked
    *       This ensures the inserted audio-samples can be transmitted by the conduit
    *
    */
   virtual MediaConduitErrorCode SendAudioFrame(const int16_t speechData[],
                                                int32_t lengthSamples,
                                                int32_t samplingFreqHz,
+                                               uint32_t channels,
                                                int32_t capture_time) override;
 
   /**
    * Function to grab a decoded audio-sample from the media engine for rendering
    * / playoutof length 10 milliseconds.
    *
    * @param speechData [in]: Pointer to a array to which a 10ms frame of audio will be copied
    * @param samplingFreqHz [in]: Frequency of the sampling for playback in Hertz (16000, 32000,..)
--- a/media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
+++ b/media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
@@ -470,19 +470,20 @@ public:
    * @param samplingFreqHz [in]: Frequency/rate of the sampling in Hz ( 16000, 32000 ...)
    * @param capture_delay [in]:  Approx Delay from recording until it is delivered to VoiceEngine
                                  in milliseconds.
    * NOTE: ConfigureSendMediaCodec() SHOULD be called before this function can be invoked
    *       This ensures the inserted audio-samples can be transmitted by the conduit
    *
    */
   virtual MediaConduitErrorCode SendAudioFrame(const int16_t audioData[],
-                                                int32_t lengthSamples,
-                                                int32_t samplingFreqHz,
-                                                int32_t capture_delay) = 0;
+                                               int32_t lengthSamples,
+                                               int32_t samplingFreqHz,
+                                               uint32_t channels,
+                                               int32_t capture_delay) = 0;
 
   /**
    * Function to grab a decoded audio-sample from the media engine for rendering
    * / playoutof length 10 milliseconds.
    *
    * @param speechData [in]: Pointer to a array to which a 10ms frame of audio will be copied
    * @param samplingFreqHz [in]: Frequency of the sampling for playback in Hertz (16000, 32000,..)
    * @param capture_delay [in]: Estimated Time between reading of the samples to rendering/playback
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
@@ -514,20 +514,18 @@ public:
         packetizer_->Channels() != outputChannels) {
       // It's ok to drop the audio still in the packetizer here.
       packetizer_ = new AudioPacketizer<int16_t, int16_t>(audio_10ms, outputChannels);
     }
 
     packetizer_->Input(samples, chunk.mDuration);
 
     while (packetizer_->PacketsAvailable()) {
-      uint32_t samplesPerPacket = packetizer_->PacketSize() *
-                                  packetizer_->Channels();
       packetizer_->Output(packet_);
-      mConduit->SendAudioFrame(packet_, samplesPerPacket, rate, 0);
+      mConduit->SendAudioFrame(packet_, packetizer_->PacketSize(), rate, packetizer_->Channels(), 0);
     }
   }
 
   void QueueAudioChunk(TrackRate rate, AudioChunk& chunk, bool enabled)
   {
     RUN_ON_THREAD(mThread,
                   WrapRunnable(RefPtr<AudioProxyThread>(this),
                                &AudioProxyThread::InternalProcessAudioChunk,