Bug 1428392 - Rename the MediaEngineWebRTCMicrophoneSource packetizer to indicate it's packetizing the input data (microphone). r=pehrsons
author Paul Adenot <paul@paul.cx>
Fri, 05 Jan 2018 17:19:22 +0100
changeset 452709 209f16a94427450241c79352f3cc5b3cf930a5ff
parent 452708 71f7a27c283c983838803f8fa1df2cda35fb770b
child 452710 a638a0a5874c954fc15ce63ba9464c539db538e5
push id 1648
push user mtabara@mozilla.com
push date Thu, 01 Mar 2018 12:45:47 +0000
treeherder mozilla-release@cbb9688c2eeb
reviewers pehrsons
bugs 1428392
milestone 59.0a1
Bug 1428392 - Rename the MediaEngineWebRTCMicrophoneSource packetizer to indicate it's packetizing the input data (microphone). r=pehrsons MozReview-Commit-ID: AcjAeXdN8iA
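
For context: the packetizer's job is to turn the arbitrarily sized buffers delivered by the platform audio callback into the fixed 10 ms packets (aRate/100 frames per channel) that webrtc::AudioProcessing expects, and the rename marks it as sitting on the input (microphone) path, as opposed to the output path watched by mAudioOutputObserver. A minimal sketch of that buffering pattern, using hypothetical names rather than the real AudioPacketizer API from dom/media/AudioPacketizer.h:

// Minimal sketch of the input packetizer pattern: buffer interleaved
// microphone samples of arbitrary callback size and emit fixed 10 ms
// packets. Hypothetical class for illustration only.
#include <cstdint>
#include <cstring>
#include <vector>

class InputPacketizerSketch {
public:
  InputPacketizerSketch(uint32_t aRate, uint32_t aChannels)
    : mPacketSize(aRate / 100) // 10 ms worth of frames
    , mChannels(aChannels) {}

  // Append aFrames frames of interleaved samples.
  void Input(const float* aBuffer, uint32_t aFrames) {
    mBuffer.insert(mBuffer.end(), aBuffer, aBuffer + aFrames * mChannels);
  }

  bool PacketsAvailable() const {
    return mBuffer.size() >= static_cast<size_t>(mPacketSize) * mChannels;
  }

  // Copy one full packet out and drop it from the internal buffer.
  void Output(float* aOut) {
    const size_t samples = static_cast<size_t>(mPacketSize) * mChannels;
    std::memcpy(aOut, mBuffer.data(), samples * sizeof(float));
    mBuffer.erase(mBuffer.begin(), mBuffer.begin() + samples);
  }

  uint32_t PacketSize() const { return mPacketSize; }
  uint32_t Channels() const { return mChannels; }

private:
  const uint32_t mPacketSize;
  const uint32_t mChannels;
  std::vector<float> mBuffer; // interleaved samples awaiting packetization
};

Usage mirrors PacketizeAndProcess() in the diff below: the packetizer is recreated whenever the rate or channel count changes (dropping any audio still buffered), fed each callback buffer via Input(), and drained with Output() while PacketsAvailable().
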
dom/media/webrtc/MediaEngineWebRTC.h
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
--- a/dom/media/webrtc/MediaEngineWebRTC.h
+++ b/dom/media/webrtc/MediaEngineWebRTC.h
@@ -520,17 +520,17 @@ private:
 
   // Note: shared across all microphone sources
   static int sChannelsOpen;
 
   const UniquePtr<webrtc::AudioProcessing> mAudioProcessing;
   const RefPtr<AudioOutputObserver> mAudioOutputObserver;
 
   // accessed from the GraphDriver thread except for deletion
-  nsAutoPtr<AudioPacketizer<AudioDataValue, float>> mPacketizer;
+  nsAutoPtr<AudioPacketizer<AudioDataValue, float>> mPacketizerInput;
   ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mVoERenderListener;
 
   // mMonitor protects mSources[] and mPrincipalHandles[] access/changes, and
   // transitions of mState from kStarted to kStopped (which are combined with
   // EndTrack()). mSources[] and mPrincipalHandles[] are accessed from webrtc
   // threads.
   Monitor mMonitor;
   nsTArray<RefPtr<SourceMediaStream>> mSources;
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -709,21 +709,21 @@ MediaEngineWebRTCMicrophoneSource::Packe
                                                        const AudioDataValue* aBuffer,
                                                        size_t aFrames,
                                                        TrackRate aRate,
                                                        uint32_t aChannels)
 {
   MOZ_ASSERT(!PassThrough(), "This should be bypassed when in PassThrough mode.");
   size_t offset = 0;
 
-  if (!mPacketizer ||
-      mPacketizer->PacketSize() != aRate/100u ||
-      mPacketizer->Channels() != aChannels) {
+  if (!mPacketizerInput ||
+      mPacketizerInput->PacketSize() != aRate/100u ||
+      mPacketizerInput->Channels() != aChannels) {
     // It's ok to drop the audio still in the packetizer here.
-    mPacketizer =
+    mPacketizerInput =
       new AudioPacketizer<AudioDataValue, float>(aRate/100, aChannels);
   }
 
   // On initial capture, throw away all far-end data except the most recent
   // sample, since it's already irrelevant and we want to avoid confusing the
   // AEC far-end input code with "old" audio.
   if (!mStarted) {
     mStarted = true;
@@ -809,68 +809,68 @@ MediaEngineWebRTCMicrophoneSource::Packe
       MOZ_LOG(GetMediaManagerLog(), LogLevel::Error,
           ("error in audio ProcessReverseStream(): %d", err));
       return;
     }
   }
 
   // Packetize our input data into 10ms chunks, deinterleave into planar channel
   // buffers, process, and append to the right MediaStreamTrack.
-  mPacketizer->Input(aBuffer, static_cast<uint32_t>(aFrames));
+  mPacketizerInput->Input(aBuffer, static_cast<uint32_t>(aFrames));
 
-  while (mPacketizer->PacketsAvailable()) {
-    uint32_t samplesPerPacket = mPacketizer->PacketSize() *
-      mPacketizer->Channels();
+  while (mPacketizerInput->PacketsAvailable()) {
+    uint32_t samplesPerPacket = mPacketizerInput->PacketSize() *
+      mPacketizerInput->Channels();
     if (mInputBuffer.Length() < samplesPerPacket) {
       mInputBuffer.SetLength(samplesPerPacket);
     }
     if (mDeinterleavedBuffer.Length() < samplesPerPacket) {
       mDeinterleavedBuffer.SetLength(samplesPerPacket);
     }
     float* packet = mInputBuffer.Data();
-    mPacketizer->Output(packet);
+    mPacketizerInput->Output(packet);
 
     // Deinterleave the input data
     // Prepare an array pointing to deinterleaved channels.
     AutoTArray<float*, 8> deinterleavedPacketizedInputDataChannelPointers;
     deinterleavedPacketizedInputDataChannelPointers.SetLength(aChannels);
     offset = 0;
     for (size_t i = 0; i < deinterleavedPacketizedInputDataChannelPointers.Length(); ++i) {
       deinterleavedPacketizedInputDataChannelPointers[i] = mDeinterleavedBuffer.Data() + offset;
-      offset += mPacketizer->PacketSize();
+      offset += mPacketizerInput->PacketSize();
     }
 
     // Deinterleave the packet into the planar buffers pointed to by
     // deinterleavedPacketizedInputDataChannelPointers.
-    Deinterleave(packet, mPacketizer->PacketSize(), aChannels,
+    Deinterleave(packet, mPacketizerInput->PacketSize(), aChannels,
         deinterleavedPacketizedInputDataChannelPointers.Elements());
 
     StreamConfig inputConfig(aRate,
                              aChannels,
                              false /* we don't use typing detection */);
     StreamConfig outputConfig = inputConfig;
 
     // Bug 1404965: Get the right delay here, it saves some work down the line.
     mAudioProcessing->set_stream_delay_ms(0);
 
     // Bug 1414837: find a way to not allocate here.
     RefPtr<SharedBuffer> buffer =
-      SharedBuffer::Create(mPacketizer->PacketSize() * aChannels * sizeof(float));
+      SharedBuffer::Create(mPacketizerInput->PacketSize() * aChannels * sizeof(float));
     AudioSegment segment;
 
     // Prepare channel pointers to the SharedBuffer created above.
     AutoTArray<float*, 8> processedOutputChannelPointers;
     AutoTArray<const float*, 8> processedOutputChannelPointersConst;
     processedOutputChannelPointers.SetLength(aChannels);
     processedOutputChannelPointersConst.SetLength(aChannels);
 
     offset = 0;
     for (size_t i = 0; i < processedOutputChannelPointers.Length(); ++i) {
       processedOutputChannelPointers[i] = static_cast<float*>(buffer->Data()) + offset;
       processedOutputChannelPointersConst[i] = static_cast<float*>(buffer->Data()) + offset;
-      offset += mPacketizer->PacketSize();
+      offset += mPacketizerInput->PacketSize();
     }
 
     mAudioProcessing->ProcessStream(deinterleavedPacketizedInputDataChannelPointers.Elements(),
                                     inputConfig,
                                     outputConfig,
                                     processedOutputChannelPointers.Elements());
     MonitorAutoLock lock(mMonitor);
     if (mState != kStarted)
@@ -882,17 +882,17 @@ MediaEngineWebRTCMicrophoneSource::Packe
       }
 
       // We already have planar audio data of the right format. Insert into the
       // MSG.
       MOZ_ASSERT(processedOutputChannelPointers.Length() == aChannels);
       RefPtr<SharedBuffer> other = buffer;
       segment.AppendFrames(other.forget(),
                            processedOutputChannelPointersConst,
-                           mPacketizer->PacketSize(),
+                           mPacketizerInput->PacketSize(),
                            mPrincipalHandles[i]);
       mSources[i]->AppendToTrack(mTrackID, &segment);
     }
   }
 }
 
 template<typename T>
 void
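
Note on the Deinterleave() step above: it converts each interleaved packet (samples ordered frame by frame, channels within a frame) into the planar layout (one contiguous buffer per channel) that AudioProcessing::ProcessStream() consumes. A sketch of that transform, assuming float samples and pre-sized per-channel destination pointers as set up in PacketizeAndProcess(); it matches the shape of the call site, not necessarily the exact implementation in the tree:

// Copy interleaved samples [c0 c1 c0 c1 ...] into one contiguous
// buffer per channel.
#include <cstdint>

static void DeinterleaveSketch(const float* aInterleaved,
                               uint32_t aFrames,
                               uint32_t aChannels,
                               float* const* aOutChannels)
{
  for (uint32_t channel = 0; channel < aChannels; ++channel) {
    float* out = aOutChannels[channel];
    for (uint32_t frame = 0; frame < aFrames; ++frame) {
      out[frame] = aInterleaved[frame * aChannels + channel];
    }
  }
}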