bug 1197028 use AudioChunk::ChannelCount() r=padenot
author: Karl Tomlinson <karlt+@karlt.net>
Thu, 03 Sep 2015 17:30:16 +1200
changeset 261547 9e530224d97f8653a148aae99871bdd35d42b8f4
parent 261546 6f71147e33e7d128c491257b34304e52fe63bcc6
child 261548 efb40bb86a795388684bc4b8505034336587bdfc
push id: 29348
push user: kwierso@gmail.com
push date: Wed, 09 Sep 2015 20:47:39 +0000
treeherder: mozilla-central@dd2a1d737a64 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: padenot
bugs: 1197028
milestone: 43.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
bug 1197028 use AudioChunk::ChannelCount() r=padenot
dom/media/webaudio/AnalyserNode.cpp
dom/media/webaudio/AudioDestinationNode.cpp
dom/media/webaudio/AudioNodeEngine.cpp
dom/media/webaudio/AudioNodeStream.cpp
dom/media/webaudio/BiquadFilterNode.cpp
dom/media/webaudio/ChannelMergerNode.cpp
dom/media/webaudio/ChannelSplitterNode.cpp
dom/media/webaudio/ConvolverNode.cpp
dom/media/webaudio/DelayBuffer.cpp
dom/media/webaudio/DynamicsCompressorNode.cpp
dom/media/webaudio/GainNode.cpp
dom/media/webaudio/PannerNode.cpp
dom/media/webaudio/PanningUtils.h
dom/media/webaudio/ScriptProcessorNode.cpp
dom/media/webaudio/WaveShaperNode.cpp
dom/media/webaudio/blink/DynamicsCompressor.cpp
dom/media/webaudio/blink/HRTFPanner.cpp
dom/media/webaudio/blink/Reverb.cpp
--- a/dom/media/webaudio/AnalyserNode.cpp
+++ b/dom/media/webaudio/AnalyserNode.cpp
@@ -324,17 +324,17 @@ AnalyserNode::GetTimeDomainData(float* a
 
   size_t readChunk =
     mCurrentChunk - ((fftSize - 1) >> WEBAUDIO_BLOCK_SIZE_BITS);
   size_t readIndex = (0 - fftSize) & (WEBAUDIO_BLOCK_SIZE - 1);
   MOZ_ASSERT(readIndex == 0 || readIndex + fftSize == WEBAUDIO_BLOCK_SIZE);
 
   for (size_t writeIndex = 0; writeIndex < aLength; ) {
     const AudioChunk& chunk = mChunks[readChunk & (CHUNK_COUNT - 1)];
-    const size_t channelCount = chunk.mChannelData.Length();
+    const size_t channelCount = chunk.ChannelCount();
     size_t copyLength =
       std::min<size_t>(aLength - writeIndex, WEBAUDIO_BLOCK_SIZE);
     float* dataOut = &aData[writeIndex];
 
     if (channelCount == 0) {
       PodZero(dataOut, copyLength);
     } else {
       float scale = chunk.mVolume / channelCount;
--- a/dom/media/webaudio/AudioDestinationNode.cpp
+++ b/dom/media/webaudio/AudioDestinationNode.cpp
@@ -74,17 +74,17 @@ public:
     }
 
     // Skip copying if there is no buffer.
     uint32_t outputChannelCount = mBuffer ? mNumberOfChannels : 0;
 
     // Record our input buffer
     MOZ_ASSERT(mWriteIndex < mLength, "How did this happen?");
     const uint32_t duration = std::min(WEBAUDIO_BLOCK_SIZE, mLength - mWriteIndex);
-    const uint32_t inputChannelCount = aInput.mChannelData.Length();
+    const uint32_t inputChannelCount = aInput.ChannelCount();
     for (uint32_t i = 0; i < outputChannelCount; ++i) {
       float* outputData = mBuffer->GetDataForWrite(i) + mWriteIndex;
       if (aInput.IsNull() || i >= inputChannelCount) {
         PodZero(outputData, duration);
       } else {
         const float* inputBuffer = static_cast<const float*>(aInput.mChannelData[i]);
         if (duration == WEBAUDIO_BLOCK_SIZE) {
           // Use the optimized version of the copy with scale operation
--- a/dom/media/webaudio/AudioNodeEngine.cpp
+++ b/dom/media/webaudio/AudioNodeEngine.cpp
@@ -35,17 +35,17 @@ ThreadSharedFloatArrayBufferList::Create
 void
 WriteZeroesToAudioBlock(AudioChunk* aChunk, uint32_t aStart, uint32_t aLength)
 {
   MOZ_ASSERT(aStart + aLength <= WEBAUDIO_BLOCK_SIZE);
   MOZ_ASSERT(!aChunk->IsNull(), "You should pass a non-null chunk");
   if (aLength == 0)
     return;
 
-  for (uint32_t i = 0; i < aChunk->mChannelData.Length(); ++i) {
+  for (uint32_t i = 0; i < aChunk->ChannelCount(); ++i) {
     PodZero(aChunk->ChannelFloatsForWrite(i) + aStart, aLength);
   }
 }
 
 void AudioBufferCopyWithScale(const float* aInput,
                               float aScale,
                               float* aOutput,
                               uint32_t aSize)
--- a/dom/media/webaudio/AudioNodeStream.cpp
+++ b/dom/media/webaudio/AudioNodeStream.cpp
@@ -388,30 +388,30 @@ AudioNodeStream::ObtainInputBlock(AudioC
     AudioChunk* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
     MOZ_ASSERT(chunk);
     if (chunk->IsNull() || chunk->mChannelData.IsEmpty()) {
       continue;
     }
 
     inputChunks.AppendElement(chunk);
     outputChannelCount =
-      GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
+      GetAudioChannelsSuperset(outputChannelCount, chunk->ChannelCount());
   }
 
   outputChannelCount = ComputedNumberOfChannels(outputChannelCount);
 
   uint32_t inputChunkCount = inputChunks.Length();
   if (inputChunkCount == 0 ||
-      (inputChunkCount == 1 && inputChunks[0]->mChannelData.Length() == 0)) {
+      (inputChunkCount == 1 && inputChunks[0]->ChannelCount() == 0)) {
     aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
     return;
   }
 
   if (inputChunkCount == 1 &&
-      inputChunks[0]->mChannelData.Length() == outputChannelCount) {
+      inputChunks[0]->ChannelCount() == outputChannelCount) {
     aTmpChunk = *inputChunks[0];
     return;
   }
 
   if (outputChannelCount == 0) {
     aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
     return;
   }
@@ -426,17 +426,17 @@ AudioNodeStream::ObtainInputBlock(AudioC
 }
 
 void
 AudioNodeStream::AccumulateInputChunk(uint32_t aInputIndex, const AudioChunk& aChunk,
                                       AudioChunk* aBlock,
                                       nsTArray<float>* aDownmixBuffer)
 {
   nsAutoTArray<const float*,GUESS_AUDIO_CHANNELS> channels;
-  UpMixDownMixChunk(&aChunk, aBlock->mChannelData.Length(), channels, *aDownmixBuffer);
+  UpMixDownMixChunk(&aChunk, aBlock->ChannelCount(), channels, *aDownmixBuffer);
 
   for (uint32_t c = 0; c < channels.Length(); ++c) {
     const float* inputData = static_cast<const float*>(channels[c]);
     float* outputData = aBlock->ChannelFloatsForWrite(c);
     if (inputData) {
       if (aInputIndex == 0) {
         AudioBlockCopyChannelWithScale(inputData, aChunk.mVolume, outputData);
       } else {
@@ -453,17 +453,17 @@ AudioNodeStream::AccumulateInputChunk(ui
 void
 AudioNodeStream::UpMixDownMixChunk(const AudioChunk* aChunk,
                                    uint32_t aOutputChannelCount,
                                    nsTArray<const float*>& aOutputChannels,
                                    nsTArray<float>& aDownmixBuffer)
 {
   static const float silenceChannel[WEBAUDIO_BLOCK_SIZE] = {0.f};
 
-  for (uint32_t i = 0; i < aChunk->mChannelData.Length(); i++) {
+  for (uint32_t i = 0; i < aChunk->ChannelCount(); i++) {
     aOutputChannels.AppendElement(static_cast<const float*>(aChunk->mChannelData[i]));
   }
   if (aOutputChannels.Length() < aOutputChannelCount) {
     if (mChannelInterpretation == ChannelInterpretation::Speakers) {
       AudioChannelsUpMix(&aOutputChannels, aOutputChannelCount, SilentChannel::ZeroChannel<float>());
       NS_ASSERTION(aOutputChannelCount == aOutputChannels.Length(),
                    "We called GetAudioChannelsSuperset to avoid this");
     } else {
--- a/dom/media/webaudio/BiquadFilterNode.cpp
+++ b/dom/media/webaudio/BiquadFilterNode.cpp
@@ -163,28 +163,28 @@ public:
         }
 
         aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
         return;
       }
 
       PodArrayZero(inputBuffer);
 
-    } else if(mBiquads.Length() != aInput.mChannelData.Length()){
+    } else if(mBiquads.Length() != aInput.ChannelCount()){
       if (mBiquads.IsEmpty()) {
         nsRefPtr<PlayingRefChangeHandler> refchanged =
           new PlayingRefChangeHandler(aStream, PlayingRefChangeHandler::ADDREF);
         aStream->Graph()->
           DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget());
       } else { // Help people diagnose bug 924718
         NS_WARNING("BiquadFilterNode channel count changes may produce audio glitches");
       }
 
       // Adjust the number of biquads based on the number of channels
-      mBiquads.SetLength(aInput.mChannelData.Length());
+      mBiquads.SetLength(aInput.ChannelCount());
     }
 
     uint32_t numberOfChannels = mBiquads.Length();
     AllocateAudioBlock(numberOfChannels, aOutput);
 
     StreamTime pos = aStream->GetCurrentPosition();
 
     double freq = mFrequency.GetValueAtTime(pos);
--- a/dom/media/webaudio/ChannelMergerNode.cpp
+++ b/dom/media/webaudio/ChannelMergerNode.cpp
@@ -28,30 +28,30 @@ public:
                                     OutputChunks& aOutput,
                                     bool* aFinished) override
   {
     MOZ_ASSERT(aInput.Length() >= 1, "Should have one or more input ports");
 
     // Get the number of output channels, and allocate it
     size_t channelCount = 0;
     for (uint16_t i = 0; i < InputCount(); ++i) {
-      channelCount += aInput[i].mChannelData.Length();
+      channelCount += aInput[i].ChannelCount();
     }
     if (channelCount == 0) {
       aOutput[0].SetNull(WEBAUDIO_BLOCK_SIZE);
       return;
     }
     channelCount = std::min(channelCount, WebAudioUtils::MaxChannelCount);
     AllocateAudioBlock(channelCount, &aOutput[0]);
 
     // Append each channel in each input to the output
     size_t channelIndex = 0;
     for (uint16_t i = 0; true; ++i) {
       MOZ_ASSERT(i < InputCount());
-      for (size_t j = 0; j < aInput[i].mChannelData.Length(); ++j) {
+      for (size_t j = 0; j < aInput[i].ChannelCount(); ++j) {
         AudioBlockCopyChannelWithScale(
             static_cast<const float*>(aInput[i].mChannelData[j]),
             aInput[i].mVolume,
             aOutput[0].ChannelFloatsForWrite(channelIndex));
         ++channelIndex;
         if (channelIndex >= channelCount) {
           return;
         }
--- a/dom/media/webaudio/ChannelSplitterNode.cpp
+++ b/dom/media/webaudio/ChannelSplitterNode.cpp
@@ -27,17 +27,17 @@ public:
                                     const OutputChunks& aInput,
                                     OutputChunks& aOutput,
                                     bool* aFinished) override
   {
     MOZ_ASSERT(aInput.Length() == 1, "Should only have one input port");
 
     aOutput.SetLength(OutputCount());
     for (uint16_t i = 0; i < OutputCount(); ++i) {
-      if (i < aInput[0].mChannelData.Length()) {
+      if (i < aInput[0].ChannelCount()) {
         // Split out existing channels
         AllocateAudioBlock(1, &aOutput[i]);
         AudioBlockCopyChannelWithScale(
             static_cast<const float*>(aInput[0].mChannelData[i]),
             aInput[0].mVolume,
             aOutput[i].ChannelFloatsForWrite(0));
       } else {
         // Pad with silent channels if needed
--- a/dom/media/webaudio/ConvolverNode.cpp
+++ b/dom/media/webaudio/ConvolverNode.cpp
@@ -126,17 +126,17 @@ public:
             DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget());
         }
         aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
         return;
       }
     } else {
       if (aInput.mVolume != 1.0f) {
         // Pre-multiply the input's volume
-        uint32_t numChannels = aInput.mChannelData.Length();
+        uint32_t numChannels = aInput.ChannelCount();
         AllocateAudioBlock(numChannels, &input);
         for (uint32_t i = 0; i < numChannels; ++i) {
           const float* src = static_cast<const float*>(aInput.mChannelData[i]);
           float* dest = input.ChannelFloatsForWrite(i);
           AudioBlockCopyChannelWithScale(src, aInput.mVolume, dest);
         }
       }
 
--- a/dom/media/webaudio/DelayBuffer.cpp
+++ b/dom/media/webaudio/DelayBuffer.cpp
@@ -24,17 +24,17 @@ DelayBuffer::SizeOfExcludingThis(MallocS
   amount += mUpmixChannels.ShallowSizeOfExcludingThis(aMallocSizeOf);
   return amount;
 }
 
 void
 DelayBuffer::Write(const AudioChunk& aInputChunk)
 {
   // We must have a reference to the buffer if there are channels
-  MOZ_ASSERT(aInputChunk.IsNull() == !aInputChunk.mChannelData.Length());
+  MOZ_ASSERT(aInputChunk.IsNull() == !aInputChunk.ChannelCount());
 #ifdef DEBUG
   MOZ_ASSERT(!mHaveWrittenBlock);
   mHaveWrittenBlock = true;
 #endif
 
   if (!EnsureBuffer()) {
     return;
   }
@@ -111,17 +111,17 @@ DelayBuffer::ReadChannel(const double aP
 }
 
 void
 DelayBuffer::ReadChannels(const double aPerFrameDelays[WEBAUDIO_BLOCK_SIZE],
                           AudioChunk* aOutputChunk,
                           uint32_t aFirstChannel, uint32_t aNumChannelsToRead,
                           ChannelInterpretation aChannelInterpretation)
 {
-  uint32_t totalChannelCount = aOutputChunk->mChannelData.Length();
+  uint32_t totalChannelCount = aOutputChunk->ChannelCount();
   uint32_t readChannelsEnd = aFirstChannel + aNumChannelsToRead;
   MOZ_ASSERT(readChannelsEnd <= totalChannelCount);
 
   if (mUpmixChannels.Length() != totalChannelCount) {
     mLastReadChunk = -1; // invalidate cache
   }
 
   for (uint32_t channel = aFirstChannel;
--- a/dom/media/webaudio/DynamicsCompressorNode.cpp
+++ b/dom/media/webaudio/DynamicsCompressorNode.cpp
@@ -98,21 +98,21 @@ public:
                             bool* aFinished) override
   {
     if (aInput.IsNull()) {
       // Just output silence
       *aOutput = aInput;
       return;
     }
 
-    const uint32_t channelCount = aInput.mChannelData.Length();
+    const uint32_t channelCount = aInput.ChannelCount();
     if (mCompressor->numberOfChannels() != channelCount) {
       // Create a new compressor object with a new channel count
       mCompressor = new WebCore::DynamicsCompressor(aStream->SampleRate(),
-                                                    aInput.mChannelData.Length());
+                                                    aInput.ChannelCount());
     }
 
     StreamTime pos = aStream->GetCurrentPosition();
     mCompressor->setParameterValue(DynamicsCompressor::ParamThreshold,
                                    mThreshold.GetValueAtTime(pos));
     mCompressor->setParameterValue(DynamicsCompressor::ParamKnee,
                                    mKnee.GetValueAtTime(pos));
     mCompressor->setParameterValue(DynamicsCompressor::ParamRatio,
--- a/dom/media/webaudio/GainNode.cpp
+++ b/dom/media/webaudio/GainNode.cpp
@@ -76,29 +76,29 @@ public:
       } else {
         *aOutput = aInput;
         aOutput->mVolume *= gain;
       }
     } else {
       // First, compute a vector of gains for each track tick based on the
       // timeline at hand, and then for each channel, multiply the values
       // in the buffer with the gain vector.
-      AllocateAudioBlock(aInput.mChannelData.Length(), aOutput);
+      AllocateAudioBlock(aInput.ChannelCount(), aOutput);
 
       // Compute the gain values for the duration of the input AudioChunk
       StreamTime tick = aStream->GetCurrentPosition();
       float computedGain[WEBAUDIO_BLOCK_SIZE];
       mGain.GetValuesAtTime(tick, computedGain, WEBAUDIO_BLOCK_SIZE);
 
       for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) {
         computedGain[counter] *= aInput.mVolume;
       }
 
       // Apply the gain to the output buffer
-      for (size_t channel = 0; channel < aOutput->mChannelData.Length(); ++channel) {
+      for (size_t channel = 0; channel < aOutput->ChannelCount(); ++channel) {
         const float* inputBuffer = static_cast<const float*> (aInput.mChannelData[channel]);
         float* buffer = aOutput->ChannelFloatsForWrite(channel);
         AudioBlockCopyChannelWithScale(inputBuffer, computedGain, buffer);
       }
     }
   }
 
   virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override
--- a/dom/media/webaudio/PannerNode.cpp
+++ b/dom/media/webaudio/PannerNode.cpp
@@ -318,17 +318,17 @@ PannerNodeEngine::HRTFPanningFunction(co
   mHRTFPanner->pan(azimuth, elevation, &input, aOutput);
 }
 
 void
 PannerNodeEngine::EqualPowerPanningFunction(const AudioChunk& aInput,
                                             AudioChunk* aOutput)
 {
   float azimuth, elevation, gainL, gainR, normalizedAzimuth, distanceGain, coneGain;
-  int inputChannels = aInput.mChannelData.Length();
+  int inputChannels = aInput.ChannelCount();
 
   // If both the listener are in the same spot, and no cone gain is specified,
   // this node is noop.
   if (mListenerPosition == mPosition &&
       mConeInnerAngle == 360 &&
       mConeOuterAngle == 360) {
     *aOutput = aInput;
     return;
--- a/dom/media/webaudio/PanningUtils.h
+++ b/dom/media/webaudio/PanningUtils.h
@@ -47,17 +47,17 @@ GainStereoToStereo(const AudioChunk& aIn
 }
 
 // T can be float or an array of float, and  U can be bool or an array of bool,
 // depending if the value of the parameters are constant for this block.
 template<typename T, typename U>
 void ApplyStereoPanning(const AudioChunk& aInput, AudioChunk* aOutput,
                         T aGainL, T aGainR, U aOnLeft)
 {
-  if (aInput.mChannelData.Length() == 1) {
+  if (aInput.ChannelCount() == 1) {
     GainMonoToStereo(aInput, aOutput, aGainL, aGainR);
   } else {
     GainStereoToStereo(aInput, aOutput, aGainL, aGainR, aOnLeft);
   }
 }
 
 } // namespace dom
 } // namespace mozilla
--- a/dom/media/webaudio/ScriptProcessorNode.cpp
+++ b/dom/media/webaudio/ScriptProcessorNode.cpp
@@ -310,17 +310,17 @@ public:
     // First, record our input buffer, if its allocation succeeded.
     uint32_t inputChannelCount = mInputBuffer ? mInputBuffer->GetChannels() : 0;
     for (uint32_t i = 0; i < inputChannelCount; ++i) {
       float* writeData = mInputBuffer->GetDataForWrite(i) + mInputWriteIndex;
       if (aInput.IsNull()) {
         PodZero(writeData, aInput.GetDuration());
       } else {
         MOZ_ASSERT(aInput.GetDuration() == WEBAUDIO_BLOCK_SIZE, "sanity check");
-        MOZ_ASSERT(aInput.mChannelData.Length() == inputChannelCount);
+        MOZ_ASSERT(aInput.ChannelCount() == inputChannelCount);
         AudioBlockCopyChannelWithScale(static_cast<const float*>(aInput.mChannelData[i]),
                                        aInput.mVolume, writeData);
       }
     }
     mInputWriteIndex += aInput.GetDuration();
 
     // Now, see if we have data to output
     // Note that we need to do this before sending the buffer to the main
--- a/dom/media/webaudio/WaveShaperNode.cpp
+++ b/dom/media/webaudio/WaveShaperNode.cpp
@@ -214,17 +214,17 @@ public:
     }
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
                             const AudioChunk& aInput,
                             AudioChunk* aOutput,
                             bool* aFinished) override
   {
-    uint32_t channelCount = aInput.mChannelData.Length();
+    uint32_t channelCount = aInput.ChannelCount();
     if (!mCurve.Length() || !channelCount) {
       // Optimize the case where we don't have a curve buffer,
       // or the input is null.
       *aOutput = aInput;
       return;
     }
 
     AllocateAudioBlock(channelCount, aOutput);
--- a/dom/media/webaudio/blink/DynamicsCompressor.cpp
+++ b/dom/media/webaudio/blink/DynamicsCompressor.cpp
@@ -149,18 +149,18 @@ void DynamicsCompressor::setEmphasisPara
 }
 
 void DynamicsCompressor::process(const AudioChunk* sourceChunk, AudioChunk* destinationChunk, unsigned framesToProcess)
 {
     // Though numberOfChannels is retrived from destinationBus, we still name it numberOfChannels instead of numberOfDestinationChannels.
     // It's because we internally match sourceChannels's size to destinationBus by channel up/down mix. Thus we need numberOfChannels
     // to do the loop work for both m_sourceChannels and m_destinationChannels.
 
-    unsigned numberOfChannels = destinationChunk->mChannelData.Length();
-    unsigned numberOfSourceChannels = sourceChunk->mChannelData.Length();
+    unsigned numberOfChannels = destinationChunk->ChannelCount();
+    unsigned numberOfSourceChannels = sourceChunk->ChannelCount();
 
     MOZ_ASSERT(numberOfChannels == m_numberOfChannels && numberOfSourceChannels);
 
     if (numberOfChannels != m_numberOfChannels || !numberOfSourceChannels) {
         destinationChunk->SetNull(WEBAUDIO_BLOCK_SIZE);
         return;
     }
 
--- a/dom/media/webaudio/blink/HRTFPanner.cpp
+++ b/dom/media/webaudio/blink/HRTFPanner.cpp
@@ -127,23 +127,23 @@ int HRTFPanner::calculateDesiredAzimuthI
     desiredAzimuthIndex = min(numberOfAzimuths - 1, desiredAzimuthIndex);
     return desiredAzimuthIndex;
 }
 
 void HRTFPanner::pan(double desiredAzimuth, double elevation, const AudioChunk* inputBus, AudioChunk* outputBus)
 {
 #ifdef DEBUG
     unsigned numInputChannels =
-        inputBus->IsNull() ? 0 : inputBus->mChannelData.Length();
+        inputBus->IsNull() ? 0 : inputBus->ChannelCount();
 
     MOZ_ASSERT(numInputChannels <= 2);
     MOZ_ASSERT(inputBus->mDuration == WEBAUDIO_BLOCK_SIZE);
 #endif
 
-    bool isOutputGood = outputBus && outputBus->mChannelData.Length() == 2 && outputBus->mDuration == WEBAUDIO_BLOCK_SIZE;
+    bool isOutputGood = outputBus && outputBus->ChannelCount() == 2 && outputBus->mDuration == WEBAUDIO_BLOCK_SIZE;
     MOZ_ASSERT(isOutputGood);
 
     if (!isOutputGood) {
         if (outputBus)
             outputBus->SetNull(outputBus->mDuration);
         return;
     }
 
--- a/dom/media/webaudio/blink/Reverb.cpp
+++ b/dom/media/webaudio/blink/Reverb.cpp
@@ -149,32 +149,32 @@ void Reverb::initialize(const nsTArray<c
         WriteZeroesToAudioBlock(&m_tempBuffer, 0, WEBAUDIO_BLOCK_SIZE);
     }
 }
 
 void Reverb::process(const AudioChunk* sourceBus, AudioChunk* destinationBus, size_t framesToProcess)
 {
     // Do a fairly comprehensive sanity check.
     // If these conditions are satisfied, all of the source and destination pointers will be valid for the various matrixing cases.
-    bool isSafeToProcess = sourceBus && destinationBus && sourceBus->mChannelData.Length() > 0 && destinationBus->mChannelData.Length() > 0
+    bool isSafeToProcess = sourceBus && destinationBus && sourceBus->ChannelCount() > 0 && destinationBus->ChannelCount() > 0
        && framesToProcess <= MaxFrameSize && framesToProcess <= size_t(sourceBus->mDuration) && framesToProcess <= size_t(destinationBus->mDuration);
 
     MOZ_ASSERT(isSafeToProcess);
     if (!isSafeToProcess)
         return;
 
     // For now only handle mono or stereo output
-    MOZ_ASSERT(destinationBus->mChannelData.Length() <= 2);
+    MOZ_ASSERT(destinationBus->ChannelCount() <= 2);
 
     float* destinationChannelL = static_cast<float*>(const_cast<void*>(destinationBus->mChannelData[0]));
     const float* sourceBusL = static_cast<const float*>(sourceBus->mChannelData[0]);
 
     // Handle input -> output matrixing...
-    size_t numInputChannels = sourceBus->mChannelData.Length();
-    size_t numOutputChannels = destinationBus->mChannelData.Length();
+    size_t numInputChannels = sourceBus->ChannelCount();
+    size_t numOutputChannels = destinationBus->ChannelCount();
     size_t numReverbChannels = m_convolvers.Length();
 
     if (numInputChannels == 2 && numReverbChannels == 2 && numOutputChannels == 2) {
         // 2 -> 2 -> 2
         const float* sourceBusR = static_cast<const float*>(sourceBus->mChannelData[1]);
         float* destinationChannelR = static_cast<float*>(const_cast<void*>(destinationBus->mChannelData[1]));
         m_convolvers[0]->process(sourceBusL, sourceBus->mDuration, destinationChannelL, destinationBus->mDuration, framesToProcess);
         m_convolvers[1]->process(sourceBusR, sourceBus->mDuration, destinationChannelR, destinationBus->mDuration, framesToProcess);