bug 1197028 use AudioBlock for web audio processing to reuse buffers shared downstream r=padenot
author      Karl Tomlinson <karlt+@karlt.net>
date        Thu, 03 Sep 2015 19:01:50 +1200
changeset   294149 d2b08c513afec90c82d2ba13cd0085f46ff42f4c
parent      294148 6db3d033d1dc98b3d1e1191d17fbc7be156ced8b
child       294150 253dc0ad68e639adddc7ff16f41d58b178394d19
push id     5245
push user   raliiev@mozilla.com
push date   Thu, 29 Oct 2015 11:30:51 +0000
treeherder  mozilla-beta@dac831dc1bd0
reviewers   padenot
bugs        1197028
milestone   43.0a1
bug 1197028 use AudioBlock for web audio processing to reuse buffers shared downstream r=padenot
dom/media/AudioSegment.h
dom/media/webaudio/AnalyserNode.cpp
dom/media/webaudio/AudioBlock.cpp
dom/media/webaudio/AudioBlock.h
dom/media/webaudio/AudioBufferSourceNode.cpp
dom/media/webaudio/AudioDestinationNode.cpp
dom/media/webaudio/AudioNodeEngine.cpp
dom/media/webaudio/AudioNodeEngine.h
dom/media/webaudio/AudioNodeExternalInputStream.cpp
dom/media/webaudio/AudioNodeStream.cpp
dom/media/webaudio/AudioNodeStream.h
dom/media/webaudio/AudioParam.cpp
dom/media/webaudio/BiquadFilterNode.cpp
dom/media/webaudio/ChannelMergerNode.cpp
dom/media/webaudio/ChannelSplitterNode.cpp
dom/media/webaudio/ConvolverNode.cpp
dom/media/webaudio/DelayBuffer.cpp
dom/media/webaudio/DelayBuffer.h
dom/media/webaudio/DelayNode.cpp
dom/media/webaudio/DynamicsCompressorNode.cpp
dom/media/webaudio/GainNode.cpp
dom/media/webaudio/OscillatorNode.cpp
dom/media/webaudio/PannerNode.cpp
dom/media/webaudio/PanningUtils.h
dom/media/webaudio/ScriptProcessorNode.cpp
dom/media/webaudio/StereoPannerNode.cpp
dom/media/webaudio/WaveShaperNode.cpp
dom/media/webaudio/blink/DynamicsCompressor.cpp
dom/media/webaudio/blink/DynamicsCompressor.h
dom/media/webaudio/blink/HRTFPanner.cpp
dom/media/webaudio/blink/HRTFPanner.h
dom/media/webaudio/blink/Reverb.cpp
dom/media/webaudio/blink/Reverb.h
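
The core of this patch is visible in the AudioBlock.cpp hunk below: the free function AllocateAudioBlock(aChannelCount, aChunk) becomes the member AudioBlock::AllocateChannels(aChannelCount), which skips reallocation when the block still owns last iteration's buffer and nothing downstream holds a lasting share of it. The following self-contained toy sketches that reuse policy; Buffer and Block are hypothetical stand-ins for AudioBlockBuffer and AudioBlock, with std::shared_ptr ownership standing in for Gecko's refcounting, so this illustrates the idea rather than the actual implementation.

#include <cstdio>
#include <memory>
#include <vector>

constexpr size_t kBlockSize = 128; // WEBAUDIO_BLOCK_SIZE

struct Buffer {
  std::vector<float> mSamples;
  explicit Buffer(size_t aChannels) : mSamples(aChannels * kBlockSize) {}
};

struct Block {
  std::shared_ptr<Buffer> mBuffer;
  size_t mChannels = 0;

  void AllocateChannels(size_t aChannels) {
    // Reuse last iteration's buffer only when this block is its sole owner
    // (the analogue of !HasLastingShares()) and the channel count matches.
    if (mBuffer && mBuffer.use_count() == 1 && mChannels == aChannels) {
      return; // no allocation; the old samples are simply overwritten
    }
    mBuffer = std::make_shared<Buffer>(aChannels);
    mChannels = aChannels;
  }
};

int main() {
  Block out;
  out.AllocateChannels(2);
  Buffer* first = out.mBuffer.get();

  out.AllocateChannels(2); // nothing downstream kept a reference: reused
  std::printf("reused: %d\n", first == out.mBuffer.get()); // prints 1

  std::shared_ptr<Buffer> downstream = out.mBuffer; // a consumer keeps the data
  out.AllocateChannels(2); // now a fresh buffer must be allocated
  std::printf("reused: %d\n", first == out.mBuffer.get()); // prints 0
}
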
--- a/dom/media/AudioSegment.h
+++ b/dom/media/AudioSegment.h
@@ -187,53 +187,18 @@ struct AudioChunk {
   {
     mBuffer = nullptr;
     mChannelData.Clear();
     mDuration = aDuration;
     mVolume = 1.0f;
     mBufferFormat = AUDIO_FORMAT_SILENCE;
   }
 
-  bool IsSilentOrSubnormal() const
-  {
-    if (!mBuffer) {
-      return true;
-    }
-
-    for (uint32_t i = 0, length = mChannelData.Length(); i < length; ++i) {
-      const float* channel = static_cast<const float*>(mChannelData[i]);
-      for (StreamTime frame = 0; frame < mDuration; ++frame) {
-        if (fabs(channel[frame]) >= FLT_MIN) {
-          return false;
-        }
-      }
-    }
-
-    return true;
-  }
-
   size_t ChannelCount() const { return mChannelData.Length(); }
 
-  float* ChannelFloatsForWrite(size_t aChannel)
-  {
-    MOZ_ASSERT(mBufferFormat == AUDIO_FORMAT_FLOAT32);
-    MOZ_ASSERT(!mBuffer->IsShared());
-    return static_cast<float*>(const_cast<void*>(mChannelData[aChannel]));
-  }
-
-  void ReleaseBufferIfShared()
-  {
-    if (mBuffer && mBuffer->IsShared()) {
-      // Remove pointers into the buffer, but keep the array allocation for
-      // chunk re-use.
-      mChannelData.ClearAndRetainStorage();
-      mBuffer = nullptr;
-    }
-  }
-
   bool IsMuted() const { return mVolume == 0.0f; }
 
   size_t SizeOfExcludingThisIfUnshared(MallocSizeOf aMallocSizeOf) const
   {
     return SizeOfExcludingThis(aMallocSizeOf, true);
   }
 
   size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf, bool aUnshared) const
--- a/dom/media/webaudio/AnalyserNode.cpp
+++ b/dom/media/webaudio/AnalyserNode.cpp
@@ -54,23 +54,24 @@ class AnalyserNodeEngine final : public 
 public:
   explicit AnalyserNodeEngine(AnalyserNode* aNode)
     : AudioNodeEngine(aNode)
   {
     MOZ_ASSERT(NS_IsMainThread());
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
-                            const AudioChunk& aInput,
-                            AudioChunk* aOutput,
+                            const AudioBlock& aInput,
+                            AudioBlock* aOutput,
                             bool* aFinished) override
   {
     *aOutput = aInput;
 
-    nsRefPtr<TransferBuffer> transfer = new TransferBuffer(aStream, aInput);
+    nsRefPtr<TransferBuffer> transfer =
+      new TransferBuffer(aStream, aInput.AsAudioChunk());
     NS_DispatchToMainThread(transfer);
   }
 
   virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override
   {
     return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
   }
 };
--- a/dom/media/webaudio/AudioBlock.cpp
+++ b/dom/media/webaudio/AudioBlock.cpp
@@ -120,35 +120,43 @@ void
 AudioBlock::ClearDownstreamMark() {
   if (mBufferIsDownstreamRef) {
     mBuffer->AsAudioBlockBuffer()->DownstreamRefRemoved();
     mBufferIsDownstreamRef = false;
   }
 }
 
 void
-AllocateAudioBlock(uint32_t aChannelCount, AudioChunk* aChunk)
+AudioBlock::AssertNoLastingShares() {
+  MOZ_ASSERT(!mBuffer->AsAudioBlockBuffer()->HasLastingShares());
+}
+
+void
+AudioBlock::AllocateChannels(uint32_t aChannelCount)
 {
-  if (aChunk->mBuffer && aChunk->ChannelCount() == aChannelCount) {
-    AudioBlockBuffer* buffer = aChunk->mBuffer->AsAudioBlockBuffer();
+  MOZ_ASSERT(mDuration == WEBAUDIO_BLOCK_SIZE);
+
+  if (mBufferIsDownstreamRef) {
+    // This is not our buffer to re-use.
+    ClearDownstreamMark();
+  } else if (mBuffer && ChannelCount() == aChannelCount) {
+    AudioBlockBuffer* buffer = mBuffer->AsAudioBlockBuffer();
     if (buffer && !buffer->HasLastingShares()) {
-      MOZ_ASSERT(aChunk->mBufferFormat == AUDIO_FORMAT_FLOAT32);
-      MOZ_ASSERT(aChunk->mDuration == WEBAUDIO_BLOCK_SIZE);
+      MOZ_ASSERT(mBufferFormat == AUDIO_FORMAT_FLOAT32);
       // No need to allocate again.
-      aChunk->mVolume = 1.0f;
+      mVolume = 1.0f;
       return;
     }
   }
 
   // XXX for SIMD purposes we should do something here to make sure the
   // channel buffers are 16-byte aligned.
   nsRefPtr<AudioBlockBuffer> buffer = AudioBlockBuffer::Create(aChannelCount);
-  aChunk->mDuration = WEBAUDIO_BLOCK_SIZE;
-  aChunk->mChannelData.SetLength(aChannelCount);
+  mChannelData.SetLength(aChannelCount);
   for (uint32_t i = 0; i < aChannelCount; ++i) {
-    aChunk->mChannelData[i] = buffer->ChannelData(i);
+    mChannelData[i] = buffer->ChannelData(i);
   }
-  aChunk->mBuffer = buffer.forget();
-  aChunk->mVolume = 1.0f;
-  aChunk->mBufferFormat = AUDIO_FORMAT_FLOAT32;
+  mBuffer = buffer.forget();
+  mVolume = 1.0f;
+  mBufferFormat = AUDIO_FORMAT_FLOAT32;
 }
 
 } // namespace mozilla
--- a/dom/media/webaudio/AudioBlock.h
+++ b/dom/media/webaudio/AudioBlock.h
@@ -43,16 +43,31 @@ public:
   using AudioChunk::mBufferFormat;
 
   const AudioChunk& AsAudioChunk() const { return *this; }
   AudioChunk* AsMutableChunk() {
    ClearDownstreamMark();
     return this;
   }
 
+  /**
+   * Allocates, if necessary, aChannelCount buffers of WEBAUDIO_BLOCK_SIZE float
+   * samples for writing.
+   */
+  void AllocateChannels(uint32_t aChannelCount);
+
+  float* ChannelFloatsForWrite(size_t aChannel)
+  {
+    MOZ_ASSERT(mBufferFormat == AUDIO_FORMAT_FLOAT32);
+#ifdef DEBUG
+    AssertNoLastingShares();
+#endif
+    return static_cast<float*>(const_cast<void*>(mChannelData[aChannel]));
+  }
+
   void SetBuffer(ThreadSharedObject* aNewBuffer);
   void SetNull(StreamTime aDuration) {
     MOZ_ASSERT(aDuration == WEBAUDIO_BLOCK_SIZE);
     SetBuffer(nullptr);
     mChannelData.Clear();
     mVolume = 1.0f;
     mBufferFormat = AUDIO_FORMAT_SILENCE;
   }
@@ -83,27 +98,23 @@ public:
       }
     }
 
     return true;
   }
 
 private:
   void ClearDownstreamMark();
+  void AssertNoLastingShares();
 
   // mBufferIsDownstreamRef is set only when mBuffer references an
   // AudioBlockBuffer created in a different AudioBlock.  That can happen when
   // this AudioBlock is on a node downstream from the node which created the
   // buffer.  When this is set, the AudioBlockBuffer is notified that this
  // reference does not prevent the upstream node from re-using the buffer next
   // iteration and modifying its contents.  The AudioBlockBuffer is also
   // notified when mBuffer releases this reference.
   bool mBufferIsDownstreamRef = false;
 };
-/**
- * Allocates, if necessary, aChannelCount buffers of WEBAUDIO_BLOCK_SIZE float
- * samples for writing to an AudioChunk.
- */
-void AllocateAudioBlock(uint32_t aChannelCount, AudioChunk* aChunk);
 
 } // namespace mozilla
 
 #endif // MOZILLA_AUDIOBLOCK_H_
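
The mBufferIsDownstreamRef comment above separates transient downstream references, which are expected to be dropped before the next iteration, from lasting shares that force AllocateChannels() to allocate a fresh buffer. Below is a hypothetical sketch of how such accounting can work: DownstreamRefRemoved() and HasLastingShares() are names taken from the diff, but SharedBlockBuffer and its counters are invented for illustration, and the real AudioBlockBuffer bookkeeping may differ.

#include <cassert>

class SharedBlockBuffer {
  int mRefCount = 0;           // every AudioBlock referencing this buffer
  int mDownstreamRefCount = 0; // refs flagged with mBufferIsDownstreamRef

public:
  void AddRef() { ++mRefCount; }
  void Release() { --mRefCount; }

  // Called as a downstream AudioBlock marks or clears its reference.
  void DownstreamRefAdded() { ++mDownstreamRefCount; }
  void DownstreamRefRemoved() { --mDownstreamRefCount; }

  // One reference belongs to the producing AudioBlock itself; anything
  // beyond that which is not a transient downstream ref blocks reuse.
  bool HasLastingShares() const {
    assert(mRefCount >= 1 + mDownstreamRefCount);
    return mRefCount - mDownstreamRefCount > 1;
  }
};

int main() {
  SharedBlockBuffer buf;
  buf.AddRef();                    // the producing AudioBlock's own reference
  buf.AddRef();                    // a downstream AudioBlock takes the output...
  buf.DownstreamRefAdded();        // ...and marks its reference as transient
  assert(!buf.HasLastingShares()); // transient refs do not block reuse
  buf.AddRef();                    // e.g. an AudioChunk copy kept in a segment
  assert(buf.HasLastingShares());  // now a fresh buffer must be allocated
  return 0;
}
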
--- a/dom/media/webaudio/AudioBufferSourceNode.cpp
+++ b/dom/media/webaudio/AudioBufferSourceNode.cpp
@@ -182,32 +182,31 @@ public:
       // filter width.
       mBeginProcessing =
         (subsample - inputLatency * ratioDen + ratioNum - 1) / ratioNum;
     }
   }
 
   // Borrow a full buffer of size WEBAUDIO_BLOCK_SIZE from the source buffer
   // at offset aSourceOffset.  This avoids copying memory.
-  void BorrowFromInputBuffer(AudioChunk* aOutput,
+  void BorrowFromInputBuffer(AudioBlock* aOutput,
                              uint32_t aChannels)
   {
-    aOutput->mDuration = WEBAUDIO_BLOCK_SIZE;
-    aOutput->mBuffer = mBuffer;
+    aOutput->SetBuffer(mBuffer);
     aOutput->mChannelData.SetLength(aChannels);
     for (uint32_t i = 0; i < aChannels; ++i) {
       aOutput->mChannelData[i] = mBuffer->GetData(i) + mBufferPosition;
     }
     aOutput->mVolume = 1.0f;
     aOutput->mBufferFormat = AUDIO_FORMAT_FLOAT32;
   }
 
   // Copy aNumberOfFrames frames from the source buffer at offset aSourceOffset
   // and put it at offset aBufferOffset in the destination buffer.
-  void CopyFromInputBuffer(AudioChunk* aOutput,
+  void CopyFromInputBuffer(AudioBlock* aOutput,
                            uint32_t aChannels,
                            uintptr_t aOffsetWithinBlock,
                            uint32_t aNumberOfFrames) {
     for (uint32_t i = 0; i < aChannels; ++i) {
       float* baseChannelData = aOutput->ChannelFloatsForWrite(i);
       memcpy(baseChannelData + aOffsetWithinBlock,
              mBuffer->GetData(i) + mBufferPosition,
              aNumberOfFrames * sizeof(float));
@@ -215,17 +214,17 @@ public:
   }
 
   // Resamples input data to an output buffer, according to |mBufferSampleRate| and
   // the playbackRate/detune.
   // The number of frames consumed/produced depends on the amount of space
   // remaining in both the input and output buffer, and the playback rate (that
   // is, the ratio between the output samplerate and the input samplerate).
   void CopyFromInputBufferWithResampling(AudioNodeStream* aStream,
-                                         AudioChunk* aOutput,
+                                         AudioBlock* aOutput,
                                          uint32_t aChannels,
                                          uint32_t* aOffsetWithinBlock,
                                          StreamTime* aCurrentPosition,
                                          int32_t aBufferMax) {
     // TODO: adjust for mStop (see bug 913854 comment 9).
     uint32_t availableInOutputBuffer =
       WEBAUDIO_BLOCK_SIZE - *aOffsetWithinBlock;
     SpeexResamplerState* resampler = mResampler;
@@ -314,31 +313,31 @@ public:
   /**
    * Fill aOutput with as many zero frames as we can, and advance
    * aOffsetWithinBlock and aCurrentPosition based on how many frames we write.
    * This will never advance aOffsetWithinBlock past WEBAUDIO_BLOCK_SIZE or
    * aCurrentPosition past aMaxPos.  This function knows when it needs to
    * allocate the output buffer, and also optimizes the case where it can avoid
    * memory allocations.
    */
-  void FillWithZeroes(AudioChunk* aOutput,
+  void FillWithZeroes(AudioBlock* aOutput,
                       uint32_t aChannels,
                       uint32_t* aOffsetWithinBlock,
                       StreamTime* aCurrentPosition,
                       StreamTime aMaxPos)
   {
     MOZ_ASSERT(*aCurrentPosition < aMaxPos);
     uint32_t numFrames =
       std::min<StreamTime>(WEBAUDIO_BLOCK_SIZE - *aOffsetWithinBlock,
                            aMaxPos - *aCurrentPosition);
     if (numFrames == WEBAUDIO_BLOCK_SIZE) {
       aOutput->SetNull(numFrames);
     } else {
       if (*aOffsetWithinBlock == 0) {
-        AllocateAudioBlock(aChannels, aOutput);
+        aOutput->AllocateChannels(aChannels);
       }
       WriteZeroesToAudioBlock(aOutput, *aOffsetWithinBlock, numFrames);
     }
     *aOffsetWithinBlock += numFrames;
     *aCurrentPosition += numFrames;
   }
 
   /**
@@ -346,17 +345,17 @@ public:
    * advance aOffsetWithinBlock and aCurrentPosition based on how many frames
    * we write.  This will never advance aOffsetWithinBlock past
    * WEBAUDIO_BLOCK_SIZE, or aCurrentPosition past mStop.  It takes data from
    * the buffer at aBufferOffset, and never takes more data than aBufferMax.
    * This function knows when it needs to allocate the output buffer, and also
    * optimizes the case where it can avoid memory allocations.
    */
   void CopyFromBuffer(AudioNodeStream* aStream,
-                      AudioChunk* aOutput,
+                      AudioBlock* aOutput,
                       uint32_t aChannels,
                       uint32_t* aOffsetWithinBlock,
                       StreamTime* aCurrentPosition,
                       int32_t aBufferMax)
   {
     MOZ_ASSERT(*aCurrentPosition < mStop);
     uint32_t numFrames =
       std::min(std::min<StreamTime>(WEBAUDIO_BLOCK_SIZE - *aOffsetWithinBlock,
@@ -365,17 +364,17 @@ public:
     if (numFrames == WEBAUDIO_BLOCK_SIZE && !mResampler) {
       MOZ_ASSERT(mBufferPosition < aBufferMax);
       BorrowFromInputBuffer(aOutput, aChannels);
       *aOffsetWithinBlock += numFrames;
       *aCurrentPosition += numFrames;
       mBufferPosition += numFrames;
     } else {
       if (*aOffsetWithinBlock == 0) {
-        AllocateAudioBlock(aChannels, aOutput);
+        aOutput->AllocateChannels(aChannels);
       }
       if (!mResampler) {
         MOZ_ASSERT(mBufferPosition < aBufferMax);
         CopyFromInputBuffer(aOutput, aChannels, *aOffsetWithinBlock, numFrames);
         *aOffsetWithinBlock += numFrames;
         *aCurrentPosition += numFrames;
         mBufferPosition += numFrames;
       } else {
@@ -416,18 +415,18 @@ public:
 
     detune = std::min(std::max(-1200.f, detune), 1200.f);
 
     int32_t outRate = ComputeFinalOutSampleRate(playbackRate, detune);
     UpdateResampler(outRate, aChannels);
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
-                            const AudioChunk& aInput,
-                            AudioChunk* aOutput,
+                            const AudioBlock& aInput,
+                            AudioBlock* aOutput,
                             bool* aFinished) override
   {
     if (!mBuffer || !mBufferEnd) {
       aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
       return;
     }
 
     uint32_t channels = mBuffer->GetChannels();
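
BorrowFromInputBuffer() above shares the source buffer with the output block instead of copying a full WEBAUDIO_BLOCK_SIZE of frames, while CopyFromInputBuffer() takes the memcpy path for partial or resampled blocks. A minimal contrast of the two patterns, using hypothetical Source and OutBlock types with std::shared_ptr standing in for Gecko's refcounting:

#include <cstring>
#include <memory>
#include <vector>

constexpr size_t kBlockSize = 128; // WEBAUDIO_BLOCK_SIZE

struct Source {
  std::shared_ptr<std::vector<float>> mData =
      std::make_shared<std::vector<float>>(1024, 0.5f);
};

struct OutBlock {
  std::shared_ptr<std::vector<float>> mBuffer; // keeps the source alive
  const float* mChannel = nullptr;
};

// Borrow: O(1); just point into the source at the current offset.
void Borrow(const Source& aSrc, size_t aOffset, OutBlock* aOut) {
  aOut->mBuffer = aSrc.mData;
  aOut->mChannel = aSrc.mData->data() + aOffset;
}

// Copy: O(kBlockSize); needed when only part of a block comes from the source.
void Copy(const Source& aSrc, size_t aOffset, float (&aDest)[kBlockSize]) {
  std::memcpy(aDest, aSrc.mData->data() + aOffset, kBlockSize * sizeof(float));
}

int main() {
  Source src;
  OutBlock out;
  Borrow(src, 128, &out); // no copy; out.mBuffer shares ownership
  float copied[kBlockSize];
  Copy(src, 128, copied); // explicit copy path
  return out.mChannel[0] == copied[0] ? 0 : 1;
}
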
--- a/dom/media/webaudio/AudioDestinationNode.cpp
+++ b/dom/media/webaudio/AudioDestinationNode.cpp
@@ -41,18 +41,18 @@ public:
     , mNumberOfChannels(aNumberOfChannels)
     , mLength(aLength)
     , mSampleRate(aSampleRate)
     , mBufferAllocated(false)
   {
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
-                            const AudioChunk& aInput,
-                            AudioChunk* aOutput,
+                            const AudioBlock& aInput,
+                            AudioBlock* aOutput,
                             bool* aFinished) override
   {
     // Do this just for the sake of political correctness; this output
     // will not go anywhere.
     *aOutput = aInput;
 
     // The output buffer is allocated lazily, on the rendering thread, when
     // non-null input is received.
@@ -228,18 +228,18 @@ public:
     , mVolume(1.0f)
     , mLastInputMuted(true)
     , mSuspended(false)
   {
     MOZ_ASSERT(aNode);
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
-                            const AudioChunk& aInput,
-                            AudioChunk* aOutput,
+                            const AudioBlock& aInput,
+                            AudioBlock* aOutput,
                             bool* aFinished) override
   {
     *aOutput = aInput;
     aOutput->mVolume *= mVolume;
 
     if (mSuspended) {
       return;
     }
--- a/dom/media/webaudio/AudioNodeEngine.cpp
+++ b/dom/media/webaudio/AudioNodeEngine.cpp
@@ -28,17 +28,18 @@ ThreadSharedFloatArrayBufferList::Create
 
     buffer->SetData(i, channelData, js_free, channelData);
   }
 
   return buffer.forget();
 }
 
 void
-WriteZeroesToAudioBlock(AudioChunk* aChunk, uint32_t aStart, uint32_t aLength)
+WriteZeroesToAudioBlock(AudioBlock* aChunk,
+                        uint32_t aStart, uint32_t aLength)
 {
   MOZ_ASSERT(aStart + aLength <= WEBAUDIO_BLOCK_SIZE);
   MOZ_ASSERT(!aChunk->IsNull(), "You should pass a non-null chunk");
   if (aLength == 0)
     return;
 
   for (uint32_t i = 0; i < aChunk->ChannelCount(); ++i) {
     PodZero(aChunk->ChannelFloatsForWrite(i) + aStart, aLength);
@@ -265,9 +266,30 @@ AudioBufferSumOfSquares(const float* aIn
   float sum = 0.0f;
   while (aLength--) {
     sum += *aInput * *aInput;
     ++aInput;
   }
   return sum;
 }
 
+void
+AudioNodeEngine::ProcessBlock(AudioNodeStream* aStream,
+                              const AudioBlock& aInput,
+                              AudioBlock* aOutput,
+                              bool* aFinished)
+{
+  MOZ_ASSERT(mInputCount <= 1 && mOutputCount <= 1);
+  *aOutput = aInput;
+}
+
+void
+AudioNodeEngine::ProcessBlocksOnPorts(AudioNodeStream* aStream,
+                                      const OutputChunks& aInput,
+                                      OutputChunks& aOutput,
+                                      bool* aFinished)
+{
+  MOZ_ASSERT(mInputCount > 1 || mOutputCount > 1);
+  // Only produce one output port, and drop all other input ports.
+  aOutput[0] = aInput[0];
+}
+
 } // namespace mozilla
--- a/dom/media/webaudio/AudioNodeEngine.h
+++ b/dom/media/webaudio/AudioNodeEngine.h
@@ -14,16 +14,17 @@
 namespace mozilla {
 
 namespace dom {
 struct ThreeDPoint;
 class AudioParamTimeline;
 class DelayNodeEngine;
 } // namespace dom
 
+class AudioBlock;
 class AudioNodeStream;
 
 /**
  * This class holds onto a set of immutable channel buffers. The storage
  * for the buffers must be malloced, but the buffer pointers and the malloc
  * pointers can be different (e.g. if the buffers are contained inside
  * some malloced object).
  */
@@ -127,17 +128,18 @@ public:
 
 private:
   nsAutoTArray<Storage, 2> mContents;
 };
 
 /**
  * aChunk must have been allocated by AudioBlock::AllocateChannels().
  */
-void WriteZeroesToAudioBlock(AudioChunk* aChunk, uint32_t aStart, uint32_t aLength);
+void WriteZeroesToAudioBlock(AudioBlock* aChunk, uint32_t aStart,
+                             uint32_t aLength);
 
 /**
  * Copy with scale. aScale == 1.0f should be optimized.
  */
 void AudioBufferCopyWithScale(const float* aInput,
                               float aScale,
                               float* aOutput,
                               uint32_t aSize);
@@ -245,17 +247,17 @@ AudioBufferSumOfSquares(const float* aIn
 /**
  * All methods of this class and its subclasses are called on the
  * MediaStreamGraph thread.
  */
 class AudioNodeEngine
 {
 public:
   // This should be compatible with AudioNodeStream::OutputChunks.
-  typedef nsAutoTArray<AudioChunk, 1> OutputChunks;
+  typedef nsAutoTArray<AudioBlock, 1> OutputChunks;
 
   explicit AudioNodeEngine(dom::AudioNode* aNode)
     : mNode(aNode)
     , mInputCount(aNode ? aNode->NumberOfInputs() : 1)
     , mOutputCount(aNode ? aNode->NumberOfOutputs() : 0)
   {
     MOZ_ASSERT(NS_IsMainThread());
     MOZ_COUNT_CTOR(AudioNodeEngine);
@@ -306,29 +308,25 @@ public:
    * (the mixed data for input 0).
    * aInput is guaranteed to have float sample format (if it has samples at all)
    * and to have been resampled to the sampling rate for the stream, and to have
    * exactly WEBAUDIO_BLOCK_SIZE samples.
    * *aFinished is set to false by the caller. If the callee sets it to true,
    * we'll finish the stream and not call this again.
    */
   virtual void ProcessBlock(AudioNodeStream* aStream,
-                            const AudioChunk& aInput,
-                            AudioChunk* aOutput,
-                            bool* aFinished)
-  {
-    MOZ_ASSERT(mInputCount <= 1 && mOutputCount <= 1);
-    *aOutput = aInput;
-  }
+                            const AudioBlock& aInput,
+                            AudioBlock* aOutput,
+                            bool* aFinished);
   /**
    * Produce the next block of audio samples, before input is provided.
    * ProcessBlock() will be called later, and it then should not change
    * aOutput.  This is used only for DelayNodeEngine in a feedback loop.
    */
-  virtual void ProduceBlockBeforeInput(AudioChunk* aOutput)
+  virtual void ProduceBlockBeforeInput(AudioBlock* aOutput)
   {
     NS_NOTREACHED("ProduceBlockBeforeInput called on wrong engine\n");
   }
 
   /**
    * Produce the next block of audio samples, given input samples in the aInput
    * array.  There is one input sample per active port in aInput, in order.
    * This is the multi-input/output version of ProcessBlock.  Only one kind
@@ -341,22 +339,17 @@ public:
   * of AudioChunks to aOutput as advertised by the AudioNode implementation.
   * An engine may choose to produce fewer outputs than advertised by the
   * corresponding AudioNode, in which case it will be interpreted as a channel
    * of silence.
    */
   virtual void ProcessBlocksOnPorts(AudioNodeStream* aStream,
                                     const OutputChunks& aInput,
                                     OutputChunks& aOutput,
-                                    bool* aFinished)
-  {
-    MOZ_ASSERT(mInputCount > 1 || mOutputCount > 1);
-    // Only produce one output port, and drop all other input ports.
-    aOutput[0] = aInput[0];
-  }
+                                    bool* aFinished);
 
   bool HasNode() const
   {
     MOZ_ASSERT(NS_IsMainThread());
     return !!mNode;
   }
 
   dom::AudioNode* NodeMainThread() const
--- a/dom/media/webaudio/AudioNodeExternalInputStream.cpp
+++ b/dom/media/webaudio/AudioNodeExternalInputStream.cpp
@@ -39,17 +39,17 @@ AudioNodeExternalInputStream::Create(Med
 
 /**
  * Copies the data in aInput to aOffsetInBlock within aBlock.
  * aBlock must have been allocated with AllocateInputBlock and have a channel
  * count that's a superset of the channels in aInput.
  */
 template <typename T>
 static void
-CopyChunkToBlock(AudioChunk& aInput, AudioChunk *aBlock,
+CopyChunkToBlock(AudioChunk& aInput, AudioBlock *aBlock,
                  uint32_t aOffsetInBlock)
 {
   uint32_t blockChannels = aBlock->ChannelCount();
   nsAutoTArray<const T*,2> channels;
   if (aInput.IsNull()) {
     channels.SetLength(blockChannels);
     PodZero(channels.Elements(), blockChannels);
   } else {
@@ -74,33 +74,33 @@ CopyChunkToBlock(AudioChunk& aInput, Aud
 }
 
 /**
  * Converts the data in aSegment to a single chunk aBlock. aSegment must have
  * duration WEBAUDIO_BLOCK_SIZE. aFallbackChannelCount is a superset of the
  * channels in every chunk of aSegment. aBlock must be float format or null.
  */
 static void ConvertSegmentToAudioBlock(AudioSegment* aSegment,
-                                       AudioChunk* aBlock,
+                                       AudioBlock* aBlock,
                                        int32_t aFallbackChannelCount)
 {
   NS_ASSERTION(aSegment->GetDuration() == WEBAUDIO_BLOCK_SIZE, "Bad segment duration");
 
   {
     AudioSegment::ChunkIterator ci(*aSegment);
     NS_ASSERTION(!ci.IsEnded(), "Should be at least one chunk!");
     if (ci->GetDuration() == WEBAUDIO_BLOCK_SIZE &&
         (ci->IsNull() || ci->mBufferFormat == AUDIO_FORMAT_FLOAT32)) {
       // Return this chunk directly to avoid copying data.
       *aBlock = *ci;
       return;
     }
   }
 
-  AllocateAudioBlock(aFallbackChannelCount, aBlock);
+  aBlock->AllocateChannels(aFallbackChannelCount);
 
   uint32_t duration = 0;
   for (AudioSegment::ChunkIterator ci(*aSegment); !ci.IsEnded(); ci.Next()) {
     switch (ci->mBufferFormat) {
       case AUDIO_FORMAT_S16: {
         CopyChunkToBlock<int16_t>(*ci, aBlock, duration);
         break;
       }
@@ -177,21 +177,21 @@ AudioNodeExternalInputStream::ProcessInp
       inputChannels = GetAudioChannelsSuperset(inputChannels, iter->ChannelCount());
     }
   }
 
   uint32_t accumulateIndex = 0;
   if (inputChannels) {
     nsAutoTArray<float,GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;
     for (uint32_t i = 0; i < audioSegments.Length(); ++i) {
-      AudioChunk tmpChunk;
+      AudioBlock tmpChunk;
       ConvertSegmentToAudioBlock(&audioSegments[i], &tmpChunk, inputChannels);
       if (!tmpChunk.IsNull()) {
         if (accumulateIndex == 0) {
-          AllocateAudioBlock(inputChannels, &mLastChunks[0]);
+          mLastChunks[0].AllocateChannels(inputChannels);
         }
         AccumulateInputChunk(accumulateIndex, tmpChunk, &mLastChunks[0], &downmixBuffer);
         accumulateIndex++;
       }
     }
   }
   if (accumulateIndex == 0) {
     mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
--- a/dom/media/webaudio/AudioNodeStream.cpp
+++ b/dom/media/webaudio/AudioNodeStream.cpp
@@ -363,34 +363,35 @@ AudioNodeStream::ComputedNumberOfChannel
   default:
   case ChannelCountMode::Max:
     // Nothing to do here, just shut up the compiler warning.
     return aInputChannelCount;
   }
 }
 
 void
-AudioNodeStream::ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex)
+AudioNodeStream::ObtainInputBlock(AudioBlock& aTmpChunk,
+                                  uint32_t aPortIndex)
 {
   uint32_t inputCount = mInputs.Length();
   uint32_t outputChannelCount = 1;
-  nsAutoTArray<AudioChunk*,250> inputChunks;
+  nsAutoTArray<const AudioBlock*,250> inputChunks;
   for (uint32_t i = 0; i < inputCount; ++i) {
     if (aPortIndex != mInputs[i]->InputNumber()) {
       // This input is connected to a different port
       continue;
     }
     MediaStream* s = mInputs[i]->GetSource();
     AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
     MOZ_ASSERT(a == s->AsAudioNodeStream());
     if (a->IsAudioParamStream()) {
       continue;
     }
 
-    AudioChunk* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
+    const AudioBlock* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
     MOZ_ASSERT(chunk);
     if (chunk->IsNull() || chunk->mChannelData.IsEmpty()) {
       continue;
     }
 
     inputChunks.AppendElement(chunk);
     outputChannelCount =
       GetAudioChannelsSuperset(outputChannelCount, chunk->ChannelCount());
@@ -411,28 +412,29 @@ AudioNodeStream::ObtainInputBlock(AudioC
     return;
   }
 
   if (outputChannelCount == 0) {
     aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
     return;
   }
 
-  AllocateAudioBlock(outputChannelCount, &aTmpChunk);
+  aTmpChunk.AllocateChannels(outputChannelCount);
   // The static storage here should be 1KB, so it's fine
   nsAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;
 
   for (uint32_t i = 0; i < inputChunkCount; ++i) {
     AccumulateInputChunk(i, *inputChunks[i], &aTmpChunk, &downmixBuffer);
   }
 }
 
 void
-AudioNodeStream::AccumulateInputChunk(uint32_t aInputIndex, const AudioChunk& aChunk,
-                                      AudioChunk* aBlock,
+AudioNodeStream::AccumulateInputChunk(uint32_t aInputIndex,
+                                      const AudioBlock& aChunk,
+                                      AudioBlock* aBlock,
                                       nsTArray<float>* aDownmixBuffer)
 {
   nsAutoTArray<const float*,GUESS_AUDIO_CHANNELS> channels;
   UpMixDownMixChunk(&aChunk, aBlock->ChannelCount(), channels, *aDownmixBuffer);
 
   for (uint32_t c = 0; c < channels.Length(); ++c) {
     const float* inputData = static_cast<const float*>(channels[c]);
     float* outputData = aBlock->ChannelFloatsForWrite(c);
@@ -446,17 +448,17 @@ AudioNodeStream::AccumulateInputChunk(ui
       if (aInputIndex == 0) {
         PodZero(outputData, WEBAUDIO_BLOCK_SIZE);
       }
     }
   }
 }
 
 void
-AudioNodeStream::UpMixDownMixChunk(const AudioChunk* aChunk,
+AudioNodeStream::UpMixDownMixChunk(const AudioBlock* aChunk,
                                    uint32_t aOutputChannelCount,
                                    nsTArray<const float*>& aOutputChannels,
                                    nsTArray<float>& aDownmixBuffer)
 {
   static const float silenceChannel[WEBAUDIO_BLOCK_SIZE] = {0.f};
 
   for (uint32_t i = 0; i < aChunk->ChannelCount(); i++) {
     aOutputChannels.AppendElement(static_cast<const float*>(aChunk->mChannelData[i]));
@@ -533,22 +535,16 @@ AudioNodeStream::ProcessInput(GraphTime 
       mLastChunks[0] = mInputChunks[0];
     } else {
       if (maxInputs <= 1 && outputCount <= 1) {
         mEngine->ProcessBlock(this, mInputChunks[0], &mLastChunks[0], &finished);
       } else {
         mEngine->ProcessBlocksOnPorts(this, mInputChunks, mLastChunks, &finished);
       }
     }
-    for (auto& chunk : mInputChunks) {
-      // If the buffer is shared then it won't be reused, so release the
-      // reference now.  Keep the channel data array to save a free/alloc
-      // pair.
-      chunk.ReleaseBufferIfShared();
-    }
     for (uint16_t i = 0; i < outputCount; ++i) {
       NS_ASSERTION(mLastChunks[i].GetDuration() == WEBAUDIO_BLOCK_SIZE,
                    "Invalid WebAudio chunk size");
     }
     if (finished) {
       mMarkAsFinishedAfterThisBlock = true;
     }
 
@@ -599,24 +595,24 @@ AudioNodeStream::ProduceOutputBeforeInpu
 
 void
 AudioNodeStream::AdvanceOutputSegment()
 {
   StreamBuffer::Track* track = EnsureTrack(AUDIO_TRACK);
   AudioSegment* segment = track->Get<AudioSegment>();
 
   if (mFlags & EXTERNAL_OUTPUT) {
-    segment->AppendAndConsumeChunk(&mLastChunks[0]);
+    segment->AppendAndConsumeChunk(mLastChunks[0].AsMutableChunk());
   } else {
     segment->AppendNullData(mLastChunks[0].GetDuration());
   }
 
   for (uint32_t j = 0; j < mListeners.Length(); ++j) {
     MediaStreamListener* l = mListeners[j];
-    AudioChunk copyChunk = mLastChunks[0];
+    AudioChunk copyChunk = mLastChunks[0].AsAudioChunk();
     AudioSegment tmpSegment;
     tmpSegment.AppendAndConsumeChunk(&copyChunk);
     l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK,
                                 segment->GetDuration(), 0, tmpSegment);
   }
 }
 
 StreamTime
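
ObtainInputBlock() and AccumulateInputChunk() above mix every chunk arriving on one input port into a single block: the first input is copied in and later inputs are added on top. A compilable toy of that copy-then-add accumulation; plain float arrays stand in for AudioBlock channels, the chunk volume is folded in as a scale factor, and the up/down-mix step is omitted.

#include <cstdio>

constexpr int kBlock = 128; // WEBAUDIO_BLOCK_SIZE

// Copy the first input into the output, add every later one on top.
static void AccumulateInput(int aInputIndex, const float* aIn, float aVolume,
                            float* aOut) {
  for (int i = 0; i < kBlock; ++i) {
    const float v = aIn[i] * aVolume;
    aOut[i] = aInputIndex == 0 ? v : aOut[i] + v;
  }
}

int main() {
  float a[kBlock], b[kBlock], mix[kBlock];
  for (int i = 0; i < kBlock; ++i) {
    a[i] = 1.f;
    b[i] = 0.25f;
  }
  AccumulateInput(0, a, 0.5f, mix); // first input: copy, mix[i] == 0.5
  AccumulateInput(1, b, 1.0f, mix); // second input: add, mix[i] == 0.75
  std::printf("%g\n", mix[0]);      // prints 0.75
  return 0;
}
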
--- a/dom/media/webaudio/AudioNodeStream.h
+++ b/dom/media/webaudio/AudioNodeStream.h
@@ -36,17 +36,17 @@ class AudioNodeStream : public Processed
   typedef dom::ChannelCountMode ChannelCountMode;
   typedef dom::ChannelInterpretation ChannelInterpretation;
 
 public:
   typedef mozilla::dom::AudioContext AudioContext;
 
   enum { AUDIO_TRACK = 1 };
 
-  typedef nsAutoTArray<AudioChunk, 1> OutputChunks;
+  typedef nsAutoTArray<AudioBlock, 1> OutputChunks;
 
   // Flags re main thread updates and stream output.
   typedef unsigned Flags;
   enum : Flags {
     NO_STREAM_FLAGS = 0U,
     NEED_MAIN_THREAD_FINISHED = 1U << 0,
     NEED_MAIN_THREAD_CURRENT_TIME = 1U << 1,
     // Internal AudioNodeStreams can only pass their output to another
@@ -166,30 +166,30 @@ public:
 
   void SizeOfAudioNodesIncludingThis(MallocSizeOf aMallocSizeOf,
                                      AudioNodeSizes& aUsage) const;
 
 
 protected:
   void AdvanceOutputSegment();
   void FinishOutput();
-  void AccumulateInputChunk(uint32_t aInputIndex, const AudioChunk& aChunk,
-                            AudioChunk* aBlock,
+  void AccumulateInputChunk(uint32_t aInputIndex, const AudioBlock& aChunk,
+                            AudioBlock* aBlock,
                             nsTArray<float>* aDownmixBuffer);
-  void UpMixDownMixChunk(const AudioChunk* aChunk, uint32_t aOutputChannelCount,
+  void UpMixDownMixChunk(const AudioBlock* aChunk, uint32_t aOutputChannelCount,
                          nsTArray<const float*>& aOutputChannels,
                          nsTArray<float>& aDownmixBuffer);
 
   uint32_t ComputedNumberOfChannels(uint32_t aInputChannelCount);
-  void ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex);
+  void ObtainInputBlock(AudioBlock& aTmpChunk, uint32_t aPortIndex);
 
   // The engine that will generate output for this node.
   nsAutoPtr<AudioNodeEngine> mEngine;
   // The mixed input blocks are kept from iteration to iteration to avoid
-  // reallocating channel data arrays.
+  // reallocating channel data arrays and any buffers for mixing.
   OutputChunks mInputChunks;
   // The last block produced by this node.
   OutputChunks mLastChunks;
   // The stream's sampling rate
   const TrackRate mSampleRate;
   // This is necessary to be able to find all the nodes for a given
   // AudioContext. It is set on the main thread, in the constructor.
   const AudioContext::AudioContextId mAudioContextId;
--- a/dom/media/webaudio/AudioParam.cpp
+++ b/dom/media/webaudio/AudioParam.cpp
@@ -127,17 +127,17 @@ float
 AudioParamTimeline::AudioNodeInputValue(size_t aCounter) const
 {
   MOZ_ASSERT(mStream);
 
   // If we have a chunk produced by the AudioNode inputs to the AudioParam,
   // get its value now.  We use aCounter to tell us which frame of the last
   // AudioChunk to look at.
   float audioNodeInputValue = 0.0f;
-  const AudioChunk& lastAudioNodeChunk =
+  const AudioBlock& lastAudioNodeChunk =
     static_cast<AudioNodeStream*>(mStream.get())->LastChunks()[0];
   if (!lastAudioNodeChunk.IsNull()) {
     MOZ_ASSERT(lastAudioNodeChunk.GetDuration() == WEBAUDIO_BLOCK_SIZE);
     audioNodeInputValue =
       static_cast<const float*>(lastAudioNodeChunk.mChannelData[0])[aCounter];
     audioNodeInputValue *= lastAudioNodeChunk.mVolume;
   }
 
--- a/dom/media/webaudio/BiquadFilterNode.cpp
+++ b/dom/media/webaudio/BiquadFilterNode.cpp
@@ -133,18 +133,18 @@ public:
       WebAudioUtils::ConvertAudioParamToTicks(mGain, mSource, mDestination);
       break;
     default:
       NS_ERROR("Bad BiquadFilterNodeEngine TimelineParameter");
     }
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
-                            const AudioChunk& aInput,
-                            AudioChunk* aOutput,
+                            const AudioBlock& aInput,
+                            AudioBlock* aOutput,
                             bool* aFinished) override
   {
     float inputBuffer[WEBAUDIO_BLOCK_SIZE];
 
     if (aInput.IsNull()) {
       bool hasTail = false;
       for (uint32_t i = 0; i < mBiquads.Length(); ++i) {
         if (mBiquads[i].hasTail()) {
@@ -178,17 +178,17 @@ public:
         NS_WARNING("BiquadFilterNode channel count changes may produce audio glitches");
       }
 
       // Adjust the number of biquads based on the number of channels
       mBiquads.SetLength(aInput.ChannelCount());
     }
 
     uint32_t numberOfChannels = mBiquads.Length();
-    AllocateAudioBlock(numberOfChannels, aOutput);
+    aOutput->AllocateChannels(numberOfChannels);
 
     StreamTime pos = aStream->GetCurrentPosition();
 
     double freq = mFrequency.GetValueAtTime(pos);
     double q = mQ.GetValueAtTime(pos);
     double gain = mGain.GetValueAtTime(pos);
     double detune = mDetune.GetValueAtTime(pos);
 
--- a/dom/media/webaudio/ChannelMergerNode.cpp
+++ b/dom/media/webaudio/ChannelMergerNode.cpp
@@ -35,17 +35,17 @@ public:
     for (uint16_t i = 0; i < InputCount(); ++i) {
       channelCount += aInput[i].ChannelCount();
     }
     if (channelCount == 0) {
       aOutput[0].SetNull(WEBAUDIO_BLOCK_SIZE);
       return;
     }
     channelCount = std::min(channelCount, WebAudioUtils::MaxChannelCount);
-    AllocateAudioBlock(channelCount, &aOutput[0]);
+    aOutput[0].AllocateChannels(channelCount);
 
     // Append each channel in each input to the output
     size_t channelIndex = 0;
     for (uint16_t i = 0; true; ++i) {
       MOZ_ASSERT(i < InputCount());
       for (size_t j = 0; j < aInput[i].ChannelCount(); ++j) {
         AudioBlockCopyChannelWithScale(
             static_cast<const float*>(aInput[i].mChannelData[j]),
--- a/dom/media/webaudio/ChannelSplitterNode.cpp
+++ b/dom/media/webaudio/ChannelSplitterNode.cpp
@@ -29,17 +29,17 @@ public:
                                     bool* aFinished) override
   {
     MOZ_ASSERT(aInput.Length() == 1, "Should only have one input port");
 
     aOutput.SetLength(OutputCount());
     for (uint16_t i = 0; i < OutputCount(); ++i) {
       if (i < aInput[0].ChannelCount()) {
         // Split out existing channels
-        AllocateAudioBlock(1, &aOutput[i]);
+        aOutput[i].AllocateChannels(1);
         AudioBlockCopyChannelWithScale(
             static_cast<const float*>(aInput[0].mChannelData[i]),
             aInput[0].mVolume,
             aOutput[i].ChannelFloatsForWrite(0));
       } else {
         // Pad with silent channels if needed
         aOutput[i].SetNull(WEBAUDIO_BLOCK_SIZE);
       }
--- a/dom/media/webaudio/ConvolverNode.cpp
+++ b/dom/media/webaudio/ConvolverNode.cpp
@@ -97,64 +97,64 @@ public:
 
     mReverb = new WebCore::Reverb(mBuffer, mBufferLength,
                                   WEBAUDIO_BLOCK_SIZE,
                                   MaxFFTSize, 2, mUseBackgroundThreads,
                                   mNormalize, mSampleRate);
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
-                            const AudioChunk& aInput,
-                            AudioChunk* aOutput,
+                            const AudioBlock& aInput,
+                            AudioBlock* aOutput,
                             bool* aFinished) override
   {
     if (!mReverb) {
       *aOutput = aInput;
       return;
     }
 
-    AudioChunk input = aInput;
+    AudioBlock input = aInput;
     if (aInput.IsNull()) {
       if (mLeftOverData > 0) {
         mLeftOverData -= WEBAUDIO_BLOCK_SIZE;
-        AllocateAudioBlock(1, &input);
+        input.AllocateChannels(1);
         WriteZeroesToAudioBlock(&input, 0, WEBAUDIO_BLOCK_SIZE);
       } else {
         if (mLeftOverData != INT32_MIN) {
           mLeftOverData = INT32_MIN;
           nsRefPtr<PlayingRefChanged> refchanged =
             new PlayingRefChanged(aStream, PlayingRefChanged::RELEASE);
           aStream->Graph()->
             DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget());
         }
         aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
         return;
       }
     } else {
       if (aInput.mVolume != 1.0f) {
         // Pre-multiply the input's volume
         uint32_t numChannels = aInput.ChannelCount();
-        AllocateAudioBlock(numChannels, &input);
+        input.AllocateChannels(numChannels);
         for (uint32_t i = 0; i < numChannels; ++i) {
           const float* src = static_cast<const float*>(aInput.mChannelData[i]);
           float* dest = input.ChannelFloatsForWrite(i);
           AudioBlockCopyChannelWithScale(src, aInput.mVolume, dest);
         }
       }
 
       if (mLeftOverData <= 0) {
         nsRefPtr<PlayingRefChanged> refchanged =
           new PlayingRefChanged(aStream, PlayingRefChanged::ADDREF);
         aStream->Graph()->
           DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget());
       }
       mLeftOverData = mBufferLength;
       MOZ_ASSERT(mLeftOverData > 0);
     }
-    AllocateAudioBlock(2, aOutput);
+    aOutput->AllocateChannels(2);
 
     mReverb->process(&input, aOutput, WEBAUDIO_BLOCK_SIZE);
   }
 
   virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override
   {
     size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf);
     if (mBuffer && !mBuffer->IsShared()) {
--- a/dom/media/webaudio/DelayBuffer.cpp
+++ b/dom/media/webaudio/DelayBuffer.cpp
@@ -21,38 +21,38 @@ DelayBuffer::SizeOfExcludingThis(MallocS
     amount += mChunks[i].SizeOfExcludingThis(aMallocSizeOf, false);
   }
 
   amount += mUpmixChannels.ShallowSizeOfExcludingThis(aMallocSizeOf);
   return amount;
 }
 
 void
-DelayBuffer::Write(const AudioChunk& aInputChunk)
+DelayBuffer::Write(const AudioBlock& aInputChunk)
 {
   // We must have a reference to the buffer if there are channels
   MOZ_ASSERT(aInputChunk.IsNull() == !aInputChunk.ChannelCount());
 #ifdef DEBUG
   MOZ_ASSERT(!mHaveWrittenBlock);
   mHaveWrittenBlock = true;
 #endif
 
   if (!EnsureBuffer()) {
     return;
   }
 
   if (mCurrentChunk == mLastReadChunk) {
     mLastReadChunk = -1; // invalidate cache
   }
-  mChunks[mCurrentChunk] = aInputChunk;
+  mChunks[mCurrentChunk] = aInputChunk.AsAudioChunk();
 }
 
 void
 DelayBuffer::Read(const double aPerFrameDelays[WEBAUDIO_BLOCK_SIZE],
-                  AudioChunk* aOutputChunk,
+                  AudioBlock* aOutputChunk,
                   ChannelInterpretation aChannelInterpretation)
 {
   int chunkCount = mChunks.Length();
   if (!chunkCount) {
     aOutputChunk->SetNull(WEBAUDIO_BLOCK_SIZE);
     return;
   }
 
@@ -79,45 +79,45 @@ DelayBuffer::Read(const double aPerFrame
     channelCount = GetAudioChannelsSuperset(channelCount,
                                             mChunks[i].ChannelCount());
     if (i == youngestChunk) {
       break;
     }
   }
 
   if (channelCount) {
-    AllocateAudioBlock(channelCount, aOutputChunk);
+    aOutputChunk->AllocateChannels(channelCount);
     ReadChannels(aPerFrameDelays, aOutputChunk,
                  0, channelCount, aChannelInterpretation);
   } else {
     aOutputChunk->SetNull(WEBAUDIO_BLOCK_SIZE);
   }
 
   // Remember currentDelayFrames for the next ProcessBlock call
   mCurrentDelay = aPerFrameDelays[WEBAUDIO_BLOCK_SIZE - 1];
 }
 
 void
 DelayBuffer::ReadChannel(const double aPerFrameDelays[WEBAUDIO_BLOCK_SIZE],
-                         AudioChunk* aOutputChunk, uint32_t aChannel,
+                         AudioBlock* aOutputChunk, uint32_t aChannel,
                          ChannelInterpretation aChannelInterpretation)
 {
   if (!mChunks.Length()) {
     float* outputChannel = aOutputChunk->ChannelFloatsForWrite(aChannel);
     PodZero(outputChannel, WEBAUDIO_BLOCK_SIZE);
     return;
   }
 
   ReadChannels(aPerFrameDelays, aOutputChunk,
                aChannel, 1, aChannelInterpretation);
 }
 
 void
 DelayBuffer::ReadChannels(const double aPerFrameDelays[WEBAUDIO_BLOCK_SIZE],
-                          AudioChunk* aOutputChunk,
+                          AudioBlock* aOutputChunk,
                           uint32_t aFirstChannel, uint32_t aNumChannelsToRead,
                           ChannelInterpretation aChannelInterpretation)
 {
   uint32_t totalChannelCount = aOutputChunk->ChannelCount();
   uint32_t readChannelsEnd = aFirstChannel + aNumChannelsToRead;
   MOZ_ASSERT(readChannelsEnd <= totalChannelCount);
 
   if (mUpmixChannels.Length() != totalChannelCount) {
@@ -161,17 +161,17 @@ DelayBuffer::ReadChannels(const double a
       }
 
       interpolationFactor = 1.0 - interpolationFactor;
     }
   }
 }
 
 void
-DelayBuffer::Read(double aDelayTicks, AudioChunk* aOutputChunk,
+DelayBuffer::Read(double aDelayTicks, AudioBlock* aOutputChunk,
                   ChannelInterpretation aChannelInterpretation)
 {
   const bool firstTime = mCurrentDelay < 0.0;
   double currentDelay = firstTime ? aDelayTicks : mCurrentDelay;
 
   double computedDelay[WEBAUDIO_BLOCK_SIZE];
 
   for (unsigned i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) {
--- a/dom/media/webaudio/DelayBuffer.h
+++ b/dom/media/webaudio/DelayBuffer.h
@@ -34,34 +34,34 @@ public:
     // The 180 second limit in AudioContext::CreateDelay() and the
     // 1 << MEDIA_TIME_FRAC_BITS limit on sample rate provide a limit on the
     // maximum delay.
     MOZ_ASSERT(aMaxDelayTicks <=
                std::numeric_limits<decltype(mMaxDelayTicks)>::max());
   }
 
   // Write a WEBAUDIO_BLOCK_SIZE block for aChannelCount channels.
-  void Write(const AudioChunk& aInputChunk);
+  void Write(const AudioBlock& aInputChunk);
 
   // Read a block with an array of delays, in ticks, for each sample frame.
   // Each delay should be >= 0 and <= MaxDelayTicks().
   void Read(const double aPerFrameDelays[WEBAUDIO_BLOCK_SIZE],
-            AudioChunk* aOutputChunk,
+            AudioBlock* aOutputChunk,
             ChannelInterpretation aChannelInterpretation);
   // Read a block with a constant delay, which will be smoothed with the
   // previous delay.  The delay should be >= 0 and <= MaxDelayTicks().
-  void Read(double aDelayTicks, AudioChunk* aOutputChunk,
+  void Read(double aDelayTicks, AudioBlock* aOutputChunk,
             ChannelInterpretation aChannelInterpretation);
 
   // Read into one of the channels of aOutputChunk, given an array of
   // delays in ticks.  This is useful when delays are different on different
   // channels.  aOutputChunk must have already been allocated with at least as
   // many channels as were in any of the blocks passed to Write().
   void ReadChannel(const double aPerFrameDelays[WEBAUDIO_BLOCK_SIZE],
-                   AudioChunk* aOutputChunk, uint32_t aChannel,
+                   AudioBlock* aOutputChunk, uint32_t aChannel,
                    ChannelInterpretation aChannelInterpretation);
 
   // Advance the buffer pointer
   void NextBlock()
   {
     mCurrentChunk = (mCurrentChunk + 1) % mChunks.Length();
 #ifdef DEBUG
     MOZ_ASSERT(mHaveWrittenBlock);
@@ -75,17 +75,17 @@ public:
   };
 
   int MaxDelayTicks() const { return mMaxDelayTicks; }
 
   size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const;
 
 private:
   void ReadChannels(const double aPerFrameDelays[WEBAUDIO_BLOCK_SIZE],
-                    AudioChunk* aOutputChunk,
+                    AudioBlock* aOutputChunk,
                     uint32_t aFirstChannel, uint32_t aNumChannelsToRead,
                     ChannelInterpretation aChannelInterpretation);
   bool EnsureBuffer();
   int PositionForDelay(int aDelay);
   int ChunkForPosition(int aPosition);
   int OffsetForPosition(int aPosition);
   int ChunkForDelay(int aDelay);
   void UpdateUpmixChannels(int aNewReadChunk, uint32_t channelCount,
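
DelayBuffer above writes one WEBAUDIO_BLOCK_SIZE block per iteration and reads back with a constant or per-frame delay, which is how DelayNode.cpp below drives it via mBuffer.Read(computedDelay, aOutput, channelInterpretation). A minimal single-channel ring-buffer toy of that Write()/Read()/NextBlock() pattern; ToyDelayBuffer and its integer-only delays are simplifications, since the real DelayBuffer stores whole AudioChunks, interpolates fractional delays, and up-mixes channels.

#include <cstdio>
#include <vector>

constexpr int kBlock = 128; // WEBAUDIO_BLOCK_SIZE

class ToyDelayBuffer {
  std::vector<float> mRing;
  int mWritePos = 0;

public:
  explicit ToyDelayBuffer(int aMaxDelayTicks)
      : mRing(((aMaxDelayTicks + kBlock - 1) / kBlock + 1) * kBlock, 0.f) {}

  // Write one block at the current position.
  void Write(const float (&aBlock)[kBlock]) {
    for (int i = 0; i < kBlock; ++i) {
      mRing[(mWritePos + i) % mRing.size()] = aBlock[i];
    }
  }

  // Read one block, each frame delayed by its own tick count.
  void Read(const int (&aPerFrameDelays)[kBlock], float (&aOut)[kBlock]) {
    const int size = static_cast<int>(mRing.size());
    for (int i = 0; i < kBlock; ++i) {
      const int pos = ((mWritePos + i - aPerFrameDelays[i]) % size + size) % size;
      aOut[i] = mRing[pos];
    }
  }

  // Advance the buffer pointer, as DelayBuffer::NextBlock() does.
  void NextBlock() {
    mWritePos = (mWritePos + kBlock) % static_cast<int>(mRing.size());
  }
};

int main() {
  ToyDelayBuffer buffer(256);
  float in[kBlock], out[kBlock];
  int delays[kBlock];
  for (int i = 0; i < kBlock; ++i) {
    in[i] = float(i);
    delays[i] = 10;
  }
  buffer.Write(in);
  buffer.Read(delays, out);
  std::printf("%g\n", out[10]); // prints 0: the frame written 10 ticks earlier
  buffer.NextBlock();
  return 0;
}
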
--- a/dom/media/webaudio/DelayNode.cpp
+++ b/dom/media/webaudio/DelayNode.cpp
@@ -71,18 +71,18 @@ public:
       WebAudioUtils::ConvertAudioParamToTicks(mDelay, mSource, mDestination);
       break;
     default:
       NS_ERROR("Bad DelayNodeEngine TimelineParameter");
     }
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
-                            const AudioChunk& aInput,
-                            AudioChunk* aOutput,
+                            const AudioBlock& aInput,
+                            AudioBlock* aOutput,
                             bool* aFinished) override
   {
     MOZ_ASSERT(mSource == aStream, "Invalid source stream");
     MOZ_ASSERT(aStream->SampleRate() == mDestination->SampleRate());
 
     if (!aInput.IsSilentOrSubnormal()) {
       if (mLeftOverData <= 0) {
         nsRefPtr<PlayingRefChanged> refchanged =
@@ -114,17 +114,17 @@ public:
     // ProduceBlockBeforeInput() when in a cycle.
     if (!mHaveProducedBeforeInput) {
       UpdateOutputBlock(aOutput, 0.0);
     }
     mHaveProducedBeforeInput = false;
     mBuffer.NextBlock();
   }
 
-  void UpdateOutputBlock(AudioChunk* aOutput, double minDelay)
+  void UpdateOutputBlock(AudioBlock* aOutput, double minDelay)
   {
     double maxDelay = mMaxDelay;
     double sampleRate = mSource->SampleRate();
     ChannelInterpretation channelInterpretation =
       mSource->GetChannelInterpretation();
     if (mDelay.HasSimpleValue()) {
       // If this DelayNode is in a cycle, make sure the delay value is at least
       // one block, even if that is greater than maxDelay.
@@ -146,17 +146,17 @@ public:
         double delayAtTickClamped =
           std::max(minDelay, std::min(delayAtTick, maxDelay));
         computedDelay[counter] = delayAtTickClamped;
       }
       mBuffer.Read(computedDelay, aOutput, channelInterpretation);
     }
   }
 
-  virtual void ProduceBlockBeforeInput(AudioChunk* aOutput) override
+  virtual void ProduceBlockBeforeInput(AudioBlock* aOutput) override
   {
     if (mLeftOverData <= 0) {
       aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
     } else {
       UpdateOutputBlock(aOutput, WEBAUDIO_BLOCK_SIZE);
     }
     mHaveProducedBeforeInput = true;
   }
--- a/dom/media/webaudio/DynamicsCompressorNode.cpp
+++ b/dom/media/webaudio/DynamicsCompressorNode.cpp
@@ -88,18 +88,18 @@ public:
       WebAudioUtils::ConvertAudioParamToTicks(mRelease, mSource, mDestination);
       break;
     default:
       NS_ERROR("Bad DynamicsCompresssorNodeEngine TimelineParameter");
     }
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
-                            const AudioChunk& aInput,
-                            AudioChunk* aOutput,
+                            const AudioBlock& aInput,
+                            AudioBlock* aOutput,
                             bool* aFinished) override
   {
     if (aInput.IsNull()) {
       // Just output silence
       *aOutput = aInput;
       return;
     }
 
@@ -117,17 +117,17 @@ public:
                                    mKnee.GetValueAtTime(pos));
     mCompressor->setParameterValue(DynamicsCompressor::ParamRatio,
                                    mRatio.GetValueAtTime(pos));
     mCompressor->setParameterValue(DynamicsCompressor::ParamAttack,
                                    mAttack.GetValueAtTime(pos));
     mCompressor->setParameterValue(DynamicsCompressor::ParamRelease,
                                    mRelease.GetValueAtTime(pos));
 
-    AllocateAudioBlock(channelCount, aOutput);
+    aOutput->AllocateChannels(channelCount);
     mCompressor->process(&aInput, aOutput, aInput.GetDuration());
 
     SendReductionParamToMainThread(aStream,
                                    mCompressor->parameterValue(DynamicsCompressor::ParamReduction));
   }
 
   virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override
   {
--- a/dom/media/webaudio/GainNode.cpp
+++ b/dom/media/webaudio/GainNode.cpp
@@ -54,18 +54,18 @@ public:
       WebAudioUtils::ConvertAudioParamToTicks(mGain, mSource, mDestination);
       break;
     default:
       NS_ERROR("Bad GainNodeEngine TimelineParameter");
     }
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
-                            const AudioChunk& aInput,
-                            AudioChunk* aOutput,
+                            const AudioBlock& aInput,
+                            AudioBlock* aOutput,
                             bool* aFinished) override
   {
     MOZ_ASSERT(mSource == aStream, "Invalid source stream");
 
     if (aInput.IsNull()) {
       // If input is silent, so is the output
       aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
     } else if (mGain.HasSimpleValue()) {
@@ -76,17 +76,17 @@ public:
       } else {
         *aOutput = aInput;
         aOutput->mVolume *= gain;
       }
     } else {
       // First, compute a vector of gains for each track tick based on the
       // timeline at hand, and then for each channel, multiply the values
       // in the buffer with the gain vector.
-      AllocateAudioBlock(aInput.ChannelCount(), aOutput);
+      aOutput->AllocateChannels(aInput.ChannelCount());
 
       // Compute the gain values for the duration of the input AudioChunk
       StreamTime tick = aStream->GetCurrentPosition();
       float computedGain[WEBAUDIO_BLOCK_SIZE];
       mGain.GetValuesAtTime(tick, computedGain, WEBAUDIO_BLOCK_SIZE);
 
       for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) {
         computedGain[counter] *= aInput.mVolume;
--- a/dom/media/webaudio/OscillatorNode.cpp
+++ b/dom/media/webaudio/OscillatorNode.cpp
@@ -272,24 +272,24 @@ public:
 
       // Calculate next phase position from wrapped value j1 to avoid loss of
       // precision at large values.
       mPhase =
         j1 + sampleInterpolationFactor + basePhaseIncrement * mFinalFrequency;
     }
   }
 
-  void ComputeSilence(AudioChunk *aOutput)
+  void ComputeSilence(AudioBlock *aOutput)
   {
     aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
-                            const AudioChunk& aInput,
-                            AudioChunk* aOutput,
+                            const AudioBlock& aInput,
+                            AudioBlock* aOutput,
                             bool* aFinished) override
   {
     MOZ_ASSERT(mSource == aStream, "Invalid source stream");
 
     StreamTime ticks = aStream->GetCurrentPosition();
     if (mStart == -1) {
       ComputeSilence(aOutput);
       return;
@@ -302,17 +302,17 @@ public:
       return;
     }
     if (ticks + WEBAUDIO_BLOCK_SIZE <= mStart) {
       // We're not playing yet.
       ComputeSilence(aOutput);
       return;
     }
 
-    AllocateAudioBlock(1, aOutput);
+    aOutput->AllocateChannels(1);
     float* output = aOutput->ChannelFloatsForWrite(0);
 
     uint32_t start, end;
     FillBounds(output, ticks, start, end);
 
     // Synthesize the correct waveform.
     switch(mType) {
       case OscillatorType::Sine:
--- a/dom/media/webaudio/PannerNode.cpp
+++ b/dom/media/webaudio/PannerNode.cpp
@@ -130,18 +130,18 @@ public:
     case PannerNode::CONE_OUTER_ANGLE: mConeOuterAngle = aParam; break;
     case PannerNode::CONE_OUTER_GAIN: mConeOuterGain = aParam; break;
     default:
       NS_ERROR("Bad PannerNodeEngine DoubleParameter");
     }
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
-                            const AudioChunk& aInput,
-                            AudioChunk* aOutput,
+                            const AudioBlock& aInput,
+                            AudioBlock* aOutput,
                             bool *aFinished) override
   {
     if (aInput.IsNull()) {
       // mLeftOverData != INT_MIN means that the panning model was HRTF and a
       // tail-time reference was added.  Even if the model is now equalpower,
       // the reference will need to be removed.
       if (mLeftOverData > 0 &&
           mPanningModelFunction == &PannerNodeEngine::HRTFPanningFunction) {
@@ -172,18 +172,18 @@ public:
     (this->*mPanningModelFunction)(aInput, aOutput);
   }
 
   void ComputeAzimuthAndElevation(float& aAzimuth, float& aElevation);
   float ComputeConeGain();
   // Compute how much the distance contributes to the gain reduction.
   float ComputeDistanceGain();
 
-  void EqualPowerPanningFunction(const AudioChunk& aInput, AudioChunk* aOutput);
-  void HRTFPanningFunction(const AudioChunk& aInput, AudioChunk* aOutput);
+  void EqualPowerPanningFunction(const AudioBlock& aInput, AudioBlock* aOutput);
+  void HRTFPanningFunction(const AudioBlock& aInput, AudioBlock* aOutput);
 
   float LinearGainFunction(float aDistance);
   float InverseGainFunction(float aDistance);
   float ExponentialGainFunction(float aDistance);
 
   virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override
   {
     size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf);
@@ -195,17 +195,17 @@ public:
   }
 
   virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override
   {
     return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
   }
 
   nsAutoPtr<HRTFPanner> mHRTFPanner;
-  typedef void (PannerNodeEngine::*PanningModelFunction)(const AudioChunk& aInput, AudioChunk* aOutput);
+  typedef void (PannerNodeEngine::*PanningModelFunction)(const AudioBlock& aInput, AudioBlock* aOutput);
   PanningModelFunction mPanningModelFunction;
   typedef float (PannerNodeEngine::*DistanceModelFunction)(float aDistance);
   DistanceModelFunction mDistanceModelFunction;
   ThreeDPoint mPosition;
   ThreeDPoint mOrientation;
   ThreeDPoint mVelocity;
   double mRefDistance;
   double mMaxDistance;
@@ -297,50 +297,50 @@ PannerNodeEngine::InverseGainFunction(fl
 
 float
 PannerNodeEngine::ExponentialGainFunction(float aDistance)
 {
   return pow(aDistance / mRefDistance, -mRolloffFactor);
 }
 
 void
-PannerNodeEngine::HRTFPanningFunction(const AudioChunk& aInput,
-                                      AudioChunk* aOutput)
+PannerNodeEngine::HRTFPanningFunction(const AudioBlock& aInput,
+                                      AudioBlock* aOutput)
 {
   // The output of this node is always stereo, no matter what the inputs are.
-  AllocateAudioBlock(2, aOutput);
+  aOutput->AllocateChannels(2);
 
   float azimuth, elevation;
   ComputeAzimuthAndElevation(azimuth, elevation);
 
-  AudioChunk input = aInput;
+  AudioBlock input = aInput;
   // Gain is applied before the delay and convolution of the HRTF.
   input.mVolume *= ComputeConeGain() * ComputeDistanceGain();
 
   mHRTFPanner->pan(azimuth, elevation, &input, aOutput);
 }
 
 void
-PannerNodeEngine::EqualPowerPanningFunction(const AudioChunk& aInput,
-                                            AudioChunk* aOutput)
+PannerNodeEngine::EqualPowerPanningFunction(const AudioBlock& aInput,
+                                            AudioBlock* aOutput)
 {
   float azimuth, elevation, gainL, gainR, normalizedAzimuth, distanceGain, coneGain;
   int inputChannels = aInput.ChannelCount();
 
   // If the panner and the listener are in the same spot, and no cone gain
   // is specified, this node is a no-op.
   if (mListenerPosition == mPosition &&
       mConeInnerAngle == 360 &&
       mConeOuterAngle == 360) {
     *aOutput = aInput;
     return;
   }
 
   // The output of this node is always stereo, no matter what the inputs are.
-  AllocateAudioBlock(2, aOutput);
+  aOutput->AllocateChannels(2);
 
   ComputeAzimuthAndElevation(azimuth, elevation);
   coneGain = ComputeConeGain();
 
   // The following algorithm is described in the spec.
   // Clamp azimuth to the [-180, 180] range.
   azimuth = min(180.f, max(-180.f, azimuth));
 
--- a/dom/media/webaudio/PanningUtils.h
+++ b/dom/media/webaudio/PanningUtils.h
@@ -10,51 +10,51 @@
 #include "AudioSegment.h"
 #include "AudioNodeEngine.h"
 
 namespace mozilla {
 namespace dom {
 
 template<typename T>
 void
-GainMonoToStereo(const AudioChunk& aInput, AudioChunk* aOutput,
+GainMonoToStereo(const AudioBlock& aInput, AudioBlock* aOutput,
                  T aGainL, T aGainR)
 {
   float* outputL = aOutput->ChannelFloatsForWrite(0);
   float* outputR = aOutput->ChannelFloatsForWrite(1);
   const float* input = static_cast<const float*>(aInput.mChannelData[0]);
 
   MOZ_ASSERT(aInput.ChannelCount() == 1);
   MOZ_ASSERT(aOutput->ChannelCount() == 2);
 
   AudioBlockPanMonoToStereo(input, aGainL, aGainR, outputL, outputR);
 }
 
 // T can be float or an array of floats, and U can be bool or an array of bools,
 // depending on whether the parameter values are constant over this block.
 template<typename T, typename U>
 void
-GainStereoToStereo(const AudioChunk& aInput, AudioChunk* aOutput,
+GainStereoToStereo(const AudioBlock& aInput, AudioBlock* aOutput,
                    T aGainL, T aGainR, U aOnLeft)
 {
   float* outputL = aOutput->ChannelFloatsForWrite(0);
   float* outputR = aOutput->ChannelFloatsForWrite(1);
   const float* inputL = static_cast<const float*>(aInput.mChannelData[0]);
   const float* inputR = static_cast<const float*>(aInput.mChannelData[1]);
 
   MOZ_ASSERT(aInput.ChannelCount() == 2);
   MOZ_ASSERT(aOutput->ChannelCount() == 2);
 
   AudioBlockPanStereoToStereo(inputL, inputR, aGainL, aGainR, aOnLeft, outputL, outputR);
 }
 
 // T can be float or an array of floats, and U can be bool or an array of bools,
 // depending on whether the parameter values are constant over this block.
 template<typename T, typename U>
-void ApplyStereoPanning(const AudioChunk& aInput, AudioChunk* aOutput,
+void ApplyStereoPanning(const AudioBlock& aInput, AudioBlock* aOutput,
                         T aGainL, T aGainR, U aOnLeft)
 {
   if (aInput.ChannelCount() == 1) {
     GainMonoToStereo(aInput, aOutput, aGainL, aGainR);
   } else {
     GainStereoToStereo(aInput, aOutput, aGainL, aGainR, aOnLeft);
   }
 }
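
A hedged usage sketch for the k-rate case, where T = float and U = bool. All locals below are hypothetical call-site names; the gain math mirrors the StereoPannerNode hunk later in this patch:

// Sketch (hypothetical call site): one constant pan value for the whole
// block, so ApplyStereoPanning is instantiated with T = float, U = bool.
AudioBlock output;
output.AllocateChannels(2);              // panned output is always stereo
float p = 0.25f;                         // pan mapped into [0, 1]; 0.5 = center
float gainL = cosf(0.5f * float(M_PI) * p);
float gainR = sinf(0.5f * float(M_PI) * p);
ApplyStereoPanning(input, &output, gainL, gainR, /* aOnLeft = */ p <= 0.5f);
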
--- a/dom/media/webaudio/ScriptProcessorNode.cpp
+++ b/dom/media/webaudio/ScriptProcessorNode.cpp
@@ -275,18 +275,18 @@ public:
         mIsConnected = aParam;
         break;
       default:
         NS_ERROR("Bad Int32Parameter");
     } // End index switch.
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
-                            const AudioChunk& aInput,
-                            AudioChunk* aOutput,
+                            const AudioBlock& aInput,
+                            AudioBlock* aOutput,
                             bool* aFinished) override
   {
     // This node is not connected to anything. Per spec, we don't fire the
     // onaudioprocess event. We also want to clear out the input and output
     // buffer queue, and output a null buffer.
     if (!mIsConnected) {
       aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
       mSharedBuffers->Reset();
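
The disconnected path above shows one of the three ways an engine can produce output after this change. A minimal sketch of the overall pattern the changeset converts engines to (each call below appears elsewhere in this patch; `canForwardInput` is a hypothetical placeholder):

// Sketch of an AudioBlock-based ProcessBlock() body:
if (aInput.IsNull()) {
  aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);       // silent block, no allocation
} else if (canForwardInput) {
  *aOutput = aInput;                           // share the input buffer downstream
} else {
  // Per the changeset description, this may reuse a previously allocated
  // buffer once it is no longer shared downstream.
  aOutput->AllocateChannels(aInput.ChannelCount());
  for (uint32_t i = 0; i < aInput.ChannelCount(); ++i) {
    float* out = aOutput->ChannelFloatsForWrite(i);
    // ... write WEBAUDIO_BLOCK_SIZE processed samples into out ...
  }
}
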
--- a/dom/media/webaudio/StereoPannerNode.cpp
+++ b/dom/media/webaudio/StereoPannerNode.cpp
@@ -78,51 +78,51 @@ public:
     } else if (aPanning <= 0) {
       aPanning += 1;
     }
 
     aLeftGain  = cos(0.5 * M_PI * aPanning);
     aRightGain = sin(0.5 * M_PI * aPanning);
   }
 
-  void SetToSilentStereoBlock(AudioChunk* aChunk)
+  void SetToSilentStereoBlock(AudioBlock* aChunk)
   {
     for (uint32_t channel = 0; channel < 2; channel++) {
       float* samples = aChunk->ChannelFloatsForWrite(channel);
       for (uint32_t i = 0; i < WEBAUDIO_BLOCK_SIZE; i++) {
         samples[i] = 0.f;
       }
     }
   }
 
-  void UpmixToStereoIfNeeded(const AudioChunk& aInput, AudioChunk* aOutput)
+  void UpmixToStereoIfNeeded(const AudioBlock& aInput, AudioBlock* aOutput)
   {
     if (aInput.ChannelCount() == 2) {
       *aOutput = aInput;
     } else {
       MOZ_ASSERT(aInput.ChannelCount() == 1);
-      AllocateAudioBlock(2, aOutput);
+      aOutput->AllocateChannels(2);
       const float* input = static_cast<const float*>(aInput.mChannelData[0]);
       for (uint32_t channel = 0; channel < 2; channel++) {
         float* output = aOutput->ChannelFloatsForWrite(channel);
         PodCopy(output, input, WEBAUDIO_BLOCK_SIZE);
       }
     }
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
-                            const AudioChunk& aInput,
-                            AudioChunk* aOutput,
+                            const AudioBlock& aInput,
+                            AudioBlock* aOutput,
                             bool *aFinished) override
   {
     MOZ_ASSERT(mSource == aStream, "Invalid source stream");
 
     // The output of this node is always stereo, no matter what the inputs are.
     MOZ_ASSERT(aInput.ChannelCount() <= 2);
-    AllocateAudioBlock(2, aOutput);
+    aOutput->AllocateChannels(2);
     bool monoToStereo = aInput.ChannelCount() == 1;
 
     if (aInput.IsNull()) {
       // If input is silent, so is the output
       SetToSilentStereoBlock(aOutput);
     } else if (mPan.HasSimpleValue()) {
       float panning = mPan.GetValue();
       // If the panning is 0.0, we can simply copy the input to the
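
As a sanity check on the cos/sin pair in the hunk above: equal-power panning keeps gainL^2 + gainR^2 at exactly 1 for every pan position, so perceived loudness stays constant (at center both gains are about 0.7071, not 0.5). A standalone check:

#include <cassert>
#include <cmath>

int main()
{
  // For any mapped pan p in [0, 1], total power is constant.
  for (float p = 0.f; p <= 1.f; p += 0.125f) {
    float gainL = cosf(0.5f * float(M_PI) * p);
    float gainR = sinf(0.5f * float(M_PI) * p);
    assert(std::fabs(gainL * gainL + gainR * gainR - 1.f) < 1e-6f);
  }
  return 0;
}
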
--- a/dom/media/webaudio/WaveShaperNode.cpp
+++ b/dom/media/webaudio/WaveShaperNode.cpp
@@ -210,29 +210,29 @@ public:
           aOutputBuffer[j] = (1.0f - interpolationFactor) * mCurve[indexLower] +
                                      interpolationFactor  * mCurve[indexHigher];
         }
       }
     }
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
-                            const AudioChunk& aInput,
-                            AudioChunk* aOutput,
+                            const AudioBlock& aInput,
+                            AudioBlock* aOutput,
                             bool* aFinished) override
   {
     uint32_t channelCount = aInput.ChannelCount();
     if (!mCurve.Length() || !channelCount) {
       // Optimize the case where we don't have a curve buffer,
       // or the input is null.
       *aOutput = aInput;
       return;
     }
 
-    AllocateAudioBlock(channelCount, aOutput);
+    aOutput->AllocateChannels(channelCount);
     for (uint32_t i = 0; i < channelCount; ++i) {
       float* scaledSample = (float *)(aInput.mChannelData[i]);
       AudioBlockInPlaceScale(scaledSample, aInput.mVolume);
       const float* inputBuffer = static_cast<const float*>(scaledSample);
       float* outputBuffer = aOutput->ChannelFloatsForWrite(i);
       float* sampleBuffer;
 
       switch (mType) {
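
The interpolation in the hunk above maps each (volume-scaled) input sample onto the curve and blends the two nearest curve points. A self-contained sketch of that mapping, assuming the usual [-1, 1] onto [0, curveLength - 1] convention (the helper name is hypothetical):

#include <algorithm>
#include <cstdint>

static float ShapeSample(float sample, const float* curve, uint32_t curveLength)
{
  // Map the sample from [-1, 1] onto a fractional curve position.
  float clamped = std::max(-1.f, std::min(1.f, sample));
  float position = (clamped + 1.f) * 0.5f * (curveLength - 1);
  uint32_t indexLower = uint32_t(position);
  uint32_t indexHigher = std::min(indexLower + 1, curveLength - 1);
  float interpolationFactor = position - indexLower;
  // Blend the two nearest curve points.
  return (1.0f - interpolationFactor) * curve[indexLower] +
         interpolationFactor * curve[indexHigher];
}
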
--- a/dom/media/webaudio/blink/DynamicsCompressor.cpp
+++ b/dom/media/webaudio/blink/DynamicsCompressor.cpp
@@ -22,17 +22,17 @@
  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include "DynamicsCompressor.h"
-#include "AudioSegment.h"
+#include "AudioBlock.h"
 
 #include <cmath>
 #include "AudioNodeEngine.h"
 #include "nsDebug.h"
 
 using mozilla::WEBAUDIO_BLOCK_SIZE;
 using mozilla::AudioBlockCopyChannelWithScale;
 
@@ -143,17 +143,17 @@ void DynamicsCompressor::setEmphasisStag
 void DynamicsCompressor::setEmphasisParameters(float gain, float anchorFreq, float filterStageRatio)
 {
     setEmphasisStageParameters(0, gain, anchorFreq);
     setEmphasisStageParameters(1, gain, anchorFreq / filterStageRatio);
     setEmphasisStageParameters(2, gain, anchorFreq / (filterStageRatio * filterStageRatio));
     setEmphasisStageParameters(3, gain, anchorFreq / (filterStageRatio * filterStageRatio * filterStageRatio));
 }
 
-void DynamicsCompressor::process(const AudioChunk* sourceChunk, AudioChunk* destinationChunk, unsigned framesToProcess)
+void DynamicsCompressor::process(const AudioBlock* sourceChunk, AudioBlock* destinationChunk, unsigned framesToProcess)
 {
     // Though numberOfChannels is retrieved from destinationChunk, we still name it numberOfChannels instead of
     // numberOfDestinationChannels because we internally match the source channel count to the destination's by
     // channel up/down-mixing. Thus numberOfChannels drives the loop work for both m_sourceChannels and m_destinationChannels.
 
     unsigned numberOfChannels = destinationChunk->ChannelCount();
     unsigned numberOfSourceChannels = sourceChunk->ChannelCount();
 
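For reference, setEmphasisParameters() above anchors the four pre-emphasis stages at a geometric series of frequencies, anchorFreq / filterStageRatio^n for n = 0..3. A standalone sketch with hypothetical example values:

#include <cmath>
#include <cstdio>

int main()
{
  // Hypothetical values; the real parameters come from the compressor setup.
  float anchorFreq = 15000.f;
  float filterStageRatio = 2.f;
  for (unsigned stage = 0; stage < 4; ++stage) {
    // Same series as setEmphasisParameters(): anchorFreq / ratio^stage.
    printf("stage %u: %.0f Hz\n", stage,
           anchorFreq / powf(filterStageRatio, float(stage)));
  }
  return 0;  // prints 15000, 7500, 3750, 1875
}
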
--- a/dom/media/webaudio/blink/DynamicsCompressor.h
+++ b/dom/media/webaudio/blink/DynamicsCompressor.h
@@ -32,22 +32,22 @@
 #include "DynamicsCompressorKernel.h"
 #include "ZeroPole.h"
 
 #include "nsTArray.h"
 #include "nsAutoPtr.h"
 #include "mozilla/MemoryReporting.h"
 
 namespace mozilla {
-struct AudioChunk;
+class AudioBlock;
 } // namespace mozilla
 
 namespace WebCore {
 
-using mozilla::AudioChunk;
+using mozilla::AudioBlock;
 
 // DynamicsCompressor implements a flexible audio dynamics compression effect such as
 // is commonly used in musical production and game audio. It lowers the volume
 // of the loudest parts of the signal and raises the volume of the softest parts,
 // making the sound richer, fuller, and more controlled.
 
 class DynamicsCompressor {
 public:
@@ -68,17 +68,17 @@ public:
         ParamFilterAnchor,
         ParamEffectBlend,
         ParamReduction,
         ParamLast
     };
 
     DynamicsCompressor(float sampleRate, unsigned numberOfChannels);
 
-    void process(const AudioChunk* sourceChunk, AudioChunk* destinationChunk, unsigned framesToProcess);
+    void process(const AudioBlock* sourceChunk, AudioBlock* destinationChunk, unsigned framesToProcess);
     void reset();
     void setNumberOfChannels(unsigned);
     unsigned numberOfChannels() const { return m_numberOfChannels; }
 
     void setParameterValue(unsigned parameterID, float value);
     float parameterValue(unsigned parameterID);
 
     float sampleRate() const { return m_sampleRate; }
--- a/dom/media/webaudio/blink/HRTFPanner.cpp
+++ b/dom/media/webaudio/blink/HRTFPanner.cpp
@@ -22,16 +22,17 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include "HRTFPanner.h"
 #include "HRTFDatabaseLoader.h"
 
 #include "FFTConvolver.h"
 #include "HRTFDatabase.h"
+#include "AudioBlock.h"
 
 using namespace std;
 using namespace mozilla;
 using dom::ChannelInterpretation;
 
 namespace WebCore {
 
 // The value of 2 milliseconds is larger than the largest delay which exists in any HRTFKernel from the default HRTFDatabase (0.00136 seconds).
@@ -123,17 +124,17 @@ int HRTFPanner::calculateDesiredAzimuthI
 
     // We don't immediately start using this azimuth index, but instead approach it from the last index we rendered at.
     // This minimizes the clicks and graininess that would otherwise occur for moving sources.
     desiredAzimuthIndex = max(0, desiredAzimuthIndex);
     desiredAzimuthIndex = min(numberOfAzimuths - 1, desiredAzimuthIndex);
     return desiredAzimuthIndex;
 }
 
-void HRTFPanner::pan(double desiredAzimuth, double elevation, const AudioChunk* inputBus, AudioChunk* outputBus)
+void HRTFPanner::pan(double desiredAzimuth, double elevation, const AudioBlock* inputBus, AudioBlock* outputBus)
 {
 #ifdef DEBUG
     unsigned numInputChannels =
         inputBus->IsNull() ? 0 : inputBus->ChannelCount();
 
     MOZ_ASSERT(numInputChannels <= 2);
     MOZ_ASSERT(inputBus->GetDuration() == WEBAUDIO_BLOCK_SIZE);
 #endif
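
The comment in calculateDesiredAzimuthIndex() above notes that the panner approaches the desired azimuth index gradually rather than jumping to it. A conceptual sketch only of that idea, stepping one slot per rendered block (the shipped implementation smooths transitions by crossfading between kernels, so this illustrates the intent, not the actual algorithm):

// Conceptual sketch: move at most one azimuth slot per rendered block so
// HRTF kernel changes stay gradual and don't click.
static int StepTowardDesiredIndex(int lastRenderedIndex, int desiredIndex)
{
  if (lastRenderedIndex < desiredIndex) {
    return lastRenderedIndex + 1;
  }
  if (lastRenderedIndex > desiredIndex) {
    return lastRenderedIndex - 1;
  }
  return lastRenderedIndex;
}
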
--- a/dom/media/webaudio/blink/HRTFPanner.h
+++ b/dom/media/webaudio/blink/HRTFPanner.h
@@ -25,34 +25,34 @@
 #ifndef HRTFPanner_h
 #define HRTFPanner_h
 
 #include "FFTConvolver.h"
 #include "DelayBuffer.h"
 #include "mozilla/MemoryReporting.h"
 
 namespace mozilla {
-struct AudioChunk;
+class AudioBlock;
 } // namespace mozilla
 
 namespace WebCore {
 
 typedef nsTArray<float> AudioFloatArray;
 
 class HRTFDatabaseLoader;
 
-using mozilla::AudioChunk;
+using mozilla::AudioBlock;
 
 class HRTFPanner {
 public:
     HRTFPanner(float sampleRate, already_AddRefed<HRTFDatabaseLoader> databaseLoader);
     ~HRTFPanner();
 
     // Input and output block durations must be WEBAUDIO_BLOCK_SIZE (128 frames).
-    void pan(double azimuth, double elevation, const AudioChunk* inputBus, AudioChunk* outputBus);
+    void pan(double azimuth, double elevation, const AudioBlock* inputBus, AudioBlock* outputBus);
     void reset();
 
     size_t fftSize() const { return m_convolverL1.fftSize(); }
 
     float sampleRate() const { return m_sampleRate; }
 
     int maxTailFrames() const;
 
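A hedged usage sketch of pan() with the new AudioBlock types, mirroring the call in PannerNodeEngine::HRTFPanningFunction earlier in this patch (local names are hypothetical):

// Sketch: both buses are single 128-frame blocks, per the duration
// requirement noted above.
AudioBlock input = sourceBlock;      // mono or stereo source block
AudioBlock output;
output.AllocateChannels(2);          // HRTF output is always stereo
panner->pan(azimuth, elevation, &input, &output);
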
--- a/dom/media/webaudio/blink/Reverb.cpp
+++ b/dom/media/webaudio/blink/Reverb.cpp
@@ -25,17 +25,16 @@
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include "Reverb.h"
 #include "ReverbConvolverStage.h"
 
 #include <math.h>
-#include "AudioBlock.h"
 #include "ReverbConvolver.h"
 #include "mozilla/FloatingPoint.h"
 
 using namespace mozilla;
 
 namespace WebCore {
 
 // Empirical gain calibration tested across many impulse responses to ensure perceived volume is same as dry (unprocessed) signal
@@ -140,22 +139,22 @@ void Reverb::initialize(const nsTArray<c
         m_convolvers.AppendElement(convolver.forget());
 
         convolverRenderPhase += renderSliceSize;
     }
 
     // For "True" stereo processing we allocate a temporary buffer once here, rather than repeatedly in the
     // process() method; allocating memory on a real-time thread risks audio glitches.
     if (numResponseChannels == 4) {
-        AllocateAudioBlock(2, &m_tempBuffer);
+        m_tempBuffer.AllocateChannels(2);
         WriteZeroesToAudioBlock(&m_tempBuffer, 0, WEBAUDIO_BLOCK_SIZE);
     }
 }
 
-void Reverb::process(const AudioChunk* sourceBus, AudioChunk* destinationBus, size_t framesToProcess)
+void Reverb::process(const AudioBlock* sourceBus, AudioBlock* destinationBus, size_t framesToProcess)
 {
     // Do a fairly comprehensive sanity check.
     // If these conditions are satisfied, all of the source and destination pointers will be valid for the various matrixing cases.
     bool isSafeToProcess = sourceBus && destinationBus && sourceBus->ChannelCount() > 0 && destinationBus->mChannelData.Length() > 0
         && framesToProcess <= MaxFrameSize && framesToProcess <= size_t(sourceBus->GetDuration()) && framesToProcess <= size_t(destinationBus->GetDuration());
 
     MOZ_ASSERT(isSafeToProcess);
     if (!isSafeToProcess)
--- a/dom/media/webaudio/blink/Reverb.h
+++ b/dom/media/webaudio/blink/Reverb.h
@@ -27,17 +27,17 @@
  */
 
 #ifndef Reverb_h
 #define Reverb_h
 
 #include "ReverbConvolver.h"
 #include "nsAutoPtr.h"
 #include "nsTArray.h"
-#include "AudioSegment.h"
+#include "AudioBlock.h"
 #include "mozilla/MemoryReporting.h"
 
 namespace mozilla {
 class ThreadSharedFloatArrayBufferList;
 } // namespace mozilla
 
 namespace WebCore {
 
@@ -45,30 +45,30 @@ namespace WebCore {
 
 class Reverb {
 public:
     enum { MaxFrameSize = 256 };
 
     // renderSliceSize is a rendering hint, so the FFTs can be scheduled to not all occur at the same time (which would be very bad on a real-time thread).
     Reverb(mozilla::ThreadSharedFloatArrayBufferList* impulseResponseBuffer, size_t impulseResponseBufferLength, size_t renderSliceSize, size_t maxFFTSize, size_t numberOfChannels, bool useBackgroundThreads, bool normalize, float sampleRate);
 
-    void process(const mozilla::AudioChunk* sourceBus, mozilla::AudioChunk* destinationBus, size_t framesToProcess);
+    void process(const mozilla::AudioBlock* sourceBus, mozilla::AudioBlock* destinationBus, size_t framesToProcess);
     void reset();
 
     size_t impulseResponseLength() const { return m_impulseResponseLength; }
     size_t latencyFrames() const;
 
     size_t sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
 
 private:
     void initialize(const nsTArray<const float*>& impulseResponseBuffer, size_t impulseResponseBufferLength, size_t renderSliceSize, size_t maxFFTSize, size_t numberOfChannels, bool useBackgroundThreads);
 
     size_t m_impulseResponseLength;
 
     nsTArray<nsAutoPtr<ReverbConvolver> > m_convolvers;
 
     // For "True" stereo processing
-    mozilla::AudioChunk m_tempBuffer;
+    mozilla::AudioBlock m_tempBuffer;
 };
 
 } // namespace WebCore
 
 #endif // Reverb_h