Bug 1391482 - Generalize shared channel data from AudioBuffer as AudioChunk. r?padenot draft
author Karl Tomlinson <karlt+@karlt.net>
Wed, 09 Aug 2017 16:39:40 +1200
changeset 648599 e42ede828bb6f7f1cadbd0e8f5b6eb15ebc6a5ea
parent 648598 2128750db4677bb595c5df4006b2bca10b80ba63
child 648600 601727c524675f2e007abec566410c284c6bf61e
child 654560 ed03ee72dca2ee698e920f0645eb90ebf1cdb9ae
child 654722 7032a51ef1338115bfec3fc14cf880aade281736
push id 74809
push user ktomlinson@mozilla.com
push date Fri, 18 Aug 2017 01:10:01 +0000
reviewers padenot
bugs 1391482
milestone 57.0a1
Bug 1391482 - Generalize shared channel data from AudioBuffer as AudioChunk. r?padenot

Although the AudioChunk buffer is still always a ThreadSharedFloatArrayBufferList, 16-bit buffers will be permitted in a future patch.

MozReview-Commit-ID: FPZ6VcX4C1q
dom/media/webaudio/AudioBuffer.cpp
dom/media/webaudio/AudioBuffer.h
dom/media/webaudio/AudioBufferSourceNode.cpp
dom/media/webaudio/ConvolverNode.cpp
dom/media/webaudio/ScriptProcessorNode.cpp
dom/media/webaudio/blink/Reverb.cpp
dom/media/webaudio/blink/Reverb.h
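
The caller-visible shape of the change, as a minimal sketch (AudioChunk members as used in this patch; buffer and cx stand in for an AudioBuffer and a JSContext from the calling code):

    // Before: a raw, possibly-null pointer to the shared channel data.
    ThreadSharedFloatArrayBufferList* data =
      buffer->GetThreadSharedChannelsForRate(cx);

    // After: a chunk that carries the channel data together with its
    // duration, volume, and sample format.
    const AudioChunk& chunk = buffer->GetThreadSharedChannelsForRate(cx);
    if (!chunk.IsNull()) {
      const float* samples = chunk.ChannelData<float>()[0];
      // chunk.mVolume and chunk.mDuration travel with the samples.
    }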
--- a/dom/media/webaudio/AudioBuffer.cpp
+++ b/dom/media/webaudio/AudioBuffer.cpp
@@ -429,30 +429,30 @@ AudioBuffer::StealJSArrayDataIntoSharedC
 
   for (uint32_t i = 0; i < mJSChannels.Length(); ++i) {
     mJSChannels[i] = nullptr;
   }
 
   return result.forget();
 }
 
-ThreadSharedFloatArrayBufferList*
+const AudioChunk&
 AudioBuffer::GetThreadSharedChannelsForRate(JSContext* aJSContext)
 {
   if (mSharedChannels.IsNull()) {
     // mDuration is set in constructor
     RefPtr<ThreadSharedFloatArrayBufferList> buffer =
       StealJSArrayDataIntoSharedChannels(aJSContext);
 
     if (buffer) {
       SetSharedChannels(buffer.forget());
     }
   }
 
-  return mSharedChannels.mBuffer->AsThreadSharedFloatArrayBufferList();
+  return mSharedChannels;
 }
 
 size_t
 AudioBuffer::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
 {
   size_t amount = aMallocSizeOf(this);
   amount += mJSChannels.ShallowSizeOfExcludingThis(aMallocSizeOf);
   amount += mSharedChannels.SizeOfExcludingThis(aMallocSizeOf, false);
--- a/dom/media/webaudio/AudioBuffer.h
+++ b/dom/media/webaudio/AudioBuffer.h
@@ -101,20 +101,20 @@ public:
 
   void CopyFromChannel(const Float32Array& aDestination, uint32_t aChannelNumber,
                        uint32_t aStartInChannel, ErrorResult& aRv);
   void CopyToChannel(JSContext* aJSContext, const Float32Array& aSource,
                      uint32_t aChannelNumber, uint32_t aStartInChannel,
                      ErrorResult& aRv);
 
   /**
-   * Returns a ThreadSharedFloatArrayBufferList containing the sample data.
-   * Can return null if there is no data.
+   * Returns a reference to an AudioChunk containing the sample data.
+   * The AudioChunk can have a null buffer if there is no data.
    */
-  ThreadSharedFloatArrayBufferList* GetThreadSharedChannelsForRate(JSContext* aContext);
+  const AudioChunk& GetThreadSharedChannelsForRate(JSContext* aContext);
 
 protected:
   AudioBuffer(nsPIDOMWindowInner* aWindow, uint32_t aNumberOfChannels,
               uint32_t aLength, float aSampleRate);
   ~AudioBuffer();
 
   void
   SetSharedChannels(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer);
--- a/dom/media/webaudio/AudioBufferSourceNode.cpp
+++ b/dom/media/webaudio/AudioBufferSourceNode.cpp
@@ -139,17 +139,17 @@ public:
     case AudioBufferSourceNode::LOOPEND:
       MOZ_ASSERT(aParam >= 0);
       mLoopEnd = aParam;
       break;
     default:
       NS_ERROR("Bad AudioBufferSourceNodeEngine Int32Parameter");
     }
   }
-  void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer) override
+  void SetBuffer(AudioChunk&& aBuffer) override
   {
     mBuffer = aBuffer;
   }
 
   bool BegunResampling()
   {
     return mBeginProcessing == -STREAM_TIME_MAX;
   }
@@ -210,35 +210,37 @@ public:
     }
   }
 
   // Borrow a full buffer of size WEBAUDIO_BLOCK_SIZE from the source buffer
   // at offset mBufferPosition.  This avoids copying memory.
   void BorrowFromInputBuffer(AudioBlock* aOutput,
                              uint32_t aChannels)
   {
-    aOutput->SetBuffer(mBuffer);
+    aOutput->SetBuffer(mBuffer.mBuffer);
     aOutput->mChannelData.SetLength(aChannels);
     for (uint32_t i = 0; i < aChannels; ++i) {
-      aOutput->mChannelData[i] = mBuffer->GetData(i) + mBufferPosition;
+      aOutput->mChannelData[i] =
+        mBuffer.ChannelData<float>()[i] + mBufferPosition;
     }
-    aOutput->mVolume = 1.0f;
+    aOutput->mVolume = mBuffer.mVolume;
     aOutput->mBufferFormat = AUDIO_FORMAT_FLOAT32;
   }
 
   // Copy aNumberOfFrames frames from the source buffer at offset mBufferPosition
   // and put them at offset aOffsetWithinBlock in the destination buffer.
   void CopyFromInputBuffer(AudioBlock* aOutput,
                            uint32_t aChannels,
                            uintptr_t aOffsetWithinBlock,
                            uint32_t aNumberOfFrames) {
+    MOZ_ASSERT(mBuffer.mVolume == 1.0f);
     for (uint32_t i = 0; i < aChannels; ++i) {
       float* baseChannelData = aOutput->ChannelFloatsForWrite(i);
       memcpy(baseChannelData + aOffsetWithinBlock,
-             mBuffer->GetData(i) + mBufferPosition,
+             mBuffer.ChannelData<float>()[i] + mBufferPosition,
              aNumberOfFrames * sizeof(float));
     }
   }
 
   // Resamples input data to an output buffer, according to |mBufferSampleRate| and
   // the playbackRate/detune.
   // The number of frames consumed/produced depends on the amount of space
   // remaining in both the input and output buffer, and the playback rate (that
@@ -285,19 +287,21 @@ public:
         }
         speex_resampler_set_skip_frac_num(resampler,
                                   std::min<int64_t>(skipFracNum, UINT32_MAX));
 
         mBeginProcessing = -STREAM_TIME_MAX;
       }
       inputLimit = std::min(inputLimit, availableInInputBuffer);
 
+      MOZ_ASSERT(mBuffer.mVolume == 1.0f);
       for (uint32_t i = 0; true; ) {
         uint32_t inSamples = inputLimit;
-        const float* inputData = mBuffer->GetData(i) + mBufferPosition;
+        const float* inputData =
+          mBuffer.ChannelData<float>()[i] + mBufferPosition;
 
         uint32_t outSamples = aAvailableInOutput;
         float* outputData =
           aOutput->ChannelFloatsForWrite(i) + *aOffsetWithinBlock;
 
         WebAudioUtils::SpeexResamplerProcess(resampler, i,
                                              inputData, &inSamples,
                                              outputData, &outSamples);
@@ -415,17 +419,17 @@ public:
       return;
     }
 
     uint32_t numFrames = std::min(aBufferMax - mBufferPosition,
                                   availableInOutput);
 
     bool inputBufferAligned = true;
     for (uint32_t i = 0; i < aChannels; ++i) {
-      if (!IS_ALIGNED16(mBuffer->GetData(i) + mBufferPosition)) {
+      if (!IS_ALIGNED16(mBuffer.ChannelData<float>()[i] + mBufferPosition)) {
         inputBufferAligned = false;
       }
     }
 
     if (numFrames == WEBAUDIO_BLOCK_SIZE && inputBufferAligned) {
       MOZ_ASSERT(mBufferPosition < aBufferMax);
       BorrowFromInputBuffer(aOutput, aChannels);
     } else {
@@ -484,17 +488,17 @@ public:
   {
     if (mBufferSampleRate == 0) {
       // start() has not yet been called or no buffer has yet been set
       aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
       return;
     }
 
     StreamTime streamPosition = mDestination->GraphTimeToStreamTime(aFrom);
-    uint32_t channels = mBuffer ? mBuffer->GetChannels() : 0;
+    uint32_t channels = mBuffer.ChannelCount();
 
     UpdateSampleRateIfNeeded(channels, streamPosition);
 
     uint32_t written = 0;
     while (written < WEBAUDIO_BLOCK_SIZE) {
       if (mStop != STREAM_TIME_MAX &&
           streamPosition >= mStop) {
         FillWithZeroes(aOutput, channels, &written, &streamPosition, STREAM_TIME_MAX);
@@ -564,17 +568,17 @@ public:
 
   double mStart; // including the fractional position between ticks
   // Low pass filter effects from the resampler mean that samples before the
   // start time are influenced by resampling the buffer.  mBeginProcessing
   // includes the extent of this filter.  The special value of -STREAM_TIME_MAX
   // indicates that the resampler has begun processing.
   StreamTime mBeginProcessing;
   StreamTime mStop;
-  RefPtr<ThreadSharedFloatArrayBufferList> mBuffer;
+  AudioChunk mBuffer;
   SpeexResamplerState* mResampler;
   // mRemainingResamplerTail, like mBufferPosition and mBufferEnd, is
   // measured in input buffer samples.
   uint32_t mRemainingResamplerTail;
   uint32_t mBufferEnd;
   uint32_t mLoopStart;
   uint32_t mLoopEnd;
   uint32_t mBufferPosition;
@@ -725,26 +729,25 @@ void
 AudioBufferSourceNode::SendBufferParameterToStream(JSContext* aCx)
 {
   AudioNodeStream* ns = mStream;
   if (!ns) {
     return;
   }
 
   if (mBuffer) {
-    RefPtr<ThreadSharedFloatArrayBufferList> data =
-      mBuffer->GetThreadSharedChannelsForRate(aCx);
-    ns->SetBuffer(data.forget());
+    AudioChunk data = mBuffer->GetThreadSharedChannelsForRate(aCx);
+    ns->SetBuffer(Move(data));
 
     if (mStartCalled) {
       SendOffsetAndDurationParametersToStream(ns);
     }
   } else {
     ns->SetInt32Parameter(BUFFEREND, 0);
-    ns->SetBuffer(nullptr);
+    ns->SetBuffer(AudioChunk());
 
     MarkInactive();
   }
 }
 
 void
 AudioBufferSourceNode::SendOffsetAndDurationParametersToStream(AudioNodeStream* aStream)
 {
--- a/dom/media/webaudio/ConvolverNode.cpp
+++ b/dom/media/webaudio/ConvolverNode.cpp
@@ -25,39 +25,30 @@ NS_IMPL_ADDREF_INHERITED(ConvolverNode, 
 NS_IMPL_RELEASE_INHERITED(ConvolverNode, AudioNode)
 
 class ConvolverNodeEngine final : public AudioNodeEngine
 {
   typedef PlayingRefChangeHandler PlayingRefChanged;
 public:
   ConvolverNodeEngine(AudioNode* aNode, bool aNormalize)
     : AudioNodeEngine(aNode)
-    , mBufferLength(0)
     , mLeftOverData(INT32_MIN)
     , mSampleRate(0.0f)
     , mUseBackgroundThreads(!aNode->Context()->IsOffline())
     , mNormalize(aNormalize)
   {
   }
 
   enum Parameters {
-    BUFFER_LENGTH,
     SAMPLE_RATE,
     NORMALIZE
   };
   void SetInt32Parameter(uint32_t aIndex, int32_t aParam) override
   {
     switch (aIndex) {
-    case BUFFER_LENGTH:
-      // BUFFER_LENGTH is the first parameter that we set when setting a new buffer,
-      // so we should be careful to invalidate the rest of our state here.
-      mSampleRate = 0.0f;
-      mBufferLength = aParam;
-      mLeftOverData = INT32_MIN;
-      break;
     case NORMALIZE:
       mNormalize = !!aParam;
       break;
     default:
       NS_ERROR("Bad ConvolverNodeEngine Int32Parameter");
     }
   }
   void SetDoubleParameter(uint32_t aIndex, double aParam) override
@@ -67,36 +58,34 @@ public:
       mSampleRate = aParam;
       // The buffer is passed after the sample rate.
       // mReverb will be set using this sample rate when the buffer is received.
       break;
     default:
       NS_ERROR("Bad ConvolverNodeEngine DoubleParameter");
     }
   }
-  void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer) override
+  void SetBuffer(AudioChunk&& aBuffer) override
   {
-    RefPtr<ThreadSharedFloatArrayBufferList> buffer = aBuffer;
-
     // Note about empirical tuning (this is copied from Blink)
     // The maximum FFT size affects reverb performance and accuracy.
     // If the reverb is single-threaded and processes entirely in the real-time audio thread,
     // it's important not to make this too high.  In this case 8192 is a good value.
     // But, the Reverb object is multi-threaded, so we want this as high as possible without losing too much accuracy.
     // Very large FFTs will have worse phase errors. Given these constraints 32768 is a good compromise.
     const size_t MaxFFTSize = 32768;
 
-    if (!buffer || !mBufferLength || !mSampleRate) {
+    mLeftOverData = INT32_MIN; // reset
+
+    if (aBuffer.IsNull() || !mSampleRate) {
       mReverb = nullptr;
-      mLeftOverData = INT32_MIN;
       return;
     }
 
-    mReverb = new WebCore::Reverb(buffer, mBufferLength,
-                                  MaxFFTSize, mUseBackgroundThreads,
+    mReverb = new WebCore::Reverb(aBuffer, MaxFFTSize, mUseBackgroundThreads,
                                   mNormalize, mSampleRate);
   }
 
   void ProcessBlock(AudioNodeStream* aStream,
                     GraphTime aFrom,
                     const AudioBlock& aInput,
                     AudioBlock* aOutput,
                     bool* aFinished) override
@@ -137,17 +126,17 @@ public:
       }
 
       if (mLeftOverData <= 0) {
         RefPtr<PlayingRefChanged> refchanged =
           new PlayingRefChanged(aStream, PlayingRefChanged::ADDREF);
         aStream->Graph()->DispatchToMainThreadAfterStreamStateUpdate(
           refchanged.forget());
       }
-      mLeftOverData = mBufferLength;
+      mLeftOverData = mReverb->impulseResponseLength();
       MOZ_ASSERT(mLeftOverData > 0);
     }
     aOutput->AllocateChannels(2);
 
     mReverb->process(&input, aOutput);
   }
 
   bool IsActive() const override
@@ -168,17 +157,16 @@ public:
 
   size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override
   {
     return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
   }
 
 private:
   nsAutoPtr<WebCore::Reverb> mReverb;
-  int32_t mBufferLength;
   int32_t mLeftOverData;
   float mSampleRate;
   bool mUseBackgroundThreads;
   bool mNormalize;
 };
 
 ConvolverNode::ConvolverNode(AudioContext* aContext)
   : AudioNode(aContext,
@@ -264,25 +252,22 @@ ConvolverNode::SetBuffer(JSContext* aCx,
   }
 
   mBuffer = aBuffer;
 
   // Send the buffer to the stream
   AudioNodeStream* ns = mStream;
   MOZ_ASSERT(ns, "Why don't we have a stream here?");
   if (mBuffer) {
-    uint32_t length = mBuffer->Length();
-    RefPtr<ThreadSharedFloatArrayBufferList> data =
-      mBuffer->GetThreadSharedChannelsForRate(aCx);
-    SendInt32ParameterToStream(ConvolverNodeEngine::BUFFER_LENGTH, length);
+    AudioChunk data = mBuffer->GetThreadSharedChannelsForRate(aCx);
     SendDoubleParameterToStream(ConvolverNodeEngine::SAMPLE_RATE,
                                 mBuffer->SampleRate());
-    ns->SetBuffer(data.forget());
+    ns->SetBuffer(Move(data));
   } else {
-    ns->SetBuffer(nullptr);
+    ns->SetBuffer(AudioChunk());
   }
 }
 
 void
 ConvolverNode::SetNormalize(bool aNormalize)
 {
   mNormalize = aNormalize;
   SendInt32ParameterToStream(ConvolverNodeEngine::NORMALIZE, aNormalize);
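
Because the chunk carries its own length in mDuration, the separate BUFFER_LENGTH message and the engine's mBufferLength mirror are no longer needed; the tail length is read back from the Reverb object instead. Sketched, assuming impulseResponseLength() reports the length of the impulse response the Reverb was built from:

    // Main thread: one message; the length travels inside the chunk.
    ns->SetBuffer(Move(data));

    // Graph thread: the reverb tail length comes from the reverb itself.
    mLeftOverData = mReverb->impulseResponseLength();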
--- a/dom/media/webaudio/ScriptProcessorNode.cpp
+++ b/dom/media/webaudio/ScriptProcessorNode.cpp
@@ -116,18 +116,17 @@ public:
       MutexAutoLock lock(mOutputQueue.Lock());
       amount += mOutputQueue.SizeOfExcludingThis(aMallocSizeOf);
     }
 
     return amount;
   }
 
   // main thread
-  void FinishProducingOutputBuffer(ThreadSharedFloatArrayBufferList* aBuffer,
-                                   uint32_t aBufferSize)
+  void FinishProducingOutputBuffer(const AudioChunk& aBuffer)
   {
     MOZ_ASSERT(NS_IsMainThread());
 
     TimeStamp now = TimeStamp::Now();
 
     if (mLastEventTime.IsNull()) {
       mLastEventTime = now;
     } else {
@@ -137,47 +136,38 @@ public:
       // latency is also reset to 0.
       // It could happen that the output queue becomes empty before the input
       // node has fully caught up. In this case there will be events where
       // |(now - mLastEventTime)| is very short, making mLatency negative.
       // As this happens and the size of |mLatency| becomes greater than
       // MAX_LATENCY_S, frame dropping starts again to maintain an as short
       // output queue as possible.
       float latency = (now - mLastEventTime).ToSeconds();
-      float bufferDuration = aBufferSize / mSampleRate;
+      float bufferDuration = aBuffer.mDuration / mSampleRate;
       mLatency += latency - bufferDuration;
       mLastEventTime = now;
       if (fabs(mLatency) > MAX_LATENCY_S) {
         mDroppingBuffers = true;
       }
     }
 
     MutexAutoLock lock(mOutputQueue.Lock());
     if (mDroppingBuffers) {
       if (mOutputQueue.ReadyToConsume()) {
         return;
       }
       mDroppingBuffers = false;
       mLatency = 0;
     }
 
-    for (uint32_t offset = 0; offset < aBufferSize; offset += WEBAUDIO_BLOCK_SIZE) {
+    for (uint32_t offset = 0; offset < aBuffer.mDuration;
+         offset += WEBAUDIO_BLOCK_SIZE) {
       AudioChunk& chunk = mOutputQueue.Produce();
-      if (aBuffer) {
-        chunk.mDuration = WEBAUDIO_BLOCK_SIZE;
-        chunk.mBuffer = aBuffer;
-        chunk.mChannelData.SetLength(aBuffer->GetChannels());
-        for (uint32_t i = 0; i < aBuffer->GetChannels(); ++i) {
-          chunk.mChannelData[i] = aBuffer->GetData(i) + offset;
-        }
-        chunk.mVolume = 1.0f;
-        chunk.mBufferFormat = AUDIO_FORMAT_FLOAT32;
-      } else {
-        chunk.SetNull(WEBAUDIO_BLOCK_SIZE);
-      }
+      chunk = aBuffer;
+      chunk.SliceTo(offset, offset + WEBAUDIO_BLOCK_SIZE);
     }
   }
 
   // graph thread
   AudioChunk GetOutputBuffer()
   {
     MOZ_ASSERT(!NS_IsMainThread());
     AudioChunk buffer;
@@ -379,67 +369,67 @@ private:
         , mStream(aStream)
         , mInputBuffer(aInputBuffer)
         , mPlaybackTime(aPlaybackTime)
       {
       }
 
       NS_IMETHOD Run() override
       {
-        RefPtr<ThreadSharedFloatArrayBufferList> output;
 
         auto engine =
           static_cast<ScriptProcessorNodeEngine*>(mStream->Engine());
+        AudioChunk output;
+        output.SetNull(engine->mBufferSize);
         {
           auto node = static_cast<ScriptProcessorNode*>
             (engine->NodeMainThread());
           if (!node) {
             return NS_OK;
           }
 
           if (node->HasListenersFor(nsGkAtoms::onaudioprocess)) {
-            output = DispatchAudioProcessEvent(node);
+            DispatchAudioProcessEvent(node, &output);
           }
           // The node may have been destroyed during event dispatch.
         }
 
         // Append it to our output buffer queue
-        engine->GetSharedBuffers()->
-          FinishProducingOutputBuffer(output, engine->mBufferSize);
+        engine->GetSharedBuffers()->FinishProducingOutputBuffer(output);
 
         return NS_OK;
       }
 
-      // Returns the output buffers if set in event handlers.
-      ThreadSharedFloatArrayBufferList*
-        DispatchAudioProcessEvent(ScriptProcessorNode* aNode)
+      // Sets up |aOutput| iff buffers are set in event handlers.
+      void DispatchAudioProcessEvent(ScriptProcessorNode* aNode,
+                                     AudioChunk* aOutput)
       {
         AudioContext* context = aNode->Context();
         if (!context) {
-          return nullptr;
+          return;
         }
 
         AutoJSAPI jsapi;
         if (NS_WARN_IF(!jsapi.Init(aNode->GetOwner()))) {
-          return nullptr;
+          return;
         }
         JSContext* cx = jsapi.cx();
         uint32_t inputChannelCount = aNode->ChannelCount();
 
         // Create the input buffer
         RefPtr<AudioBuffer> inputBuffer;
         if (mInputBuffer) {
           ErrorResult rv;
           inputBuffer =
             AudioBuffer::Create(context->GetOwner(), inputChannelCount,
                                 aNode->BufferSize(), context->SampleRate(),
                                 mInputBuffer.forget(), rv);
           if (rv.Failed()) {
             rv.SuppressException();
-            return nullptr;
+            return;
           }
         }
 
         // Ask content to produce data in the output buffer
         // Note that we always avoid creating the output buffer here, and we try to
         // avoid creating the input buffer as well.  The AudioProcessingEvent class
         // knows how to lazily create them if needed once the script tries to access
         // them.  Otherwise, we may be able to get away without creating them!
@@ -453,20 +443,21 @@ private:
         // FinishProducingOutputBuffer() will optimize output = null.
         // GetThreadSharedChannelsForRate() may also return null after OOM.
         if (event->HasOutputBuffer()) {
           ErrorResult rv;
           AudioBuffer* buffer = event->GetOutputBuffer(rv);
           // HasOutputBuffer() returning true means that GetOutputBuffer()
           // will not fail.
           MOZ_ASSERT(!rv.Failed());
-          return buffer->GetThreadSharedChannelsForRate(cx);
+          *aOutput = buffer->GetThreadSharedChannelsForRate(cx);
+          MOZ_ASSERT(aOutput->IsNull() ||
+                     aOutput->mBufferFormat == AUDIO_FORMAT_FLOAT32,
+                     "AudioBuffers initialized from JS have float data");
         }
-
-        return nullptr;
       }
     private:
       RefPtr<AudioNodeStream> mStream;
       RefPtr<ThreadSharedFloatArrayBufferList> mInputBuffer;
       double mPlaybackTime;
     };
 
     RefPtr<Command> command = new Command(aStream, mInputBuffer.forget(),
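
FinishProducingOutputBuffer() now carves the produced buffer into WEBAUDIO_BLOCK_SIZE windows by copying the chunk and narrowing it, instead of rebuilding channel pointers by hand. A minimal sketch, assuming SliceTo() advances the channel pointers and shrinks mDuration to the given range, as its use above implies:

    for (uint32_t offset = 0; offset < aBuffer.mDuration;
         offset += WEBAUDIO_BLOCK_SIZE) {
      AudioChunk block = aBuffer;  // shares the thread-safe buffer
      block.SliceTo(offset, offset + WEBAUDIO_BLOCK_SIZE);
      // block now covers exactly one processing block.  A null aBuffer
      // (as set up in Run() above) yields null blocks, replacing the
      // old explicit SetNull() branch.
    }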
--- a/dom/media/webaudio/blink/Reverb.cpp
+++ b/dom/media/webaudio/blink/Reverb.cpp
@@ -39,25 +39,25 @@ namespace WebCore {
 
 // Empirical gain calibration tested across many impulse responses to ensure perceived volume is the same as the dry (unprocessed) signal
 const float GainCalibration = 0.00125f;
 const float GainCalibrationSampleRate = 44100;
 
 // A minimum power value to use when normalizing a silent (or very quiet) impulse response
 const float MinPower = 0.000125f;
 
-static float calculateNormalizationScale(ThreadSharedFloatArrayBufferList* response, size_t aLength, float sampleRate)
+static float calculateNormalizationScale(const nsTArray<const float*>& response, size_t aLength, float sampleRate)
 {
     // Normalize by RMS power
-    size_t numberOfChannels = response->GetChannels();
+    size_t numberOfChannels = response.Length();
 
     float power = 0;
 
     for (size_t i = 0; i < numberOfChannels; ++i) {
-        float channelPower = AudioBufferSumOfSquares(static_cast<const float*>(response->GetData(i)), aLength);
+        float channelPower = AudioBufferSumOfSquares(response[i], aLength);
         power += channelPower;
     }
 
     power = sqrt(power / (numberOfChannels * aLength));
 
     // Protect against accidental overload
     if (!IsFinite(power) || IsNaN(power) || power < MinPower)
         power = MinPower;
@@ -66,43 +66,41 @@ static float calculateNormalizationScale
 
     scale *= GainCalibration; // calibrate to make perceived volume same as unprocessed
 
     // Scale depends on sample-rate.
     if (sampleRate)
         scale *= GainCalibrationSampleRate / sampleRate;
 
     // True-stereo compensation
-    if (response->GetChannels() == 4)
+    if (numberOfChannels == 4)
         scale *= 0.5f;
 
     return scale;
 }
 
-Reverb::Reverb(ThreadSharedFloatArrayBufferList* impulseResponse, size_t impulseResponseBufferLength, size_t maxFFTSize, bool useBackgroundThreads, bool normalize, float sampleRate)
+Reverb::Reverb(const AudioChunk& impulseResponse, size_t maxFFTSize, bool useBackgroundThreads, bool normalize, float sampleRate)
 {
-    float scale = 1;
+    size_t impulseResponseBufferLength = impulseResponse.mDuration;
+    float scale = impulseResponse.mVolume;
 
-    AutoTArray<const float*,4> irChannels;
-    for (size_t i = 0; i < impulseResponse->GetChannels(); ++i) {
-        irChannels.AppendElement(impulseResponse->GetData(i));
-    }
+    AutoTArray<const float*,4> irChannels(impulseResponse.ChannelData<float>());
     AutoTArray<float,1024> tempBuf;
 
     if (normalize) {
-        scale = calculateNormalizationScale(impulseResponse, impulseResponseBufferLength, sampleRate);
+        scale = calculateNormalizationScale(irChannels, impulseResponseBufferLength, sampleRate);
+    }
 
-        if (scale) {
-            tempBuf.SetLength(irChannels.Length()*impulseResponseBufferLength);
-            for (uint32_t i = 0; i < irChannels.Length(); ++i) {
-                float* buf = &tempBuf[i*impulseResponseBufferLength];
-                AudioBufferCopyWithScale(irChannels[i], scale, buf,
-                                         impulseResponseBufferLength);
-                irChannels[i] = buf;
-            }
+    if (scale != 1.0f) {
+        tempBuf.SetLength(irChannels.Length()*impulseResponseBufferLength);
+        for (uint32_t i = 0; i < irChannels.Length(); ++i) {
+            float* buf = &tempBuf[i*impulseResponseBufferLength];
+            AudioBufferCopyWithScale(irChannels[i], scale, buf,
+                                     impulseResponseBufferLength);
+            irChannels[i] = buf;
         }
     }
 
     initialize(irChannels, impulseResponseBufferLength,
                maxFFTSize, useBackgroundThreads);
 }
 
 size_t Reverb::sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
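
Reverb now receives the impulse response as an AudioChunk, so a pending chunk volume folds into the scaling pass: scale starts at impulseResponse.mVolume, normalization replaces it outright (a normalized response has fixed power regardless of the chunk volume), and the channels are copied with the gain applied once if the combined scale is not unity. Worked through for a hypothetical chunk with mVolume == 0.5 and normalize == false:

    float scale = impulseResponse.mVolume;  // 0.5; normalize is false,
                                            // so the chunk volume stands
    if (scale != 1.0f) {
      // Each channel is copied into tempBuf with the 0.5 gain baked in,
      // so the convolver sees pre-scaled samples.
      AudioBufferCopyWithScale(irChannels[i], scale, buf,
                               impulseResponseBufferLength);
    }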
--- a/dom/media/webaudio/blink/Reverb.h
+++ b/dom/media/webaudio/blink/Reverb.h
@@ -30,31 +30,26 @@
 #define Reverb_h
 
 #include "ReverbConvolver.h"
 #include "nsAutoPtr.h"
 #include "nsTArray.h"
 #include "AudioBlock.h"
 #include "mozilla/MemoryReporting.h"
 
-namespace mozilla {
-class ThreadSharedFloatArrayBufferList;
-} // namespace mozilla
-
 namespace WebCore {
 
 // Multi-channel convolution reverb with channel matrixing - one or more ReverbConvolver objects are used internally.
 
 class Reverb {
 public:
     enum { MaxFrameSize = 256 };
 
     // renderSliceSize is a rendering hint, so the FFTs can be optimized to not all occur at the same time (very bad when rendering on a real-time thread).
-    Reverb(mozilla::ThreadSharedFloatArrayBufferList* impulseResponseBuffer,
-           size_t impulseResponseBufferLength, size_t maxFFTSize,
+    Reverb(const mozilla::AudioChunk& impulseResponseBuffer, size_t maxFFTSize,
            bool useBackgroundThreads, bool normalize, float sampleRate);
 
     void process(const mozilla::AudioBlock* sourceBus,
                  mozilla::AudioBlock* destinationBus);
 
     size_t impulseResponseLength() const { return m_impulseResponseLength; }
 
     size_t sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;