Bug 815643 - Part 4: Add the Convolution processing implementation to the build system; r=roc
author Ehsan Akhgari <ehsan@mozilla.com>
Mon, 10 Jun 2013 16:09:01 -0400
changeset 134588 b7efc129d2b1
parent 134587 6bed30223d8f
child 134589 90c849ba5baf
push id 29285
push user eakhgari@mozilla.com
push date Tue, 11 Jun 2013 00:10:14 +0000
treeherder mozilla-inbound@63386b71d1b5 [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers roc
bugs 815643
milestone 24.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 815643 - Part 4: Add the Convolution processing implementation to the build system; r=roc
content/media/AudioNodeEngine.cpp
content/media/AudioNodeEngine.h
content/media/webaudio/AnalyserNode.cpp
content/media/webaudio/FFTBlock.h
content/media/webaudio/Makefile.in
content/media/webaudio/PannerNode.cpp
content/media/webaudio/blink/DirectConvolver.cpp
content/media/webaudio/blink/DirectConvolver.h
content/media/webaudio/blink/FFTConvolver.cpp
content/media/webaudio/blink/FFTConvolver.h
content/media/webaudio/blink/Makefile.in
content/media/webaudio/blink/Reverb.cpp
content/media/webaudio/blink/Reverb.h
content/media/webaudio/blink/ReverbAccumulationBuffer.cpp
content/media/webaudio/blink/ReverbAccumulationBuffer.h
content/media/webaudio/blink/ReverbConvolver.cpp
content/media/webaudio/blink/ReverbConvolver.h
content/media/webaudio/blink/ReverbConvolverStage.cpp
content/media/webaudio/blink/ReverbConvolverStage.h
content/media/webaudio/blink/ReverbInputBuffer.cpp
content/media/webaudio/blink/ReverbInputBuffer.h
content/media/webaudio/blink/moz.build
content/media/webaudio/moz.build
--- a/content/media/AudioNodeEngine.cpp
+++ b/content/media/AudioNodeEngine.cpp
@@ -34,30 +34,38 @@ WriteZeroesToAudioBlock(AudioChunk* aChu
   if (aLength == 0)
     return;
   for (uint32_t i = 0; i < aChunk->mChannelData.Length(); ++i) {
     memset(static_cast<float*>(const_cast<void*>(aChunk->mChannelData[i])) + aStart,
            0, aLength*sizeof(float));
   }
 }
 
+void AudioBufferAddWithScale(const float* aInput,
+                             float aScale,
+                             float* aOutput,
+                             uint32_t aSize)
+{
+  if (aScale == 1.0f) {
+    for (uint32_t i = 0; i < aSize; ++i) {
+      aOutput[i] += aInput[i];
+    }
+  } else {
+    for (uint32_t i = 0; i < aSize; ++i) {
+      aOutput[i] += aInput[i]*aScale;
+    }
+  }
+}
+
 void
 AudioBlockAddChannelWithScale(const float aInput[WEBAUDIO_BLOCK_SIZE],
                               float aScale,
                               float aOutput[WEBAUDIO_BLOCK_SIZE])
 {
-  if (aScale == 1.0f) {
-    for (uint32_t i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) {
-      aOutput[i] += aInput[i];
-    }
-  } else {
-    for (uint32_t i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) {
-      aOutput[i] += aInput[i]*aScale;
-    }
-  }
+  AudioBufferAddWithScale(aInput, aScale, aOutput, WEBAUDIO_BLOCK_SIZE);
 }
 
 void
 AudioBlockCopyChannelWithScale(const float* aInput,
                                float aScale,
                                float* aOutput)
 {
   if (aScale == 1.0f) {
@@ -93,24 +101,33 @@ AudioBlockCopyChannelWithScale(const flo
                                float aOutput[WEBAUDIO_BLOCK_SIZE])
 {
   for (uint32_t i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) {
     aOutput[i] = aInput[i]*aScale[i];
   }
 }
 
 void
-AudioBlockInPlaceScale(float aBlock[WEBAUDIO_BLOCK_SIZE],
-                       uint32_t aChannelCount,
-                       float aScale)
+AudioBufferInPlaceScale(float aBlock[WEBAUDIO_BLOCK_SIZE],
+                        uint32_t aChannelCount,
+                        float aScale)
+{
+  AudioBufferInPlaceScale(aBlock, aChannelCount, aScale, WEBAUDIO_BLOCK_SIZE);
+}
+
+void
+AudioBufferInPlaceScale(float* aBlock,
+                        uint32_t aChannelCount,
+                        float aScale,
+                        uint32_t aSize)
 {
   if (aScale == 1.0f) {
     return;
   }
-  for (uint32_t i = 0; i < WEBAUDIO_BLOCK_SIZE * aChannelCount; ++i) {
+  for (uint32_t i = 0; i < aSize * aChannelCount; ++i) {
     *aBlock++ *= aScale;
   }
 }
 
 void
 AudioBlockPanMonoToStereo(const float aInput[WEBAUDIO_BLOCK_SIZE],
                           float aGainL, float aGainR,
                           float aOutputL[WEBAUDIO_BLOCK_SIZE],
@@ -136,9 +153,21 @@ AudioBlockPanStereoToStereo(const float 
     }
   } else {
     for (i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) {
       *aOutputL++ = *aInputL * aGainL;
       *aOutputR++ = *aInputR++ + *aInputL++ * aGainR;
     }
   }
 }
+
+float
+AudioBufferSumOfSquares(const float* aInput, uint32_t aLength)
+{
+  float sum = 0.0f;
+  while (aLength--) {
+    sum += *aInput * *aInput;
+    ++aInput;
+  }
+  return sum;
 }
+
+}
--- a/content/media/AudioNodeEngine.h
+++ b/content/media/AudioNodeEngine.h
@@ -85,16 +85,24 @@ void AllocateAudioBlock(uint32_t aChanne
 /**
  * aChunk must have been allocated by AllocateAudioBlock.
  */
 void WriteZeroesToAudioBlock(AudioChunk* aChunk, uint32_t aStart, uint32_t aLength);
 
 /**
  * Pointwise multiply-add operation. aScale == 1.0f should be optimized.
  */
+void AudioBufferAddWithScale(const float* aInput,
+                             float aScale,
+                             float* aOutput,
+                             uint32_t aSize);
+
+/**
+ * Pointwise multiply-add operation. aScale == 1.0f should be optimized.
+ */
 void AudioBlockAddChannelWithScale(const float aInput[WEBAUDIO_BLOCK_SIZE],
                                    float aScale,
                                    float aOutput[WEBAUDIO_BLOCK_SIZE]);
 
 /**
  * Pointwise copy-scaled operation. aScale == 1.0f should be optimized.
  *
  * Buffer size is implicitly assumed to be WEBAUDIO_BLOCK_SIZE.
@@ -116,19 +124,27 @@ void AudioBlockCopyChannelWithScale(cons
 void BufferComplexMultiply(const float* aInput,
                            const float* aScale,
                            float* aOutput,
                            uint32_t aSize);
 
 /**
  * In place gain. aScale == 1.0f should be optimized.
  */
-void AudioBlockInPlaceScale(float aBlock[WEBAUDIO_BLOCK_SIZE],
-                            uint32_t aChannelCount,
-                            float aScale);
+void AudioBufferInPlaceScale(float aBlock[WEBAUDIO_BLOCK_SIZE],
+                             uint32_t aChannelCount,
+                             float aScale);
+
+/**
+ * In place gain. aScale == 1.0f should be optimized.
+ */
+void AudioBufferInPlaceScale(float* aBlock,
+                             uint32_t aChannelCount,
+                             float aScale,
+                             uint32_t aSize);
 
 /**
  * Upmix a mono input to a stereo output, scaling the two output channels by two
  * different gain value.
  * This algorithm is specified in the WebAudio spec.
  */
 void
 AudioBlockPanMonoToStereo(const float aInput[WEBAUDIO_BLOCK_SIZE],
@@ -143,16 +159,22 @@ AudioBlockPanMonoToStereo(const float aI
 void
 AudioBlockPanStereoToStereo(const float aInputL[WEBAUDIO_BLOCK_SIZE],
                             const float aInputR[WEBAUDIO_BLOCK_SIZE],
                             float aGainL, float aGainR, bool aIsOnTheLeft,
                             float aOutputL[WEBAUDIO_BLOCK_SIZE],
                             float aOutputR[WEBAUDIO_BLOCK_SIZE]);
 
 /**
+ * Return the sum of squares of all of the samples in the input.
+ */
+float
+AudioBufferSumOfSquares(const float* aInput, uint32_t aLength);
+
+/**
  * All methods of this class and its subclasses are called on the
  * MediaStreamGraph thread.
  */
 class AudioNodeEngine {
 public:
   // This should be compatible with AudioNodeStream::OutputChunks.
   typedef nsAutoTArray<AudioChunk, 1> OutputChunks;
 
--- a/content/media/webaudio/AnalyserNode.cpp
+++ b/content/media/webaudio/AnalyserNode.cpp
@@ -283,18 +283,18 @@ AnalyserNode::AppendChunk(const AudioChu
   }
 
   PodCopy(mBuffer.Elements() + mWriteIndex, static_cast<const float*>(aChunk.mChannelData[0]), chunkDuration);
   for (uint32_t i = 1; i < channelCount; ++i) {
     AudioBlockAddChannelWithScale(static_cast<const float*>(aChunk.mChannelData[i]), 1.0f,
                                   mBuffer.Elements() + mWriteIndex);
   }
   if (channelCount > 1) {
-    AudioBlockInPlaceScale(mBuffer.Elements() + mWriteIndex, 1,
-                           1.0f / aChunk.mChannelData.Length());
+    AudioBufferInPlaceScale(mBuffer.Elements() + mWriteIndex, 1,
+                            1.0f / aChunk.mChannelData.Length());
   }
   mWriteIndex += chunkDuration;
   MOZ_ASSERT(mWriteIndex <= bufferSize);
   if (mWriteIndex >= bufferSize) {
     mWriteIndex = 0;
   }
 }
 
--- a/content/media/webaudio/FFTBlock.h
+++ b/content/media/webaudio/FFTBlock.h
@@ -43,24 +43,34 @@ public:
   void Multiply(const FFTBlock& aFrame)
   {
     BufferComplexMultiply(reinterpret_cast<const float*>(mOutputBuffer.Elements()),
                           reinterpret_cast<const float*>(aFrame.mOutputBuffer.Elements()),
                           reinterpret_cast<float*>(mOutputBuffer.Elements()),
                           mFFTSize / 2 + 1);
   }
 
+  void PerformPaddedFFT(const float* aData, size_t dataSize)
+  {
+    MOZ_ASSERT(dataSize <= FFTSize());
+    nsTArray<float> paddedData;
+    paddedData.SetLength(FFTSize());
+    PodCopy(paddedData.Elements(), aData, dataSize);
+    PodZero(paddedData.Elements() + dataSize, mFFTSize - dataSize);
+    PerformFFT(paddedData.Elements());
+  }
+
   void SetFFTSize(uint32_t aSize)
   {
     mFFTSize = aSize;
     mOutputBuffer.SetLength(aSize / 2 + 1);
     PodZero(mOutputBuffer.Elements(), aSize / 2 + 1);
   }
 
-  float FFTSize() const
+  uint32_t FFTSize() const
   {
     return mFFTSize;
   }
   float RealData(uint32_t aIndex) const
   {
     return mOutputBuffer[aIndex].r;
   }
   float ImagData(uint32_t aIndex) const
--- a/content/media/webaudio/Makefile.in
+++ b/content/media/webaudio/Makefile.in
@@ -13,8 +13,9 @@ LIBRARY_NAME   := gkconwebaudio_s
 LIBXUL_LIBRARY := 1
 ifndef _MSC_VER
 FAIL_ON_WARNINGS := 1
 endif # !_MSC_VER
 
 FORCE_STATIC_LIB := 1
 
 include $(topsrcdir)/config/rules.mk
+include $(topsrcdir)/ipc/chromium/chromium-config.mk
--- a/content/media/webaudio/PannerNode.cpp
+++ b/content/media/webaudio/PannerNode.cpp
@@ -327,17 +327,17 @@ PannerNodeEngine::GainStereoToStereo(con
 }
 
 void
 PannerNodeEngine::DistanceAndConeGain(AudioChunk* aChunk, float aGain)
 {
   float* samples = static_cast<float*>(const_cast<void*>(*aChunk->mChannelData.Elements()));
   uint32_t channelCount = aChunk->mChannelData.Length();
 
-  AudioBlockInPlaceScale(samples, channelCount, aGain);
+  AudioBufferInPlaceScale(samples, channelCount, aGain);
 }
 
 // This algorithm is specicied in the webaudio spec.
 void
 PannerNodeEngine::ComputeAzimuthAndElevation(float& aAzimuth, float& aElevation)
 {
   ThreeDPoint sourceListener = mPosition - mListenerPosition;
 
--- a/content/media/webaudio/blink/DirectConvolver.cpp
+++ b/content/media/webaudio/blink/DirectConvolver.cpp
@@ -21,86 +21,55 @@
  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include "config.h"
-
-#if ENABLE(WEB_AUDIO)
-
-#include "core/platform/audio/DirectConvolver.h"
+#include "DirectConvolver.h"
+#include "mozilla/PodOperations.h"
 
-#if OS(DARWIN)
-#include <Accelerate/Accelerate.h>
-#endif
-
-#include "core/platform/audio/VectorMath.h"
+using namespace mozilla;
 
 namespace WebCore {
 
-using namespace VectorMath;
-    
 DirectConvolver::DirectConvolver(size_t inputBlockSize)
     : m_inputBlockSize(inputBlockSize)
-#if USE(WEBAUDIO_IPP)
-    , m_overlayBuffer(inputBlockSize)
-#endif // USE(WEBAUDIO_IPP)
-    , m_buffer(inputBlockSize * 2)
 {
+  m_buffer.SetLength(inputBlockSize * 2);
+  PodZero(m_buffer.Elements(), inputBlockSize * 2);
 }
 
-void DirectConvolver::process(AudioFloatArray* convolutionKernel, const float* sourceP, float* destP, size_t framesToProcess)
+void DirectConvolver::process(const nsTArray<float>* convolutionKernel, const float* sourceP, float* destP, size_t framesToProcess)
 {
-    ASSERT(framesToProcess == m_inputBlockSize);
+    MOZ_ASSERT(framesToProcess == m_inputBlockSize);
     if (framesToProcess != m_inputBlockSize)
         return;
 
     // Only support kernelSize <= m_inputBlockSize
-    size_t kernelSize = convolutionKernel->size();
-    ASSERT(kernelSize <= m_inputBlockSize);
+    size_t kernelSize = convolutionKernel->Length();
+    MOZ_ASSERT(kernelSize <= m_inputBlockSize);
     if (kernelSize > m_inputBlockSize)
         return;
 
-    float* kernelP = convolutionKernel->data();
+    const float* kernelP = convolutionKernel->Elements();
 
     // Sanity check
-    bool isCopyGood = kernelP && sourceP && destP && m_buffer.data();
-    ASSERT(isCopyGood);
+    bool isCopyGood = kernelP && sourceP && destP && m_buffer.Elements();
+    MOZ_ASSERT(isCopyGood);
     if (!isCopyGood)
         return;
 
-#if USE(WEBAUDIO_IPP)
-    float* outputBuffer = m_buffer.data();
-    float* overlayBuffer = m_overlayBuffer.data();
-    bool isCopyGood2 = overlayBuffer && m_overlayBuffer.size() >= kernelSize && m_buffer.size() == m_inputBlockSize * 2;
-    ASSERT(isCopyGood2);
-    if (!isCopyGood2)
-        return;
-
-    ippsConv_32f(static_cast<const Ipp32f*>(sourceP), framesToProcess, static_cast<Ipp32f*>(kernelP), kernelSize, static_cast<Ipp32f*>(outputBuffer));
-
-    vadd(outputBuffer, 1, overlayBuffer, 1, destP, 1, framesToProcess);
-    memcpy(overlayBuffer, outputBuffer + m_inputBlockSize, sizeof(float) * kernelSize);
-#else
-    float* inputP = m_buffer.data() + m_inputBlockSize;
+    float* inputP = m_buffer.Elements() + m_inputBlockSize;
 
     // Copy samples to 2nd half of input buffer.
     memcpy(inputP, sourceP, sizeof(float) * framesToProcess);
 
-#if OS(DARWIN)
-#if defined(__ppc__) || defined(__i386__)
-    conv(inputP - kernelSize + 1, 1, kernelP + kernelSize - 1, -1, destP, 1, framesToProcess, kernelSize);
-#else
-    vDSP_conv(inputP - kernelSize + 1, 1, kernelP + kernelSize - 1, -1, destP, 1, framesToProcess, kernelSize);
-#endif // defined(__ppc__) || defined(__i386__)
-#else
     // FIXME: The macro can be further optimized to avoid pipeline stalls. One possibility is to maintain 4 separate sums and change the macro to CONVOLVE_FOUR_SAMPLES.
 #define CONVOLVE_ONE_SAMPLE             \
     sum += inputP[i - j] * kernelP[j];  \
     j++;
 
     size_t i = 0;
     while (i < framesToProcess) {
         size_t j = 0;
@@ -360,26 +329,19 @@ void DirectConvolver::process(AudioFloat
         } else {
             while (j < kernelSize) {
                 // Non-optimized using actual while loop.
                 CONVOLVE_ONE_SAMPLE
             }
         }
         destP[i++] = sum;
     }
-#endif // OS(DARWIN)
 
     // Copy 2nd half of input buffer to 1st half.
-    memcpy(m_buffer.data(), inputP, sizeof(float) * framesToProcess);
-#endif
+    memcpy(m_buffer.Elements(), inputP, sizeof(float) * framesToProcess);
 }
 
 void DirectConvolver::reset()
 {
-    m_buffer.zero();
-#if USE(WEBAUDIO_IPP)
-    m_overlayBuffer.zero();
-#endif // USE(WEBAUDIO_IPP)
+    PodZero(m_buffer.Elements(), m_buffer.Length());
 }
 
 } // namespace WebCore
-
-#endif // ENABLE(WEB_AUDIO)
--- a/content/media/webaudio/blink/DirectConvolver.h
+++ b/content/media/webaudio/blink/DirectConvolver.h
@@ -24,36 +24,29 @@
  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifndef DirectConvolver_h
 #define DirectConvolver_h
 
-#include "core/platform/audio/AudioArray.h"
-
-#if USE(WEBAUDIO_IPP)
-#include <ipps.h>
-#endif // USE(WEBAUDIO_IPP)
+#include "nsTArray.h"
 
 namespace WebCore {
 
 class DirectConvolver {
 public:
     DirectConvolver(size_t inputBlockSize);
 
-    void process(AudioFloatArray* convolutionKernel, const float* sourceP, float* destP, size_t framesToProcess);
+    void process(const nsTArray<float>* convolutionKernel, const float* sourceP, float* destP, size_t framesToProcess);
 
     void reset();
 
 private:
     size_t m_inputBlockSize;
 
-#if USE(WEBAUDIO_IPP)
-    AudioFloatArray m_overlayBuffer;
-#endif // USE(WEBAUDIO_IPP)
-    AudioFloatArray m_buffer;
+    nsTArray<float> m_buffer;
 };
 
 } // namespace WebCore
 
 #endif // DirectConvolver_h
--- a/content/media/webaudio/blink/FFTConvolver.cpp
+++ b/content/media/webaudio/blink/FFTConvolver.cpp
@@ -21,100 +21,97 @@
  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include "config.h"
-
-#if ENABLE(WEB_AUDIO)
+#include "FFTConvolver.h"
+#include "mozilla/PodOperations.h"
 
-#include "core/platform/audio/FFTConvolver.h"
-
-#include "core/platform/audio/VectorMath.h"
+using namespace mozilla;
 
 namespace WebCore {
 
-using namespace VectorMath;
-    
 FFTConvolver::FFTConvolver(size_t fftSize)
     : m_frame(fftSize)
     , m_readWriteIndex(0)
-    , m_inputBuffer(fftSize) // 2nd half of buffer is always zeroed
-    , m_outputBuffer(fftSize)
-    , m_lastOverlapBuffer(fftSize / 2)
 {
+  m_inputBuffer.SetLength(fftSize);
+  PodZero(m_inputBuffer.Elements(), fftSize);
+  m_outputBuffer.SetLength(fftSize);
+  PodZero(m_outputBuffer.Elements(), fftSize);
+  m_lastOverlapBuffer.SetLength(fftSize / 2);
+  PodZero(m_lastOverlapBuffer.Elements(), fftSize / 2);
 }
 
-void FFTConvolver::process(FFTFrame* fftKernel, const float* sourceP, float* destP, size_t framesToProcess)
+void FFTConvolver::process(FFTBlock* fftKernel, const float* sourceP, float* destP, size_t framesToProcess)
 {
     size_t halfSize = fftSize() / 2;
 
     // framesToProcess must be an exact multiple of halfSize,
     // or halfSize is a multiple of framesToProcess when halfSize > framesToProcess.
     bool isGood = !(halfSize % framesToProcess && framesToProcess % halfSize);
-    ASSERT(isGood);
+    MOZ_ASSERT(isGood);
     if (!isGood)
         return;
 
     size_t numberOfDivisions = halfSize <= framesToProcess ? (framesToProcess / halfSize) : 1;
     size_t divisionSize = numberOfDivisions == 1 ? framesToProcess : halfSize;
 
     for (size_t i = 0; i < numberOfDivisions; ++i, sourceP += divisionSize, destP += divisionSize) {
         // Copy samples to input buffer (note contraint above!)
-        float* inputP = m_inputBuffer.data();
+        float* inputP = m_inputBuffer.Elements();
 
         // Sanity check
-        bool isCopyGood1 = sourceP && inputP && m_readWriteIndex + divisionSize <= m_inputBuffer.size();
-        ASSERT(isCopyGood1);
+        bool isCopyGood1 = sourceP && inputP && m_readWriteIndex + divisionSize <= m_inputBuffer.Length();
+        MOZ_ASSERT(isCopyGood1);
         if (!isCopyGood1)
             return;
 
         memcpy(inputP + m_readWriteIndex, sourceP, sizeof(float) * divisionSize);
 
         // Copy samples from output buffer
-        float* outputP = m_outputBuffer.data();
+        float* outputP = m_outputBuffer.Elements();
 
         // Sanity check
-        bool isCopyGood2 = destP && outputP && m_readWriteIndex + divisionSize <= m_outputBuffer.size();
-        ASSERT(isCopyGood2);
+        bool isCopyGood2 = destP && outputP && m_readWriteIndex + divisionSize <= m_outputBuffer.Length();
+        MOZ_ASSERT(isCopyGood2);
         if (!isCopyGood2)
             return;
 
         memcpy(destP, outputP + m_readWriteIndex, sizeof(float) * divisionSize);
         m_readWriteIndex += divisionSize;
 
         // Check if it's time to perform the next FFT
         if (m_readWriteIndex == halfSize) {
             // The input buffer is now filled (get frequency-domain version)
-            m_frame.doFFT(m_inputBuffer.data());
-            m_frame.multiply(*fftKernel);
-            m_frame.doInverseFFT(m_outputBuffer.data());
+            m_frame.PerformFFT(m_inputBuffer.Elements());
+            m_frame.Multiply(*fftKernel);
+            m_frame.PerformInverseFFT(m_outputBuffer.Elements());
 
             // Overlap-add 1st half from previous time
-            vadd(m_outputBuffer.data(), 1, m_lastOverlapBuffer.data(), 1, m_outputBuffer.data(), 1, halfSize);
+            AudioBufferAddWithScale(m_lastOverlapBuffer.Elements(), 1.0f,
+                                    m_outputBuffer.Elements(), halfSize);
 
             // Finally, save 2nd half of result
-            bool isCopyGood3 = m_outputBuffer.size() == 2 * halfSize && m_lastOverlapBuffer.size() == halfSize;
-            ASSERT(isCopyGood3);
+            bool isCopyGood3 = m_outputBuffer.Length() == 2 * halfSize && m_lastOverlapBuffer.Length() == halfSize;
+            MOZ_ASSERT(isCopyGood3);
             if (!isCopyGood3)
                 return;
 
-            memcpy(m_lastOverlapBuffer.data(), m_outputBuffer.data() + halfSize, sizeof(float) * halfSize);
+            memcpy(m_lastOverlapBuffer.Elements(), m_outputBuffer.Elements() + halfSize, sizeof(float) * halfSize);
 
             // Reset index back to start for next time
             m_readWriteIndex = 0;
         }
     }
 }
 
 void FFTConvolver::reset()
 {
-    m_lastOverlapBuffer.zero();
+    PodZero(m_lastOverlapBuffer.Elements(), m_lastOverlapBuffer.Length());
     m_readWriteIndex = 0;
 }
 
 } // namespace WebCore
-
-#endif // ENABLE(WEB_AUDIO)
--- a/content/media/webaudio/blink/FFTConvolver.h
+++ b/content/media/webaudio/blink/FFTConvolver.h
@@ -24,41 +24,44 @@
  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifndef FFTConvolver_h
 #define FFTConvolver_h
 
-#include "core/platform/audio/AudioArray.h"
-#include "core/platform/audio/FFTFrame.h"
+#include "nsTArray.h"
+#include "mozilla/FFTBlock.h"
 
 namespace WebCore {
 
+typedef nsTArray<float> AudioFloatArray;
+using mozilla::FFTBlock;
+
 class FFTConvolver {
 public:
     // fftSize must be a power of two
     FFTConvolver(size_t fftSize);
 
     // For now, with multiple calls to Process(), framesToProcess MUST add up EXACTLY to fftSize / 2
     //
     // FIXME: Later, we can do more sophisticated buffering to relax this requirement...
     //
     // The input to output latency is equal to fftSize / 2
     //
     // Processing in-place is allowed...
-    void process(FFTFrame* fftKernel, const float* sourceP, float* destP, size_t framesToProcess);
+    void process(FFTBlock* fftKernel, const float* sourceP, float* destP, size_t framesToProcess);
 
     void reset();
 
-    size_t fftSize() const { return m_frame.fftSize(); }
+    size_t fftSize() const { return m_frame.FFTSize(); }
 
 private:
-    FFTFrame m_frame;
+    FFTBlock m_frame;
 
     // Buffer input until we get fftSize / 2 samples then do an FFT
     size_t m_readWriteIndex;
     AudioFloatArray m_inputBuffer;
 
     // Stores output which we read a little at a time
     AudioFloatArray m_outputBuffer;
 
--- a/content/media/webaudio/blink/Makefile.in
+++ b/content/media/webaudio/blink/Makefile.in
@@ -10,8 +10,9 @@ VPATH            := @srcdir@
 include $(DEPTH)/config/autoconf.mk
 
 LIBRARY_NAME   := gkconwebaudio_blink_s
 LIBXUL_LIBRARY := 1
 
 FORCE_STATIC_LIB := 1
 
 include $(topsrcdir)/config/rules.mk
+include $(topsrcdir)/ipc/chromium/chromium-config.mk
--- a/content/media/webaudio/blink/Reverb.cpp
+++ b/content/media/webaudio/blink/Reverb.cpp
@@ -21,223 +21,219 @@
  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include "config.h"
-
-#if ENABLE(WEB_AUDIO)
-
-#include "core/platform/audio/Reverb.h"
+#include "Reverb.h"
 
 #include <math.h>
-#include "core/platform/audio/AudioBus.h"
-#include "core/platform/audio/AudioFileReader.h"
-#include "core/platform/audio/ReverbConvolver.h"
-#include "core/platform/audio/VectorMath.h"
-#include <wtf/MathExtras.h>
-#include <wtf/OwnPtr.h>
-#include <wtf/PassOwnPtr.h>
+#include "ReverbConvolver.h"
+#include "mozilla/FloatingPoint.h"
 
-#if OS(DARWIN)
-using namespace std;
-#endif
+using namespace mozilla;
 
 namespace WebCore {
 
-using namespace VectorMath;
-
 // Empirical gain calibration tested across many impulse responses to ensure perceived volume is same as dry (unprocessed) signal
 const float GainCalibration = -58;
 const float GainCalibrationSampleRate = 44100;
 
 // A minimum power value to when normalizing a silent (or very quiet) impulse response
 const float MinPower = 0.000125f;
-    
-static float calculateNormalizationScale(AudioBus* response)
+
+static float calculateNormalizationScale(ThreadSharedFloatArrayBufferList* response, size_t aLength, float sampleRate)
 {
     // Normalize by RMS power
-    size_t numberOfChannels = response->numberOfChannels();
-    size_t length = response->length();
+    size_t numberOfChannels = response->GetChannels();
 
     float power = 0;
 
     for (size_t i = 0; i < numberOfChannels; ++i) {
-        float channelPower = 0;
-        vsvesq(response->channel(i)->data(), 1, &channelPower, length);
+        float channelPower = AudioBufferSumOfSquares(static_cast<const float*>(response->GetData(i)), aLength);
         power += channelPower;
     }
 
-    power = sqrt(power / (numberOfChannels * length));
+    power = sqrt(power / (numberOfChannels * aLength));
 
     // Protect against accidental overload
-    if (std::isinf(power) || std::isnan(power) || power < MinPower)
+    if (!IsFinite(power) || IsNaN(power) || power < MinPower)
         power = MinPower;
 
     float scale = 1 / power;
 
     scale *= powf(10, GainCalibration * 0.05f); // calibrate to make perceived volume same as unprocessed
 
     // Scale depends on sample-rate.
-    if (response->sampleRate())
-        scale *= GainCalibrationSampleRate / response->sampleRate();
+    if (sampleRate)
+        scale *= GainCalibrationSampleRate / sampleRate;
 
     // True-stereo compensation
-    if (response->numberOfChannels() == 4)
+    if (response->GetChannels() == 4)
         scale *= 0.5f;
 
     return scale;
 }
 
-Reverb::Reverb(AudioBus* impulseResponse, size_t renderSliceSize, size_t maxFFTSize, size_t numberOfChannels, bool useBackgroundThreads, bool normalize)
+Reverb::Reverb(ThreadSharedFloatArrayBufferList* impulseResponse, size_t impulseResponseBufferLength, size_t renderSliceSize, size_t maxFFTSize, size_t numberOfChannels, bool useBackgroundThreads, bool normalize, float sampleRate)
 {
     float scale = 1;
 
     if (normalize) {
-        scale = calculateNormalizationScale(impulseResponse);
+        scale = calculateNormalizationScale(impulseResponse, impulseResponseBufferLength, sampleRate);
 
-        if (scale)
-            impulseResponse->scale(scale);
+        if (scale) {
+            for (uint32_t i = 0; i < impulseResponse->GetChannels(); ++i) {
+                AudioBufferInPlaceScale(const_cast<float*>(impulseResponse->GetData(i)),
+                                        1, scale, impulseResponseBufferLength);
+            }
+        }
     }
 
-    initialize(impulseResponse, renderSliceSize, maxFFTSize, numberOfChannels, useBackgroundThreads);
+    initialize(impulseResponse, impulseResponseBufferLength, renderSliceSize, maxFFTSize, numberOfChannels, useBackgroundThreads);
 
     // Undo scaling since this shouldn't be a destructive operation on impulseResponse.
     // FIXME: What about roundoff? Perhaps consider making a temporary scaled copy
     // instead of scaling and unscaling in place.
-    if (normalize && scale)
-        impulseResponse->scale(1 / scale);
+    if (normalize && scale) {
+        for (uint32_t i = 0; i < impulseResponse->GetChannels(); ++i) {
+            AudioBufferInPlaceScale(const_cast<float*>(impulseResponse->GetData(i)),
+                                    1, 1 / scale, impulseResponseBufferLength);
+        }
+    }
 }
 
-void Reverb::initialize(AudioBus* impulseResponseBuffer, size_t renderSliceSize, size_t maxFFTSize, size_t numberOfChannels, bool useBackgroundThreads)
+void Reverb::initialize(ThreadSharedFloatArrayBufferList* impulseResponseBuffer, size_t impulseResponseBufferLength, size_t renderSliceSize, size_t maxFFTSize, size_t numberOfChannels, bool useBackgroundThreads)
 {
-    m_impulseResponseLength = impulseResponseBuffer->length();
+    m_impulseResponseLength = impulseResponseBufferLength;
 
     // The reverb can handle a mono impulse response and still do stereo processing
-    size_t numResponseChannels = impulseResponseBuffer->numberOfChannels();
-    m_convolvers.reserveCapacity(numberOfChannels);
+    size_t numResponseChannels = impulseResponseBuffer->GetChannels();
+    m_convolvers.SetCapacity(numberOfChannels);
 
     int convolverRenderPhase = 0;
     for (size_t i = 0; i < numResponseChannels; ++i) {
-        AudioChannel* channel = impulseResponseBuffer->channel(i);
+        const float* channel = impulseResponseBuffer->GetData(i);
+        size_t length = impulseResponseBufferLength;
 
-        OwnPtr<ReverbConvolver> convolver = adoptPtr(new ReverbConvolver(channel, renderSliceSize, maxFFTSize, convolverRenderPhase, useBackgroundThreads));
-        m_convolvers.append(convolver.release());
+        nsAutoPtr<ReverbConvolver> convolver(new ReverbConvolver(channel, length, renderSliceSize, maxFFTSize, convolverRenderPhase, useBackgroundThreads));
+        m_convolvers.AppendElement(convolver.forget());
 
         convolverRenderPhase += renderSliceSize;
     }
 
     // For "True" stereo processing we allocate a temporary buffer to avoid repeatedly allocating it in the process() method.
     // It can be bad to allocate memory in a real-time thread.
-    if (numResponseChannels == 4)
-        m_tempBuffer = AudioBus::create(2, MaxFrameSize);
+    if (numResponseChannels == 4) {
+        AllocateAudioBlock(2, &m_tempBuffer);
+        WriteZeroesToAudioBlock(&m_tempBuffer, 0, WEBAUDIO_BLOCK_SIZE);
+    }
 }
 
-void Reverb::process(const AudioBus* sourceBus, AudioBus* destinationBus, size_t framesToProcess)
+void Reverb::process(const AudioChunk* sourceBus, AudioChunk* destinationBus, size_t framesToProcess)
 {
     // Do a fairly comprehensive sanity check.
     // If these conditions are satisfied, all of the source and destination pointers will be valid for the various matrixing cases.
-    bool isSafeToProcess = sourceBus && destinationBus && sourceBus->numberOfChannels() > 0 && destinationBus->numberOfChannels() > 0
-        && framesToProcess <= MaxFrameSize && framesToProcess <= sourceBus->length() && framesToProcess <= destinationBus->length(); 
-    
-    ASSERT(isSafeToProcess);
+    bool isSafeToProcess = sourceBus && destinationBus && sourceBus->mChannelData.Length() > 0 && destinationBus->mChannelData.Length() > 0
+        && framesToProcess <= MaxFrameSize && framesToProcess <= size_t(sourceBus->mDuration) && framesToProcess <= size_t(destinationBus->mDuration);
+
+    MOZ_ASSERT(isSafeToProcess);
     if (!isSafeToProcess)
         return;
 
     // For now only handle mono or stereo output
-    if (destinationBus->numberOfChannels() > 2) {
-        destinationBus->zero();
+    if (destinationBus->mChannelData.Length() > 2) {
+        destinationBus->SetNull(destinationBus->mDuration);
         return;
     }
 
-    AudioChannel* destinationChannelL = destinationBus->channel(0);
-    const AudioChannel* sourceChannelL = sourceBus->channel(0);
+    float* destinationChannelL = static_cast<float*>(const_cast<void*>(destinationBus->mChannelData[0]));
+    const float* sourceBusL = static_cast<const float*>(sourceBus->mChannelData[0]);
 
     // Handle input -> output matrixing...
-    size_t numInputChannels = sourceBus->numberOfChannels();
-    size_t numOutputChannels = destinationBus->numberOfChannels();
-    size_t numReverbChannels = m_convolvers.size();
+    size_t numInputChannels = sourceBus->mChannelData.Length();
+    size_t numOutputChannels = destinationBus->mChannelData.Length();
+    size_t numReverbChannels = m_convolvers.Length();
 
     if (numInputChannels == 2 && numReverbChannels == 2 && numOutputChannels == 2) {
         // 2 -> 2 -> 2
-        const AudioChannel* sourceChannelR = sourceBus->channel(1);
-        AudioChannel* destinationChannelR = destinationBus->channel(1);
-        m_convolvers[0]->process(sourceChannelL, destinationChannelL, framesToProcess);
-        m_convolvers[1]->process(sourceChannelR, destinationChannelR, framesToProcess);
+        const float* sourceBusR = static_cast<const float*>(sourceBus->mChannelData[1]);
+        float* destinationChannelR = static_cast<float*>(const_cast<void*>(destinationBus->mChannelData[1]));
+        m_convolvers[0]->process(sourceBusL, sourceBus->mDuration, destinationChannelL, destinationBus->mDuration, framesToProcess);
+        m_convolvers[1]->process(sourceBusR, sourceBus->mDuration, destinationChannelR, destinationBus->mDuration, framesToProcess);
     } else  if (numInputChannels == 1 && numOutputChannels == 2 && numReverbChannels == 2) {
         // 1 -> 2 -> 2
         for (int i = 0; i < 2; ++i) {
-            AudioChannel* destinationChannel = destinationBus->channel(i);
-            m_convolvers[i]->process(sourceChannelL, destinationChannel, framesToProcess);
+            float* destinationChannel = static_cast<float*>(const_cast<void*>(destinationBus->mChannelData[i]));
+            m_convolvers[i]->process(sourceBusL, sourceBus->mDuration, destinationChannel, destinationBus->mDuration, framesToProcess);
         }
     } else if (numInputChannels == 1 && numReverbChannels == 1 && numOutputChannels == 2) {
         // 1 -> 1 -> 2
-        m_convolvers[0]->process(sourceChannelL, destinationChannelL, framesToProcess);
+        m_convolvers[0]->process(sourceBusL, sourceBus->mDuration, destinationChannelL, destinationBus->mDuration, framesToProcess);
 
         // simply copy L -> R
-        AudioChannel* destinationChannelR = destinationBus->channel(1);
-        bool isCopySafe = destinationChannelL->data() && destinationChannelR->data() && destinationChannelL->length() >= framesToProcess && destinationChannelR->length() >= framesToProcess;
-        ASSERT(isCopySafe);
+        float* destinationChannelR = static_cast<float*>(const_cast<void*>(destinationBus->mChannelData[1]));
+        bool isCopySafe = destinationChannelL && destinationChannelR && size_t(destinationBus->mDuration) >= framesToProcess && size_t(destinationBus->mDuration) >= framesToProcess;
+        MOZ_ASSERT(isCopySafe);
         if (!isCopySafe)
             return;
-        memcpy(destinationChannelR->mutableData(), destinationChannelL->data(), sizeof(float) * framesToProcess);
+        PodCopy(destinationChannelR, destinationChannelL, framesToProcess);
     } else if (numInputChannels == 1 && numReverbChannels == 1 && numOutputChannels == 1) {
         // 1 -> 1 -> 1
-        m_convolvers[0]->process(sourceChannelL, destinationChannelL, framesToProcess);
+        m_convolvers[0]->process(sourceBusL, sourceBus->mDuration, destinationChannelL, destinationBus->mDuration, framesToProcess);
     } else if (numInputChannels == 2 && numReverbChannels == 4 && numOutputChannels == 2) {
         // 2 -> 4 -> 2 ("True" stereo)
-        const AudioChannel* sourceChannelR = sourceBus->channel(1);
-        AudioChannel* destinationChannelR = destinationBus->channel(1);
+        const float* sourceBusR = static_cast<const float*>(sourceBus->mChannelData[1]);
+        float* destinationChannelR = static_cast<float*>(const_cast<void*>(destinationBus->mChannelData[1]));
 
-        AudioChannel* tempChannelL = m_tempBuffer->channel(0);
-        AudioChannel* tempChannelR = m_tempBuffer->channel(1);
+        float* tempChannelL = static_cast<float*>(const_cast<void*>(m_tempBuffer.mChannelData[0]));
+        float* tempChannelR = static_cast<float*>(const_cast<void*>(m_tempBuffer.mChannelData[1]));
 
         // Process left virtual source
-        m_convolvers[0]->process(sourceChannelL, destinationChannelL, framesToProcess);
-        m_convolvers[1]->process(sourceChannelL, destinationChannelR, framesToProcess);
+        m_convolvers[0]->process(sourceBusL, sourceBus->mDuration, destinationChannelL, destinationBus->mDuration, framesToProcess);
+        m_convolvers[1]->process(sourceBusL, sourceBus->mDuration, destinationChannelR, destinationBus->mDuration, framesToProcess);
 
         // Process right virtual source
-        m_convolvers[2]->process(sourceChannelR, tempChannelL, framesToProcess);
-        m_convolvers[3]->process(sourceChannelR, tempChannelR, framesToProcess);
+        m_convolvers[2]->process(sourceBusR, sourceBus->mDuration, tempChannelL, m_tempBuffer.mDuration, framesToProcess);
+        m_convolvers[3]->process(sourceBusR, sourceBus->mDuration, tempChannelR, m_tempBuffer.mDuration, framesToProcess);
 
-        destinationBus->sumFrom(*m_tempBuffer);
+        AudioBufferAddWithScale(tempChannelL, 1.0f, destinationChannelL, sourceBus->mDuration);
+        AudioBufferAddWithScale(tempChannelR, 1.0f, destinationChannelR, sourceBus->mDuration);
     } else if (numInputChannels == 1 && numReverbChannels == 4 && numOutputChannels == 2) {
         // 1 -> 4 -> 2 (Processing mono with "True" stereo impulse response)
         // This is an inefficient use of a four-channel impulse response, but we should handle the case.
-        AudioChannel* destinationChannelR = destinationBus->channel(1);
+        float* destinationChannelR = static_cast<float*>(const_cast<void*>(destinationBus->mChannelData[1]));
 
-        AudioChannel* tempChannelL = m_tempBuffer->channel(0);
-        AudioChannel* tempChannelR = m_tempBuffer->channel(1);
+        float* tempChannelL = static_cast<float*>(const_cast<void*>(m_tempBuffer.mChannelData[0]));
+        float* tempChannelR = static_cast<float*>(const_cast<void*>(m_tempBuffer.mChannelData[1]));
 
         // Process left virtual source
-        m_convolvers[0]->process(sourceChannelL, destinationChannelL, framesToProcess);
-        m_convolvers[1]->process(sourceChannelL, destinationChannelR, framesToProcess);
+        m_convolvers[0]->process(sourceBusL, sourceBus->mDuration, destinationChannelL, destinationBus->mDuration, framesToProcess);
+        m_convolvers[1]->process(sourceBusL, sourceBus->mDuration, destinationChannelR, destinationBus->mDuration, framesToProcess);
 
         // Process right virtual source
-        m_convolvers[2]->process(sourceChannelL, tempChannelL, framesToProcess);
-        m_convolvers[3]->process(sourceChannelL, tempChannelR, framesToProcess);
+        m_convolvers[2]->process(sourceBusL, sourceBus->mDuration, tempChannelL, m_tempBuffer.mDuration, framesToProcess);
+        m_convolvers[3]->process(sourceBusL, sourceBus->mDuration, tempChannelR, m_tempBuffer.mDuration, framesToProcess);
 
-        destinationBus->sumFrom(*m_tempBuffer);
+        AudioBufferAddWithScale(tempChannelL, 1.0f, destinationChannelL, sourceBus->mDuration);
+        AudioBufferAddWithScale(tempChannelR, 1.0f, destinationChannelR, sourceBus->mDuration);
     } else {
         // Handle gracefully any unexpected / unsupported matrixing
         // FIXME: add code for 5.1 support...
-        destinationBus->zero();
+        destinationBus->SetNull(destinationBus->mDuration);
     }
 }
 
 void Reverb::reset()
 {
-    for (size_t i = 0; i < m_convolvers.size(); ++i)
+    for (size_t i = 0; i < m_convolvers.Length(); ++i)
         m_convolvers[i]->reset();
 }
 
 size_t Reverb::latencyFrames() const
 {
-    return !m_convolvers.isEmpty() ? m_convolvers.first()->latencyFrames() : 0;
+    return !m_convolvers.IsEmpty() ? m_convolvers[0]->latencyFrames() : 0;
 }
 
 } // namespace WebCore
-
-#endif // ENABLE(WEB_AUDIO)
--- a/content/media/webaudio/blink/Reverb.h
+++ b/content/media/webaudio/blink/Reverb.h
@@ -24,44 +24,47 @@
  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifndef Reverb_h
 #define Reverb_h
 
-#include "core/platform/audio/ReverbConvolver.h"
-#include <wtf/Vector.h>
+#include "ReverbConvolver.h"
+#include "nsAutoPtr.h"
+#include "nsTArray.h"
+
+namespace mozilla {
+class ThreadSharedFloatArrayBufferList;
+}
 
 namespace WebCore {
 
-class AudioBus;
-    
 // Multi-channel convolution reverb with channel matrixing - one or more ReverbConvolver objects are used internally.
 
 class Reverb {
 public:
     enum { MaxFrameSize = 256 };
 
     // renderSliceSize is a rendering hint, so the FFTs can be optimized to not all occur at the same time (very bad when rendering on a real-time thread).
-    Reverb(AudioBus* impulseResponseBuffer, size_t renderSliceSize, size_t maxFFTSize, size_t numberOfChannels, bool useBackgroundThreads, bool normalize);
+    Reverb(mozilla::ThreadSharedFloatArrayBufferList* impulseResponseBuffer, size_t impulseResponseBufferLength, size_t renderSliceSize, size_t maxFFTSize, size_t numberOfChannels, bool useBackgroundThreads, bool normalize, float sampleRate);
 
-    void process(const AudioBus* sourceBus, AudioBus* destinationBus, size_t framesToProcess);
+    void process(const mozilla::AudioChunk* sourceBus, mozilla::AudioChunk* destinationBus, size_t framesToProcess);
     void reset();
 
     size_t impulseResponseLength() const { return m_impulseResponseLength; }
     size_t latencyFrames() const;
 
 private:
-    void initialize(AudioBus* impulseResponseBuffer, size_t renderSliceSize, size_t maxFFTSize, size_t numberOfChannels, bool useBackgroundThreads);
+    void initialize(mozilla::ThreadSharedFloatArrayBufferList* impulseResponseBuffer, size_t impulseResponseBufferLength, size_t renderSliceSize, size_t maxFFTSize, size_t numberOfChannels, bool useBackgroundThreads);
 
     size_t m_impulseResponseLength;
 
-    Vector<OwnPtr<ReverbConvolver> > m_convolvers;
+    nsTArray<nsAutoPtr<ReverbConvolver> > m_convolvers;
 
     // For "True" stereo processing
-    RefPtr<AudioBus> m_tempBuffer;
+    mozilla::AudioChunk m_tempBuffer;
 };
 
 } // namespace WebCore
 
 #endif // Reverb_h
--- a/content/media/webaudio/blink/ReverbAccumulationBuffer.cpp
+++ b/content/media/webaudio/blink/ReverbAccumulationBuffer.cpp
@@ -21,99 +21,96 @@
  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include "config.h"
-
-#if ENABLE(WEB_AUDIO)
+#include "ReverbAccumulationBuffer.h"
+#include "AudioNodeEngine.h"
+#include "mozilla/PodOperations.h"
+#include <algorithm>
 
-#include "core/platform/audio/ReverbAccumulationBuffer.h"
-
-#include "core/platform/audio/VectorMath.h"
+using namespace mozilla;
 
 namespace WebCore {
 
-using namespace VectorMath;
-
 ReverbAccumulationBuffer::ReverbAccumulationBuffer(size_t length)
-    : m_buffer(length)
-    , m_readIndex(0)
+    : m_readIndex(0)
     , m_readTimeFrame(0)
 {
+  m_buffer.SetLength(length);
+  PodZero(m_buffer.Elements(), length);
 }
 
 void ReverbAccumulationBuffer::readAndClear(float* destination, size_t numberOfFrames)
 {
-    size_t bufferLength = m_buffer.size();
+    size_t bufferLength = m_buffer.Length();
     bool isCopySafe = m_readIndex <= bufferLength && numberOfFrames <= bufferLength;
-    
-    ASSERT(isCopySafe);
+
+    MOZ_ASSERT(isCopySafe);
     if (!isCopySafe)
         return;
 
     size_t framesAvailable = bufferLength - m_readIndex;
     size_t numberOfFrames1 = std::min(numberOfFrames, framesAvailable);
     size_t numberOfFrames2 = numberOfFrames - numberOfFrames1;
 
-    float* source = m_buffer.data();
+    float* source = m_buffer.Elements();
     memcpy(destination, source + m_readIndex, sizeof(float) * numberOfFrames1);
     memset(source + m_readIndex, 0, sizeof(float) * numberOfFrames1);
 
     // Handle wrap-around if necessary
     if (numberOfFrames2 > 0) {
         memcpy(destination + numberOfFrames1, source, sizeof(float) * numberOfFrames2);
         memset(source, 0, sizeof(float) * numberOfFrames2);
     }
 
     m_readIndex = (m_readIndex + numberOfFrames) % bufferLength;
     m_readTimeFrame += numberOfFrames;
 }
 
 void ReverbAccumulationBuffer::updateReadIndex(int* readIndex, size_t numberOfFrames) const
 {
     // Update caller's readIndex
-    *readIndex = (*readIndex + numberOfFrames) % m_buffer.size();
+    *readIndex = (*readIndex + numberOfFrames) % m_buffer.Length();
 }
 
 int ReverbAccumulationBuffer::accumulate(float* source, size_t numberOfFrames, int* readIndex, size_t delayFrames)
 {
-    size_t bufferLength = m_buffer.size();
-    
+    size_t bufferLength = m_buffer.Length();
+
     size_t writeIndex = (*readIndex + delayFrames) % bufferLength;
 
     // Update caller's readIndex
     *readIndex = (*readIndex + numberOfFrames) % bufferLength;
 
     size_t framesAvailable = bufferLength - writeIndex;
     size_t numberOfFrames1 = std::min(numberOfFrames, framesAvailable);
     size_t numberOfFrames2 = numberOfFrames - numberOfFrames1;
 
-    float* destination = m_buffer.data();
+    float* destination = m_buffer.Elements();
 
     bool isSafe = writeIndex <= bufferLength && numberOfFrames1 + writeIndex <= bufferLength && numberOfFrames2 <= bufferLength;
-    ASSERT(isSafe);
+    MOZ_ASSERT(isSafe);
     if (!isSafe)
         return 0;
 
-    vadd(source, 1, destination + writeIndex, 1, destination + writeIndex, 1, numberOfFrames1);
+    AudioBufferAddWithScale(source, 1.0f, destination + writeIndex, numberOfFrames1);
 
     // Handle wrap-around if necessary
-    if (numberOfFrames2 > 0)       
-        vadd(source + numberOfFrames1, 1, destination, 1, destination, 1, numberOfFrames2);
+    if (numberOfFrames2 > 0) {
+        AudioBufferAddWithScale(source + numberOfFrames1, 1.0f, destination, numberOfFrames2);
+    }
 
     return writeIndex;
 }
 
 void ReverbAccumulationBuffer::reset()
 {
-    m_buffer.zero();
+    PodZero(m_buffer.Elements(), m_buffer.Length());
     m_readIndex = 0;
     m_readTimeFrame = 0;
 }
 
 } // namespace WebCore
-
-#endif // ENABLE(WEB_AUDIO)
--- a/content/media/webaudio/blink/ReverbAccumulationBuffer.h
+++ b/content/media/webaudio/blink/ReverbAccumulationBuffer.h
@@ -24,20 +24,22 @@
  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifndef ReverbAccumulationBuffer_h
 #define ReverbAccumulationBuffer_h
 
-#include "core/platform/audio/AudioArray.h"
+#include "nsTArray.h"
 
 namespace WebCore {
 
+typedef nsTArray<float> AudioFloatArray;
+
 // ReverbAccumulationBuffer is a circular delay buffer with one client reading from it and multiple clients
 // writing/accumulating to it at different delay offsets from the read position.  The read operation will zero the memory
 // just read from the buffer, so it will be ready for accumulation the next time around.
 class ReverbAccumulationBuffer {
 public:
     ReverbAccumulationBuffer(size_t length);
 
     // This will read from, then clear-out numberOfFrames
--- a/content/media/webaudio/blink/ReverbConvolver.cpp
+++ b/content/media/webaudio/blink/ReverbConvolver.cpp
@@ -21,72 +21,67 @@
  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include "config.h"
+#include "ReverbConvolver.h"
 
-#if ENABLE(WEB_AUDIO)
+using namespace mozilla;
 
-#include "core/platform/audio/ReverbConvolver.h"
-
-#include "core/platform/audio/AudioBus.h"
-#include "core/platform/audio/VectorMath.h"
+template<>
+struct RunnableMethodTraits<WebCore::ReverbConvolver>
+{
+  static void RetainCallee(WebCore::ReverbConvolver* obj) {}
+  static void ReleaseCallee(WebCore::ReverbConvolver* obj) {}
+};
 
 namespace WebCore {
 
-using namespace VectorMath;
-
 const int InputBufferSize = 8 * 16384;
 
 // We only process the leading portion of the impulse response in the real-time thread.  We don't exceed this length.
 // It turns out then, that the background thread has about 278msec of scheduling slop.
 // Empirically, this has been found to be a good compromise between giving enough time for scheduling slop,
 // while still minimizing the amount of processing done in the primary (high-priority) thread.
 // This was found to be a good value on Mac OS X, and may work well on other platforms as well, assuming
 // the very rough scheduling latencies are similar on these time-scales.  Of course, this code may need to be
 // tuned for individual platforms if this assumption is found to be incorrect.
 const size_t RealtimeFrameLimit = 8192  + 4096; // ~278msec @ 44.1KHz
 
 const size_t MinFFTSize = 128;
 const size_t MaxRealtimeFFTSize = 2048;
 
-static void backgroundThreadEntry(void* threadData)
-{
-    ReverbConvolver* reverbConvolver = static_cast<ReverbConvolver*>(threadData);
-    reverbConvolver->backgroundThreadEntry();
-}
-
-ReverbConvolver::ReverbConvolver(AudioChannel* impulseResponse, size_t renderSliceSize, size_t maxFFTSize, size_t convolverRenderPhase, bool useBackgroundThreads)
-    : m_impulseResponseLength(impulseResponse->length())
-    , m_accumulationBuffer(impulseResponse->length() + renderSliceSize)
+ReverbConvolver::ReverbConvolver(const float* impulseResponseData, size_t impulseResponseLength, size_t renderSliceSize, size_t maxFFTSize, size_t convolverRenderPhase, bool useBackgroundThreads)
+    : m_impulseResponseLength(impulseResponseLength)
+    , m_accumulationBuffer(impulseResponseLength + renderSliceSize)
     , m_inputBuffer(InputBufferSize)
     , m_minFFTSize(MinFFTSize) // First stage will have this size - successive stages will double in size each time
     , m_maxFFTSize(maxFFTSize) // until we hit m_maxFFTSize
+    , m_backgroundThread("ConvolverWorker")
+    , m_backgroundThreadMonitor("ConvolverMonitor")
     , m_useBackgroundThreads(useBackgroundThreads)
-    , m_backgroundThread(0)
     , m_wantsToExit(false)
     , m_moreInputBuffered(false)
 {
     // If we are using background threads then don't exceed this FFT size for the
     // stages which run in the real-time thread.  This avoids having only one or two
     // large stages (size 16384 or so) at the end which take a lot of time every several
     // processing slices.  This way we amortize the cost over more processing slices.
     m_maxRealtimeFFTSize = MaxRealtimeFFTSize;
 
     // For the moment, a good way to know if we have real-time constraint is to check if we're using background threads.
     // Otherwise, assume we're being run from a command-line tool.
     bool hasRealtimeConstraint = useBackgroundThreads;
 
-    const float* response = impulseResponse->data();
-    size_t totalResponseLength = impulseResponse->length();
+    const float* response = impulseResponseData;
+    size_t totalResponseLength = impulseResponseLength;
 
     // The total latency is zero because the direct-convolution is used in the leading portion.
     size_t reverbTotalLatency = 0;
 
     size_t stageOffset = 0;
     int i = 0;
     size_t fftSize = m_minFFTSize;
     while (stageOffset < totalResponseLength) {
@@ -97,25 +92,25 @@ ReverbConvolver::ReverbConvolver(AudioCh
         if (stageSize + stageOffset > totalResponseLength)
             stageSize = totalResponseLength - stageOffset;
 
         // This "staggers" the time when each FFT happens so they don't all happen at the same time
         int renderPhase = convolverRenderPhase + i * renderSliceSize;
 
         bool useDirectConvolver = !stageOffset;
 
-        OwnPtr<ReverbConvolverStage> stage = adoptPtr(new ReverbConvolverStage(response, totalResponseLength, reverbTotalLatency, stageOffset, stageSize, fftSize, renderPhase, renderSliceSize, &m_accumulationBuffer, useDirectConvolver));
+        nsAutoPtr<ReverbConvolverStage> stage(new ReverbConvolverStage(response, totalResponseLength, reverbTotalLatency, stageOffset, stageSize, fftSize, renderPhase, renderSliceSize, &m_accumulationBuffer, useDirectConvolver));
 
         bool isBackgroundStage = false;
 
         if (this->useBackgroundThreads() && stageOffset > RealtimeFrameLimit) {
-            m_backgroundStages.append(stage.release());
+            m_backgroundStages.AppendElement(stage.forget());
             isBackgroundStage = true;
         } else
-            m_stages.append(stage.release());
+            m_stages.AppendElement(stage.forget());
 
         stageOffset += stageSize;
         ++i;
 
         if (!useDirectConvolver) {
             // Figure out next FFT size
             fftSize *= 2;
         }
@@ -123,116 +118,117 @@ ReverbConvolver::ReverbConvolver(AudioCh
         if (hasRealtimeConstraint && !isBackgroundStage && fftSize > m_maxRealtimeFFTSize)
             fftSize = m_maxRealtimeFFTSize;
         if (fftSize > m_maxFFTSize)
             fftSize = m_maxFFTSize;
     }
 
     // Start up background thread
     // FIXME: would be better to up the thread priority here.  It doesn't need to be real-time, but higher than the default...
-    if (this->useBackgroundThreads() && m_backgroundStages.size() > 0)
-        m_backgroundThread = createThread(WebCore::backgroundThreadEntry, this, "convolution background thread");
+    if (this->useBackgroundThreads() && m_backgroundStages.Length() > 0) {
+        m_backgroundThread.Start();
+        CancelableTask* task = NewRunnableMethod(this, &ReverbConvolver::backgroundThreadEntry);
+        m_backgroundThread.message_loop()->PostTask(FROM_HERE, task);
+    }
 }
 
 ReverbConvolver::~ReverbConvolver()
 {
     // Wait for background thread to stop
-    if (useBackgroundThreads() && m_backgroundThread) {
+    if (useBackgroundThreads() && m_backgroundThread.IsRunning()) {
         m_wantsToExit = true;
 
         // Wake up thread so it can return
         {
-            MutexLocker locker(m_backgroundThreadLock);
+            MonitorAutoLock locker(m_backgroundThreadMonitor);
             m_moreInputBuffered = true;
-            m_backgroundThreadCondition.signal();
+            locker.Notify();
         }
 
-        waitForThreadCompletion(m_backgroundThread);
+        m_backgroundThread.Stop();
     }
 }
 
 void ReverbConvolver::backgroundThreadEntry()
 {
     while (!m_wantsToExit) {
         // Wait for realtime thread to give us more input
-        m_moreInputBuffered = false;        
+        m_moreInputBuffered = false;
         {
-            MutexLocker locker(m_backgroundThreadLock);
+            MonitorAutoLock locker(m_backgroundThreadMonitor);
             while (!m_moreInputBuffered && !m_wantsToExit)
-                m_backgroundThreadCondition.wait(m_backgroundThreadLock);
+                locker.Wait();
         }
 
         // Process all of the stages until their read indices reach the input buffer's write index
         int writeIndex = m_inputBuffer.writeIndex();
 
         // Even though it doesn't seem like every stage needs to maintain its own version of readIndex 
         // we do this in case we want to run in more than one background thread.
         int readIndex;
 
         while ((readIndex = m_backgroundStages[0]->inputReadIndex()) != writeIndex) { // FIXME: do better to detect buffer overrun...
             // The ReverbConvolverStages need to process in amounts which evenly divide half the FFT size
             const int SliceSize = MinFFTSize / 2;
 
             // Accumulate contributions from each stage
-            for (size_t i = 0; i < m_backgroundStages.size(); ++i)
+            for (size_t i = 0; i < m_backgroundStages.Length(); ++i)
                 m_backgroundStages[i]->processInBackground(this, SliceSize);
         }
     }
 }
 
-void ReverbConvolver::process(const AudioChannel* sourceChannel, AudioChannel* destinationChannel, size_t framesToProcess)
+void ReverbConvolver::process(const float* sourceChannelData, size_t sourceChannelLength,
+                              float* destinationChannelData, size_t destinationChannelLength,
+                              size_t framesToProcess)
 {
-    bool isSafe = sourceChannel && destinationChannel && sourceChannel->length() >= framesToProcess && destinationChannel->length() >= framesToProcess;
-    ASSERT(isSafe);
+    bool isSafe = sourceChannelData && destinationChannelData && sourceChannelLength >= framesToProcess && destinationChannelLength >= framesToProcess;
+    MOZ_ASSERT(isSafe);
     if (!isSafe)
         return;
-        
-    const float* source = sourceChannel->data();
-    float* destination = destinationChannel->mutableData();
+
+    const float* source = sourceChannelData;
+    float* destination = destinationChannelData;
     bool isDataSafe = source && destination;
-    ASSERT(isDataSafe);
+    MOZ_ASSERT(isDataSafe);
     if (!isDataSafe)
         return;
 
     // Feed input buffer (read by all threads)
     m_inputBuffer.write(source, framesToProcess);
 
     // Accumulate contributions from each stage
-    for (size_t i = 0; i < m_stages.size(); ++i)
+    for (size_t i = 0; i < m_stages.Length(); ++i)
         m_stages[i]->process(source, framesToProcess);
 
     // Finally read from accumulation buffer
     m_accumulationBuffer.readAndClear(destination, framesToProcess);
-        
+
     // Now that we've buffered more input, wake up our background thread.
-    
+
     // Not using a MutexLocker looks strange, but we use a tryLock() instead because this is run on the real-time
     // thread where it is a disaster for the lock to be contended (causes audio glitching).  It's OK if we fail to
     // signal from time to time, since we'll get to it the next time we're called.  We're called repeatedly
     // and frequently (around every 3ms).  The background thread is processing well into the future and has a considerable amount of 
     // leeway here...
-    if (m_backgroundThreadLock.tryLock()) {
-        m_moreInputBuffered = true;
-        m_backgroundThreadCondition.signal();
-        m_backgroundThreadLock.unlock();
-    }
+    MonitorAutoLock locker(m_backgroundThreadMonitor);
+    m_moreInputBuffered = true;
+    locker.Notify();
 }
 
 void ReverbConvolver::reset()
 {
-    for (size_t i = 0; i < m_stages.size(); ++i)
+    for (size_t i = 0; i < m_stages.Length(); ++i)
         m_stages[i]->reset();
 
-    for (size_t i = 0; i < m_backgroundStages.size(); ++i)
+    for (size_t i = 0; i < m_backgroundStages.Length(); ++i)
         m_backgroundStages[i]->reset();
 
     m_accumulationBuffer.reset();
     m_inputBuffer.reset();
 }
 
 size_t ReverbConvolver::latencyFrames() const
 {
     return 0;
 }
 
 } // namespace WebCore
-
-#endif // ENABLE(WEB_AUDIO)
--- a/content/media/webaudio/blink/ReverbConvolver.h
+++ b/content/media/webaudio/blink/ReverbConvolver.h
@@ -24,72 +24,76 @@
  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifndef ReverbConvolver_h
 #define ReverbConvolver_h
 
-#include "core/platform/audio/AudioArray.h"
-#include "core/platform/audio/DirectConvolver.h"
-#include "core/platform/audio/FFTConvolver.h"
-#include "core/platform/audio/ReverbAccumulationBuffer.h"
-#include "core/platform/audio/ReverbConvolverStage.h"
-#include "core/platform/audio/ReverbInputBuffer.h"
-#include <wtf/OwnPtr.h>
-#include <wtf/RefCounted.h>
-#include <wtf/Threading.h>
-#include <wtf/Vector.h>
+#include "DirectConvolver.h"
+#include "FFTConvolver.h"
+#include "ReverbAccumulationBuffer.h"
+#include "ReverbConvolverStage.h"
+#include "ReverbInputBuffer.h"
+#include "nsAutoPtr.h"
+#include "nsTArray.h"
+#include "nsCOMPtr.h"
+#include "mozilla/Monitor.h"
+#ifdef LOG
+#undef LOG
+#endif
+#include "base/thread.h"
 
 namespace WebCore {
 
 class AudioChannel;
 
 class ReverbConvolver {
 public:
     // maxFFTSize can be adjusted (from say 2048 to 32768) depending on how much precision is necessary.
     // For certain tweaky de-convolving applications the phase errors add up quickly and lead to non-sensical results with
     // larger FFT sizes and single-precision floats.  In these cases 2048 is a good size.
     // If not doing multi-threaded convolution, then should not go > 8192.
-    ReverbConvolver(AudioChannel* impulseResponse, size_t renderSliceSize, size_t maxFFTSize, size_t convolverRenderPhase, bool useBackgroundThreads);
+    ReverbConvolver(const float* impulseResponseData, size_t impulseResponseLength, size_t renderSliceSize, size_t maxFFTSize, size_t convolverRenderPhase, bool useBackgroundThreads);
     ~ReverbConvolver();
 
-    void process(const AudioChannel* sourceChannel, AudioChannel* destinationChannel, size_t framesToProcess);
+    void process(const float* sourceChannelData, size_t sourceChannelLength,
+                 float* destinationChannelData, size_t destinationChannelLength,
+                 size_t framesToProcess);
     void reset();
 
     size_t impulseResponseLength() const { return m_impulseResponseLength; }
 
     ReverbInputBuffer* inputBuffer() { return &m_inputBuffer; }
 
     bool useBackgroundThreads() const { return m_useBackgroundThreads; }
     void backgroundThreadEntry();
 
     size_t latencyFrames() const;
 private:
-    Vector<OwnPtr<ReverbConvolverStage> > m_stages;
-    Vector<OwnPtr<ReverbConvolverStage> > m_backgroundStages;
+    nsTArray<nsAutoPtr<ReverbConvolverStage> > m_stages;
+    nsTArray<nsAutoPtr<ReverbConvolverStage> > m_backgroundStages;
     size_t m_impulseResponseLength;
 
     ReverbAccumulationBuffer m_accumulationBuffer;
 
     // One or more background threads read from this input buffer which is fed from the realtime thread.
     ReverbInputBuffer m_inputBuffer;
 
     // First stage will be of size m_minFFTSize.  Each next stage will be twice as big until we hit m_maxFFTSize.
     size_t m_minFFTSize;
     size_t m_maxFFTSize;
 
     // But don't exceed this size in the real-time thread (if we're doing background processing).
     size_t m_maxRealtimeFFTSize;
 
     // Background thread and synchronization
+    base::Thread m_backgroundThread;
+    mozilla::Monitor m_backgroundThreadMonitor;
     bool m_useBackgroundThreads;
-    ThreadIdentifier m_backgroundThread;
     bool m_wantsToExit;
     bool m_moreInputBuffered;
-    mutable Mutex m_backgroundThreadLock;
-    mutable ThreadCondition m_backgroundThreadCondition;
 };
 
 } // namespace WebCore
 
 #endif // ReverbConvolver_h
--- a/content/media/webaudio/blink/ReverbConvolverStage.cpp
+++ b/content/media/webaudio/blink/ReverbConvolverStage.cpp
@@ -21,61 +21,56 @@
  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include "config.h"
-
-#if ENABLE(WEB_AUDIO)
-
-#include "core/platform/audio/ReverbConvolverStage.h"
+#include "ReverbConvolverStage.h"
 
-#include "core/platform/audio/ReverbAccumulationBuffer.h"
-#include "core/platform/audio/ReverbConvolver.h"
-#include "core/platform/audio/ReverbInputBuffer.h"
-#include "core/platform/audio/VectorMath.h"
-#include <wtf/OwnPtr.h>
-#include <wtf/PassOwnPtr.h>
+#include "ReverbAccumulationBuffer.h"
+#include "ReverbConvolver.h"
+#include "ReverbInputBuffer.h"
+#include "mozilla/PodOperations.h"
+
+using namespace mozilla;
 
 namespace WebCore {
 
-using namespace VectorMath;
-
 ReverbConvolverStage::ReverbConvolverStage(const float* impulseResponse, size_t, size_t reverbTotalLatency, size_t stageOffset, size_t stageLength,
                                            size_t fftSize, size_t renderPhase, size_t renderSliceSize, ReverbAccumulationBuffer* accumulationBuffer, bool directMode)
     : m_accumulationBuffer(accumulationBuffer)
     , m_accumulationReadIndex(0)
     , m_inputReadIndex(0)
     , m_directMode(directMode)
 {
-    ASSERT(impulseResponse);
-    ASSERT(accumulationBuffer);
+    MOZ_ASSERT(impulseResponse);
+    MOZ_ASSERT(accumulationBuffer);
 
     if (!m_directMode) {
-        m_fftKernel = adoptPtr(new FFTFrame(fftSize));
-        m_fftKernel->doPaddedFFT(impulseResponse + stageOffset, stageLength);
-        m_fftConvolver = adoptPtr(new FFTConvolver(fftSize));
+        m_fftKernel = new FFTBlock(fftSize);
+        m_fftKernel->PerformPaddedFFT(impulseResponse + stageOffset, stageLength);
+        m_fftConvolver = new FFTConvolver(fftSize);
     } else {
-        m_directKernel = adoptPtr(new AudioFloatArray(fftSize / 2));
-        m_directKernel->copyToRange(impulseResponse + stageOffset, 0, fftSize / 2);
-        m_directConvolver = adoptPtr(new DirectConvolver(renderSliceSize));
+        m_directKernel.SetLength(fftSize / 2);
+        PodCopy(m_directKernel.Elements(), impulseResponse + stageOffset, fftSize / 2);
+        m_directConvolver = new DirectConvolver(renderSliceSize);
     }
-    m_temporaryBuffer.allocate(renderSliceSize);
+    m_temporaryBuffer.SetLength(renderSliceSize);
+    PodZero(m_temporaryBuffer.Elements(), m_temporaryBuffer.Length());
 
     // The convolution stage at offset stageOffset needs to have a corresponding delay to cancel out the offset.
     size_t totalDelay = stageOffset + reverbTotalLatency;
 
     // But, the FFT convolution itself incurs fftSize / 2 latency, so subtract this out...
     size_t halfSize = fftSize / 2;
     if (!m_directMode) {
-        ASSERT(totalDelay >= halfSize);
+        MOZ_ASSERT(totalDelay >= halfSize);
         if (totalDelay >= halfSize)
             totalDelay -= halfSize;
     }
 
     // We divide up the total delay, into pre and post delay sections so that we can schedule at exactly the moment when the FFT will happen.
     // This is coordinated with the other stages, so they don't all do their FFTs at the same time...
     int maxPreDelayLength = std::min(halfSize, totalDelay);
     m_preDelayLength = totalDelay > 0 ? renderPhase % maxPreDelayLength : 0;
@@ -83,100 +78,99 @@ ReverbConvolverStage::ReverbConvolverSta
         m_preDelayLength = 0;
 
     m_postDelayLength = totalDelay - m_preDelayLength;
     m_preReadWriteIndex = 0;
     m_framesProcessed = 0; // total frames processed so far
 
     size_t delayBufferSize = m_preDelayLength < fftSize ? fftSize : m_preDelayLength;
     delayBufferSize = delayBufferSize < renderSliceSize ? renderSliceSize : delayBufferSize;
-    m_preDelayBuffer.allocate(delayBufferSize);
+    m_preDelayBuffer.SetLength(delayBufferSize);
+    PodZero(m_preDelayBuffer.Elements(), m_preDelayBuffer.Length());
 }
 
 void ReverbConvolverStage::processInBackground(ReverbConvolver* convolver, size_t framesToProcess)
 {
     ReverbInputBuffer* inputBuffer = convolver->inputBuffer();
     float* source = inputBuffer->directReadFrom(&m_inputReadIndex, framesToProcess);
     process(source, framesToProcess);
 }
 
 void ReverbConvolverStage::process(const float* source, size_t framesToProcess)
 {
-    ASSERT(source);
+    MOZ_ASSERT(source);
     if (!source)
         return;
     
     // Deal with pre-delay stream : note special handling of zero delay.
 
     const float* preDelayedSource;
     float* preDelayedDestination;
     float* temporaryBuffer;
     bool isTemporaryBufferSafe = false;
     if (m_preDelayLength > 0) {
         // Handles both the read case (call to process() ) and the write case (memcpy() )
-        bool isPreDelaySafe = m_preReadWriteIndex + framesToProcess <= m_preDelayBuffer.size();
-        ASSERT(isPreDelaySafe);
+        bool isPreDelaySafe = m_preReadWriteIndex + framesToProcess <= m_preDelayBuffer.Length();
+        MOZ_ASSERT(isPreDelaySafe);
         if (!isPreDelaySafe)
             return;
 
-        isTemporaryBufferSafe = framesToProcess <= m_temporaryBuffer.size();
+        isTemporaryBufferSafe = framesToProcess <= m_temporaryBuffer.Length();
 
-        preDelayedDestination = m_preDelayBuffer.data() + m_preReadWriteIndex;
+        preDelayedDestination = m_preDelayBuffer.Elements() + m_preReadWriteIndex;
         preDelayedSource = preDelayedDestination;
-        temporaryBuffer = m_temporaryBuffer.data();        
+        temporaryBuffer = m_temporaryBuffer.Elements();
     } else {
         // Zero delay
         preDelayedDestination = 0;
         preDelayedSource = source;
-        temporaryBuffer = m_preDelayBuffer.data();
+        temporaryBuffer = m_preDelayBuffer.Elements();
         
-        isTemporaryBufferSafe = framesToProcess <= m_preDelayBuffer.size();
+        isTemporaryBufferSafe = framesToProcess <= m_preDelayBuffer.Length();
     }
     
-    ASSERT(isTemporaryBufferSafe);
+    MOZ_ASSERT(isTemporaryBufferSafe);
     if (!isTemporaryBufferSafe)
         return;
 
     if (m_framesProcessed < m_preDelayLength) {
         // For the first m_preDelayLength frames don't process the convolver, instead simply buffer in the pre-delay.
         // But while buffering the pre-delay, we still need to update our index.
         m_accumulationBuffer->updateReadIndex(&m_accumulationReadIndex, framesToProcess);
     } else {
         // Now, run the convolution (into the delay buffer).
         // An expensive FFT will happen every fftSize / 2 frames.
         // We process in-place here...
         if (!m_directMode)
-            m_fftConvolver->process(m_fftKernel.get(), preDelayedSource, temporaryBuffer, framesToProcess);
+            m_fftConvolver->process(m_fftKernel, preDelayedSource, temporaryBuffer, framesToProcess);
         else
-            m_directConvolver->process(m_directKernel.get(), preDelayedSource, temporaryBuffer, framesToProcess);
+            m_directConvolver->process(&m_directKernel, preDelayedSource, temporaryBuffer, framesToProcess);
 
         // Now accumulate into reverb's accumulation buffer.
         m_accumulationBuffer->accumulate(temporaryBuffer, framesToProcess, &m_accumulationReadIndex, m_postDelayLength);
     }
 
     // Finally copy input to pre-delay.
     if (m_preDelayLength > 0) {
         memcpy(preDelayedDestination, source, sizeof(float) * framesToProcess);
         m_preReadWriteIndex += framesToProcess;
 
-        ASSERT(m_preReadWriteIndex <= m_preDelayLength);
+        MOZ_ASSERT(m_preReadWriteIndex <= m_preDelayLength);
         if (m_preReadWriteIndex >= m_preDelayLength)
             m_preReadWriteIndex = 0;
     }
 
     m_framesProcessed += framesToProcess;
 }
 
 void ReverbConvolverStage::reset()
 {
     if (!m_directMode)
         m_fftConvolver->reset();
     else
         m_directConvolver->reset();
-    m_preDelayBuffer.zero();
+    PodZero(m_preDelayBuffer.Elements(), m_preDelayBuffer.Length());
     m_accumulationReadIndex = 0;
     m_inputReadIndex = 0;
     m_framesProcessed = 0;
 }
 
 } // namespace WebCore
-
-#endif // ENABLE(WEB_AUDIO)
--- a/content/media/webaudio/blink/ReverbConvolverStage.h
+++ b/content/media/webaudio/blink/ReverbConvolverStage.h
@@ -24,27 +24,28 @@
  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifndef ReverbConvolverStage_h
 #define ReverbConvolverStage_h
 
-#include "core/platform/audio/AudioArray.h"
-#include "core/platform/audio/FFTFrame.h"
-#include <wtf/OwnPtr.h>
+#include "nsTArray.h"
+#include "mozilla/FFTBlock.h"
 
 namespace WebCore {
 
+using mozilla::FFTBlock;
+
 class ReverbAccumulationBuffer;
 class ReverbConvolver;
 class FFTConvolver;
 class DirectConvolver;
-    
+
 // A ReverbConvolverStage represents the convolution associated with a sub-section of a large impulse response.
 // It incorporates a delay line to account for the offset of the sub-section within the larger impulse response.
 class ReverbConvolverStage {
 public:
     // renderPhase is useful to know so that we can manipulate the pre versus post delay so that stages will perform
     // their heavy work (FFT processing) on different slices to balance the load in a real-time thread.
     ReverbConvolverStage(const float* impulseResponse, size_t responseLength, size_t reverbTotalLatency, size_t stageOffset, size_t stageLength, size_t fftSize, size_t renderPhase, size_t renderSliceSize, ReverbAccumulationBuffer*, bool directMode = false);
 
@@ -54,32 +55,32 @@ public:
     void processInBackground(ReverbConvolver* convolver, size_t framesToProcess);
 
     void reset();
 
     // Useful for background processing
     int inputReadIndex() const { return m_inputReadIndex; }
 
 private:
-    OwnPtr<FFTFrame> m_fftKernel;
-    OwnPtr<FFTConvolver> m_fftConvolver;
+    nsAutoPtr<FFTBlock> m_fftKernel;
+    nsAutoPtr<FFTConvolver> m_fftConvolver;
 
-    AudioFloatArray m_preDelayBuffer;
+    nsTArray<float> m_preDelayBuffer;
 
     ReverbAccumulationBuffer* m_accumulationBuffer;
     int m_accumulationReadIndex;
     int m_inputReadIndex;
 
     size_t m_preDelayLength;
     size_t m_postDelayLength;
     size_t m_preReadWriteIndex;
     size_t m_framesProcessed;
 
-    AudioFloatArray m_temporaryBuffer;
+    nsTArray<float> m_temporaryBuffer;
 
     bool m_directMode;
-    OwnPtr<AudioFloatArray> m_directKernel;
-    OwnPtr<DirectConvolver> m_directConvolver;
+    nsTArray<float> m_directKernel;
+    nsAutoPtr<DirectConvolver> m_directConvolver;
 };
 
 } // namespace WebCore
 
 #endif // ReverbConvolverStage_h
--- a/content/media/webaudio/blink/ReverbInputBuffer.cpp
+++ b/content/media/webaudio/blink/ReverbInputBuffer.cpp
@@ -21,69 +21,67 @@
  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include "config.h"
+#include "ReverbInputBuffer.h"
+#include "mozilla/PodOperations.h"
 
-#if ENABLE(WEB_AUDIO)
-
-#include "core/platform/audio/ReverbInputBuffer.h"
+using namespace mozilla;
 
 namespace WebCore {
 
 ReverbInputBuffer::ReverbInputBuffer(size_t length)
-    : m_buffer(length)
-    , m_writeIndex(0)
+    : m_writeIndex(0)
 {
+  m_buffer.SetLength(length);
+  PodZero(m_buffer.Elements(), length);
 }
 
 void ReverbInputBuffer::write(const float* sourceP, size_t numberOfFrames)
 {
-    size_t bufferLength = m_buffer.size();
+    size_t bufferLength = m_buffer.Length();
     bool isCopySafe = m_writeIndex + numberOfFrames <= bufferLength;
-    ASSERT(isCopySafe);
+    MOZ_ASSERT(isCopySafe);
     if (!isCopySafe)
         return;
-        
-    memcpy(m_buffer.data() + m_writeIndex, sourceP, sizeof(float) * numberOfFrames);
+
+    memcpy(m_buffer.Elements() + m_writeIndex, sourceP, sizeof(float) * numberOfFrames);
 
     m_writeIndex += numberOfFrames;
-    ASSERT(m_writeIndex <= bufferLength);
+    MOZ_ASSERT(m_writeIndex <= bufferLength);
 
     if (m_writeIndex >= bufferLength)
         m_writeIndex = 0;
 }
 
 float* ReverbInputBuffer::directReadFrom(int* readIndex, size_t numberOfFrames)
 {
-    size_t bufferLength = m_buffer.size();
+    size_t bufferLength = m_buffer.Length();
     bool isPointerGood = readIndex && *readIndex >= 0 && *readIndex + numberOfFrames <= bufferLength;
-    ASSERT(isPointerGood);
+    MOZ_ASSERT(isPointerGood);
     if (!isPointerGood) {
         // Should never happen in practice but return pointer to start of buffer (avoid crash)
         if (readIndex)
             *readIndex = 0;
-        return m_buffer.data();
+        return m_buffer.Elements();
     }
-        
-    float* sourceP = m_buffer.data();
+
+    float* sourceP = m_buffer.Elements();
     float* p = sourceP + *readIndex;
 
     // Update readIndex
     *readIndex = (*readIndex + numberOfFrames) % bufferLength;
 
     return p;
 }
 
 void ReverbInputBuffer::reset()
 {
-    m_buffer.zero();
+    PodZero(m_buffer.Elements(), m_buffer.Length());
     m_writeIndex = 0;
 }
 
 } // namespace WebCore
-
-#endif // ENABLE(WEB_AUDIO)
--- a/content/media/webaudio/blink/ReverbInputBuffer.h
+++ b/content/media/webaudio/blink/ReverbInputBuffer.h
@@ -24,17 +24,17 @@
  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifndef ReverbInputBuffer_h
 #define ReverbInputBuffer_h
 
-#include "core/platform/audio/AudioArray.h"
+#include "nsTArray.h"
 
 namespace WebCore {
 
 // ReverbInputBuffer is used to buffer input samples for deferred processing by the background threads.
 class ReverbInputBuffer {
 public:
     ReverbInputBuffer(size_t length);
 
@@ -50,15 +50,15 @@ public:
     // readIndex is updated with the next readIndex to read from...
     // The assumption is that the buffer's length is evenly divisible by numberOfFrames.
     // FIXME: remove numberOfFrames restriction...
     float* directReadFrom(int* readIndex, size_t numberOfFrames);
 
     void reset();
 
 private:
-    AudioFloatArray m_buffer;
+    nsTArray<float> m_buffer;
     size_t m_writeIndex;
 };
 
 } // namespace WebCore
 
 #endif // ReverbInputBuffer_h
--- a/content/media/webaudio/blink/moz.build
+++ b/content/media/webaudio/blink/moz.build
@@ -3,13 +3,20 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 MODULE = 'content'
 
 CPP_SOURCES += [
     'Biquad.cpp',
+    'DirectConvolver.cpp',
     'DynamicsCompressor.cpp',
     'DynamicsCompressorKernel.cpp',
+    'FFTConvolver.cpp',
+    'Reverb.cpp',
+    'ReverbAccumulationBuffer.cpp',
+    'ReverbConvolver.cpp',
+    'ReverbConvolverStage.cpp',
+    'ReverbInputBuffer.cpp',
     'ZeroPole.cpp',
 ]
 
--- a/content/media/webaudio/moz.build
+++ b/content/media/webaudio/moz.build
@@ -12,16 +12,20 @@ MODULE = 'content'
 
 EXPORTS += [
     'AudioParamTimeline.h',
     'MediaBufferDecoder.h',
     'ThreeDPoint.h',
     'WebAudioUtils.h',
 ]
 
+EXPORTS.mozilla += [
+    'FFTBlock.h',
+]
+
 EXPORTS.mozilla.dom += [
     'AnalyserNode.h',
     'AudioBuffer.h',
     'AudioBufferSourceNode.h',
     'AudioContext.h',
     'AudioDestinationNode.h',
     'AudioListener.h',
     'AudioNode.h',