Bug 815643 - Part 5: Implement ConvolverNode's processing based on the Blink implementation; r=roc
author Ehsan Akhgari <ehsan@mozilla.com>
Mon, 10 Jun 2013 16:09:12 -0400
changeset 134589 90c849ba5baf3baace590f9e8657022753feac50
parent 134588 b7efc129d2b1a487e5aa79eede28cc9c4dfb92a9
child 134590 22d7a1784228280fc0a3e6bb326fb305e8e1ac9c
push id 29285
push user eakhgari@mozilla.com
push date Tue, 11 Jun 2013 00:10:14 +0000
treeherder mozilla-inbound@63386b71d1b5 [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers roc
bugs 815643
milestone 24.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 815643 - Part 5: Implement ConvolverNode's processing based on the Blink implementation; r=roc
content/media/webaudio/ConvolverNode.cpp
--- a/content/media/webaudio/ConvolverNode.cpp
+++ b/content/media/webaudio/ConvolverNode.cpp
@@ -3,16 +3,17 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "ConvolverNode.h"
 #include "mozilla/dom/ConvolverNodeBinding.h"
 #include "AudioNodeEngine.h"
 #include "AudioNodeStream.h"
+#include "blink/Reverb.h"
 
 #include <cmath>
 #include "nsMathUtils.h"
 
 namespace mozilla {
 namespace dom {
 
 NS_IMPL_CYCLE_COLLECTION_INHERITED_1(ConvolverNode, AudioNode, mBuffer)
@@ -23,49 +24,133 @@ NS_INTERFACE_MAP_END_INHERITING(AudioNod
 NS_IMPL_ADDREF_INHERITED(ConvolverNode, AudioNode)
 NS_IMPL_RELEASE_INHERITED(ConvolverNode, AudioNode)
 
 class ConvolverNodeEngine : public AudioNodeEngine
 {
 public:
   ConvolverNodeEngine(AudioNode* aNode, bool aNormalize)
     : AudioNodeEngine(aNode)
+    , mBufferLength(0)
+    , mSampleRate(0.0f)
+    , mUseBackgroundThreads(!aNode->Context()->IsOffline())
     , mNormalize(aNormalize)
+    , mSeenInput(false)
   {
   }
 
   enum Parameters {
+    BUFFER_LENGTH,
+    SAMPLE_RATE,
     NORMALIZE
   };
   virtual void SetInt32Parameter(uint32_t aIndex, int32_t aParam) MOZ_OVERRIDE
   {
     switch (aIndex) {
+    case BUFFER_LENGTH:
+      // BUFFER_LENGTH is the first parameter that we set when setting a new buffer,
+      // so we should be careful to invalidate the rest of our state here.
+      mBuffer = nullptr;
+      mSampleRate = 0.0f;
+      mBufferLength = aParam;
+      break;
+    case SAMPLE_RATE:
+      mSampleRate = aParam;
+      break;
     case NORMALIZE:
       mNormalize = !!aParam;
       break;
     default:
       NS_ERROR("Bad ConvolverNodeEngine Int32Parameter");
     }
   }
+  virtual void SetDoubleParameter(uint32_t aIndex, double aParam) MOZ_OVERRIDE
+  {
+    switch (aIndex) {
+    case SAMPLE_RATE:
+      mSampleRate = aParam;
+      AdjustReverb();
+      break;
+    default:
+      NS_ERROR("Bad ConvolverNodeEngine DoubleParameter");
+    }
+  }
   virtual void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer)
   {
     mBuffer = aBuffer;
+    AdjustReverb();
+  }
+
+  void AdjustReverb()
+  {
+    // Note about empirical tuning (this is copied from Blink)
+    // The maximum FFT size affects reverb performance and accuracy.
+    // If the reverb is single-threaded and processes entirely in the real-time audio thread,
+    // it's important not to make this too high.  In this case 8192 is a good value.
+    // But, the Reverb object is multi-threaded, so we want this as high as possible without losing too much accuracy.
+    // Very large FFTs will have worse phase errors. Given these constraints 32768 is a good compromise.
+    const size_t MaxFFTSize = 32768;
+
+    if (!mBuffer || !mBufferLength || !mSampleRate) {
+      mReverb = nullptr;
+      mSeenInput = false;
+      return;
+    }
+
+    mReverb = new WebCore::Reverb(mBuffer, mBufferLength,
+                                  WEBAUDIO_BLOCK_SIZE,
+                                  MaxFFTSize, 2, mUseBackgroundThreads,
+                                  mNormalize, mSampleRate);
   }
 
   virtual void ProduceAudioBlock(AudioNodeStream* aStream,
                                  const AudioChunk& aInput,
                                  AudioChunk* aOutput,
                                  bool* aFinished)
   {
-    *aOutput = aInput;
+    if (!mSeenInput && aInput.IsNull()) {
+      aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
+      return;
+    }
+    if (!mReverb) {
+      *aOutput = aInput;
+      return;
+    }
+
+    mSeenInput = true;
+    uint32_t numChannels = 2;
+    AudioChunk input = aInput;
+    if (aInput.IsNull()) {
+      AllocateAudioBlock(1, &input);
+      WriteZeroesToAudioBlock(&input, 0, WEBAUDIO_BLOCK_SIZE);
+    } else if (aInput.mVolume != 1.0f) {
+      // Pre-multiply the input's volume
+      numChannels = aInput.mChannelData.Length();
+      AllocateAudioBlock(numChannels, &input);
+      for (uint32_t i = 0; i < numChannels; ++i) {
+        const float* src = static_cast<const float*>(aInput.mChannelData[i]);
+        float* dest = static_cast<float*>(const_cast<void*>(input.mChannelData[i]));
+        AudioBlockAddChannelWithScale(src, aInput.mVolume, dest);
+      }
+    } else {
+      numChannels = aInput.mChannelData.Length();
+    }
+    AllocateAudioBlock(numChannels, aOutput);
+
+    mReverb->process(&input, aOutput, WEBAUDIO_BLOCK_SIZE);
   }
 
 private:
   nsRefPtr<ThreadSharedFloatArrayBufferList> mBuffer;
+  nsAutoPtr<WebCore::Reverb> mReverb;
+  int32_t mBufferLength;
+  float mSampleRate;
+  bool mUseBackgroundThreads;
   bool mNormalize;
+  bool mSeenInput;
 };
 
 ConvolverNode::ConvolverNode(AudioContext* aContext)
   : AudioNode(aContext,
               2,
               ChannelCountMode::Clamped_max,
               ChannelInterpretation::Speakers)
   , mNormalize(true)
@@ -95,18 +180,37 @@ ConvolverNode::SetBuffer(JSContext* aCx,
   }
 
   mBuffer = aBuffer;
 
   // Send the buffer to the stream
   AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get());
   MOZ_ASSERT(ns, "Why don't we have a stream here?");
   if (mBuffer) {
+    uint32_t length = mBuffer->Length();
     nsRefPtr<ThreadSharedFloatArrayBufferList> data =
       mBuffer->GetThreadSharedChannelsForRate(aCx);
+    if (length < WEBAUDIO_BLOCK_SIZE) {
+      // For very small impulse response buffers, we need to pad the
+      // buffer with 0 to make sure that the Reverb implementation
+      // has enough data to compute FFTs from.
+      length = WEBAUDIO_BLOCK_SIZE;
+      nsRefPtr<ThreadSharedFloatArrayBufferList> paddedBuffer =
+        new ThreadSharedFloatArrayBufferList(data->GetChannels());
+      float* channelData = (float*) malloc(sizeof(float) * length * data->GetChannels());
+      for (uint32_t i = 0; i < data->GetChannels(); ++i) {
+        PodCopy(channelData + length * i, data->GetData(i), mBuffer->Length());
+        PodZero(channelData + length * i + mBuffer->Length(), WEBAUDIO_BLOCK_SIZE - mBuffer->Length());
+        paddedBuffer->SetData(i, (i == 0) ? channelData : nullptr, channelData);
+      }
+      data = paddedBuffer;
+    }
+    SendInt32ParameterToStream(ConvolverNodeEngine::BUFFER_LENGTH, length);
+    SendDoubleParameterToStream(ConvolverNodeEngine::SAMPLE_RATE,
+                                mBuffer->SampleRate());
     ns->SetBuffer(data.forget());
   } else {
     ns->SetBuffer(nullptr);
   }
 }
 
 void
 ConvolverNode::SetNormalize(bool aNormalize)