b=815643 Refactor DelayNodeEngine delay processing into a shareable class r=ehsan
authorKarl Tomlinson <karlt+@karlt.net>
Fri, 09 Aug 2013 10:07:49 +1200
changeset 142365 d92240f69c48
parent 142364 7ab5c4babe56
child 142366 746b2ba6cf30
push id 32374
push user ktomlinson@mozilla.com
push date Tue, 13 Aug 2013 02:49:14 +0000
treeherder mozilla-inbound@62ad090a94a4 [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers ehsan
bugs 815643
milestone 26.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
b=815643 Refactor DelayNodeEngine delay processing into a shareable class r=ehsan
content/media/webaudio/DelayNode.cpp
content/media/webaudio/DelayProcessor.cpp
content/media/webaudio/DelayProcessor.h
content/media/webaudio/moz.build
--- a/content/media/webaudio/DelayNode.cpp
+++ b/content/media/webaudio/DelayNode.cpp
@@ -5,16 +5,17 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "DelayNode.h"
 #include "mozilla/dom/DelayNodeBinding.h"
 #include "AudioNodeEngine.h"
 #include "AudioNodeStream.h"
 #include "AudioDestinationNode.h"
 #include "WebAudioUtils.h"
+#include "DelayProcessor.h"
 
 namespace mozilla {
 namespace dom {
 
 NS_IMPL_CYCLE_COLLECTION_INHERITED_1(DelayNode, AudioNode,
                                      mDelay)
 
 NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(DelayNode)
@@ -22,101 +23,71 @@ NS_INTERFACE_MAP_END_INHERITING(AudioNod
 
 NS_IMPL_ADDREF_INHERITED(DelayNode, AudioNode)
 NS_IMPL_RELEASE_INHERITED(DelayNode, AudioNode)
 
 class DelayNodeEngine : public AudioNodeEngine
 {
   typedef PlayingRefChangeHandler<DelayNode> PlayingRefChanged;
 public:
-  DelayNodeEngine(AudioNode* aNode, AudioDestinationNode* aDestination)
+  DelayNodeEngine(AudioNode* aNode, AudioDestinationNode* aDestination,
+                  int aMaxDelayFrames)
     : AudioNodeEngine(aNode)
     , mSource(nullptr)
     , mDestination(static_cast<AudioNodeStream*> (aDestination->Stream()))
     // Keep the default value in sync with the default value in DelayNode::DelayNode.
     , mDelay(0.f)
-    , mMaxDelay(0.)
-    , mWriteIndex(0)
+    // Use a smoothing range of 20ms
+    , mProcessor(aMaxDelayFrames,
+                 WebAudioUtils::ComputeSmoothingRate(0.02,
+                                                     mDestination->SampleRate()))
     , mLeftOverData(INT32_MIN)
-    , mCurrentDelayTime(0.)
   {
   }
 
   void SetSourceStream(AudioNodeStream* aSource)
   {
     mSource = aSource;
   }
 
   enum Parameters {
     DELAY,
-    MAX_DELAY
   };
   void SetTimelineParameter(uint32_t aIndex,
                             const AudioParamTimeline& aValue,
                             TrackRate aSampleRate) MOZ_OVERRIDE
   {
     switch (aIndex) {
     case DELAY:
       MOZ_ASSERT(mSource && mDestination);
       mDelay = aValue;
       WebAudioUtils::ConvertAudioParamToTicks(mDelay, mSource, mDestination);
       break;
     default:
       NS_ERROR("Bad DelayNodeEngine TimelineParameter");
     }
   }
-  void SetDoubleParameter(uint32_t aIndex, double aValue) MOZ_OVERRIDE
-  {
-    switch (aIndex) {
-    case MAX_DELAY: mMaxDelay = aValue; break;
-    default:
-      NS_ERROR("Bad DelayNodeEngine DoubleParameter");
-    }
-  }
-
-  bool EnsureBuffer(uint32_t aNumberOfChannels, TrackRate aSampleRate)
-  {
-    if (aNumberOfChannels == 0) {
-      return false;
-    }
-    if (mBuffer.Length() == 0) {
-      if (!mBuffer.SetLength(aNumberOfChannels)) {
-        return false;
-      }
-      const int32_t numFrames = ceil(mMaxDelay * aSampleRate);
-      for (uint32_t channel = 0; channel < aNumberOfChannels; ++channel) {
-        if (!mBuffer[channel].SetLength(numFrames)) {
-          return false;
-        }
-        memset(mBuffer[channel].Elements(), 0, numFrames * sizeof(float));
-      }
-    } else if (mBuffer.Length() != aNumberOfChannels) {
-      // TODO: Handle changes in the channel count
-      return false;
-    }
-    return true;
-  }
 
   virtual void ProduceAudioBlock(AudioNodeStream* aStream,
                                  const AudioChunk& aInput,
                                  AudioChunk* aOutput,
                                  bool* aFinished)
   {
     MOZ_ASSERT(mSource == aStream, "Invalid source stream");
+    MOZ_ASSERT(aStream->SampleRate() == mDestination->SampleRate());
 
-    const bool firstTime = !!!mBuffer.Length();
     const uint32_t numChannels = aInput.IsNull() ?
-                                 mBuffer.Length() :
+                                 mProcessor.BufferChannelCount() :
                                  aInput.mChannelData.Length();
 
     bool playedBackAllLeftOvers = false;
-    if (!mBuffer.IsEmpty() &&
+    if (mProcessor.BufferChannelCount() &&
         mLeftOverData == INT32_MIN &&
         aStream->AllInputsFinished()) {
-      mLeftOverData = static_cast<int32_t>(mCurrentDelayTime * aStream->SampleRate()) - WEBAUDIO_BLOCK_SIZE;
+      mLeftOverData = mProcessor.CurrentDelayFrames() - WEBAUDIO_BLOCK_SIZE;
 
       if (mLeftOverData > 0) {
         nsRefPtr<PlayingRefChanged> refchanged =
           new PlayingRefChanged(aStream, PlayingRefChanged::ADDREF);
         NS_DispatchToMainThread(refchanged);
       }
     } else if (mLeftOverData != INT32_MIN) {
       mLeftOverData -= WEBAUDIO_BLOCK_SIZE;
@@ -128,139 +99,81 @@ public:
         playedBackAllLeftOvers = true;
 
         nsRefPtr<PlayingRefChanged> refchanged =
           new PlayingRefChanged(aStream, PlayingRefChanged::RELEASE);
         NS_DispatchToMainThread(refchanged);
       }
     }
 
-    if (!EnsureBuffer(numChannels, aStream->SampleRate())) {
-      aOutput->SetNull(0);
-      return;
-    }
-
     AllocateAudioBlock(numChannels, aOutput);
 
-    double delayTime = 0;
-    double computedDelay[WEBAUDIO_BLOCK_SIZE];
-    // Use a smoothing range of 20ms
-    const double smoothingRate = WebAudioUtils::ComputeSmoothingRate(0.02, aStream->SampleRate());
-
-    if (mDelay.HasSimpleValue()) {
-      delayTime = std::max(0.0, std::min(mMaxDelay, double(mDelay.GetValue())));
-      if (firstTime) {
-        // Initialize this only the first time to make sure that mCurrentDelayTime
-        // has a valid value when we try to change the delay time further below.
-        mCurrentDelayTime = delayTime;
-      }
-    } else {
-      // Compute the delay values for the duration of the input AudioChunk
-      TrackTicks tick = aStream->GetCurrentPosition();
-      for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) {
-        computedDelay[counter] = std::max(0.0, std::min(mMaxDelay,
-                                   double(mDelay.GetValueAtTime(tick, counter))));
+    AudioChunk input = aInput;
+    if (!aInput.IsNull() && aInput.mVolume != 1.0f) {
+      // Pre-multiply the input's volume
+      AllocateAudioBlock(numChannels, &input);
+      for (uint32_t i = 0; i < numChannels; ++i) {
+        const float* src = static_cast<const float*>(aInput.mChannelData[i]);
+        float* dest = static_cast<float*>(const_cast<void*>(input.mChannelData[i]));
+        AudioBlockCopyChannelWithScale(src, aInput.mVolume, dest);
       }
     }
 
-    for (uint32_t channel = 0; channel < numChannels; ++channel) {
-      double currentDelayTime = mCurrentDelayTime;
-      uint32_t writeIndex = mWriteIndex;
-
-      float* buffer = mBuffer[channel].Elements();
-      const uint32_t bufferLength = mBuffer[channel].Length();
-      const float* input = static_cast<const float*>(aInput.mChannelData.SafeElementAt(channel));
-      float* output = static_cast<float*>(const_cast<void*>(aOutput->mChannelData[channel]));
-
-      for (uint32_t i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) {
-        if (mDelay.HasSimpleValue()) {
-          // If the simple value has changed, smoothly approach it
-          currentDelayTime += (delayTime - currentDelayTime) * smoothingRate;
-        } else {
-          currentDelayTime = computedDelay[i];
-        }
-
-        // Write the input sample to the correct location in our buffer
-        if (input) {
-          buffer[writeIndex] = input[i] * aInput.mVolume;
-        }
+    const float* const* inputChannels = input.IsNull() ? nullptr :
+      reinterpret_cast<const float* const*>(input.mChannelData.Elements());
+    float* const* outputChannels = reinterpret_cast<float* const*>
+      (const_cast<void* const*>(aOutput->mChannelData.Elements()));
 
-        // Now, determine the correct read position.  We adjust the read position to be
-        // from currentDelayTime seconds in the past.  We also interpolate the two input
-        // frames in case the read position does not match an integer index.
-        double readPosition = writeIndex + bufferLength -
-                              (currentDelayTime * aStream->SampleRate());
-        if (readPosition >= bufferLength) {
-          readPosition -= bufferLength;
-        }
-        MOZ_ASSERT(readPosition >= 0.0, "Why are we reading before the beginning of the buffer?");
+    double sampleRate = aStream->SampleRate();
+    if (mDelay.HasSimpleValue()) {
+      double delayFrames = mDelay.GetValue() * sampleRate;
+      mProcessor.Process(delayFrames, inputChannels, outputChannels,
+                         numChannels, WEBAUDIO_BLOCK_SIZE);
+    } else {
+      // Compute the delay values for the duration of the input AudioChunk
+      double computedDelay[WEBAUDIO_BLOCK_SIZE];
+      TrackTicks tick = aStream->GetCurrentPosition();
+      for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) {
+        computedDelay[counter] =
+          mDelay.GetValueAtTime(tick, counter) * sampleRate;
+      }
+      mProcessor.Process(computedDelay, inputChannels, outputChannels,
+                         numChannels, WEBAUDIO_BLOCK_SIZE);
+    }
 
-        // Here is a the reason why readIndex1 and readIndex will never be out
-        // of bounds.  The maximum value for bufferLength is 180 * 48000 (see
-        // AudioContext::CreateDelay).  The maximum value for mCurrentDelay is
-        // 180.0, so initially readPosition cannot be more than bufferLength +
-        // a fraction less than 1.  Then we take care of that case by
-        // subtracting bufferLength from it if needed.  So, if
-        // |bufferLength-readPosition<1.0|, readIndex1 will end up being zero.
-        // If |1.0<=bufferLength-readPosition<2.0|, readIndex1 will be
-        // bufferLength-1 and readIndex2 will be 0.
-        int readIndex1 = int(readPosition);
-        int readIndex2 = (readIndex1 + 1) % bufferLength;
-        double interpolationFactor = readPosition - readIndex1;
-
-        output[i] = (1.0 - interpolationFactor) * buffer[readIndex1] +
-                           interpolationFactor  * buffer[readIndex2];
-        writeIndex = (writeIndex + 1) % bufferLength;
-      }
-
-      // Remember currentDelayTime and writeIndex for the next ProduceAudioBlock
-      // call when processing the last channel.
-      if (channel == numChannels - 1) {
-        mCurrentDelayTime = currentDelayTime;
-        mWriteIndex = writeIndex;
-      }
-    }
 
     if (playedBackAllLeftOvers) {
       // Delete our buffered data once we no longer need it
-      mBuffer.Clear();
+      mProcessor.Reset();
     }
   }
 
   AudioNodeStream* mSource;
   AudioNodeStream* mDestination;
   AudioParamTimeline mDelay;
-  // Maximum delay time in seconds
-  double mMaxDelay;
-  // Circular buffer for capturing delayed samples.
-  AutoFallibleTArray<FallibleTArray<float>, 2> mBuffer;
-  // Write index for the buffer, to write the frames to the correct index of the buffer
-  // given the current delay.
-  uint32_t mWriteIndex;
+  DelayProcessor mProcessor;
   // How much data we have in our buffer which needs to be flushed out when our inputs
   // finish.
   int32_t mLeftOverData;
-  // Current delay time, in seconds
-  double mCurrentDelayTime;
 };
 
 DelayNode::DelayNode(AudioContext* aContext, double aMaxDelay)
   : AudioNode(aContext,
               2,
               ChannelCountMode::Max,
               ChannelInterpretation::Speakers)
   , mMediaStreamGraphUpdateIndexAtLastInputConnection(0)
   , mDelay(new AudioParam(MOZ_THIS_IN_INITIALIZER_LIST(),
                           SendDelayToStream, 0.0f))
 {
-  DelayNodeEngine* engine = new DelayNodeEngine(this, aContext->Destination());
+  DelayNodeEngine* engine =
+    new DelayNodeEngine(this, aContext->Destination(),
+                        ceil(aContext->SampleRate() * aMaxDelay));
   mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
   engine->SetSourceStream(static_cast<AudioNodeStream*> (mStream.get()));
-  AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get());
-  ns->SetDoubleParameter(DelayNodeEngine::MAX_DELAY, aMaxDelay);
 }
 
 JSObject*
 DelayNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aScope)
 {
   return DelayNodeBinding::Wrap(aCx, aScope, this);
 }
 
new file mode 100644
--- /dev/null
+++ b/content/media/webaudio/DelayProcessor.cpp
@@ -0,0 +1,126 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "DelayProcessor.h"
+
+#include "mozilla/PodOperations.h"
+#include "AudioSegment.h"
+
+namespace mozilla {
+
+void
+DelayProcessor::Process(const double *aPerFrameDelays,
+                        const float* const* aInputChannels,
+                        float* const* aOutputChannels,
+                        int aChannelCount, int aFramesToProcess)
+{
+  if (!EnsureBuffer(aChannelCount)) {
+    for (int channel = 0; channel < aChannelCount; ++channel) {
+      PodZero(aOutputChannels[channel], aFramesToProcess);
+    }
+    return;
+  }
+
+  for (int channel = 0; channel < aChannelCount; ++channel) {
+    double currentDelayFrames = mCurrentDelay;
+    int writeIndex = mWriteIndex;
+
+    float* buffer = mBuffer[channel].Elements();
+    const uint32_t bufferLength = mBuffer[channel].Length();
+    const float* input = aInputChannels ? aInputChannels[channel] : nullptr;
+    float* output = aOutputChannels[channel];
+
+    for (int i = 0; i < aFramesToProcess; ++i) {
+      currentDelayFrames = clamped(aPerFrameDelays[i],
+                                   0.0, static_cast<double>(mMaxDelayFrames));
+
+      // Write the input sample to the correct location in our buffer
+      if (input) {
+        buffer[writeIndex] = input[i];
+      }
+
+      // Now, determine the correct read position.  We adjust the read position to be
+      // from currentDelayFrames frames in the past.  We also interpolate the two input
+      // frames in case the read position does not match an integer index.
+      double readPosition = writeIndex + bufferLength - currentDelayFrames;
+      if (readPosition >= bufferLength) {
+        readPosition -= bufferLength;
+      }
+      MOZ_ASSERT(readPosition >= 0.0, "Why are we reading before the beginning of the buffer?");
+
+      // Here is the reason why readIndex1 and readIndex2 will never be out
+      // of bounds.  The maximum value for bufferLength is 180 * 48000 (see
+      // AudioContext::CreateDelay).  currentDelayFrames is clamped to at most
+      // mMaxDelayFrames (== bufferLength), so initially readPosition cannot be more than bufferLength +
+      // a fraction less than 1.  Then we take care of that case by
+      // subtracting bufferLength from it if needed.  So, if
+      // |bufferLength-readPosition<1.0|, readIndex1 will end up being zero.
+      // If |1.0<=bufferLength-readPosition<2.0|, readIndex1 will be
+      // bufferLength-1 and readIndex2 will be 0.
+      int readIndex1 = int(readPosition);
+      int readIndex2 = (readIndex1 + 1) % bufferLength;
+      double interpolationFactor = readPosition - readIndex1;
+
+      output[i] = (1.0 - interpolationFactor) * buffer[readIndex1] +
+                         interpolationFactor  * buffer[readIndex2];
+      writeIndex = (writeIndex + 1) % bufferLength;
+    }
+
+    // Remember currentDelayFrames and writeIndex for the next Process
+    // call when processing the last channel.
+    if (channel == aChannelCount - 1) {
+      mCurrentDelay = currentDelayFrames;
+      mWriteIndex = writeIndex;
+    }
+  }
+}
+
+void
+DelayProcessor::Process(double aDelayFrames, const float* const* aInputChannels,
+                        float* const* aOutputChannels, int aChannelCount,
+                        int aFramesToProcess)
+{
+  const bool firstTime = !mBuffer.Length();
+  double currentDelay = firstTime ? aDelayFrames : mCurrentDelay;
+
+  nsAutoTArray<double, WEBAUDIO_BLOCK_SIZE> computedDelay;
+  computedDelay.SetLength(aFramesToProcess);
+
+  for (int i = 0; i < aFramesToProcess; ++i) {
+    // If the value has changed, smoothly approach it
+    currentDelay += (aDelayFrames - currentDelay) * mSmoothingRate;
+    computedDelay[i] = currentDelay;
+  }
+
+  Process(computedDelay.Elements(), aInputChannels, aOutputChannels,
+          aChannelCount, aFramesToProcess);
+}
+
+bool
+DelayProcessor::EnsureBuffer(uint32_t aNumberOfChannels)
+{
+  if (aNumberOfChannels == 0) {
+    return false;
+  }
+  if (mBuffer.Length() == 0) {
+    if (!mBuffer.SetLength(aNumberOfChannels)) {
+      return false;
+    }
+    const int numFrames = mMaxDelayFrames;
+    for (uint32_t channel = 0; channel < aNumberOfChannels; ++channel) {
+      if (!mBuffer[channel].SetLength(numFrames)) {
+        return false;
+      }
+      PodZero(mBuffer[channel].Elements(), numFrames);
+    }
+  } else if (mBuffer.Length() != aNumberOfChannels) {
+    // TODO: Handle changes in the channel count
+    return false;
+  }
+  return true;
+}
+
+} // mozilla
new file mode 100644
--- /dev/null
+++ b/content/media/webaudio/DelayProcessor.h
@@ -0,0 +1,60 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef DelayProcessor_h_
+#define DelayProcessor_h_
+
+#include "nsTArray.h"
+
+namespace mozilla {
+
+class DelayProcessor {
+public:
+  // See WebAudioUtils::ComputeSmoothingRate() for frame to frame exponential
+  // |smoothingRate| multiplier.
+  DelayProcessor(int aMaxDelayFrames, double aSmoothingRate)
+    : mSmoothingRate(aSmoothingRate)
+    , mCurrentDelay(0.)
+    , mMaxDelayFrames(aMaxDelayFrames)
+    , mWriteIndex(0)
+  {
+  }
+
+  // Process with an array of delays, in frames, for each frame.
+  void Process(const double *aPerFrameDelays,
+               const float* const* aInputChannels,
+               float* const* aOutputChannels,
+               int aChannelCount, int aFramesToProcess);
+
+  // Process with a constant delay, which will be smoothed with the previous
+  // delay.
+  void Process(double aDelayFrames, const float* const* aInputChannels,
+               float* const* aOutputChannels, int aChannelCount,
+               int aFramesToProcess);
+
+  void Reset() { mBuffer.Clear(); };
+
+  double CurrentDelayFrames() const { return mCurrentDelay; }
+  int BufferChannelCount() const { return mBuffer.Length(); }
+
+private:
+  bool EnsureBuffer(uint32_t aNumberOfChannels);
+
+  // Circular buffer for capturing delayed samples.
+  AutoFallibleTArray<FallibleTArray<float>, 2> mBuffer;
+  double mSmoothingRate;
+  // Current delay, in fractional frames
+  double mCurrentDelay;
+  // Maximum delay, in frames
+  int mMaxDelayFrames;
+  // Write index for the buffer, to write the frames to the correct index of the buffer
+  // given the current delay.
+  int mWriteIndex;
+};
+
+} // mozilla
+
+#endif // DelayProcessor_h_
--- a/content/media/webaudio/moz.build
+++ b/content/media/webaudio/moz.build
@@ -59,16 +59,17 @@ CPP_SOURCES += [
     'AudioNode.cpp',
     'AudioParam.cpp',
     'AudioProcessingEvent.cpp',
     'BiquadFilterNode.cpp',
     'ChannelMergerNode.cpp',
     'ChannelSplitterNode.cpp',
     'ConvolverNode.cpp',
     'DelayNode.cpp',
+    'DelayProcessor.cpp',
     'DynamicsCompressorNode.cpp',
     'EnableWebAudioCheck.cpp',
     'FFTBlock.cpp',
     'GainNode.cpp',
     'MediaBufferDecoder.cpp',
     'MediaElementAudioSourceNode.cpp',
     'MediaStreamAudioDestinationNode.cpp',
     'MediaStreamAudioSourceNode.cpp',