Bug 866434 - Part 3: Mix in the value generated by AudioNode inputs to AudioParams when getting their values during audio processing; r=roc
author Ehsan Akhgari <ehsan@mozilla.com>
Wed, 01 May 2013 23:12:59 -0400
changeset 130734 8d0cd25b6611
parent 130733 6c0be12d00aa
child 130735 42ced245b6f8
push id 24630
push user philringnalda@gmail.com
push date 2013-05-04 02:33 +0000
treeherder mozilla-central@1e00967c5786
reviewers roc
bugs 866434
milestone 23.0a1
content/media/AudioNodeEngine.h
content/media/AudioNodeStream.h
content/media/AudioSegment.h
content/media/webaudio/AudioBufferSourceNode.cpp
content/media/webaudio/AudioParam.cpp
content/media/webaudio/AudioParamTimeline.h
content/media/webaudio/DelayNode.cpp
content/media/webaudio/GainNode.cpp
--- a/content/media/AudioNodeEngine.h
+++ b/content/media/AudioNodeEngine.h
@@ -14,21 +14,16 @@ namespace mozilla {
 
 namespace dom {
 class AudioNode;
 struct ThreeDPoint;
 }
 
 class AudioNodeStream;
 
-// We ensure that the graph advances in steps that are multiples of the Web
-// Audio block size
-const uint32_t WEBAUDIO_BLOCK_SIZE_BITS = 7;
-const uint32_t WEBAUDIO_BLOCK_SIZE = 1 << WEBAUDIO_BLOCK_SIZE_BITS;
-
 /**
  * This class holds onto a set of immutable channel buffers. The storage
  * for the buffers must be malloced, but the buffer pointers and the malloc
  * pointers can be different (e.g. if the buffers are contained inside
  * some malloced object).
  */
 class ThreadSharedFloatArrayBufferList : public ThreadSharedObject {
 public:
--- a/content/media/AudioNodeStream.h
+++ b/content/media/AudioNodeStream.h
@@ -92,16 +92,20 @@ public:
                                       dom::ChannelInterpretation aChannelInterpretation);
   virtual void ProduceOutput(GraphTime aFrom, GraphTime aTo);
   TrackTicks GetCurrentPosition();
   bool AllInputsFinished() const;
   bool IsAudioParamStream() const
   {
     return mAudioParamStream;
   }
+  const AudioChunk& LastChunk() const
+  {
+    return mLastChunk;
+  }
 
   // Any thread
   AudioNodeEngine* Engine() { return mEngine; }
 
 protected:
   void FinishOutput();
 
   StreamBuffer::Track* EnsureTrack();
--- a/content/media/AudioSegment.h
+++ b/content/media/AudioSegment.h
@@ -15,16 +15,21 @@ namespace mozilla {
 
 class AudioStream;
 
 /**
  * For auto-arrays etc, guess this as the common number of channels.
  */
 const int GUESS_AUDIO_CHANNELS = 2;
 
+// We ensure that the graph advances in steps that are multiples of the Web
+// Audio block size
+const uint32_t WEBAUDIO_BLOCK_SIZE_BITS = 7;
+const uint32_t WEBAUDIO_BLOCK_SIZE = 1 << WEBAUDIO_BLOCK_SIZE_BITS;
+
 /**
  * An AudioChunk represents a multi-channel buffer of audio samples.
  * It references an underlying ThreadSharedObject which manages the lifetime
  * of the buffer. An AudioChunk maintains its own duration and channel data
  * pointers so it can represent a subinterval of a buffer without copying.
  * An AudioChunk can store its individual channels anywhere; it maintains
  * separate pointers to each channel's buffer.
  */
--- a/content/media/webaudio/AudioBufferSourceNode.cpp
+++ b/content/media/webaudio/AudioBufferSourceNode.cpp
@@ -304,17 +304,17 @@ public:
              mSampleRate == IdealAudioRate());
   }
 
   void UpdateSampleRateIfNeeded(AudioNodeStream* aStream)
   {
     if (mPlaybackRateTimeline.HasSimpleValue()) {
       mPlaybackRate = mPlaybackRateTimeline.GetValue();
     } else {
-      mPlaybackRate = mPlaybackRateTimeline.GetValueAtTime<TrackTicks>(aStream->GetCurrentPosition());
+      mPlaybackRate = mPlaybackRateTimeline.GetValueAtTime(aStream->GetCurrentPosition());
     }
 
     // Make sure the playback rate is something our resampler can work with.
     if (mPlaybackRate <= 0.0 || mPlaybackRate >= 1024) {
       mPlaybackRate = 1.0;
     }
 
     uint32_t currentOutSampleRate, currentInSampleRate;
--- a/content/media/webaudio/AudioParam.cpp
+++ b/content/media/webaudio/AudioParam.cpp
@@ -115,11 +115,31 @@ AudioParam::Stream()
   mNodeStreamPort = ps->AllocateInputPort(mStream, MediaInputPort::FLAG_BLOCK_INPUT);
 
   // Let the MSG's copy of AudioParamTimeline know about the change in the stream
   mCallback(mNode);
 
   return mStream;
 }
 
+float
+AudioParamTimeline::AudioNodeInputValue(size_t aCounter) const
+{
+  MOZ_ASSERT(mStream);
+
+  // If we have a chunk produced by the AudioNode inputs to the AudioParam,
+  // get its value now.  We use aCounter to tell us which frame of the last
+  // AudioChunk to look at.
+  float audioNodeInputValue = 0.0f;
+  const AudioChunk& lastAudioNodeChunk =
+    static_cast<AudioNodeStream*>(mStream.get())->LastChunk();
+  if (!lastAudioNodeChunk.IsNull()) {
+    MOZ_ASSERT(lastAudioNodeChunk.GetDuration() == WEBAUDIO_BLOCK_SIZE);
+    audioNodeInputValue =
+      static_cast<const float*>(lastAudioNodeChunk.mChannelData[0])[aCounter];
+  }
+
+  return audioNodeInputValue;
+}
+
 }
 }
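
Taken together with the GetValueAtTime overload added to AudioParamTimeline.h below, the value an engine observes for an a-rate AudioParam at tick + counter works out to the following (a sketch using the names above, not literal code from the patch):

  float value = AudioEventTimeline::GetValueAtTime(tick + counter) +
                (lastChunk.IsNull()
                   ? 0.0f
                   : static_cast<const float*>(lastChunk.mChannelData[0])[counter]);

where lastChunk is the most recent WEBAUDIO_BLOCK_SIZE-frame block produced by the param's input stream (AudioNodeStream::LastChunk()).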
 
--- a/content/media/webaudio/AudioParamTimeline.h
+++ b/content/media/webaudio/AudioParamTimeline.h
@@ -6,41 +6,68 @@
 
 #ifndef AudioParamTimeline_h_
 #define AudioParamTimeline_h_
 
 #include "AudioEventTimeline.h"
 #include "mozilla/ErrorResult.h"
 #include "nsAutoPtr.h"
 #include "MediaStreamGraph.h"
+#include "AudioSegment.h"
 
 namespace mozilla {
 
 namespace dom {
 
 // This helper class is used to represent the part of the AudioParam
 // class that gets sent to AudioNodeEngine instances.  In addition to
 // AudioEventTimeline methods, it holds a pointer to an optional
 // MediaStream which represents the AudioNode inputs to the AudioParam.
 // This MediaStream is managed by the AudioParam subclass on the main
 // thread, and can only be obtained from the AudioNodeEngine instances
 // consuming this class.
 class AudioParamTimeline : public AudioEventTimeline<ErrorResult>
 {
+  typedef AudioEventTimeline<ErrorResult> BaseClass;
+
 public:
   explicit AudioParamTimeline(float aDefaultValue)
-    : AudioEventTimeline<ErrorResult>(aDefaultValue)
+    : BaseClass(aDefaultValue)
   {
   }
 
   MediaStream* Stream() const
   {
     return mStream;
   }
 
+  bool HasSimpleValue() const
+  {
+    return BaseClass::HasSimpleValue() && !mStream;
+  }
+
+  // Get the value of the AudioParam at time aTime + aCounter.
+  // aCounter here is an offset to aTime if we try to get the value in ticks,
+  // otherwise it should always be zero.  aCounter is meant to be used when
+  // getting the value of an a-rate AudioParam for each tick inside an
+  // AudioNodeEngine implementation.
+  template<class TimeType>
+  float GetValueAtTime(TimeType aTime, size_t aCounter = 0) const
+  {
+    MOZ_ASSERT(aCounter < WEBAUDIO_BLOCK_SIZE);
+    MOZ_ASSERT(!aCounter || !HasSimpleValue());
+
+    // Mix the value of the AudioParam itself with that of the AudioNode inputs.
+    return BaseClass::GetValueAtTime(static_cast<TimeType>(aTime + aCounter)) +
+           (mStream ? AudioNodeInputValue(aCounter) : 0.0f);
+  }
+
+private:
+  float AudioNodeInputValue(size_t aCounter) const;
+
 protected:
   // This is created lazily when needed.
   nsRefPtr<MediaStream> mStream;
 };
 
 }
 }
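
A minimal usage sketch of the new two-argument GetValueAtTime, following the pattern the DelayNode and GainNode hunks below adopt (mParam and aStream stand in for an engine's AudioParamTimeline member and the AudioNodeStream being processed; they are placeholders, not names from the patch):

  float computed[WEBAUDIO_BLOCK_SIZE];
  if (mParam.HasSimpleValue()) {
    // k-rate: a single value covers the whole 128-frame block.
    for (size_t i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) {
      computed[i] = mParam.GetValue();
    }
  } else {
    // a-rate: timeline value plus any AudioNode input, one sample per tick.
    TrackTicks tick = aStream->GetCurrentPosition();
    for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) {
      computed[counter] = mParam.GetValueAtTime(tick, counter);
    }
  }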
 
--- a/content/media/webaudio/DelayNode.cpp
+++ b/content/media/webaudio/DelayNode.cpp
@@ -180,17 +180,17 @@ public:
         // has a valid value when we try to change the delay time further below.
         mCurrentDelayTime = delayTime;
       }
     } else {
       // Compute the delay values for the duration of the input AudioChunk
       TrackTicks tick = aStream->GetCurrentPosition();
       for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) {
         computedDelay[counter] = std::max(0.0, std::min(mMaxDelay,
-                                   double(mDelay.GetValueAtTime<TrackTicks>(tick + counter))));
+                                   double(mDelay.GetValueAtTime(tick, counter))));
       }
     }
 
     for (uint32_t channel = 0; channel < numChannels; ++channel) {
       double currentDelayTime = mCurrentDelayTime;
       uint32_t writeIndex = mWriteIndex;
 
       float* buffer = mBuffer[channel].Elements();
--- a/content/media/webaudio/GainNode.cpp
+++ b/content/media/webaudio/GainNode.cpp
@@ -71,18 +71,18 @@ public:
       // First, compute a vector of gains for each track tick based on the
       // timeline at hand, and then for each channel, multiply the values
       // in the buffer with the gain vector.
 
       // Compute the gain values for the duration of the input AudioChunk
       // XXX we need to add a method to AudioEventTimeline to compute this buffer directly.
       float computedGain[WEBAUDIO_BLOCK_SIZE];
       for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) {
-        TrackTicks tick = aStream->GetCurrentPosition() + counter;
-        computedGain[counter] = mGain.GetValueAtTime<TrackTicks>(tick) * aInput.mVolume;
+        TrackTicks tick = aStream->GetCurrentPosition();
+        computedGain[counter] = mGain.GetValueAtTime(tick, counter) * aInput.mVolume;
       }
 
       // Apply the gain to the output buffer
       for (size_t channel = 0; channel < aOutput->mChannelData.Length(); ++channel) {
         float* buffer = static_cast<float*> (const_cast<void*>
                           (aOutput->mChannelData[channel]));
         AudioBlockCopyChannelWithScale(buffer, computedGain, buffer);
       }