Bug 864164 - Part 2: Send the AudioBufferSourceNode buffer parameter changes to the stream; r=padenot
author: Ehsan Akhgari <ehsan@mozilla.com>
date: Mon, 22 Apr 2013 17:01:22 -0400
changeset: 129750 05cec405039374656a75bae5ff9019fd60510bee
parent: 129749 fb4ab26a49de8979c2e4348379b5c464596c0a1e
child: 129751 979da3f33fa32edc4a858833e01b0aaa91fd1b91
push id: 26981
push user: paul@paul.cx
push date: Wed, 24 Apr 2013 13:40:56 +0000
treeherder: mozilla-inbound@979da3f33fa3
reviewers: padenot
bugs: 864164
milestone: 23.0a1
content/media/webaudio/AudioBufferSourceNode.cpp
content/media/webaudio/AudioBufferSourceNode.h
content/media/webaudio/test/Makefile.in
content/media/webaudio/test/test_audioBufferSourceNode.html
content/media/webaudio/test/test_audioBufferSourceNodeNullBuffer.html
dom/bindings/Bindings.conf
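
For context, a minimal JavaScript sketch of the usage pattern this patch supports, mirroring the updated test_audioBufferSourceNode.html below (all API calls are ones the tests themselves use): the node is started before any buffer is assigned, and the buffer setter is expected to forward the data to the stream once it arrives.

  var context = new AudioContext();
  var buffer = context.createBuffer(1, 2048, context.sampleRate);
  for (var i = 0; i < 2048; ++i) {
    buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
  }

  var source = context.createBufferSource();
  source.start(0);        // no buffer yet: offset/duration are remembered
  source.buffer = buffer; // setter sends the buffer parameter to the stream
  source.connect(context.destination);
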
--- a/content/media/webaudio/AudioBufferSourceNode.cpp
+++ b/content/media/webaudio/AudioBufferSourceNode.cpp
@@ -7,16 +7,17 @@
 #include "AudioBufferSourceNode.h"
 #include "mozilla/dom/AudioBufferSourceNodeBinding.h"
 #include "nsMathUtils.h"
 #include "AudioNodeEngine.h"
 #include "AudioNodeStream.h"
 #include "AudioDestinationNode.h"
 #include "PannerNode.h"
 #include "speex/speex_resampler.h"
+#include <limits>
 
 namespace mozilla {
 namespace dom {
 
 NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(AudioBufferSourceNode, AudioNode)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mBuffer)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mPlaybackRate)
   if (tmp->Context()) {
@@ -395,20 +396,23 @@ public:
   AudioParamTimeline mPlaybackRateTimeline;
   bool mLoop;
 };
 
 AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* aContext)
   : AudioNode(aContext)
   , mLoopStart(0.0)
   , mLoopEnd(0.0)
+  , mOffset(0.0)
+  , mDuration(std::numeric_limits<double>::min())
   , mPlaybackRate(new AudioParam(this, SendPlaybackRateToStream, 1.0f))
   , mPannerNode(nullptr)
   , mLoop(false)
   , mStartCalled(false)
+  , mOffsetAndDurationRemembered(false)
 {
   mStream = aContext->Graph()->CreateAudioNodeStream(
       new AudioBufferSourceNodeEngine(this, aContext->Destination()),
       MediaStreamGraph::INTERNAL_STREAM);
   mStream->AddMainThreadListener(this);
 }
 
 AudioBufferSourceNode::~AudioBufferSourceNode()
@@ -420,70 +424,113 @@ AudioBufferSourceNode::~AudioBufferSourc
 
 JSObject*
 AudioBufferSourceNode::WrapObject(JSContext* aCx, JSObject* aScope)
 {
   return AudioBufferSourceNodeBinding::Wrap(aCx, aScope, this);
 }
 
 void
-AudioBufferSourceNode::Start(JSContext* aCx, double aWhen, double aOffset,
+AudioBufferSourceNode::Start(double aWhen, double aOffset,
                              const Optional<double>& aDuration, ErrorResult& aRv)
 {
   if (mStartCalled) {
     aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
     return;
   }
   mStartCalled = true;
 
   AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get());
-  if (!mBuffer || !ns) {
+  if (!ns) {
     // Nothing to play, or we're already dead for some reason
     return;
   }
 
-  float rate = mBuffer->SampleRate();
-  int32_t lengthSamples = mBuffer->Length();
-  nsRefPtr<ThreadSharedFloatArrayBufferList> data =
-    mBuffer->GetThreadSharedChannelsForRate(aCx);
+  if (mBuffer) {
+    double duration = aDuration.WasPassed() ?
+                      aDuration.Value() :
+                      std::numeric_limits<double>::min();
+    SendOffsetAndDurationParametersToStream(ns, aOffset, duration);
+  } else {
+    // Remember our arguments so that we can use them once we have a buffer
+    mOffset = aOffset;
+    mDuration = aDuration.WasPassed() ?
+                aDuration.Value() :
+                std::numeric_limits<double>::min();
+    mOffsetAndDurationRemembered = true;
+  }
+
+  // Don't set parameter unnecessarily
+  if (aWhen > 0.0) {
+    ns->SetStreamTimeParameter(START, Context()->DestinationStream(), aWhen);
+  }
+
+  MOZ_ASSERT(!mPlayingRef, "We can only accept a successful start() call once");
+  mPlayingRef.Take(this);
+}
+
+void
+AudioBufferSourceNode::SendBufferParameterToStream(JSContext* aCx)
+{
+  AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get());
+  MOZ_ASSERT(ns, "Why don't we have a stream here?");
+
+  if (mBuffer) {
+    float rate = mBuffer->SampleRate();
+    nsRefPtr<ThreadSharedFloatArrayBufferList> data =
+      mBuffer->GetThreadSharedChannelsForRate(aCx);
+    ns->SetBuffer(data.forget());
+    ns->SetInt32Parameter(SAMPLE_RATE, rate);
+  } else {
+    ns->SetBuffer(nullptr);
+  }
+
+  if (mOffsetAndDurationRemembered) {
+    SendOffsetAndDurationParametersToStream(ns, mOffset, mDuration);
+  }
+}
+
+void
+AudioBufferSourceNode::SendOffsetAndDurationParametersToStream(AudioNodeStream* aStream,
+                                                               double aOffset,
+                                                               double aDuration)
+{
+  float rate = mBuffer ? mBuffer->SampleRate() : Context()->SampleRate();
+  int32_t lengthSamples = mBuffer ? mBuffer->Length() : 0;
   double length = double(lengthSamples) / rate;
   double offset = std::max(0.0, aOffset);
-  double endOffset = aDuration.WasPassed() ?
-      std::min(aOffset + aDuration.Value(), length) : length;
+  double endOffset = aDuration == std::numeric_limits<double>::min() ?
+                     length : std::min(aOffset + aDuration, length);
 
   if (offset >= endOffset) {
     return;
   }
 
-  ns->SetBuffer(data.forget());
-  // Don't set parameter unnecessarily
-  if (aWhen > 0.0) {
-    ns->SetStreamTimeParameter(START, Context()->DestinationStream(), aWhen);
-  }
   int32_t offsetTicks = NS_lround(offset*rate);
   // Don't set parameter unnecessarily
   if (offsetTicks > 0) {
-    ns->SetInt32Parameter(OFFSET, offsetTicks);
+    aStream->SetInt32Parameter(OFFSET, offsetTicks);
   }
-  ns->SetInt32Parameter(DURATION,
-      NS_lround(endOffset*rate) - offsetTicks);
-  ns->SetInt32Parameter(SAMPLE_RATE, rate);
-
-  MOZ_ASSERT(!mPlayingRef, "We can only accept a successful start() call once");
-  mPlayingRef.Take(this);
+  aStream->SetInt32Parameter(DURATION, NS_lround(endOffset*rate) - offsetTicks);
 }
 
 void
 AudioBufferSourceNode::Stop(double aWhen, ErrorResult& aRv)
 {
   if (!mStartCalled) {
     aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
     return;
   }
 
+  if (!mBuffer) {
+    // We don't have a buffer, so the stream is never marked as finished.
+    // Therefore we need to drop our playing ref right now.
+    mPlayingRef.Drop(this);
+  }
+
   AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get());
   if (!ns) {
     // We've already stopped and had our stream shut down
     return;
   }
 
   ns->SetStreamTimeParameter(STOP, Context()->DestinationStream(),
                              std::max(0.0, aWhen));
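
A hypothetical usage sketch (not one of the tests in this patch) of the other case the Stop() change above covers: when a node is started and stopped without ever receiving a buffer, the stream never reports itself as finished, so the node has to drop its playing self-reference as soon as stop() is called.

  var context = new AudioContext();
  var source = context.createBufferSource();
  source.connect(context.destination);
  source.start(0); // no buffer: nothing plays, offset/duration are remembered
  source.stop(0);  // no buffer: the playing self-reference is dropped here,
                   // because the stream will never be marked as finished
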
--- a/content/media/webaudio/AudioBufferSourceNode.h
+++ b/content/media/webaudio/AudioBufferSourceNode.h
@@ -50,42 +50,43 @@ public:
     mPannerNode = aPannerNode;
   }
 
   NS_DECL_ISUPPORTS_INHERITED
   NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(AudioBufferSourceNode, AudioNode)
 
   virtual JSObject* WrapObject(JSContext* aCx, JSObject* aScope);
 
-  void Start(JSContext* aCx, double aWhen, double aOffset,
+  void Start(double aWhen, double aOffset,
              const Optional<double>& aDuration, ErrorResult& aRv);
-  void NoteOn(JSContext* aCx, double aWhen, ErrorResult& aRv)
+  void NoteOn(double aWhen, ErrorResult& aRv)
   {
-    Start(aCx, aWhen, 0.0, Optional<double>(), aRv);
+    Start(aWhen, 0.0, Optional<double>(), aRv);
   }
-  void NoteGrainOn(JSContext* aCx, double aWhen, double aOffset,
+  void NoteGrainOn(double aWhen, double aOffset,
                    double aDuration, ErrorResult& aRv)
   {
     Optional<double> duration;
     duration.Construct(aDuration);
-    Start(aCx, aWhen, aOffset, duration, aRv);
+    Start(aWhen, aOffset, duration, aRv);
   }
   void Stop(double aWhen, ErrorResult& aRv);
   void NoteOff(double aWhen, ErrorResult& aRv)
   {
     Stop(aWhen, aRv);
   }
 
-  AudioBuffer* GetBuffer() const
+  AudioBuffer* GetBuffer(JSContext* aCx) const
   {
     return mBuffer;
   }
-  void SetBuffer(AudioBuffer* aBuffer)
+  void SetBuffer(JSContext* aCx, AudioBuffer* aBuffer)
   {
     mBuffer = aBuffer;
+    SendBufferParameterToStream(aCx);
   }
   AudioParam* PlaybackRate() const
   {
     return mPlaybackRate;
   }
   bool Loop() const
   {
     return mLoop;
@@ -131,26 +132,33 @@ private:
     LOOP,
     LOOPSTART,
     LOOPEND,
     PLAYBACKRATE,
     DOPPLERSHIFT
   };
 
   void SendLoopParametersToStream();
+  void SendBufferParameterToStream(JSContext* aCx);
+  void SendOffsetAndDurationParametersToStream(AudioNodeStream* aStream,
+                                               double aOffset,
+                                               double aDuration);
   static void SendPlaybackRateToStream(AudioNode* aNode);
 
 private:
   double mLoopStart;
   double mLoopEnd;
+  double mOffset;
+  double mDuration;
   nsRefPtr<AudioBuffer> mBuffer;
   nsRefPtr<AudioParam> mPlaybackRate;
   PannerNode* mPannerNode;
   SelfReference<AudioBufferSourceNode> mPlayingRef; // a reference to self while playing
   bool mLoop;
   bool mStartCalled;
+  bool mOffsetAndDurationRemembered;
 };
 
 }
 }
 
 #endif
 
--- a/content/media/webaudio/test/Makefile.in
+++ b/content/media/webaudio/test/Makefile.in
@@ -20,16 +20,17 @@ MOCHITEST_FILES := \
   test_analyserNode.html \
   test_AudioBuffer.html \
   test_AudioContext.html \
   test_AudioListener.html \
   test_AudioParam.html \
   test_audioBufferSourceNode.html \
   test_audioBufferSourceNodeLoop.html \
   test_audioBufferSourceNodeLoopStartEnd.html \
+  test_audioBufferSourceNodeNullBuffer.html \
   test_badConnect.html \
   test_biquadFilterNode.html \
   test_currentTime.html \
   test_delayNode.html \
   test_delayNodeWithGain.html \
   test_decodeAudioData.html \
   test_dynamicsCompressorNode.html \
   test_gainNode.html \
--- a/content/media/webaudio/test/test_audioBufferSourceNode.html
+++ b/content/media/webaudio/test/test_audioBufferSourceNode.html
@@ -16,20 +16,20 @@ addLoadEvent(function() {
 
   var context = new AudioContext();
   var buffer = context.createBuffer(1, 2048, context.sampleRate);
   for (var i = 0; i < 2048; ++i) {
     buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
   }
 
   var source = context.createBufferSource();
-  source.buffer = buffer;
 
   var sp = context.createScriptProcessor(2048);
   source.start(0);
+  source.buffer = buffer;
   source.connect(sp);
   sp.connect(context.destination);
   sp.onaudioprocess = function(e) {
     compareBuffers(e.inputBuffer.getChannelData(0), buffer.getChannelData(0));
     compareBuffers(e.inputBuffer.getChannelData(1), buffer.getChannelData(0));
 
     // On the next iteration, we'll get a silence buffer
     sp.onaudioprocess = function(e) {
new file mode 100644
--- /dev/null
+++ b/content/media/webaudio/test/test_audioBufferSourceNodeNullBuffer.html
@@ -0,0 +1,42 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test AudioBufferSourceNode with a null buffer</title>
+  <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
+  <script type="text/javascript" src="webaudio.js"></script>
+  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+  SpecialPowers.setBoolPref("media.webaudio.enabled", true);
+
+  var context = new AudioContext();
+  var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate); // silence
+
+  var source = context.createBufferSource();
+
+  var sp = context.createScriptProcessor(2048);
+  source.start(0);
+  source.buffer = null;
+  is(source.buffer, null, "Try playing back a null buffer");
+  source.connect(sp);
+  sp.connect(context.destination);
+  sp.onaudioprocess = function(e) {
+    compareBuffers(e.inputBuffer.getChannelData(0), expectedBuffer.getChannelData(0));
+    compareBuffers(e.inputBuffer.getChannelData(1), expectedBuffer.getChannelData(0));
+
+    sp.onaudioprocess = null;
+
+    SpecialPowers.clearUserPref("media.webaudio.enabled");
+    SimpleTest.finish();
+  };
+});
+
+</script>
+</pre>
+</body>
+</html>
--- a/dom/bindings/Bindings.conf
+++ b/dom/bindings/Bindings.conf
@@ -101,17 +101,17 @@ DOMInterfaces = {
 
 'AudioContext': {
     'implicitJSContext': [ 'createBuffer' ],
     'nativeOwnership': 'refcounted',
     'resultNotAddRefed': [ 'destination', 'listener' ],
 },
 
 'AudioBufferSourceNode': {
-    'implicitJSContext': [ 'start', 'noteOn', 'noteGrainOn' ],
+    'implicitJSContext': [ 'buffer' ],
     'resultNotAddRefed': [ 'playbackRate' ],
 },
 
 'AudioListener' : {
     'nativeOwnership': 'refcounted'
 },
 
 'AudioNode' : {