b=910174 add DelayNode's tail-time reference as soon as the engine receives sound r=ehsan a=akeybl
author Karl Tomlinson <karlt+@karlt.net>
Tue, 01 Oct 2013 09:50:04 +1300
changeset 160549 7dfe4a77553120545ccb6f59a1f0e7c73f14d8ce
parent 160548 1405a08b51e23a55c91fb0284f1f436c12fb9f53
child 160550 a4b7282df517a70e0d7ad3930065abf71d125d30
push id 2961
push user lsblakk@mozilla.com
push date Mon, 28 Oct 2013 21:59:28 +0000
treeherder mozilla-beta@73ef4f13486f
reviewers ehsan, akeybl
bugs 910174, 921457
milestone 26.0a2
b=910174 add DelayNode's tail-time reference as soon as the engine receives sound r=ehsan a=akeybl

This removes the dependence on AllInputsFinished(), which didn't return true for many input types. The DelayProcessor is no longer continuously reset (bug 921457), and the reference is now correctly added again when all inputs have finished and new inputs are then connected.

(transplanted from cb856e6d57c1eeb387e3b57a5e4f95afc16f80cb)
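For context (not part of the changeset itself), here is a minimal Web Audio sketch of the situation the patch and the new test_delayNodeTailWithGain.html cover: a short source reaches a DelayNode only through an intermediate GainNode. A GainNode input never reports finished, so the old AllInputsFinished() check never added the tail-time reference, and once script dropped its node references the delayed tail could be garbage-collected and silenced. With this patch the reference is taken as soon as the delay's engine receives non-silent input. Everything below uses only standard Web Audio API calls; the particular durations and values are illustrative.

// Hedged sketch: source -> gain -> delay -> destination, then drop all JS references.
var ctx = new AudioContext();

var delay = ctx.createDelay(1.0);          // 1 s maximum delay
delay.delayTime.value = 1.0;
delay.connect(ctx.destination);

var gain = ctx.createGain();
gain.connect(delay);                       // indirect path into the delay

// A short cosine burst that finishes long before its delayed copy plays out.
var buffer = ctx.createBuffer(1, 128, ctx.sampleRate);
for (var i = 0; i < 128; ++i) {
  buffer.getChannelData(0)[i] = Math.cos(Math.PI * i / 128);
}
var source = ctx.createBufferSource();
source.buffer = buffer;
source.connect(gain);
source.start();

// After this scope exits, nothing in JS keeps the nodes alive; the tail-time
// reference taken by the engine is what lets the burst emerge one second later.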
content/media/AudioNodeStream.cpp
content/media/AudioNodeStream.h
content/media/webaudio/DelayNode.cpp
content/media/webaudio/test/Makefile.in
content/media/webaudio/test/test_delayNodeTailWithGain.html
content/media/webaudio/test/test_delayNodeTailWithReconnect.html
--- a/content/media/AudioNodeStream.cpp
+++ b/content/media/AudioNodeStream.cpp
@@ -233,28 +233,16 @@ AudioNodeStream::SetChannelMixingParamet
   MOZ_ASSERT(int(aChannelCountMode) < INT16_MAX);
   MOZ_ASSERT(int(aChannelInterpretation) < INT16_MAX);
 
   mNumberOfInputChannels = aNumberOfChannels;
   mChannelCountMode = aChannelCountMode;
   mChannelInterpretation = aChannelInterpretation;
 }
 
-bool
-AudioNodeStream::AllInputsFinished() const
-{
-  uint32_t inputCount = mInputs.Length();
-  for (uint32_t i = 0; i < inputCount; ++i) {
-    if (!mInputs[i]->GetSource()->IsFinishedOnGraphThread()) {
-      return false;
-    }
-  }
-  return !!inputCount;
-}
-
 uint32_t
 AudioNodeStream::ComputeFinalOuputChannelCount(uint32_t aInputChannelCount)
 {
   switch (mChannelCountMode) {
   case ChannelCountMode::Explicit:
     // Disregard the channel count we've calculated from inputs, and just use
     // mNumberOfInputChannels.
     return mNumberOfInputChannels;
--- a/content/media/AudioNodeStream.h
+++ b/content/media/AudioNodeStream.h
@@ -95,17 +95,16 @@ public:
   // Graph thread only
   void SetStreamTimeParameterImpl(uint32_t aIndex, MediaStream* aRelativeToStream,
                                   double aStreamTime);
   void SetChannelMixingParametersImpl(uint32_t aNumberOfChannels,
                                       dom::ChannelCountMode aChannelCountMoe,
                                       dom::ChannelInterpretation aChannelInterpretation);
   virtual void ProduceOutput(GraphTime aFrom, GraphTime aTo);
   TrackTicks GetCurrentPosition();
-  bool AllInputsFinished() const;
   bool IsAudioParamStream() const
   {
     return mAudioParamStream;
   }
   void Mute() {
     mMuted = true;
   }
 
--- a/content/media/webaudio/DelayNode.cpp
+++ b/content/media/webaudio/DelayNode.cpp
@@ -80,26 +80,23 @@ public:
     MOZ_ASSERT(mSource == aStream, "Invalid source stream");
     MOZ_ASSERT(aStream->SampleRate() == mDestination->SampleRate());
 
     const uint32_t numChannels = aInput.IsNull() ?
                                  mProcessor.BufferChannelCount() :
                                  aInput.mChannelData.Length();
 
     bool playedBackAllLeftOvers = false;
-    if (mProcessor.BufferChannelCount() &&
-        mLeftOverData == INT32_MIN &&
-        aStream->AllInputsFinished()) {
-      mLeftOverData = mProcessor.MaxDelayFrames() - WEBAUDIO_BLOCK_SIZE;
-
-      if (mLeftOverData > 0) {
+    if (!aInput.IsNull()) {
+      if (mLeftOverData <= 0) {
         nsRefPtr<PlayingRefChanged> refchanged =
           new PlayingRefChanged(aStream, PlayingRefChanged::ADDREF);
         NS_DispatchToMainThread(refchanged);
       }
+      mLeftOverData = mProcessor.MaxDelayFrames();
     } else if (mLeftOverData != INT32_MIN) {
       mLeftOverData -= WEBAUDIO_BLOCK_SIZE;
       if (mLeftOverData <= 0) {
         // Continue spamming the main thread with messages until we are destroyed.
         // This isn't great, but it ensures a message will get through even if
         // some are ignored by DelayNode::AcceptPlayingRefRelease
         mLeftOverData = 0;
         playedBackAllLeftOvers = true;
--- a/content/media/webaudio/test/Makefile.in
+++ b/content/media/webaudio/test/Makefile.in
@@ -52,16 +52,18 @@ MOCHITEST_FILES := \
   test_convolverNodeChannelCount.html \
   test_convolverNodeWithGain.html \
   test_convolverNode_mono_mono.html \
   test_currentTime.html \
   test_delayNode.html \
   test_delayNodeAtMax.html \
   test_delayNodeSmallMaxDelay.html \
   test_delayNodeTailIncrease.html \
+  test_delayNodeTailWithGain.html \
+  test_delayNodeTailWithReconnect.html \
   test_delayNodeWithGain.html \
   test_delayNodeCycles.html \
   test_dynamicsCompressorNode.html \
   test_gainNode.html \
   test_gainNodeInLoop.html \
   test_maxChannelCount.html \
   test_mediaDecoding.html \
   test_decodeMultichannel.html \
new file mode 100644
--- /dev/null
+++ b/content/media/webaudio/test/test_delayNodeTailWithGain.html
@@ -0,0 +1,72 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test tail time lifetime of DelayNode indirectly connected to source</title>
+  <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
+  <script type="text/javascript" src="webaudio.js"></script>
+  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+const signalLength = 130;
+const bufferSize = 1024;
+// Delay should be long enough to allow CC to run
+const delayBufferCount = 50;
+const delayLength = delayBufferCount * bufferSize + 700;
+
+var count = 0;
+
+function applySignal(buffer, offset) {
+  for (var i = 0; i < signalLength; ++i) {
+    buffer.getChannelData(0)[offset + i] = Math.cos(Math.PI * i / signalLength);
+  }
+}
+
+function onAudioProcess(e) {
+  switch(count) {
+  case 5:
+    SpecialPowers.forceGC();
+    SpecialPowers.forceCC();
+    break;
+  case delayBufferCount:
+    var offset = delayLength - count * bufferSize;
+    var ctx = e.target.context;
+    var expected = ctx.createBuffer(1, bufferSize, ctx.sampleRate);
+    applySignal(expected, offset);
+    compareBuffers(e.inputBuffer.getChannelData(0), expected.getChannelData(0));
+    SimpleTest.finish();
+  }
+  count++;
+}
+
+function startTest() {
+  var ctx = new AudioContext();
+  var processor = ctx.createScriptProcessor(bufferSize, 1, 0);
+  processor.onaudioprocess = onAudioProcess;
+
+  var delayDuration = delayLength / ctx.sampleRate;
+  var delay = ctx.createDelay(delayDuration);
+  delay.delayTime.value = delayDuration;
+  delay.connect(processor);
+
+  var gain = ctx.createGain();
+  gain.connect(delay);
+
+  // Short signal that finishes before garbage collection
+  var buffer = ctx.createBuffer(1, signalLength, ctx.sampleRate);
+  applySignal(buffer, 0);
+  var source = ctx.createBufferSource();
+  source.buffer = buffer;
+  source.start();
+  source.connect(gain);
+};
+
+startTest();
+</script>
+</pre>
+</body>
+</html>
new file mode 100644
--- /dev/null
+++ b/content/media/webaudio/test/test_delayNodeTailWithReconnect.html
@@ -0,0 +1,138 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test tail time lifetime of DelayNode after input finishes and new input added</title>
+  <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
+  <script type="text/javascript" src="webaudio.js"></script>
+  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+// The buffer source will start on a block boundary, so keeping the signal
+// within one block ensures that it will not cross AudioProcessingEvent buffer
+// boundaries.
+const signalLength = 128;
+const bufferSize = 1024;
+// Delay should be long enough to allow CC to run
+var delayBufferCount = 50;
+var delayBufferOffset;
+const delayLength = delayBufferCount * bufferSize;
+
+var phase = "initial";
+var sourceCount = 0;
+var delayCount = 0;
+var oscillator;
+var delay;
+var source;
+
+function applySignal(buffer, offset) {
+  for (var i = 0; i < signalLength; ++i) {
+    buffer.getChannelData(0)[offset + i] = Math.cos(Math.PI * i / signalLength);
+  }
+}
+
+function bufferIsSilent(buffer, out) {
+  for (var i = 0; i < buffer.length; ++i) {
+    if (buffer.getChannelData(0)[i] != 0) {
+      if (out) {
+        out.soundOffset = i;
+      }
+      return false;
+    }
+  }
+  return true;
+}
+
+function onDelayOutput(e) {
+  switch(phase) {
+
+  case "initial":
+    // Wait for oscillator sound to exit delay
+    if (bufferIsSilent(e.inputBuffer))
+      break;
+
+    phase = "played oscillator";
+    break;
+
+  case "played oscillator":
+    // First tail time has expired.  Start second source and remove references
+    // to the delay and connected second source.
+    oscillator.disconnect();
+    source.connect(delay);
+    source.start();
+    source = null;
+    delay = null;
+    phase = "started second source";
+    break;
+
+  case "second tail time":
+    if (delayCount == delayBufferCount) {
+      var ctx = e.target.context;
+      var expected = ctx.createBuffer(1, bufferSize, ctx.sampleRate);
+      applySignal(expected, delayBufferOffset);
+      compareBuffers(e.inputBuffer.getChannelData(0), expected.getChannelData(0));
+      e.target.onaudioprocess = null;
+      SimpleTest.finish();
+    }
+  }
+
+  delayCount++;
+}
+
+function onSourceOutput(e) {
+  switch(phase) {
+  case "started second source":
+    var out = {};
+    if (!bufferIsSilent(e.inputBuffer, out)) {
+      delayBufferCount += sourceCount;
+      delayBufferOffset = out.soundOffset;
+      phase = "played second source";
+    }
+    break;
+  case "played second source":
+    SpecialPowers.forceGC();
+    SpecialPowers.forceCC();
+    phase = "second tail time";
+    e.target.onaudioprocess = null;
+  }
+
+  sourceCount++;
+}
+
+function startTest() {
+  var ctx = new AudioContext();
+  var delayDuration = delayLength / ctx.sampleRate;
+  delay = ctx.createDelay(delayDuration);
+  delay.delayTime.value = delayDuration;
+  var processor1 = ctx.createScriptProcessor(bufferSize, 1, 0);
+  delay.connect(processor1);
+  processor1.onaudioprocess = onDelayOutput;
+  processor1.connect(ctx.destination); // work around bug 916387
+
+  // Signal to trigger initial tail time reference
+  oscillator = ctx.createOscillator();
+  oscillator.start(0);
+  oscillator.stop(100/ctx.sampleRate);
+  oscillator.connect(delay);
+
+  // Short signal, not started yet, with a ScriptProcessor to detect when it
+  // starts.  It should finish before garbage collection.
+  var buffer = ctx.createBuffer(1, signalLength, ctx.sampleRate);
+  applySignal(buffer, 0);
+  source = ctx.createBufferSource();
+  source.buffer = buffer;
+  var processor2 = ctx.createScriptProcessor(bufferSize, 1, 0);
+  source.connect(processor2);
+  processor2.onaudioprocess = onSourceOutput;
+  processor2.connect(ctx.destination); // guard against bug 916387
+};
+
+startTest();
+</script>
+</pre>
+</body>
+</html>