bug 1207003 add GraphTime parameter to ProcessBlock() and remove GetCurrentPosition() r=padenot
author: Karl Tomlinson <karlt+@karlt.net>
date: Fri, 18 Sep 2015 17:05:25 +1200
changeset: 263945 63b1064ba50057c5ed2d3d80a2458ba2b947e281
parent: 263944 2fe45d4590e580cabf0486c91c6e62a68b700164
child: 263946 15e7ac19b26ed6bdb5813479b914897e6c0ec82b
push id: 65483
push user: ktomlinson@mozilla.com
push date: Wed, 23 Sep 2015 10:45:32 +0000
treeherder: mozilla-inbound@15e7ac19b26e
reviewers: padenot
bugs: 1207003, 1205558
milestone: 44.0a1
This is immediately useful for making the track unnecessary, but will also be required when switching to the destination node stream for tracking time (bug 1205558), because using GetCurrentPosition() on the destination node stream would give different results depending on the stream processing order (when called during processing of streams not strictly upstream from the destination node).
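To make the order-dependence concrete, here is a minimal standalone sketch, not Gecko code: GraphTime and StreamTime are both tick counts in the real tree, and the rest of the bookkeeping is a simplifying assumption for illustration. A position read back from the track changes mid-iteration depending on whether the queried stream has been processed yet; a position derived from the iteration's start time aFrom does not.

    #include <cstdint>
    #include <iostream>

    typedef int64_t GraphTime;
    typedef int64_t StreamTime;
    static const StreamTime WEBAUDIO_BLOCK_SIZE = 128;

    struct ModelStream {
      StreamTime mTrackDuration = 0;  // frames written to the track so far

      // Old style: position read back from the track. Mid-iteration, the
      // answer depends on whether this stream has been processed yet.
      StreamTime GetCurrentPosition() const { return mTrackDuration; }

      // New style: position derived from the iteration's start time, the
      // same no matter where the caller sits in the processing order.
      // (Assumes the stream started at graph time 0.)
      StreamTime GraphTimeToStreamTime(GraphTime aFrom) const { return aFrom; }

      void ProcessBlock() { mTrackDuration += WEBAUDIO_BLOCK_SIZE; }
    };

    int main() {
      ModelStream destination;
      GraphTime from = 0;  // start of the current graph iteration

      // A stream processed *before* the destination sees 0 from both calls...
      std::cout << destination.GetCurrentPosition() << " "
                << destination.GraphTimeToStreamTime(from) << "\n";  // 0 0
      destination.ProcessBlock();
      // ...but one processed *after* it sees 128 from the old call, while
      // the new call still reports the iteration start.
      std::cout << destination.GetCurrentPosition() << " "
                << destination.GraphTimeToStreamTime(from) << "\n";  // 128 0
      return 0;
    }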
dom/media/webaudio/AnalyserNode.cpp
dom/media/webaudio/AudioBufferSourceNode.cpp
dom/media/webaudio/AudioDestinationNode.cpp
dom/media/webaudio/AudioNodeEngine.cpp
dom/media/webaudio/AudioNodeEngine.h
dom/media/webaudio/AudioNodeStream.cpp
dom/media/webaudio/AudioNodeStream.h
dom/media/webaudio/BiquadFilterNode.cpp
dom/media/webaudio/ConvolverNode.cpp
dom/media/webaudio/DelayNode.cpp
dom/media/webaudio/DynamicsCompressorNode.cpp
dom/media/webaudio/GainNode.cpp
dom/media/webaudio/OscillatorNode.cpp
dom/media/webaudio/PannerNode.cpp
dom/media/webaudio/ScriptProcessorNode.cpp
dom/media/webaudio/StereoPannerNode.cpp
dom/media/webaudio/WaveShaperNode.cpp
--- a/dom/media/webaudio/AnalyserNode.cpp
+++ b/dom/media/webaudio/AnalyserNode.cpp
@@ -54,16 +54,17 @@ class AnalyserNodeEngine final : public 
 public:
   explicit AnalyserNodeEngine(AnalyserNode* aNode)
     : AudioNodeEngine(aNode)
   {
     MOZ_ASSERT(NS_IsMainThread());
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
+                            GraphTime aFrom,
                             const AudioBlock& aInput,
                             AudioBlock* aOutput,
                             bool* aFinished) override
   {
     *aOutput = aInput;
 
     if (aInput.IsNull()) {
       // If AnalyserNode::mChunks has only null chunks, then there is no need
--- a/dom/media/webaudio/AudioBufferSourceNode.cpp
+++ b/dom/media/webaudio/AudioBufferSourceNode.cpp
@@ -453,27 +453,28 @@ public:
 
     detune = std::min(std::max(-1200.f, detune), 1200.f);
 
     int32_t outRate = ComputeFinalOutSampleRate(playbackRate, detune);
     UpdateResampler(outRate, aChannels);
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
+                            GraphTime aFrom,
                             const AudioBlock& aInput,
                             AudioBlock* aOutput,
                             bool* aFinished) override
   {
     if (mBufferSampleRate == 0) {
       // start() has not yet been called or no buffer has yet been set
       aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
       return;
     }
 
-    StreamTime streamPosition = aStream->GetCurrentPosition();
+    StreamTime streamPosition = aStream->GraphTimeToStreamTime(aFrom);
     // We've finished if we've gone past mStop, or if we're past mDuration when
     // looping is disabled.
     if (streamPosition >= mStop ||
         (!mLoop && mBufferPosition >= mBufferEnd && !mRemainingResamplerTail)) {
       *aFinished = true;
       aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
       return;
     }
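The finish test above is the canonical shape of the conversion in this patch: engines that compared GetCurrentPosition() against scheduled start/stop ticks now compare GraphTimeToStreamTime(aFrom) instead. As a self-contained restatement of that predicate (the helper name and signature are made up for illustration):

    #include <cstdint>

    typedef int64_t StreamTime;

    // Finished when playback has reached the scheduled stop tick, or when a
    // non-looping source has consumed its buffer and drained the resampler.
    bool SourceFinished(StreamTime aStreamPosition, StreamTime aStop,
                        bool aLoop, int64_t aBufferPosition,
                        int64_t aBufferEnd, uint32_t aResamplerTailFrames) {
      return aStreamPosition >= aStop ||
             (!aLoop && aBufferPosition >= aBufferEnd &&
              aResamplerTailFrames == 0);
    }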
--- a/dom/media/webaudio/AudioDestinationNode.cpp
+++ b/dom/media/webaudio/AudioDestinationNode.cpp
@@ -41,16 +41,17 @@ public:
     , mNumberOfChannels(aNumberOfChannels)
     , mLength(aLength)
     , mSampleRate(aSampleRate)
     , mBufferAllocated(false)
   {
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
+                            GraphTime aFrom,
                             const AudioBlock& aInput,
                             AudioBlock* aOutput,
                             bool* aFinished) override
   {
     // Do this just for the sake of political correctness; this output
     // will not go anywhere.
     *aOutput = aInput;
 
@@ -236,16 +237,17 @@ public:
     , mVolume(1.0f)
     , mLastInputMuted(true)
     , mSuspended(false)
   {
     MOZ_ASSERT(aNode);
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
+                            GraphTime aFrom,
                             const AudioBlock& aInput,
                             AudioBlock* aOutput,
                             bool* aFinished) override
   {
     *aOutput = aInput;
     aOutput->mVolume *= mVolume;
 
     if (mSuspended) {
--- a/dom/media/webaudio/AudioNodeEngine.cpp
+++ b/dom/media/webaudio/AudioNodeEngine.cpp
@@ -268,16 +268,17 @@ AudioBufferSumOfSquares(const float* aIn
     sum += *aInput * *aInput;
     ++aInput;
   }
   return sum;
 }
 
 void
 AudioNodeEngine::ProcessBlock(AudioNodeStream* aStream,
+                              GraphTime aFrom,
                               const AudioBlock& aInput,
                               AudioBlock* aOutput,
                               bool* aFinished)
 {
   MOZ_ASSERT(mInputCount <= 1 && mOutputCount <= 1);
   *aOutput = aInput;
 }
 
--- a/dom/media/webaudio/AudioNodeEngine.h
+++ b/dom/media/webaudio/AudioNodeEngine.h
@@ -310,25 +310,27 @@ public:
    * and to have been resampled to the sampling rate for the stream, and to have
    * exactly WEBAUDIO_BLOCK_SIZE samples.
    * *aFinished is set to false by the caller. The callee must not set this to
    * true unless silent output is produced. If set to true, we'll finish the
    * stream, consider this input inactive on any downstream nodes, and not
    * call this again.
    */
   virtual void ProcessBlock(AudioNodeStream* aStream,
+                            GraphTime aFrom,
                             const AudioBlock& aInput,
                             AudioBlock* aOutput,
                             bool* aFinished);
   /**
    * Produce the next block of audio samples, before input is provided.
    * ProcessBlock() will be called later, and it then should not change
    * aOutput.  This is used only for DelayNodeEngine in a feedback loop.
    */
-  virtual void ProduceBlockBeforeInput(AudioBlock* aOutput)
+  virtual void ProduceBlockBeforeInput(GraphTime aFrom,
+                                       AudioBlock* aOutput)
   {
     NS_NOTREACHED("ProduceBlockBeforeInput called on wrong engine\n");
   }
 
   /**
    * Produce the next block of audio samples, given input samples in the aInput
    * array.  There is one input sample per active port in aInput, in order.
    * This is the multi-input/output version of ProcessBlock.  Only one kind
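For reference, a compilable sketch of what an engine looks like against the new ProcessBlock() signature. The types here are simplified stand-ins for the Gecko classes, and GraphTimeToStreamTime is modeled as an identity mapping; none of this is the real tree.

    #include <cstdint>

    typedef int64_t GraphTime;
    typedef int64_t StreamTime;

    struct AudioBlock {};  // stands in for the real 128-frame block

    struct AudioNodeStream {
      // Stand-in: the real conversion accounts for the stream's start time
      // and any blocked intervals.
      StreamTime GraphTimeToStreamTime(GraphTime aFrom) const { return aFrom; }
    };

    struct AudioNodeEngine {
      virtual ~AudioNodeEngine() {}
      virtual void ProcessBlock(AudioNodeStream* aStream,
                                GraphTime aFrom,  // new in this patch
                                const AudioBlock& aInput,
                                AudioBlock* aOutput,
                                bool* aFinished) = 0;
    };

    struct ExampleEngine final : AudioNodeEngine {
      virtual void ProcessBlock(AudioNodeStream* aStream,
                                GraphTime aFrom,
                                const AudioBlock& aInput,
                                AudioBlock* aOutput,
                                bool* aFinished) override {
        // Engines that used to call the removed GetCurrentPosition() now
        // derive their timeline position from aFrom:
        StreamTime pos = aStream->GraphTimeToStreamTime(aFrom);
        (void)pos;
        *aOutput = aInput;  // leave *aFinished false: this engine never ends
      }
    };

    int main() { return 0; }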
--- a/dom/media/webaudio/AudioNodeStream.cpp
+++ b/dom/media/webaudio/AudioNodeStream.cpp
@@ -536,17 +536,18 @@ AudioNodeStream::ProcessInput(GraphTime 
       ObtainInputBlock(mInputChunks[i], i);
     }
     bool finished = false;
     if (mPassThrough) {
       MOZ_ASSERT(outputCount == 1, "For now, we only support nodes that have one output port");
       mLastChunks[0] = mInputChunks[0];
     } else {
       if (maxInputs <= 1 && outputCount <= 1) {
-        mEngine->ProcessBlock(this, mInputChunks[0], &mLastChunks[0], &finished);
+        mEngine->ProcessBlock(this, aFrom,
+                              mInputChunks[0], &mLastChunks[0], &finished);
       } else {
         mEngine->ProcessBlocksOnPorts(this, mInputChunks, mLastChunks, &finished);
       }
     }
     for (uint16_t i = 0; i < outputCount; ++i) {
       NS_ASSERTION(mLastChunks[i].GetDuration() == WEBAUDIO_BLOCK_SIZE,
                    "Invalid WebAudio chunk size");
     }
@@ -581,17 +582,17 @@ AudioNodeStream::ProduceOutputBeforeInpu
   MOZ_ASSERT(mEngine->OutputCount() == 1,
              "DelayNodeEngine output count should be 1");
   MOZ_ASSERT(!InMutedCycle(), "DelayNodes should break cycles");
   MOZ_ASSERT(mLastChunks.Length() == 1);
 
   if (!mIsActive) {
     mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
   } else {
-    mEngine->ProduceBlockBeforeInput(&mLastChunks[0]);
+    mEngine->ProduceBlockBeforeInput(aFrom, &mLastChunks[0]);
     NS_ASSERTION(mLastChunks[0].GetDuration() == WEBAUDIO_BLOCK_SIZE,
                  "Invalid WebAudio chunk size");
     if (mDisabledTrackIDs.Contains(static_cast<TrackID>(AUDIO_TRACK))) {
       mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
     }
   }
 }
 
@@ -615,23 +616,16 @@ AudioNodeStream::AdvanceOutputSegment()
     AudioChunk copyChunk = mLastChunks[0].AsAudioChunk();
     AudioSegment tmpSegment;
     tmpSegment.AppendAndConsumeChunk(&copyChunk);
     l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK,
                                 segment->GetDuration(), 0, tmpSegment);
   }
 }
 
-StreamTime
-AudioNodeStream::GetCurrentPosition()
-{
-  NS_ASSERTION(!mFinished, "Don't create another track after finishing");
-  return EnsureTrack(AUDIO_TRACK)->Get<AudioSegment>()->GetDuration();
-}
-
 void
 AudioNodeStream::FinishOutput()
 {
   if (IsFinishedOnGraphThread()) {
     return;
   }
 
   StreamBuffer::Track* track = EnsureTrack(AUDIO_TRACK);
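The deleted GetCurrentPosition() read the position back out of the stream's audio track (the track's accumulated AudioSegment duration), which is what forced the track to exist for timing purposes; that is the "making the track unnecessary" in the commit message. A rough model of the replacement conversion, with the caveat that the real MediaStream::GraphTimeToStreamTime also subtracts time the stream spent blocked:

    #include <cstdint>

    typedef int64_t GraphTime;
    typedef int64_t StreamTime;

    // Rough model (an assumption for illustration, not the Gecko code):
    // translate a graph-timeline instant into the stream's own timeline by
    // removing the graph time that elapsed before the stream started.
    struct StreamTimeModel {
      GraphTime mStartTime;  // graph time at which this stream began playing

      StreamTime GraphTimeToStreamTime(GraphTime aTime) const {
        return aTime - mStartTime;
      }
    };

    int main() {
      StreamTimeModel s = { 256 };  // stream started 256 ticks into the graph
      return s.GraphTimeToStreamTime(384) == 128 ? 0 : 1;
    }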
--- a/dom/media/webaudio/AudioNodeStream.h
+++ b/dom/media/webaudio/AudioNodeStream.h
@@ -117,17 +117,16 @@ public:
                                       ChannelInterpretation aChannelInterpretation);
   virtual void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) override;
   /**
    * Produce the next block of output, before input is provided.
    * ProcessInput() will be called later, and it then should not change
    * the output.  This is used only for DelayNodeEngine in a feedback loop.
    */
   void ProduceOutputBeforeInput(GraphTime aFrom);
-  StreamTime GetCurrentPosition();
   bool IsAudioParamStream() const
   {
     return mAudioParamStream;
   }
 
   const OutputChunks& LastChunks() const
   {
     return mLastChunks;
--- a/dom/media/webaudio/BiquadFilterNode.cpp
+++ b/dom/media/webaudio/BiquadFilterNode.cpp
@@ -133,16 +133,17 @@ public:
       WebAudioUtils::ConvertAudioParamToTicks(mGain, mSource, mDestination);
       break;
     default:
       NS_ERROR("Bad BiquadFilterNodeEngine TimelineParameter");
     }
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
+                            GraphTime aFrom,
                             const AudioBlock& aInput,
                             AudioBlock* aOutput,
                             bool* aFinished) override
   {
     float inputBuffer[WEBAUDIO_BLOCK_SIZE];
 
     if (aInput.IsNull()) {
       bool hasTail = false;
@@ -181,17 +182,17 @@ public:
 
       // Adjust the number of biquads based on the number of channels
       mBiquads.SetLength(aInput.ChannelCount());
     }
 
     uint32_t numberOfChannels = mBiquads.Length();
     aOutput->AllocateChannels(numberOfChannels);
 
-    StreamTime pos = aStream->GetCurrentPosition();
+    StreamTime pos = aStream->GraphTimeToStreamTime(aFrom);
 
     double freq = mFrequency.GetValueAtTime(pos);
     double q = mQ.GetValueAtTime(pos);
     double gain = mGain.GetValueAtTime(pos);
     double detune = mDetune.GetValueAtTime(pos);
 
     for (uint32_t i = 0; i < numberOfChannels; ++i) {
       const float* input;
--- a/dom/media/webaudio/ConvolverNode.cpp
+++ b/dom/media/webaudio/ConvolverNode.cpp
@@ -97,16 +97,17 @@ public:
 
     mReverb = new WebCore::Reverb(mBuffer, mBufferLength,
                                   WEBAUDIO_BLOCK_SIZE,
                                   MaxFFTSize, 2, mUseBackgroundThreads,
                                   mNormalize, mSampleRate);
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
+                            GraphTime aFrom,
                             const AudioBlock& aInput,
                             AudioBlock* aOutput,
                             bool* aFinished) override
   {
     if (!mReverb) {
       aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
       return;
     }
--- a/dom/media/webaudio/DelayNode.cpp
+++ b/dom/media/webaudio/DelayNode.cpp
@@ -71,16 +71,17 @@ public:
       WebAudioUtils::ConvertAudioParamToTicks(mDelay, mSource, mDestination);
       break;
     default:
       NS_ERROR("Bad DelayNodeEngine TimelineParameter");
     }
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
+                            GraphTime aFrom,
                             const AudioBlock& aInput,
                             AudioBlock* aOutput,
                             bool* aFinished) override
   {
     MOZ_ASSERT(mSource == aStream, "Invalid source stream");
     MOZ_ASSERT(aStream->SampleRate() == mDestination->SampleRate());
 
     if (!aInput.IsSilentOrSubnormal()) {
@@ -110,60 +111,61 @@ public:
       return;
     }
 
     mBuffer.Write(aInput);
 
     // Skip output update if mLastChunks has already been set by
     // ProduceBlockBeforeInput() when in a cycle.
     if (!mHaveProducedBeforeInput) {
-      UpdateOutputBlock(aOutput, 0.0);
+      UpdateOutputBlock(aFrom, aOutput, 0.0);
     }
     mHaveProducedBeforeInput = false;
     mBuffer.NextBlock();
   }
 
-  void UpdateOutputBlock(AudioBlock* aOutput, double minDelay)
+  void UpdateOutputBlock(GraphTime aFrom, AudioBlock* aOutput, double minDelay)
   {
     double maxDelay = mMaxDelay;
     double sampleRate = mSource->SampleRate();
     ChannelInterpretation channelInterpretation =
       mSource->GetChannelInterpretation();
     if (mDelay.HasSimpleValue()) {
       // If this DelayNode is in a cycle, make sure the delay value is at least
       // one block, even if that is greater than maxDelay.
       double delayFrames = mDelay.GetValue() * sampleRate;
       double delayFramesClamped =
         std::max(minDelay, std::min(delayFrames, maxDelay));
       mBuffer.Read(delayFramesClamped, aOutput, channelInterpretation);
     } else {
       // Compute the delay values for the duration of the input AudioChunk
       // If this DelayNode is in a cycle, make sure the delay value is at least
       // one block.
-      StreamTime tick = mSource->GetCurrentPosition();
+      StreamTime tick = mSource->GraphTimeToStreamTime(aFrom);
       float values[WEBAUDIO_BLOCK_SIZE];
       mDelay.GetValuesAtTime(tick, values,WEBAUDIO_BLOCK_SIZE);
 
       double computedDelay[WEBAUDIO_BLOCK_SIZE];
       for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) {
         double delayAtTick = values[counter] * sampleRate;
         double delayAtTickClamped =
           std::max(minDelay, std::min(delayAtTick, maxDelay));
         computedDelay[counter] = delayAtTickClamped;
       }
       mBuffer.Read(computedDelay, aOutput, channelInterpretation);
     }
   }
 
-  virtual void ProduceBlockBeforeInput(AudioBlock* aOutput) override
+  virtual void ProduceBlockBeforeInput(GraphTime aFrom,
+                                       AudioBlock* aOutput) override
   {
     if (mLeftOverData <= 0) {
       aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
     } else {
-      UpdateOutputBlock(aOutput, WEBAUDIO_BLOCK_SIZE);
+      UpdateOutputBlock(aFrom, aOutput, WEBAUDIO_BLOCK_SIZE);
     }
     mHaveProducedBeforeInput = true;
   }
 
   virtual bool IsActive() const override
   {
     return mLeftOverData != INT32_MIN;
   }
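The clamping in UpdateOutputBlock() above is what keeps a DelayNode usable inside a feedback cycle: ProduceBlockBeforeInput() passes minDelay = WEBAUDIO_BLOCK_SIZE, so the read position never reaches into the block currently being written. A standalone sketch of that per-frame clamp (the function name and signature are illustrative, not from the tree):

    #include <algorithm>
    #include <cstddef>

    static const size_t WEBAUDIO_BLOCK_SIZE = 128;

    // Convert per-frame delay values (seconds) to frame counts, clamped to
    // [aMinDelay, aMaxDelay]. In a cycle, aMinDelay is one full block.
    void ComputeClampedDelayFrames(const float aDelaySeconds[WEBAUDIO_BLOCK_SIZE],
                                   double aSampleRate,
                                   double aMinDelay, double aMaxDelay,
                                   double aOutFrames[WEBAUDIO_BLOCK_SIZE]) {
      for (size_t i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) {
        double frames = aDelaySeconds[i] * aSampleRate;
        aOutFrames[i] = std::max(aMinDelay, std::min(frames, aMaxDelay));
      }
    }

    int main() {
      float delays[WEBAUDIO_BLOCK_SIZE] = { 0.0f };  // 0 s requested everywhere
      double frames[WEBAUDIO_BLOCK_SIZE];
      // With aMinDelay = WEBAUDIO_BLOCK_SIZE the result is one block, not 0.
      ComputeClampedDelayFrames(delays, 44100.0, WEBAUDIO_BLOCK_SIZE, 44100.0,
                                frames);
      return frames[0] == WEBAUDIO_BLOCK_SIZE ? 0 : 1;
    }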
--- a/dom/media/webaudio/DynamicsCompressorNode.cpp
+++ b/dom/media/webaudio/DynamicsCompressorNode.cpp
@@ -88,16 +88,17 @@ public:
       WebAudioUtils::ConvertAudioParamToTicks(mRelease, mSource, mDestination);
       break;
     default:
       NS_ERROR("Bad DynamicsCompresssorNodeEngine TimelineParameter");
     }
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
+                            GraphTime aFrom,
                             const AudioBlock& aInput,
                             AudioBlock* aOutput,
                             bool* aFinished) override
   {
     if (aInput.IsNull()) {
       // Just output silence
       *aOutput = aInput;
       return;
@@ -105,17 +106,17 @@ public:
 
     const uint32_t channelCount = aInput.ChannelCount();
     if (mCompressor->numberOfChannels() != channelCount) {
       // Create a new compressor object with a new channel count
       mCompressor = new WebCore::DynamicsCompressor(aStream->SampleRate(),
                                                     aInput.ChannelCount());
     }
 
-    StreamTime pos = aStream->GetCurrentPosition();
+    StreamTime pos = aStream->GraphTimeToStreamTime(aFrom);
     mCompressor->setParameterValue(DynamicsCompressor::ParamThreshold,
                                    mThreshold.GetValueAtTime(pos));
     mCompressor->setParameterValue(DynamicsCompressor::ParamKnee,
                                    mKnee.GetValueAtTime(pos));
     mCompressor->setParameterValue(DynamicsCompressor::ParamRatio,
                                    mRatio.GetValueAtTime(pos));
     mCompressor->setParameterValue(DynamicsCompressor::ParamAttack,
                                    mAttack.GetValueAtTime(pos));
--- a/dom/media/webaudio/GainNode.cpp
+++ b/dom/media/webaudio/GainNode.cpp
@@ -54,16 +54,17 @@ public:
       WebAudioUtils::ConvertAudioParamToTicks(mGain, mSource, mDestination);
       break;
     default:
       NS_ERROR("Bad GainNodeEngine TimelineParameter");
     }
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
+                            GraphTime aFrom,
                             const AudioBlock& aInput,
                             AudioBlock* aOutput,
                             bool* aFinished) override
   {
     MOZ_ASSERT(mSource == aStream, "Invalid source stream");
 
     if (aInput.IsNull()) {
       // If input is silent, so is the output
@@ -79,17 +80,17 @@ public:
       }
     } else {
       // First, compute a vector of gains for each track tick based on the
       // timeline at hand, and then for each channel, multiply the values
       // in the buffer with the gain vector.
       aOutput->AllocateChannels(aInput.ChannelCount());
 
       // Compute the gain values for the duration of the input AudioChunk
-      StreamTime tick = aStream->GetCurrentPosition();
+      StreamTime tick = aStream->GraphTimeToStreamTime(aFrom);
       float computedGain[WEBAUDIO_BLOCK_SIZE];
       mGain.GetValuesAtTime(tick, computedGain, WEBAUDIO_BLOCK_SIZE);
 
       for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) {
         computedGain[counter] *= aInput.mVolume;
       }
 
       // Apply the gain to the output buffer
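The comment above describes GainNode's a-rate path: sample the gain timeline once per frame, pre-scale by the input chunk's intrinsic volume, then multiply each channel by the resulting gain vector. A standalone sketch of those two steps (GainTimelineModel is a stand-in for the AudioParamTimeline, not the tree's type):

    #include <cstddef>

    static const size_t WEBAUDIO_BLOCK_SIZE = 128;

    struct GainTimelineModel {
      float mValue;  // pretend the timeline is constant
      void GetValuesAtTime(long long /*aTick*/, float* aOut,
                           size_t aCount) const {
        for (size_t i = 0; i < aCount; ++i) aOut[i] = mValue;
      }
    };

    // (1) sample the gain per frame, scaled by the chunk volume;
    // (2) multiply the channel buffer by that gain vector, in place.
    void ApplyGain(const GainTimelineModel& aGain, float aChunkVolume,
                   float* aChannel /* WEBAUDIO_BLOCK_SIZE frames */,
                   long long aTick) {
      float computedGain[WEBAUDIO_BLOCK_SIZE];
      aGain.GetValuesAtTime(aTick, computedGain, WEBAUDIO_BLOCK_SIZE);
      for (size_t i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) {
        aChannel[i] *= computedGain[i] * aChunkVolume;
      }
    }

    int main() { return 0; }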
--- a/dom/media/webaudio/OscillatorNode.cpp
+++ b/dom/media/webaudio/OscillatorNode.cpp
@@ -281,23 +281,24 @@ public:
   }
 
   void ComputeSilence(AudioBlock *aOutput)
   {
     aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
+                            GraphTime aFrom,
                             const AudioBlock& aInput,
                             AudioBlock* aOutput,
                             bool* aFinished) override
   {
     MOZ_ASSERT(mSource == aStream, "Invalid source stream");
 
-    StreamTime ticks = aStream->GetCurrentPosition();
+    StreamTime ticks = aStream->GraphTimeToStreamTime(aFrom);
     if (mStart == -1) {
       ComputeSilence(aOutput);
       return;
     }
 
     if (ticks >= mStop) {
       // We've finished playing.
       ComputeSilence(aOutput);
--- a/dom/media/webaudio/PannerNode.cpp
+++ b/dom/media/webaudio/PannerNode.cpp
@@ -130,16 +130,17 @@ public:
     case PannerNode::CONE_OUTER_ANGLE: mConeOuterAngle = aParam; break;
     case PannerNode::CONE_OUTER_GAIN: mConeOuterGain = aParam; break;
     default:
       NS_ERROR("Bad PannerNodeEngine DoubleParameter");
     }
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
+                            GraphTime aFrom,
                             const AudioBlock& aInput,
                             AudioBlock* aOutput,
                             bool *aFinished) override
   {
     if (aInput.IsNull()) {
       // mLeftOverData != INT_MIN means that the panning model was HRTF and a
       // tail-time reference was added.  Even if the model is now equalpower,
       // the reference will need to be removed.
--- a/dom/media/webaudio/ScriptProcessorNode.cpp
+++ b/dom/media/webaudio/ScriptProcessorNode.cpp
@@ -275,16 +275,17 @@ public:
         mIsConnected = aParam;
         break;
       default:
         NS_ERROR("Bad Int32Parameter");
     } // End index switch.
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
+                            GraphTime aFrom,
                             const AudioBlock& aInput,
                             AudioBlock* aOutput,
                             bool* aFinished) override
   {
     // This node is not connected to anything. Per spec, we don't fire the
     // onaudioprocess event. We also want to clear out the input and output
     // buffer queue, and output a null buffer.
     if (!mIsConnected) {
@@ -323,17 +324,17 @@ public:
     mInputWriteIndex += aInput.GetDuration();
 
     // Now, see if we have data to output
     // Note that we need to do this before sending the buffer to the main
     // thread so that our delay time is updated.
     *aOutput = mSharedBuffers->GetOutputBuffer();
 
     if (mInputWriteIndex >= mBufferSize) {
-      SendBuffersToMainThread(aStream);
+      SendBuffersToMainThread(aStream, aFrom);
       mInputWriteIndex -= mBufferSize;
     }
   }
 
   virtual bool IsActive() const override
   {
     // Could return false when !mIsConnected after all output chunks produced
     // by main thread events calling
@@ -354,22 +355,22 @@ public:
   }
 
   virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override
   {
     return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
   }
 
 private:
-  void SendBuffersToMainThread(AudioNodeStream* aStream)
+  void SendBuffersToMainThread(AudioNodeStream* aStream, GraphTime aFrom)
   {
     MOZ_ASSERT(!NS_IsMainThread());
 
     // we now have a full input buffer ready to be sent to the main thread.
-    StreamTime playbackTick = mSource->GetCurrentPosition();
+    StreamTime playbackTick = mSource->GraphTimeToStreamTime(aFrom);
     // Add the duration of the current sample
     playbackTick += WEBAUDIO_BLOCK_SIZE;
     // Add the delay caused by the main thread
     playbackTick += mSharedBuffers->DelaySoFar();
     // Compute the playback time in the coordinate system of the destination
     double playbackTime =
       mSource->DestinationTimeFromTicks(mDestination, playbackTick);
 
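The arithmetic in SendBuffersToMainThread() above predicts when the buffered input will actually be heard: the stream position at aFrom, plus the block being produced now, plus however much output the main thread is behind by. A simplified worked version (DestinationTimeFromTicks is modeled as a plain ticks-to-seconds division, which ignores the offset between the two streams):

    #include <cstdint>

    typedef int64_t StreamTime;
    static const StreamTime WEBAUDIO_BLOCK_SIZE = 128;

    // Model of the playback-time estimate for an onaudioprocess event.
    double EstimatePlaybackTime(StreamTime aTickAtFrom,  // GraphTimeToStreamTime(aFrom)
                                StreamTime aDelaySoFar,  // main-thread backlog
                                double aSampleRate) {
      StreamTime playbackTick = aTickAtFrom
                                + WEBAUDIO_BLOCK_SIZE  // the block produced now
                                + aDelaySoFar;         // queued, unplayed output
      return static_cast<double>(playbackTick) / aSampleRate;
    }

    int main() {
      // E.g. 44100 ticks in, no backlog, at 44.1 kHz: heard ~1.0029 s in.
      double t = EstimatePlaybackTime(44100, 0, 44100.0);
      return t > 1.0 ? 0 : 1;
    }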
--- a/dom/media/webaudio/StereoPannerNode.cpp
+++ b/dom/media/webaudio/StereoPannerNode.cpp
@@ -104,16 +104,17 @@ public:
       for (uint32_t channel = 0; channel < 2; channel++) {
         float* output = aOutput->ChannelFloatsForWrite(channel);
         PodCopy(output, input, WEBAUDIO_BLOCK_SIZE);
       }
     }
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
+                            GraphTime aFrom,
                             const AudioBlock& aInput,
                             AudioBlock* aOutput,
                             bool *aFinished) override
   {
     MOZ_ASSERT(mSource == aStream, "Invalid source stream");
 
     // The output of this node is always stereo, no matter what the inputs are.
     MOZ_ASSERT(aInput.ChannelCount() <= 2);
@@ -141,17 +142,17 @@ public:
                            gainR * aInput.mVolume,
                            panning <= 0);
       }
     } else {
       float computedGain[2][WEBAUDIO_BLOCK_SIZE];
       bool onLeft[WEBAUDIO_BLOCK_SIZE];
 
       float values[WEBAUDIO_BLOCK_SIZE];
-      StreamTime tick = aStream->GetCurrentPosition();
+      StreamTime tick = aStream->GraphTimeToStreamTime(aFrom);
       mPan.GetValuesAtTime(tick, values, WEBAUDIO_BLOCK_SIZE);
 
       for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) {
         float left, right;
         GetGainValuesForPanning(values[counter], monoToStereo, left, right);
 
         computedGain[0][counter] = left * aInput.mVolume;
         computedGain[1][counter] = right * aInput.mVolume;
--- a/dom/media/webaudio/WaveShaperNode.cpp
+++ b/dom/media/webaudio/WaveShaperNode.cpp
@@ -210,16 +210,17 @@ public:
           aOutputBuffer[j] = (1.0f - interpolationFactor) * mCurve[indexLower] +
                                      interpolationFactor  * mCurve[indexHigher];
         }
       }
     }
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
+                            GraphTime aFrom,
                             const AudioBlock& aInput,
                             AudioBlock* aOutput,
                             bool* aFinished) override
   {
     uint32_t channelCount = aInput.ChannelCount();
     if (!mCurve.Length() || !channelCount) {
       // Optimize the case where we don't have a curve buffer,
       // or the input is null.