bug 1205558 use destination stream for audio node engine time r=padenot
author: Karl Tomlinson <karlt+@karlt.net>
Tue, 22 Sep 2015 16:34:45 +1200
changeset 265090 13e85dc6b41bf1ce0a6f1d50e290b6159dd79786
parent 265089 c47751b43a71927d2df84074ce2d1114c65e6323
child 265091 983347f21a23dbfe7ebe8b11b7a23d4f786c81a5
push id: 65838
push user: ktomlinson@mozilla.com
push date: Wed, 30 Sep 2015 00:43:54 +0000
treeherder: mozilla-inbound@f1f82e673a34 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: padenot
bugs: 1205558
milestone: 44.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
bug 1205558 use destination stream for audio node engine time r=padenot
dom/media/webaudio/AudioBufferSourceNode.cpp
dom/media/webaudio/AudioNodeStream.cpp
dom/media/webaudio/AudioNodeStream.h
dom/media/webaudio/BiquadFilterNode.cpp
dom/media/webaudio/DelayNode.cpp
dom/media/webaudio/DynamicsCompressorNode.cpp
dom/media/webaudio/GainNode.cpp
dom/media/webaudio/OscillatorNode.cpp
dom/media/webaudio/ScriptProcessorNode.cpp
dom/media/webaudio/StereoPannerNode.cpp
dom/media/webaudio/WebAudioUtils.cpp
--- a/dom/media/webaudio/AudioBufferSourceNode.cpp
+++ b/dom/media/webaudio/AudioBufferSourceNode.cpp
@@ -93,18 +93,17 @@ public:
       NS_ERROR("Bad AudioBufferSourceNodeEngine StreamTimeParameter");
     }
   }
   virtual void SetDoubleParameter(uint32_t aIndex, double aParam) override
   {
     switch (aIndex) {
     case AudioBufferSourceNode::START:
       MOZ_ASSERT(!mStart, "Another START?");
-      mStart =
-        mSource->FractionalTicksFromDestinationTime(mDestination, aParam);
+      mStart = mDestination->SecondsToNearestStreamTime(aParam);
       // Round to nearest
       mBeginProcessing = mStart + 0.5;
       break;
     case AudioBufferSourceNode::DOPPLERSHIFT:
       mDopplerShift = (aParam <= 0 || mozilla::IsNaN(aParam)) ? 1.0 : aParam;
       break;
     default:
       NS_ERROR("Bad AudioBufferSourceNodeEngine double parameter.");
@@ -466,17 +465,17 @@ public:
                             bool* aFinished) override
   {
     if (mBufferSampleRate == 0) {
       // start() has not yet been called or no buffer has yet been set
       aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
       return;
     }
 
-    StreamTime streamPosition = aStream->GraphTimeToStreamTime(aFrom);
+    StreamTime streamPosition = mDestination->GraphTimeToStreamTime(aFrom);
     // We've finished if we've gone past mStop, or if we're past mDuration when
     // looping is disabled.
     if (streamPosition >= mStop ||
         (!mLoop && mBufferPosition >= mBufferEnd && !mRemainingResamplerTail)) {
       *aFinished = true;
       aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
       return;
     }
--- a/dom/media/webaudio/AudioNodeStream.cpp
+++ b/dom/media/webaudio/AudioNodeStream.cpp
@@ -151,17 +151,17 @@ AudioNodeStream::SetStreamTimeParameter(
                                          aContext->DestinationStream(),
                                          aStreamTime));
 }
 
 void
 AudioNodeStream::SetStreamTimeParameterImpl(uint32_t aIndex, MediaStream* aRelativeToStream,
                                             double aStreamTime)
 {
-  StreamTime ticks = TicksFromDestinationTime(aRelativeToStream, aStreamTime);
+  StreamTime ticks = aRelativeToStream->SecondsToNearestStreamTime(aStreamTime);
   mEngine->SetStreamTimeParameter(aIndex, ticks);
 }
 
 void
 AudioNodeStream::SetDoubleParameter(uint32_t aIndex, double aValue)
 {
   class Message final : public ControlMessage
   {
@@ -661,60 +661,16 @@ AudioNodeStream::FinishOutput()
     MediaStreamListener* l = mListeners[j];
     AudioSegment emptySegment;
     l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK,
                                 track->GetSegment()->GetDuration(),
                                 MediaStreamListener::TRACK_EVENT_ENDED, emptySegment);
   }
 }
 
-double
-AudioNodeStream::FractionalTicksFromDestinationTime(AudioNodeStream* aDestination,
-                                                    double aSeconds)
-{
-  MOZ_ASSERT(aDestination->SampleRate() == SampleRate());
-  MOZ_ASSERT(SampleRate() == GraphRate());
-
-  double destinationSeconds = std::max(0.0, aSeconds);
-  double destinationFractionalTicks = destinationSeconds * SampleRate();
-  MOZ_ASSERT(destinationFractionalTicks < STREAM_TIME_MAX);
-  StreamTime destinationStreamTime = destinationFractionalTicks; // round down
-  // MediaTime does not have the resolution of double
-  double offset = destinationFractionalTicks - destinationStreamTime;
-
-  GraphTime graphTime =
-    aDestination->StreamTimeToGraphTime(destinationStreamTime);
-  StreamTime thisStreamTime = GraphTimeToStreamTime(graphTime);
-  double thisFractionalTicks = thisStreamTime + offset;
-  return thisFractionalTicks;
-}
-
-StreamTime
-AudioNodeStream::TicksFromDestinationTime(MediaStream* aDestination,
-                                          double aSeconds)
-{
-  AudioNodeStream* destination = aDestination->AsAudioNodeStream();
-  MOZ_ASSERT(destination);
-
-  double thisSeconds =
-    FractionalTicksFromDestinationTime(destination, aSeconds);
-  return NS_round(thisSeconds);
-}
-
-double
-AudioNodeStream::DestinationTimeFromTicks(AudioNodeStream* aDestination,
-                                          StreamTime aPosition)
-{
-  MOZ_ASSERT(SampleRate() == aDestination->SampleRate());
-
-  GraphTime graphTime = StreamTimeToGraphTime(aPosition);
-  StreamTime destinationTime = aDestination->GraphTimeToStreamTime(graphTime);
-  return StreamTimeToSeconds(destinationTime);
-}
-
 void
 AudioNodeStream::AddInput(MediaInputPort* aPort)
 {
   ProcessedMediaStream::AddInput(aPort);
   AudioNodeStream* ns = aPort->GetSource()->AsAudioNodeStream();
   // Streams that are not AudioNodeStreams are considered active.
   if (!ns || (ns->mIsActive && !ns->IsAudioParamStream())) {
     IncrementActiveInputCount();
--- a/dom/media/webaudio/AudioNodeStream.h
+++ b/dom/media/webaudio/AudioNodeStream.h
@@ -145,35 +145,16 @@ public:
     return ((mFlags & NEED_MAIN_THREAD_FINISHED) && mFinished) ||
       (mFlags & NEED_MAIN_THREAD_CURRENT_TIME);
   }
 
   // Any thread
   AudioNodeEngine* Engine() { return mEngine; }
   TrackRate SampleRate() const { return mSampleRate; }
 
-  /**
-   * Convert a time in seconds on the destination stream to ticks
-   * on this stream, including fractional position between ticks.
-   */
-  double FractionalTicksFromDestinationTime(AudioNodeStream* aDestination,
-                                            double aSeconds);
-  /**
-   * Convert a time in seconds on the destination stream to StreamTime
-   * on this stream.
-   */
-  StreamTime TicksFromDestinationTime(MediaStream* aDestination,
-                                      double aSeconds);
-  /**
-   * Get the destination stream time in seconds corresponding to a position on
-   * this stream.
-   */
-  double DestinationTimeFromTicks(AudioNodeStream* aDestination,
-                                  StreamTime aPosition);
-
   size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override;
   size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override;
 
   void SizeOfAudioNodesIncludingThis(MallocSizeOf aMallocSizeOf,
                                      AudioNodeSizes& aUsage) const;
 
   /*
    * SetActive() is called when either an active input is added or the engine
--- a/dom/media/webaudio/BiquadFilterNode.cpp
+++ b/dom/media/webaudio/BiquadFilterNode.cpp
@@ -182,17 +182,17 @@ public:
 
       // Adjust the number of biquads based on the number of channels
       mBiquads.SetLength(aInput.ChannelCount());
     }
 
     uint32_t numberOfChannels = mBiquads.Length();
     aOutput->AllocateChannels(numberOfChannels);
 
-    StreamTime pos = aStream->GraphTimeToStreamTime(aFrom);
+    StreamTime pos = mDestination->GraphTimeToStreamTime(aFrom);
 
     double freq = mFrequency.GetValueAtTime(pos);
     double q = mQ.GetValueAtTime(pos);
     double gain = mGain.GetValueAtTime(pos);
     double detune = mDetune.GetValueAtTime(pos);
 
     for (uint32_t i = 0; i < numberOfChannels; ++i) {
       const float* input;
--- a/dom/media/webaudio/DelayNode.cpp
+++ b/dom/media/webaudio/DelayNode.cpp
@@ -136,17 +136,17 @@ public:
       double delayFrames = mDelay.GetValue() * sampleRate;
       double delayFramesClamped =
         std::max(minDelay, std::min(delayFrames, maxDelay));
       mBuffer.Read(delayFramesClamped, aOutput, channelInterpretation);
     } else {
       // Compute the delay values for the duration of the input AudioChunk
       // If this DelayNode is in a cycle, make sure the delay value is at least
       // one block.
-      StreamTime tick = mSource->GraphTimeToStreamTime(aFrom);
+      StreamTime tick = mDestination->GraphTimeToStreamTime(aFrom);
       float values[WEBAUDIO_BLOCK_SIZE];
       mDelay.GetValuesAtTime(tick, values,WEBAUDIO_BLOCK_SIZE);
 
       double computedDelay[WEBAUDIO_BLOCK_SIZE];
       for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) {
         double delayAtTick = values[counter] * sampleRate;
         double delayAtTickClamped =
           std::max(minDelay, std::min(delayAtTick, maxDelay));
--- a/dom/media/webaudio/DynamicsCompressorNode.cpp
+++ b/dom/media/webaudio/DynamicsCompressorNode.cpp
@@ -105,17 +105,17 @@ public:
 
     const uint32_t channelCount = aInput.ChannelCount();
     if (mCompressor->numberOfChannels() != channelCount) {
       // Create a new compressor object with a new channel count
       mCompressor = new WebCore::DynamicsCompressor(aStream->SampleRate(),
                                                     aInput.ChannelCount());
     }
 
-    StreamTime pos = aStream->GraphTimeToStreamTime(aFrom);
+    StreamTime pos = mDestination->GraphTimeToStreamTime(aFrom);
     mCompressor->setParameterValue(DynamicsCompressor::ParamThreshold,
                                    mThreshold.GetValueAtTime(pos));
     mCompressor->setParameterValue(DynamicsCompressor::ParamKnee,
                                    mKnee.GetValueAtTime(pos));
     mCompressor->setParameterValue(DynamicsCompressor::ParamRatio,
                                    mRatio.GetValueAtTime(pos));
     mCompressor->setParameterValue(DynamicsCompressor::ParamAttack,
                                    mAttack.GetValueAtTime(pos));
--- a/dom/media/webaudio/GainNode.cpp
+++ b/dom/media/webaudio/GainNode.cpp
@@ -82,17 +82,17 @@ public:
       }
     } else {
       // First, compute a vector of gains for each track tick based on the
       // timeline at hand, and then for each channel, multiply the values
       // in the buffer with the gain vector.
       aOutput->AllocateChannels(aInput.ChannelCount());
 
       // Compute the gain values for the duration of the input AudioChunk
-      StreamTime tick = aStream->GraphTimeToStreamTime(aFrom);
+      StreamTime tick = mDestination->GraphTimeToStreamTime(aFrom);
       float computedGain[WEBAUDIO_BLOCK_SIZE];
       mGain.GetValuesAtTime(tick, computedGain, WEBAUDIO_BLOCK_SIZE);
 
       for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) {
         computedGain[counter] *= aInput.mVolume;
       }
 
       // Apply the gain to the output buffer
--- a/dom/media/webaudio/OscillatorNode.cpp
+++ b/dom/media/webaudio/OscillatorNode.cpp
@@ -290,17 +290,17 @@ public:
   virtual void ProcessBlock(AudioNodeStream* aStream,
                             GraphTime aFrom,
                             const AudioBlock& aInput,
                             AudioBlock* aOutput,
                             bool* aFinished) override
   {
     MOZ_ASSERT(mSource == aStream, "Invalid source stream");
 
-    StreamTime ticks = aStream->GraphTimeToStreamTime(aFrom);
+    StreamTime ticks = mDestination->GraphTimeToStreamTime(aFrom);
     if (mStart == -1) {
       ComputeSilence(aOutput);
       return;
     }
 
     if (ticks >= mStop) {
       // We've finished playing.
       ComputeSilence(aOutput);
--- a/dom/media/webaudio/ScriptProcessorNode.cpp
+++ b/dom/media/webaudio/ScriptProcessorNode.cpp
@@ -360,24 +360,23 @@ public:
   }
 
 private:
   void SendBuffersToMainThread(AudioNodeStream* aStream, GraphTime aFrom)
   {
     MOZ_ASSERT(!NS_IsMainThread());
 
     // we now have a full input buffer ready to be sent to the main thread.
-    StreamTime playbackTick = mSource->GraphTimeToStreamTime(aFrom);
+    StreamTime playbackTick = mDestination->GraphTimeToStreamTime(aFrom);
     // Add the duration of the current sample
     playbackTick += WEBAUDIO_BLOCK_SIZE;
     // Add the delay caused by the main thread
     playbackTick += mSharedBuffers->DelaySoFar();
     // Compute the playback time in the coordinate system of the destination
-    double playbackTime =
-      mSource->DestinationTimeFromTicks(mDestination, playbackTick);
+    double playbackTime = mDestination->StreamTimeToSeconds(playbackTick);
 
     class Command final : public nsRunnable
     {
     public:
       Command(AudioNodeStream* aStream,
               already_AddRefed<ThreadSharedFloatArrayBufferList> aInputBuffer,
               double aPlaybackTime)
         : mStream(aStream)
--- a/dom/media/webaudio/StereoPannerNode.cpp
+++ b/dom/media/webaudio/StereoPannerNode.cpp
@@ -144,17 +144,17 @@ public:
                            gainR * aInput.mVolume,
                            panning <= 0);
       }
     } else {
       float computedGain[2][WEBAUDIO_BLOCK_SIZE];
       bool onLeft[WEBAUDIO_BLOCK_SIZE];
 
       float values[WEBAUDIO_BLOCK_SIZE];
-      StreamTime tick = aStream->GraphTimeToStreamTime(aFrom);
+      StreamTime tick = mDestination->GraphTimeToStreamTime(aFrom);
       mPan.GetValuesAtTime(tick, values, WEBAUDIO_BLOCK_SIZE);
 
       for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) {
         float left, right;
         GetGainValuesForPanning(values[counter], monoToStereo, left, right);
 
         computedGain[0][counter] = left * aInput.mVolume;
         computedGain[1][counter] = right * aInput.mVolume;
--- a/dom/media/webaudio/WebAudioUtils.cpp
+++ b/dom/media/webaudio/WebAudioUtils.cpp
@@ -13,17 +13,17 @@ namespace mozilla {
 namespace dom {
 
 void WebAudioUtils::ConvertAudioTimelineEventToTicks(AudioTimelineEvent& aEvent,
                                                      AudioNodeStream* aSource,
                                                      AudioNodeStream* aDest)
 {
   MOZ_ASSERT(!aSource || aSource->SampleRate() == aDest->SampleRate());
   aEvent.SetTimeInTicks(
-      aSource->TicksFromDestinationTime(aDest, aEvent.Time<double>()));
+      aDest->SecondsToNearestStreamTime(aEvent.Time<double>()));
   aEvent.mTimeConstant *= aSource->SampleRate();
   aEvent.mDuration *= aSource->SampleRate();
 }
 
 void
 WebAudioUtils::Shutdown()
 {
   WebCore::HRTFDatabaseLoader::shutdown();