b=972678 move stream time methods from WebAudioUtils to AudioNodeStream r=padenot
author Karl Tomlinson <karlt+@karlt.net>
Mon, 17 Feb 2014 09:46:56 +1300
changeset 169437 49f896f2264116e6698eba6fd00d5ed0c1a854e8
parent 169436 a310ac56a47b33cb5633207c2a1f6163ff4715dc
child 169438 085ca09d085e9e7ee5d5174b59a3d7449af7be3b
push id270
push user pvanderbeken@mozilla.com
push date Thu, 06 Mar 2014 09:24:21 +0000
reviewers padenot
bugs 972678
milestone30.0a1
b=972678 move stream time methods from WebAudioUtils to AudioNodeStream r=padenot Trying to clear up which stream parameter is which.
content/media/AudioNodeStream.cpp
content/media/AudioNodeStream.h
content/media/webaudio/ScriptProcessorNode.cpp
content/media/webaudio/WebAudioUtils.cpp
content/media/webaudio/WebAudioUtils.h
--- a/content/media/AudioNodeStream.cpp
+++ b/content/media/AudioNodeStream.cpp
@@ -55,19 +55,17 @@ AudioNodeStream::SetStreamTimeParameter(
       aContext->DestinationStream(),
       aContext->DOMTimeToStreamTime(aStreamTime)));
 }
 
 void
 AudioNodeStream::SetStreamTimeParameterImpl(uint32_t aIndex, MediaStream* aRelativeToStream,
                                             double aStreamTime)
 {
-  TrackTicks ticks =
-      WebAudioUtils::ConvertDestinationStreamTimeToSourceStreamTime(
-          aStreamTime, this, aRelativeToStream);
+  TrackTicks ticks = TicksFromDestinationTime(aRelativeToStream, aStreamTime);
   mEngine->SetStreamTimeParameter(aIndex, ticks);
 }
 
 void
 AudioNodeStream::SetDoubleParameter(uint32_t aIndex, double aValue)
 {
   class Message : public ControlMessage {
   public:
@@ -512,9 +510,31 @@ AudioNodeStream::FinishOutput()
     AudioSegment emptySegment;
     l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK,
                                 mSampleRate,
                                 track->GetSegment()->GetDuration(),
                                 MediaStreamListener::TRACK_EVENT_ENDED, emptySegment);
   }
 }
 
+TrackTicks
+AudioNodeStream::TicksFromDestinationTime(MediaStream* aDestination,
+                                          double aSeconds)
+{
+  StreamTime streamTime = std::max<MediaTime>(0, SecondsToMediaTime(aSeconds));
+  GraphTime graphTime = aDestination->StreamTimeToGraphTime(streamTime);
+  StreamTime thisStreamTime = GraphTimeToStreamTimeOptimistic(graphTime);
+  TrackTicks ticks = TimeToTicksRoundUp(SampleRate(), thisStreamTime);
+  return ticks;
 }
+
+double
+AudioNodeStream::DestinationTimeFromTicks(AudioNodeStream* aDestination,
+                                          TrackTicks aPosition)
+{
+  MOZ_ASSERT(SampleRate() == aDestination->SampleRate());
+  StreamTime sourceTime = TicksToTimeRoundDown(SampleRate(), aPosition);
+  GraphTime graphTime = StreamTimeToGraphTime(sourceTime);
+  StreamTime destinationTime = aDestination->GraphTimeToStreamTimeOptimistic(graphTime);
+  return MediaTimeToSeconds(destinationTime);
+}
+
+}
--- a/content/media/AudioNodeStream.h
+++ b/content/media/AudioNodeStream.h
@@ -123,16 +123,29 @@ public:
   {
     return true;
   }
 
   // Any thread
   AudioNodeEngine* Engine() { return mEngine; }
   TrackRate SampleRate() const { return mSampleRate; }
 
+  /**
+   * Convert a time in seconds on the destination stream to TrackTicks
+   * on this stream.
+   */
+  TrackTicks TicksFromDestinationTime(MediaStream* aDestination,
+                                      double aSeconds);
+  /**
+   * Get the destination stream time in seconds corresponding to a position on
+   * this stream.
+   */
+  double DestinationTimeFromTicks(AudioNodeStream* aDestination,
+                                  TrackTicks aPosition);
+
 protected:
   void AdvanceOutputSegment();
   void FinishOutput();
   void AccumulateInputChunk(uint32_t aInputIndex, const AudioChunk& aChunk,
                             AudioChunk* aBlock,
                             nsTArray<float>* aDownmixBuffer);
   void UpMixDownMixChunk(const AudioChunk* aChunk, uint32_t aOutputChannelCount,
                          nsTArray<const void*>& aOutputChannels,
--- a/content/media/webaudio/ScriptProcessorNode.cpp
+++ b/content/media/webaudio/ScriptProcessorNode.cpp
@@ -309,20 +309,19 @@ private:
 
     // we now have a full input buffer ready to be sent to the main thread.
     TrackTicks playbackTick = mSource->GetCurrentPosition();
     // Add the duration of the current sample
     playbackTick += WEBAUDIO_BLOCK_SIZE;
     // Add the delay caused by the main thread
     playbackTick += mSharedBuffers->DelaySoFar();
     // Compute the playback time in the coordinate system of the destination
+    // FIXME: bug 970773
     double playbackTime =
-      WebAudioUtils::StreamPositionToDestinationTime(playbackTick,
-                                                     mSource,
-                                                     mDestination);
+      mSource->DestinationTimeFromTicks(mDestination, playbackTick);
 
     class Command : public nsRunnable
     {
     public:
       Command(AudioNodeStream* aStream,
               InputChannels& aInputChannels,
               double aPlaybackTime,
               bool aNullInput)
--- a/content/media/webaudio/WebAudioUtils.cpp
+++ b/content/media/webaudio/WebAudioUtils.cpp
@@ -18,45 +18,21 @@ struct ConvertTimeToTickHelper
 {
   AudioNodeStream* mSourceStream;
   AudioNodeStream* mDestinationStream;
 
   static int64_t Convert(double aTime, void* aClosure)
   {
     ConvertTimeToTickHelper* This = static_cast<ConvertTimeToTickHelper*> (aClosure);
     MOZ_ASSERT(This->mSourceStream->SampleRate() == This->mDestinationStream->SampleRate());
-    return WebAudioUtils::ConvertDestinationStreamTimeToSourceStreamTime(
-        aTime, This->mSourceStream, This->mDestinationStream);
+    return This->mSourceStream->
+      TicksFromDestinationTime(This->mDestinationStream, aTime);
   }
 };
 
-TrackTicks
-WebAudioUtils::ConvertDestinationStreamTimeToSourceStreamTime(double aTime,
-                                                              AudioNodeStream* aSource,
-                                                              MediaStream* aDestination)
-{
-  StreamTime streamTime = std::max<MediaTime>(0, SecondsToMediaTime(aTime));
-  GraphTime graphTime = aDestination->StreamTimeToGraphTime(streamTime);
-  StreamTime thisStreamTime = aSource->GraphTimeToStreamTimeOptimistic(graphTime);
-  TrackTicks ticks = TimeToTicksRoundUp(aSource->SampleRate(), thisStreamTime);
-  return ticks;
-}
-
-double
-WebAudioUtils::StreamPositionToDestinationTime(TrackTicks aSourcePosition,
-                                               AudioNodeStream* aSource,
-                                               AudioNodeStream* aDestination)
-{
-  MOZ_ASSERT(aSource->SampleRate() == aDestination->SampleRate());
-  StreamTime sourceTime = TicksToTimeRoundDown(aSource->SampleRate(), aSourcePosition);
-  GraphTime graphTime = aSource->StreamTimeToGraphTime(sourceTime);
-  StreamTime destinationTime = aDestination->GraphTimeToStreamTimeOptimistic(graphTime);
-  return MediaTimeToSeconds(destinationTime);
-}
-
 void
 WebAudioUtils::ConvertAudioParamToTicks(AudioParamTimeline& aParam,
                                         AudioNodeStream* aSource,
                                         AudioNodeStream* aDest)
 {
   MOZ_ASSERT(!aSource || aSource->SampleRate() == aDest->SampleRate());
   ConvertTimeToTickHelper ctth;
   ctth.mSourceStream = aSource;
--- a/content/media/webaudio/WebAudioUtils.h
+++ b/content/media/webaudio/WebAudioUtils.h
@@ -46,25 +46,16 @@ struct WebAudioUtils {
    * over aDuration seconds.
    */
   static double ComputeSmoothingRate(double aDuration, double aSampleRate)
   {
     return 1.0 - std::exp(-1.0 / (aDuration * aSampleRate));
   }
 
   /**
-   * Convert a time in second relative to the destination stream to
-   * TrackTicks relative to the source stream.
-   */
-  static TrackTicks
-  ConvertDestinationStreamTimeToSourceStreamTime(double aTime,
-                                                 AudioNodeStream* aSource,
-                                                 MediaStream* aDestination);
-
-  /**
    * Converts AudioParamTimeline floating point time values to tick values
    * with respect to a source and a destination AudioNodeStream.
    *
    * This needs to be called for each AudioParamTimeline that gets sent to an
    * AudioNodeEngine on the engine side where the AudioParamTimeline is
    * received.  This means that such engines need to be aware of their source
    * and destination streams as well.
    */
@@ -110,24 +101,16 @@ struct WebAudioUtils {
   }
 
   static bool IsTimeValid(double aTime)
   {
     return aTime >= 0 &&  aTime <= (MEDIA_TIME_MAX >> MEDIA_TIME_FRAC_BITS);
   }
 
   /**
-   * Convert a stream position into the time coordinate of the destination
-   * stream.
-   */
-  static double StreamPositionToDestinationTime(TrackTicks aSourcePosition,
-                                                AudioNodeStream* aSource,
-                                                AudioNodeStream* aDestination);
-
-  /**
    * Converts a floating point value to an integral type in a safe and
    * platform agnostic way.  The following program demonstrates the kinds
    * of ways things can go wrong depending on the CPU architecture you're
    * compiling for:
    *
    * #include <stdio.h>
    * volatile float r;
    * int main()