Bug 1200579 - Stop copying AudioParam timelines. r=karlt
author: Paul Adenot <paul@paul.cx>
Fri, 25 Sep 2015 15:57:55 +0200
changeset 264560 f47605f2babe32429a8920134ce87609d083b990
parent 264559 ccb137011a0c058d7d74f2a1a4164288d97af209
child 264561 adb0582e983dd5e539324afbbc6398014e254984
push id: 29444
push user: cbook@mozilla.com
push date: Mon, 28 Sep 2015 12:17:21 +0000
treeherder: mozilla-central@031db40e2b55 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: karlt
bugs: 1200579
milestone: 44.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1200579 - Stop copying AudioParam timelines. r=karlt
dom/media/webaudio/AudioBufferSourceNode.cpp
dom/media/webaudio/AudioBufferSourceNode.h
dom/media/webaudio/AudioEventTimeline.h
dom/media/webaudio/AudioNode.cpp
dom/media/webaudio/AudioNode.h
dom/media/webaudio/AudioNodeEngine.h
dom/media/webaudio/AudioNodeStream.cpp
dom/media/webaudio/AudioNodeStream.h
dom/media/webaudio/AudioParam.cpp
dom/media/webaudio/AudioParam.h
dom/media/webaudio/AudioParamTimeline.h
dom/media/webaudio/BiquadFilterNode.cpp
dom/media/webaudio/BiquadFilterNode.h
dom/media/webaudio/DelayNode.cpp
dom/media/webaudio/DelayNode.h
dom/media/webaudio/DynamicsCompressorNode.cpp
dom/media/webaudio/DynamicsCompressorNode.h
dom/media/webaudio/GainNode.cpp
dom/media/webaudio/GainNode.h
dom/media/webaudio/OscillatorNode.cpp
dom/media/webaudio/OscillatorNode.h
dom/media/webaudio/StereoPannerNode.cpp
dom/media/webaudio/StereoPannerNode.h
dom/media/webaudio/WebAudioUtils.cpp
dom/media/webaudio/WebAudioUtils.h
dom/media/webaudio/compiledtest/TestAudioEventTimeline.cpp
--- a/dom/media/webaudio/AudioBufferSourceNode.cpp
+++ b/dom/media/webaudio/AudioBufferSourceNode.cpp
@@ -61,28 +61,30 @@ public:
     }
   }
 
   void SetSourceStream(AudioNodeStream* aSource)
   {
     mSource = aSource;
   }
 
-  virtual void SetTimelineParameter(uint32_t aIndex,
-                                    const dom::AudioParamTimeline& aValue,
-                                    TrackRate aSampleRate) override
+  virtual void RecvTimelineEvent(uint32_t aIndex,
+                                 dom::AudioTimelineEvent& aEvent) override
   {
+    MOZ_ASSERT(mSource && mDestination);
+    WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent,
+                                                    mSource,
+                                                    mDestination);
+
     switch (aIndex) {
     case AudioBufferSourceNode::PLAYBACKRATE:
-      mPlaybackRateTimeline = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mPlaybackRateTimeline, mSource, mDestination);
+      mPlaybackRateTimeline.InsertEvent<int64_t>(aEvent);
       break;
     case AudioBufferSourceNode::DETUNE:
-      mDetuneTimeline = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mDetuneTimeline, mSource, mDestination);
+      mDetuneTimeline.InsertEvent<int64_t>(aEvent);
       break;
     default:
       NS_ERROR("Bad AudioBufferSourceNodeEngine TimelineParameter");
     }
   }
   virtual void SetStreamTimeParameter(uint32_t aIndex, StreamTime aParam) override
   {
     switch (aIndex) {
@@ -780,33 +782,35 @@ AudioBufferSourceNode::NotifyMainThreadS
   NS_DispatchToMainThread(new EndedEventDispatcher(this));
 
   // Drop the playing reference
   // Warning: The below line might delete this.
   MarkInactive();
 }
 
 void
-AudioBufferSourceNode::SendPlaybackRateToStream(AudioNode* aNode)
+AudioBufferSourceNode::SendPlaybackRateToStream(AudioNode* aNode,
+                                                const AudioTimelineEvent& aEvent)
 {
   AudioBufferSourceNode* This = static_cast<AudioBufferSourceNode*>(aNode);
   if (!This->mStream) {
     return;
   }
-  SendTimelineParameterToStream(This, PLAYBACKRATE, *This->mPlaybackRate);
+  SendTimelineEventToStream(This, PLAYBACKRATE, aEvent);
 }
 
 void
-AudioBufferSourceNode::SendDetuneToStream(AudioNode* aNode)
+AudioBufferSourceNode::SendDetuneToStream(AudioNode* aNode,
+                                          const AudioTimelineEvent& aEvent)
 {
   AudioBufferSourceNode* This = static_cast<AudioBufferSourceNode*>(aNode);
   if (!This->mStream) {
     return;
   }
-  SendTimelineParameterToStream(This, DETUNE, *This->mDetune);
+  SendTimelineEventToStream(This, DETUNE, aEvent);
 }
 
 void
 AudioBufferSourceNode::SendDopplerShiftToStream(double aDopplerShift)
 {
   MOZ_ASSERT(mStream, "Should have disconnected panner if no stream");
   SendDoubleParameterToStream(DOPPLERSHIFT, aDopplerShift);
 }
--- a/dom/media/webaudio/AudioBufferSourceNode.h
+++ b/dom/media/webaudio/AudioBufferSourceNode.h
@@ -124,18 +124,20 @@ private:
     PLAYBACKRATE,
     DETUNE,
     DOPPLERSHIFT
   };
 
   void SendLoopParametersToStream();
   void SendBufferParameterToStream(JSContext* aCx);
   void SendOffsetAndDurationParametersToStream(AudioNodeStream* aStream);
-  static void SendPlaybackRateToStream(AudioNode* aNode);
-  static void SendDetuneToStream(AudioNode* aNode);
+  static void SendPlaybackRateToStream(AudioNode* aNode,
+                                       const AudioTimelineEvent& aEvent);
+  static void SendDetuneToStream(AudioNode* aNode,
+                                 const AudioTimelineEvent& aEvent);
 
 private:
   double mLoopStart;
   double mLoopEnd;
   double mOffset;
   double mDuration;
   nsRefPtr<AudioBuffer> mBuffer;
   nsRefPtr<AudioParam> mPlaybackRate;
--- a/dom/media/webaudio/AudioEventTimeline.h
+++ b/dom/media/webaudio/AudioEventTimeline.h
@@ -7,92 +7,86 @@
 #ifndef AudioEventTimeline_h_
 #define AudioEventTimeline_h_
 
 #include <algorithm>
 #include "mozilla/Assertions.h"
 #include "mozilla/FloatingPoint.h"
 #include "mozilla/PodOperations.h"
 
+#include "MainThreadUtils.h"
 #include "nsTArray.h"
 #include "math.h"
 #include "WebAudioUtils.h"
 
 namespace mozilla {
 
+class MediaStream;
+
 namespace dom {
 
-// This is an internal helper class and should not be used outside of this header.
 struct AudioTimelineEvent final
 {
   enum Type : uint32_t
   {
     SetValue,
+    SetValueAtTime,
     LinearRamp,
     ExponentialRamp,
     SetTarget,
-    SetValueCurve
+    SetValueCurve,
+    Stream,
+    Cancel
   };
 
   AudioTimelineEvent(Type aType, double aTime, float aValue, double aTimeConstant = 0.0,
-                     float aDuration = 0.0, const float* aCurve = nullptr, uint32_t aCurveLength = 0)
+                     float aDuration = 0.0, const float* aCurve = nullptr,
+                     uint32_t aCurveLength = 0)
     : mType(aType)
     , mTimeConstant(aTimeConstant)
     , mDuration(aDuration)
 #ifdef DEBUG
     , mTimeIsInTicks(false)
 #endif
   {
     mTime = aTime;
     if (aType == AudioTimelineEvent::SetValueCurve) {
       SetCurveParams(aCurve, aCurveLength);
     } else {
       mValue = aValue;
     }
   }
 
+  explicit AudioTimelineEvent(MediaStream* aStream)
+    : mType(Stream)
+    , mStream(aStream)
+#ifdef DEBUG
+    , mTimeIsInTicks(false)
+#endif
+  {
+  }
+
   AudioTimelineEvent(const AudioTimelineEvent& rhs)
   {
     PodCopy(this, &rhs, 1);
+
     if (rhs.mType == AudioTimelineEvent::SetValueCurve) {
       SetCurveParams(rhs.mCurve, rhs.mCurveLength);
+    } else if (rhs.mType == AudioTimelineEvent::Stream) {
+      new (&mStream) decltype(mStream)(rhs.mStream);
     }
   }
 
   ~AudioTimelineEvent()
   {
     if (mType == AudioTimelineEvent::SetValueCurve) {
       delete[] mCurve;
     }
   }
 
-  bool IsValid() const
-  {
-    if (mType == AudioTimelineEvent::SetValueCurve) {
-      if (!mCurve || !mCurveLength) {
-        return false;
-      }
-      for (uint32_t i = 0; i < mCurveLength; ++i) {
-        if (!IsValid(mCurve[i])) {
-          return false;
-        }
-      }
-    }
-
-    if (mType == AudioTimelineEvent::SetTarget &&
-        WebAudioUtils::FuzzyEqual(mTimeConstant, 0.0)) {
-      return false;
-    }
-
-    return IsValid(mTime) &&
-           IsValid(mValue) &&
-           IsValid(mTimeConstant) &&
-           IsValid(mDuration);
-  }
-
   template <class TimeType>
   TimeType Time() const;
 
   void SetTimeInTicks(int64_t aTimeInTicks)
   {
     mTimeInTicks = aTimeInTicks;
 #ifdef DEBUG
     mTimeIsInTicks = true;
@@ -109,54 +103,54 @@ struct AudioTimelineEvent final
     }
   }
 
   Type mType;
   union {
     float mValue;
     uint32_t mCurveLength;
   };
+  // mCurve contains a buffer of SetValueCurve samples.  We sample the
+  // values in the buffer depending on how far along we are in time.
+  // If we're at time T and the event has started as time T0 and has a
+  // duration of D, we sample the buffer at floor(mCurveLength*(T-T0)/D)
+  // if T<T0+D, and just take the last sample in the buffer otherwise.
+  float* mCurve;
+  nsRefPtr<MediaStream> mStream;
+  double mTimeConstant;
+  double mDuration;
+#ifdef DEBUG
+  bool mTimeIsInTicks;
+#endif
+
+private:
+  // This member is accessed using the `Time` method, for safety.
+  //
   // The time for an event can either be in absolute value or in ticks.
   // Initially the time of the event is always in absolute value.
   // In order to convert it to ticks, call SetTimeInTicks.  Once this
   // method has been called for an event, the time cannot be converted
   // back to absolute value.
   union {
     double mTime;
     int64_t mTimeInTicks;
   };
-  // mCurve contains a buffer of SetValueCurve samples.  We sample the
-  // values in the buffer depending on how far along we are in time.
-  // If we're at time T and the event has started as time T0 and has a
-  // duration of D, we sample the buffer at floor(mCurveLength*(T-T0)/D)
-  // if T<T0+D, and just take the last sample in the buffer otherwise.
-  float* mCurve;
-  double mTimeConstant;
-  double mDuration;
-#ifdef DEBUG
-  bool mTimeIsInTicks;
-#endif
-
-private:
-  static bool IsValid(double value)
-  {
-    return mozilla::IsFinite(value);
-  }
 };
 
 template <>
 inline double AudioTimelineEvent::Time<double>() const
 {
   MOZ_ASSERT(!mTimeIsInTicks);
   return mTime;
 }
 
 template <>
 inline int64_t AudioTimelineEvent::Time<int64_t>() const
 {
+  MOZ_ASSERT(!NS_IsMainThread());
   MOZ_ASSERT(mTimeIsInTicks);
   return mTimeInTicks;
 }
 
 /**
  * This class will be instantiated with different template arguments for testing and
  * production code.
  *
@@ -166,17 +160,129 @@ inline int64_t AudioTimelineEvent::Time<
 template <class ErrorResult>
 class AudioEventTimeline
 {
 public:
   explicit AudioEventTimeline(float aDefaultValue)
     : mValue(aDefaultValue),
       mComputedValue(aDefaultValue),
       mLastComputedValue(aDefaultValue)
+  { }
+
+  bool ValidateEvent(AudioTimelineEvent& aEvent,
+                     ErrorResult& aRv)
   {
+    MOZ_ASSERT(NS_IsMainThread());
+
+    // Validate the event itself
+    if (!WebAudioUtils::IsTimeValid(aEvent.template Time<double>()) ||
+        !WebAudioUtils::IsTimeValid(aEvent.mTimeConstant)) {
+      aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
+      return false;
+    }
+
+    if (aEvent.mType == AudioTimelineEvent::SetValueCurve) {
+      if (!aEvent.mCurve || !aEvent.mCurveLength) {
+        aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
+        return false;
+      }
+      for (uint32_t i = 0; i < aEvent.mCurveLength; ++i) {
+        if (!IsValid(aEvent.mCurve[i])) {
+          aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
+          return false;
+        }
+      }
+    }
+
+    if (aEvent.mType == AudioTimelineEvent::SetTarget &&
+        WebAudioUtils::FuzzyEqual(aEvent.mTimeConstant, 0.0)) {
+      aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
+      return false;
+    }
+
+    bool timeAndValueValid = IsValid(aEvent.mValue) &&
+                             IsValid(aEvent.mDuration);
+    if (!timeAndValueValid) {
+      aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
+      return false;
+    }
+
+    // Make sure that non-curve events don't fall within the duration of a
+    // curve event.
+    for (unsigned i = 0; i < mEvents.Length(); ++i) {
+      if (mEvents[i].mType == AudioTimelineEvent::SetValueCurve &&
+          mEvents[i].template Time<double>() <= aEvent.template Time<double>() &&
+          (mEvents[i].template Time<double>() + mEvents[i].mDuration) >= aEvent.template Time<double>()) {
+        aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
+        return false;
+      }
+    }
+
+    // Make sure that curve events don't fall in a range which includes other
+    // events.
+    if (aEvent.mType == AudioTimelineEvent::SetValueCurve) {
+      for (unsigned i = 0; i < mEvents.Length(); ++i) {
+        if (mEvents[i].template Time<double>() > aEvent.template Time<double>() &&
+            mEvents[i].template Time<double>() < (aEvent.template Time<double>() + aEvent.mDuration)) {
+          aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
+          return false;
+        }
+      }
+    }
+
+    // Make sure that invalid values are not used for exponential curves
+    if (aEvent.mType == AudioTimelineEvent::ExponentialRamp) {
+      if (aEvent.mValue <= 0.f) {
+        aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
+        return false;
+      }
+      const AudioTimelineEvent* previousEvent = GetPreviousEvent(aEvent.template Time<double>());
+      if (previousEvent) {
+        if (previousEvent->mValue <= 0.f) {
+          aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
+          return false;
+        }
+      } else {
+        if (mValue <= 0.f) {
+          aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
+          return false;
+        }
+      }
+    }
+    return true;
+  }
+
+  template<typename TimeType>
+  void InsertEvent(const AudioTimelineEvent& aEvent)
+  {
+    for (unsigned i = 0; i < mEvents.Length(); ++i) {
+      if (aEvent.template Time<TimeType>() == mEvents[i].template Time<TimeType>()) {
+        if (aEvent.mType == mEvents[i].mType) {
+          // If times and types are equal, replace the event
+          mEvents.ReplaceElementAt(i, aEvent);
+        } else {
+          // Otherwise, place the element after the last event of another type
+          do {
+            ++i;
+          } while (i < mEvents.Length() &&
+                   aEvent.mType != mEvents[i].mType &&
+                   aEvent.template Time<TimeType>() == mEvents[i].template Time<TimeType>());
+          mEvents.InsertElementAt(i, aEvent);
+        }
+        return;
+      }
+      // Otherwise, place the event right after the latest existing event
+      if (aEvent.template Time<TimeType>() < mEvents[i].template Time<TimeType>()) {
+        mEvents.InsertElementAt(i, aEvent);
+        return;
+      }
+    }
+
+    // If we couldn't find a place for the event, just append it to the list
+    mEvents.AppendElement(aEvent);
   }
 
   bool HasSimpleValue() const
   {
     return mEvents.IsEmpty();
   }
 
   float GetValue() const
@@ -197,48 +303,68 @@ public:
     // Silently don't change anything if there are any events
     if (mEvents.IsEmpty()) {
       mLastComputedValue = mComputedValue = mValue = aValue;
     }
   }
 
   void SetValueAtTime(float aValue, double aStartTime, ErrorResult& aRv)
   {
-    InsertEvent(AudioTimelineEvent(AudioTimelineEvent::SetValue, aStartTime, aValue), aRv);
+    AudioTimelineEvent event(AudioTimelineEvent::SetValueAtTime, aStartTime, aValue);
+
+    if (ValidateEvent(event, aRv)) {
+      InsertEvent<double>(event);
+    }
   }
 
   void LinearRampToValueAtTime(float aValue, double aEndTime, ErrorResult& aRv)
   {
-    InsertEvent(AudioTimelineEvent(AudioTimelineEvent::LinearRamp, aEndTime, aValue), aRv);
+    AudioTimelineEvent event(AudioTimelineEvent::LinearRamp, aEndTime, aValue);
+
+    if (ValidateEvent(event, aRv)) {
+      InsertEvent<double>(event);
+    }
   }
 
   void ExponentialRampToValueAtTime(float aValue, double aEndTime, ErrorResult& aRv)
   {
-    InsertEvent(AudioTimelineEvent(AudioTimelineEvent::ExponentialRamp, aEndTime, aValue), aRv);
+    AudioTimelineEvent event(AudioTimelineEvent::ExponentialRamp, aEndTime, aValue);
+
+    if (ValidateEvent(event, aRv)) {
+      InsertEvent<double>(event);
+    }
   }
 
   void SetTargetAtTime(float aTarget, double aStartTime, double aTimeConstant, ErrorResult& aRv)
   {
-    InsertEvent(AudioTimelineEvent(AudioTimelineEvent::SetTarget, aStartTime, aTarget, aTimeConstant), aRv);
+    AudioTimelineEvent event(AudioTimelineEvent::SetTarget, aStartTime, aTarget, aTimeConstant);
+
+    if (ValidateEvent(event, aRv)) {
+      InsertEvent<double>(event);
+    }
   }
 
   void SetValueCurveAtTime(const float* aValues, uint32_t aValuesLength, double aStartTime, double aDuration, ErrorResult& aRv)
   {
-    InsertEvent(AudioTimelineEvent(AudioTimelineEvent::SetValueCurve, aStartTime, 0.0f, 0.0f, aDuration, aValues, aValuesLength), aRv);
+    AudioTimelineEvent event(AudioTimelineEvent::SetValueCurve, aStartTime, 0.0f, 0.0f, aDuration, aValues, aValuesLength);
+    if (ValidateEvent(event, aRv)) {
+      InsertEvent<double>(event);
+    }
   }
 
-  void CancelScheduledValues(double aStartTime)
+  template<typename TimeType>
+  void CancelScheduledValues(TimeType aStartTime)
   {
     for (unsigned i = 0; i < mEvents.Length(); ++i) {
-      if (mEvents[i].mTime >= aStartTime) {
+      if (mEvents[i].template Time<TimeType>() >= aStartTime) {
 #ifdef DEBUG
         // Sanity check: the array should be sorted, so all of the following
         // events should have a time greater than aStartTime too.
         for (unsigned j = i + 1; j < mEvents.Length(); ++j) {
-          MOZ_ASSERT(mEvents[j].mTime >= aStartTime);
+          MOZ_ASSERT(mEvents[j].template Time<TimeType>() >= aStartTime);
         }
 #endif
         mEvents.TruncateLength(i);
         break;
       }
     }
   }
 
@@ -293,17 +419,17 @@ public:
       mEvents.RemoveElementAt(0);
     }
 
     for (size_t bufferIndex = 0; bufferIndex < aSize; ++bufferIndex, ++aTime) {
       for (; !bailOut && lastEventId < mEvents.Length(); ++lastEventId) {
 
 #ifdef DEBUG
         const AudioTimelineEvent* current = &mEvents[lastEventId];
-        MOZ_ASSERT(current->mType == AudioTimelineEvent::SetValue ||
+        MOZ_ASSERT(current->mType == AudioTimelineEvent::SetValueAtTime ||
                    current->mType == AudioTimelineEvent::SetTarget ||
                    current->mType == AudioTimelineEvent::LinearRamp ||
                    current->mType == AudioTimelineEvent::ExponentialRamp ||
                    current->mType == AudioTimelineEvent::SetValueCurve);
 #endif
 
         if (TimesEqual(aTime, mEvents[lastEventId].template Time<TimeType>())) {
           mLastComputedValue = mComputedValue;
@@ -388,27 +514,16 @@ public:
     }
     double ratio = std::max((t - startTime) / duration, 0.0);
     if (ratio >= 1.0) {
       return aCurve[aCurveLength - 1];
     }
     return aCurve[uint32_t(aCurveLength * ratio)];
   }
 
-  void ConvertEventTimesToTicks(int64_t (*aConvertor)(double aTime, void* aClosure), void* aClosure,
-                                int32_t aSampleRate)
-  {
-    for (unsigned i = 0; i < mEvents.Length(); ++i) {
-      mEvents[i].SetTimeInTicks(aConvertor(mEvents[i].template Time<double>(), aClosure));
-      mEvents[i].mTimeConstant *= aSampleRate;
-      mEvents[i].mDuration *= aSampleRate;
-    }
-  }
-
-private:
   template<class TimeType>
   float GetValuesAtTimeHelperInternal(TimeType aTime,
                                       const AudioTimelineEvent* aPrevious,
                                       const AudioTimelineEvent* aNext)
   {
     // If the requested time is before all of the existing events
     if (!aPrevious) {
        return mValue;
@@ -428,27 +543,31 @@ private:
       return ExtractValueFromCurve(aPrevious->template Time<TimeType>(),
                                    aPrevious->mCurve, aPrevious->mCurveLength,
                                    aPrevious->mDuration, aTime);
     }
 
     // If the requested time is after all of the existing events
     if (!aNext) {
       switch (aPrevious->mType) {
-      case AudioTimelineEvent::SetValue:
-      case AudioTimelineEvent::LinearRamp:
-      case AudioTimelineEvent::ExponentialRamp:
-        // The value will be constant after the last event
-        return aPrevious->mValue;
-      case AudioTimelineEvent::SetValueCurve:
-        return ExtractValueFromCurve(aPrevious->template Time<TimeType>(),
-                                     aPrevious->mCurve, aPrevious->mCurveLength,
-                                     aPrevious->mDuration, aTime);
-      case AudioTimelineEvent::SetTarget:
-        MOZ_ASSERT(false, "unreached");
+        case AudioTimelineEvent::SetValueAtTime:
+        case AudioTimelineEvent::LinearRamp:
+        case AudioTimelineEvent::ExponentialRamp:
+          // The value will be constant after the last event
+          return aPrevious->mValue;
+        case AudioTimelineEvent::SetValueCurve:
+          return ExtractValueFromCurve(aPrevious->template Time<TimeType>(),
+                                       aPrevious->mCurve, aPrevious->mCurveLength,
+                                       aPrevious->mDuration, aTime);
+        case AudioTimelineEvent::SetTarget:
+          MOZ_ASSERT(false, "unreached");
+        case AudioTimelineEvent::SetValue:
+        case AudioTimelineEvent::Cancel:
+        case AudioTimelineEvent::Stream:
+          MOZ_ASSERT(false, "Should have been handled earlier.");
       }
       MOZ_ASSERT(false, "unreached");
     }
 
     // Finally, handle the case where we have both a previous and a next event
 
     // First, handle the case where our range ends up in a ramp event
     switch (aNext->mType) {
@@ -459,159 +578,94 @@ private:
                                aNext->mValue, aTime);
 
     case AudioTimelineEvent::ExponentialRamp:
       return ExponentialInterpolate(aPrevious->template Time<TimeType>(),
                                     aPrevious->mValue,
                                     aNext->template Time<TimeType>(),
                                     aNext->mValue, aTime);
 
-    case AudioTimelineEvent::SetValue:
+    case AudioTimelineEvent::SetValueAtTime:
     case AudioTimelineEvent::SetTarget:
     case AudioTimelineEvent::SetValueCurve:
       break;
+    case AudioTimelineEvent::SetValue:
+    case AudioTimelineEvent::Cancel:
+    case AudioTimelineEvent::Stream:
+      MOZ_ASSERT(false, "Should have been handled earlier.");
     }
 
     // Now handle all other cases
     switch (aPrevious->mType) {
-    case AudioTimelineEvent::SetValue:
+    case AudioTimelineEvent::SetValueAtTime:
     case AudioTimelineEvent::LinearRamp:
     case AudioTimelineEvent::ExponentialRamp:
       // If the next event type is neither linear or exponential ramp, the
       // value is constant.
       return aPrevious->mValue;
     case AudioTimelineEvent::SetValueCurve:
       return ExtractValueFromCurve(aPrevious->template Time<TimeType>(),
                                    aPrevious->mCurve, aPrevious->mCurveLength,
                                    aPrevious->mDuration, aTime);
     case AudioTimelineEvent::SetTarget:
       MOZ_ASSERT(false, "unreached");
+    case AudioTimelineEvent::SetValue:
+    case AudioTimelineEvent::Cancel:
+    case AudioTimelineEvent::Stream:
+      MOZ_ASSERT(false, "Should have been handled earlier.");
     }
 
     MOZ_ASSERT(false, "unreached");
     return 0.0f;
   }
 
   const AudioTimelineEvent* GetPreviousEvent(double aTime) const
   {
     const AudioTimelineEvent* previous = nullptr;
     const AudioTimelineEvent* next = nullptr;
 
     bool bailOut = false;
     for (unsigned i = 0; !bailOut && i < mEvents.Length(); ++i) {
       switch (mEvents[i].mType) {
-      case AudioTimelineEvent::SetValue:
+      case AudioTimelineEvent::SetValueAtTime:
       case AudioTimelineEvent::SetTarget:
       case AudioTimelineEvent::LinearRamp:
       case AudioTimelineEvent::ExponentialRamp:
       case AudioTimelineEvent::SetValueCurve:
-        if (aTime == mEvents[i].mTime) {
+        if (aTime == mEvents[i].template Time<double>()) {
           // Find the last event with the same time
           do {
             ++i;
           } while (i < mEvents.Length() &&
-                   aTime == mEvents[i].mTime);
+                   aTime == mEvents[i].template Time<double>());
           return &mEvents[i - 1];
         }
         previous = next;
         next = &mEvents[i];
-        if (aTime < mEvents[i].mTime) {
+        if (aTime < mEvents[i].template Time<double>()) {
           bailOut = true;
         }
         break;
       default:
         MOZ_ASSERT(false, "unreached");
       }
     }
     // Handle the case where the time is past all of the events
     if (!bailOut) {
       previous = next;
     }
 
     return previous;
   }
-
-  void InsertEvent(const AudioTimelineEvent& aEvent, ErrorResult& aRv)
+private:
+  static bool IsValid(double value)
   {
-    if (!aEvent.IsValid()) {
-      aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
-      return;
-    }
-
-    // Make sure that non-curve events don't fall within the duration of a
-    // curve event.
-    for (unsigned i = 0; i < mEvents.Length(); ++i) {
-      if (mEvents[i].mType == AudioTimelineEvent::SetValueCurve &&
-          mEvents[i].mTime <= aEvent.mTime &&
-          (mEvents[i].mTime + mEvents[i].mDuration) >= aEvent.mTime) {
-        aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
-        return;
-      }
-    }
-
-    // Make sure that curve events don't fall in a range which includes other
-    // events.
-    if (aEvent.mType == AudioTimelineEvent::SetValueCurve) {
-      for (unsigned i = 0; i < mEvents.Length(); ++i) {
-        if (mEvents[i].mTime > aEvent.mTime &&
-            mEvents[i].mTime < (aEvent.mTime + aEvent.mDuration)) {
-          aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
-          return;
-        }
-      }
-    }
-
-    // Make sure that invalid values are not used for exponential curves
-    if (aEvent.mType == AudioTimelineEvent::ExponentialRamp) {
-      if (aEvent.mValue <= 0.f) {
-        aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
-        return;
-      }
-      const AudioTimelineEvent* previousEvent = GetPreviousEvent(aEvent.mTime);
-      if (previousEvent) {
-        if (previousEvent->mValue <= 0.f) {
-          aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
-          return;
-        }
-      } else {
-        if (mValue <= 0.f) {
-          aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
-          return;
-        }
-      }
-    }
-
-    for (unsigned i = 0; i < mEvents.Length(); ++i) {
-      if (aEvent.mTime == mEvents[i].mTime) {
-        if (aEvent.mType == mEvents[i].mType) {
-          // If times and types are equal, replace the event
-          mEvents.ReplaceElementAt(i, aEvent);
-        } else {
-          // Otherwise, place the element after the last event of another type
-          do {
-            ++i;
-          } while (i < mEvents.Length() &&
-                   aEvent.mType != mEvents[i].mType &&
-                   aEvent.mTime == mEvents[i].mTime);
-          mEvents.InsertElementAt(i, aEvent);
-        }
-        return;
-      }
-      // Otherwise, place the event right after the latest existing event
-      if (aEvent.mTime < mEvents[i].mTime) {
-        mEvents.InsertElementAt(i, aEvent);
-        return;
-      }
-    }
-
-    // If we couldn't find a place for the event, just append it to the list
-    mEvents.AppendElement(aEvent);
+    return mozilla::IsFinite(value);
   }
 
-private:
   // This is a sorted array of the events in the timeline.  Queries of this
   // data structure should probably be more frequent than modifications to it,
   // and that is the reason why we're using a simple array as the data structure.
   // We can optimize this in the future if the performance of the array ends up
   // being a bottleneck.
   nsTArray<AudioTimelineEvent> mEvents;
   float mValue;
   // This is the value of this AudioParam we computed at the last call.
--- a/dom/media/webaudio/AudioNode.cpp
+++ b/dom/media/webaudio/AudioNode.cpp
@@ -299,22 +299,22 @@ AudioNode::SendChannelMixingParametersTo
 {
   if (mStream) {
     mStream->SetChannelMixingParameters(mChannelCount, mChannelCountMode,
                                         mChannelInterpretation);
   }
 }
 
 void
-AudioNode::SendTimelineParameterToStream(AudioNode* aNode, uint32_t aIndex,
-                                         const AudioParamTimeline& aValue)
+AudioNode::SendTimelineEventToStream(AudioNode* aNode, uint32_t aIndex,
+                                     const AudioTimelineEvent& aEvent)
 {
   AudioNodeStream* ns = aNode->mStream;
   MOZ_ASSERT(ns, "How come we don't have a stream here?");
-  ns->SetTimelineParameter(aIndex, aValue);
+  ns->SendTimelineEvent(aIndex, aEvent);
 }
 
 void
 AudioNode::Disconnect(uint32_t aOutput, ErrorResult& aRv)
 {
   if (aOutput >= NumberOfOutputs()) {
     aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
     return;
--- a/dom/media/webaudio/AudioNode.h
+++ b/dom/media/webaudio/AudioNode.h
@@ -23,16 +23,17 @@ namespace mozilla {
 
 namespace dom {
 
 class AudioContext;
 class AudioBufferSourceNode;
 class AudioParam;
 class AudioParamTimeline;
 struct ThreeDPoint;
+struct AudioTimelineEvent;
 
 /**
  * The DOM object representing a Web Audio AudioNode.
  *
  * Each AudioNode has a MediaStream representing the actual
  * real-time processing and output of this AudioNode.
  *
  * We track the incoming and outgoing connections to other AudioNodes.
@@ -218,18 +219,18 @@ private:
 protected:
   static void Callback(AudioNode* aNode) { /* not implemented */ }
 
   // Helpers for sending different value types to streams
   void SendDoubleParameterToStream(uint32_t aIndex, double aValue);
   void SendInt32ParameterToStream(uint32_t aIndex, int32_t aValue);
   void SendThreeDPointParameterToStream(uint32_t aIndex, const ThreeDPoint& aValue);
   void SendChannelMixingParametersToStream();
-  static void SendTimelineParameterToStream(AudioNode* aNode, uint32_t aIndex,
-                                            const AudioParamTimeline& aValue);
+  static void SendTimelineEventToStream(AudioNode* aNode, uint32_t aIndex,
+                                        const dom::AudioTimelineEvent& aEvent);
 
 private:
   nsRefPtr<AudioContext> mContext;
 
 protected:
   // Must be set in the constructor. Must not be null unless finished.
   nsRefPtr<AudioNodeStream> mStream;
 
--- a/dom/media/webaudio/AudioNodeEngine.h
+++ b/dom/media/webaudio/AudioNodeEngine.h
@@ -12,16 +12,17 @@
 #include "mozilla/Mutex.h"
 
 namespace mozilla {
 
 namespace dom {
 struct ThreeDPoint;
 class AudioParamTimeline;
 class DelayNodeEngine;
+struct AudioTimelineEvent;
 } // namespace dom
 
 class AudioBlock;
 class AudioNodeStream;
 
 /**
  * This class holds onto a set of immutable channel buffers. The storage
  * for the buffers must be malloced, but the buffer pointers and the malloc
@@ -277,21 +278,20 @@ public:
   virtual void SetDoubleParameter(uint32_t aIndex, double aParam)
   {
     NS_ERROR("Invalid SetDoubleParameter index");
   }
   virtual void SetInt32Parameter(uint32_t aIndex, int32_t aParam)
   {
     NS_ERROR("Invalid SetInt32Parameter index");
   }
-  virtual void SetTimelineParameter(uint32_t aIndex,
-                                    const dom::AudioParamTimeline& aValue,
-                                    TrackRate aSampleRate)
+  virtual void RecvTimelineEvent(uint32_t aIndex,
+                                 dom::AudioTimelineEvent& aValue)
   {
-    NS_ERROR("Invalid SetTimelineParameter index");
+    NS_ERROR("Invalid RecvTimelineEvent index");
   }
   virtual void SetThreeDPointParameter(uint32_t aIndex,
                                        const dom::ThreeDPoint& aValue)
   {
     NS_ERROR("Invalid SetThreeDPointParameter index");
   }
   virtual void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer)
   {
--- a/dom/media/webaudio/AudioNodeStream.cpp
+++ b/dom/media/webaudio/AudioNodeStream.cpp
@@ -198,39 +198,39 @@ AudioNodeStream::SetInt32Parameter(uint3
     int32_t mValue;
     uint32_t mIndex;
   };
 
   GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
 }
 
 void
-AudioNodeStream::SetTimelineParameter(uint32_t aIndex,
-                                      const AudioParamTimeline& aValue)
+AudioNodeStream::SendTimelineEvent(uint32_t aIndex,
+                                   const AudioTimelineEvent& aEvent)
 {
   class Message final : public ControlMessage
   {
   public:
     Message(AudioNodeStream* aStream, uint32_t aIndex,
-            const AudioParamTimeline& aValue)
+            const AudioTimelineEvent& aEvent)
       : ControlMessage(aStream),
-        mValue(aValue),
+        mEvent(aEvent),
         mSampleRate(aStream->SampleRate()),
         mIndex(aIndex)
     {}
     virtual void Run() override
     {
       static_cast<AudioNodeStream*>(mStream)->Engine()->
-          SetTimelineParameter(mIndex, mValue, mSampleRate);
+          RecvTimelineEvent(mIndex, mEvent);
     }
-    AudioParamTimeline mValue;
+    AudioTimelineEvent mEvent;
     TrackRate mSampleRate;
     uint32_t mIndex;
   };
-  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
+  GraphImpl()->AppendMessage(new Message(this, aIndex, aEvent));
 }
 
 void
 AudioNodeStream::SetThreeDPointParameter(uint32_t aIndex, const ThreeDPoint& aValue)
 {
   class Message final : public ControlMessage
   {
   public:
--- a/dom/media/webaudio/AudioNodeStream.h
+++ b/dom/media/webaudio/AudioNodeStream.h
@@ -9,17 +9,17 @@
 #include "MediaStreamGraph.h"
 #include "mozilla/dom/AudioNodeBinding.h"
 #include "AudioBlock.h"
 
 namespace mozilla {
 
 namespace dom {
 struct ThreeDPoint;
-class AudioParamTimeline;
+struct AudioTimelineEvent;
 class AudioContext;
 } // namespace dom
 
 class ThreadSharedFloatArrayBufferList;
 class AudioNodeEngine;
 
 /**
  * An AudioNodeStream produces one audio track with ID AUDIO_TRACK.
@@ -80,19 +80,20 @@ public:
   /**
    * Sets a parameter that's a time relative to some stream's played time.
    * This time is converted to a time relative to this stream when it's set.
    */
   void SetStreamTimeParameter(uint32_t aIndex, AudioContext* aContext,
                               double aStreamTime);
   void SetDoubleParameter(uint32_t aIndex, double aValue);
   void SetInt32Parameter(uint32_t aIndex, int32_t aValue);
-  void SetTimelineParameter(uint32_t aIndex, const dom::AudioParamTimeline& aValue);
   void SetThreeDPointParameter(uint32_t aIndex, const dom::ThreeDPoint& aValue);
   void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList>&& aBuffer);
+  // This sends a single event to the timeline on the MSG thread side.
+  void SendTimelineEvent(uint32_t aIndex, const dom::AudioTimelineEvent& aEvent);
   // This consumes the contents of aData.  aData will be emptied after this returns.
   void SetRawArrayData(nsTArray<float>& aData);
   void SetChannelMixingParameters(uint32_t aNumberOfChannels,
                                   ChannelCountMode aChannelCountMoe,
                                   ChannelInterpretation aChannelInterpretation);
   void SetPassThrough(bool aPassThrough);
   ChannelInterpretation GetChannelInterpretation()
   {
--- a/dom/media/webaudio/AudioParam.cpp
+++ b/dom/media/webaudio/AudioParam.cpp
@@ -113,18 +113,20 @@ AudioParam::Stream()
 
   // Setup the AudioParam's stream as an input to the owner AudioNode's stream
   AudioNodeStream* nodeStream = mNode->GetStream();
   if (nodeStream) {
     mNodeStreamPort =
       nodeStream->AllocateInputPort(mStream, AudioNodeStream::AUDIO_TRACK);
   }
 
-  // Let the MSG's copy of AudioParamTimeline know about the change in the stream
-  mCallback(mNode);
+  // Send the stream to the timeline on the MSG side.
+  AudioTimelineEvent event(mStream);
+
+  mCallback(mNode, event);
 
   return mStream;
 }
 
 float
 AudioParamTimeline::AudioNodeInputValue(size_t aCounter) const
 {
   MOZ_ASSERT(mStream);
--- a/dom/media/webaudio/AudioParam.h
+++ b/dom/media/webaudio/AudioParam.h
@@ -21,17 +21,17 @@ namespace mozilla {
 namespace dom {
 
 class AudioParam final : public nsWrapperCache,
                          public AudioParamTimeline
 {
   virtual ~AudioParam();
 
 public:
-  typedef void (*CallbackType)(AudioNode*);
+  typedef void (*CallbackType)(AudioNode* aNode, const AudioTimelineEvent&);
 
   AudioParam(AudioNode* aNode,
              CallbackType aCallback,
              float aDefaultValue,
              const char* aName);
 
   NS_IMETHOD_(MozExternalRefCountType) AddRef(void);
   NS_IMETHOD_(MozExternalRefCountType) Release(void);
@@ -53,78 +53,96 @@ public:
   // object.
   void SetValueCurveAtTime(const Float32Array& aValues, double aStartTime, double aDuration, ErrorResult& aRv)
   {
     if (!WebAudioUtils::IsTimeValid(aStartTime)) {
       aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
       return;
     }
     aValues.ComputeLengthAndData();
-    AudioParamTimeline::SetValueCurveAtTime(aValues.Data(), aValues.Length(),
-                                            DOMTimeToStreamTime(aStartTime), aDuration, aRv);
-    mCallback(mNode);
+
+    EventInsertionHelper(aRv, AudioTimelineEvent::SetValueCurve,
+                         aStartTime, 0.0f, 0.0f, aDuration, aValues.Data(),
+                         aValues.Length());
   }
 
-  // We override the rest of the mutating AudioParamTimeline methods in order to make
-  // sure that the callback is called every time that this object gets mutated.
   void SetValue(float aValue)
   {
-    // Optimize away setting the same value on an AudioParam
-    if (HasSimpleValue() &&
-        WebAudioUtils::FuzzyEqual(GetValue(), aValue)) {
+    AudioTimelineEvent event(AudioTimelineEvent::SetValue, 0.0f, aValue);
+
+    ErrorResult rv;
+    if (!ValidateEvent(event, rv)) {
+      MOZ_ASSERT(false, "This should not happen, "
+                        "setting the value should always work");
       return;
     }
+
     AudioParamTimeline::SetValue(aValue);
-    mCallback(mNode);
+
+    mCallback(mNode, event);
   }
+
   void SetValueAtTime(float aValue, double aStartTime, ErrorResult& aRv)
   {
     if (!WebAudioUtils::IsTimeValid(aStartTime)) {
       aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
       return;
     }
-    AudioParamTimeline::SetValueAtTime(aValue, DOMTimeToStreamTime(aStartTime), aRv);
-    mCallback(mNode);
+    EventInsertionHelper(aRv, AudioTimelineEvent::SetValueAtTime,
+                         aStartTime, aValue);
   }
+
   void LinearRampToValueAtTime(float aValue, double aEndTime, ErrorResult& aRv)
   {
     if (!WebAudioUtils::IsTimeValid(aEndTime)) {
       aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
       return;
     }
-    AudioParamTimeline::LinearRampToValueAtTime(aValue, DOMTimeToStreamTime(aEndTime), aRv);
-    mCallback(mNode);
+    EventInsertionHelper(aRv, AudioTimelineEvent::LinearRamp, aEndTime, aValue);
   }
+
   void ExponentialRampToValueAtTime(float aValue, double aEndTime, ErrorResult& aRv)
   {
     if (!WebAudioUtils::IsTimeValid(aEndTime)) {
       aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
       return;
     }
-    AudioParamTimeline::ExponentialRampToValueAtTime(aValue, DOMTimeToStreamTime(aEndTime), aRv);
-    mCallback(mNode);
+    EventInsertionHelper(aRv, AudioTimelineEvent::ExponentialRamp,
+                         aEndTime, aValue);
   }
-  void SetTargetAtTime(float aTarget, double aStartTime, double aTimeConstant, ErrorResult& aRv)
+
+  void SetTargetAtTime(float aTarget, double aStartTime,
+                       double aTimeConstant, ErrorResult& aRv)
   {
     if (!WebAudioUtils::IsTimeValid(aStartTime) ||
         !WebAudioUtils::IsTimeValid(aTimeConstant)) {
       aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
       return;
     }
-    AudioParamTimeline::SetTargetAtTime(aTarget, DOMTimeToStreamTime(aStartTime), aTimeConstant, aRv);
-    mCallback(mNode);
+    EventInsertionHelper(aRv, AudioTimelineEvent::SetTarget,
+                         aStartTime, aTarget,
+                         aTimeConstant);
   }
+
   void CancelScheduledValues(double aStartTime, ErrorResult& aRv)
   {
     if (!WebAudioUtils::IsTimeValid(aStartTime)) {
       aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
       return;
     }
-    AudioParamTimeline::CancelScheduledValues(DOMTimeToStreamTime(aStartTime));
-    mCallback(mNode);
+
+    double streamTime = DOMTimeToStreamTime(aStartTime);
+
+    // Remove some events on the main thread copy.
+    AudioEventTimeline::CancelScheduledValues(streamTime);
+
+    AudioTimelineEvent event(AudioTimelineEvent::Cancel,
+                             streamTime, 0.0f);
+
+    mCallback(mNode, event);
   }
 
   uint32_t ParentNodeId()
   {
     return mNode->Id();
   }
 
   void GetName(nsAString& aName)
@@ -183,16 +201,37 @@ public:
     return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
   }
 
 protected:
   nsCycleCollectingAutoRefCnt mRefCnt;
   NS_DECL_OWNINGTHREAD
 
 private:
+  void EventInsertionHelper(ErrorResult& aRv,
+                            AudioTimelineEvent::Type aType,
+                            double aTime, float aValue,
+                            double aTimeConstant = 0.0,
+                            float aDuration = 0.0,
+                            const float* aCurve = nullptr,
+                            uint32_t aCurveLength = 0)
+  {
+    AudioTimelineEvent event(aType,
+                             DOMTimeToStreamTime(aTime), aValue,
+                             aTimeConstant, aDuration, aCurve, aCurveLength);
+
+    if (!ValidateEvent(event, aRv)) {
+      return;
+    }
+
+    AudioEventTimeline::InsertEvent<double>(event);
+
+    mCallback(mNode, event);
+  }
+
   nsRefPtr<AudioNode> mNode;
   // For every InputNode, there is a corresponding entry in mOutputParams of the
   // InputNode's mInputNode.
   nsTArray<AudioNode::InputNode> mInputNodes;
   CallbackType mCallback;
   const float mDefaultValue;
   const char* mName;
   // The input port used to connect the AudioParam's stream to its node's stream
--- a/dom/media/webaudio/AudioParamTimeline.h
+++ b/dom/media/webaudio/AudioParamTimeline.h
@@ -45,16 +45,34 @@ public:
   }
 
   template<class TimeType>
   float GetValueAtTime(TimeType aTime)
   {
     return GetValueAtTime(aTime, 0);
   }
 
+  template<typename TimeType>
+  void InsertEvent(const AudioTimelineEvent& aEvent)
+  {
+    if (aEvent.mType == AudioTimelineEvent::Cancel) {
+      CancelScheduledValues(aEvent.template Time<TimeType>());
+      return;
+    }
+    if (aEvent.mType == AudioTimelineEvent::Stream) {
+      mStream = aEvent.mStream;
+      return;
+    }
+    if (aEvent.mType == AudioTimelineEvent::SetValue) {
+      AudioEventTimeline::SetValue(aEvent.mValue);
+      return;
+    }
+    AudioEventTimeline::InsertEvent<TimeType>(aEvent);
+  }
+
   // Get the value of the AudioParam at time aTime + aCounter.
   // aCounter here is an offset to aTime if we try to get the value in ticks,
   // otherwise it should always be zero.  aCounter is meant to be used when
   template<class TimeType>
   float GetValueAtTime(TimeType aTime, size_t aCounter);
 
   // Get the values of the AudioParam at time aTime + (0 to aSize).
   // aBuffer must have the correct aSize.
--- a/dom/media/webaudio/BiquadFilterNode.cpp
+++ b/dom/media/webaudio/BiquadFilterNode.cpp
@@ -105,37 +105,37 @@ public:
   void SetInt32Parameter(uint32_t aIndex, int32_t aValue) override
   {
     switch (aIndex) {
     case TYPE: mType = static_cast<BiquadFilterType>(aValue); break;
     default:
       NS_ERROR("Bad BiquadFilterNode Int32Parameter");
     }
   }
-  void SetTimelineParameter(uint32_t aIndex,
-                            const AudioParamTimeline& aValue,
-                            TrackRate aSampleRate) override
+  void RecvTimelineEvent(uint32_t aIndex,
+                         AudioTimelineEvent& aEvent) override
   {
     MOZ_ASSERT(mSource && mDestination);
+
+    WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent,
+                                                    mSource,
+                                                    mDestination);
+
     switch (aIndex) {
     case FREQUENCY:
-      mFrequency = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mFrequency, mSource, mDestination);
+      mFrequency.InsertEvent<int64_t>(aEvent);
       break;
     case DETUNE:
-      mDetune = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mDetune, mSource, mDestination);
+      mDetune.InsertEvent<int64_t>(aEvent);
       break;
     case Q:
-      mQ = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mQ, mSource, mDestination);
+      mQ.InsertEvent<int64_t>(aEvent);
       break;
     case GAIN:
-      mGain = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mGain, mSource, mDestination);
+      mGain.InsertEvent<int64_t>(aEvent);
       break;
     default:
       NS_ERROR("Bad BiquadFilterNodeEngine TimelineParameter");
     }
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
                             GraphTime aFrom,
@@ -342,37 +342,37 @@ BiquadFilterNode::GetFrequencyResponse(c
   double detune = mDetune->GetValueAtTime(currentTime);
 
   WebCore::Biquad biquad;
   SetParamsOnBiquad(biquad, Context()->SampleRate(), mType, freq, q, gain, detune);
   biquad.getFrequencyResponse(int(length), frequencies, aMagResponse.Data(), aPhaseResponse.Data());
 }
 
 void
-BiquadFilterNode::SendFrequencyToStream(AudioNode* aNode)
+BiquadFilterNode::SendFrequencyToStream(AudioNode* aNode, const AudioTimelineEvent& aEvent)
 {
   BiquadFilterNode* This = static_cast<BiquadFilterNode*>(aNode);
-  SendTimelineParameterToStream(This, BiquadFilterNodeEngine::FREQUENCY, *This->mFrequency);
+  SendTimelineEventToStream(This, BiquadFilterNodeEngine::FREQUENCY, aEvent);
 }
 
 void
-BiquadFilterNode::SendDetuneToStream(AudioNode* aNode)
+BiquadFilterNode::SendDetuneToStream(AudioNode* aNode, const AudioTimelineEvent& aEvent)
 {
   BiquadFilterNode* This = static_cast<BiquadFilterNode*>(aNode);
-  SendTimelineParameterToStream(This, BiquadFilterNodeEngine::DETUNE, *This->mDetune);
+  SendTimelineEventToStream(This, BiquadFilterNodeEngine::DETUNE, aEvent);
 }
 
 void
-BiquadFilterNode::SendQToStream(AudioNode* aNode)
+BiquadFilterNode::SendQToStream(AudioNode* aNode, const AudioTimelineEvent& aEvent)
 {
   BiquadFilterNode* This = static_cast<BiquadFilterNode*>(aNode);
-  SendTimelineParameterToStream(This, BiquadFilterNodeEngine::Q, *This->mQ);
+  SendTimelineEventToStream(This, BiquadFilterNodeEngine::Q, aEvent);
 }
 
 void
-BiquadFilterNode::SendGainToStream(AudioNode* aNode)
+BiquadFilterNode::SendGainToStream(AudioNode* aNode, const AudioTimelineEvent& aEvent)
 {
   BiquadFilterNode* This = static_cast<BiquadFilterNode*>(aNode);
-  SendTimelineParameterToStream(This, BiquadFilterNodeEngine::GAIN, *This->mGain);
+  SendTimelineEventToStream(This, BiquadFilterNodeEngine::GAIN, aEvent);
 }
 
 } // namespace dom
 } // namespace mozilla
--- a/dom/media/webaudio/BiquadFilterNode.h
+++ b/dom/media/webaudio/BiquadFilterNode.h
@@ -10,16 +10,17 @@
 #include "AudioNode.h"
 #include "AudioParam.h"
 #include "mozilla/dom/BiquadFilterNodeBinding.h"
 
 namespace mozilla {
 namespace dom {
 
 class AudioContext;
+struct AudioTimelineEvent;
 
 class BiquadFilterNode final : public AudioNode
 {
 public:
   explicit BiquadFilterNode(AudioContext* aContext);
 
   NS_DECL_ISUPPORTS_INHERITED
   NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(BiquadFilterNode, AudioNode)
@@ -63,20 +64,24 @@ public:
 
   virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override;
   virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override;
 
 protected:
   virtual ~BiquadFilterNode();
 
 private:
-  static void SendFrequencyToStream(AudioNode* aNode);
-  static void SendDetuneToStream(AudioNode* aNode);
-  static void SendQToStream(AudioNode* aNode);
-  static void SendGainToStream(AudioNode* aNode);
+  static void SendFrequencyToStream(AudioNode* aNode,
+                                    const AudioTimelineEvent& aEvent);
+  static void SendDetuneToStream(AudioNode* aNode,
+                                 const AudioTimelineEvent& aEvent);
+  static void SendQToStream(AudioNode* aNode,
+                            const AudioTimelineEvent& aEvent);
+  static void SendGainToStream(AudioNode* aNode,
+                               const AudioTimelineEvent& aEvent);
 
 private:
   BiquadFilterType mType;
   nsRefPtr<AudioParam> mFrequency;
   nsRefPtr<AudioParam> mDetune;
   nsRefPtr<AudioParam> mQ;
   nsRefPtr<AudioParam> mGain;
 };
--- a/dom/media/webaudio/DelayNode.cpp
+++ b/dom/media/webaudio/DelayNode.cpp
@@ -55,25 +55,27 @@ public:
   void SetSourceStream(AudioNodeStream* aSource)
   {
     mSource = aSource;
   }
 
   enum Parameters {
     DELAY,
   };
-  void SetTimelineParameter(uint32_t aIndex,
-                            const AudioParamTimeline& aValue,
-                            TrackRate aSampleRate) override
+  void RecvTimelineEvent(uint32_t aIndex,
+                         AudioTimelineEvent& aEvent) override
   {
+    MOZ_ASSERT(mSource && mDestination);
+    WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent,
+                                                    mSource,
+                                                    mDestination);
+
     switch (aIndex) {
     case DELAY:
-      MOZ_ASSERT(mSource && mDestination);
-      mDelay = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mDelay, mSource, mDestination);
+      mDelay.InsertEvent<int64_t>(aEvent);
       break;
     default:
       NS_ERROR("Bad DelayNodeEngine TimelineParameter");
     }
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
                             GraphTime aFrom,
@@ -232,16 +234,16 @@ DelayNode::SizeOfIncludingThis(MallocSiz
 
 JSObject*
 DelayNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
 {
   return DelayNodeBinding::Wrap(aCx, this, aGivenProto);
 }
 
 void
-DelayNode::SendDelayToStream(AudioNode* aNode)
+DelayNode::SendDelayToStream(AudioNode* aNode, const AudioTimelineEvent& aEvent)
 {
   DelayNode* This = static_cast<DelayNode*>(aNode);
-  SendTimelineParameterToStream(This, DelayNodeEngine::DELAY, *This->mDelay);
+  SendTimelineEventToStream(This, DelayNodeEngine::DELAY, aEvent);
 }
 
 } // namespace dom
 } // namespace mozilla
--- a/dom/media/webaudio/DelayNode.h
+++ b/dom/media/webaudio/DelayNode.h
@@ -37,17 +37,18 @@ public:
 
   virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override;
   virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override;
 
 protected:
   virtual ~DelayNode();
 
 private:
-  static void SendDelayToStream(AudioNode* aNode);
+  static void SendDelayToStream(AudioNode* aNode,
+                                const AudioTimelineEvent& aEvent);
   friend class DelayNodeEngine;
 
 private:
   nsRefPtr<AudioParam> mDelay;
 };
 
 } // namespace dom
 } // namespace mozilla
--- a/dom/media/webaudio/DynamicsCompressorNode.cpp
+++ b/dom/media/webaudio/DynamicsCompressorNode.cpp
@@ -56,41 +56,40 @@ public:
 
   enum Parameters {
     THRESHOLD,
     KNEE,
     RATIO,
     ATTACK,
     RELEASE
   };
-  void SetTimelineParameter(uint32_t aIndex,
-                            const AudioParamTimeline& aValue,
-                            TrackRate aSampleRate) override
+  void RecvTimelineEvent(uint32_t aIndex,
+                         AudioTimelineEvent& aEvent) override
   {
     MOZ_ASSERT(mSource && mDestination);
+
+    WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent,
+                                                    mSource,
+                                                    mDestination);
+
     switch (aIndex) {
     case THRESHOLD:
-      mThreshold = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mThreshold, mSource, mDestination);
+      mThreshold.InsertEvent<int64_t>(aEvent);
       break;
     case KNEE:
-      mKnee = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mKnee, mSource, mDestination);
+      mKnee.InsertEvent<int64_t>(aEvent);
       break;
     case RATIO:
-      mRatio = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mRatio, mSource, mDestination);
+      mRatio.InsertEvent<int64_t>(aEvent);
       break;
     case ATTACK:
-      mAttack = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mAttack, mSource, mDestination);
+      mAttack.InsertEvent<int64_t>(aEvent);
       break;
     case RELEASE:
-      mRelease = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mRelease, mSource, mDestination);
+      mRelease.InsertEvent<int64_t>(aEvent);
       break;
     default:
       NS_ERROR("Bad DynamicsCompresssorNodeEngine TimelineParameter");
     }
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
                             GraphTime aFrom,
@@ -233,44 +232,49 @@ DynamicsCompressorNode::SizeOfIncludingT
 
 JSObject*
 DynamicsCompressorNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
 {
   return DynamicsCompressorNodeBinding::Wrap(aCx, this, aGivenProto);
 }
 
 void
-DynamicsCompressorNode::SendThresholdToStream(AudioNode* aNode)
+DynamicsCompressorNode::SendThresholdToStream(AudioNode* aNode,
+                                              const AudioTimelineEvent& aEvent)
 {
   DynamicsCompressorNode* This = static_cast<DynamicsCompressorNode*>(aNode);
-  SendTimelineParameterToStream(This, DynamicsCompressorNodeEngine::THRESHOLD, *This->mThreshold);
+  SendTimelineEventToStream(This, DynamicsCompressorNodeEngine::THRESHOLD, aEvent);
 }
 
 void
-DynamicsCompressorNode::SendKneeToStream(AudioNode* aNode)
+DynamicsCompressorNode::SendKneeToStream(AudioNode* aNode,
+                                         const AudioTimelineEvent& aEvent)
 {
   DynamicsCompressorNode* This = static_cast<DynamicsCompressorNode*>(aNode);
-  SendTimelineParameterToStream(This, DynamicsCompressorNodeEngine::KNEE, *This->mKnee);
+  SendTimelineEventToStream(This, DynamicsCompressorNodeEngine::KNEE, aEvent);
 }
 
 void
-DynamicsCompressorNode::SendRatioToStream(AudioNode* aNode)
+DynamicsCompressorNode::SendRatioToStream(AudioNode* aNode,
+                                          const AudioTimelineEvent& aEvent)
 {
   DynamicsCompressorNode* This = static_cast<DynamicsCompressorNode*>(aNode);
-  SendTimelineParameterToStream(This, DynamicsCompressorNodeEngine::RATIO, *This->mRatio);
+  SendTimelineEventToStream(This, DynamicsCompressorNodeEngine::RATIO, aEvent);
 }
 
 void
-DynamicsCompressorNode::SendAttackToStream(AudioNode* aNode)
+DynamicsCompressorNode::SendAttackToStream(AudioNode* aNode,
+                                           const AudioTimelineEvent& aEvent)
 {
   DynamicsCompressorNode* This = static_cast<DynamicsCompressorNode*>(aNode);
-  SendTimelineParameterToStream(This, DynamicsCompressorNodeEngine::ATTACK, *This->mAttack);
+  SendTimelineEventToStream(This, DynamicsCompressorNodeEngine::ATTACK, aEvent);
 }
 
 void
-DynamicsCompressorNode::SendReleaseToStream(AudioNode* aNode)
+DynamicsCompressorNode::SendReleaseToStream(AudioNode* aNode,
+                                            const AudioTimelineEvent& aEvent)
 {
   DynamicsCompressorNode* This = static_cast<DynamicsCompressorNode*>(aNode);
-  SendTimelineParameterToStream(This, DynamicsCompressorNodeEngine::RELEASE, *This->mRelease);
+  SendTimelineEventToStream(This, DynamicsCompressorNodeEngine::RELEASE, aEvent);
 }
 
 } // namespace dom
 } // namespace mozilla
--- a/dom/media/webaudio/DynamicsCompressorNode.h
+++ b/dom/media/webaudio/DynamicsCompressorNode.h
@@ -69,21 +69,26 @@ public:
     MOZ_ASSERT(NS_IsMainThread());
     mReduction = aReduction;
   }
 
 protected:
   virtual ~DynamicsCompressorNode();
 
 private:
-  static void SendThresholdToStream(AudioNode* aNode);
-  static void SendKneeToStream(AudioNode* aNode);
-  static void SendRatioToStream(AudioNode* aNode);
-  static void SendAttackToStream(AudioNode* aNode);
-  static void SendReleaseToStream(AudioNode* aNode);
+  static void SendThresholdToStream(AudioNode* aNode,
+                                    const AudioTimelineEvent& aEvent);
+  static void SendKneeToStream(AudioNode* aNode,
+                               const AudioTimelineEvent& aEvent);
+  static void SendRatioToStream(AudioNode* aNode,
+                                const AudioTimelineEvent& aEvent);
+  static void SendAttackToStream(AudioNode* aNode,
+                                 const AudioTimelineEvent& aEvent);
+  static void SendReleaseToStream(AudioNode* aNode,
+                                  const AudioTimelineEvent& aEvent);
 
 private:
   nsRefPtr<AudioParam> mThreshold;
   nsRefPtr<AudioParam> mKnee;
   nsRefPtr<AudioParam> mRatio;
   float mReduction;
   nsRefPtr<AudioParam> mAttack;
   nsRefPtr<AudioParam> mRelease;
--- a/dom/media/webaudio/GainNode.cpp
+++ b/dom/media/webaudio/GainNode.cpp
@@ -38,25 +38,27 @@ public:
   void SetSourceStream(AudioNodeStream* aSource)
   {
     mSource = aSource;
   }
 
   enum Parameters {
     GAIN
   };
-  void SetTimelineParameter(uint32_t aIndex,
-                            const AudioParamTimeline& aValue,
-                            TrackRate aSampleRate) override
+  void RecvTimelineEvent(uint32_t aIndex,
+                         AudioTimelineEvent& aEvent) override
   {
+    MOZ_ASSERT(mSource && mDestination);
+    WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent,
+                                                    mSource,
+                                                    mDestination);
+
     switch (aIndex) {
     case GAIN:
-      MOZ_ASSERT(mSource && mDestination);
-      mGain = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mGain, mSource, mDestination);
+      mGain.InsertEvent<int64_t>(aEvent);
       break;
     default:
       NS_ERROR("Bad GainNodeEngine TimelineParameter");
     }
   }
 
   virtual void ProcessBlock(AudioNodeStream* aStream,
                             GraphTime aFrom,
@@ -154,16 +156,16 @@ GainNode::SizeOfIncludingThis(MallocSize
 
 JSObject*
 GainNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
 {
   return GainNodeBinding::Wrap(aCx, this, aGivenProto);
 }
 
 void
-GainNode::SendGainToStream(AudioNode* aNode)
+GainNode::SendGainToStream(AudioNode* aNode, const AudioTimelineEvent& aEvent)
 {
   GainNode* This = static_cast<GainNode*>(aNode);
-  SendTimelineParameterToStream(This, GainNodeEngine::GAIN, *This->mGain);
+  SendTimelineEventToStream(This, GainNodeEngine::GAIN, aEvent);
 }
 
 } // namespace dom
 } // namespace mozilla
--- a/dom/media/webaudio/GainNode.h
+++ b/dom/media/webaudio/GainNode.h
@@ -37,17 +37,17 @@ public:
 
   virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override;
   virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override;
 
 protected:
   virtual ~GainNode();
 
 private:
-  static void SendGainToStream(AudioNode* aNode);
+  static void SendGainToStream(AudioNode* aNode, const AudioTimelineEvent& aEvent);
 
 private:
   nsRefPtr<AudioParam> mGain;
 };
 
 } // namespace dom
 } // namespace mozilla
 
--- a/dom/media/webaudio/OscillatorNode.cpp
+++ b/dom/media/webaudio/OscillatorNode.cpp
@@ -52,31 +52,33 @@ public:
   enum Parameters {
     FREQUENCY,
     DETUNE,
     TYPE,
     PERIODICWAVE,
     START,
     STOP,
   };
-  void SetTimelineParameter(uint32_t aIndex,
-                            const AudioParamTimeline& aValue,
-                            TrackRate aSampleRate) override
+  void RecvTimelineEvent(uint32_t aIndex,
+                         AudioTimelineEvent& aEvent) override
   {
     mRecomputeParameters = true;
+
+    MOZ_ASSERT(mSource && mDestination);
+
+    WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent,
+                                                    mSource,
+                                                    mDestination);
+
     switch (aIndex) {
     case FREQUENCY:
-      MOZ_ASSERT(mSource && mDestination);
-      mFrequency = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mFrequency, mSource, mDestination);
+      mFrequency.InsertEvent<int64_t>(aEvent);
       break;
     case DETUNE:
-      MOZ_ASSERT(mSource && mDestination);
-      mDetune = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mDetune, mSource, mDestination);
+      mDetune.InsertEvent<int64_t>(aEvent);
       break;
     default:
       NS_ERROR("Bad OscillatorNodeEngine TimelineParameter");
     }
   }
 
   virtual void SetStreamTimeParameter(uint32_t aIndex, StreamTime aParam) override
   {
@@ -435,33 +437,33 @@ OscillatorNode::DestroyMediaStream()
 {
   if (mStream) {
     mStream->RemoveMainThreadListener(this);
   }
   AudioNode::DestroyMediaStream();
 }
 
 void
-OscillatorNode::SendFrequencyToStream(AudioNode* aNode)
+OscillatorNode::SendFrequencyToStream(AudioNode* aNode, const AudioTimelineEvent& aEvent)
 {
   OscillatorNode* This = static_cast<OscillatorNode*>(aNode);
   if (!This->mStream) {
     return;
   }
-  SendTimelineParameterToStream(This, OscillatorNodeEngine::FREQUENCY, *This->mFrequency);
+  SendTimelineEventToStream(This, OscillatorNodeEngine::FREQUENCY, aEvent);
 }
 
 void
-OscillatorNode::SendDetuneToStream(AudioNode* aNode)
+OscillatorNode::SendDetuneToStream(AudioNode* aNode, const AudioTimelineEvent& aEvent)
 {
   OscillatorNode* This = static_cast<OscillatorNode*>(aNode);
   if (!This->mStream) {
     return;
   }
-  SendTimelineParameterToStream(This, OscillatorNodeEngine::DETUNE, *This->mDetune);
+  SendTimelineEventToStream(This, OscillatorNodeEngine::DETUNE, aEvent);
 }
 
 void
 OscillatorNode::SendTypeToStream()
 {
   if (!mStream) {
     return;
   }
--- a/dom/media/webaudio/OscillatorNode.h
+++ b/dom/media/webaudio/OscillatorNode.h
@@ -82,18 +82,18 @@ public:
 
   virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override;
   virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override;
 
 protected:
   virtual ~OscillatorNode();
 
 private:
-  static void SendFrequencyToStream(AudioNode* aNode);
-  static void SendDetuneToStream(AudioNode* aNode);
+  static void SendFrequencyToStream(AudioNode* aNode, const AudioTimelineEvent& aEvent);
+  static void SendDetuneToStream(AudioNode* aNode, const AudioTimelineEvent& aEvent);
   void SendTypeToStream();
   void SendPeriodicWaveToStream();
 
 private:
   OscillatorType mType;
   nsRefPtr<PeriodicWave> mPeriodicWave;
   nsRefPtr<AudioParam> mFrequency;
   nsRefPtr<AudioParam> mDetune;
--- a/dom/media/webaudio/StereoPannerNode.cpp
+++ b/dom/media/webaudio/StereoPannerNode.cpp
@@ -44,25 +44,27 @@ public:
   void SetSourceStream(AudioNodeStream* aSource)
   {
     mSource = aSource;
   }
 
   enum Parameters {
     PAN
   };
-  void SetTimelineParameter(uint32_t aIndex,
-                            const AudioParamTimeline& aValue,
-                            TrackRate aSampleRate) override
+  void RecvTimelineEvent(uint32_t aIndex,
+                         AudioTimelineEvent& aEvent) override
   {
+    MOZ_ASSERT(mSource && mDestination);
+    WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent,
+                                                    mSource,
+                                                    mDestination);
+
     switch (aIndex) {
     case PAN:
-      MOZ_ASSERT(mSource && mDestination);
-      mPan = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mPan, mSource, mDestination);
+      mPan.InsertEvent<int64_t>(aEvent);
       break;
     default:
       NS_ERROR("Bad StereoPannerNode TimelineParameter");
     }
   }
 
   void GetGainValuesForPanning(float aPanning,
                                bool aMonoToStereo,
@@ -207,16 +209,16 @@ StereoPannerNode::SizeOfIncludingThis(Ma
 
 JSObject*
 StereoPannerNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
 {
   return StereoPannerNodeBinding::Wrap(aCx, this, aGivenProto);
 }
 
 void
-StereoPannerNode::SendPanToStream(AudioNode* aNode)
+StereoPannerNode::SendPanToStream(AudioNode* aNode, const AudioTimelineEvent& aEvent)
 {
   StereoPannerNode* This = static_cast<StereoPannerNode*>(aNode);
-  SendTimelineParameterToStream(This, StereoPannerNodeEngine::PAN, *This->mPan);
+  SendTimelineEventToStream(This, StereoPannerNodeEngine::PAN, aEvent);
 }
 
 } // namespace dom
 } // namespace mozilla
--- a/dom/media/webaudio/StereoPannerNode.h
+++ b/dom/media/webaudio/StereoPannerNode.h
@@ -55,17 +55,18 @@ public:
 
   virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override;
   virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override;
 
 protected:
   virtual ~StereoPannerNode();
 
 private:
-  static void SendPanToStream(AudioNode* aNode);
+  static void SendPanToStream(AudioNode* aNode,
+                              const AudioTimelineEvent& aEvent);
   nsRefPtr<AudioParam> mPan;
 };
 
 } // namespace dom
 } // namespace mozilla
 
 #endif
 
--- a/dom/media/webaudio/WebAudioUtils.cpp
+++ b/dom/media/webaudio/WebAudioUtils.cpp
@@ -1,47 +1,31 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim:set ts=2 sw=2 sts=2 et cindent: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "WebAudioUtils.h"
 #include "AudioNodeStream.h"
-#include "AudioParamTimeline.h"
 #include "blink/HRTFDatabaseLoader.h"
 
 namespace mozilla {
 
 namespace dom {
 
-struct ConvertTimeToTickHelper
-{
-  AudioNodeStream* mSourceStream;
-  AudioNodeStream* mDestinationStream;
-
-  static int64_t Convert(double aTime, void* aClosure)
-  {
-    ConvertTimeToTickHelper* This = static_cast<ConvertTimeToTickHelper*> (aClosure);
-    MOZ_ASSERT(This->mSourceStream->SampleRate() == This->mDestinationStream->SampleRate());
-    return This->mSourceStream->
-      TicksFromDestinationTime(This->mDestinationStream, aTime);
-  }
-};
-
-void
-WebAudioUtils::ConvertAudioParamToTicks(AudioParamTimeline& aParam,
-                                        AudioNodeStream* aSource,
-                                        AudioNodeStream* aDest)
+void WebAudioUtils::ConvertAudioTimelineEventToTicks(AudioTimelineEvent& aEvent,
+                                                     AudioNodeStream* aSource,
+                                                     AudioNodeStream* aDest)
 {
   MOZ_ASSERT(!aSource || aSource->SampleRate() == aDest->SampleRate());
-  ConvertTimeToTickHelper ctth;
-  ctth.mSourceStream = aSource;
-  ctth.mDestinationStream = aDest;
-  aParam.ConvertEventTimesToTicks(ConvertTimeToTickHelper::Convert, &ctth, aDest->SampleRate());
+  aEvent.SetTimeInTicks(
+      aSource->TicksFromDestinationTime(aDest, aEvent.Time<double>()));
+  aEvent.mTimeConstant *= aSource->SampleRate();
+  aEvent.mDuration *= aSource->SampleRate();
 }
 
 void
 WebAudioUtils::Shutdown()
 {
   WebCore::HRTFDatabaseLoader::shutdown();
 }
 
--- a/dom/media/webaudio/WebAudioUtils.h
+++ b/dom/media/webaudio/WebAudioUtils.h
@@ -17,17 +17,17 @@
 typedef struct SpeexResamplerState_ SpeexResamplerState;
 
 namespace mozilla {
 
 class AudioNodeStream;
 
 namespace dom {
 
-class AudioParamTimeline;
+struct AudioTimelineEvent;
 
 namespace WebAudioUtils {
   // 32 is the minimum required by the spec for createBuffer() and
   // createScriptProcessor() and matches what is used by Blink.  The limit
   // protects against large memory allocations.
   const size_t MaxChannelCount = 32;
   // AudioContext::CreateBuffer() "must support sample-rates in at least the
   // range 22050 to 96000."
@@ -50,27 +50,27 @@ namespace WebAudioUtils {
    * over aDuration seconds.
    */
   inline double ComputeSmoothingRate(double aDuration, double aSampleRate)
   {
     return 1.0 - std::exp(-1.0 / (aDuration * aSampleRate));
   }
 
   /**
-   * Converts AudioParamTimeline floating point time values to tick values
-   * with respect to a source and a destination AudioNodeStream.
+   * Converts an AudioTimelineEvent's floating point time values to tick values
+   * with respect to a destination AudioNodeStream.
    *
-   * This needs to be called for each AudioParamTimeline that gets sent to an
-   * AudioNodeEngine on the engine side where the AudioParamTimeline is
-   * received.  This means that such engines need to be aware of their source
-   * and destination streams as well.
+   * This needs to be called for each AudioTimelineEvent that gets sent to an
+   * AudioNodeEngine, on the engine side where the AudioTimelineEvent is
+   * received.  This means that such engines need to be aware of their
+   * destination streams as well.
    */
-  void ConvertAudioParamToTicks(AudioParamTimeline& aParam,
-                                AudioNodeStream* aSource,
-                                AudioNodeStream* aDest);
+  void ConvertAudioTimelineEventToTicks(AudioTimelineEvent& aEvent,
+                                        AudioNodeStream* aSource,
+                                        AudioNodeStream* aDest);
 
   /**
    * Converts a linear value to decibels.  Returns aMinDecibels if the linear
    * value is 0.
    */
   inline float ConvertLinearToDecibels(float aLinearValue, float aMinDecibels)
   {
     return aLinearValue ? 20.0f * std::log10(aLinearValue) : aMinDecibels;
--- a/dom/media/webaudio/compiledtest/TestAudioEventTimeline.cpp
+++ b/dom/media/webaudio/compiledtest/TestAudioEventTimeline.cpp
@@ -4,16 +4,27 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "AudioEventTimeline.h"
 #include "TestHarness.h"
 #include <sstream>
 #include <limits>
 
+// Mock the MediaStream class
+namespace mozilla {
+class MediaStream
+{
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaStream)
+private:
+  ~MediaStream() {
+  };
+};
+}
+
 using namespace mozilla;
 using namespace mozilla::dom;
 using std::numeric_limits;
 
 // Some simple testing primitives
 void ok(bool val, const char* msg)
 {
   if (val) {