Bug 952893. Part 1: Block the AudioDestinationNode when it's the only node in the AudioContext. r=padenot,karlt
author Robert O'Callahan <robert@ocallahan.org>
Thu, 16 Jan 2014 00:08:20 +1300
changeset 181966 1424d2e163498e2cfb34653ff6534c6a19c30b0c
parent 181965 af39c4df306ffd7f978d0e0d475e4678aa91f3c8
child 181967 62a1f3ee8c28364a9291183e1f63aa430a1f6b31
push id 3343
push user ffxbld
push date Mon, 17 Mar 2014 21:55:32 +0000
treeherder mozilla-beta@2f7d3415f79f
reviewers padenot, karlt
bugs 952893
milestone 29.0a1
content/media/AudioNodeStream.cpp
content/media/AudioNodeStream.h
content/media/webaudio/AudioBufferSourceNode.cpp
content/media/webaudio/AudioContext.cpp
content/media/webaudio/AudioContext.h
content/media/webaudio/AudioDestinationNode.cpp
content/media/webaudio/AudioDestinationNode.h
content/media/webaudio/AudioNode.cpp
content/media/webaudio/AudioParam.h
content/media/webaudio/OscillatorNode.cpp
content/media/webaudio/moz.build
content/media/webaudio/test/webaudio.js
--- a/content/media/AudioNodeStream.cpp
+++ b/content/media/AudioNodeStream.cpp
@@ -5,16 +5,17 @@
 
 #include "AudioNodeStream.h"
 
 #include "MediaStreamGraphImpl.h"
 #include "AudioNodeEngine.h"
 #include "ThreeDPoint.h"
 #include "AudioChannelFormat.h"
 #include "AudioParamTimeline.h"
+#include "AudioContext.h"
 
 using namespace mozilla::dom;
 
 namespace mozilla {
 
 /**
  * An AudioNodeStream produces a single audio track with ID
  * AUDIO_TRACK. This track has rate AudioContext::sIdealAudioRate
@@ -25,17 +26,17 @@ namespace mozilla {
  */
 
 AudioNodeStream::~AudioNodeStream()
 {
   MOZ_COUNT_DTOR(AudioNodeStream);
 }
 
 void
-AudioNodeStream::SetStreamTimeParameter(uint32_t aIndex, MediaStream* aRelativeToStream,
+AudioNodeStream::SetStreamTimeParameter(uint32_t aIndex, AudioContext* aContext,
                                         double aStreamTime)
 {
   class Message : public ControlMessage {
   public:
     Message(AudioNodeStream* aStream, uint32_t aIndex, MediaStream* aRelativeToStream,
             double aStreamTime)
       : ControlMessage(aStream), mStreamTime(aStreamTime),
         mRelativeToStream(aRelativeToStream), mIndex(aIndex) {}
@@ -45,17 +46,19 @@ AudioNodeStream::SetStreamTimeParameter(
           SetStreamTimeParameterImpl(mIndex, mRelativeToStream, mStreamTime);
     }
     double mStreamTime;
     MediaStream* mRelativeToStream;
     uint32_t mIndex;
   };
 
   MOZ_ASSERT(this);
-  GraphImpl()->AppendMessage(new Message(this, aIndex, aRelativeToStream, aStreamTime));
+  GraphImpl()->AppendMessage(new Message(this, aIndex,
+      aContext->DestinationStream(),
+      aContext->DOMTimeToStreamTime(aStreamTime)));
 }
 
 void
 AudioNodeStream::SetStreamTimeParameterImpl(uint32_t aIndex, MediaStream* aRelativeToStream,
                                             double aStreamTime)
 {
   TrackTicks ticks =
       WebAudioUtils::ConvertDestinationStreamTimeToSourceStreamTime(
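
Note: this hunk moves the DOM-time to stream-time conversion onto the main
thread, before the control message is queued. A minimal standalone sketch of
the conversion, with illustrative names and values rather than the patch's
actual code:

    #include <cstdio>

    // Hypothetical model of AudioContext::DOMTimeToStreamTime(): a time on
    // the AudioContext.currentTime timeline is shifted back by the "extra"
    // time that accumulated while the destination stream was blocked.
    static double DOMTimeToStreamTime(double aDOMTime, double aExtraCurrentTime)
    {
      return aDOMTime - aExtraCurrentTime;
    }

    int main()
    {
      // The context sat blocked for 1.5s: currentTime reads 1.5 while the
      // stream clock still reads 0, so start(3.0) maps to stream time 1.5.
      std::printf("%g\n", DOMTimeToStreamTime(3.0, 1.5));
      return 0;
    }
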
--- a/content/media/AudioNodeStream.h
+++ b/content/media/AudioNodeStream.h
@@ -11,16 +11,17 @@
 #include "AudioSegment.h"
 
 namespace mozilla {
 
 namespace dom {
 struct ThreeDPoint;
 class AudioParamTimeline;
 class DelayNodeEngine;
+class AudioContext;
 }
 
 class ThreadSharedFloatArrayBufferList;
 class AudioNodeEngine;
 
 /**
  * An AudioNodeStream produces one audio track with ID AUDIO_TRACK.
  * The start time of the AudioTrack is aligned to the start time of the
@@ -28,16 +29,18 @@ class AudioNodeEngine;
  * samples.
  *
  * An AudioNodeStream has an AudioNodeEngine plugged into it that does the
  * actual audio processing. AudioNodeStream contains the glue code that
  * integrates audio processing with the MediaStreamGraph.
  */
 class AudioNodeStream : public ProcessedMediaStream {
 public:
+  typedef mozilla::dom::AudioContext AudioContext;
+
   enum { AUDIO_TRACK = 1 };
 
   typedef nsAutoTArray<AudioChunk, 1> OutputChunks;
 
   /**
    * Transfers ownership of aEngine to the new AudioNodeStream.
    */
   AudioNodeStream(AudioNodeEngine* aEngine,
@@ -61,17 +64,17 @@ public:
   }
   ~AudioNodeStream();
 
   // Control API
   /**
    * Sets a parameter that's a time relative to some stream's played time.
    * This time is converted to a time relative to this stream when it's set.
    */
-  void SetStreamTimeParameter(uint32_t aIndex, MediaStream* aRelativeToStream,
+  void SetStreamTimeParameter(uint32_t aIndex, AudioContext* aContext,
                               double aStreamTime);
   void SetDoubleParameter(uint32_t aIndex, double aValue);
   void SetInt32Parameter(uint32_t aIndex, int32_t aValue);
   void SetTimelineParameter(uint32_t aIndex, const dom::AudioParamTimeline& aValue);
   void SetThreeDPointParameter(uint32_t aIndex, const dom::ThreeDPoint& aValue);
   void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer);
   // This consumes the contents of aData.  aData will be emptied after this returns.
   void SetRawArrayData(nsTArray<float>& aData);
--- a/content/media/webaudio/AudioBufferSourceNode.cpp
+++ b/content/media/webaudio/AudioBufferSourceNode.cpp
@@ -527,17 +527,17 @@ AudioBufferSourceNode::Start(double aWhe
   // We can't send these parameters without a buffer because we don't know the
   // buffer's sample rate or length.
   if (mBuffer) {
     SendOffsetAndDurationParametersToStream(ns);
   }
 
   // Don't set parameter unnecessarily
   if (aWhen > 0.0) {
-    ns->SetStreamTimeParameter(START, Context()->DestinationStream(), aWhen);
+    ns->SetStreamTimeParameter(START, Context(), aWhen);
   }
 
   MarkActive();
 }
 
 void
 AudioBufferSourceNode::SendBufferParameterToStream(JSContext* aCx)
 {
@@ -611,18 +611,17 @@ AudioBufferSourceNode::Stop(double aWhen
   }
 
   AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get());
   if (!ns || !Context()) {
     // We've already stopped and had our stream shut down
     return;
   }
 
-  ns->SetStreamTimeParameter(STOP, Context()->DestinationStream(),
-                             std::max(0.0, aWhen));
+  ns->SetStreamTimeParameter(STOP, Context(), std::max(0.0, aWhen));
 }
 
 void
 AudioBufferSourceNode::NotifyMainThreadStateChanged()
 {
   if (mStream->IsFinished()) {
     class EndedEventDispatcher : public nsRunnable
     {
--- a/content/media/webaudio/AudioContext.cpp
+++ b/content/media/webaudio/AudioContext.cpp
@@ -79,27 +79,32 @@ float GetSampleRateForAudioContext(bool 
 AudioContext::AudioContext(nsPIDOMWindow* aWindow,
                            bool aIsOffline,
                            uint32_t aNumberOfChannels,
                            uint32_t aLength,
                            float aSampleRate)
   : nsDOMEventTargetHelper(aWindow)
   , mSampleRate(GetSampleRateForAudioContext(aIsOffline, aSampleRate))
   , mNumberOfChannels(aNumberOfChannels)
+  , mNodeCount(0)
   , mIsOffline(aIsOffline)
   , mIsStarted(!aIsOffline)
   , mIsShutDown(false)
 {
   aWindow->AddAudioContext(this);
 
   // Note: AudioDestinationNode needs an AudioContext that must already be
   // bound to the window.
   mDestination = new AudioDestinationNode(this, aIsOffline, aNumberOfChannels,
                                           aLength, aSampleRate);
   mDestination->Stream()->AddAudioOutput(&gWebAudioOutputKey);
+  // We skip calling SetIsOnlyNodeForContext during mDestination's
+  // constructor, because it can only be called once mDestination has been
+  // fully set up.
+  mDestination->SetIsOnlyNodeForContext(true);
 }
 
 AudioContext::~AudioContext()
 {
   nsPIDOMWindow* window = GetOwner();
   if (window) {
     window->RemoveAudioContext(this);
   }
@@ -538,17 +543,18 @@ AudioContext::DestinationStream() const
     return Destination()->Stream();
   }
   return nullptr;
 }
 
 double
 AudioContext::CurrentTime() const
 {
-  return MediaTimeToSeconds(Destination()->Stream()->GetCurrentTime());
+  return MediaTimeToSeconds(Destination()->Stream()->GetCurrentTime()) +
+      ExtraCurrentTime();
 }
 
 void
 AudioContext::Shutdown()
 {
   mIsShutDown = true;
 
   // We mute rather than suspending, because the delay between the ::Shutdown
@@ -584,16 +590,28 @@ void
 AudioContext::Resume()
 {
   MediaStream* ds = DestinationStream();
   if (ds) {
     ds->ChangeExplicitBlockerCount(-1);
   }
 }
 
+void
+AudioContext::UpdateNodeCount(int32_t aDelta)
+{
+  bool firstNode = mNodeCount == 0;
+  mNodeCount += aDelta;
+  MOZ_ASSERT(mNodeCount >= 0);
+  // mDestination may be null when we're destroying nodes unlinked by CC
+  if (!firstNode && mDestination) {
+    mDestination->SetIsOnlyNodeForContext(mNodeCount == 1);
+  }
+}
+
 JSContext*
 AudioContext::GetJSContext() const
 {
   MOZ_ASSERT(NS_IsMainThread());
 
   nsCOMPtr<nsIScriptGlobalObject> scriptGlobal =
     do_QueryInterface(GetParentObject());
   if (!scriptGlobal) {
@@ -674,10 +692,16 @@ NS_IMETHODIMP
 AudioContext::CollectReports(nsIHandleReportCallback* aHandleReport,
                              nsISupports* aData)
 {
   int64_t amount = SizeOfIncludingThis(MallocSizeOf);
   return MOZ_COLLECT_REPORT("explicit/webaudio/audiocontext", KIND_HEAP, UNITS_BYTES,
                             amount, "Memory used by AudioContext objects (Web Audio).");
 }
 
+double
+AudioContext::ExtraCurrentTime() const
+{
+  return mDestination->ExtraCurrentTime();
+}
+
 }
 }
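
Note: UpdateNodeCount drives the blocking. The destination node always
exists, so the context is "empty" exactly when mNodeCount == 1; the first
increment (the destination itself) is skipped because mDestination is not
assigned yet, and the AudioContext constructor sets the flag explicitly
afterwards. A minimal sketch of that transition logic (ContextModel and
mOnlyNode are illustrative names, not the patch's code):

    #include <cassert>
    #include <cstdint>

    struct ContextModel {
      int32_t mNodeCount = 0;
      bool mOnlyNode = false;

      void UpdateNodeCount(int32_t aDelta)
      {
        bool firstNode = mNodeCount == 0;
        mNodeCount += aDelta;
        assert(mNodeCount >= 0);
        if (!firstNode) {
          mOnlyNode = (mNodeCount == 1);
        }
      }
    };

    int main()
    {
      ContextModel c;
      c.UpdateNodeCount(1);   // destination node: firstNode, flag untouched
      c.mOnlyNode = true;     // the ctor calls SetIsOnlyNodeForContext(true)
      c.UpdateNodeCount(1);   // a source node appears: unblock
      assert(!c.mOnlyNode);
      c.UpdateNodeCount(-1);  // it goes away: block again
      assert(c.mOnlyNode);
      return 0;
    }
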
--- a/content/media/webaudio/AudioContext.h
+++ b/content/media/webaudio/AudioContext.h
@@ -241,17 +241,33 @@ public:
   void Mute() const;
   void Unmute() const;
 
   JSContext* GetJSContext() const;
 
   AudioChannel MozAudioChannelType() const;
   void SetMozAudioChannelType(AudioChannel aValue, ErrorResult& aRv);
 
+  void UpdateNodeCount(int32_t aDelta);
+
+  double DOMTimeToStreamTime(double aTime) const
+  {
+    return aTime - ExtraCurrentTime();
+  }
+
 private:
+  /**
+   * Returns the amount of extra time added to the current time of the
+   * AudioDestinationNode's MediaStream to get this AudioContext's currentTime.
+   * Must be subtracted from all DOM API parameter times that are on the same
+   * timeline as AudioContext's currentTime to get times we can pass to the
+   * MediaStreamGraph.
+   */
+  double ExtraCurrentTime() const;
+
   void RemoveFromDecodeQueue(WebAudioDecodeJob* aDecodeJob);
   void ShutdownDecoder();
 
   size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
   NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                             nsISupports* aData);
 
   friend struct ::mozilla::WebAudioDecodeJob;
@@ -267,16 +283,18 @@ private:
   // See RegisterActiveNode.  These will keep the AudioContext alive while it
   // is rendering and the window remains alive.
   nsTHashtable<nsRefPtrHashKey<AudioNode> > mActiveNodes;
   // Hashsets containing all the PannerNodes, to compute the doppler shift.
   // These are weak pointers.
   nsTHashtable<nsPtrHashKey<PannerNode> > mPannerNodes;
   // Number of channels passed in the OfflineAudioContext ctor.
   uint32_t mNumberOfChannels;
+  // Number of nodes that currently exist for this AudioContext
+  int32_t mNodeCount;
   bool mIsOffline;
   bool mIsStarted;
   bool mIsShutDown;
 };
 
 }
 }
 
--- a/content/media/webaudio/AudioDestinationNode.cpp
+++ b/content/media/webaudio/AudioDestinationNode.cpp
@@ -12,16 +12,18 @@
 #include "AudioNodeStream.h"
 #include "MediaStreamGraph.h"
 #include "OfflineAudioCompletionEvent.h"
 #include "nsIInterfaceRequestorUtils.h"
 #include "nsIDocShell.h"
 #include "nsIPermissionManager.h"
 #include "nsIScriptObjectPrincipal.h"
 #include "nsServiceManagerUtils.h"
+#include "nsIAppShell.h"
+#include "nsWidgetsCID.h"
 
 namespace mozilla {
 namespace dom {
 
 class OfflineDestinationNodeEngine : public AudioNodeEngine
 {
 public:
   typedef AutoFallibleTArray<nsAutoArrayPtr<float>, 2> InputChannels;
@@ -212,16 +214,19 @@ AudioDestinationNode::AudioDestinationNo
   : AudioNode(aContext,
               aIsOffline ? aNumberOfChannels : 2,
               ChannelCountMode::Explicit,
               ChannelInterpretation::Speakers)
   , mFramesToProduce(aLength)
   , mAudioChannel(AudioChannel::Normal)
   , mIsOffline(aIsOffline)
   , mHasFinished(false)
+  , mExtraCurrentTime(0)
+  , mExtraCurrentTimeSinceLastStartedBlocking(0)
+  , mExtraCurrentTimeUpdatedSinceLastStableState(false)
 {
   MediaStreamGraph* graph = aIsOffline ?
                             MediaStreamGraph::CreateNonRealtimeInstance() :
                             MediaStreamGraph::GetInstance();
   AudioNodeEngine* engine = aIsOffline ?
                             new OfflineDestinationNodeEngine(this, aNumberOfChannels,
                                                              aLength, aSampleRate) :
                             static_cast<AudioNodeEngine*>(new DestinationNodeEngine(this));
@@ -481,11 +486,81 @@ AudioDestinationNode::CreateAudioChannel
     docshell->GetIsActive(&isActive);
     mAudioChannelAgent->SetVisibilityState(isActive);
   }
 
   int32_t state = 0;
   mAudioChannelAgent->StartPlaying(&state);
   SetCanPlay(state == AudioChannelState::AUDIO_CHANNEL_STATE_NORMAL);
 }
+
+void
+AudioDestinationNode::NotifyStableState()
+{
+  mExtraCurrentTimeUpdatedSinceLastStableState = false;
+}
+
+static NS_DEFINE_CID(kAppShellCID, NS_APPSHELL_CID);
+
+void
+AudioDestinationNode::ScheduleStableStateNotification()
+{
+  nsCOMPtr<nsIAppShell> appShell = do_GetService(kAppShellCID);
+  if (appShell) {
+    nsCOMPtr<nsIRunnable> event =
+      NS_NewRunnableMethod(this, &AudioDestinationNode::NotifyStableState);
+    appShell->RunInStableState(event);
+  }
+}
+
+double
+AudioDestinationNode::ExtraCurrentTime()
+{
+  if (!mStartedBlockingDueToBeingOnlyNode.IsNull() &&
+      !mExtraCurrentTimeUpdatedSinceLastStableState) {
+    mExtraCurrentTimeUpdatedSinceLastStableState = true;
+    mExtraCurrentTimeSinceLastStartedBlocking =
+      (TimeStamp::Now() - mStartedBlockingDueToBeingOnlyNode).ToSeconds();
+    ScheduleStableStateNotification();
+  }
+  return mExtraCurrentTime + mExtraCurrentTimeSinceLastStartedBlocking;
+}
+
+void
+AudioDestinationNode::SetIsOnlyNodeForContext(bool aIsOnlyNode)
+{
+  if ((!mStartedBlockingDueToBeingOnlyNode.IsNull()) == aIsOnlyNode) {
+    // Nothing changed.
+    return;
+  }
+
+  if (!mStream) {
+    // DestroyMediaStream has been called, presumably during CC Unlink().
+    return;
+  }
+
+  if (mIsOffline) {
+    // Don't block the destination stream for offline AudioContexts, since
+    // we expect the zero data produced when there are no other nodes to
+    // show up in its result buffer. Also, we would get confused by adding
+    // ExtraCurrentTime before StartRendering has even been called.
+    return;
+  }
+
+  if (aIsOnlyNode) {
+    mStream->ChangeExplicitBlockerCount(1);
+    mStartedBlockingDueToBeingOnlyNode = TimeStamp::Now();
+    mExtraCurrentTimeSinceLastStartedBlocking = 0;
+    // Don't do an update of mExtraCurrentTimeSinceLastStartedBlocking until the next stable state.
+    mExtraCurrentTimeUpdatedSinceLastStableState = true;
+    ScheduleStableStateNotification();
+  } else {
+    // Force update of mExtraCurrentTimeSinceLastStartedBlocking if necessary
+    ExtraCurrentTime();
+    mExtraCurrentTime += mExtraCurrentTimeSinceLastStartedBlocking;
+    mStream->ChangeExplicitBlockerCount(-1);
+    mStartedBlockingDueToBeingOnlyNode = TimeStamp();
+  }
 }
 
 }
+
+}
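
Note: the bookkeeping above keeps AudioContext.currentTime advancing while
the destination stream is blocked. A wall-clock timestamp is taken when
blocking starts; ExtraCurrentTime() reports the elapsed span, sampled at most
once per stable state so currentTime stays constant within a task; unblocking
folds the span into mExtraCurrentTime. A minimal sketch under those
assumptions, with std::chrono standing in for mozilla::TimeStamp and the
stable-state throttling omitted:

    #include <cassert>
    #include <chrono>
    #include <optional>

    struct ExtraTimeModel {
      using Clock = std::chrono::steady_clock;

      std::optional<Clock::time_point> mStartedBlocking;
      double mExtraCurrentTime = 0;
      double mSinceLastStartedBlocking = 0;

      double ExtraCurrentTime()
      {
        if (mStartedBlocking) {
          std::chrono::duration<double> d = Clock::now() - *mStartedBlocking;
          mSinceLastStartedBlocking = d.count();
        }
        return mExtraCurrentTime + mSinceLastStartedBlocking;
      }

      void SetBlocked(bool aBlocked)
      {
        if (aBlocked == mStartedBlocking.has_value()) {
          return;  // nothing changed
        }
        if (aBlocked) {
          mStartedBlocking = Clock::now();
          mSinceLastStartedBlocking = 0;
        } else {
          ExtraCurrentTime();  // capture the final elapsed span
          mExtraCurrentTime += mSinceLastStartedBlocking;
          mStartedBlocking.reset();
        }
      }
    };

    int main()
    {
      ExtraTimeModel m;
      m.SetBlocked(true);   // the context became empty
      m.SetBlocked(false);  // a node was created again
      assert(m.ExtraCurrentTime() >= 0);
      return 0;
    }
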
--- a/content/media/webaudio/AudioDestinationNode.h
+++ b/content/media/webaudio/AudioDestinationNode.h
@@ -65,30 +65,45 @@ public:
   NS_IMETHOD CanPlayChanged(int32_t aCanPlay);
 
   AudioChannel MozAudioChannelType() const;
   void SetMozAudioChannelType(AudioChannel aValue, ErrorResult& aRv);
 
   virtual void NotifyMainThreadStateChanged() MOZ_OVERRIDE;
   void FireOfflineCompletionEvent();
 
+  // An amount that should be added to the MediaStream's current time to
+  // get the AudioContext.currentTime.
+  double ExtraCurrentTime();
+
+  // When aIsOnlyNode is true, this is the only node for the AudioContext.
+  void SetIsOnlyNodeForContext(bool aIsOnlyNode);
+
 private:
   bool CheckAudioChannelPermissions(AudioChannel aValue);
   void CreateAudioChannelAgent();
 
   void SetCanPlay(bool aCanPlay);
 
+  void NotifyStableState();
+  void ScheduleStableStateNotification();
+
   SelfReference<AudioDestinationNode> mOfflineRenderingRef;
   uint32_t mFramesToProduce;
 
   nsCOMPtr<nsIAudioChannelAgent> mAudioChannelAgent;
 
   // Audio Channel Type.
   AudioChannel mAudioChannel;
   bool mIsOffline;
   bool mHasFinished;
+
+  TimeStamp mStartedBlockingDueToBeingOnlyNode;
+  double mExtraCurrentTime;
+  double mExtraCurrentTimeSinceLastStartedBlocking;
+  bool mExtraCurrentTimeUpdatedSinceLastStableState;
 };
 
 }
 }
 
 #endif
 
--- a/content/media/webaudio/AudioNode.cpp
+++ b/content/media/webaudio/AudioNode.cpp
@@ -14,16 +14,17 @@ namespace mozilla {
 namespace dom {
 
 static const uint32_t INVALID_PORT = 0xffffffff;
 
 NS_IMPL_CYCLE_COLLECTION_CLASS(AudioNode)
 
 NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(AudioNode, nsDOMEventTargetHelper)
   tmp->DisconnectFromGraph();
+  tmp->mContext->UpdateNodeCount(-1);
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mContext)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mOutputNodes)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mOutputParams)
 NS_IMPL_CYCLE_COLLECTION_UNLINK_END
 NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(AudioNode, nsDOMEventTargetHelper)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mContext)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mOutputNodes)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mOutputParams)
@@ -53,23 +54,29 @@ AudioNode::AudioNode(AudioContext* aCont
                      ChannelInterpretation aChannelInterpretation)
   : nsDOMEventTargetHelper(aContext->GetParentObject())
   , mContext(aContext)
   , mChannelCount(aChannelCount)
   , mChannelCountMode(aChannelCountMode)
   , mChannelInterpretation(aChannelInterpretation)
 {
   MOZ_ASSERT(aContext);
+  nsDOMEventTargetHelper::BindToOwner(aContext->GetParentObject());
+  SetIsDOMBinding();
+  aContext->UpdateNodeCount(1);
 }
 
 AudioNode::~AudioNode()
 {
   MOZ_ASSERT(mInputNodes.IsEmpty());
   MOZ_ASSERT(mOutputNodes.IsEmpty());
   MOZ_ASSERT(mOutputParams.IsEmpty());
+  if (mContext) {
+    mContext->UpdateNodeCount(-1);
+  }
 }
 
 template <class InputNode>
 static uint32_t
 FindIndexOfNode(const nsTArray<InputNode>& aInputNodes, const AudioNode* aNode)
 {
   for (uint32_t i = 0; i < aInputNodes.Length(); ++i) {
     if (aInputNodes[i].mInputNode == aNode) {
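
Note: the node count is balanced across three paths. The constructor
increments, and exactly one of cycle-collector Unlink or the destructor
decrements, because Unlink nulls mContext before ~AudioNode() runs. A small
sketch of why the destructor's null check prevents double-counting (the types
here are illustrative, not the patch's code):

    #include <cassert>

    struct Ctx { int count = 0; };

    struct Node {
      Ctx* mContext;
      explicit Node(Ctx* c) : mContext(c) { mContext->count += 1; }
      void Unlink() { mContext->count -= 1; mContext = nullptr; }
      ~Node() { if (mContext) { mContext->count -= 1; } }
    };

    int main()
    {
      Ctx ctx;
      {
        Node a(&ctx);  // count == 1
        Node b(&ctx);  // count == 2
        b.Unlink();    // CC unlink path: count == 1, mContext nulled
      }                // destructors run: a decrements, b must not
      assert(ctx.count == 0);
      return 0;
    }
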
--- a/content/media/webaudio/AudioParam.h
+++ b/content/media/webaudio/AudioParam.h
@@ -37,29 +37,34 @@ public:
   NS_IMETHOD_(nsrefcnt) Release(void);
   NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_NATIVE_CLASS(AudioParam)
 
   AudioContext* GetParentObject() const
   {
     return mNode->Context();
   }
 
+  double DOMTimeToStreamTime(double aTime) const
+  {
+    return mNode->Context()->DOMTimeToStreamTime(aTime);
+  }
+
   virtual JSObject* WrapObject(JSContext* aCx,
                                JS::Handle<JSObject*> aScope) MOZ_OVERRIDE;
 
   // We override SetValueCurveAtTime to convert the Float32Array to the wrapper
   // object.
   void SetValueCurveAtTime(const Float32Array& aValues, double aStartTime, double aDuration, ErrorResult& aRv)
   {
     if (!WebAudioUtils::IsTimeValid(aStartTime)) {
       aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
       return;
     }
     AudioParamTimeline::SetValueCurveAtTime(aValues.Data(), aValues.Length(),
-                                            aStartTime, aDuration, aRv);
+                                            DOMTimeToStreamTime(aStartTime), aDuration, aRv);
     mCallback(mNode);
   }
 
   // We override the rest of the mutating AudioParamTimeline methods in order to make
   // sure that the callback is called every time that this object gets mutated.
   void SetValue(float aValue)
   {
     // Optimize away setting the same value on an AudioParam
@@ -71,58 +76,58 @@ public:
     mCallback(mNode);
   }
   void SetValueAtTime(float aValue, double aStartTime, ErrorResult& aRv)
   {
     if (!WebAudioUtils::IsTimeValid(aStartTime)) {
       aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
       return;
     }
-    AudioParamTimeline::SetValueAtTime(aValue, aStartTime, aRv);
+    AudioParamTimeline::SetValueAtTime(aValue, DOMTimeToStreamTime(aStartTime), aRv);
     mCallback(mNode);
   }
   void LinearRampToValueAtTime(float aValue, double aEndTime, ErrorResult& aRv)
   {
     if (!WebAudioUtils::IsTimeValid(aEndTime)) {
       aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
       return;
     }
-    AudioParamTimeline::LinearRampToValueAtTime(aValue, aEndTime, aRv);
+    AudioParamTimeline::LinearRampToValueAtTime(aValue, DOMTimeToStreamTime(aEndTime), aRv);
     mCallback(mNode);
   }
   void ExponentialRampToValueAtTime(float aValue, double aEndTime, ErrorResult& aRv)
   {
     if (!WebAudioUtils::IsTimeValid(aEndTime)) {
       aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
       return;
     }
-    AudioParamTimeline::ExponentialRampToValueAtTime(aValue, aEndTime, aRv);
+    AudioParamTimeline::ExponentialRampToValueAtTime(aValue, DOMTimeToStreamTime(aEndTime), aRv);
     mCallback(mNode);
   }
   void SetTargetAtTime(float aTarget, double aStartTime, double aTimeConstant, ErrorResult& aRv)
   {
     if (!WebAudioUtils::IsTimeValid(aStartTime) ||
         !WebAudioUtils::IsTimeValid(aTimeConstant)) {
       aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
       return;
     }
-    AudioParamTimeline::SetTargetAtTime(aTarget, aStartTime, aTimeConstant, aRv);
+    AudioParamTimeline::SetTargetAtTime(aTarget, DOMTimeToStreamTime(aStartTime), aTimeConstant, aRv);
     mCallback(mNode);
   }
   void SetTargetValueAtTime(float aTarget, double aStartTime, double aTimeConstant, ErrorResult& aRv)
   {
     SetTargetAtTime(aTarget, aStartTime, aTimeConstant, aRv);
   }
   void CancelScheduledValues(double aStartTime, ErrorResult& aRv)
   {
     if (!WebAudioUtils::IsTimeValid(aStartTime)) {
       aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
       return;
     }
-    AudioParamTimeline::CancelScheduledValues(aStartTime);
+    AudioParamTimeline::CancelScheduledValues(DOMTimeToStreamTime(aStartTime));
     mCallback(mNode);
   }
 
   float DefaultValue() const
   {
     return mDefaultValue;
   }
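
Note: every scheduling method above converts at the DOM boundary, mirroring
the ExtraCurrentTime term added to AudioContext::CurrentTime. A minimal
round-trip check under that assumption (plain doubles, illustrative values
chosen to be exactly representable):

    #include <cassert>

    int main()
    {
      double streamNow = 0.0;  // the blocked stream's clock hasn't advanced
      double extra = 2.0;      // wall-clock seconds elapsed while blocked
      double domNow = streamNow + extra;       // AudioContext::CurrentTime()
      double scheduled = domNow + 0.25;        // e.g. setValueAtTime(v, t)
      double streamTime = scheduled - extra;   // DOMTimeToStreamTime(t)
      assert(streamTime == streamNow + 0.25);  // lands 0.25s ahead on stream
      return 0;
    }
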
 
--- a/content/media/webaudio/OscillatorNode.cpp
+++ b/content/media/webaudio/OscillatorNode.cpp
@@ -573,18 +573,17 @@ OscillatorNode::Start(double aWhen, Erro
   AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get());
   if (!ns) {
     // Nothing to play, or we're already dead for some reason
     return;
   }
 
   // TODO: Perhaps we need to do more here.
   ns->SetStreamTimeParameter(OscillatorNodeEngine::START,
-                             Context()->DestinationStream(),
-                             aWhen);
+                             Context(), aWhen);
 
   MarkActive();
 }
 
 void
 OscillatorNode::Stop(double aWhen, ErrorResult& aRv)
 {
   if (!WebAudioUtils::IsTimeValid(aWhen)) {
@@ -600,18 +599,17 @@ OscillatorNode::Stop(double aWhen, Error
   AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get());
   if (!ns || !Context()) {
     // We've already stopped and had our stream shut down
     return;
   }
 
   // TODO: Perhaps we need to do more here.
   ns->SetStreamTimeParameter(OscillatorNodeEngine::STOP,
-                             Context()->DestinationStream(),
-                             std::max(0.0, aWhen));
+                             Context(), std::max(0.0, aWhen));
 }
 
 void
 OscillatorNode::NotifyMainThreadStateChanged()
 {
   if (mStream->IsFinished()) {
     class EndedEventDispatcher : public nsRunnable
     {
--- a/content/media/webaudio/moz.build
+++ b/content/media/webaudio/moz.build
@@ -4,16 +4,17 @@
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 PARALLEL_DIRS += ['blink', 'test']
 
 TEST_TOOL_DIRS += ['compiledtest']
 
 EXPORTS += [
+    'AudioContext.h',
     'AudioParamTimeline.h',
     'MediaBufferDecoder.h',
     'ThreeDPoint.h',
     'WebAudioUtils.h',
 ]
 
 EXPORTS.mozilla += [
     'FFTBlock.h',
--- a/content/media/webaudio/test/webaudio.js
+++ b/content/media/webaudio/test/webaudio.js
@@ -48,20 +48,21 @@ function compareChannels(buf1, buf2,
       difference++;
       maxDifference = Math.max(maxDifference, Math.abs(buf1[i + sourceOffset] - buf2[i + destOffset]));
       if (firstBadIndex == -1) {
         firstBadIndex = i;
       }
     }
   };
 
-  is(difference, 0, "Found " + difference + " different samples, maxDifference: " +
-     maxDifference + ", first bad index: " + firstBadIndex +
-     " with source offset " + sourceOffset + " and destination offset " +
-     destOffset);
+  is(difference, 0, "maxDifference: " + maxDifference +
+     ", first bad index: " + firstBadIndex +
+     " with test-data offset " + sourceOffset + " and expected-data offset " +
+     destOffset + "; corresponding values " + buf1[firstBadIndex + sourceOffset] +
+     " and " + buf2[firstBadIndex + destOffset] + " --- differences");
 }
 
 function compareBuffers(got, expected) {
   if (got.numberOfChannels != expected.numberOfChannels) {
     is(got.numberOfChannels, expected.numberOfChannels,
        "Correct number of buffer channels");
     return;
   }