Bug 853298 - Part 1: Switch the ownership model of audio nodes to be based on the cycle collector with wrapper caches; r=roc
authorEhsan Akhgari <ehsan@mozilla.com>
Sun, 14 Apr 2013 21:52:55 -0400
changeset 128748 dedbbce5235fe4a54b762ee63f796e7767f115e1
parent 128747 1d031f4a22f56ecb0724e27deb819b0216040976
child 128749 5d549a8fc2eb70898a41216ec1de267cf1649f75
push id24540
push userryanvm@gmail.com
push dateMon, 15 Apr 2013 16:12:05 +0000
treeherdermozilla-central@53c2e7b9753b [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersroc
bugs853298
milestone23.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 853298 - Part 1: Switch the ownership model of audio nodes to be based on the cycle collector with wrapper caches; r=roc Here is what this patch does: * Got rid of the JSBindingFinalized stuff * Made all nodes wrappercached * Started to hold a self reference while the AudioBufferSourceNode is playing back * Converted the input references to weak references * Got rid of all of the SetProduceOwnOutput and UpdateOutputEnded logic The nodes are now collected by the cycle collector which calls into DisconnectFromGraph which drops the references to other nodes and destroys the media stream. Note that most of the cycles that are now inherent in the ownership model are between nodes and their AudioParams (that is, the cycles not created by content.)
content/media/AudioNodeStream.cpp
content/media/AudioNodeStream.h
content/media/webaudio/AudioBufferSourceNode.cpp
content/media/webaudio/AudioBufferSourceNode.h
content/media/webaudio/AudioContext.cpp
content/media/webaudio/AudioDestinationNode.cpp
content/media/webaudio/AudioDestinationNode.h
content/media/webaudio/AudioNode.cpp
content/media/webaudio/AudioNode.h
content/media/webaudio/DelayNode.cpp
content/media/webaudio/DelayNode.h
content/media/webaudio/PannerNode.cpp
dom/bindings/Bindings.conf
--- a/content/media/AudioNodeStream.cpp
+++ b/content/media/AudioNodeStream.cpp
@@ -172,16 +172,28 @@ AudioNodeStream::EnsureTrack()
                                   MediaStreamListener::TRACK_EVENT_CREATED,
                                   *segment);
     }
     track = &mBuffer.AddTrack(AUDIO_NODE_STREAM_TRACK_ID, IdealAudioRate(), 0, segment.forget());
   }
   return track;
 }
 
+bool
+AudioNodeStream::AllInputsFinished() const
+{
+  uint32_t inputCount = mInputs.Length();
+  for (uint32_t i = 0; i < inputCount; ++i) {
+    if (!mInputs[i]->GetSource()->IsFinishedOnGraphThread()) {
+      return false;
+    }
+  }
+  return !!inputCount;
+}
+
 AudioChunk*
 AudioNodeStream::ObtainInputBlock(AudioChunk* aTmpChunk)
 {
   uint32_t inputCount = mInputs.Length();
   uint32_t outputChannelCount = 0;
   nsAutoTArray<AudioChunk*,250> inputChunks;
   for (uint32_t i = 0; i < inputCount; ++i) {
     MediaStream* s = mInputs[i]->GetSource();
--- a/content/media/AudioNodeStream.h
+++ b/content/media/AudioNodeStream.h
@@ -69,16 +69,17 @@ public:
 
   virtual AudioNodeStream* AsAudioNodeStream() { return this; }
 
   // Graph thread only
   void SetStreamTimeParameterImpl(uint32_t aIndex, MediaStream* aRelativeToStream,
                                   double aStreamTime);
   virtual void ProduceOutput(GraphTime aFrom, GraphTime aTo);
   TrackTicks GetCurrentPosition();
+  bool AllInputsFinished() const;
 
   // Any thread
   AudioNodeEngine* Engine() { return mEngine; }
 
 protected:
   void FinishOutput();
 
   StreamBuffer::Track* EnsureTrack();
--- a/content/media/webaudio/AudioBufferSourceNode.cpp
+++ b/content/media/webaudio/AudioBufferSourceNode.cpp
@@ -408,22 +408,21 @@ public:
   AudioParamTimeline mPlaybackRateTimeline;
   bool mLoop;
 };
 
 AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* aContext)
   : AudioNode(aContext)
   , mLoopStart(0.0)
   , mLoopEnd(0.0)
+  , mPlaybackRate(new AudioParam(this, SendPlaybackRateToStream, 1.0f))
+  , mPannerNode(nullptr)
   , mLoop(false)
   , mStartCalled(false)
-  , mPlaybackRate(new AudioParam(this, SendPlaybackRateToStream, 1.0f))
-  , mPannerNode(nullptr)
 {
-  SetProduceOwnOutput(true);
   mStream = aContext->Graph()->CreateAudioNodeStream(
       new AudioBufferSourceNodeEngine(aContext->Destination()),
       MediaStreamGraph::INTERNAL_STREAM);
   mStream->AddMainThreadListener(this);
 }
 
 AudioBufferSourceNode::~AudioBufferSourceNode()
 {
@@ -498,16 +497,19 @@ AudioBufferSourceNode::Start(JSContext* 
   int32_t offsetTicks = NS_lround(offset*rate);
   // Don't set parameter unnecessarily
   if (offsetTicks > 0) {
     ns->SetInt32Parameter(AudioBufferSourceNodeEngine::OFFSET, offsetTicks);
   }
   ns->SetInt32Parameter(AudioBufferSourceNodeEngine::DURATION,
       NS_lround(endOffset*rate) - offsetTicks);
   ns->SetInt32Parameter(AudioBufferSourceNodeEngine::SAMPLE_RATE, rate);
+
+  MOZ_ASSERT(!mPlayingRef, "We can only accept a successful start() call once");
+  mPlayingRef.Take(this);
 }
 
 void
 AudioBufferSourceNode::Stop(double aWhen, ErrorResult& aRv)
 {
   if (!mStartCalled) {
     aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
     return;
@@ -523,17 +525,19 @@ AudioBufferSourceNode::Stop(double aWhen
                              Context()->DestinationStream(),
                              std::max(0.0, aWhen));
 }
 
 void
 AudioBufferSourceNode::NotifyMainThreadStateChanged()
 {
   if (mStream->IsFinished()) {
-    SetProduceOwnOutput(false);
+    // Drop the playing reference
+    // Warning: The below line might delete this.
+    mPlayingRef.Drop(this);
   }
 }
 
 void
 AudioBufferSourceNode::SendPlaybackRateToStream(AudioNode* aNode)
 {
   AudioBufferSourceNode* This = static_cast<AudioBufferSourceNode*>(aNode);
   SendTimelineParameterToStream(This, AudioBufferSourceNodeEngine::PLAYBACKRATE, *This->mPlaybackRate);
--- a/content/media/webaudio/AudioBufferSourceNode.h
+++ b/content/media/webaudio/AudioBufferSourceNode.h
@@ -45,26 +45,16 @@ public:
   void UnregisterPannerNode() {
     mPannerNode = nullptr;
   }
 
   void RegisterPannerNode(PannerNode* aPannerNode) {
     mPannerNode = aPannerNode;
   }
 
-  void JSBindingFinalized()
-  {
-    // If the JS binding goes away on a node which never received a start()
-    // call, then it can no longer produce output.
-    if (!mStartCalled) {
-      SetProduceOwnOutput(false);
-    }
-    AudioNode::JSBindingFinalized();
-  }
-
   NS_DECL_ISUPPORTS_INHERITED
   NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(AudioBufferSourceNode, AudioNode)
 
   virtual JSObject* WrapObject(JSContext* aCx, JSObject* aScope);
 
   void Start(JSContext* aCx, double aWhen, double aOffset,
              const Optional<double>& aDuration, ErrorResult& aRv);
   void NoteOn(JSContext* aCx, double aWhen, ErrorResult& aRv)
@@ -121,22 +111,23 @@ public:
     mLoopEnd = aEnd;
   }
   void SendDopplerShiftToStream(double aDopplerShift);
 
   virtual void NotifyMainThreadStateChanged() MOZ_OVERRIDE;
 
 private:
   static void SendPlaybackRateToStream(AudioNode* aNode);
-  nsRefPtr<AudioBuffer> mBuffer;
   double mLoopStart;
   double mLoopEnd;
+  nsRefPtr<AudioBuffer> mBuffer;
+  nsRefPtr<AudioParam> mPlaybackRate;
+  PannerNode* mPannerNode;
+  SelfReference<AudioBufferSourceNode> mPlayingRef; // a reference to self while playing
   bool mLoop;
   bool mStartCalled;
-  nsRefPtr<AudioParam> mPlaybackRate;
-  PannerNode* mPannerNode;
 };
 
 }
 }
 
 #endif
 
--- a/content/media/webaudio/AudioContext.cpp
+++ b/content/media/webaudio/AudioContext.cpp
@@ -228,19 +228,25 @@ double
 AudioContext::CurrentTime() const
 {
   return MediaTimeToSeconds(Destination()->Stream()->GetCurrentTime());
 }
 
 void
 AudioContext::Suspend()
 {
-  DestinationStream()->ChangeExplicitBlockerCount(1);
+  MediaStream* ds = DestinationStream();
+  if (ds) {
+    ds->ChangeExplicitBlockerCount(1);
+  }
 }
 
 void
 AudioContext::Resume()
 {
-  DestinationStream()->ChangeExplicitBlockerCount(-1);
+  MediaStream* ds = DestinationStream();
+  if (ds) {
+    ds->ChangeExplicitBlockerCount(-1);
+  }
 }
 
 }
 }
--- a/content/media/webaudio/AudioDestinationNode.cpp
+++ b/content/media/webaudio/AudioDestinationNode.cpp
@@ -4,41 +4,27 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "AudioDestinationNode.h"
 #include "mozilla/dom/AudioDestinationNodeBinding.h"
 #include "AudioNodeEngine.h"
 #include "AudioNodeStream.h"
 #include "MediaStreamGraph.h"
-#include "nsContentUtils.h"
 
 namespace mozilla {
 namespace dom {
 
-NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(AudioDestinationNode, AudioNode)
-  NS_IMPL_CYCLE_COLLECTION_UNLINK_PRESERVED_WRAPPER
-NS_IMPL_CYCLE_COLLECTION_UNLINK_END
-NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(AudioDestinationNode, AudioNode)
-  NS_IMPL_CYCLE_COLLECTION_TRAVERSE_SCRIPT_OBJECTS
-NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
-NS_IMPL_CYCLE_COLLECTION_TRACE_WRAPPERCACHE(AudioDestinationNode)
-
-NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(AudioDestinationNode)
-NS_INTERFACE_MAP_END_INHERITING(AudioNode)
-
-NS_IMPL_CYCLE_COLLECTING_ADDREF(AudioDestinationNode)
-NS_IMPL_CYCLE_COLLECTING_RELEASE(AudioDestinationNode)
+NS_IMPL_ISUPPORTS_INHERITED0(AudioDestinationNode, AudioNode)
 
 AudioDestinationNode::AudioDestinationNode(AudioContext* aContext, MediaStreamGraph* aGraph)
   : AudioNode(aContext)
 {
   mStream = aGraph->CreateAudioNodeStream(new AudioNodeEngine(),
                                           MediaStreamGraph::EXTERNAL_STREAM);
-  SetIsDOMBinding();
 }
 
 JSObject*
 AudioDestinationNode::WrapObject(JSContext* aCx, JSObject* aScope)
 {
   return AudioDestinationNodeBinding::Wrap(aCx, aScope, this);
 }
 
--- a/content/media/webaudio/AudioDestinationNode.h
+++ b/content/media/webaudio/AudioDestinationNode.h
@@ -9,42 +9,29 @@
 
 #include "AudioNode.h"
 
 namespace mozilla {
 namespace dom {
 
 class AudioContext;
 
-/**
- * Need to have an nsWrapperCache on AudioDestinationNodes since
- * AudioContext.destination returns them.
- */
-class AudioDestinationNode : public AudioNode,
-                             public nsWrapperCache
+class AudioDestinationNode : public AudioNode
 {
 public:
   AudioDestinationNode(AudioContext* aContext, MediaStreamGraph* aGraph);
 
   NS_DECL_ISUPPORTS_INHERITED
-  NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_CLASS_INHERITED(AudioDestinationNode,
-                                                         AudioNode)
 
   virtual JSObject* WrapObject(JSContext* aCx, JSObject* aScope) MOZ_OVERRIDE;
 
   virtual uint32_t NumberOfOutputs() const MOZ_FINAL MOZ_OVERRIDE
   {
     return 0;
   }
 
-  void JSBindingFinalized()
-  {
-    // Don't do anything special for destination nodes, as they will always
-    // remain accessible through the AudioContext.
-  }
-
 };
 
 }
 }
 
 #endif
 
--- a/content/media/webaudio/AudioNode.cpp
+++ b/content/media/webaudio/AudioNode.cpp
@@ -8,54 +8,46 @@
 #include "AudioContext.h"
 #include "nsContentUtils.h"
 #include "mozilla/ErrorResult.h"
 #include "AudioNodeStream.h"
 
 namespace mozilla {
 namespace dom {
 
-inline void
-ImplCycleCollectionTraverse(nsCycleCollectionTraversalCallback& aCallback,
-                            AudioNode::InputNode& aField,
-                            const char* aName,
-                            unsigned aFlags)
-{
-  CycleCollectionNoteChild(aCallback, aField.mInputNode.get(), aName, aFlags);
-}
-
-inline void
-ImplCycleCollectionUnlink(nsCycleCollectionTraversalCallback& aCallback,
-                          AudioNode::InputNode& aField,
-                          const char* aName,
-                          unsigned aFlags)
-{
-  aField.mInputNode = nullptr;
-}
-
-NS_IMPL_CYCLE_COLLECTION_3(AudioNode, mContext, mInputNodes, mOutputNodes)
+NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(AudioNode)
+  tmp->DisconnectFromGraph();
+  NS_IMPL_CYCLE_COLLECTION_UNLINK(mContext)
+  NS_IMPL_CYCLE_COLLECTION_UNLINK(mOutputNodes)
+  NS_IMPL_CYCLE_COLLECTION_UNLINK_PRESERVED_WRAPPER
+NS_IMPL_CYCLE_COLLECTION_UNLINK_END
+NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN(AudioNode)
+  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mContext)
+  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mOutputNodes)
+  NS_IMPL_CYCLE_COLLECTION_TRAVERSE_SCRIPT_OBJECTS
+NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
+NS_IMPL_CYCLE_COLLECTION_TRACE_WRAPPERCACHE(AudioNode)
 
 NS_IMPL_CYCLE_COLLECTING_ADDREF(AudioNode)
 NS_IMPL_CYCLE_COLLECTING_RELEASE(AudioNode)
 NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(AudioNode)
+  NS_WRAPPERCACHE_INTERFACE_MAP_ENTRY
   NS_INTERFACE_MAP_ENTRY(nsISupports)
 NS_INTERFACE_MAP_END
 
 AudioNode::AudioNode(AudioContext* aContext)
   : mContext(aContext)
-  , mJSBindingFinalized(false)
-  , mCanProduceOwnOutput(false)
-  , mOutputEnded(false)
 {
   MOZ_ASSERT(aContext);
+  SetIsDOMBinding();
 }
 
 AudioNode::~AudioNode()
 {
-  DestroyMediaStream();
+  DisconnectFromGraph();
   MOZ_ASSERT(mInputNodes.IsEmpty());
   MOZ_ASSERT(mOutputNodes.IsEmpty());
 }
 
 static uint32_t
 FindIndexOfNode(const nsTArray<AudioNode::InputNode>& aInputNodes, const AudioNode* aNode)
 {
   for (uint32_t i = 0; i < aInputNodes.Length(); ++i) {
@@ -76,58 +68,41 @@ FindIndexOfNodeWithPorts(const nsTArray<
         aInputNodes[i].mOutputPort == aOutputPort) {
       return i;
     }
   }
   return nsTArray<AudioNode::InputNode>::NoIndex;
 }
 
 void
-AudioNode::UpdateOutputEnded()
+AudioNode::DisconnectFromGraph()
 {
-  if (mOutputEnded) {
-    // Already ended, so nothing to do.
-    return;
-  }
-  if (mCanProduceOwnOutput ||
-      !mInputNodes.IsEmpty() ||
-      (!mJSBindingFinalized && NumberOfInputs() > 0)) {
-    // This node could still produce output in the future.
-    return;
-  }
-
-  mOutputEnded = true;
-
   // Addref this temporarily so the refcount bumping below doesn't destroy us
   // prematurely
   nsRefPtr<AudioNode> kungFuDeathGrip = this;
 
   // The idea here is that we remove connections one by one, and at each step
   // the graph is in a valid state.
 
   // Disconnect inputs. We don't need them anymore.
   while (!mInputNodes.IsEmpty()) {
     uint32_t i = mInputNodes.Length() - 1;
-    nsRefPtr<AudioNode> input = mInputNodes[i].mInputNode.forget();
+    nsRefPtr<AudioNode> input = mInputNodes[i].mInputNode;
     mInputNodes.RemoveElementAt(i);
-    NS_ASSERTION(mOutputNodes.Contains(this), "input/output inconsistency");
     input->mOutputNodes.RemoveElement(this);
   }
 
   while (!mOutputNodes.IsEmpty()) {
     uint32_t i = mOutputNodes.Length() - 1;
     nsRefPtr<AudioNode> output = mOutputNodes[i].forget();
     mOutputNodes.RemoveElementAt(i);
     uint32_t inputIndex = FindIndexOfNode(output->mInputNodes, this);
-    NS_ASSERTION(inputIndex != nsTArray<AudioNode::InputNode>::NoIndex, "input/output inconsistency");
     // It doesn't matter which one we remove, since we're going to remove all
     // entries for this node anyway.
     output->mInputNodes.RemoveElementAt(inputIndex);
-
-    output->UpdateOutputEnded();
   }
 
   DestroyMediaStream();
 }
 
 void
 AudioNode::Connect(AudioNode& aDestination, uint32_t aOutput,
                    uint32_t aInput, ErrorResult& aRv)
@@ -138,33 +113,25 @@ AudioNode::Connect(AudioNode& aDestinati
     return;
   }
 
   if (Context() != aDestination.Context()) {
     aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
     return;
   }
 
-  if (IsOutputEnded() || aDestination.IsOutputEnded()) {
-    // No need to connect since we're not going to produce anything other
-    // than silence.
-    return;
-  }
   if (FindIndexOfNodeWithPorts(aDestination.mInputNodes, this, aInput, aOutput) !=
       nsTArray<AudioNode::InputNode>::NoIndex) {
     // connection already exists.
     return;
   }
 
   // The MediaStreamGraph will handle cycle detection. We don't need to do it
   // here.
 
-  // Addref this temporarily so the refcount bumping below doesn't destroy us
-  nsRefPtr<AudioNode> kungFuDeathGrip = this;
-
   mOutputNodes.AppendElement(&aDestination);
   InputNode* input = aDestination.mInputNodes.AppendElement();
   input->mInputNode = this;
   input->mInputPort = aInput;
   input->mOutputPort = aOutput;
   if (SupportsMediaStreams() && aDestination.mStream) {
     // Connect streams in the MediaStreamGraph
     MOZ_ASSERT(aDestination.mStream->AsProcessedStream());
@@ -214,38 +181,29 @@ AudioNode::SendTimelineParameterToStream
 void
 AudioNode::Disconnect(uint32_t aOutput, ErrorResult& aRv)
 {
   if (aOutput >= NumberOfOutputs()) {
     aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
     return;
   }
 
-  // Disconnect everything connected to this output. First find the
-  // corresponding inputs and remove them.
-  nsAutoTArray<nsRefPtr<AudioNode>,4> outputsToUpdate;
-
   for (int32_t i = mOutputNodes.Length() - 1; i >= 0; --i) {
     AudioNode* dest = mOutputNodes[i];
     for (int32_t j = dest->mInputNodes.Length() - 1; j >= 0; --j) {
       InputNode& input = dest->mInputNodes[j];
       if (input.mInputNode == this && input.mOutputPort == aOutput) {
         dest->mInputNodes.RemoveElementAt(j);
         // Remove one instance of 'dest' from mOutputNodes. There could be
         // others, and it's not correct to remove them all since some of them
         // could be for different output ports.
-        *outputsToUpdate.AppendElement() = mOutputNodes[i].forget();
         mOutputNodes.RemoveElementAt(i);
         break;
       }
     }
   }
 
-  for (uint32_t i = 0; i < outputsToUpdate.Length(); ++i) {
-    outputsToUpdate[i]->UpdateOutputEnded();
-  }
-
   // This disconnection may have disconnected a panner and a source.
   Context()->UpdatePannerSource();
 }
 
 }
 }
--- a/content/media/webaudio/AudioNode.h
+++ b/content/media/webaudio/AudioNode.h
@@ -2,16 +2,17 @@
 /* vim:set ts=2 sw=2 sts=2 et cindent: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef AudioNode_h_
 #define AudioNode_h_
 
+#include "nsWrapperCache.h"
 #include "nsCycleCollectionParticipant.h"
 #include "mozilla/Attributes.h"
 #include "EnableWebAudioCheck.h"
 #include "nsAutoPtr.h"
 #include "nsTArray.h"
 #include "AudioContext.h"
 #include "AudioParamTimeline.h"
 #include "MediaStreamGraph.h"
@@ -21,36 +22,61 @@ struct JSContext;
 namespace mozilla {
 
 class ErrorResult;
 
 namespace dom {
 
 struct ThreeDPoint;
 
+template<class T>
+class SelfReference {
+public:
+  SelfReference() : mHeld(false) {}
+  ~SelfReference()
+  {
+    NS_ASSERTION(!mHeld, "Forgot to drop the self reference?");
+  }
+
+  void Take(T* t)
+  {
+    if (!mHeld) {
+      mHeld = true;
+      t->AddRef();
+    }
+  }
+  void Drop(T* t)
+  {
+    if (mHeld) {
+      mHeld = false;
+      t->Release();
+    }
+  }
+
+  operator bool() const { return mHeld; }
+
+private:
+  bool mHeld;
+};
+
 /**
  * The DOM object representing a Web Audio AudioNode.
  *
  * Each AudioNode has a MediaStream representing the actual
  * real-time processing and output of this AudioNode.
  *
  * We track the incoming and outgoing connections to other AudioNodes.
  * All connections are strong and thus rely on cycle collection to break them.
  * However, we also track whether an AudioNode is capable of producing output
  * in the future. If it isn't, then we break its connections to its inputs
  * and outputs, allowing nodes to be immediately disconnected. This
  * disconnection is done internally, invisible to DOM users.
- *
- * We say that a node cannot produce output in the future if it has no inputs
- * that can, and it is not producing output itself without any inputs, and
- * either it can never have any inputs or it has no JS wrapper. (If it has a
- * JS wrapper and can accept inputs, then a new input could be added in
- * the future.)
  */
 class AudioNode : public nsISupports,
+                  public nsWrapperCache,
                   public EnableWebAudioCheck
 {
 public:
   explicit AudioNode(AudioContext* aContext);
   virtual ~AudioNode();
 
   // This should be idempotent (safe to call multiple times).
   // This should be called in the destructor of every class that overrides
@@ -66,24 +92,17 @@ public:
   // This method should be overridden to return true in nodes
   // which support being hooked up to the Media Stream graph.
   virtual bool SupportsMediaStreams() const
   {
     return false;
   }
 
   NS_DECL_CYCLE_COLLECTING_ISUPPORTS
-  NS_DECL_CYCLE_COLLECTION_CLASS(AudioNode)
-
-  void JSBindingFinalized()
-  {
-    NS_ASSERTION(!mJSBindingFinalized, "JS binding already finalized");
-    mJSBindingFinalized = true;
-    UpdateOutputEnded();
-  }
+  NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_CLASS(AudioNode)
 
   virtual AudioBufferSourceNode* AsAudioBufferSourceNode() {
     return nullptr;
   }
 
   AudioContext* GetParentObject() const
   {
     return mContext;
@@ -100,55 +119,44 @@ public:
   void Disconnect(uint32_t aOutput, ErrorResult& aRv);
 
   // The following two virtual methods must be implemented by each node type
   // to provide their number of input and output ports. These numbers are
   // constant for the lifetime of the node. Both default to 1.
   virtual uint32_t NumberOfInputs() const { return 1; }
   virtual uint32_t NumberOfOutputs() const { return 1; }
 
-  // This could possibly delete 'this'.
-  void UpdateOutputEnded();
-  bool IsOutputEnded() const { return mOutputEnded; }
-
   struct InputNode {
     ~InputNode()
     {
       if (mStreamPort) {
         mStreamPort->Destroy();
       }
     }
 
-    // Strong reference.
-    // May be null if the source node has gone away.
-    nsRefPtr<AudioNode> mInputNode;
+    // Weak reference.
+    AudioNode* mInputNode;
     nsRefPtr<MediaInputPort> mStreamPort;
     // The index of the input port this node feeds into.
     uint32_t mInputPort;
     // The index of the output port this node comes out of.
     uint32_t mOutputPort;
   };
 
   MediaStream* Stream() { return mStream; }
 
-  // Set this to true when the node can produce its own output even if there
-  // are no inputs.
-  void SetProduceOwnOutput(bool aCanProduceOwnOutput)
-  {
-    mCanProduceOwnOutput = aCanProduceOwnOutput;
-    if (!aCanProduceOwnOutput) {
-      UpdateOutputEnded();
-    }
-  }
-
   const nsTArray<InputNode>& InputNodes() const
   {
     return mInputNodes;
   }
 
+private:
+  // This could possibly delete 'this'.
+  void DisconnectFromGraph();
+
 protected:
   static void Callback(AudioNode* aNode) { /* not implemented */ }
 
   // Helpers for sending different value types to streams
   void SendDoubleParameterToStream(uint32_t aIndex, double aValue);
   void SendInt32ParameterToStream(uint32_t aIndex, int32_t aValue);
   void SendThreeDPointParameterToStream(uint32_t aIndex, const ThreeDPoint& aValue);
   static void SendTimelineParameterToStream(AudioNode* aNode, uint32_t aIndex,
@@ -166,23 +174,14 @@ private:
   // For every InputNode, there is a corresponding entry in mOutputNodes of the
   // InputNode's mInputNode.
   nsTArray<InputNode> mInputNodes;
   // For every mOutputNode entry, there is a corresponding entry in mInputNodes
   // of the mOutputNode entry. We won't necessarily be able to identify the
   // exact matching entry, since mOutputNodes doesn't include the port
   // identifiers and the same node could be connected on multiple ports.
   nsTArray<nsRefPtr<AudioNode> > mOutputNodes;
-  // True if the JS binding has been finalized (so script no longer has
-  // a reference to this node).
-  bool mJSBindingFinalized;
-  // True if this node can produce its own output even when all inputs
-  // have ended their output.
-  bool mCanProduceOwnOutput;
-  // True if this node can never produce anything except silence in the future.
-  // Updated by UpdateOutputEnded().
-  bool mOutputEnded;
 };
 
 }
 }
 
 #endif
--- a/content/media/webaudio/DelayNode.cpp
+++ b/content/media/webaudio/DelayNode.cpp
@@ -20,24 +20,51 @@ NS_IMPL_CYCLE_COLLECTION_INHERITED_1(Del
 NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(DelayNode)
 NS_INTERFACE_MAP_END_INHERITING(AudioNode)
 
 NS_IMPL_ADDREF_INHERITED(DelayNode, AudioNode)
 NS_IMPL_RELEASE_INHERITED(DelayNode, AudioNode)
 
 class DelayNodeEngine : public AudioNodeEngine
 {
+  class PlayingRefChanged : public nsRunnable
+  {
+  public:
+    enum ChangeType { ADDREF, RELEASE };
+    PlayingRefChanged(DelayNode& aNode, ChangeType aChange)
+      : mNode(aNode)
+      , mChange(aChange)
+    {
+    }
+
+    NS_IMETHOD Run()
+    {
+      if (mChange == ADDREF) {
+        mNode.mPlayingRef.Take(&mNode);
+      } else if (mChange == RELEASE) {
+        mNode.mPlayingRef.Drop(&mNode);
+      }
+      return NS_OK;
+    }
+
+  private:
+    DelayNode& mNode;
+    ChangeType mChange;
+  };
+
 public:
-  explicit DelayNodeEngine(AudioDestinationNode* aDestination)
+  DelayNodeEngine(AudioDestinationNode* aDestination, DelayNode& aDelay)
     : mSource(nullptr)
     , mDestination(static_cast<AudioNodeStream*> (aDestination->Stream()))
+    , mDelayNode(aDelay)
     // Keep the default value in sync with the default value in DelayNode::DelayNode.
     , mDelay(0.f)
     , mMaxDelay(0.)
     , mWriteIndex(0)
+    , mLeftOverData(INT32_MIN)
     , mCurrentDelayTime(0.)
   {
   }
 
   void SetSourceStream(AudioNodeStream* aSource)
   {
     mSource = aSource;
   }
@@ -93,17 +120,40 @@ public:
   virtual void ProduceAudioBlock(AudioNodeStream* aStream,
                                  const AudioChunk& aInput,
                                  AudioChunk* aOutput,
                                  bool* aFinished)
   {
     MOZ_ASSERT(mSource == aStream, "Invalid source stream");
 
     const bool firstTime = !!!mBuffer.Length();
-    const uint32_t numChannels = aInput.mChannelData.Length();
+    const uint32_t numChannels = aInput.IsNull() ?
+                                 mBuffer.Length() :
+                                 aInput.mChannelData.Length();
+
+    bool playedBackAllLeftOvers = false;
+    if (!mBuffer.IsEmpty() &&
+        mLeftOverData == INT32_MIN &&
+        aStream->AllInputsFinished()) {
+      mLeftOverData = static_cast<int32_t>(mCurrentDelayTime * IdealAudioRate());
+
+      nsRefPtr<PlayingRefChanged> refchanged =
+        new PlayingRefChanged(mDelayNode, PlayingRefChanged::ADDREF);
+      NS_DispatchToMainThread(refchanged);
+    } else if (mLeftOverData != INT32_MIN) {
+      mLeftOverData -= WEBAUDIO_BLOCK_SIZE;
+      if (mLeftOverData <= 0) {
+        mLeftOverData = INT32_MIN;
+        playedBackAllLeftOvers = true;
+
+        nsRefPtr<PlayingRefChanged> refchanged =
+          new PlayingRefChanged(mDelayNode, PlayingRefChanged::RELEASE);
+        NS_DispatchToMainThread(refchanged);
+      }
+    }
 
     if (!EnsureBuffer(numChannels)) {
       aOutput->SetNull(0);
       return;
     }
 
     AllocateAudioBlock(numChannels, aOutput);
 
@@ -129,29 +179,31 @@ public:
     }
 
     for (uint32_t channel = 0; channel < numChannels; ++channel) {
       double currentDelayTime = mCurrentDelayTime;
       uint32_t writeIndex = mWriteIndex;
 
       float* buffer = mBuffer[channel].Elements();
       const uint32_t bufferLength = mBuffer[channel].Length();
-      const float* input = static_cast<const float*>(aInput.mChannelData[channel]);
+      const float* input = static_cast<const float*>(aInput.mChannelData.SafeElementAt(channel));
       float* output = static_cast<float*>(const_cast<void*>(aOutput->mChannelData[channel]));
 
       for (uint32_t i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) {
         if (mDelay.HasSimpleValue()) {
           // If the simple value has changed, smoothly approach it
           currentDelayTime += (delayTime - currentDelayTime) * smoothingRate;
         } else {
           currentDelayTime = computedDelay[i];
         }
 
         // Write the input sample to the correct location in our buffer
-        buffer[writeIndex] = input[i];
+        if (input) {
+          buffer[writeIndex] = input[i];
+        }
 
         // Now, determine the correct read position.  We adjust the read position to be
         // from currentDelayTime seconds in the past.  We also interpolate the two input
         // frames in case the read position does not match an integer index.
         double readPosition = writeIndex + bufferLength -
                               (currentDelayTime * IdealAudioRate());
         if (readPosition >= bufferLength) {
           readPosition -= bufferLength;
@@ -178,37 +230,46 @@ public:
 
       // Remember currentDelayTime and writeIndex for the next ProduceAudioBlock
       // call when processing the last channel.
       if (channel == numChannels - 1) {
         mCurrentDelayTime = currentDelayTime;
         mWriteIndex = writeIndex;
       }
     }
+
+    if (playedBackAllLeftOvers) {
+      // Delete our buffered data once we no longer need it
+      mBuffer.Clear();
+    }
   }
 
   AudioNodeStream* mSource;
   AudioNodeStream* mDestination;
+  DelayNode& mDelayNode;
   AudioParamTimeline mDelay;
   // Maximum delay time in seconds
   double mMaxDelay;
   // Circular buffer for capturing delayed samples.
   AutoFallibleTArray<FallibleTArray<float>, 2> mBuffer;
   // Write index for the buffer, to write the frames to the correct index of the buffer
   // given the current delay.
   uint32_t mWriteIndex;
+  // How much data we have in our buffer which needs to be flushed out when our inputs
+  // finish.
+  int32_t mLeftOverData;
   // Current delay time, in seconds
   double mCurrentDelayTime;
 };
 
 DelayNode::DelayNode(AudioContext* aContext, double aMaxDelay)
   : AudioNode(aContext)
   , mDelay(new AudioParam(this, SendDelayToStream, 0.0f))
 {
-  DelayNodeEngine* engine = new DelayNodeEngine(aContext->Destination());
+  DelayNodeEngine* engine = new DelayNodeEngine(aContext->Destination(), *this);
   mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
   engine->SetSourceStream(static_cast<AudioNodeStream*> (mStream.get()));
   AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get());
   ns->SetDoubleParameter(DelayNodeEngine::MAX_DELAY, aMaxDelay);
 }
 
 JSObject*
 DelayNode::WrapObject(JSContext* aCx, JSObject* aScope)
--- a/content/media/webaudio/DelayNode.h
+++ b/content/media/webaudio/DelayNode.h
@@ -32,18 +32,20 @@ public:
 
   virtual bool SupportsMediaStreams() const MOZ_OVERRIDE
   {
     return true;
   }
 
 private:
   static void SendDelayToStream(AudioNode* aNode);
+  friend class DelayNodeEngine;
 
 private:
   nsRefPtr<AudioParam> mDelay;
+  SelfReference<DelayNode> mPlayingRef;
 };
 
 }
 }
 
 #endif
 
--- a/content/media/webaudio/PannerNode.cpp
+++ b/content/media/webaudio/PannerNode.cpp
@@ -175,17 +175,19 @@ PannerNode::PannerNode(AudioContext* aCo
   mStream = aContext->Graph()->CreateAudioNodeStream(new PannerNodeEngine(),
                                                      MediaStreamGraph::INTERNAL_STREAM);
   // We should register once we have set up our stream and engine.
   Context()->Listener()->RegisterPannerNode(this);
 }
 
 PannerNode::~PannerNode()
 {
-  Context()->UnregisterPannerNode(this);
+  if (Context()) {
+    Context()->UnregisterPannerNode(this);
+  }
   DestroyMediaStream();
 }
 
 JSObject*
 PannerNode::WrapObject(JSContext* aCx, JSObject* aScope)
 {
   return PannerNodeBinding::Wrap(aCx, aScope, this);
 }
--- a/dom/bindings/Bindings.conf
+++ b/dom/bindings/Bindings.conf
@@ -77,20 +77,16 @@
 # worker descriptors for objects that will never actually appear in workers.
 
 DOMInterfaces = {
 
 'MozActivity': {
     'nativeType': 'mozilla::dom::Activity',
 },
 
-'AnalyserNode': {
-    'wrapperCache': False
-},
-
 'AnimationEvent': {
     'nativeType': 'nsDOMAnimationEvent',
 },
 
 'ArchiveReader': {
     'nativeType': 'mozilla::dom::file::ArchiveReader',
 },
 
@@ -107,17 +103,16 @@ DOMInterfaces = {
     'implicitJSContext': [ 'createBuffer' ],
     'nativeOwnership': 'refcounted',
     'resultNotAddRefed': [ 'destination', 'listener' ],
 },
 
 'AudioBufferSourceNode': {
     'implicitJSContext': [ 'start', 'noteOn', 'noteGrainOn' ],
     'resultNotAddRefed': [ 'playbackRate' ],
-    'wrapperCache': False
 },
 
 'AudioListener' : {
     'nativeOwnership': 'refcounted'
 },
 
 'AudioNode' : {
     'concrete': False,
@@ -128,17 +123,16 @@ DOMInterfaces = {
 },
 
 'BeforeUnloadEvent': {
     'nativeType': 'nsDOMBeforeUnloadEvent',
 },
 
 'BiquadFilterNode': {
     'resultNotAddRefed': [ 'frequency', 'q', 'gain' ],
-    'wrapperCache': False
 },
 
 'Blob': [
 {
     'headerFile': 'nsIDOMFile.h',
 },
 {
     'workers': True,
@@ -224,17 +218,16 @@ DOMInterfaces = {
 
 'CSSValueList': {
   'nativeType': 'nsDOMCSSValueList'
 },
 
 'DelayNode': [
 {
     'resultNotAddRefed': [ 'delayTime' ],
-    'wrapperCache': False
 }],
 
 'DesktopNotificationCenter': {
     'headerFile': 'mozilla/dom/DesktopNotification.h',
 },
 
 'Document': [
 {
@@ -287,17 +280,16 @@ DOMInterfaces = {
 },
 
 'DynamicsCompressorNode': {
     'resultNotAddRefed': [ 'threshold', 'knee', 'ratio',
                            'reduction', 'attack', 'release' ],
     'binaryNames': {
         'release': 'getRelease'
     },
-    'wrapperCache': False
 },
 
 'Element': {
     'hasXPConnectImpls': True,
     'resultNotAddRefed': [
         'classList', 'attributes', 'children', 'firstElementChild',
         'lastElementChild', 'previousElementSibling', 'nextElementSibling',
         'getAttributeNode', 'getAttributeNodeNS', 'querySelector'
@@ -373,17 +365,16 @@ DOMInterfaces = {
 {
     'workers': True,
     'skipGen': True,
     'nativeType': 'JSObject'
 }],
 
 'GainNode': {
     'resultNotAddRefed': [ 'gain' ],
-    'wrapperCache': False
 },
 
 'Gamepad': {
     'nativeType': 'nsDOMGamepad',
 },
 
 'HTMLAppletElement': {
     'nativeType': 'mozilla::dom::HTMLSharedObjectElement'
@@ -651,17 +642,16 @@ DOMInterfaces = {
     'nativeType': 'nsPaintRequestList',
     'headerFile': 'nsPaintRequest.h',
     'resultNotAddRefed': [ 'item' ]
 },
 
 'PannerNode': [
 {
     'resultNotAddRefed': [ 'coneGain', 'distanceGain' ],
-    'wrapperCache': False
 }],
 
 'Performance': {
     'nativeType': 'nsPerformance',
     'resultNotAddRefed': [ 'timing', 'navigation' ]
 },
 
 'PerformanceTiming': {