bug 1197043 move AudioNodeStream creation to stream class r=padenot
author Karl Tomlinson <karlt+@karlt.net>
Wed, 12 Aug 2015 11:26:24 +1200
changeset 259221 6f427969796d8d3a1e2c680a00759957b0e32fe1
parent 259220 bf6521da6a0310ac1123b02a0143ddcd362cfebb
child 259222 76997b5a67b80c2d06e564d4b57445dda3348f38
push id 29273
push user ryanvm@gmail.com
push date Tue, 25 Aug 2015 14:44:17 +0000
treeherder mozilla-central@23a04f9a321c
reviewers padenot
bugs 1197043
milestone 43.0a1
dom/media/MediaRecorder.cpp
dom/media/MediaStreamGraph.cpp
dom/media/MediaStreamGraph.h
dom/media/webaudio/AnalyserNode.cpp
dom/media/webaudio/AudioBufferSourceNode.cpp
dom/media/webaudio/AudioDestinationNode.cpp
dom/media/webaudio/AudioNodeExternalInputStream.cpp
dom/media/webaudio/AudioNodeExternalInputStream.h
dom/media/webaudio/AudioNodeStream.cpp
dom/media/webaudio/AudioNodeStream.h
dom/media/webaudio/AudioParam.cpp
dom/media/webaudio/BiquadFilterNode.cpp
dom/media/webaudio/ChannelMergerNode.cpp
dom/media/webaudio/ChannelSplitterNode.cpp
dom/media/webaudio/ConvolverNode.cpp
dom/media/webaudio/DelayNode.cpp
dom/media/webaudio/DynamicsCompressorNode.cpp
dom/media/webaudio/GainNode.cpp
dom/media/webaudio/MediaStreamAudioDestinationNode.cpp
dom/media/webaudio/MediaStreamAudioSourceNode.cpp
dom/media/webaudio/OscillatorNode.cpp
dom/media/webaudio/PannerNode.cpp
dom/media/webaudio/ScriptProcessorNode.cpp
dom/media/webaudio/StereoPannerNode.cpp
dom/media/webaudio/WaveShaperNode.cpp
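
The patch removes the MediaStreamGraph::CreateAudioNodeStream and CreateAudioNodeExternalInputStream factory methods and replaces them with static Create() factories on the stream classes themselves. As a quick orientation before the diff, every caller follows the same before/after pattern (GainNode shown as a representative caller; both lines match the hunks below):

    // Before: the graph constructed the stream and applied the node's mixing parameters.
    mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);

    // After: the stream class owns creation, and the kind enum moves onto AudioNodeStream.
    mStream = AudioNodeStream::Create(aContext->Graph(), engine,
                                      AudioNodeStream::INTERNAL_STREAM);
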
--- a/dom/media/MediaRecorder.cpp
+++ b/dom/media/MediaRecorder.cpp
@@ -771,18 +771,18 @@ MediaRecorder::MediaRecorder(AudioNode& 
 
   // Only AudioNodeStream of kind EXTERNAL_STREAM stores output audio data in
   // the track (see AudioNodeStream::AdvanceOutputSegment()). That means track
   // union stream in recorder session won't be able to copy data from the
   // stream of non-destination node. Create a pipe stream in this case.
   if (aSrcAudioNode.NumberOfOutputs() > 0) {
     AudioContext* ctx = aSrcAudioNode.Context();
     AudioNodeEngine* engine = new AudioNodeEngine(nullptr);
-    mPipeStream = ctx->Graph()->CreateAudioNodeStream(engine,
-                                                      MediaStreamGraph::EXTERNAL_STREAM);
+    mPipeStream = AudioNodeStream::Create(ctx->Graph(), engine,
+                                          AudioNodeStream::EXTERNAL_STREAM);
     AudioNodeStream* ns = aSrcAudioNode.GetStream();
     if (ns) {
       mInputPort = mPipeStream->AllocateInputPort(aSrcAudioNode.GetStream(),
                                                   MediaInputPort::FLAG_BLOCK_INPUT,
                                                   0,
                                                   aSrcOutput);
     }
   }
--- a/dom/media/MediaStreamGraph.cpp
+++ b/dom/media/MediaStreamGraph.cpp
@@ -15,17 +15,16 @@
 #include "nsServiceManagerUtils.h"
 #include "prerror.h"
 #include "mozilla/Logging.h"
 #include "mozilla/Attributes.h"
 #include "TrackUnionStream.h"
 #include "ImageContainer.h"
 #include "AudioCaptureStream.h"
 #include "AudioChannelService.h"
-#include "AudioNodeEngine.h"
 #include "AudioNodeStream.h"
 #include "AudioNodeExternalInputStream.h"
 #include "mozilla/dom/AudioContextBinding.h"
 #include <algorithm>
 #include "DOMMediaStream.h"
 #include "GeckoProfiler.h"
 #include "mozilla/unused.h"
 #ifdef MOZ_WEBRTC
@@ -3115,51 +3114,16 @@ MediaStreamGraph::CreateTrackUnionStream
 ProcessedMediaStream*
 MediaStreamGraph::CreateAudioCaptureStream(DOMMediaStream* aWrapper)
 {
   AudioCaptureStream* stream = new AudioCaptureStream(aWrapper);
   AddStream(stream);
   return stream;
 }
 
-AudioNodeExternalInputStream*
-MediaStreamGraph::CreateAudioNodeExternalInputStream(AudioNodeEngine* aEngine)
-{
-  MOZ_ASSERT(NS_IsMainThread());
-  MOZ_ASSERT(GraphRate() == aEngine->NodeMainThread()->Context()->SampleRate());
-
-  AudioNodeExternalInputStream* stream = new AudioNodeExternalInputStream(
-    aEngine, GraphRate(), aEngine->NodeMainThread()->Context()->Id());
-  AddStream(stream);
-  return stream;
-}
-
-AudioNodeStream*
-MediaStreamGraph::CreateAudioNodeStream(AudioNodeEngine* aEngine,
-                                        AudioNodeStreamKind aKind)
-{
-  MOZ_ASSERT(NS_IsMainThread());
-
-  // MediaRecorders use an AudioNodeStream, but no AudioNode
-  AudioNode* node = aEngine->NodeMainThread();
-  MOZ_ASSERT(!node || GraphRate() == node->Context()->SampleRate());
-
-  dom::AudioContext::AudioContextId contextIdForStream = node ? node->Context()->Id() :
-                                                                NO_AUDIO_CONTEXT;
-  AudioNodeStream* stream = new AudioNodeStream(aEngine, aKind, GraphRate(),
-                                                contextIdForStream);
-  if (aEngine->HasNode()) {
-    stream->SetChannelMixingParametersImpl(aEngine->NodeMainThread()->ChannelCount(),
-                                           aEngine->NodeMainThread()->ChannelCountModeValue(),
-                                           aEngine->NodeMainThread()->ChannelInterpretationValue());
-  }
-  AddStream(stream);
-  return stream;
-}
-
 void
 MediaStreamGraph::AddStream(MediaStream* aStream)
 {
   NS_ADDREF(aStream);
   MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(this);
   aStream->SetGraphImpl(graph);
   graph->AppendMessage(new CreateMessage(aStream));
 }
--- a/dom/media/MediaStreamGraph.h
+++ b/dom/media/MediaStreamGraph.h
@@ -1235,29 +1235,17 @@ public:
    * TODO at some point we will probably need to add API to select
    * particular tracks of each input stream.
    */
   ProcessedMediaStream* CreateTrackUnionStream(DOMMediaStream* aWrapper);
   /**
    * Create a stream that will mix all its audio input.
    */
   ProcessedMediaStream* CreateAudioCaptureStream(DOMMediaStream* aWrapper);
-  // Internal AudioNodeStreams can only pass their output to another
-  // AudioNode, whereas external AudioNodeStreams can pass their output
-  // to an nsAudioStream for playback.
-  enum AudioNodeStreamKind { SOURCE_STREAM, INTERNAL_STREAM, EXTERNAL_STREAM };
-  /**
-   * Create a stream that will process audio for an AudioNode.
-   * Takes ownership of aEngine.
-   */
-  AudioNodeStream* CreateAudioNodeStream(AudioNodeEngine* aEngine,
-                                         AudioNodeStreamKind aKind);
 
-  AudioNodeExternalInputStream*
-  CreateAudioNodeExternalInputStream(AudioNodeEngine* aEngine);
   /**
    * Add a new stream to the graph.  Main thread.
    */
   void AddStream(MediaStream* aStream);
 
   /* From the main thread, ask the MSG to send back an event when the graph
    * thread is running, and audio is being processed. */
   void NotifyWhenGraphStarted(AudioNodeStream* aNodeStream);
--- a/dom/media/webaudio/AnalyserNode.cpp
+++ b/dom/media/webaudio/AnalyserNode.cpp
@@ -80,18 +80,19 @@ AnalyserNode::AnalyserNode(AudioContext*
               1,
               ChannelCountMode::Max,
               ChannelInterpretation::Speakers)
   , mAnalysisBlock(2048)
   , mMinDecibels(-100.)
   , mMaxDecibels(-30.)
   , mSmoothingTimeConstant(.8)
 {
-  mStream = aContext->Graph()->CreateAudioNodeStream(new AnalyserNodeEngine(this),
-                                                     MediaStreamGraph::INTERNAL_STREAM);
+  mStream = AudioNodeStream::Create(aContext->Graph(),
+                                    new AnalyserNodeEngine(this),
+                                    AudioNodeStream::INTERNAL_STREAM);
 
   // Enough chunks must be recorded to handle the case of fftSize being
   // increased to maximum immediately before getFloatTimeDomainData() is
   // called, for example.
   (void)mChunks.SetLength(CHUNK_COUNT, fallible);
 
   AllocateBuffer();
 }
--- a/dom/media/webaudio/AudioBufferSourceNode.cpp
+++ b/dom/media/webaudio/AudioBufferSourceNode.cpp
@@ -537,17 +537,18 @@ AudioBufferSourceNode::AudioBufferSource
   , mLoopEnd(0.0)
   // mOffset and mDuration are initialized in Start().
   , mPlaybackRate(new AudioParam(this, SendPlaybackRateToStream, 1.0f, "playbackRate"))
   , mDetune(new AudioParam(this, SendDetuneToStream, 0.0f, "detune"))
   , mLoop(false)
   , mStartCalled(false)
 {
   AudioBufferSourceNodeEngine* engine = new AudioBufferSourceNodeEngine(this, aContext->Destination());
-  mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::SOURCE_STREAM);
+  mStream = AudioNodeStream::Create(aContext->Graph(), engine,
+                                    AudioNodeStream::SOURCE_STREAM);
   engine->SetSourceStream(mStream);
   mStream->AddMainThreadListener(this);
 }
 
 AudioBufferSourceNode::~AudioBufferSourceNode()
 {
 }
 
--- a/dom/media/webaudio/AudioDestinationNode.cpp
+++ b/dom/media/webaudio/AudioDestinationNode.cpp
@@ -341,17 +341,18 @@ AudioDestinationNode::AudioDestinationNo
   MediaStreamGraph* graph = aIsOffline ?
                             MediaStreamGraph::CreateNonRealtimeInstance(aSampleRate) :
                             MediaStreamGraph::GetInstance(startWithAudioDriver, aChannel);
   AudioNodeEngine* engine = aIsOffline ?
                             new OfflineDestinationNodeEngine(this, aNumberOfChannels,
                                                              aLength, aSampleRate) :
                             static_cast<AudioNodeEngine*>(new DestinationNodeEngine(this));
 
-  mStream = graph->CreateAudioNodeStream(engine, MediaStreamGraph::EXTERNAL_STREAM);
+  mStream = AudioNodeStream::Create(graph, engine,
+                                    AudioNodeStream::EXTERNAL_STREAM);
   mStream->AddMainThreadListener(this);
   mStream->AddAudioOutput(&gWebAudioOutputKey);
 
   if (!aIsOffline) {
     graph->NotifyWhenGraphStarted(mStream);
   }
 
   if (aChannel != AudioChannel::Normal) {
--- a/dom/media/webaudio/AudioNodeExternalInputStream.cpp
+++ b/dom/media/webaudio/AudioNodeExternalInputStream.cpp
@@ -8,26 +8,40 @@
 #include "AudioChannelFormat.h"
 #include "mozilla/dom/MediaStreamAudioSourceNode.h"
 
 using namespace mozilla::dom;
 
 namespace mozilla {
 
 AudioNodeExternalInputStream::AudioNodeExternalInputStream(AudioNodeEngine* aEngine, TrackRate aSampleRate, uint32_t aContextId)
-  : AudioNodeStream(aEngine, MediaStreamGraph::INTERNAL_STREAM, aSampleRate, aContextId)
+  : AudioNodeStream(aEngine, INTERNAL_STREAM, aSampleRate, aContextId)
 {
   MOZ_COUNT_CTOR(AudioNodeExternalInputStream);
 }
 
 AudioNodeExternalInputStream::~AudioNodeExternalInputStream()
 {
   MOZ_COUNT_DTOR(AudioNodeExternalInputStream);
 }
 
+/* static */ already_AddRefed<AudioNodeExternalInputStream>
+AudioNodeExternalInputStream::Create(MediaStreamGraph* aGraph,
+                                     AudioNodeEngine* aEngine)
+{
+  MOZ_ASSERT(NS_IsMainThread());
+  MOZ_ASSERT(aGraph->GraphRate() == aEngine->NodeMainThread()->Context()->SampleRate());
+
+  nsRefPtr<AudioNodeExternalInputStream> stream =
+    new AudioNodeExternalInputStream(aEngine, aGraph->GraphRate(),
+                                     aEngine->NodeMainThread()->Context()->Id());
+  aGraph->AddStream(stream);
+  return stream.forget();
+}
+
 /**
  * Copies the data in aInput to aOffsetInBlock within aBlock.
  * aBlock must have been allocated with AllocateInputBlock and have a channel
  * count that's a superset of the channels in aInput.
  */
 static void
 CopyChunkToBlock(const AudioChunk& aInput, AudioChunk *aBlock,
                  uint32_t aOffsetInBlock)
--- a/dom/media/webaudio/AudioNodeExternalInputStream.h
+++ b/dom/media/webaudio/AudioNodeExternalInputStream.h
@@ -16,19 +16,22 @@ namespace mozilla {
  * This is a MediaStream implementation that acts for a Web Audio node but
  * unlike other AudioNodeStreams, supports any kind of MediaStream as an
  * input --- handling any number of audio tracks and handling blocking of
  * the input MediaStream.
  */
 class AudioNodeExternalInputStream final : public AudioNodeStream
 {
 public:
+  static already_AddRefed<AudioNodeExternalInputStream>
+  Create(MediaStreamGraph* aGraph, AudioNodeEngine* aEngine);
+
+protected:
   AudioNodeExternalInputStream(AudioNodeEngine* aEngine, TrackRate aSampleRate,
                                uint32_t aContextId);
-protected:
   ~AudioNodeExternalInputStream();
 
 public:
   virtual void ProcessInput(GraphTime aFrom, GraphTime aTo,
                             uint32_t aFlags) override;
 
 private:
   /**
--- a/dom/media/webaudio/AudioNodeStream.cpp
+++ b/dom/media/webaudio/AudioNodeStream.cpp
@@ -21,17 +21,17 @@ namespace mozilla {
  * AUDIO_TRACK. This track has rate AudioContext::sIdealAudioRate
  * for regular audio contexts, and the rate requested by the web content
  * for offline audio contexts.
  * Each chunk in the track is a single block of WEBAUDIO_BLOCK_SIZE samples.
  * Note: This must be a different value than MEDIA_STREAM_DEST_TRACK_ID
  */
 
 AudioNodeStream::AudioNodeStream(AudioNodeEngine* aEngine,
-                                 MediaStreamGraph::AudioNodeStreamKind aKind,
+                                 AudioNodeStreamKind aKind,
                                  TrackRate aSampleRate,
                                  AudioContext::AudioContextId aContextId)
   : ProcessedMediaStream(nullptr),
     mEngine(aEngine),
     mSampleRate(aSampleRate),
     mAudioContextId(aContextId),
     mKind(aKind),
     mNumberOfInputChannels(2),
@@ -48,16 +48,40 @@ AudioNodeStream::AudioNodeStream(AudioNo
   MOZ_COUNT_CTOR(AudioNodeStream);
 }
 
 AudioNodeStream::~AudioNodeStream()
 {
   MOZ_COUNT_DTOR(AudioNodeStream);
 }
 
+/* static */ already_AddRefed<AudioNodeStream>
+AudioNodeStream::Create(MediaStreamGraph* aGraph, AudioNodeEngine* aEngine,
+                        AudioNodeStreamKind aKind)
+{
+  MOZ_ASSERT(NS_IsMainThread());
+
+  // MediaRecorders use an AudioNodeStream, but no AudioNode
+  AudioNode* node = aEngine->NodeMainThread();
+  MOZ_ASSERT(!node || aGraph->GraphRate() == node->Context()->SampleRate());
+
+  dom::AudioContext::AudioContextId contextIdForStream = node ? node->Context()->Id() :
+                                                                NO_AUDIO_CONTEXT;
+  nsRefPtr<AudioNodeStream> stream =
+    new AudioNodeStream(aEngine, aKind, aGraph->GraphRate(),
+                        contextIdForStream);
+  if (aEngine->HasNode()) {
+    stream->SetChannelMixingParametersImpl(aEngine->NodeMainThread()->ChannelCount(),
+                                           aEngine->NodeMainThread()->ChannelCountModeValue(),
+                                           aEngine->NodeMainThread()->ChannelInterpretationValue());
+  }
+  aGraph->AddStream(stream);
+  return stream.forget();
+}
+
 size_t
 AudioNodeStream::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
 {
   size_t amount = 0;
 
   // Not reported:
   // - mEngine
 
@@ -566,17 +590,17 @@ AudioNodeStream::ProduceOutputBeforeInpu
 }
 
 void
 AudioNodeStream::AdvanceOutputSegment()
 {
   StreamBuffer::Track* track = EnsureTrack(AUDIO_TRACK);
   AudioSegment* segment = track->Get<AudioSegment>();
 
-  if (mKind == MediaStreamGraph::EXTERNAL_STREAM) {
+  if (mKind == EXTERNAL_STREAM) {
     segment->AppendAndConsumeChunk(&mLastChunks[0]);
   } else {
     segment->AppendNullData(mLastChunks[0].GetDuration());
   }
 
   for (uint32_t j = 0; j < mListeners.Length(); ++j) {
     MediaStreamListener* l = mListeners[j];
     AudioChunk copyChunk = mLastChunks[0];
--- a/dom/media/webaudio/AudioNodeStream.h
+++ b/dom/media/webaudio/AudioNodeStream.h
@@ -38,25 +38,37 @@ class AudioNodeStream : public Processed
 
 public:
   typedef mozilla::dom::AudioContext AudioContext;
 
   enum { AUDIO_TRACK = 1 };
 
   typedef nsAutoTArray<AudioChunk, 1> OutputChunks;
 
+  // Internal AudioNodeStreams can only pass their output to another
+  // AudioNode, whereas external AudioNodeStreams can pass their output
+  // to an nsAudioStream for playback.
+  enum AudioNodeStreamKind { SOURCE_STREAM, INTERNAL_STREAM, EXTERNAL_STREAM };
+  /**
+   * Create a stream that will process audio for an AudioNode.
+   * Takes ownership of aEngine.
+   */
+  static already_AddRefed<AudioNodeStream>
+  Create(MediaStreamGraph* aGraph, AudioNodeEngine* aEngine,
+         AudioNodeStreamKind aKind);
+
+protected:
   /**
    * Transfers ownership of aEngine to the new AudioNodeStream.
    */
   AudioNodeStream(AudioNodeEngine* aEngine,
-                  MediaStreamGraph::AudioNodeStreamKind aKind,
+                  AudioNodeStreamKind aKind,
                   TrackRate aSampleRate,
                   AudioContext::AudioContextId aContextId);
 
-protected:
   ~AudioNodeStream();
 
 public:
   // Control API
   /**
    * Sets a parameter that's a time relative to some stream's played time.
    * This time is converted to a time relative to this stream when it's set.
    */
@@ -107,18 +119,18 @@ public:
 
   const OutputChunks& LastChunks() const
   {
     return mLastChunks;
   }
   virtual bool MainThreadNeedsUpdates() const override
   {
     // Only source and external streams need updates on the main thread.
-    return (mKind == MediaStreamGraph::SOURCE_STREAM && mFinished) ||
-           mKind == MediaStreamGraph::EXTERNAL_STREAM;
+    return (mKind == SOURCE_STREAM && mFinished) ||
+           mKind == EXTERNAL_STREAM;
   }
   virtual bool IsIntrinsicallyConsumed() const override
   {
     return true;
   }
 
   // Any thread
   AudioNodeEngine* Engine() { return mEngine; }
@@ -169,17 +181,17 @@ protected:
   // The last block produced by this node.
   OutputChunks mLastChunks;
   // The stream's sampling rate
   const TrackRate mSampleRate;
   // This is necessary to be able to find all the nodes for a given
   // AudioContext. It is set on the main thread, in the constructor.
   const AudioContext::AudioContextId mAudioContextId;
   // Whether this is an internal or external stream
-  const MediaStreamGraph::AudioNodeStreamKind mKind;
+  const AudioNodeStreamKind mKind;
   // The number of input channels that this stream requires. 0 means don't care.
   uint32_t mNumberOfInputChannels;
   // The mixing modes
   ChannelCountMode mChannelCountMode;
   ChannelInterpretation mChannelInterpretation;
   // Whether the stream should be marked as finished as soon
   // as the current time range has been computed block by block.
   bool mMarkAsFinishedAfterThisBlock;
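
With the constructor now protected, node code is expected to go through the factory, which takes ownership of the engine and registers the new stream with the graph. A minimal caller sketch under those assumptions (ctx and engine stand for a caller's AudioContext* and a freshly allocated AudioNodeEngine*, as in the node constructors below):

    nsRefPtr<AudioNodeStream> stream =
      AudioNodeStream::Create(ctx->Graph(), engine,
                              AudioNodeStream::SOURCE_STREAM);
    // Create() asserts main-thread use; when the engine has a node it also checks
    // that the graph rate matches the node's AudioContext sample rate and copies
    // the node's channel count, mode and interpretation onto the stream before
    // calling AddStream() on the graph.
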
--- a/dom/media/webaudio/AudioParam.cpp
+++ b/dom/media/webaudio/AudioParam.cpp
@@ -95,18 +95,18 @@ MediaStream*
 AudioParam::Stream()
 {
   if (mStream) {
     return mStream;
   }
 
   AudioNodeEngine* engine = new AudioNodeEngine(nullptr);
   nsRefPtr<AudioNodeStream> stream =
-    mNode->Context()->Graph()->CreateAudioNodeStream(engine,
-                                                     MediaStreamGraph::INTERNAL_STREAM);
+    AudioNodeStream::Create(mNode->Context()->Graph(), engine,
+                            AudioNodeStream::INTERNAL_STREAM);
 
   // Force the input to have only one channel, and make it down-mix using
   // the speaker rules if needed.
   stream->SetChannelMixingParametersImpl(1, ChannelCountMode::Explicit, ChannelInterpretation::Speakers);
   // Mark as an AudioParam helper stream
   stream->SetAudioParamHelperStream();
 
   mStream = stream.forget();
--- a/dom/media/webaudio/BiquadFilterNode.cpp
+++ b/dom/media/webaudio/BiquadFilterNode.cpp
@@ -245,17 +245,18 @@ BiquadFilterNode::BiquadFilterNode(Audio
               ChannelInterpretation::Speakers)
   , mType(BiquadFilterType::Lowpass)
   , mFrequency(new AudioParam(this, SendFrequencyToStream, 350.f, "frequency"))
   , mDetune(new AudioParam(this, SendDetuneToStream, 0.f, "detune"))
   , mQ(new AudioParam(this, SendQToStream, 1.f, "Q"))
   , mGain(new AudioParam(this, SendGainToStream, 0.f, "gain"))
 {
   BiquadFilterNodeEngine* engine = new BiquadFilterNodeEngine(this, aContext->Destination());
-  mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
+  mStream = AudioNodeStream::Create(aContext->Graph(), engine,
+                                    AudioNodeStream::INTERNAL_STREAM);
   engine->SetSourceStream(mStream);
 }
 
 BiquadFilterNode::~BiquadFilterNode()
 {
 }
 
 size_t
--- a/dom/media/webaudio/ChannelMergerNode.cpp
+++ b/dom/media/webaudio/ChannelMergerNode.cpp
@@ -68,18 +68,19 @@ public:
 ChannelMergerNode::ChannelMergerNode(AudioContext* aContext,
                                      uint16_t aInputCount)
   : AudioNode(aContext,
               2,
               ChannelCountMode::Max,
               ChannelInterpretation::Speakers)
   , mInputCount(aInputCount)
 {
-  mStream = aContext->Graph()->CreateAudioNodeStream(new ChannelMergerNodeEngine(this),
-                                                     MediaStreamGraph::INTERNAL_STREAM);
+  mStream = AudioNodeStream::Create(aContext->Graph(),
+                                    new ChannelMergerNodeEngine(this),
+                                    AudioNodeStream::INTERNAL_STREAM);
 }
 
 ChannelMergerNode::~ChannelMergerNode()
 {
 }
 
 JSObject*
 ChannelMergerNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
--- a/dom/media/webaudio/ChannelSplitterNode.cpp
+++ b/dom/media/webaudio/ChannelSplitterNode.cpp
@@ -55,18 +55,19 @@ public:
 ChannelSplitterNode::ChannelSplitterNode(AudioContext* aContext,
                                          uint16_t aOutputCount)
   : AudioNode(aContext,
               2,
               ChannelCountMode::Max,
               ChannelInterpretation::Speakers)
   , mOutputCount(aOutputCount)
 {
-  mStream = aContext->Graph()->CreateAudioNodeStream(new ChannelSplitterNodeEngine(this),
-                                                     MediaStreamGraph::INTERNAL_STREAM);
+  mStream = AudioNodeStream::Create(aContext->Graph(),
+                                    new ChannelSplitterNodeEngine(this),
+                                    AudioNodeStream::INTERNAL_STREAM);
 }
 
 ChannelSplitterNode::~ChannelSplitterNode()
 {
 }
 
 JSObject*
 ChannelSplitterNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
--- a/dom/media/webaudio/ConvolverNode.cpp
+++ b/dom/media/webaudio/ConvolverNode.cpp
@@ -186,17 +186,18 @@ private:
 ConvolverNode::ConvolverNode(AudioContext* aContext)
   : AudioNode(aContext,
               2,
               ChannelCountMode::Clamped_max,
               ChannelInterpretation::Speakers)
   , mNormalize(true)
 {
   ConvolverNodeEngine* engine = new ConvolverNodeEngine(this, mNormalize);
-  mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
+  mStream = AudioNodeStream::Create(aContext->Graph(), engine,
+                                    AudioNodeStream::INTERNAL_STREAM);
 }
 
 ConvolverNode::~ConvolverNode()
 {
 }
 
 size_t
 ConvolverNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
--- a/dom/media/webaudio/DelayNode.cpp
+++ b/dom/media/webaudio/DelayNode.cpp
@@ -193,17 +193,18 @@ DelayNode::DelayNode(AudioContext* aCont
               2,
               ChannelCountMode::Max,
               ChannelInterpretation::Speakers)
   , mDelay(new AudioParam(this, SendDelayToStream, 0.0f, "delayTime"))
 {
   DelayNodeEngine* engine =
     new DelayNodeEngine(this, aContext->Destination(),
                         aContext->SampleRate() * aMaxDelay);
-  mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
+  mStream = AudioNodeStream::Create(aContext->Graph(), engine,
+                                    AudioNodeStream::INTERNAL_STREAM);
   engine->SetSourceStream(mStream);
 }
 
 DelayNode::~DelayNode()
 {
 }
 
 size_t
--- a/dom/media/webaudio/DynamicsCompressorNode.cpp
+++ b/dom/media/webaudio/DynamicsCompressorNode.cpp
@@ -198,17 +198,18 @@ DynamicsCompressorNode::DynamicsCompress
   , mThreshold(new AudioParam(this, SendThresholdToStream, -24.f, "threshold"))
   , mKnee(new AudioParam(this, SendKneeToStream, 30.f, "knee"))
   , mRatio(new AudioParam(this, SendRatioToStream, 12.f, "ratio"))
   , mReduction(0)
   , mAttack(new AudioParam(this, SendAttackToStream, 0.003f, "attack"))
   , mRelease(new AudioParam(this, SendReleaseToStream, 0.25f, "release"))
 {
   DynamicsCompressorNodeEngine* engine = new DynamicsCompressorNodeEngine(this, aContext->Destination());
-  mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
+  mStream = AudioNodeStream::Create(aContext->Graph(), engine,
+                                    AudioNodeStream::INTERNAL_STREAM);
   engine->SetSourceStream(mStream);
 }
 
 DynamicsCompressorNode::~DynamicsCompressorNode()
 {
 }
 
 size_t
--- a/dom/media/webaudio/GainNode.cpp
+++ b/dom/media/webaudio/GainNode.cpp
@@ -123,17 +123,18 @@ public:
 GainNode::GainNode(AudioContext* aContext)
   : AudioNode(aContext,
               2,
               ChannelCountMode::Max,
               ChannelInterpretation::Speakers)
   , mGain(new AudioParam(this, SendGainToStream, 1.0f, "gain"))
 {
   GainNodeEngine* engine = new GainNodeEngine(this, aContext->Destination());
-  mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
+  mStream = AudioNodeStream::Create(aContext->Graph(), engine,
+                                    AudioNodeStream::INTERNAL_STREAM);
   engine->SetSourceStream(mStream);
 }
 
 GainNode::~GainNode()
 {
 }
 
 size_t
--- a/dom/media/webaudio/MediaStreamAudioDestinationNode.cpp
+++ b/dom/media/webaudio/MediaStreamAudioDestinationNode.cpp
@@ -34,17 +34,18 @@ MediaStreamAudioDestinationNode::MediaSt
                                                       aContext->Graph()))
 {
   // Ensure an audio track with the correct ID is exposed to JS
   mDOMStream->CreateDOMTrack(AudioNodeStream::AUDIO_TRACK, MediaSegment::AUDIO);
 
   ProcessedMediaStream* outputStream = mDOMStream->GetStream()->AsProcessedStream();
   MOZ_ASSERT(!!outputStream);
   AudioNodeEngine* engine = new AudioNodeEngine(this);
-  mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::EXTERNAL_STREAM);
+  mStream = AudioNodeStream::Create(aContext->Graph(), engine,
+                                    AudioNodeStream::EXTERNAL_STREAM);
   mPort = outputStream->AllocateInputPort(mStream);
 
   nsIDocument* doc = aContext->GetParentObject()->GetExtantDoc();
   if (doc) {
     mDOMStream->CombineWithPrincipal(doc->NodePrincipal());
   }
 }
 
--- a/dom/media/webaudio/MediaStreamAudioSourceNode.cpp
+++ b/dom/media/webaudio/MediaStreamAudioSourceNode.cpp
@@ -34,17 +34,17 @@ MediaStreamAudioSourceNode::MediaStreamA
                                                        DOMMediaStream* aMediaStream)
   : AudioNode(aContext,
               2,
               ChannelCountMode::Max,
               ChannelInterpretation::Speakers),
     mInputStream(aMediaStream)
 {
   AudioNodeEngine* engine = new MediaStreamAudioSourceNodeEngine(this);
-  mStream = aContext->Graph()->CreateAudioNodeExternalInputStream(engine);
+  mStream = AudioNodeExternalInputStream::Create(aContext->Graph(), engine);
   ProcessedMediaStream* outputStream = static_cast<ProcessedMediaStream*>(mStream.get());
   mInputPort = outputStream->AllocateInputPort(aMediaStream->GetStream(),
                                                MediaInputPort::FLAG_BLOCK_INPUT);
   mInputStream->AddConsumerToKeepAlive(static_cast<nsIDOMEventTarget*>(this));
 
   PrincipalChanged(mInputStream); // trigger enabling/disabling of the connector
   mInputStream->AddPrincipalChangeObserver(this);
 }
--- a/dom/media/webaudio/OscillatorNode.cpp
+++ b/dom/media/webaudio/OscillatorNode.cpp
@@ -379,17 +379,18 @@ OscillatorNode::OscillatorNode(AudioCont
               ChannelCountMode::Max,
               ChannelInterpretation::Speakers)
   , mType(OscillatorType::Sine)
   , mFrequency(new AudioParam(this, SendFrequencyToStream, 440.0f, "frequency"))
   , mDetune(new AudioParam(this, SendDetuneToStream, 0.0f, "detune"))
   , mStartCalled(false)
 {
   OscillatorNodeEngine* engine = new OscillatorNodeEngine(this, aContext->Destination());
-  mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::SOURCE_STREAM);
+  mStream = AudioNodeStream::Create(aContext->Graph(), engine,
+                                    AudioNodeStream::SOURCE_STREAM);
   engine->SetSourceStream(mStream);
   mStream->AddMainThreadListener(this);
 }
 
 OscillatorNode::~OscillatorNode()
 {
 }
 
--- a/dom/media/webaudio/PannerNode.cpp
+++ b/dom/media/webaudio/PannerNode.cpp
@@ -235,18 +235,19 @@ PannerNode::PannerNode(AudioContext* aCo
   , mVelocity()
   , mRefDistance(1.)
   , mMaxDistance(10000.)
   , mRolloffFactor(1.)
   , mConeInnerAngle(360.)
   , mConeOuterAngle(360.)
   , mConeOuterGain(0.)
 {
-  mStream = aContext->Graph()->CreateAudioNodeStream(new PannerNodeEngine(this),
-                                                     MediaStreamGraph::INTERNAL_STREAM);
+  mStream = AudioNodeStream::Create(aContext->Graph(),
+                                    new PannerNodeEngine(this),
+                                    AudioNodeStream::INTERNAL_STREAM);
   // We should register once we have set up our stream and engine.
   Context()->Listener()->RegisterPannerNode(this);
 }
 
 PannerNode::~PannerNode()
 {
   if (Context()) {
     Context()->UnregisterPannerNode(this);
--- a/dom/media/webaudio/ScriptProcessorNode.cpp
+++ b/dom/media/webaudio/ScriptProcessorNode.cpp
@@ -515,17 +515,18 @@ ScriptProcessorNode::ScriptProcessorNode
   , mNumberOfOutputChannels(aNumberOfOutputChannels)
 {
   MOZ_ASSERT(BufferSize() % WEBAUDIO_BLOCK_SIZE == 0, "Invalid buffer size");
   ScriptProcessorNodeEngine* engine =
     new ScriptProcessorNodeEngine(this,
                                   aContext->Destination(),
                                   BufferSize(),
                                   aNumberOfInputChannels);
-  mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
+  mStream = AudioNodeStream::Create(aContext->Graph(), engine,
+                                    AudioNodeStream::INTERNAL_STREAM);
   engine->SetSourceStream(mStream);
 }
 
 ScriptProcessorNode::~ScriptProcessorNode()
 {
 }
 
 size_t
--- a/dom/media/webaudio/StereoPannerNode.cpp
+++ b/dom/media/webaudio/StereoPannerNode.cpp
@@ -176,18 +176,18 @@ public:
 StereoPannerNode::StereoPannerNode(AudioContext* aContext)
   : AudioNode(aContext,
               2,
               ChannelCountMode::Clamped_max,
               ChannelInterpretation::Speakers)
   , mPan(new AudioParam(this, SendPanToStream, 0.f, "pan"))
 {
   StereoPannerNodeEngine* engine = new StereoPannerNodeEngine(this, aContext->Destination());
-  mStream = aContext->Graph()->CreateAudioNodeStream(engine,
-                                                     MediaStreamGraph::INTERNAL_STREAM);
+  mStream = AudioNodeStream::Create(aContext->Graph(), engine,
+                                    AudioNodeStream::INTERNAL_STREAM);
   engine->SetSourceStream(mStream);
 }
 
 StereoPannerNode::~StereoPannerNode()
 {
 }
 
 size_t
--- a/dom/media/webaudio/WaveShaperNode.cpp
+++ b/dom/media/webaudio/WaveShaperNode.cpp
@@ -283,17 +283,18 @@ WaveShaperNode::WaveShaperNode(AudioCont
               ChannelCountMode::Max,
               ChannelInterpretation::Speakers)
   , mCurve(nullptr)
   , mType(OverSampleType::None)
 {
   mozilla::HoldJSObjects(this);
 
   WaveShaperNodeEngine* engine = new WaveShaperNodeEngine(this);
-  mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
+  mStream = AudioNodeStream::Create(aContext->Graph(), engine,
+                                    AudioNodeStream::INTERNAL_STREAM);
 }
 
 WaveShaperNode::~WaveShaperNode()
 {
   ClearCurve();
 }
 
 void