Bug 1544023 - let AudioNode keep the reference of all AudioParams which belong to itself. r=padenot
authorAlastor Wu <alwu@mozilla.com>
Tue, 23 Apr 2019 17:50:54 +0000
changeset 470522 db146a70077d0fef30987a87e7835e81fd6c5c54
parent 470521 783efcde8dfbff4f9bef21fbc4e92abfe73f960d
child 470523 74d827325fbe641c181842bd62c201eecfa42432
push id35906
push useraciure@mozilla.com
push dateTue, 23 Apr 2019 22:14:56 +0000
treeherdermozilla-central@0ce3633f8b80 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewerspadenot
bugs1544023
milestone68.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1544023 - let AudioNode keep the reference of all AudioParams which belong to itself. r=padenot When we suspend or resume the `AudioContext`, it should affect ALL media streams which belong to, or are related to, the `AudioNode`s that are created by this `AudioContext`. As `AudioNode::OutputParams()` can only return the connected AudioParams, it doesn't return the AudioParams that belong to the node itself. That means we would fail to apply the suspend/resume operation to those streams, which would cause an imbalanced suspended count. Therefore, we let `AudioNode` keep a reference to all of its AudioParams, and return them to `AudioContext` in order to perform the operation on all streams. Differential Revision: https://phabricator.services.mozilla.com/D28008
dom/media/webaudio/AudioBufferSourceNode.cpp
dom/media/webaudio/AudioContext.cpp
dom/media/webaudio/AudioNode.cpp
dom/media/webaudio/AudioNode.h
dom/media/webaudio/BiquadFilterNode.cpp
dom/media/webaudio/ConstantSourceNode.cpp
dom/media/webaudio/DelayNode.cpp
dom/media/webaudio/DynamicsCompressorNode.cpp
dom/media/webaudio/GainNode.cpp
dom/media/webaudio/OscillatorNode.cpp
dom/media/webaudio/PannerNode.cpp
dom/media/webaudio/StereoPannerNode.cpp
--- a/dom/media/webaudio/AudioBufferSourceNode.cpp
+++ b/dom/media/webaudio/AudioBufferSourceNode.cpp
@@ -583,23 +583,22 @@ class AudioBufferSourceNodeEngine final 
   AudioParamTimeline mDetuneTimeline;
   bool mLoop;
 };
 
 AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* aContext)
     : AudioScheduledSourceNode(aContext, 2, ChannelCountMode::Max,
                                ChannelInterpretation::Speakers),
       mLoopStart(0.0),
-      mLoopEnd(0.0)
+      mLoopEnd(0.0),
       // mOffset and mDuration are initialized in Start().
-      ,
-      mPlaybackRate(new AudioParam(this, PLAYBACKRATE, "playbackRate", 1.0f)),
-      mDetune(new AudioParam(this, DETUNE, "detune", 0.0f)),
       mLoop(false),
       mStartCalled(false) {
+  CreateAudioParam(mPlaybackRate, PLAYBACKRATE, "playbackRate", 1.0f);
+  CreateAudioParam(mDetune, DETUNE, "detune", 0.0f);
   AudioBufferSourceNodeEngine* engine =
       new AudioBufferSourceNodeEngine(this, aContext->Destination());
   mStream = AudioNodeStream::Create(aContext, engine,
                                     AudioNodeStream::NEED_MAIN_THREAD_FINISHED,
                                     aContext->Graph());
   engine->SetSourceStream(mStream);
   mStream->AddMainThreadListener(this);
 }
--- a/dom/media/webaudio/AudioContext.cpp
+++ b/dom/media/webaudio/AudioContext.cpp
@@ -899,18 +899,18 @@ void AudioContext::OnStateChanged(void* 
 nsTArray<MediaStream*> AudioContext::GetAllStreams() const {
   nsTArray<MediaStream*> streams;
   for (auto iter = mAllNodes.ConstIter(); !iter.Done(); iter.Next()) {
     AudioNode* node = iter.Get()->GetKey();
     MediaStream* s = node->GetStream();
     if (s) {
       streams.AppendElement(s);
     }
-    // Add the streams for the AudioParam that have an AudioNode input.
-    const nsTArray<RefPtr<AudioParam>>& audioParams = node->OutputParams();
+    // Add the streams of all AudioParams.
+    const nsTArray<RefPtr<AudioParam>>& audioParams = node->GetAudioParams();
     if (!audioParams.IsEmpty()) {
       for (auto& param : audioParams) {
         s = param->GetStream();
         if (s && !streams.Contains(s)) {
           streams.AppendElement(s);
         }
       }
     }
--- a/dom/media/webaudio/AudioNode.cpp
+++ b/dom/media/webaudio/AudioNode.cpp
@@ -21,22 +21,24 @@ static uint32_t gId = 0;
 NS_IMPL_CYCLE_COLLECTION_CLASS(AudioNode)
 
 NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(AudioNode, DOMEventTargetHelper)
   tmp->DisconnectFromGraph();
   if (tmp->mContext) {
     tmp->mContext->UnregisterNode(tmp);
   }
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mContext)
+  NS_IMPL_CYCLE_COLLECTION_UNLINK(mParams)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mOutputNodes)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mOutputParams)
 NS_IMPL_CYCLE_COLLECTION_UNLINK_END
 NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(AudioNode,
                                                   DOMEventTargetHelper)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mContext)
+  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mParams)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mOutputNodes)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mOutputParams)
 NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
 
 NS_IMPL_ADDREF_INHERITED(AudioNode, DOMEventTargetHelper)
 NS_IMPL_RELEASE_INHERITED(AudioNode, DOMEventTargetHelper)
 
 NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(AudioNode)
@@ -590,10 +592,18 @@ bool AudioNode::PassThrough() const {
 void AudioNode::SetPassThrough(bool aPassThrough) {
   MOZ_ASSERT(NumberOfInputs() <= 1 && NumberOfOutputs() == 1);
   mPassThrough = aPassThrough;
   if (mStream) {
     mStream->SetPassThrough(mPassThrough);
   }
 }
 
+void AudioNode::CreateAudioParam(RefPtr<AudioParam>& aParam, uint32_t aIndex,
+                                 const char* aName, float aDefaultValue,
+                                 float aMinValue, float aMaxValue) {
+  aParam =
+      new AudioParam(this, aIndex, aName, aDefaultValue, aMinValue, aMaxValue);
+  mParams.AppendElement(aParam);
+}
+
 }  // namespace dom
 }  // namespace mozilla
--- a/dom/media/webaudio/AudioNode.h
+++ b/dom/media/webaudio/AudioNode.h
@@ -163,20 +163,20 @@ class AudioNode : public DOMEventTargetH
     // The index of the output port this node comes out of.
     uint32_t mOutputPort;
   };
 
   // Returns the stream, if any.
   AudioNodeStream* GetStream() const { return mStream; }
 
   const nsTArray<InputNode>& InputNodes() const { return mInputNodes; }
-  const nsTArray<RefPtr<AudioNode> >& OutputNodes() const {
+  const nsTArray<RefPtr<AudioNode>>& OutputNodes() const {
     return mOutputNodes;
   }
-  const nsTArray<RefPtr<AudioParam> >& OutputParams() const {
+  const nsTArray<RefPtr<AudioParam>>& OutputParams() const {
     return mOutputParams;
   }
 
   template <typename T>
   const nsTArray<InputNode>& InputsForDestination(uint32_t aOutputIndex) const;
 
   void RemoveOutputParam(AudioParam* aParam);
 
@@ -197,16 +197,18 @@ class AudioNode : public DOMEventTargetH
   // Returns a string from constant static storage identifying the dom node
   // type.
   virtual const char* NodeType() const = 0;
 
   // This can return nullptr, but only when the AudioNode has been created
   // during document shutdown.
   AbstractThread* GetAbstractMainThread() const { return mAbstractMainThread; }
 
+  const nsTArray<RefPtr<AudioParam>>& GetAudioParams() const { return mParams; }
+
  private:
   // Given:
   //
   // - a DestinationType, that can be an AudioNode or an AudioParam ;
   // - a Predicate, a function that takes an InputNode& and returns a bool ;
   //
   // This method iterates on the InputNodes() of the node at the index
   // aDestinationIndex, and calls `DisconnectFromOutputIfConnected` with this
@@ -241,31 +243,40 @@ class AudioNode : public DOMEventTargetH
  private:
   RefPtr<AudioContext> mContext;
 
  protected:
   // Set in the constructor of all nodes except offline AudioDestinationNode.
   // Must not become null until finished.
   RefPtr<AudioNodeStream> mStream;
 
+  // References to all AudioParams which belong to this node.
+  nsTArray<RefPtr<AudioParam>> mParams;
+  // Use this function to create an AudioParam, which will automatically add the
+  // new AudioParam to `mParams`.
+  void CreateAudioParam(RefPtr<AudioParam>& aParam, uint32_t aIndex,
+                        const char* aName, float aDefaultValue,
+                        float aMinValue = std::numeric_limits<float>::lowest(),
+                        float aMaxValue = std::numeric_limits<float>::max());
+
  private:
   // For every InputNode, there is a corresponding entry in mOutputNodes of the
   // InputNode's mInputNode.
   nsTArray<InputNode> mInputNodes;
   // For every mOutputNode entry, there is a corresponding entry in mInputNodes
   // of the mOutputNode entry. We won't necessarily be able to identify the
   // exact matching entry, since mOutputNodes doesn't include the port
   // identifiers and the same node could be connected on multiple ports.
-  nsTArray<RefPtr<AudioNode> > mOutputNodes;
+  nsTArray<RefPtr<AudioNode>> mOutputNodes;
   // For every mOutputParams entry, there is a corresponding entry in
   // AudioParam::mInputNodes of the mOutputParams entry. We won't necessarily be
   // able to identify the exact matching entry, since mOutputParams doesn't
   // include the port identifiers and the same node could be connected on
   // multiple ports.
-  nsTArray<RefPtr<AudioParam> > mOutputParams;
+  nsTArray<RefPtr<AudioParam>> mOutputParams;
   uint32_t mChannelCount;
   ChannelCountMode mChannelCountMode;
   ChannelInterpretation mChannelInterpretation;
   const uint32_t mId;
   // Whether the node just passes through its input.  This is a devtools API
   // that only works for some node types.
   bool mPassThrough;
   // DocGroup-specifc AbstractThread::MainThread() for MediaStreamGraph
--- a/dom/media/webaudio/BiquadFilterNode.cpp
+++ b/dom/media/webaudio/BiquadFilterNode.cpp
@@ -219,24 +219,24 @@ class BiquadFilterNodeEngine final : pub
   AudioParamTimeline mGain;
   nsTArray<WebCore::Biquad> mBiquads;
   uint64_t mWindowID;
 };
 
 BiquadFilterNode::BiquadFilterNode(AudioContext* aContext)
     : AudioNode(aContext, 2, ChannelCountMode::Max,
                 ChannelInterpretation::Speakers),
-      mType(BiquadFilterType::Lowpass),
-      mFrequency(new AudioParam(
-          this, BiquadFilterNodeEngine::FREQUENCY, "frequency", 350.f,
-          -(aContext->SampleRate() / 2), aContext->SampleRate() / 2)),
-      mDetune(
-          new AudioParam(this, BiquadFilterNodeEngine::DETUNE, "detune", 0.f)),
-      mQ(new AudioParam(this, BiquadFilterNodeEngine::Q, "Q", 1.f)),
-      mGain(new AudioParam(this, BiquadFilterNodeEngine::GAIN, "gain", 0.f)) {
+      mType(BiquadFilterType::Lowpass) {
+  CreateAudioParam(mFrequency, BiquadFilterNodeEngine::FREQUENCY, "frequency",
+                   350.f, -(aContext->SampleRate() / 2),
+                   aContext->SampleRate() / 2);
+  CreateAudioParam(mDetune, BiquadFilterNodeEngine::DETUNE, "detune", 0.f);
+  CreateAudioParam(mQ, BiquadFilterNodeEngine::Q, "Q", 1.f);
+  CreateAudioParam(mGain, BiquadFilterNodeEngine::GAIN, "gain", 0.f);
+
   uint64_t windowID = 0;
   if (aContext->GetParentObject()) {
     windowID = aContext->GetParentObject()->WindowID();
   }
   BiquadFilterNodeEngine* engine =
       new BiquadFilterNodeEngine(this, aContext->Destination(), windowID);
   mStream = AudioNodeStream::Create(
       aContext, engine, AudioNodeStream::NO_STREAM_FLAGS, aContext->Graph());
--- a/dom/media/webaudio/ConstantSourceNode.cpp
+++ b/dom/media/webaudio/ConstantSourceNode.cpp
@@ -151,19 +151,18 @@ class ConstantSourceNodeEngine final : p
   StreamTime mStart;
   StreamTime mStop;
   AudioParamTimeline mOffset;
 };
 
 ConstantSourceNode::ConstantSourceNode(AudioContext* aContext)
     : AudioScheduledSourceNode(aContext, 2, ChannelCountMode::Max,
                                ChannelInterpretation::Speakers),
-      mOffset(new AudioParam(this, ConstantSourceNodeEngine::OFFSET, "offset",
-                             1.0f)),
       mStartCalled(false) {
+  CreateAudioParam(mOffset, ConstantSourceNodeEngine::OFFSET, "offset", 1.0f);
   ConstantSourceNodeEngine* engine =
       new ConstantSourceNodeEngine(this, aContext->Destination());
   mStream = AudioNodeStream::Create(aContext, engine,
                                     AudioNodeStream::NEED_MAIN_THREAD_FINISHED,
                                     aContext->Graph());
   engine->SetSourceStream(mStream);
   mStream->AddMainThreadListener(this);
 }
--- a/dom/media/webaudio/DelayNode.cpp
+++ b/dom/media/webaudio/DelayNode.cpp
@@ -167,19 +167,19 @@ class DelayNodeEngine final : public Aud
   bool mHaveProducedBeforeInput;
   // How much data we have in our buffer which needs to be flushed out when our
   // inputs finish.
   int32_t mLeftOverData;
 };
 
 DelayNode::DelayNode(AudioContext* aContext, double aMaxDelay)
     : AudioNode(aContext, 2, ChannelCountMode::Max,
-                ChannelInterpretation::Speakers),
-      mDelay(new AudioParam(this, DelayNodeEngine::DELAY, "delayTime", 0.0f,
-                            0.f, aMaxDelay)) {
+                ChannelInterpretation::Speakers) {
+  CreateAudioParam(mDelay, DelayNodeEngine::DELAY, "delayTime", 0.0f, 0.f,
+                   aMaxDelay);
   DelayNodeEngine* engine = new DelayNodeEngine(
       this, aContext->Destination(), aContext->SampleRate() * aMaxDelay);
   mStream = AudioNodeStream::Create(
       aContext, engine, AudioNodeStream::NO_STREAM_FLAGS, aContext->Graph());
 }
 
 /* static */
 already_AddRefed<DelayNode> DelayNode::Create(AudioContext& aAudioContext,
--- a/dom/media/webaudio/DynamicsCompressorNode.cpp
+++ b/dom/media/webaudio/DynamicsCompressorNode.cpp
@@ -158,27 +158,27 @@ class DynamicsCompressorNodeEngine final
   AudioParamTimeline mAttack;
   AudioParamTimeline mRelease;
   nsAutoPtr<DynamicsCompressor> mCompressor;
 };
 
 DynamicsCompressorNode::DynamicsCompressorNode(AudioContext* aContext)
     : AudioNode(aContext, 2, ChannelCountMode::Clamped_max,
                 ChannelInterpretation::Speakers),
-      mThreshold(new AudioParam(this, DynamicsCompressorNodeEngine::THRESHOLD,
-                                "threshold", -24.f, -100.f, 0.f)),
-      mKnee(new AudioParam(this, DynamicsCompressorNodeEngine::KNEE, "knee",
-                           30.f, 0.f, 40.f)),
-      mRatio(new AudioParam(this, DynamicsCompressorNodeEngine::RATIO, "ratio",
-                            12.f, 1.f, 20.f)),
-      mReduction(0),
-      mAttack(new AudioParam(this, DynamicsCompressorNodeEngine::ATTACK,
-                             "attack", 0.003f, 0.f, 1.f)),
-      mRelease(new AudioParam(this, DynamicsCompressorNodeEngine::RELEASE,
-                              "release", 0.25f, 0.f, 1.f)) {
+      mReduction(0) {
+  CreateAudioParam(mThreshold, DynamicsCompressorNodeEngine::THRESHOLD,
+                   "threshold", -24.f, -100.f, 0.f);
+  CreateAudioParam(mKnee, DynamicsCompressorNodeEngine::KNEE, "knee", 30.f, 0.f,
+                   40.f);
+  CreateAudioParam(mRatio, DynamicsCompressorNodeEngine::RATIO, "ratio", 12.f,
+                   1.f, 20.f);
+  CreateAudioParam(mAttack, DynamicsCompressorNodeEngine::ATTACK, "attack",
+                   0.003f, 0.f, 1.f);
+  CreateAudioParam(mRelease, DynamicsCompressorNodeEngine::RELEASE, "release",
+                   0.25f, 0.f, 1.f);
   DynamicsCompressorNodeEngine* engine =
       new DynamicsCompressorNodeEngine(this, aContext->Destination());
   mStream = AudioNodeStream::Create(
       aContext, engine, AudioNodeStream::NO_STREAM_FLAGS, aContext->Graph());
 }
 
 /* static */
 already_AddRefed<DynamicsCompressorNode> DynamicsCompressorNode::Create(
--- a/dom/media/webaudio/GainNode.cpp
+++ b/dom/media/webaudio/GainNode.cpp
@@ -103,18 +103,18 @@ class GainNodeEngine final : public Audi
   }
 
   RefPtr<AudioNodeStream> mDestination;
   AudioParamTimeline mGain;
 };
 
 GainNode::GainNode(AudioContext* aContext)
     : AudioNode(aContext, 2, ChannelCountMode::Max,
-                ChannelInterpretation::Speakers),
-      mGain(new AudioParam(this, GainNodeEngine::GAIN, "gain", 1.0f)) {
+                ChannelInterpretation::Speakers) {
+  CreateAudioParam(mGain, GainNodeEngine::GAIN, "gain", 1.0f);
   GainNodeEngine* engine = new GainNodeEngine(this, aContext->Destination());
   mStream = AudioNodeStream::Create(
       aContext, engine, AudioNodeStream::NO_STREAM_FLAGS, aContext->Graph());
 }
 
 /* static */
 already_AddRefed<GainNode> GainNode::Create(AudioContext& aAudioContext,
                                             const GainOptions& aOptions,
--- a/dom/media/webaudio/OscillatorNode.cpp
+++ b/dom/media/webaudio/OscillatorNode.cpp
@@ -366,22 +366,21 @@ class OscillatorNodeEngine final : publi
   bool mCustomDisableNormalization;
   RefPtr<WebCore::PeriodicWave> mPeriodicWave;
 };
 
 OscillatorNode::OscillatorNode(AudioContext* aContext)
     : AudioScheduledSourceNode(aContext, 2, ChannelCountMode::Max,
                                ChannelInterpretation::Speakers),
       mType(OscillatorType::Sine),
-      mFrequency(new AudioParam(
-          this, OscillatorNodeEngine::FREQUENCY, "frequency", 440.0f,
-          -(aContext->SampleRate() / 2), aContext->SampleRate() / 2)),
-      mDetune(
-          new AudioParam(this, OscillatorNodeEngine::DETUNE, "detune", 0.0f)),
       mStartCalled(false) {
+  CreateAudioParam(mFrequency, OscillatorNodeEngine::FREQUENCY, "frequency",
+                   440.0f, -(aContext->SampleRate() / 2),
+                   aContext->SampleRate() / 2);
+  CreateAudioParam(mDetune, OscillatorNodeEngine::DETUNE, "detune", 0.0f);
   OscillatorNodeEngine* engine =
       new OscillatorNodeEngine(this, aContext->Destination());
   mStream = AudioNodeStream::Create(aContext, engine,
                                     AudioNodeStream::NEED_MAIN_THREAD_FINISHED,
                                     aContext->Graph());
   engine->SetSourceStream(mStream);
   mStream->AddMainThreadListener(this);
 }
--- a/dom/media/webaudio/PannerNode.cpp
+++ b/dom/media/webaudio/PannerNode.cpp
@@ -291,34 +291,31 @@ class PannerNodeEngine final : public Au
 PannerNode::PannerNode(AudioContext* aContext)
     : AudioNode(aContext, 2, ChannelCountMode::Clamped_max,
                 ChannelInterpretation::Speakers)
       // Please keep these default values consistent with
       // PannerNodeEngine::PannerNodeEngine above.
       ,
       mPanningModel(PanningModelType::Equalpower),
       mDistanceModel(DistanceModelType::Inverse),
-      mPositionX(
-          new AudioParam(this, PannerNode::POSITIONX, this->NodeType(), 0.f)),
-      mPositionY(
-          new AudioParam(this, PannerNode::POSITIONY, this->NodeType(), 0.f)),
-      mPositionZ(
-          new AudioParam(this, PannerNode::POSITIONZ, this->NodeType(), 0.f)),
-      mOrientationX(new AudioParam(this, PannerNode::ORIENTATIONX,
-                                   this->NodeType(), 1.0f)),
-      mOrientationY(new AudioParam(this, PannerNode::ORIENTATIONY,
-                                   this->NodeType(), 0.f)),
-      mOrientationZ(new AudioParam(this, PannerNode::ORIENTATIONZ,
-                                   this->NodeType(), 0.f)),
       mRefDistance(1.),
       mMaxDistance(10000.),
       mRolloffFactor(1.),
       mConeInnerAngle(360.),
       mConeOuterAngle(360.),
       mConeOuterGain(0.) {
+  CreateAudioParam(mPositionX, PannerNode::POSITIONX, this->NodeType(), 0.f);
+  CreateAudioParam(mPositionY, PannerNode::POSITIONY, this->NodeType(), 0.f);
+  CreateAudioParam(mPositionZ, PannerNode::POSITIONZ, this->NodeType(), 0.f);
+  CreateAudioParam(mOrientationX, PannerNode::ORIENTATIONX, this->NodeType(),
+                   1.0f);
+  CreateAudioParam(mOrientationY, PannerNode::ORIENTATIONY, this->NodeType(),
+                   0.f);
+  CreateAudioParam(mOrientationZ, PannerNode::ORIENTATIONZ, this->NodeType(),
+                   0.f);
   mStream = AudioNodeStream::Create(
       aContext,
       new PannerNodeEngine(this, aContext->Destination(),
                            aContext->Listener()->Engine()),
       AudioNodeStream::NO_STREAM_FLAGS, aContext->Graph());
 }
 
 /* static */
--- a/dom/media/webaudio/StereoPannerNode.cpp
+++ b/dom/media/webaudio/StereoPannerNode.cpp
@@ -150,19 +150,18 @@ class StereoPannerNodeEngine final : pub
   }
 
   RefPtr<AudioNodeStream> mDestination;
   AudioParamTimeline mPan;
 };
 
 StereoPannerNode::StereoPannerNode(AudioContext* aContext)
     : AudioNode(aContext, 2, ChannelCountMode::Clamped_max,
-                ChannelInterpretation::Speakers),
-      mPan(new AudioParam(this, StereoPannerNodeEngine::PAN, "pan", 0.f, -1.f,
-                          1.f)) {
+                ChannelInterpretation::Speakers) {
+  CreateAudioParam(mPan, StereoPannerNodeEngine::PAN, "pan", 0.f, -1.f, 1.f);
   StereoPannerNodeEngine* engine =
       new StereoPannerNodeEngine(this, aContext->Destination());
   mStream = AudioNodeStream::Create(
       aContext, engine, AudioNodeStream::NO_STREAM_FLAGS, aContext->Graph());
 }
 
 /* static */
 already_AddRefed<StereoPannerNode> StereoPannerNode::Create(