Bug 865234 - Part 1: Add DOM bindings for the channel mixing attributes; r=roc
author Ehsan Akhgari <ehsan@mozilla.com>
Sat, 27 Apr 2013 18:44:50 -0400
changeset 142232 b474f42bd080cb7c77ac4f99e2b88dfeb356b84e
parent 142231 efe2917fb849e3e08cf54df854ef9fffb0595865
child 142233 e804341ab945ff98b48de6efd55337278353012b
push id 350
push user bbajaj@mozilla.com
push date Mon, 29 Jul 2013 23:00:49 +0000
treeherder mozilla-release@064965b37dbd [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers roc
bugs 865234
milestone23.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 865234 - Part 1: Add DOM bindings for the channel mixing attributes; r=roc
content/media/webaudio/AnalyserNode.cpp
content/media/webaudio/AudioBufferSourceNode.cpp
content/media/webaudio/AudioDestinationNode.cpp
content/media/webaudio/AudioNode.cpp
content/media/webaudio/AudioNode.h
content/media/webaudio/BiquadFilterNode.cpp
content/media/webaudio/DelayNode.cpp
content/media/webaudio/DynamicsCompressorNode.cpp
content/media/webaudio/GainNode.cpp
content/media/webaudio/PannerNode.cpp
content/media/webaudio/ScriptProcessorNode.cpp
content/media/webaudio/test/test_analyserNode.html
content/media/webaudio/test/test_biquadFilterNode.html
content/media/webaudio/test/test_delayNode.html
content/media/webaudio/test/test_dynamicsCompressorNode.html
content/media/webaudio/test/test_gainNode.html
content/media/webaudio/test/test_pannerNode.html
content/media/webaudio/test/test_scriptProcessorNode.html
content/media/webaudio/test/test_singleSourceDest.html
dom/bindings/Bindings.conf
dom/webidl/AudioNode.webidl
--- a/content/media/webaudio/AnalyserNode.cpp
+++ b/content/media/webaudio/AnalyserNode.cpp
@@ -70,17 +70,20 @@ public:
         aInput.mChannelData.Length() > 0) {
       nsRefPtr<TransferBuffer> transfer = new TransferBuffer(aStream, aInput);
       NS_DispatchToMainThread(transfer);
     }
   }
 };
 
 AnalyserNode::AnalyserNode(AudioContext* aContext)
-  : AudioNode(aContext)
+  : AudioNode(aContext,
+              1,
+              ChannelCountMode::Explicit,
+              ChannelInterpretation::Speakers)
   , mFFTSize(2048)
   , mMinDecibels(-100.)
   , mMaxDecibels(-30.)
   , mSmoothingTimeConstant(.8)
   , mWriteIndex(0)
 {
   mStream = aContext->Graph()->CreateAudioNodeStream(new AnalyserNodeEngine(this),
                                                      MediaStreamGraph::INTERNAL_STREAM);
--- a/content/media/webaudio/AudioBufferSourceNode.cpp
+++ b/content/media/webaudio/AudioBufferSourceNode.cpp
@@ -398,17 +398,20 @@ public:
   float mPlaybackRate;
   float mDopplerShift;
   AudioNodeStream* mDestination;
   AudioParamTimeline mPlaybackRateTimeline;
   bool mLoop;
 };
 
 AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* aContext)
-  : AudioNode(aContext)
+  : AudioNode(aContext,
+              2,
+              ChannelCountMode::Max,
+              ChannelInterpretation::Speakers)
   , mLoopStart(0.0)
   , mLoopEnd(0.0)
   , mOffset(0.0)
   , mDuration(std::numeric_limits<double>::min())
   , mPlaybackRate(new AudioParam(this, SendPlaybackRateToStream, 1.0f))
   , mPannerNode(nullptr)
   , mLoop(false)
   , mStartCalled(false)
--- a/content/media/webaudio/AudioDestinationNode.cpp
+++ b/content/media/webaudio/AudioDestinationNode.cpp
@@ -11,17 +11,20 @@
 #include "MediaStreamGraph.h"
 
 namespace mozilla {
 namespace dom {
 
 NS_IMPL_ISUPPORTS_INHERITED0(AudioDestinationNode, AudioNode)
 
 AudioDestinationNode::AudioDestinationNode(AudioContext* aContext, MediaStreamGraph* aGraph)
-  : AudioNode(aContext)
+  : AudioNode(aContext,
+              2,
+              ChannelCountMode::Explicit,
+              ChannelInterpretation::Speakers)
 {
   mStream = aGraph->CreateAudioNodeStream(new AudioNodeEngine(this),
                                           MediaStreamGraph::EXTERNAL_STREAM);
 }
 
 JSObject*
 AudioDestinationNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aScope)
 {
--- a/content/media/webaudio/AudioNode.cpp
+++ b/content/media/webaudio/AudioNode.cpp
@@ -36,18 +36,24 @@ AudioNode::Release()
   nsrefcnt r = nsDOMEventTargetHelper::Release();
   NS_LOG_RELEASE(this, r, "AudioNode");
   return r;
 }
 
 NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(AudioNode)
 NS_INTERFACE_MAP_END_INHERITING(nsDOMEventTargetHelper)
 
-AudioNode::AudioNode(AudioContext* aContext)
+AudioNode::AudioNode(AudioContext* aContext,
+                     uint32_t aChannelCount,
+                     ChannelCountMode aChannelCountMode,
+                     ChannelInterpretation aChannelInterpretation)
   : mContext(aContext)
+  , mChannelCount(aChannelCount)
+  , mChannelCountMode(aChannelCountMode)
+  , mChannelInterpretation(aChannelInterpretation)
 {
   MOZ_ASSERT(aContext);
   nsDOMEventTargetHelper::BindToOwner(aContext->GetParentObject());
   SetIsDOMBinding();
 }
 
 AudioNode::~AudioNode()
 {
--- a/content/media/webaudio/AudioNode.h
+++ b/content/media/webaudio/AudioNode.h
@@ -3,16 +3,17 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef AudioNode_h_
 #define AudioNode_h_
 
 #include "nsDOMEventTargetHelper.h"
+#include "mozilla/dom/AudioNodeBinding.h"
 #include "nsCycleCollectionParticipant.h"
 #include "mozilla/Attributes.h"
 #include "EnableWebAudioCheck.h"
 #include "nsAutoPtr.h"
 #include "nsTArray.h"
 #include "AudioContext.h"
 #include "AudioParamTimeline.h"
 #include "MediaStreamGraph.h"
@@ -73,17 +74,20 @@ private:
 class AudioNode : public nsDOMEventTargetHelper,
                   public EnableWebAudioCheck
 {
 protected:
   // You can only use refcounting to delete this object
   virtual ~AudioNode();
 
 public:
-  explicit AudioNode(AudioContext* aContext);
+  AudioNode(AudioContext* aContext,
+            uint32_t aChannelCount,
+            ChannelCountMode aChannelCountMode,
+            ChannelInterpretation aChannelInterpretation);
 
   // This should be idempotent (safe to call multiple times).
   virtual void DestroyMediaStream();
 
   // This method should be overridden to return true in nodes
   // which support being hooked up to the Media Stream graph.
   virtual bool SupportsMediaStreams() const
   {
@@ -114,16 +118,38 @@ public:
   virtual void Disconnect(uint32_t aOutput, ErrorResult& aRv);
 
   // The following two virtual methods must be implemented by each node type
   // to provide their number of input and output ports. These numbers are
   // constant for the lifetime of the node. Both default to 1.
   virtual uint32_t NumberOfInputs() const { return 1; }
   virtual uint32_t NumberOfOutputs() const { return 1; }
 
+  uint32_t ChannelCount() const { return mChannelCount; }
+  void SetChannelCount(uint32_t aChannelCount)
+  {
+    mChannelCount = aChannelCount;
+  }
+  ChannelCountMode ChannelCountModeValue() const
+  {
+    return mChannelCountMode;
+  }
+  void SetChannelCountModeValue(ChannelCountMode aMode)
+  {
+    mChannelCountMode = aMode;
+  }
+  ChannelInterpretation ChannelInterpretationValue() const
+  {
+    return mChannelInterpretation;
+  }
+  void SetChannelInterpretationValue(ChannelInterpretation aMode)
+  {
+    mChannelInterpretation = aMode;
+  }
+
   struct InputNode {
     ~InputNode()
     {
       if (mStreamPort) {
         mStreamPort->Destroy();
       }
     }
 
@@ -169,14 +195,17 @@ private:
   // For every InputNode, there is a corresponding entry in mOutputNodes of the
   // InputNode's mInputNode.
   nsTArray<InputNode> mInputNodes;
   // For every mOutputNode entry, there is a corresponding entry in mInputNodes
   // of the mOutputNode entry. We won't necessarily be able to identify the
   // exact matching entry, since mOutputNodes doesn't include the port
   // identifiers and the same node could be connected on multiple ports.
   nsTArray<nsRefPtr<AudioNode> > mOutputNodes;
+  uint32_t mChannelCount;
+  ChannelCountMode mChannelCountMode;
+  ChannelInterpretation mChannelInterpretation;
 };
 
 }
 }
 
 #endif
--- a/content/media/webaudio/BiquadFilterNode.cpp
+++ b/content/media/webaudio/BiquadFilterNode.cpp
@@ -166,17 +166,20 @@ private:
   AudioParamTimeline mFrequency;
   AudioParamTimeline mDetune;
   AudioParamTimeline mQ;
   AudioParamTimeline mGain;
   nsTArray<WebCore::Biquad> mBiquads;
 };
 
 BiquadFilterNode::BiquadFilterNode(AudioContext* aContext)
-  : AudioNode(aContext)
+  : AudioNode(aContext,
+              2,
+              ChannelCountMode::Max,
+              ChannelInterpretation::Speakers)
   , mType(BiquadFilterType::Lowpass)
   , mFrequency(new AudioParam(this, SendFrequencyToStream, 350.f))
   , mDetune(new AudioParam(this, SendDetuneToStream, 0.f))
   , mQ(new AudioParam(this, SendQToStream, 1.f))
   , mGain(new AudioParam(this, SendGainToStream, 0.f))
 {
   BiquadFilterNodeEngine* engine = new BiquadFilterNodeEngine(this, aContext->Destination());
   mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
--- a/content/media/webaudio/DelayNode.cpp
+++ b/content/media/webaudio/DelayNode.cpp
@@ -266,17 +266,20 @@ public:
   // How much data we have in our buffer which needs to be flushed out when our inputs
   // finish.
   int32_t mLeftOverData;
   // Current delay time, in seconds
   double mCurrentDelayTime;
 };
 
 DelayNode::DelayNode(AudioContext* aContext, double aMaxDelay)
-  : AudioNode(aContext)
+  : AudioNode(aContext,
+              2,
+              ChannelCountMode::Max,
+              ChannelInterpretation::Speakers)
   , mDelay(new AudioParam(this, SendDelayToStream, 0.0f))
 {
   DelayNodeEngine* engine = new DelayNodeEngine(this, aContext->Destination());
   mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
   engine->SetSourceStream(static_cast<AudioNodeStream*> (mStream.get()));
   AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get());
   ns->SetDoubleParameter(DelayNodeEngine::MAX_DELAY, aMaxDelay);
 }
--- a/content/media/webaudio/DynamicsCompressorNode.cpp
+++ b/content/media/webaudio/DynamicsCompressorNode.cpp
@@ -172,17 +172,20 @@ private:
   AudioParamTimeline mKnee;
   AudioParamTimeline mRatio;
   AudioParamTimeline mAttack;
   AudioParamTimeline mRelease;
   nsAutoPtr<DynamicsCompressor> mCompressor;
 };
 
 DynamicsCompressorNode::DynamicsCompressorNode(AudioContext* aContext)
-  : AudioNode(aContext)
+  : AudioNode(aContext,
+              2,
+              ChannelCountMode::Explicit,
+              ChannelInterpretation::Speakers)
   , mThreshold(new AudioParam(this, SendThresholdToStream, -24.f))
   , mKnee(new AudioParam(this, SendKneeToStream, 30.f))
   , mRatio(new AudioParam(this, SendRatioToStream, 12.f))
   , mReduction(new AudioParam(this, Callback, 0.f))
   , mAttack(new AudioParam(this, SendAttackToStream, 0.003f))
   , mRelease(new AudioParam(this, SendReleaseToStream, 0.25f))
 {
   DynamicsCompressorNodeEngine* engine = new DynamicsCompressorNodeEngine(this, aContext->Destination());
--- a/content/media/webaudio/GainNode.cpp
+++ b/content/media/webaudio/GainNode.cpp
@@ -90,17 +90,20 @@ public:
   }
 
   AudioNodeStream* mSource;
   AudioNodeStream* mDestination;
   AudioParamTimeline mGain;
 };
 
 GainNode::GainNode(AudioContext* aContext)
-  : AudioNode(aContext)
+  : AudioNode(aContext,
+              2,
+              ChannelCountMode::Max,
+              ChannelInterpretation::Speakers)
   , mGain(new AudioParam(this, SendGainToStream, 1.0f))
 {
   GainNodeEngine* engine = new GainNodeEngine(this, aContext->Destination());
   mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
   engine->SetSourceStream(static_cast<AudioNodeStream*> (mStream.get()));
 }
 
 JSObject*
--- a/content/media/webaudio/PannerNode.cpp
+++ b/content/media/webaudio/PannerNode.cpp
@@ -169,17 +169,20 @@ public:
   ThreeDPoint mListenerOrientation;
   ThreeDPoint mListenerUpVector;
   ThreeDPoint mListenerVelocity;
   double mListenerDopplerFactor;
   double mListenerSpeedOfSound;
 };
 
 PannerNode::PannerNode(AudioContext* aContext)
-  : AudioNode(aContext)
+  : AudioNode(aContext,
+              2,
+              ChannelCountMode::Clamped_max,
+              ChannelInterpretation::Speakers)
   // Please keep these default values consistent with PannerNodeEngine::PannerNodeEngine above.
   , mPanningModel(PanningModelTypeValues::HRTF)
   , mDistanceModel(DistanceModelTypeValues::Inverse)
   , mPosition()
   , mOrientation(1., 0., 0.)
   , mVelocity()
   , mRefDistance(1.)
   , mMaxDistance(10000.)
--- a/content/media/webaudio/ScriptProcessorNode.cpp
+++ b/content/media/webaudio/ScriptProcessorNode.cpp
@@ -354,17 +354,20 @@ private:
   uint32_t mInputWriteIndex;
   bool mSeenNonSilenceInput;
 };
 
 ScriptProcessorNode::ScriptProcessorNode(AudioContext* aContext,
                                          uint32_t aBufferSize,
                                          uint32_t aNumberOfInputChannels,
                                          uint32_t aNumberOfOutputChannels)
-  : AudioNode(aContext)
+  : AudioNode(aContext,
+              2,
+              ChannelCountMode::Explicit,
+              ChannelInterpretation::Speakers)
   , mSharedBuffers(new SharedBuffers())
   , mBufferSize(aBufferSize ?
                   aBufferSize : // respect what the web developer requested
                   4096)         // choose our own buffer size -- 4KB for now
   , mNumberOfOutputChannels(aNumberOfOutputChannels)
 {
   MOZ_ASSERT(BufferSize() % WEBAUDIO_BLOCK_SIZE == 0, "Invalid buffer size");
   ScriptProcessorNodeEngine* engine =
--- a/content/media/webaudio/test/test_analyserNode.html
+++ b/content/media/webaudio/test/test_analyserNode.html
@@ -26,16 +26,20 @@ addLoadEvent(function() {
 
   var analyser = context.createAnalyser();
 
   source.buffer = buffer;
 
   source.connect(analyser);
   analyser.connect(destination);
 
+  is(analyser.channelCount, 1, "analyser node has 1 input channel by default");
+  is(analyser.channelCountMode, "explicit", "Correct channelCountMode for the analyser node");
+  is(analyser.channelInterpretation, "speakers", "Correct channelInterpretation for the analyser node");
+
   is(analyser.fftSize, 2048, "Correct default value for fftSize");
   is(analyser.frequencyBinCount, 1024, "Correct default value for frequencyBinCount");
   expectException(function() {
     analyser.fftSize = 0;
   }, DOMException.INDEX_SIZE_ERR);
   expectException(function() {
     analyser.fftSize = 1;
   }, DOMException.INDEX_SIZE_ERR);
--- a/content/media/webaudio/test/test_biquadFilterNode.html
+++ b/content/media/webaudio/test/test_biquadFilterNode.html
@@ -36,16 +36,19 @@ addLoadEvent(function() {
   filter.connect(destination);
 
   // Verify default values
   is(filter.type, "lowpass", "Correct default value for type");
   near(filter.frequency.defaultValue, 350, "Correct default value for filter frequency");
   near(filter.detune.defaultValue, 0, "Correct default value for filter detune");
   near(filter.Q.defaultValue, 1, "Correct default value for filter Q");
   near(filter.gain.defaultValue, 0, "Correct default value for filter gain");
+  is(filter.channelCount, 2, "Biquad filter node has 2 input channels by default");
+  is(filter.channelCountMode, "max", "Correct channelCountMode for the biquad filter node");
+  is(filter.channelInterpretation, "speakers", "Correct channelInterpretation for the biquad filter node");
 
   // Make sure that we can set all of the valid type values
   var types = [
     "lowpass",
     "highpass",
     "bandpass",
     "lowshelf",
     "highshelf",
--- a/content/media/webaudio/test/test_delayNode.html
+++ b/content/media/webaudio/test/test_delayNode.html
@@ -41,16 +41,19 @@ addLoadEvent(function() {
   sp.connect(destination);
 
   ok(delay.delayTime, "The audioparam member must exist");
   is(delay.delayTime.value, 0, "Correct initial value");
   is(delay.delayTime.defaultValue, 0, "Correct default value");
   delay.delayTime.value = 0.5;
   is(delay.delayTime.value, 0.5, "Correct initial value");
   is(delay.delayTime.defaultValue, 0, "Correct default value");
+  is(delay.channelCount, 2, "delay node has 2 input channels by default");
+  is(delay.channelCountMode, "max", "Correct channelCountMode for the delay node");
+  is(delay.channelInterpretation, "speakers", "Correct channelInterpretation for the delay node");
 
   var delay2 = context.createDelay(2);
   is(delay2.delayTime.value, 0, "Correct initial value");
   is(delay2.delayTime.defaultValue, 0, "Correct default value");
   delay2.delayTime.value = 0.5;
   is(delay2.delayTime.value, 0.5, "Correct initial value");
   is(delay2.delayTime.defaultValue, 0, "Correct default value");
 
--- a/content/media/webaudio/test/test_dynamicsCompressorNode.html
+++ b/content/media/webaudio/test/test_dynamicsCompressorNode.html
@@ -29,16 +29,20 @@ addLoadEvent(function() {
 
   var compressor = context.createDynamicsCompressor();
 
   source.buffer = buffer;
 
   source.connect(compressor);
   compressor.connect(destination);
 
+  is(compressor.channelCount, 2, "compressor node has 2 input channels by default");
+  is(compressor.channelCountMode, "explicit", "Correct channelCountMode for the compressor node");
+  is(compressor.channelInterpretation, "speakers", "Correct channelInterpretation for the compressor node");
+
   // Verify default values
   with (compressor) {
     near(threshold.defaultValue, -24, "Correct default value for threshold");
     near(knee.defaultValue, 30, "Correct default value for knee");
     near(ratio.defaultValue, 12, "Correct default value for ratio");
     near(reduction.defaultValue, 0, "Correct default value for reduction");
     near(attack.defaultValue, 0.003, "Correct default value for attack");
     near(release.defaultValue, 0.25, "Correct default value for release");
--- a/content/media/webaudio/test/test_gainNode.html
+++ b/content/media/webaudio/test/test_gainNode.html
@@ -41,16 +41,19 @@ addLoadEvent(function() {
   sp.connect(destination);
 
   ok(gain.gain, "The audioparam member must exist");
   is(gain.gain.value, 1.0, "Correct initial value");
   is(gain.gain.defaultValue, 1.0, "Correct default value");
   gain.gain.value = 0.5;
   is(gain.gain.value, 0.5, "Correct initial value");
   is(gain.gain.defaultValue, 1.0, "Correct default value");
+  is(gain.channelCount, 2, "gain node has 2 input channels by default");
+  is(gain.channelCountMode, "max", "Correct channelCountMode for the gain node");
+  is(gain.channelInterpretation, "speakers", "Correct channelInterpretation for the gain node");
 
   source.start(0);
   sp.onaudioprocess = function(e) {
     is(e.inputBuffer.numberOfChannels, 1, "Correct input channel count");
     compareBuffers(e.inputBuffer.getChannelData(0), expectedBuffer.getChannelData(0));
 
     sp.onaudioprocess = null;
 
--- a/content/media/webaudio/test/test_pannerNode.html
+++ b/content/media/webaudio/test/test_pannerNode.html
@@ -38,16 +38,19 @@ addLoadEvent(function() {
   is(panner.panningModel, "HRTF", "Correct default value for panning model");
   is(panner.distanceModel, "inverse", "Correct default value for distance model");
   near(panner.refDistance, 1, "Correct default value for ref distance");
   near(panner.maxDistance, 10000, "Correct default value for max distance");
   near(panner.rolloffFactor, 1, "Correct default value for rolloff factor");
   near(panner.coneInnerAngle, 360, "Correct default value for cone inner angle");
   near(panner.coneOuterAngle, 360, "Correct default value for cone outer angle");
   near(panner.coneOuterGain, 0, "Correct default value for cone outer gain");
+  is(panner.channelCount, 2, "panner node has 2 input channels by default");
+  is(panner.channelCountMode, "clamped-max", "Correct channelCountMode for the panner node");
+  is(panner.channelInterpretation, "speakers", "Correct channelInterpretation for the panner node");
 
   panner.setPosition(1, 1, 1);
   panner.setOrientation(1, 1, 1);
   panner.setVelocity(1, 1, 1);
 
   source.start(0);
   SimpleTest.executeSoon(function() {
     source.stop(0);
--- a/content/media/webaudio/test/test_scriptProcessorNode.html
+++ b/content/media/webaudio/test/test_scriptProcessorNode.html
@@ -39,16 +39,20 @@ addLoadEvent(function() {
   }, DOMException.INDEX_SIZE_ERR);
   expectException(function() {
     context.createScriptProcessor(128);
   }, DOMException.INDEX_SIZE_ERR);
   expectException(function() {
     context.createScriptProcessor(255);
   }, DOMException.INDEX_SIZE_ERR);
 
+  is(sourceSP.channelCount, 2, "script processor node has 2 input channels by default");
+  is(sourceSP.channelCountMode, "explicit", "Correct channelCountMode for the script processor node");
+  is(sourceSP.channelInterpretation, "speakers", "Correct channelInterpretation for the script processor node");
+
   function findFirstNonZeroSample(buffer) {
     for (var i = 0; i < buffer.length; ++i) {
       if (buffer.getChannelData(0)[i] != 0) {
         return i;
       }
     }
     return buffer.length;
   }
--- a/content/media/webaudio/test/test_singleSourceDest.html
+++ b/content/media/webaudio/test/test_singleSourceDest.html
@@ -20,16 +20,19 @@ addLoadEvent(function() {
     buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
   }
 
   var destination = context.destination;
   is(destination.context, context, "Destination node has proper context");
   is(destination.context, context, "Destination node has proper context");
   is(destination.numberOfInputs, 1, "Destination node has 1 inputs");
   is(destination.numberOfOutputs, 0, "Destination node has 0 outputs");
+  is(destination.channelCount, 2, "Destination node has 2 input channels by default");
+  is(destination.channelCountMode, "explicit", "Correct channelCountMode for the destination node");
+  is(destination.channelInterpretation, "speakers", "Correct channelInterpretation for the destination node");
   ok(destination instanceof EventTarget, "AudioNodes must be EventTargets");
 
   testWith(context, buffer, destination, function(source) {
     source.start(0);
   }, function(source) {
     source.stop();
   }, function() {
     testWith(context, buffer, destination, function(source) {
@@ -79,16 +82,19 @@ function createNode(context, buffer, des
   var source = context.createBufferSource();
   is(source.context, context, "Source node has proper context");
   is(source.numberOfInputs, 0, "Source node has 0 inputs");
   is(source.numberOfOutputs, 1, "Source node has 1 outputs");
   is(source.loop, false, "Source node is not looping");
   is(source.loopStart, 0, "Correct default value for loopStart");
   is(source.loopEnd, 0, "Correct default value for loopEnd");
   ok(!source.buffer, "Source node should not have a buffer when it's created");
+  is(source.channelCount, 2, "source node has 2 input channels by default");
+  is(source.channelCountMode, "max", "Correct channelCountMode for the source node");
+  is(source.channelInterpretation, "speakers", "Correct channelInterpretation for the source node");
 
   source.buffer = buffer;
   ok(source.buffer, "Source node should have a buffer now");
 
   source.connect(destination);
 
   is(source.numberOfInputs, 0, "Source node has 0 inputs");
   is(source.numberOfOutputs, 1, "Source node has 0 outputs");
--- a/dom/bindings/Bindings.conf
+++ b/dom/bindings/Bindings.conf
@@ -111,16 +111,20 @@ DOMInterfaces = {
 },
 
 'AudioListener' : {
     'nativeOwnership': 'refcounted'
 },
 
 'AudioNode' : {
     'concrete': False,
+    'binaryNames': {
+        'channelCountMode': 'channelCountModeValue',
+        'channelInterpretation': 'channelInterpretationValue',
+    },
 },
 
 'AudioParam' : {
     'nativeOwnership': 'refcounted'
 },
 
 'AudioProcessingEvent' : {
     'resultNotAddRefed': [ 'inputBuffer', 'outputBuffer' ],
--- a/dom/webidl/AudioNode.webidl
+++ b/dom/webidl/AudioNode.webidl
@@ -5,26 +5,42 @@
  *
  * The origin of this IDL file is
  * https://dvcs.w3.org/hg/audio/raw-file/tip/webaudio/specification.html
  *
  * Copyright © 2012 W3C® (MIT, ERCIM, Keio), All Rights Reserved. W3C
  * liability, trademark and document use rules apply.
  */
 
+enum ChannelCountMode {
+    "max",
+    "clamped-max",
+    "explicit"
+};
+
+enum ChannelInterpretation {
+    "speakers",
+    "discrete"
+};
+
 [PrefControlled]
 interface AudioNode : EventTarget {
 
     [Throws]
     void connect(AudioNode destination, optional unsigned long output = 0, optional unsigned long input = 0);
 
     // [Throws]
     // void connect(AudioParam destination, optional unsigned long output = 0);
 
     [Throws]
     void disconnect(optional unsigned long output = 0);
 
     readonly attribute AudioContext context;
     readonly attribute unsigned long numberOfInputs;
     readonly attribute unsigned long numberOfOutputs;
 
+    // Channel up-mixing and down-mixing rules for all inputs.
+    attribute unsigned long channelCount;
+    attribute ChannelCountMode channelCountMode;
+    attribute ChannelInterpretation channelInterpretation;
+
 };