Bug 865234 - Part 2: Send the channel mixing information to the AudioNodeStream; r=roc
author Ehsan Akhgari <ehsan@mozilla.com>
Sat, 27 Apr 2013 19:25:23 -0400
changeset 141399 e804341ab945ff98b48de6efd55337278353012b
parent 141398 b474f42bd080cb7c77ac4f99e2b88dfeb356b84e
child 141400 1e8d0680c950f78c9483a620947ebd8550bc4136
push id 2579
push user akeybl@mozilla.com
push date Mon, 24 Jun 2013 18:52:47 +0000
treeherder mozilla-beta@b69b7de8a05a [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers roc
bugs 865234
milestone 23.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 865234 - Part 2: Send the channel mixing information to the AudioNodeStream; r=roc
content/media/AudioNodeEngine.h
content/media/AudioNodeStream.cpp
content/media/AudioNodeStream.h
content/media/MediaStreamGraph.cpp
content/media/MediaStreamGraph.h
content/media/webaudio/AudioNode.cpp
content/media/webaudio/AudioNode.h
content/media/webaudio/DynamicsCompressorNode.cpp
content/media/webaudio/ScriptProcessorNode.cpp
--- a/content/media/AudioNodeEngine.h
+++ b/content/media/AudioNodeEngine.h
@@ -211,16 +211,22 @@ public:
   Mutex& NodeMutex() { return mNodeMutex;}
 
   dom::AudioNode* Node() const
   {
     mNodeMutex.AssertCurrentThreadOwns();
     return mNode;
   }
 
+  dom::AudioNode* NodeMainThread() const
+  {
+    MOZ_ASSERT(NS_IsMainThread());
+    return mNode;
+  }
+
   void ClearNode()
   {
     MOZ_ASSERT(NS_IsMainThread());
     MOZ_ASSERT(mNode != nullptr);
     mNodeMutex.AssertCurrentThreadOwns();
     mNode = nullptr;
   }
 
--- a/content/media/AudioNodeStream.cpp
+++ b/content/media/AudioNodeStream.cpp
@@ -155,16 +155,64 @@ AudioNodeStream::SetBuffer(already_AddRe
     }
     nsRefPtr<ThreadSharedFloatArrayBufferList> mBuffer;
   };
 
   MOZ_ASSERT(this);
   GraphImpl()->AppendMessage(new Message(this, aBuffer));
 }
 
+void
+AudioNodeStream::SetChannelMixingParameters(uint32_t aNumberOfChannels,
+                                            ChannelCountMode aChannelCountMode,
+                                            ChannelInterpretation aChannelInterpretation)
+{
+  class Message : public ControlMessage {
+  public:
+    Message(AudioNodeStream* aStream,
+            uint32_t aNumberOfChannels,
+            ChannelCountMode aChannelCountMode,
+            ChannelInterpretation aChannelInterpretation)
+      : ControlMessage(aStream),
+        mNumberOfChannels(aNumberOfChannels),
+        mChannelCountMode(aChannelCountMode),
+        mChannelInterpretation(aChannelInterpretation)
+    {}
+    virtual void Run()
+    {
+      static_cast<AudioNodeStream*>(mStream)->
+        SetChannelMixingParametersImpl(mNumberOfChannels, mChannelCountMode,
+                                       mChannelInterpretation);
+    }
+    uint32_t mNumberOfChannels;
+    ChannelCountMode mChannelCountMode;
+    ChannelInterpretation mChannelInterpretation;
+  };
+
+  MOZ_ASSERT(this);
+  GraphImpl()->AppendMessage(new Message(this, aNumberOfChannels,
+                                         aChannelCountMode,
+                                         aChannelInterpretation));
+}
+
+void
+AudioNodeStream::SetChannelMixingParametersImpl(uint32_t aNumberOfChannels,
+                                                ChannelCountMode aChannelCountMode,
+                                                ChannelInterpretation aChannelInterpretation)
+{
+  // Make sure that we're not clobbering any significant bits by fitting these
+  // values in 16 bits.
+  MOZ_ASSERT(int(aChannelCountMode) < INT16_MAX);
+  MOZ_ASSERT(int(aChannelInterpretation) < INT16_MAX);
+
+  mNumberOfInputChannels = aNumberOfChannels;
+  mMixingMode.mChannelCountMode = aChannelCountMode;
+  mMixingMode.mChannelInterpretation = aChannelInterpretation;
+}
+
 StreamBuffer::Track*
 AudioNodeStream::EnsureTrack()
 {
   StreamBuffer::Track* track = mBuffer.FindTrack(AUDIO_NODE_STREAM_TRACK_ID);
   if (!track) {
     nsAutoPtr<MediaSegment> segment(new AudioSegment());
     for (uint32_t j = 0; j < mListeners.Length(); ++j) {
       MediaStreamListener* l = mListeners[j];
--- a/content/media/AudioNodeStream.h
+++ b/content/media/AudioNodeStream.h
@@ -4,16 +4,17 @@
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef MOZILLA_AUDIONODESTREAM_H_
 #define MOZILLA_AUDIONODESTREAM_H_
 
 #include "MediaStreamGraph.h"
 #include "AudioChannelFormat.h"
 #include "AudioNodeEngine.h"
+#include "mozilla/dom/AudioNodeBinding.h"
 #include "mozilla/dom/AudioParam.h"
 
 #ifdef PR_LOGGING
 #define LOG(type, msg) PR_LOG(gMediaStreamGraphLog, type, msg)
 #else
 #define LOG(type, msg)
 #endif
 
@@ -38,23 +39,24 @@ class ThreadSharedFloatArrayBufferList;
 class AudioNodeStream : public ProcessedMediaStream {
 public:
   enum { AUDIO_TRACK = 1 };
 
   /**
    * Transfers ownership of aEngine to the new AudioNodeStream.
    */
   AudioNodeStream(AudioNodeEngine* aEngine,
-                  MediaStreamGraph::AudioNodeStreamKind aKind,
-                  uint32_t aNumberOfInputChannels = 0)
+                  MediaStreamGraph::AudioNodeStreamKind aKind)
     : ProcessedMediaStream(nullptr),
       mEngine(aEngine),
       mKind(aKind),
-      mNumberOfInputChannels(aNumberOfInputChannels)
+      mNumberOfInputChannels(2)
   {
+    mMixingMode.mChannelCountMode = dom::ChannelCountMode::Max;
+    mMixingMode.mChannelInterpretation = dom::ChannelInterpretation::Speakers;
     // AudioNodes are always producing data
     mHasCurrentData = true;
     MOZ_COUNT_CTOR(AudioNodeStream);
   }
   ~AudioNodeStream();
 
   // Control API
   /**
@@ -63,22 +65,28 @@ public:
    */
   void SetStreamTimeParameter(uint32_t aIndex, MediaStream* aRelativeToStream,
                               double aStreamTime);
   void SetDoubleParameter(uint32_t aIndex, double aValue);
   void SetInt32Parameter(uint32_t aIndex, int32_t aValue);
   void SetTimelineParameter(uint32_t aIndex, const dom::AudioParamTimeline& aValue);
   void SetThreeDPointParameter(uint32_t aIndex, const dom::ThreeDPoint& aValue);
   void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer);
+  void SetChannelMixingParameters(uint32_t aNumberOfChannels,
+                                  dom::ChannelCountMode aChannelCountMode,
+                                  dom::ChannelInterpretation aChannelInterpretation);
 
   virtual AudioNodeStream* AsAudioNodeStream() { return this; }
 
   // Graph thread only
   void SetStreamTimeParameterImpl(uint32_t aIndex, MediaStream* aRelativeToStream,
                                   double aStreamTime);
+  void SetChannelMixingParametersImpl(uint32_t aNumberOfChannels,
+                                      dom::ChannelCountMode aChannelCountMoe,
+                                      dom::ChannelCountMode aChannelCountMode,
   virtual void ProduceOutput(GraphTime aFrom, GraphTime aTo);
   TrackTicks GetCurrentPosition();
   bool AllInputsFinished() const;
 
   // Any thread
   AudioNodeEngine* Engine() { return mEngine; }
 
 protected:
@@ -90,13 +98,18 @@ protected:
   // The engine that will generate output for this node.
   nsAutoPtr<AudioNodeEngine> mEngine;
   // The last block produced by this node.
   AudioChunk mLastChunk;
   // Whether this is an internal or external stream
   MediaStreamGraph::AudioNodeStreamKind mKind;
   // The number of input channels that this stream requires. 0 means don't care.
   uint32_t mNumberOfInputChannels;
+  // The mixing modes
+  struct {
+    dom::ChannelCountMode mChannelCountMode : 16;
+    dom::ChannelInterpretation mChannelInterpretation : 16;
+  } mMixingMode;
 };
 
 }
 
 #endif /* MOZILLA_AUDIONODESTREAM_H_ */
--- a/content/media/MediaStreamGraph.cpp
+++ b/content/media/MediaStreamGraph.cpp
@@ -2005,20 +2005,23 @@ MediaStreamGraph::CreateTrackUnionStream
   MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(this);
   stream->SetGraphImpl(graph);
   graph->AppendMessage(new CreateMessage(stream));
   return stream;
 }
 
 AudioNodeStream*
 MediaStreamGraph::CreateAudioNodeStream(AudioNodeEngine* aEngine,
-                                        AudioNodeStreamKind aKind,
-                                        uint32_t aNumberOfInputChannels)
+                                        AudioNodeStreamKind aKind)
 {
-  AudioNodeStream* stream = new AudioNodeStream(aEngine, aKind, aNumberOfInputChannels);
+  MOZ_ASSERT(NS_IsMainThread());
+  AudioNodeStream* stream = new AudioNodeStream(aEngine, aKind);
   NS_ADDREF(stream);
   MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(this);
   stream->SetGraphImpl(graph);
+  stream->SetChannelMixingParametersImpl(aEngine->NodeMainThread()->ChannelCount(),
+                                         aEngine->NodeMainThread()->ChannelCountModeValue(),
+                                         aEngine->NodeMainThread()->ChannelInterpretationValue());
   graph->AppendMessage(new CreateMessage(stream));
   return stream;
 }
 
 }
--- a/content/media/MediaStreamGraph.h
+++ b/content/media/MediaStreamGraph.h
@@ -904,18 +904,17 @@ public:
   // AudioNode, whereas external AudioNodeStreams can pass their output
   // to an nsAudioStream for playback.
   enum AudioNodeStreamKind { INTERNAL_STREAM, EXTERNAL_STREAM };
   /**
    * Create a stream that will process audio for an AudioNode.
    * Takes ownership of aEngine.
    */
   AudioNodeStream* CreateAudioNodeStream(AudioNodeEngine* aEngine,
-                                         AudioNodeStreamKind aKind,
-                                         uint32_t aNumberOfInputChannels = 0);
+                                         AudioNodeStreamKind aKind);
   /**
    * Returns the number of graph updates sent. This can be used to track
    * whether a given update has been processed by the graph thread and reflected
    * in main-thread stream state.
    */
   int64_t GetCurrentGraphUpdateIndex() { return mGraphUpdatesSent; }
 
   /**
--- a/content/media/webaudio/AudioNode.cpp
+++ b/content/media/webaudio/AudioNode.cpp
@@ -179,16 +179,25 @@ void
 AudioNode::SendThreeDPointParameterToStream(uint32_t aIndex, const ThreeDPoint& aValue)
 {
   AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get());
   MOZ_ASSERT(ns, "How come we don't have a stream here?");
   ns->SetThreeDPointParameter(aIndex, aValue);
 }
 
 void
+AudioNode::SendChannelMixingParametersToStream()
+{
+  AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get());
+  MOZ_ASSERT(ns, "How come we don't have a stream here?");
+  ns->SetChannelMixingParameters(mChannelCount, mChannelCountMode,
+                                 mChannelInterpretation);
+}
+
+void
 AudioNode::SendTimelineParameterToStream(AudioNode* aNode, uint32_t aIndex,
                                          const AudioParamTimeline& aValue)
 {
   AudioNodeStream* ns = static_cast<AudioNodeStream*>(aNode->mStream.get());
   MOZ_ASSERT(ns, "How come we don't have a stream here?");
   ns->SetTimelineParameter(aIndex, aValue);
 }
 
--- a/content/media/webaudio/AudioNode.h
+++ b/content/media/webaudio/AudioNode.h
@@ -122,32 +122,35 @@ public:
   // constant for the lifetime of the node. Both default to 1.
   virtual uint32_t NumberOfInputs() const { return 1; }
   virtual uint32_t NumberOfOutputs() const { return 1; }
 
   uint32_t ChannelCount() const { return mChannelCount; }
   void SetChannelCount(uint32_t aChannelCount)
   {
     mChannelCount = aChannelCount;
+    SendChannelMixingParametersToStream();
   }
   ChannelCountMode ChannelCountModeValue() const
   {
     return mChannelCountMode;
   }
   void SetChannelCountModeValue(ChannelCountMode aMode)
   {
     mChannelCountMode = aMode;
+    SendChannelMixingParametersToStream();
   }
   ChannelInterpretation ChannelInterpretationValue() const
   {
     return mChannelInterpretation;
   }
   void SetChannelInterpretationValue(ChannelInterpretation aMode)
   {
     mChannelInterpretation = aMode;
+    SendChannelMixingParametersToStream();
   }
 
   struct InputNode {
     ~InputNode()
     {
       if (mStreamPort) {
         mStreamPort->Destroy();
       }
@@ -175,16 +178,17 @@ private:
 
 protected:
   static void Callback(AudioNode* aNode) { /* not implemented */ }
 
   // Helpers for sending different value types to streams
   void SendDoubleParameterToStream(uint32_t aIndex, double aValue);
   void SendInt32ParameterToStream(uint32_t aIndex, int32_t aValue);
   void SendThreeDPointParameterToStream(uint32_t aIndex, const ThreeDPoint& aValue);
+  void SendChannelMixingParametersToStream();
   static void SendTimelineParameterToStream(AudioNode* aNode, uint32_t aIndex,
                                             const AudioParamTimeline& aValue);
 
 private:
   nsRefPtr<AudioContext> mContext;
 
 protected:
   // Must be set in the constructor. Must not be null.
--- a/content/media/webaudio/DynamicsCompressorNode.cpp
+++ b/content/media/webaudio/DynamicsCompressorNode.cpp
@@ -173,29 +173,28 @@ private:
   AudioParamTimeline mRatio;
   AudioParamTimeline mAttack;
   AudioParamTimeline mRelease;
   nsAutoPtr<DynamicsCompressor> mCompressor;
 };
 
 DynamicsCompressorNode::DynamicsCompressorNode(AudioContext* aContext)
   : AudioNode(aContext,
-              2,
+              DEFAULT_NUMBER_OF_CHANNELS,
               ChannelCountMode::Explicit,
               ChannelInterpretation::Speakers)
   , mThreshold(new AudioParam(this, SendThresholdToStream, -24.f))
   , mKnee(new AudioParam(this, SendKneeToStream, 30.f))
   , mRatio(new AudioParam(this, SendRatioToStream, 12.f))
   , mReduction(new AudioParam(this, Callback, 0.f))
   , mAttack(new AudioParam(this, SendAttackToStream, 0.003f))
   , mRelease(new AudioParam(this, SendReleaseToStream, 0.25f))
 {
   DynamicsCompressorNodeEngine* engine = new DynamicsCompressorNodeEngine(this, aContext->Destination());
-  mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM,
-                                                     DEFAULT_NUMBER_OF_CHANNELS);
+  mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
   engine->SetSourceStream(static_cast<AudioNodeStream*> (mStream.get()));
 }
 
 JSObject*
 DynamicsCompressorNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aScope)
 {
   return DynamicsCompressorNodeBinding::Wrap(aCx, aScope, this);
 }
--- a/content/media/webaudio/ScriptProcessorNode.cpp
+++ b/content/media/webaudio/ScriptProcessorNode.cpp
@@ -355,33 +355,32 @@ private:
   bool mSeenNonSilenceInput;
 };
 
 ScriptProcessorNode::ScriptProcessorNode(AudioContext* aContext,
                                          uint32_t aBufferSize,
                                          uint32_t aNumberOfInputChannels,
                                          uint32_t aNumberOfOutputChannels)
   : AudioNode(aContext,
-              2,
-              ChannelCountMode::Explicit,
-              ChannelInterpretation::Speakers)
+              aNumberOfInputChannels,
+              mozilla::dom::ChannelCountMode::Explicit,
+              mozilla::dom::ChannelInterpretation::Speakers)
   , mSharedBuffers(new SharedBuffers())
   , mBufferSize(aBufferSize ?
                   aBufferSize : // respect what the web developer requested
                   4096)         // choose our own buffer size -- 4KB for now
   , mNumberOfOutputChannels(aNumberOfOutputChannels)
 {
   MOZ_ASSERT(BufferSize() % WEBAUDIO_BLOCK_SIZE == 0, "Invalid buffer size");
   ScriptProcessorNodeEngine* engine =
     new ScriptProcessorNodeEngine(this,
                                   aContext->Destination(),
                                   BufferSize(),
                                   aNumberOfInputChannels);
-  mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM,
-                                                     aNumberOfInputChannels);
+  mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
   engine->SetSourceStream(static_cast<AudioNodeStream*> (mStream.get()));
 }
 
 ScriptProcessorNode::~ScriptProcessorNode()
 {
   if (Context()) {
     Context()->UnregisterScriptProcessorNode(this);
   }