Bug 873553 - Part 2: Teach each AudioNodeStream about its sampling rate, and store the value inside AudioContext; r=roc
author: Ehsan Akhgari <ehsan@mozilla.com>
date: Fri, 24 May 2013 13:09:29 -0400
changeset: 144413:738b82a250374580068213fd122f0a54af1e5a33
parent: 144412:ef11b35e45f12c4b428223055cd5e2ea87334132
child: 144414:34b39b3d4773ee12206b8c2cb34cd9da339897f2
push id: 2697
push user: bbajaj@mozilla.com
push date: Mon, 05 Aug 2013 18:49:53 +0000
treeherder: mozilla-beta@dfec938c7b63
reviewers: roc
bugs: 873553
milestone: 24.0a1
content/media/AudioNodeEngine.h
content/media/AudioNodeStream.cpp
content/media/AudioNodeStream.h
content/media/MediaStreamGraph.cpp
content/media/MediaStreamGraph.h
content/media/MediaStreamGraphImpl.h
content/media/webaudio/AudioContext.cpp
content/media/webaudio/AudioContext.h
content/media/webaudio/AudioParam.cpp
--- a/content/media/AudioNodeEngine.h
+++ b/content/media/AudioNodeEngine.h
@@ -2,23 +2,23 @@
 /* vim:set ts=2 sw=2 sts=2 et cindent: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 #ifndef MOZILLA_AUDIONODEENGINE_H_
 #define MOZILLA_AUDIONODEENGINE_H_
 
 #include "AudioSegment.h"
+#include "mozilla/dom/AudioNode.h"
 #include "mozilla/dom/AudioParam.h"
 #include "mozilla/Mutex.h"
 
 namespace mozilla {
 
 namespace dom {
-class AudioNode;
 struct ThreeDPoint;
 }
 
 class AudioNodeStream;
 
 /**
  * This class holds onto a set of immutable channel buffers. The storage
  * for the buffers must be malloced, but the buffer pointers and the malloc
@@ -149,16 +149,17 @@ public:
   typedef nsAutoTArray<AudioChunk, 1> OutputChunks;
 
   explicit AudioNodeEngine(dom::AudioNode* aNode)
     : mNode(aNode)
     , mNodeMutex("AudioNodeEngine::mNodeMutex")
     , mInputCount(aNode ? aNode->NumberOfInputs() : 1)
     , mOutputCount(aNode ? aNode->NumberOfOutputs() : 0)
   {
+    MOZ_ASSERT(NS_IsMainThread());
     MOZ_COUNT_CTOR(AudioNodeEngine);
   }
   virtual ~AudioNodeEngine()
   {
     MOZ_ASSERT(!mNode, "The node reference must be already cleared");
     MOZ_COUNT_DTOR(AudioNodeEngine);
   }
 
@@ -193,18 +194,18 @@ public:
   {
     NS_ERROR("SetRawArrayData called on an engine that doesn't support it");
   }
 
   /**
    * Produce the next block of audio samples, given input samples aInput
    * (the mixed data for input 0).
    * aInput is guaranteed to have float sample format (if it has samples at all)
-   * and to have been resampled to IdealAudioRate(), and to have exactly
-   * WEBAUDIO_BLOCK_SIZE samples.
+   * and to have been resampled to the sampling rate for the stream, and to have
+   * exactly WEBAUDIO_BLOCK_SIZE samples.
    * *aFinished is set to false by the caller. If the callee sets it to true,
    * we'll finish the stream and not call this again.
    */
   virtual void ProduceAudioBlock(AudioNodeStream* aStream,
                                  const AudioChunk& aInput,
                                  AudioChunk* aOutput,
                                  bool* aFinished)
   {
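
(For illustration: the contract documented above is easiest to see in a concrete engine. The sketch below is not part of this patch; GainEngine is a made-up name, and it assumes the AllocateAudioBlock() and AudioBlockCopyChannelWithScale() helpers declared elsewhere in this header. The real gain implementation lives in GainNode.cpp.)

    // Illustrative only: a trivial engine that scales its input, honoring the
    // ProduceAudioBlock() contract (float samples, already at the stream's
    // sampling rate, exactly WEBAUDIO_BLOCK_SIZE frames per block).
    class GainEngine : public AudioNodeEngine
    {
    public:
      GainEngine(dom::AudioNode* aNode, float aGain)
        : AudioNodeEngine(aNode), mGain(aGain) {}

      virtual void ProduceAudioBlock(AudioNodeStream* aStream,
                                     const AudioChunk& aInput,
                                     AudioChunk* aOutput,
                                     bool* aFinished) MOZ_OVERRIDE
      {
        if (aInput.IsNull()) {
          *aOutput = aInput; // silence in, silence out
          return;
        }
        AllocateAudioBlock(aInput.mChannelData.Length(), aOutput);
        for (uint32_t i = 0; i < aInput.mChannelData.Length(); ++i) {
          AudioBlockCopyChannelWithScale(
            static_cast<const float*>(aInput.mChannelData[i]), mGain,
            static_cast<float*>(const_cast<void*>(aOutput->mChannelData[i])));
        }
      }

    private:
      float mGain;
    };
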
--- a/content/media/AudioNodeStream.cpp
+++ b/content/media/AudioNodeStream.cpp
@@ -10,17 +10,19 @@
 #include "ThreeDPoint.h"
 
 using namespace mozilla::dom;
 
 namespace mozilla {
 
 /**
  * An AudioNodeStream produces a single audio track with ID
- * AUDIO_NODE_STREAM_TRACK_ID. This track has rate IdealAudioRate().
+ * AUDIO_NODE_STREAM_TRACK_ID. This track has rate AudioContext::sIdealAudioRate
+ * for regular audio contexts, and the rate requested by the web content
+ * for offline audio contexts.
  * Each chunk in the track is a single block of WEBAUDIO_BLOCK_SIZE samples.
  */
 static const int AUDIO_NODE_STREAM_TRACK_ID = 1;
 
 AudioNodeStream::~AudioNodeStream()
 {
   MOZ_COUNT_DTOR(AudioNodeStream);
 }
@@ -232,21 +234,21 @@ AudioNodeStream::SetChannelMixingParamet
 StreamBuffer::Track*
 AudioNodeStream::EnsureTrack()
 {
   StreamBuffer::Track* track = mBuffer.FindTrack(AUDIO_NODE_STREAM_TRACK_ID);
   if (!track) {
     nsAutoPtr<MediaSegment> segment(new AudioSegment());
     for (uint32_t j = 0; j < mListeners.Length(); ++j) {
       MediaStreamListener* l = mListeners[j];
-      l->NotifyQueuedTrackChanges(Graph(), AUDIO_NODE_STREAM_TRACK_ID, IdealAudioRate(), 0,
+      l->NotifyQueuedTrackChanges(Graph(), AUDIO_NODE_STREAM_TRACK_ID, mSampleRate, 0,
                                   MediaStreamListener::TRACK_EVENT_CREATED,
                                   *segment);
     }
-    track = &mBuffer.AddTrack(AUDIO_NODE_STREAM_TRACK_ID, IdealAudioRate(), 0, segment.forget());
+    track = &mBuffer.AddTrack(AUDIO_NODE_STREAM_TRACK_ID, mSampleRate, 0, segment.forget());
   }
   return track;
 }
 
 bool
 AudioNodeStream::AllInputsFinished() const
 {
   uint32_t inputCount = mInputs.Length();
@@ -432,17 +434,17 @@ AudioNodeStream::ProduceOutput(GraphTime
   }
 
   for (uint32_t j = 0; j < mListeners.Length(); ++j) {
     MediaStreamListener* l = mListeners[j];
     AudioChunk copyChunk = mLastChunks[0];
     AudioSegment tmpSegment;
     tmpSegment.AppendAndConsumeChunk(&copyChunk);
     l->NotifyQueuedTrackChanges(Graph(), AUDIO_NODE_STREAM_TRACK_ID,
-                                IdealAudioRate(), segment->GetDuration(), 0,
+                                mSampleRate, segment->GetDuration(), 0,
                                 tmpSegment);
   }
 }
 
 TrackTicks
 AudioNodeStream::GetCurrentPosition()
 {
   return EnsureTrack()->Get<AudioSegment>()->GetDuration();
@@ -458,15 +460,15 @@ AudioNodeStream::FinishOutput()
   StreamBuffer::Track* track = EnsureTrack();
   track->SetEnded();
   FinishOnGraphThread();
 
   for (uint32_t j = 0; j < mListeners.Length(); ++j) {
     MediaStreamListener* l = mListeners[j];
     AudioSegment emptySegment;
     l->NotifyQueuedTrackChanges(Graph(), AUDIO_NODE_STREAM_TRACK_ID,
-                                IdealAudioRate(),
+                                mSampleRate,
                                 track->GetSegment()->GetDuration(),
                                 MediaStreamListener::TRACK_EVENT_ENDED, emptySegment);
   }
 }
 
 }
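
(For illustration: the per-stream rate becomes observable through the NotifyQueuedTrackChanges() calls above, which now report mSampleRate instead of the global IdealAudioRate(). The listener below is hypothetical, assuming only the signature visible in those call sites.)

    // Hypothetical listener; the signature mirrors the calls above.
    class LoggingListener : public MediaStreamListener
    {
    public:
      virtual void NotifyQueuedTrackChanges(MediaStreamGraph* aGraph,
                                            TrackID aID, TrackRate aTrackRate,
                                            TrackTicks aTrackOffset,
                                            uint32_t aTrackEvents,
                                            const MediaSegment& aQueuedMedia)
      {
        // With this patch, aTrackRate is the stream's mSampleRate (e.g. the
        // rate an OfflineAudioContext requested) rather than IdealAudioRate().
        if (aTrackEvents & TRACK_EVENT_CREATED) {
          printf("track %d created at %d Hz\n", int(aID), int(aTrackRate));
        }
      }
    };
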
--- a/content/media/AudioNodeStream.h
+++ b/content/media/AudioNodeStream.h
@@ -42,24 +42,27 @@ public:
   enum { AUDIO_TRACK = 1 };
 
   typedef nsAutoTArray<AudioChunk, 1> OutputChunks;
 
   /**
    * Transfers ownership of aEngine to the new AudioNodeStream.
    */
   AudioNodeStream(AudioNodeEngine* aEngine,
-                  MediaStreamGraph::AudioNodeStreamKind aKind)
+                  MediaStreamGraph::AudioNodeStreamKind aKind,
+                  TrackRate aSampleRate)
     : ProcessedMediaStream(nullptr),
       mEngine(aEngine),
+      mSampleRate(aSampleRate),
       mKind(aKind),
       mNumberOfInputChannels(2),
       mMarkAsFinishedAfterThisBlock(false),
       mAudioParamStream(false)
   {
+    MOZ_ASSERT(NS_IsMainThread());
     mChannelCountMode = dom::ChannelCountMode::Max;
     mChannelInterpretation = dom::ChannelInterpretation::Speakers;
     // AudioNodes are always producing data
     mHasCurrentData = true;
     MOZ_COUNT_CTOR(AudioNodeStream);
   }
   ~AudioNodeStream();
 
@@ -103,27 +106,30 @@ public:
   }
   const OutputChunks& LastChunks() const
   {
     return mLastChunks;
   }
 
   // Any thread
   AudioNodeEngine* Engine() { return mEngine; }
+  TrackRate SampleRate() const { return mSampleRate; }
 
 protected:
   void FinishOutput();
 
   StreamBuffer::Track* EnsureTrack();
   void ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex);
 
   // The engine that will generate output for this node.
   nsAutoPtr<AudioNodeEngine> mEngine;
   // The last block produced by this node.
   OutputChunks mLastChunks;
+  // The stream's sampling rate
+  const TrackRate mSampleRate;
   // Whether this is an internal or external stream
   MediaStreamGraph::AudioNodeStreamKind mKind;
   // The number of input channels that this stream requires. 0 means don't care.
   uint32_t mNumberOfInputChannels;
   // The mixing modes
   dom::ChannelCountMode mChannelCountMode;
   dom::ChannelInterpretation mChannelInterpretation;
   // Whether the stream should be marked as finished as soon
--- a/content/media/MediaStreamGraph.cpp
+++ b/content/media/MediaStreamGraph.cpp
@@ -904,38 +904,38 @@ MediaStreamGraphImpl::EnsureNextIteratio
   mNeedAnotherIteration = true;
   if (mWaitState == WAITSTATE_WAITING_INDEFINITELY) {
     mWaitState = WAITSTATE_WAKING_UP;
     aLock.Notify();
   }
 }
 
 static GraphTime
-RoundUpToAudioBlock(GraphTime aTime)
+RoundUpToAudioBlock(TrackRate aSampleRate, GraphTime aTime)
 {
-  TrackRate rate = IdealAudioRate();
-  int64_t ticksAtIdealRate = (aTime*rate) >> MEDIA_TIME_FRAC_BITS;
+  int64_t ticksAtRate = (aTime*aSampleRate) >> MEDIA_TIME_FRAC_BITS;
   // Round up to nearest block boundary
-  int64_t blocksAtIdealRate =
-    (ticksAtIdealRate + (WEBAUDIO_BLOCK_SIZE - 1)) >>
+  int64_t blocksAtRate =
+    (ticksAtRate + (WEBAUDIO_BLOCK_SIZE - 1)) >>
     WEBAUDIO_BLOCK_SIZE_BITS;
   // Round up to nearest MediaTime unit
   return
-    ((((blocksAtIdealRate + 1)*WEBAUDIO_BLOCK_SIZE) << MEDIA_TIME_FRAC_BITS)
-     + rate - 1)/rate;
+    ((((blocksAtRate + 1)*WEBAUDIO_BLOCK_SIZE) << MEDIA_TIME_FRAC_BITS)
+     + aSampleRate - 1)/aSampleRate;
 }
 
 void
 MediaStreamGraphImpl::ProduceDataForStreamsBlockByBlock(uint32_t aStreamIndex,
+                                                        TrackRate aSampleRate,
                                                         GraphTime aFrom,
                                                         GraphTime aTo)
 {
   GraphTime t = aFrom;
   while (t < aTo) {
-    GraphTime next = RoundUpToAudioBlock(t + 1);
+    GraphTime next = RoundUpToAudioBlock(aSampleRate, t + 1);
     for (uint32_t i = aStreamIndex; i < mStreams.Length(); ++i) {
       nsRefPtr<ProcessedMediaStream> ps = mStreams[i]->AsProcessedStream();
       if (ps) {
         ps->ProduceOutput(t, next);
       }
     }
     t = next;
   }
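
(For illustration: a self-contained restatement of the rounding helper above, with the tree's constants inlined. WEBAUDIO_BLOCK_SIZE is 128, and MEDIA_TIME_FRAC_BITS is assumed to be 20, i.e. one second is 1 << 20 MediaTime units; main() is only a harness.)

    #include <cstdint>
    #include <cstdio>

    typedef int64_t GraphTime;
    typedef int32_t TrackRate;

    static const int MEDIA_TIME_FRAC_BITS = 20;      // assumed: 1 s == 1 << 20 units
    static const int64_t WEBAUDIO_BLOCK_SIZE = 128;  // 1 << WEBAUDIO_BLOCK_SIZE_BITS
    static const int WEBAUDIO_BLOCK_SIZE_BITS = 7;

    static GraphTime RoundUpToAudioBlock(TrackRate aSampleRate, GraphTime aTime)
    {
      int64_t ticksAtRate = (aTime * aSampleRate) >> MEDIA_TIME_FRAC_BITS;
      // Round up to the next block boundary, advance one more block (the +1),
      // then convert back to MediaTime units with a rounding-up division.
      int64_t blocksAtRate =
        (ticksAtRate + (WEBAUDIO_BLOCK_SIZE - 1)) >> WEBAUDIO_BLOCK_SIZE_BITS;
      return ((((blocksAtRate + 1) * WEBAUDIO_BLOCK_SIZE) << MEDIA_TIME_FRAC_BITS)
              + aSampleRate - 1) / aSampleRate;
    }

    int main()
    {
      // One second at 44100 Hz is 44100 ticks, i.e. ~344.5 blocks of 128
      // samples; the helper rounds up to block 346 (345 + 1) and returns the
      // corresponding GraphTime.
      GraphTime oneSecond = GraphTime(1) << MEDIA_TIME_FRAC_BITS;
      printf("%lld\n", (long long)RoundUpToAudioBlock(44100, oneSecond));
      return 0;
    }
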
@@ -977,18 +977,31 @@ MediaStreamGraphImpl::RunThread()
       for (uint32_t j = 0; j < messages.Length(); ++j) {
         messages[j]->Run();
       }
     }
     messageQueue.Clear();
 
     UpdateStreamOrder();
 
+    // Find the sampling rate that we need to use for non-realtime graphs.
+    TrackRate sampleRate = IdealAudioRate();
+    if (!mRealtime) {
+      for (uint32_t i = 0; i < mStreams.Length(); ++i) {
+        AudioNodeStream* n = mStreams[i]->AsAudioNodeStream();
+        if (n) {
+          // We know that the rest of the streams will run at the same rate.
+          sampleRate = n->SampleRate();
+          break;
+        }
+      }
+    }
+
     GraphTime endBlockingDecisions =
-      RoundUpToAudioBlock(mCurrentTime + MillisecondsToMediaTime(AUDIO_TARGET_MS));
+      RoundUpToAudioBlock(sampleRate, mCurrentTime + MillisecondsToMediaTime(AUDIO_TARGET_MS));
     bool ensureNextIteration = false;
 
     // Grab pending stream input.
     for (uint32_t i = 0; i < mStreams.Length(); ++i) {
       SourceMediaStream* is = mStreams[i]->AsSourceStream();
       if (is) {
         UpdateConsumptionState(is);
         ExtractPendingInput(is, endBlockingDecisions, &ensureNextIteration);
@@ -1007,19 +1020,30 @@ MediaStreamGraphImpl::RunThread()
     // Figure out what each stream wants to do
     for (uint32_t i = 0; i < mStreams.Length(); ++i) {
       MediaStream* stream = mStreams[i];
       if (!doneAllProducing && !stream->IsFinishedOnGraphThread()) {
         ProcessedMediaStream* ps = stream->AsProcessedStream();
         if (ps) {
           AudioNodeStream* n = stream->AsAudioNodeStream();
           if (n) {
+#ifdef DEBUG
+            // Verify that the sampling rate for all of the following streams is the same
+            for (uint32_t j = i + 1; j < mStreams.Length(); ++j) {
+              AudioNodeStream* nextStream = mStreams[j]->AsAudioNodeStream();
+              if (nextStream) {
+                MOZ_ASSERT(n->SampleRate() == nextStream->SampleRate(),
+                           "All AudioNodeStreams in the graph must have the same sampling rate");
+              }
+            }
+#endif
             // Since an AudioNodeStream is present, go ahead and
             // produce audio block by block for all the rest of the streams.
-            ProduceDataForStreamsBlockByBlock(i, prevComputedTime, mStateComputedTime);
+            ProduceDataForStreamsBlockByBlock(i, n->SampleRate(), prevComputedTime, mStateComputedTime);
+            ticksProcessed += TimeToTicksRoundDown(n->SampleRate(), mStateComputedTime - prevComputedTime);
             doneAllProducing = true;
           } else {
             ps->ProduceOutput(prevComputedTime, mStateComputedTime);
             NS_ASSERTION(stream->mBuffer.GetEnd() >=
                          GraphTimeToStreamTime(stream, mStateComputedTime),
                        "Stream did not produce enough data");
           }
         }
@@ -1037,17 +1061,16 @@ MediaStreamGraphImpl::RunThread()
         UpdateBufferSufficiencyState(is);
       }
       GraphTime end;
       if (!stream->mBlocked.GetAt(mCurrentTime, &end) || end < GRAPH_TIME_MAX) {
         allBlockedForever = false;
       }
     }
     if (!mRealtime) {
-      ticksProcessed += TimeToTicksRoundDown(IdealAudioRate(), mStateComputedTime - prevComputedTime);
       // Terminate processing if we've produced enough non-realtime ticks.
       if (ticksProcessed >= mNonRealtimeTicksToProcess) {
         // Wait indefinitely when we've processed enough non-realtime ticks.
         // We'll be woken up when the graph shuts down.
         MonitorAutoLock lock(mMonitor);
         PrepareUpdatesToMainThreadState();
         mWaitState = WAITSTATE_WAITING_INDEFINITELY;
         mMonitor.Wait(PR_INTERVAL_NO_TIMEOUT);
@@ -2087,20 +2110,24 @@ MediaStreamGraph::CreateTrackUnionStream
   MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(this);
   stream->SetGraphImpl(graph);
   graph->AppendMessage(new CreateMessage(stream));
   return stream;
 }
 
 AudioNodeStream*
 MediaStreamGraph::CreateAudioNodeStream(AudioNodeEngine* aEngine,
-                                        AudioNodeStreamKind aKind)
+                                        AudioNodeStreamKind aKind,
+                                        TrackRate aSampleRate)
 {
   MOZ_ASSERT(NS_IsMainThread());
-  AudioNodeStream* stream = new AudioNodeStream(aEngine, aKind);
+  if (!aSampleRate) {
+    aSampleRate = aEngine->NodeMainThread()->Context()->SampleRate();
+  }
+  AudioNodeStream* stream = new AudioNodeStream(aEngine, aKind, aSampleRate);
   NS_ADDREF(stream);
   MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(this);
   stream->SetGraphImpl(graph);
   if (aEngine->HasNode()) {
     stream->SetChannelMixingParametersImpl(aEngine->NodeMainThread()->ChannelCount(),
                                            aEngine->NodeMainThread()->ChannelCountModeValue(),
                                            aEngine->NodeMainThread()->ChannelInterpretationValue());
   }
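
(For illustration: the fallback above dereferences NodeMainThread(), so an engine created with a null node, like the AudioParam helper stream later in this patch, must be given a rate explicitly. CreateHelperStream below is a stand-in for such callers, not a function in the tree.)

    // Illustrative only; not a function in this patch.
    static void
    CreateHelperStream(MediaStreamGraph* aGraph, AudioNodeEngine* aEngine,
                       dom::AudioContext* aContext)
    {
      // aEngine was built with a null AudioNode, so the 0 default would
      // dereference a null NodeMainThread(); pass the context's rate instead.
      nsRefPtr<AudioNodeStream> helper =
        aGraph->CreateAudioNodeStream(aEngine,
                                      MediaStreamGraph::INTERNAL_STREAM,
                                      aContext->SampleRate());

      // For an engine wrapping a real node, the argument can be omitted, and
      // CreateAudioNodeStream() reads the rate from the node's context:
      //   aGraph->CreateAudioNodeStream(aEngine, MediaStreamGraph::EXTERNAL_STREAM);
    }
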
--- a/content/media/MediaStreamGraph.h
+++ b/content/media/MediaStreamGraph.h
@@ -924,20 +924,23 @@ public:
    */
   ProcessedMediaStream* CreateTrackUnionStream(DOMMediaStream* aWrapper);
   // Internal AudioNodeStreams can only pass their output to another
   // AudioNode, whereas external AudioNodeStreams can pass their output
   // to an nsAudioStream for playback.
   enum AudioNodeStreamKind { INTERNAL_STREAM, EXTERNAL_STREAM };
   /**
    * Create a stream that will process audio for an AudioNode.
-   * Takes ownership of aEngine.
+   * Takes ownership of aEngine.  aSampleRate is the sampling rate used
+   * for the stream.  If 0 is passed, the sampling rate of the engine's
+   * node is used instead.
    */
   AudioNodeStream* CreateAudioNodeStream(AudioNodeEngine* aEngine,
-                                         AudioNodeStreamKind aKind);
+                                         AudioNodeStreamKind aKind,
+                                         TrackRate aSampleRate = 0);
   /**
    * Returns the number of graph updates sent. This can be used to track
    * whether a given update has been processed by the graph thread and reflected
    * in main-thread stream state.
    */
   int64_t GetCurrentGraphUpdateIndex() { return mGraphUpdatesSent; }
   /**
    * Start processing non-realtime for a specific number of ticks.
--- a/content/media/MediaStreamGraphImpl.h
+++ b/content/media/MediaStreamGraphImpl.h
@@ -259,16 +259,17 @@ public:
                            GraphTime* aEnd);
   /**
    * Produce data for all streams >= aStreamIndex for the given time interval.
    * Advances block by block, each iteration producing data for all streams
    * for a single block.
    * This is called whenever we have an AudioNodeStream in the graph.
    */
   void ProduceDataForStreamsBlockByBlock(uint32_t aStreamIndex,
+                                         TrackRate aSampleRate,
                                          GraphTime aFrom,
                                          GraphTime aTo);
   /**
    * Returns true if aStream will underrun at aTime for its own playback.
    * aEndBlockingDecisions is when we plan to stop making blocking decisions.
    * *aEnd will be reduced to the first time in the future to recompute these
    * decisions.
    */
--- a/content/media/webaudio/AudioContext.cpp
+++ b/content/media/webaudio/AudioContext.cpp
@@ -46,17 +46,18 @@ NS_INTERFACE_MAP_END_INHERITING(nsDOMEve
 
 static uint8_t gWebAudioOutputKey;
 
 AudioContext::AudioContext(nsPIDOMWindow* aWindow,
                            bool aIsOffline,
                            uint32_t aNumberOfChannels,
                            uint32_t aLength,
                            float aSampleRate)
-  : mDestination(new AudioDestinationNode(this, aIsOffline,
+  : mSampleRate(aIsOffline ? aSampleRate : IdealAudioRate())
+  , mDestination(new AudioDestinationNode(this, aIsOffline,
                                           aNumberOfChannels,
                                           aLength, aSampleRate))
   , mIsOffline(aIsOffline)
 {
   // Actually play audio
   mDestination->Stream()->AddAudioOutput(&gWebAudioOutputKey);
   nsDOMEventTargetHelper::BindToOwner(aWindow);
   SetIsDOMBinding();
--- a/content/media/webaudio/AudioContext.h
+++ b/content/media/webaudio/AudioContext.h
@@ -100,17 +100,17 @@ public:
 
   AudioDestinationNode* Destination() const
   {
     return mDestination;
   }
 
   float SampleRate() const
   {
-    return float(IdealAudioRate());
+    return mSampleRate;
   }
 
   double CurrentTime() const;
 
   AudioListener* Listener();
 
   already_AddRefed<AudioBufferSourceNode> CreateBufferSource();
 
@@ -198,16 +198,19 @@ public:
   JSContext* GetJSContext() const;
 
 private:
   void RemoveFromDecodeQueue(WebAudioDecodeJob* aDecodeJob);
 
   friend struct ::mozilla::WebAudioDecodeJob;
 
 private:
+  // Note that it's important for mSampleRate to be initialized before
+  // mDestination, as mDestination's constructor needs to access it!
+  const float mSampleRate;
   nsRefPtr<AudioDestinationNode> mDestination;
   nsRefPtr<AudioListener> mListener;
   MediaBufferDecoder mDecoder;
   nsTArray<nsAutoPtr<WebAudioDecodeJob> > mDecodeJobs;
   // Two hashsets containing all the PannerNodes and AudioBufferSourceNodes,
   // to compute the doppler shift, and also to stop AudioBufferSourceNodes.
   // These are all weak pointers.
   nsTHashtable<nsPtrHashKey<PannerNode> > mPannerNodes;
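
(For illustration: the comment above relies on the C++ rule that members are initialized in declaration order, not in the order they appear in the constructor's initializer list. Widget and Gadget below are made-up names for a standalone demonstration.)

    #include <cstdio>

    struct Gadget {
      explicit Gadget(float aRate) { printf("Gadget sees %.0f Hz\n", aRate); }
    };

    struct Widget {
      // Declared first, so initialized first; this is the only reason it is
      // safe to hand mRate to mGadget's constructor below. Swapping these two
      // declarations would pass Gadget an uninitialized float, even though the
      // initializer list names mRate first.
      const float mRate;
      Gadget mGadget;

      Widget(bool aOffline, float aRequested)
        : mRate(aOffline ? aRequested : 48000.0f)  // mirrors AudioContext's ternary
        , mGadget(mRate)
      {}
    };

    int main() { Widget w(true, 22050.0f); return 0; }
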
--- a/content/media/webaudio/AudioParam.cpp
+++ b/content/media/webaudio/AudioParam.cpp
@@ -93,17 +93,20 @@ AudioParam::DisconnectFromGraphAndDestro
 MediaStream*
 AudioParam::Stream()
 {
   if (mStream) {
     return mStream;
   }
 
   AudioNodeEngine* engine = new AudioNodeEngine(nullptr);
-  nsRefPtr<AudioNodeStream> stream = mNode->Context()->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
+  nsRefPtr<AudioNodeStream> stream =
+    mNode->Context()->Graph()->CreateAudioNodeStream(engine,
+                                                     MediaStreamGraph::INTERNAL_STREAM,
+                                                     Node()->Context()->SampleRate());
 
   // Force the input to have only one channel, and make it down-mix using
   // the speaker rules if needed.
   stream->SetChannelMixingParametersImpl(1, ChannelCountMode::Explicit, ChannelInterpretation::Speakers);
   // Mark as an AudioParam helper stream
   stream->SetAudioParamHelperStream();
 
   mStream = stream.forget();