bug 864171 move "extra" time accounting for AudioContext with no nodes to destination stream r=padenot
author Karl Tomlinson <karlt+@karlt.net>
Wed, 23 Sep 2015 19:05:46 +1200
changeset 264575 9e2297e3f3d9b3aabfa20f75b870dd3055aab18a
parent 264574 3c826002cabf4930cf13d4fa9c3bf7d9ea03e70f
child 264576 edabd5743aebba3bcdb7dbeab48f1538e38dcd4f
push id 65677
push user ktomlinson@mozilla.com
push date Sun, 27 Sep 2015 21:02:16 +0000
treeherder mozilla-inbound@c7f028fe5422 [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers padenot
bugs 864171
milestone 44.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
bug 864171 move "extra" time accounting for AudioContext with no nodes to destination stream r=padenot
dom/media/webaudio/AudioContext.cpp
dom/media/webaudio/AudioContext.h
dom/media/webaudio/AudioDestinationNode.cpp
dom/media/webaudio/AudioDestinationNode.h
dom/media/webaudio/AudioNodeStream.cpp
dom/media/webaudio/AudioNodeStream.h
--- a/dom/media/webaudio/AudioContext.cpp
+++ b/dom/media/webaudio/AudioContext.cpp
@@ -664,18 +664,18 @@ AudioContext::DestinationStream() const
   }
   return nullptr;
 }
 
 double
 AudioContext::CurrentTime() const
 {
   MediaStream* stream = Destination()->Stream();
-  return StreamTimeToDOMTime(stream->
-                             StreamTimeToSeconds(stream->GetCurrentTime()));
+  return stream->StreamTimeToSeconds(stream->GetCurrentTime() +
+                                     Destination()->ExtraCurrentTime());
 }
 
 void
 AudioContext::Shutdown()
 {
   mIsShutDown = true;
 
   if (!mIsOffline) {
@@ -1089,22 +1089,16 @@ NS_IMETHODIMP
 AudioContext::CollectReports(nsIHandleReportCallback* aHandleReport,
                              nsISupports* aData, bool aAnonymize)
 {
   int64_t amount = SizeOfIncludingThis(MallocSizeOf);
   return MOZ_COLLECT_REPORT("explicit/webaudio/audiocontext", KIND_HEAP, UNITS_BYTES,
                             amount, "Memory used by AudioContext objects (Web Audio).");
 }
 
-double
-AudioContext::ExtraCurrentTime() const
-{
-  return static_cast<double>(mSampleRate) * mDestination->ExtraCurrentTime();
-}
-
 BasicWaveFormCache*
 AudioContext::GetBasicWaveFormCache()
 {
   MOZ_ASSERT(NS_IsMainThread());
   if (!mBasicWaveFormCache) {
     mBasicWaveFormCache = new BasicWaveFormCache(SampleRate());
   }
   return mBasicWaveFormCache;
--- a/dom/media/webaudio/AudioContext.h
+++ b/dom/media/webaudio/AudioContext.h
@@ -301,41 +301,32 @@ public:
 
   AudioChannel TestAudioChannelInAudioNodeStream();
 
   void RegisterNode(AudioNode* aNode);
   void UnregisterNode(AudioNode* aNode);
 
   double DOMTimeToStreamTime(double aTime) const
   {
-    return aTime - ExtraCurrentTime();
+    return aTime;
   }
 
   double StreamTimeToDOMTime(double aTime) const
   {
-    return aTime + ExtraCurrentTime();
+    return aTime;
   }
 
   void OnStateChanged(void* aPromise, AudioContextState aNewState);
 
   BasicWaveFormCache* GetBasicWaveFormCache();
 
   IMPL_EVENT_HANDLER(mozinterruptbegin)
   IMPL_EVENT_HANDLER(mozinterruptend)
 
 private:
-  /**
-   * Returns the amount of extra time added to the current time of the
-   * AudioDestinationNode's MediaStream to get this AudioContext's currentTime.
-   * Must be subtracted from all DOM API parameter times that are on the same
-   * timeline as AudioContext's currentTime to get times we can pass to the
-   * MediaStreamGraph.
-   */
-  double ExtraCurrentTime() const;
-
   void RemoveFromDecodeQueue(WebAudioDecodeJob* aDecodeJob);
   void ShutdownDecoder();
 
   size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
   NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                             nsISupports* aData, bool aAnonymize) override;
 
   friend struct ::mozilla::WebAudioDecodeJob;
--- a/dom/media/webaudio/AudioDestinationNode.cpp
+++ b/dom/media/webaudio/AudioDestinationNode.cpp
@@ -330,17 +330,16 @@ AudioDestinationNode::AudioDestinationNo
                                            uint32_t aNumberOfChannels,
                                            uint32_t aLength, float aSampleRate)
   : AudioNode(aContext, aIsOffline ? aNumberOfChannels : 2,
               ChannelCountMode::Explicit, ChannelInterpretation::Speakers)
   , mFramesToProduce(aLength)
   , mAudioChannel(AudioChannel::Normal)
   , mIsOffline(aIsOffline)
   , mAudioChannelAgentPlaying(false)
-  , mExtraCurrentTime(0)
   , mExtraCurrentTimeSinceLastStartedBlocking(0)
   , mExtraCurrentTimeUpdatedSinceLastStableState(false)
   , mCaptured(false)
 {
   MediaStreamGraph* graph = aIsOffline ?
                             MediaStreamGraph::CreateNonRealtimeInstance(aSampleRate) :
                             MediaStreamGraph::GetInstance(MediaStreamGraph::AUDIO_THREAD_DRIVER, aChannel);
   AudioNodeEngine* engine = aIsOffline ?
@@ -673,17 +672,17 @@ AudioDestinationNode::ExtraCurrentTime()
     mExtraCurrentTimeUpdatedSinceLastStableState = true;
     // Round to nearest processing block.
     double seconds =
       (TimeStamp::Now() - mStartedBlockingDueToBeingOnlyNode).ToSeconds();
     mExtraCurrentTimeSinceLastStartedBlocking = WEBAUDIO_BLOCK_SIZE *
       StreamTime(seconds * Context()->SampleRate() / WEBAUDIO_BLOCK_SIZE + 0.5);
     ScheduleStableStateNotification();
   }
-  return mExtraCurrentTime + mExtraCurrentTimeSinceLastStartedBlocking;
+  return mExtraCurrentTimeSinceLastStartedBlocking;
 }
 
 void
 AudioDestinationNode::SetIsOnlyNodeForContext(bool aIsOnlyNode)
 {
   if (!mStartedBlockingDueToBeingOnlyNode.IsNull() == aIsOnlyNode) {
     // Nothing changed.
     return;
@@ -706,19 +705,18 @@ AudioDestinationNode::SetIsOnlyNodeForCo
     mStream->Suspend();
     mStartedBlockingDueToBeingOnlyNode = TimeStamp::Now();
     // Don't do an update of mExtraCurrentTimeSinceLastStartedBlocking until the next stable state.
     mExtraCurrentTimeUpdatedSinceLastStableState = true;
     ScheduleStableStateNotification();
   } else {
     // Force update of mExtraCurrentTimeSinceLastStartedBlocking if necessary
     ExtraCurrentTime();
-    mExtraCurrentTime += mExtraCurrentTimeSinceLastStartedBlocking;
+    mStream->AdvanceAndResume(mExtraCurrentTimeSinceLastStartedBlocking);
     mExtraCurrentTimeSinceLastStartedBlocking = 0;
-    mStream->Resume();
     mStartedBlockingDueToBeingOnlyNode = TimeStamp();
   }
 }
 
 void
 AudioDestinationNode::InputMuted(bool aMuted)
 {
   MOZ_ASSERT(Context() && !Context()->IsOffline());
--- a/dom/media/webaudio/AudioDestinationNode.h
+++ b/dom/media/webaudio/AudioDestinationNode.h
@@ -107,17 +107,16 @@ private:
   nsRefPtr<Promise> mOfflineRenderingPromise;
 
   // Audio Channel Type.
   AudioChannel mAudioChannel;
   bool mIsOffline;
   bool mAudioChannelAgentPlaying;
 
   TimeStamp mStartedBlockingDueToBeingOnlyNode;
-  StreamTime mExtraCurrentTime;
   StreamTime mExtraCurrentTimeSinceLastStartedBlocking;
   bool mExtraCurrentTimeUpdatedSinceLastStableState;
   bool mCaptured;
 };
 
 } // namespace dom
 } // namespace mozilla
 
--- a/dom/media/webaudio/AudioNodeStream.cpp
+++ b/dom/media/webaudio/AudioNodeStream.cpp
@@ -372,16 +372,41 @@ AudioNodeStream::ComputedNumberOfChannel
     return std::min(aInputChannelCount, mNumberOfInputChannels);
   default:
   case ChannelCountMode::Max:
     // Nothing to do here, just shut up the compiler warning.
     return aInputChannelCount;
   }
 }
 
+class AudioNodeStream::AdvanceAndResumeMessage final : public ControlMessage {
+public:
+  AdvanceAndResumeMessage(AudioNodeStream* aStream, StreamTime aAdvance) :
+    ControlMessage(aStream), mAdvance(aAdvance) {}
+  virtual void Run() override
+  {
+    auto ns = static_cast<AudioNodeStream*>(mStream);
+    ns->mBufferStartTime -= mAdvance;
+
+    StreamBuffer::Track* track = ns->EnsureTrack(AUDIO_TRACK);
+    track->Get<AudioSegment>()->AppendNullData(mAdvance);
+
+    ns->GraphImpl()->DecrementSuspendCount(mStream);
+  }
+private:
+  StreamTime mAdvance;
+};
+
+void
+AudioNodeStream::AdvanceAndResume(StreamTime aAdvance)
+{
+  mMainThreadCurrentTime += aAdvance;
+  GraphImpl()->AppendMessage(new AdvanceAndResumeMessage(this, aAdvance));
+}
+
 void
 AudioNodeStream::ObtainInputBlock(AudioBlock& aTmpChunk,
                                   uint32_t aPortIndex)
 {
   uint32_t inputCount = mInputs.Length();
   uint32_t outputChannelCount = 1;
   nsAutoTArray<const AudioBlock*,250> inputChunks;
   for (uint32_t i = 0; i < inputCount; ++i) {
--- a/dom/media/webaudio/AudioNodeStream.h
+++ b/dom/media/webaudio/AudioNodeStream.h
@@ -101,16 +101,24 @@ public:
   }
 
   void SetAudioParamHelperStream()
   {
     MOZ_ASSERT(!mAudioParamStream, "Can only do this once");
     mAudioParamStream = true;
   }
 
+  /*
+   * Resume stream after updating its concept of current time by aAdvance.
+   * Main thread.  Used only from AudioDestinationNode when resuming a stream
+   * suspended to save running the MediaStreamGraph when there are no other
+   * nodes in the AudioContext.
+   */
+  void AdvanceAndResume(StreamTime aAdvance);
+
   virtual AudioNodeStream* AsAudioNodeStream() override { return this; }
   virtual void AddInput(MediaInputPort* aPort) override;
   virtual void RemoveInput(MediaInputPort* aPort) override;
 
   // Graph thread only
   void SetStreamTimeParameterImpl(uint32_t aIndex, MediaStream* aRelativeToStream,
                                   double aStreamTime);
   void SetChannelMixingParametersImpl(uint32_t aNumberOfChannels,
@@ -180,16 +188,18 @@ public:
    * inactive, or an active input is removed, or the stream finishes.  If the
    * stream is now inactive, then mInputChunks will be cleared and mLastChunks
    * will be set to null.  ProcessBlock() will not be called on the engine
    * again until SetActive() is called.
    */
   void CheckForInactive();
 
 protected:
+  class AdvanceAndResumeMessage;
+
   virtual void DestroyImpl() override;
 
   void AdvanceOutputSegment();
   void FinishOutput();
   void AccumulateInputChunk(uint32_t aInputIndex, const AudioBlock& aChunk,
                             AudioBlock* aBlock,
                             nsTArray<float>* aDownmixBuffer);
   void UpMixDownMixChunk(const AudioBlock* aChunk, uint32_t aOutputChannelCount,