Bug 952893. Part 1: Block the AudioDestinationNode when it's the only node in the AudioContext. r=padenot
☠☠ backed out by 27c8e496895a ☠☠
author Robert O'Callahan <robert@ocallahan.org>
Thu, 16 Jan 2014 00:08:20 +1300
changeset 182774 15fc5d1cb10ea0e9300cef0e88895a34640ddb8c
parent 182773 41488ab273c90a222025d050a7a5a48a596a0b74
child 182775 f501606dc1d36e1ad1dd9b239ca46d3da084ce3b
push id 462
push user raliiev@mozilla.com
push date Tue, 22 Apr 2014 00:22:30 +0000
treeherder mozilla-release@ac5db8c74ac0 [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers padenot
bugs 952893
milestone 29.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 952893. Part 1: Block the AudioDestinationNode when it's the only node in the AudioContext. r=padenot
content/media/AudioNodeStream.cpp
content/media/AudioNodeStream.h
content/media/webaudio/AudioBufferSourceNode.cpp
content/media/webaudio/AudioContext.cpp
content/media/webaudio/AudioContext.h
content/media/webaudio/AudioDestinationNode.cpp
content/media/webaudio/AudioDestinationNode.h
content/media/webaudio/AudioNode.cpp
content/media/webaudio/OscillatorNode.cpp
content/media/webaudio/moz.build
--- a/content/media/AudioNodeStream.cpp
+++ b/content/media/AudioNodeStream.cpp
@@ -5,16 +5,17 @@
 
 #include "AudioNodeStream.h"
 
 #include "MediaStreamGraphImpl.h"
 #include "AudioNodeEngine.h"
 #include "ThreeDPoint.h"
 #include "AudioChannelFormat.h"
 #include "AudioParamTimeline.h"
+#include "AudioContext.h"
 
 using namespace mozilla::dom;
 
 namespace mozilla {
 
 /**
  * An AudioNodeStream produces a single audio track with ID
  * AUDIO_TRACK. This track has rate AudioContext::sIdealAudioRate
@@ -25,17 +26,17 @@ namespace mozilla {
  */
 
 AudioNodeStream::~AudioNodeStream()
 {
   MOZ_COUNT_DTOR(AudioNodeStream);
 }
 
 void
-AudioNodeStream::SetStreamTimeParameter(uint32_t aIndex, MediaStream* aRelativeToStream,
+AudioNodeStream::SetStreamTimeParameter(uint32_t aIndex, AudioContext* aContext,
                                         double aStreamTime)
 {
   class Message : public ControlMessage {
   public:
     Message(AudioNodeStream* aStream, uint32_t aIndex, MediaStream* aRelativeToStream,
             double aStreamTime)
       : ControlMessage(aStream), mStreamTime(aStreamTime),
         mRelativeToStream(aRelativeToStream), mIndex(aIndex) {}
@@ -45,17 +46,19 @@ AudioNodeStream::SetStreamTimeParameter(
           SetStreamTimeParameterImpl(mIndex, mRelativeToStream, mStreamTime);
     }
     double mStreamTime;
     MediaStream* mRelativeToStream;
     uint32_t mIndex;
   };
 
   MOZ_ASSERT(this);
-  GraphImpl()->AppendMessage(new Message(this, aIndex, aRelativeToStream, aStreamTime));
+  GraphImpl()->AppendMessage(new Message(this, aIndex,
+      aContext->DestinationStream(),
+      aStreamTime - aContext->ExtraCurrentTime()));
 }
 
 void
 AudioNodeStream::SetStreamTimeParameterImpl(uint32_t aIndex, MediaStream* aRelativeToStream,
                                             double aStreamTime)
 {
   TrackTicks ticks =
       WebAudioUtils::ConvertDestinationStreamTimeToSourceStreamTime(
--- a/content/media/AudioNodeStream.h
+++ b/content/media/AudioNodeStream.h
@@ -11,16 +11,17 @@
 #include "AudioSegment.h"
 
 namespace mozilla {
 
 namespace dom {
 struct ThreeDPoint;
 class AudioParamTimeline;
 class DelayNodeEngine;
+class AudioContext;
 }
 
 class ThreadSharedFloatArrayBufferList;
 class AudioNodeEngine;
 
 /**
  * An AudioNodeStream produces one audio track with ID AUDIO_TRACK.
  * The start time of the AudioTrack is aligned to the start time of the
@@ -28,16 +29,18 @@ class AudioNodeEngine;
  * samples.
  *
  * An AudioNodeStream has an AudioNodeEngine plugged into it that does the
  * actual audio processing. AudioNodeStream contains the glue code that
  * integrates audio processing with the MediaStreamGraph.
  */
 class AudioNodeStream : public ProcessedMediaStream {
 public:
+  typedef mozilla::dom::AudioContext AudioContext;
+
   enum { AUDIO_TRACK = 1 };
 
   typedef nsAutoTArray<AudioChunk, 1> OutputChunks;
 
   /**
    * Transfers ownership of aEngine to the new AudioNodeStream.
    */
   AudioNodeStream(AudioNodeEngine* aEngine,
@@ -61,17 +64,17 @@ public:
   }
   ~AudioNodeStream();
 
   // Control API
   /**
    * Sets a parameter that's a time relative to some stream's played time.
    * This time is converted to a time relative to this stream when it's set.
    */
-  void SetStreamTimeParameter(uint32_t aIndex, MediaStream* aRelativeToStream,
+  void SetStreamTimeParameter(uint32_t aIndex, AudioContext* aContext,
                               double aStreamTime);
   void SetDoubleParameter(uint32_t aIndex, double aValue);
   void SetInt32Parameter(uint32_t aIndex, int32_t aValue);
   void SetTimelineParameter(uint32_t aIndex, const dom::AudioParamTimeline& aValue);
   void SetThreeDPointParameter(uint32_t aIndex, const dom::ThreeDPoint& aValue);
   void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer);
   // This consumes the contents of aData.  aData will be emptied after this returns.
   void SetRawArrayData(nsTArray<float>& aData);
--- a/content/media/webaudio/AudioBufferSourceNode.cpp
+++ b/content/media/webaudio/AudioBufferSourceNode.cpp
@@ -527,17 +527,17 @@ AudioBufferSourceNode::Start(double aWhe
   // We can't send these parameters without a buffer because we don't know the
   // buffer's sample rate or length.
   if (mBuffer) {
     SendOffsetAndDurationParametersToStream(ns);
   }
 
   // Don't set parameter unnecessarily
   if (aWhen > 0.0) {
-    ns->SetStreamTimeParameter(START, Context()->DestinationStream(), aWhen);
+    ns->SetStreamTimeParameter(START, Context(), aWhen);
   }
 
   MarkActive();
 }
 
 void
 AudioBufferSourceNode::SendBufferParameterToStream(JSContext* aCx)
 {
@@ -611,18 +611,17 @@ AudioBufferSourceNode::Stop(double aWhen
   }
 
   AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get());
   if (!ns || !Context()) {
     // We've already stopped and had our stream shut down
     return;
   }
 
-  ns->SetStreamTimeParameter(STOP, Context()->DestinationStream(),
-                             std::max(0.0, aWhen));
+  ns->SetStreamTimeParameter(STOP, Context(), std::max(0.0, aWhen));
 }
 
 void
 AudioBufferSourceNode::NotifyMainThreadStateChanged()
 {
   if (mStream->IsFinished()) {
     class EndedEventDispatcher : public nsRunnable
     {
--- a/content/media/webaudio/AudioContext.cpp
+++ b/content/media/webaudio/AudioContext.cpp
@@ -79,27 +79,32 @@ float GetSampleRateForAudioContext(bool 
 AudioContext::AudioContext(nsPIDOMWindow* aWindow,
                            bool aIsOffline,
                            uint32_t aNumberOfChannels,
                            uint32_t aLength,
                            float aSampleRate)
   : nsDOMEventTargetHelper(aWindow)
   , mSampleRate(GetSampleRateForAudioContext(aIsOffline, aSampleRate))
   , mNumberOfChannels(aNumberOfChannels)
+  , mNodeCount(0)
   , mIsOffline(aIsOffline)
   , mIsStarted(!aIsOffline)
   , mIsShutDown(false)
 {
   aWindow->AddAudioContext(this);
 
   // Note: AudioDestinationNode needs an AudioContext that must already be
   // bound to the window.
   mDestination = new AudioDestinationNode(this, aIsOffline, aNumberOfChannels,
                                           aLength, aSampleRate);
   mDestination->Stream()->AddAudioOutput(&gWebAudioOutputKey);
+  // We skip calling SetIsOnlyNodeForContext during mDestination's constructor,
+  // because we can only call SetIsOnlyNodeForContext after mDestination has
+  // been set up.
+  mDestination->SetIsOnlyNodeForContext(true);
 }
 
 AudioContext::~AudioContext()
 {
   nsPIDOMWindow* window = GetOwner();
   if (window) {
     window->RemoveAudioContext(this);
   }
@@ -538,17 +543,18 @@ AudioContext::DestinationStream() const
     return Destination()->Stream();
   }
   return nullptr;
 }
 
 double
 AudioContext::CurrentTime() const
 {
-  return MediaTimeToSeconds(Destination()->Stream()->GetCurrentTime());
+  return MediaTimeToSeconds(Destination()->Stream()->GetCurrentTime()) +
+      Destination()->ExtraCurrentTime();
 }
 
 void
 AudioContext::Shutdown()
 {
   mIsShutDown = true;
 
   // We mute rather than suspending, because the delay between the ::Shutdown
@@ -584,16 +590,28 @@ void
 AudioContext::Resume()
 {
   MediaStream* ds = DestinationStream();
   if (ds) {
     ds->ChangeExplicitBlockerCount(-1);
   }
 }
 
+void
+AudioContext::UpdateNodeCount(int32_t aDelta)
+{
+  bool firstNode = mNodeCount == 0;
+  mNodeCount += aDelta;
+  MOZ_ASSERT(mNodeCount >= 0);
+  // mDestination may be null when we're destroying nodes unlinked by CC
+  if (!firstNode && mDestination) {
+    mDestination->SetIsOnlyNodeForContext(mNodeCount == 1);
+  }
+}
+
 JSContext*
 AudioContext::GetJSContext() const
 {
   MOZ_ASSERT(NS_IsMainThread());
 
   nsCOMPtr<nsIScriptGlobalObject> scriptGlobal =
     do_QueryInterface(GetParentObject());
   if (!scriptGlobal) {
@@ -674,10 +692,16 @@ NS_IMETHODIMP
 AudioContext::CollectReports(nsIHandleReportCallback* aHandleReport,
                              nsISupports* aData)
 {
   int64_t amount = SizeOfIncludingThis(MallocSizeOf);
   return MOZ_COLLECT_REPORT("explicit/webaudio/audiocontext", KIND_HEAP, UNITS_BYTES,
                             amount, "Memory used by AudioContext objects (Web Audio).");
 }
 
+double
+AudioContext::ExtraCurrentTime() const
+{
+  return mDestination->ExtraCurrentTime();
+}
+
 }
 }
--- a/content/media/webaudio/AudioContext.h
+++ b/content/media/webaudio/AudioContext.h
@@ -241,16 +241,22 @@ public:
   void Mute() const;
   void Unmute() const;
 
   JSContext* GetJSContext() const;
 
   AudioChannel MozAudioChannelType() const;
   void SetMozAudioChannelType(AudioChannel aValue, ErrorResult& aRv);
 
+  void UpdateNodeCount(int32_t aDelta);
+
+  // Returns the difference between CurrentTime() and the current time of the
+  // AudioDestinationNode's MediaStream.
+  double ExtraCurrentTime() const;
+
 private:
   void RemoveFromDecodeQueue(WebAudioDecodeJob* aDecodeJob);
   void ShutdownDecoder();
 
   size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
   NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                             nsISupports* aData);
 
@@ -267,16 +273,18 @@ private:
   // See RegisterActiveNode.  These will keep the AudioContext alive while it
   // is rendering and the window remains alive.
   nsTHashtable<nsRefPtrHashKey<AudioNode> > mActiveNodes;
   // Hashsets containing all the PannerNodes, to compute the doppler shift.
   // These are weak pointers.
   nsTHashtable<nsPtrHashKey<PannerNode> > mPannerNodes;
   // Number of channels passed in the OfflineAudioContext ctor.
   uint32_t mNumberOfChannels;
+  // Number of nodes that currently exist for this AudioContext
+  int32_t mNodeCount;
   bool mIsOffline;
   bool mIsStarted;
   bool mIsShutDown;
 };
 
 }
 }
 
--- a/content/media/webaudio/AudioDestinationNode.cpp
+++ b/content/media/webaudio/AudioDestinationNode.cpp
@@ -12,16 +12,18 @@
 #include "AudioNodeStream.h"
 #include "MediaStreamGraph.h"
 #include "OfflineAudioCompletionEvent.h"
 #include "nsIInterfaceRequestorUtils.h"
 #include "nsIDocShell.h"
 #include "nsIPermissionManager.h"
 #include "nsIScriptObjectPrincipal.h"
 #include "nsServiceManagerUtils.h"
+#include "nsIAppShell.h"
+#include "nsWidgetsCID.h"
 
 namespace mozilla {
 namespace dom {
 
 class OfflineDestinationNodeEngine : public AudioNodeEngine
 {
 public:
   typedef AutoFallibleTArray<nsAutoArrayPtr<float>, 2> InputChannels;
@@ -212,16 +214,18 @@ AudioDestinationNode::AudioDestinationNo
   : AudioNode(aContext,
               aIsOffline ? aNumberOfChannels : 2,
               ChannelCountMode::Explicit,
               ChannelInterpretation::Speakers)
   , mFramesToProduce(aLength)
   , mAudioChannel(AudioChannel::Normal)
   , mIsOffline(aIsOffline)
   , mHasFinished(false)
+  , mExtraCurrentTime(0)
+  , mExtraCurrentTimeUpdatedSinceLastStableState(false)
 {
   MediaStreamGraph* graph = aIsOffline ?
                             MediaStreamGraph::CreateNonRealtimeInstance() :
                             MediaStreamGraph::GetInstance();
   AudioNodeEngine* engine = aIsOffline ?
                             new OfflineDestinationNodeEngine(this, aNumberOfChannels,
                                                              aLength, aSampleRate) :
                             static_cast<AudioNodeEngine*>(new DestinationNodeEngine(this));
@@ -481,11 +485,67 @@ AudioDestinationNode::CreateAudioChannel
     docshell->GetIsActive(&isActive);
     mAudioChannelAgent->SetVisibilityState(isActive);
   }
 
   int32_t state = 0;
   mAudioChannelAgent->StartPlaying(&state);
   SetCanPlay(state == AudioChannelState::AUDIO_CHANNEL_STATE_NORMAL);
 }
+
+void
+AudioDestinationNode::NotifyStableState()
+{
+  mExtraCurrentTimeUpdatedSinceLastStableState = false;
+}
+
+static NS_DEFINE_CID(kAppShellCID, NS_APPSHELL_CID);
+
+void
+AudioDestinationNode::ScheduleStableStateNotification()
+{
+  nsCOMPtr<nsIAppShell> appShell = do_GetService(kAppShellCID);
+  if (appShell) {
+    nsCOMPtr<nsIRunnable> event =
+      NS_NewRunnableMethod(this, &AudioDestinationNode::NotifyStableState);
+    appShell->RunInStableState(event);
+  }
+}
+
+double
+AudioDestinationNode::ExtraCurrentTime()
+{
+  if (!mStartedBlockingDueToBeingOnlyNode.IsNull() &&
+      !mExtraCurrentTimeUpdatedSinceLastStableState) {
+    mExtraCurrentTimeUpdatedSinceLastStableState = true;
+    mExtraCurrentTimeSinceLastStartedBlocking =
+      (TimeStamp::Now() - mStartedBlockingDueToBeingOnlyNode).ToSeconds();
+    ScheduleStableStateNotification();
+  }
+  return mExtraCurrentTime + mExtraCurrentTimeSinceLastStartedBlocking;
+}
+
+void
+AudioDestinationNode::SetIsOnlyNodeForContext(bool aIsOnlyNode)
+{
+  if (!mStartedBlockingDueToBeingOnlyNode.IsNull() == aIsOnlyNode) {
+    return;
+  }
+
+  if (aIsOnlyNode) {
+    mStream->ChangeExplicitBlockerCount(1);
+    mStartedBlockingDueToBeingOnlyNode = TimeStamp::Now();
+    mExtraCurrentTimeSinceLastStartedBlocking = 0;
+    // Don't do an update of mExtraCurrentTimeSinceLastStartedBlocking until the next stable state.
+    mExtraCurrentTimeUpdatedSinceLastStableState = true;
+    ScheduleStableStateNotification();
+  } else {
+    // Force update of mExtraCurrentTimeSinceLastStartedBlocking if necessary
+    ExtraCurrentTime();
+    mExtraCurrentTime += mExtraCurrentTimeSinceLastStartedBlocking;
+    mStream->ChangeExplicitBlockerCount(-1);
+    mStartedBlockingDueToBeingOnlyNode = TimeStamp();
+  }
 }
 
 }
+
+}
--- a/content/media/webaudio/AudioDestinationNode.h
+++ b/content/media/webaudio/AudioDestinationNode.h
@@ -65,30 +65,48 @@ public:
   NS_IMETHOD CanPlayChanged(int32_t aCanPlay);
 
   AudioChannel MozAudioChannelType() const;
   void SetMozAudioChannelType(AudioChannel aValue, ErrorResult& aRv);
 
   virtual void NotifyMainThreadStateChanged() MOZ_OVERRIDE;
   void FireOfflineCompletionEvent();
 
+  // An amount that should be added to the MediaStream's current time to
+  // get the AudioContext.currentTime.
+  double ExtraCurrentTime();
+
+  // When aIsOnlyNode is true, this is the only node for the AudioContext.
+  void SetIsOnlyNodeForContext(bool aIsOnlyNode);
+
 private:
   bool CheckAudioChannelPermissions(AudioChannel aValue);
   void CreateAudioChannelAgent();
 
   void SetCanPlay(bool aCanPlay);
 
+  void NotifyStableState();
+  void ScheduleStableStateNotification();
+
   SelfReference<AudioDestinationNode> mOfflineRenderingRef;
   uint32_t mFramesToProduce;
 
   nsCOMPtr<nsIAudioChannelAgent> mAudioChannelAgent;
 
   // Audio Channel Type.
   AudioChannel mAudioChannel;
  bool mIsOffline;
  bool mHasFinished;
+
+  TimeStamp mStartedBlockingDueToBeingOnlyNode;
+  double mExtraCurrentTime;
+  double mExtraCurrentTimeSinceLastStartedBlocking;
+  bool mExtraCurrentTimeUpdatedSinceLastStableState;
 };
 
 }
 }
 
 #endif
 
--- a/content/media/webaudio/AudioNode.cpp
+++ b/content/media/webaudio/AudioNode.cpp
@@ -14,16 +14,17 @@ namespace mozilla {
 namespace dom {
 
 static const uint32_t INVALID_PORT = 0xffffffff;
 
 NS_IMPL_CYCLE_COLLECTION_CLASS(AudioNode)
 
 NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(AudioNode, nsDOMEventTargetHelper)
   tmp->DisconnectFromGraph();
+  tmp->mContext->UpdateNodeCount(-1);
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mContext)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mOutputNodes)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mOutputParams)
 NS_IMPL_CYCLE_COLLECTION_UNLINK_END
 NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(AudioNode, nsDOMEventTargetHelper)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mContext)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mOutputNodes)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mOutputParams)
@@ -53,23 +54,29 @@ AudioNode::AudioNode(AudioContext* aCont
                      ChannelInterpretation aChannelInterpretation)
   : nsDOMEventTargetHelper(aContext->GetParentObject())
   , mContext(aContext)
   , mChannelCount(aChannelCount)
   , mChannelCountMode(aChannelCountMode)
   , mChannelInterpretation(aChannelInterpretation)
 {
   MOZ_ASSERT(aContext);
+  nsDOMEventTargetHelper::BindToOwner(aContext->GetParentObject());
+  SetIsDOMBinding();
+  aContext->UpdateNodeCount(1);
 }
 
 AudioNode::~AudioNode()
 {
   MOZ_ASSERT(mInputNodes.IsEmpty());
   MOZ_ASSERT(mOutputNodes.IsEmpty());
   MOZ_ASSERT(mOutputParams.IsEmpty());
+  if (mContext) {
+    mContext->UpdateNodeCount(-1);
+  }
 }
 
 template <class InputNode>
 static uint32_t
 FindIndexOfNode(const nsTArray<InputNode>& aInputNodes, const AudioNode* aNode)
 {
   for (uint32_t i = 0; i < aInputNodes.Length(); ++i) {
     if (aInputNodes[i].mInputNode == aNode) {
--- a/content/media/webaudio/OscillatorNode.cpp
+++ b/content/media/webaudio/OscillatorNode.cpp
@@ -573,18 +573,17 @@ OscillatorNode::Start(double aWhen, Erro
   AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get());
   if (!ns) {
     // Nothing to play, or we're already dead for some reason
     return;
   }
 
   // TODO: Perhaps we need to do more here.
   ns->SetStreamTimeParameter(OscillatorNodeEngine::START,
-                             Context()->DestinationStream(),
-                             aWhen);
+                             Context(), aWhen);
 
   MarkActive();
 }
 
 void
 OscillatorNode::Stop(double aWhen, ErrorResult& aRv)
 {
   if (!WebAudioUtils::IsTimeValid(aWhen)) {
@@ -600,18 +599,17 @@ OscillatorNode::Stop(double aWhen, Error
   AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get());
   if (!ns || !Context()) {
     // We've already stopped and had our stream shut down
     return;
   }
 
   // TODO: Perhaps we need to do more here.
   ns->SetStreamTimeParameter(OscillatorNodeEngine::STOP,
-                             Context()->DestinationStream(),
-                             std::max(0.0, aWhen));
+                             Context(), std::max(0.0, aWhen));
 }
 
 void
 OscillatorNode::NotifyMainThreadStateChanged()
 {
   if (mStream->IsFinished()) {
     class EndedEventDispatcher : public nsRunnable
     {
--- a/content/media/webaudio/moz.build
+++ b/content/media/webaudio/moz.build
@@ -4,16 +4,17 @@
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 PARALLEL_DIRS += ['blink', 'test']
 
 TEST_TOOL_DIRS += ['compiledtest']
 
 EXPORTS += [
+    'AudioContext.h',
     'AudioParamTimeline.h',
     'MediaBufferDecoder.h',
     'ThreeDPoint.h',
     'WebAudioUtils.h',
 ]
 
 EXPORTS.mozilla += [
     'FFTBlock.h',