bug 1205540 account for active inputs and skip processing when streams are inactive r=padenot
☠☠ backed out by eb30e5ee32f0 ☠☠
author Karl Tomlinson <karlt+@karlt.net>
Fri, 18 Sep 2015 00:03:00 +1200
changeset 295831 e89d8182d588cdba43a1340b776eafccb964ed89
parent 295830 abace4cdec06e6ab956dfb8c4a8601252450421a
child 295832 07853d7b87c1ab9c8e210b096aa24790252a92bb
push id 5245
push user raliiev@mozilla.com
push date Thu, 29 Oct 2015 11:30:51 +0000
treeherder mozilla-beta@dac831dc1bd0
reviewers padenot
bugs 1205540
milestone 43.0a1
bug 1205540 account for active inputs and skip processing when streams are inactive r=padenot
dom/media/webaudio/AnalyserNode.cpp
dom/media/webaudio/AudioBufferSourceNode.cpp
dom/media/webaudio/AudioNodeEngine.h
dom/media/webaudio/AudioNodeStream.cpp
dom/media/webaudio/AudioNodeStream.h
dom/media/webaudio/BiquadFilterNode.cpp
dom/media/webaudio/ConvolverNode.cpp
dom/media/webaudio/DelayNode.cpp
dom/media/webaudio/OscillatorNode.cpp
dom/media/webaudio/PannerNode.cpp
--- a/dom/media/webaudio/AnalyserNode.cpp
+++ b/dom/media/webaudio/AnalyserNode.cpp
@@ -63,42 +63,46 @@ public:
                             AudioBlock* aOutput,
                             bool* aFinished) override
   {
     *aOutput = aInput;
 
     if (aInput.IsNull()) {
       // If AnalyserNode::mChunks has only null chunks, then there is no need
       // to send further null chunks.
-      if (mChunksToProcess == 0) {
+      if (mChunksToProcess <= 0) {
+        if (mChunksToProcess != INT32_MIN) {
+          mChunksToProcess = INT32_MIN;
+          aStream->CheckForInactive();
+        }
         return;
       }
 
       --mChunksToProcess;
     } else {
       // This many null chunks will be required to empty AnalyserNode::mChunks.
       mChunksToProcess = CHUNK_COUNT;
     }
 
     nsRefPtr<TransferBuffer> transfer =
       new TransferBuffer(aStream, aInput.AsAudioChunk());
     NS_DispatchToMainThread(transfer);
   }
 
   virtual bool IsActive() const override
   {
-    return mChunksToProcess != 0;
+    return mChunksToProcess != INT32_MIN;
   }
 
   virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override
   {
     return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
   }
 
-  size_t mChunksToProcess = 0;
+  int32_t mChunksToProcess = INT32_MIN;
 };
 
 AnalyserNode::AnalyserNode(AudioContext* aContext)
   : AudioNode(aContext,
               1,
               ChannelCountMode::Max,
               ChannelInterpretation::Speakers)
   , mAnalysisBlock(2048)
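
The AnalyserNode hunk above replaces the unsigned mChunksToProcess counter with a signed one so that INT32_MIN can act as a sentinel: 0 now means "the buffer has just drained and the stream still needs one CheckForInactive() call", while INT32_MIN means "drained and already reported inactive". A minimal standalone sketch of that countdown-plus-sentinel pattern (toy types and names, not Gecko code; CHUNK_COUNT is an arbitrary stand-in):

    #include <cstdint>

    // Toy stand-ins for illustration only.
    struct ToyStream {
      bool checkedForInactive = false;
      void CheckForInactive() { checkedForInactive = true; }
    };

    struct ToyAnalyserEngine {
      static const int32_t CHUNK_COUNT = 4;   // pretend the analyser buffers 4 blocks
      int32_t mChunksToProcess = INT32_MIN;   // INT32_MIN == drained, already reported

      void ProcessBlock(ToyStream* aStream, bool aInputIsNull) {
        if (aInputIsNull) {
          if (mChunksToProcess <= 0) {
            if (mChunksToProcess != INT32_MIN) {
              // The buffer has just emptied: latch the sentinel and let the
              // stream re-evaluate whether it can stop calling this engine.
              mChunksToProcess = INT32_MIN;
              aStream->CheckForInactive();
            }
            return;                            // no need to forward more silence
          }
          --mChunksToProcess;
        } else {
          mChunksToProcess = CHUNK_COUNT;      // non-silent input refills the countdown
        }
        // ...forward the block to the main thread here...
      }

      bool IsActive() const { return mChunksToProcess != INT32_MIN; }
    };

Once IsActive() returns false and no active inputs remain, CheckForInactive() lets the stream skip ProcessBlock() entirely until new input arrives.
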
--- a/dom/media/webaudio/AudioBufferSourceNode.cpp
+++ b/dom/media/webaudio/AudioBufferSourceNode.cpp
@@ -106,17 +106,21 @@ public:
       break;
     default:
       NS_ERROR("Bad AudioBufferSourceNodeEngine double parameter.");
     };
   }
   virtual void SetInt32Parameter(uint32_t aIndex, int32_t aParam) override
   {
     switch (aIndex) {
-    case AudioBufferSourceNode::SAMPLE_RATE: mBufferSampleRate = aParam; break;
+    case AudioBufferSourceNode::SAMPLE_RATE:
+      MOZ_ASSERT(aParam > 0);
+      mBufferSampleRate = aParam;
+      mSource->SetActive();
+      break;
     case AudioBufferSourceNode::BUFFERSTART:
       MOZ_ASSERT(aParam >= 0);
       if (mBufferPosition == 0) {
         mBufferPosition = aParam;
       }
       break;
     case AudioBufferSourceNode::BUFFEREND:
       MOZ_ASSERT(aParam >= 0);
--- a/dom/media/webaudio/AudioNodeEngine.h
+++ b/dom/media/webaudio/AudioNodeEngine.h
@@ -304,18 +304,20 @@ public:
   }
 
   /**
    * Produce the next block of audio samples, given input samples aInput
    * (the mixed data for input 0).
    * aInput is guaranteed to have float sample format (if it has samples at all)
    * and to have been resampled to the sampling rate for the stream, and to have
    * exactly WEBAUDIO_BLOCK_SIZE samples.
-   * *aFinished is set to false by the caller. If the callee sets it to true,
-   * we'll finish the stream and not call this again.
+   * *aFinished is set to false by the caller. The callee must not set this to
+   * true unless silent output is produced. If set to true, we'll finish the
+   * stream, consider this input inactive on any downstream nodes, and not
+   * call this again.
    */
   virtual void ProcessBlock(AudioNodeStream* aStream,
                             const AudioBlock& aInput,
                             AudioBlock* aOutput,
                             bool* aFinished);
   /**
    * Produce the next block of audio samples, before input is provided.
    * ProcessBlock() will be called later, and it then should not change
--- a/dom/media/webaudio/AudioNodeStream.cpp
+++ b/dom/media/webaudio/AudioNodeStream.cpp
@@ -29,31 +29,33 @@ namespace mozilla {
 AudioNodeStream::AudioNodeStream(AudioNodeEngine* aEngine,
                                  Flags aFlags,
                                  TrackRate aSampleRate)
   : ProcessedMediaStream(nullptr),
     mEngine(aEngine),
     mSampleRate(aSampleRate),
     mFlags(aFlags),
     mNumberOfInputChannels(2),
+    mIsActive(aEngine->IsActive()),
     mMarkAsFinishedAfterThisBlock(false),
     mAudioParamStream(false),
     mPassThrough(false)
 {
   MOZ_ASSERT(NS_IsMainThread());
   mChannelCountMode = ChannelCountMode::Max;
   mChannelInterpretation = ChannelInterpretation::Speakers;
   // AudioNodes are always producing data
   mHasCurrentData = true;
   mLastChunks.SetLength(std::max(uint16_t(1), mEngine->OutputCount()));
   MOZ_COUNT_CTOR(AudioNodeStream);
 }
 
 AudioNodeStream::~AudioNodeStream()
 {
+  MOZ_ASSERT(mActiveInputCount == 0);
   MOZ_COUNT_DTOR(AudioNodeStream);
 }
 
 void
 AudioNodeStream::DestroyImpl()
 {
   // These are graph thread objects, so clean up on graph thread.
   mInputChunks.Clear();
@@ -515,17 +517,24 @@ AudioNodeStream::ProcessInput(GraphTime 
     EnsureTrack(AUDIO_TRACK);
   }
   // No more tracks will be coming
   mBuffer.AdvanceKnownTracksTime(STREAM_TIME_MAX);
 
   uint16_t outputCount = mLastChunks.Length();
   MOZ_ASSERT(outputCount == std::max(uint16_t(1), mEngine->OutputCount()));
 
-  if (mFinished || InMutedCycle()) {
+  if (!mIsActive) {
+    // mLastChunks are already null.
+#ifdef DEBUG
+    for (const auto& chunk : mLastChunks) {
+      MOZ_ASSERT(chunk.IsNull());
+    }
+#endif
+  } else if (InMutedCycle()) {
     mInputChunks.Clear();
     for (uint16_t i = 0; i < outputCount; ++i) {
       mLastChunks[i].SetNull(WEBAUDIO_BLOCK_SIZE);
     }
   } else {
     // We need to generate at least one input
     uint16_t maxInputs = std::max(uint16_t(1), mEngine->InputCount());
     mInputChunks.SetLength(maxInputs);
@@ -544,16 +553,17 @@ AudioNodeStream::ProcessInput(GraphTime 
       }
     }
     for (uint16_t i = 0; i < outputCount; ++i) {
       NS_ASSERTION(mLastChunks[i].GetDuration() == WEBAUDIO_BLOCK_SIZE,
                    "Invalid WebAudio chunk size");
     }
     if (finished) {
       mMarkAsFinishedAfterThisBlock = true;
+      CheckForInactive();
     }
 
     if (mDisabledTrackIDs.Contains(static_cast<TrackID>(AUDIO_TRACK))) {
       for (uint32_t i = 0; i < outputCount; ++i) {
         mLastChunks[i].SetNull(WEBAUDIO_BLOCK_SIZE);
       }
     }
   }
@@ -574,17 +584,17 @@ void
 AudioNodeStream::ProduceOutputBeforeInput(GraphTime aFrom)
 {
   MOZ_ASSERT(mEngine->AsDelayNodeEngine());
   MOZ_ASSERT(mEngine->OutputCount() == 1,
              "DelayNodeEngine output count should be 1");
   MOZ_ASSERT(!InMutedCycle(), "DelayNodes should break cycles");
   MOZ_ASSERT(mLastChunks.Length() == 1);
 
-  if (mFinished) {
+  if (!mIsActive) {
     mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
   } else {
     mEngine->ProduceBlockBeforeInput(&mLastChunks[0]);
     NS_ASSERTION(mLastChunks[0].GetDuration() == WEBAUDIO_BLOCK_SIZE,
                  "Invalid WebAudio chunk size");
     if (mDisabledTrackIDs.Contains(static_cast<TrackID>(AUDIO_TRACK))) {
       mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
     }
@@ -679,9 +689,93 @@ AudioNodeStream::DestinationTimeFromTick
 {
   MOZ_ASSERT(SampleRate() == aDestination->SampleRate());
 
   GraphTime graphTime = StreamTimeToGraphTime(aPosition);
   StreamTime destinationTime = aDestination->GraphTimeToStreamTime(graphTime);
   return StreamTimeToSeconds(destinationTime);
 }
 
+void
+AudioNodeStream::AddInput(MediaInputPort* aPort)
+{
+  ProcessedMediaStream::AddInput(aPort);
+  AudioNodeStream* ns = aPort->GetSource()->AsAudioNodeStream();
+  // Streams that are not AudioNodeStreams are considered active.
+  if (!ns || (ns->mIsActive && !ns->IsAudioParamStream())) {
+    IncrementActiveInputCount();
+  }
+}
+void
+AudioNodeStream::RemoveInput(MediaInputPort* aPort)
+{
+  ProcessedMediaStream::RemoveInput(aPort);
+  AudioNodeStream* ns = aPort->GetSource()->AsAudioNodeStream();
+  // Streams that are not AudioNodeStreams are considered active.
+  if (!ns || (ns->mIsActive && !ns->IsAudioParamStream())) {
+    DecrementActiveInputCount();
+  }
+}
+
+void
+AudioNodeStream::SetActive()
+{
+  if (mIsActive || mMarkAsFinishedAfterThisBlock) {
+    return;
+  }
+
+  mIsActive = true;
+  if (IsAudioParamStream()) {
+    // Consumers merely influence stream order.
+    // They do not read from the stream.
+    return;
+  }
+
+  for (const auto& consumer : mConsumers) {
+    AudioNodeStream* ns = consumer->GetDestination()->AsAudioNodeStream();
+    if (ns) {
+      ns->IncrementActiveInputCount();
+    }
+  }
+}
+
+void
+AudioNodeStream::CheckForInactive()
+{
+  if (((mActiveInputCount > 0 || mEngine->IsActive()) &&
+       !mMarkAsFinishedAfterThisBlock) ||
+      !mIsActive) {
+    return;
+  }
+
+  mIsActive = false;
+  mInputChunks.Clear(); // not required for foreseeable future
+  for (auto& chunk : mLastChunks) {
+    chunk.SetNull(WEBAUDIO_BLOCK_SIZE);
+  }
+  if (IsAudioParamStream()) {
+    return;
+  }
+
+  for (const auto& consumer : mConsumers) {
+    AudioNodeStream* ns = consumer->GetDestination()->AsAudioNodeStream();
+    if (ns) {
+      ns->DecrementActiveInputCount();
+    }
+  }
+}
+
+void
+AudioNodeStream::IncrementActiveInputCount()
+{
+  ++mActiveInputCount;
+  SetActive();
+}
+
+void
+AudioNodeStream::DecrementActiveInputCount()
+{
+  MOZ_ASSERT(mActiveInputCount > 0);
+  --mActiveInputCount;
+  CheckForInactive();
+}
+
 } // namespace mozilla
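
Taken together, AddInput()/RemoveInput() and the new SetActive()/CheckForInactive()/IncrementActiveInputCount()/DecrementActiveInputCount() methods form a reference count of potentially non-silent inputs: a stream stays active while its engine is active or at least one upstream stream is, and activity changes cascade to consumers. A simplified standalone model of that propagation (hypothetical ToyNodeStream, not the real class; the AudioParam special case and chunk clearing are omitted):

    #include <cstdint>
    #include <vector>

    struct ToyNodeStream {
      std::vector<ToyNodeStream*> mConsumers;  // downstream streams
      uint32_t mActiveInputCount = 0;
      bool mEngineIsActive = false;            // stands in for mEngine->IsActive()
      bool mIsActive = false;
      bool mFinished = false;                  // stands in for mMarkAsFinishedAfterThisBlock

      void SetActive() {
        if (mIsActive || mFinished) return;
        mIsActive = true;
        for (ToyNodeStream* c : mConsumers) c->IncrementActiveInputCount();
      }
      void CheckForInactive() {
        if (((mActiveInputCount > 0 || mEngineIsActive) && !mFinished) || !mIsActive) return;
        mIsActive = false;                     // the real code also nulls mLastChunks here
        for (ToyNodeStream* c : mConsumers) c->DecrementActiveInputCount();
      }
      void IncrementActiveInputCount() { ++mActiveInputCount; SetActive(); }
      void DecrementActiveInputCount() { --mActiveInputCount; CheckForInactive(); }
    };

With this bookkeeping in place, ProcessInput() can take the new early-out branch: an inactive stream's mLastChunks are already null, so neither input mixing nor the engine's ProcessBlock() needs to run.
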
--- a/dom/media/webaudio/AudioNodeStream.h
+++ b/dom/media/webaudio/AudioNodeStream.h
@@ -101,16 +101,18 @@ public:
 
   void SetAudioParamHelperStream()
   {
     MOZ_ASSERT(!mAudioParamStream, "Can only do this once");
     mAudioParamStream = true;
   }
 
   virtual AudioNodeStream* AsAudioNodeStream() override { return this; }
+  virtual void AddInput(MediaInputPort* aPort) override;
+  virtual void RemoveInput(MediaInputPort* aPort) override;
 
   // Graph thread only
   void SetStreamTimeParameterImpl(uint32_t aIndex, MediaStream* aRelativeToStream,
                                   double aStreamTime);
   void SetChannelMixingParametersImpl(uint32_t aNumberOfChannels,
                                       ChannelCountMode aChannelCountMoe,
                                       ChannelInterpretation aChannelInterpretation);
   virtual void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) override;
@@ -160,48 +162,71 @@ public:
                                   StreamTime aPosition);
 
   size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override;
   size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override;
 
   void SizeOfAudioNodesIncludingThis(MallocSizeOf aMallocSizeOf,
                                      AudioNodeSizes& aUsage) const;
 
+  /*
+   * SetActive() is called when either an active input is added or the engine
+   * for a source node transitions from inactive to active.  This is not
+   * called from engines for processing nodes because they only become active
+   * when there are active input streams, in which case this stream is already
+   * active.
+   */
+  void SetActive();
+  /*
+   * CheckForInactive() is called when the engine transitions from active to
+   * inactive, or an active input is removed, or the stream finishes.  If the
+   * stream is now inactive, then mInputChunks will be cleared and mLastChunks
+   * will be set to null.  ProcessBlock() will not be called on the engine
+   * again until SetActive() is called.
+   */
+  void CheckForInactive();
 
 protected:
   virtual void DestroyImpl() override;
 
   void AdvanceOutputSegment();
   void FinishOutput();
   void AccumulateInputChunk(uint32_t aInputIndex, const AudioBlock& aChunk,
                             AudioBlock* aBlock,
                             nsTArray<float>* aDownmixBuffer);
   void UpMixDownMixChunk(const AudioBlock* aChunk, uint32_t aOutputChannelCount,
                          nsTArray<const float*>& aOutputChannels,
                          nsTArray<float>& aDownmixBuffer);
 
   uint32_t ComputedNumberOfChannels(uint32_t aInputChannelCount);
   void ObtainInputBlock(AudioBlock& aTmpChunk, uint32_t aPortIndex);
+  void IncrementActiveInputCount();
+  void DecrementActiveInputCount();
 
   // The engine that will generate output for this node.
   nsAutoPtr<AudioNodeEngine> mEngine;
   // The mixed input blocks are kept from iteration to iteration to avoid
   // reallocating channel data arrays and any buffers for mixing.
   OutputChunks mInputChunks;
   // The last block produced by this node.
   OutputChunks mLastChunks;
   // The stream's sampling rate
   const TrackRate mSampleRate;
   // Whether this is an internal or external stream
   const Flags mFlags;
+  // The number of input streams that may provide non-silent input.
+  uint32_t mActiveInputCount = 0;
   // The number of input channels that this stream requires. 0 means don't care.
   uint32_t mNumberOfInputChannels;
   // The mixing modes
   ChannelCountMode mChannelCountMode;
   ChannelInterpretation mChannelInterpretation;
+  // Streams are considered active if the stream has not finished and either
+  // the engine is active or there are active input streams.
+  bool mIsActive;
   // Whether the stream should be marked as finished as soon
   // as the current time range has been computed block by block.
   bool mMarkAsFinishedAfterThisBlock;
   // Whether the stream is an AudioParamHelper stream.
   bool mAudioParamStream;
   // Whether the stream just passes its input through.
   bool mPassThrough;
 };
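
The stream is now constructed with mIsActive initialized from mEngine->IsActive(), so a graph full of unstarted source nodes begins life inactive. Source engines are responsible for flipping that: the AudioBufferSourceNode hunk above and the OscillatorNode hunk below call mSource->SetActive() from the parameter setters that schedule playback. A hypothetical sketch of that source-side activation (toy types, not the real message plumbing):

    #include <cstdint>

    struct ToyNodeStream {
      bool mIsActive = false;
      void SetActive() { mIsActive = true; }
    };

    // Hypothetical source engine mirroring the pattern: the stream starts
    // inactive because IsActive() is false until playback is scheduled.
    struct ToySourceEngine {
      explicit ToySourceEngine(ToyNodeStream* aSource) : mSource(aSource) {}

      // Runs on the graph thread when the main thread sends the start time.
      void SetStreamTimeParameter(int64_t aStartTick) {
        mStart = aStartTick;
        mSource->SetActive();   // from here on the stream may produce sound
      }

      bool IsActive() const { return mStart >= 0; }

      ToyNodeStream* mSource;
      int64_t mStart = -1;
    };

Processing engines never need to call SetActive() themselves: as the header comment above notes, they only become audible when an upstream stream does, and that already marks the downstream stream active via IncrementActiveInputCount().
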
--- a/dom/media/webaudio/BiquadFilterNode.cpp
+++ b/dom/media/webaudio/BiquadFilterNode.cpp
@@ -150,16 +150,17 @@ public:
         if (mBiquads[i].hasTail()) {
           hasTail = true;
           break;
         }
       }
       if (!hasTail) {
         if (!mBiquads.IsEmpty()) {
           mBiquads.Clear();
+          aStream->CheckForInactive();
 
           nsRefPtr<PlayingRefChangeHandler> refchanged =
             new PlayingRefChangeHandler(aStream, PlayingRefChangeHandler::RELEASE);
           aStream->Graph()->
             DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget());
         }
 
         aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
--- a/dom/media/webaudio/ConvolverNode.cpp
+++ b/dom/media/webaudio/ConvolverNode.cpp
@@ -115,16 +115,17 @@ public:
     if (aInput.IsNull()) {
       if (mLeftOverData > 0) {
         mLeftOverData -= WEBAUDIO_BLOCK_SIZE;
         input.AllocateChannels(1);
         WriteZeroesToAudioBlock(&input, 0, WEBAUDIO_BLOCK_SIZE);
       } else {
         if (mLeftOverData != INT32_MIN) {
           mLeftOverData = INT32_MIN;
+          aStream->CheckForInactive();
           nsRefPtr<PlayingRefChanged> refchanged =
             new PlayingRefChanged(aStream, PlayingRefChanged::RELEASE);
           aStream->Graph()->
             DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget());
         }
         aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
         return;
       }
--- a/dom/media/webaudio/DelayNode.cpp
+++ b/dom/media/webaudio/DelayNode.cpp
@@ -91,25 +91,27 @@ public:
           DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget());
       }
       mLeftOverData = mBuffer.MaxDelayTicks();
     } else if (mLeftOverData > 0) {
       mLeftOverData -= WEBAUDIO_BLOCK_SIZE;
     } else {
       if (mLeftOverData != INT32_MIN) {
         mLeftOverData = INT32_MIN;
+        aStream->CheckForInactive();
+
         // Delete our buffered data now we no longer need it
         mBuffer.Reset();
 
         nsRefPtr<PlayingRefChanged> refchanged =
           new PlayingRefChanged(aStream, PlayingRefChanged::RELEASE);
         aStream->Graph()->
           DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget());
       }
-      *aOutput = aInput;
+      aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
       return;
     }
 
     mBuffer.Write(aInput);
 
     // Skip output update if mLastChunks has already been set by
     // ProduceBlockBeforeInput() when in a cycle.
     if (!mHaveProducedBeforeInput) {
--- a/dom/media/webaudio/OscillatorNode.cpp
+++ b/dom/media/webaudio/OscillatorNode.cpp
@@ -76,17 +76,20 @@ public:
     default:
       NS_ERROR("Bad OscillatorNodeEngine TimelineParameter");
     }
   }
 
   virtual void SetStreamTimeParameter(uint32_t aIndex, StreamTime aParam) override
   {
     switch (aIndex) {
-    case START: mStart = aParam; break;
+    case START:
+      mStart = aParam;
+      mSource->SetActive();
+      break;
     case STOP: mStop = aParam; break;
     default:
       NS_ERROR("Bad OscillatorNodeEngine StreamTimeParameter");
     }
   }
 
   virtual void SetInt32Parameter(uint32_t aIndex, int32_t aParam) override
   {
--- a/dom/media/webaudio/PannerNode.cpp
+++ b/dom/media/webaudio/PannerNode.cpp
@@ -144,24 +144,25 @@ public:
       // tail-time reference was added.  Even if the model is now equalpower,
       // the reference will need to be removed.
       if (mLeftOverData > 0 &&
           mPanningModelFunction == &PannerNodeEngine::HRTFPanningFunction) {
         mLeftOverData -= WEBAUDIO_BLOCK_SIZE;
       } else {
         if (mLeftOverData != INT_MIN) {
           mLeftOverData = INT_MIN;
+          aStream->CheckForInactive();
           mHRTFPanner->reset();
 
           nsRefPtr<PlayingRefChangeHandler> refchanged =
             new PlayingRefChangeHandler(aStream, PlayingRefChangeHandler::RELEASE);
           aStream->Graph()->
             DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget());
         }
-        *aOutput = aInput;
+        aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
         return;
       }
     } else if (mPanningModelFunction == &PannerNodeEngine::HRTFPanningFunction) {
       if (mLeftOverData == INT_MIN) {
         nsRefPtr<PlayingRefChangeHandler> refchanged =
           new PlayingRefChangeHandler(aStream, PlayingRefChangeHandler::ADDREF);
         aStream->Graph()->
           DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget());
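
The ConvolverNode, DelayNode and PannerNode hunks all follow the same tail-time pattern: when input goes silent the engine rings out mLeftOverData block by block, and once that is exhausted it latches the INT32_MIN/INT_MIN sentinel, calls aStream->CheckForInactive(), and from then on emits genuine silence (the Delay and Panner hunks also switch from copying the null input to an explicit aOutput->SetNull(), making that silence guarantee explicit). A condensed, hypothetical sketch of the shared pattern (toy types, arbitrary tail length):

    #include <cstdint>

    struct ToyStream { void CheckForInactive() {} };
    static const int32_t WEBAUDIO_BLOCK_SIZE = 128;

    struct ToyTailEngine {
      int32_t mLeftOverData = INT32_MIN;   // INT32_MIN == tail fully drained, inactive

      // Returns true when the output block is silent.
      bool ProcessBlock(ToyStream* aStream, bool aInputIsNull) {
        if (aInputIsNull) {
          if (mLeftOverData > 0) {
            mLeftOverData -= WEBAUDIO_BLOCK_SIZE;      // keep ringing out the tail
          } else {
            if (mLeftOverData != INT32_MIN) {
              mLeftOverData = INT32_MIN;               // tail finished: report it once
              aStream->CheckForInactive();
            }
            return true;                               // inactive engines promise silence
          }
        } else {
          mLeftOverData = 10 * WEBAUDIO_BLOCK_SIZE;    // pretend tail length on real input
        }
        // ...filter or convolve the block here...
        return false;
      }

      bool IsActive() const { return mLeftOverData != INT32_MIN; }
    };

Keeping the engine's IsActive() answer consistent with what it actually outputs is what lets the new ProcessInput() branch assert that an inactive stream's mLastChunks are already null.
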