Backed out changeset f211d6754796 (bug 856361)
author: Ed Morley <emorley@mozilla.com>
Mon, 29 Jul 2013 09:55:12 +0100
changeset 140311 fab0e9b04d8da8a58c71a0f7168650df1ae9d01c
parent 140310 0f30f29ffa1f2ea92d93e7328d978a2b029146e1
child 140312 4382f83efcaf03d82dcab6dd122410daab3ccbda
push id: 25023
push user: emorley@mozilla.com
push date: Mon, 29 Jul 2013 14:13:44 +0000
treeherder: mozilla-central@8c89fe2a5c92 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
bugs: 856361
milestone: 25.0a1
backs out: f211d675479689dc2b86ddb6ee3af4cee2e83e03
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Backed out changeset f211d6754796 (bug 856361)
content/media/AudioNodeStream.cpp
content/media/AudioNodeStream.h
content/media/DOMMediaStream.h
content/media/MediaStreamGraph.cpp
content/media/MediaStreamGraph.h
--- a/content/media/AudioNodeStream.cpp
+++ b/content/media/AudioNodeStream.cpp
@@ -243,34 +243,16 @@ AudioNodeStream::AllInputsFinished() con
   for (uint32_t i = 0; i < inputCount; ++i) {
     if (!mInputs[i]->GetSource()->IsFinishedOnGraphThread()) {
       return false;
     }
   }
   return !!inputCount;
 }
 
-uint32_t
-AudioNodeStream::ComputeFinalOuputChannelCount(uint32_t aInputChannelCount)
-{
-  switch (mChannelCountMode) {
-  case ChannelCountMode::Explicit:
-    // Disregard the channel count we've calculated from inputs, and just use
-    // mNumberOfInputChannels.
-    return mNumberOfInputChannels;
-  case ChannelCountMode::Clamped_max:
-    // Clamp the computed output channel count to mNumberOfInputChannels.
-    return std::min(aInputChannelCount, mNumberOfInputChannels);
-  default:
-  case ChannelCountMode::Max:
-    // Nothing to do here, just shut up the compiler warning.
-    return aInputChannelCount;
-  }
-}
-
 void
 AudioNodeStream::ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex)
 {
   uint32_t inputCount = mInputs.Length();
   uint32_t outputChannelCount = 1;
   nsAutoTArray<AudioChunk*,250> inputChunks;
   for (uint32_t i = 0; i < inputCount; ++i) {
     if (aPortIndex != mInputs[i]->InputNumber()) {
@@ -290,17 +272,30 @@ AudioNodeStream::ObtainInputBlock(AudioC
       continue;
     }
 
     inputChunks.AppendElement(chunk);
     outputChannelCount =
       GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
   }
 
-  outputChannelCount = ComputeFinalOuputChannelCount(outputChannelCount);
+  switch (mChannelCountMode) {
+  case ChannelCountMode::Explicit:
+    // Disregard the output channel count that we've calculated, and just use
+    // mNumberOfInputChannels.
+    outputChannelCount = mNumberOfInputChannels;
+    break;
+  case ChannelCountMode::Clamped_max:
+    // Clamp the computed output channel count to mNumberOfInputChannels.
+    outputChannelCount = std::min(outputChannelCount, mNumberOfInputChannels);
+    break;
+  case ChannelCountMode::Max:
+    // Nothing to do here, just shut up the compiler warning.
+    break;
+  }
 
   uint32_t inputChunkCount = inputChunks.Length();
   if (inputChunkCount == 0 ||
       (inputChunkCount == 1 && inputChunks[0]->mChannelData.Length() == 0)) {
     aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
     return;
   }
 
@@ -311,106 +306,91 @@ AudioNodeStream::ObtainInputBlock(AudioC
   }
 
   if (outputChannelCount == 0) {
     aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
     return;
   }
 
   AllocateAudioBlock(outputChannelCount, &aTmpChunk);
+  float silenceChannel[WEBAUDIO_BLOCK_SIZE] = {0.f};
   // The static storage here should be 1KB, so it's fine
   nsAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;
 
   for (uint32_t i = 0; i < inputChunkCount; ++i) {
-    AccumulateInputChunk(i, *inputChunks[i], &aTmpChunk, &downmixBuffer);
-  }
-}
-
-void
-AudioNodeStream::AccumulateInputChunk(uint32_t aInputIndex, const AudioChunk& aChunk,
-                                      AudioChunk* aBlock,
-                                      nsTArray<float>* aDownmixBuffer)
-{
-  nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channels;
-  UpMixDownMixChunk(&aChunk, aBlock->mChannelData.Length(), channels, *aDownmixBuffer);
+    AudioChunk* chunk = inputChunks[i];
+    nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channels;
+    channels.AppendElements(chunk->mChannelData);
+    if (channels.Length() < outputChannelCount) {
+      if (mChannelInterpretation == ChannelInterpretation::Speakers) {
+        AudioChannelsUpMix(&channels, outputChannelCount, nullptr);
+        NS_ASSERTION(outputChannelCount == channels.Length(),
+                     "We called GetAudioChannelsSuperset to avoid this");
+      } else {
+        // Fill up the remaining channels by zeros
+        for (uint32_t j = channels.Length(); j < outputChannelCount; ++j) {
+          channels.AppendElement(silenceChannel);
+        }
+      }
+    } else if (channels.Length() > outputChannelCount) {
+      if (mChannelInterpretation == ChannelInterpretation::Speakers) {
+        nsAutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannels;
+        outputChannels.SetLength(outputChannelCount);
+        downmixBuffer.SetLength(outputChannelCount * WEBAUDIO_BLOCK_SIZE);
+        for (uint32_t j = 0; j < outputChannelCount; ++j) {
+          outputChannels[j] = &downmixBuffer[j * WEBAUDIO_BLOCK_SIZE];
+        }
 
-  for (uint32_t c = 0; c < channels.Length(); ++c) {
-    const float* inputData = static_cast<const float*>(channels[c]);
-    float* outputData = static_cast<float*>(const_cast<void*>(aBlock->mChannelData[c]));
-    if (inputData) {
-      if (aInputIndex == 0) {
-        AudioBlockCopyChannelWithScale(inputData, aChunk.mVolume, outputData);
+        AudioChannelsDownMix(channels, outputChannels.Elements(),
+                             outputChannelCount, WEBAUDIO_BLOCK_SIZE);
+
+        channels.SetLength(outputChannelCount);
+        for (uint32_t j = 0; j < channels.Length(); ++j) {
+          channels[j] = outputChannels[j];
+        }
       } else {
-        AudioBlockAddChannelWithScale(inputData, aChunk.mVolume, outputData);
-      }
-    } else {
-      if (aInputIndex == 0) {
-        PodZero(outputData, WEBAUDIO_BLOCK_SIZE);
+        // Drop the remaining channels
+        channels.RemoveElementsAt(outputChannelCount,
+                                  channels.Length() - outputChannelCount);
       }
     }
-  }
-}
 
-void
-AudioNodeStream::UpMixDownMixChunk(const AudioChunk* aChunk,
-                                   uint32_t aOutputChannelCount,
-                                   nsTArray<const void*>& aOutputChannels,
-                                   nsTArray<float>& aDownmixBuffer)
-{
-  static const float silenceChannel[WEBAUDIO_BLOCK_SIZE] = {0.f};
-
-  aOutputChannels.AppendElements(aChunk->mChannelData);
-  if (aOutputChannels.Length() < aOutputChannelCount) {
-    if (mChannelInterpretation == ChannelInterpretation::Speakers) {
-      AudioChannelsUpMix(&aOutputChannels, aOutputChannelCount, nullptr);
-      NS_ASSERTION(aOutputChannelCount == aOutputChannels.Length(),
-                   "We called GetAudioChannelsSuperset to avoid this");
-    } else {
-      // Fill up the remaining aOutputChannels by zeros
-      for (uint32_t j = aOutputChannels.Length(); j < aOutputChannelCount; ++j) {
-        aOutputChannels.AppendElement(silenceChannel);
+    for (uint32_t c = 0; c < channels.Length(); ++c) {
+      const float* inputData = static_cast<const float*>(channels[c]);
+      float* outputData = static_cast<float*>(const_cast<void*>(aTmpChunk.mChannelData[c]));
+      if (inputData) {
+        if (i == 0) {
+          AudioBlockCopyChannelWithScale(inputData, chunk->mVolume, outputData);
+        } else {
+          AudioBlockAddChannelWithScale(inputData, chunk->mVolume, outputData);
+        }
+      } else {
+        if (i == 0) {
+          memset(outputData, 0, WEBAUDIO_BLOCK_SIZE*sizeof(float));
+        }
       }
     }
-  } else if (aOutputChannels.Length() > aOutputChannelCount) {
-    if (mChannelInterpretation == ChannelInterpretation::Speakers) {
-      nsAutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannels;
-      outputChannels.SetLength(aOutputChannelCount);
-      aDownmixBuffer.SetLength(aOutputChannelCount * WEBAUDIO_BLOCK_SIZE);
-      for (uint32_t j = 0; j < aOutputChannelCount; ++j) {
-        outputChannels[j] = &aDownmixBuffer[j * WEBAUDIO_BLOCK_SIZE];
-      }
-
-      AudioChannelsDownMix(aOutputChannels, outputChannels.Elements(),
-                           aOutputChannelCount, WEBAUDIO_BLOCK_SIZE);
-
-      aOutputChannels.SetLength(aOutputChannelCount);
-      for (uint32_t j = 0; j < aOutputChannels.Length(); ++j) {
-        aOutputChannels[j] = outputChannels[j];
-      }
-    } else {
-      // Drop the remaining aOutputChannels
-      aOutputChannels.RemoveElementsAt(aOutputChannelCount,
-        aOutputChannels.Length() - aOutputChannelCount);
-    }
   }
 }
 
 // The MediaStreamGraph guarantees that this is actually one block, for
 // AudioNodeStreams.
 void
 AudioNodeStream::ProduceOutput(GraphTime aFrom, GraphTime aTo)
 {
   if (mMarkAsFinishedAfterThisBlock) {
     // This stream was finished the last time that we looked at it, and all
     // of the depending streams have finished their output as well, so now
     // it's time to mark this stream as finished.
     FinishOutput();
   }
 
-  EnsureTrack(AUDIO_NODE_STREAM_TRACK_ID, mSampleRate);
+  StreamBuffer::Track* track = EnsureTrack(AUDIO_NODE_STREAM_TRACK_ID, mSampleRate);
+
+  AudioSegment* segment = track->Get<AudioSegment>();
 
   uint16_t outputCount = std::max(uint16_t(1), mEngine->OutputCount());
   mLastChunks.SetLength(outputCount);
 
   if (mInCycle) {
     // XXX DelayNode not supported yet so just produce silence
     for (uint16_t i = 0; i < outputCount; ++i) {
       mLastChunks[i].SetNull(WEBAUDIO_BLOCK_SIZE);
@@ -439,25 +419,16 @@ AudioNodeStream::ProduceOutput(GraphTime
   }
 
   if (mDisabledTrackIDs.Contains(AUDIO_NODE_STREAM_TRACK_ID)) {
     for (uint32_t i = 0; i < mLastChunks.Length(); ++i) {
       mLastChunks[i].SetNull(WEBAUDIO_BLOCK_SIZE);
     }
   }
 
-  AdvanceOutputSegment();
-}
-
-void
-AudioNodeStream::AdvanceOutputSegment()
-{
-  StreamBuffer::Track* track = EnsureTrack(AUDIO_NODE_STREAM_TRACK_ID, mSampleRate);
-  AudioSegment* segment = track->Get<AudioSegment>();
-
   if (mKind == MediaStreamGraph::EXTERNAL_STREAM) {
     segment->AppendAndConsumeChunk(&mLastChunks[0]);
   } else {
     segment->AppendNullData(mLastChunks[0].GetDuration());
   }
 
   for (uint32_t j = 0; j < mListeners.Length(); ++j) {
     MediaStreamListener* l = mListeners[j];
--- a/content/media/AudioNodeStream.h
+++ b/content/media/AudioNodeStream.h
@@ -109,36 +109,24 @@ public:
     return mLastChunks;
   }
   virtual bool MainThreadNeedsUpdates() const MOZ_OVERRIDE
   {
     // Only source and external streams need updates on the main thread.
     return (mKind == MediaStreamGraph::SOURCE_STREAM && mFinished) ||
            mKind == MediaStreamGraph::EXTERNAL_STREAM;
   }
-  virtual bool IsIntrinsicallyConsumed() const MOZ_OVERRIDE
-  {
-    return true;
-  }
 
   // Any thread
   AudioNodeEngine* Engine() { return mEngine; }
   TrackRate SampleRate() const { return mSampleRate; }
 
 protected:
-  void AdvanceOutputSegment();
   void FinishOutput();
-  void AccumulateInputChunk(uint32_t aInputIndex, const AudioChunk& aChunk,
-                            AudioChunk* aBlock,
-                            nsTArray<float>* aDownmixBuffer);
-  void UpMixDownMixChunk(const AudioChunk* aChunk, uint32_t aOutputChannelCount,
-                         nsTArray<const void*>& aOutputChannels,
-                         nsTArray<float>& aDownmixBuffer);
 
-  uint32_t ComputeFinalOuputChannelCount(uint32_t aInputChannelCount);
   void ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex);
 
   // The engine that will generate output for this node.
   nsAutoPtr<AudioNodeEngine> mEngine;
   // The last block produced by this node.
   OutputChunks mLastChunks;
   // The stream's sampling rate
   const TrackRate mSampleRate;
--- a/content/media/DOMMediaStream.h
+++ b/content/media/DOMMediaStream.h
@@ -63,22 +63,20 @@ public:
   {
     return mWindow;
   }
   virtual JSObject* WrapObject(JSContext* aCx,
                                JS::Handle<JSObject*> aScope) MOZ_OVERRIDE;
 
   // WebIDL
   double CurrentTime();
-
   void GetAudioTracks(nsTArray<nsRefPtr<AudioStreamTrack> >& aTracks);
   void GetVideoTracks(nsTArray<nsRefPtr<VideoStreamTrack> >& aTracks);
 
-  MediaStream* GetStream() const { return mStream; }
-
+  MediaStream* GetStream() { return mStream; }
   bool IsFinished();
   /**
    * Returns a principal indicating who may access this stream. The stream contents
    * can only be accessed by principals subsuming this principal.
    */
   nsIPrincipal* GetPrincipal() { return mPrincipal; }
 
   /**
--- a/content/media/MediaStreamGraph.cpp
+++ b/content/media/MediaStreamGraph.cpp
@@ -515,17 +515,17 @@ MediaStreamGraphImpl::UpdateStreamOrder(
     if (ps) {
       ps->mInCycle = false;
     }
   }
 
   mozilla::LinkedList<MediaStream> stack;
   for (uint32_t i = 0; i < mOldStreams.Length(); ++i) {
     nsRefPtr<MediaStream>& s = mOldStreams[i];
-    if (s->IsIntrinsicallyConsumed()) {
+    if (!s->mAudioOutputs.IsEmpty() || !s->mVideoOutputs.IsEmpty()) {
       MarkConsumed(s);
     }
     if (!s->mHasBeenOrdered) {
       UpdateStreamOrderForStream(&stack, s.forget());
     }
   }
 }
 
--- a/content/media/MediaStreamGraph.h
+++ b/content/media/MediaStreamGraph.h
@@ -395,26 +395,16 @@ public:
   void ChangeExplicitBlockerCountImpl(GraphTime aTime, int32_t aDelta)
   {
     mExplicitBlockerCount.SetAtAndAfter(aTime, mExplicitBlockerCount.GetAt(aTime) + aDelta);
   }
   void AddListenerImpl(already_AddRefed<MediaStreamListener> aListener);
   void RemoveListenerImpl(MediaStreamListener* aListener);
   void RemoveAllListenersImpl();
   void SetTrackEnabledImpl(TrackID aTrackID, bool aEnabled);
-  /**
-   * Returns true when this stream requires the contents of its inputs even if
-   * its own outputs are not being consumed. This is used to signal inputs to
-   * this stream that they are being consumed; when they're not being consumed,
-   * we make some optimizations.
-   */
-  virtual bool IsIntrinsicallyConsumed() const
-  {
-    return !mAudioOutputs.IsEmpty() || !mVideoOutputs.IsEmpty();
-  }
 
   void AddConsumer(MediaInputPort* aPort)
   {
     mConsumers.AppendElement(aPort);
   }
   void RemoveConsumer(MediaInputPort* aPort)
   {
     mConsumers.RemoveElement(aPort);