Bug 1454998 - Remove AudioNodeStream::SampleRate(). r=karlt
☠☠ backed out by 5f5e153eb14b ☠ ☠
author: Andreas Pehrson <apehrson@mozilla.com>
Wed, 02 Oct 2019 08:17:59 +0000
changeset 495964 8ff03f2f4ca2da0761fb285f8c403b51765a19cf
parent 495963 ae6056b748d1eb640cfe5d457f39054d66346a8e
child 495965 80417bdfa72112c6f9472c29ce49e8ff81e8c688
push id: 114140
push user: dvarga@mozilla.com
push date: Wed, 02 Oct 2019 18:04:51 +0000
treeherder: mozilla-inbound@32eb0ea893f3 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: karlt
bugs: 1454998
milestone: 71.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1454998 - Remove AudioNodeStream::SampleRate(). r=karlt AudioNodeStream is a subclass of MediaStream, which now exposes a public const mSampleRate member. Differential Revision: https://phabricator.services.mozilla.com/D47688
dom/media/MediaStreamGraph.cpp
dom/media/webaudio/AudioBufferSourceNode.cpp
dom/media/webaudio/AudioNodeStream.cpp
dom/media/webaudio/AudioNodeStream.h
dom/media/webaudio/BiquadFilterNode.cpp
dom/media/webaudio/DelayNode.cpp
dom/media/webaudio/DynamicsCompressorNode.cpp
dom/media/webaudio/OscillatorNode.cpp
dom/media/webaudio/ScriptProcessorNode.cpp
dom/media/webaudio/WaveShaperNode.cpp
dom/media/webaudio/WebAudioUtils.cpp
--- a/dom/media/MediaStreamGraph.cpp
+++ b/dom/media/MediaStreamGraph.cpp
@@ -1200,25 +1200,25 @@ void MediaStreamGraphImpl::Process() {
         AudioNodeStream* n = stream->AsAudioNodeStream();
         if (n) {
 #ifdef DEBUG
           // Verify that the sampling rate for all of the following streams is
           // the same
           for (uint32_t j = i + 1; j < mStreams.Length(); ++j) {
             AudioNodeStream* nextStream = mStreams[j]->AsAudioNodeStream();
             if (nextStream) {
-              MOZ_ASSERT(n->SampleRate() == nextStream->SampleRate(),
+              MOZ_ASSERT(n->mSampleRate == nextStream->mSampleRate,
                          "All AudioNodeStreams in the graph must have the same "
                          "sampling rate");
             }
           }
 #endif
           // Since an AudioNodeStream is present, go ahead and
           // produce audio block by block for all the rest of the streams.
-          ProduceDataForStreamsBlockByBlock(i, n->SampleRate());
+          ProduceDataForStreamsBlockByBlock(i, n->mSampleRate);
           doneAllProducing = true;
         } else {
           ps->ProcessInput(mProcessedTime, mStateComputedTime,
                            ProcessedMediaStream::ALLOW_END);
           // Assert that a live stream produced enough data
           MOZ_ASSERT_IF(!stream->mEnded,
                         stream->GetEnd() >= GraphTimeToStreamTimeWithBlocking(
                                                 stream, mStateComputedTime));
--- a/dom/media/webaudio/AudioBufferSourceNode.cpp
+++ b/dom/media/webaudio/AudioBufferSourceNode.cpp
@@ -92,17 +92,17 @@ class AudioBufferSourceNodeEngine final 
       default:
         NS_ERROR("Bad AudioBufferSourceNodeEngine StreamTimeParameter");
     }
   }
   void SetDoubleParameter(uint32_t aIndex, double aParam) override {
     switch (aIndex) {
       case AudioBufferSourceNode::START:
         MOZ_ASSERT(!mStart, "Another START?");
-        mStart = aParam * mDestination->SampleRate();
+        mStart = aParam * mDestination->mSampleRate;
         // Round to nearest
         mBeginProcessing = mStart + 0.5;
         break;
       default:
         NS_ERROR("Bad AudioBufferSourceNodeEngine double parameter.");
     };
   }
   void SetInt32Parameter(uint32_t aIndex, int32_t aParam) override {
@@ -434,17 +434,17 @@ class AudioBufferSourceNodeEngine final 
     *aCurrentPosition += numFrames;
     mBufferPosition += numFrames;
   }
 
   int32_t ComputeFinalOutSampleRate(float aPlaybackRate, float aDetune) {
     float computedPlaybackRate = aPlaybackRate * exp2(aDetune / 1200.f);
     // Make sure the playback rate is something our resampler can work with.
     int32_t rate = WebAudioUtils::TruncateFloatToInt<int32_t>(
-        mSource->SampleRate() / computedPlaybackRate);
+        mSource->mSampleRate / computedPlaybackRate);
     return rate ? rate : mBufferSampleRate;
   }
 
   void UpdateSampleRateIfNeeded(uint32_t aChannels,
                                 StreamTime aStreamPosition) {
     float playbackRate;
     float detune;
 
--- a/dom/media/webaudio/AudioNodeStream.cpp
+++ b/dom/media/webaudio/AudioNodeStream.cpp
@@ -189,17 +189,17 @@ void AudioNodeStream::SetInt32Parameter(
 void AudioNodeStream::SendTimelineEvent(uint32_t aIndex,
                                         const AudioTimelineEvent& aEvent) {
   class Message final : public ControlMessage {
    public:
     Message(AudioNodeStream* aStream, uint32_t aIndex,
             const AudioTimelineEvent& aEvent)
         : ControlMessage(aStream),
           mEvent(aEvent),
-          mSampleRate(aStream->SampleRate()),
+          mSampleRate(aStream->mSampleRate),
           mIndex(aIndex) {}
     void Run() override {
       static_cast<AudioNodeStream*>(mStream)->Engine()->RecvTimelineEvent(
           mIndex, mEvent);
     }
     AudioTimelineEvent mEvent;
     TrackRate mSampleRate;
     uint32_t mIndex;
@@ -217,30 +217,35 @@ void AudioNodeStream::SetBuffer(AudioChu
           std::move(mBuffer));
     }
     AudioChunk mBuffer;
   };
 
   GraphImpl()->AppendMessage(MakeUnique<Message>(this, std::move(aBuffer)));
 }
 
-void AudioNodeStream::SetReverb(WebCore::Reverb* aReverb, uint32_t aImpulseChannelCount) {
+void AudioNodeStream::SetReverb(WebCore::Reverb* aReverb,
+                                uint32_t aImpulseChannelCount) {
   class Message final : public ControlMessage {
    public:
-    Message(AudioNodeStream* aStream, WebCore::Reverb* aReverb, uint32_t aImpulseChannelCount)
-        : ControlMessage(aStream), mReverb(aReverb), mImpulseChanelCount(aImpulseChannelCount) {}
+    Message(AudioNodeStream* aStream, WebCore::Reverb* aReverb,
+            uint32_t aImpulseChannelCount)
+        : ControlMessage(aStream),
+          mReverb(aReverb),
+          mImpulseChanelCount(aImpulseChannelCount) {}
     void Run() override {
       static_cast<AudioNodeStream*>(mStream)->Engine()->SetReverb(
           mReverb.forget(), mImpulseChanelCount);
     }
     nsAutoPtr<WebCore::Reverb> mReverb;
     uint32_t mImpulseChanelCount;
   };
 
-  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aReverb, aImpulseChannelCount));
+  GraphImpl()->AppendMessage(
+      MakeUnique<Message>(this, aReverb, aImpulseChannelCount));
 }
 
 void AudioNodeStream::SetRawArrayData(nsTArray<float>& aData) {
   class Message final : public ControlMessage {
    public:
     Message(AudioNodeStream* aStream, nsTArray<float>& aData)
         : ControlMessage(aStream) {
       mData.SwapElements(aData);
--- a/dom/media/webaudio/AudioNodeStream.h
+++ b/dom/media/webaudio/AudioNodeStream.h
@@ -148,17 +148,16 @@ class AudioNodeStream : public Processed
   const OutputChunks& LastChunks() const { return mLastChunks; }
   bool MainThreadNeedsUpdates() const override {
     return ((mFlags & NEED_MAIN_THREAD_ENDED) && mEnded) ||
            (mFlags & NEED_MAIN_THREAD_CURRENT_TIME);
   }
 
   // Any thread
   AudioNodeEngine* Engine() { return mEngine; }
-  TrackRate SampleRate() const { return mSampleRate; }
 
   size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override;
   size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override;
 
   void SizeOfAudioNodesIncludingThis(MallocSizeOf aMallocSizeOf,
                                      AudioNodeSizes& aUsage) const;
 
   /*
--- a/dom/media/webaudio/BiquadFilterNode.cpp
+++ b/dom/media/webaudio/BiquadFilterNode.cpp
@@ -183,18 +183,18 @@ class BiquadFilterNodeEngine final : pub
       } else {
         input = static_cast<const float*>(aInput.mChannelData[i]);
         if (aInput.mVolume != 1.0) {
           AudioBlockCopyChannelWithScale(input, aInput.mVolume,
                                          alignedInputBuffer);
           input = alignedInputBuffer;
         }
       }
-      SetParamsOnBiquad(mBiquads[i], aStream->SampleRate(), mType, freq, q,
-                        gain, detune);
+      SetParamsOnBiquad(mBiquads[i], aStream->mSampleRate, mType, freq, q, gain,
+                        detune);
 
       mBiquads[i].process(input, aOutput->ChannelFloatsForWrite(i),
                           aInput.GetDuration());
     }
   }
 
   bool IsActive() const override { return !mBiquads.IsEmpty(); }
 
--- a/dom/media/webaudio/DelayNode.cpp
+++ b/dom/media/webaudio/DelayNode.cpp
@@ -60,17 +60,17 @@ class DelayNodeEngine final : public Aud
       default:
         NS_ERROR("Bad DelayNodeEngine TimelineParameter");
     }
   }
 
   void ProcessBlock(AudioNodeStream* aStream, GraphTime aFrom,
                     const AudioBlock& aInput, AudioBlock* aOutput,
                     bool* aFinished) override {
-    MOZ_ASSERT(aStream->SampleRate() == mDestination->SampleRate());
+    MOZ_ASSERT(aStream->mSampleRate == mDestination->mSampleRate);
 
     if (!aInput.IsSilentOrSubnormal()) {
       if (mLeftOverData <= 0) {
         RefPtr<PlayingRefChanged> refchanged =
             new PlayingRefChanged(aStream, PlayingRefChanged::ADDREF);
         aStream->Graph()->DispatchToMainThreadStableState(refchanged.forget());
       }
       mLeftOverData = mBuffer.MaxDelayTicks();
@@ -101,17 +101,17 @@ class DelayNodeEngine final : public Aud
     }
     mHaveProducedBeforeInput = false;
     mBuffer.NextBlock();
   }
 
   void UpdateOutputBlock(AudioNodeStream* aStream, GraphTime aFrom,
                          AudioBlock* aOutput, float minDelay) {
     float maxDelay = mMaxDelay;
-    float sampleRate = aStream->SampleRate();
+    float sampleRate = aStream->mSampleRate;
     ChannelInterpretation channelInterpretation =
         aStream->GetChannelInterpretation();
     if (mDelay.HasSimpleValue()) {
       // If this DelayNode is in a cycle, make sure the delay value is at least
       // one block, even if that is greater than maxDelay.
       float delayFrames = mDelay.GetValue() * sampleRate;
       float delayFramesClamped =
           std::max(minDelay, std::min(delayFrames, maxDelay));
--- a/dom/media/webaudio/DynamicsCompressorNode.cpp
+++ b/dom/media/webaudio/DynamicsCompressorNode.cpp
@@ -36,17 +36,17 @@ class DynamicsCompressorNodeEngine final
         // Keep the default value in sync with the default value in
         // DynamicsCompressorNode::DynamicsCompressorNode.
         ,
         mThreshold(-24.f),
         mKnee(30.f),
         mRatio(12.f),
         mAttack(0.003f),
         mRelease(0.25f),
-        mCompressor(new DynamicsCompressor(mDestination->SampleRate(), 2)) {}
+        mCompressor(new DynamicsCompressor(mDestination->mSampleRate, 2)) {}
 
   enum Parameters { THRESHOLD, KNEE, RATIO, ATTACK, RELEASE };
   void RecvTimelineEvent(uint32_t aIndex, AudioTimelineEvent& aEvent) override {
     MOZ_ASSERT(mDestination);
 
     WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent, mDestination);
 
     switch (aIndex) {
@@ -77,17 +77,17 @@ class DynamicsCompressorNodeEngine final
       // Just output silence
       *aOutput = aInput;
       return;
     }
 
     const uint32_t channelCount = aInput.ChannelCount();
     if (mCompressor->numberOfChannels() != channelCount) {
       // Create a new compressor object with a new channel count
-      mCompressor = new WebCore::DynamicsCompressor(aStream->SampleRate(),
+      mCompressor = new WebCore::DynamicsCompressor(aStream->mSampleRate,
                                                     aInput.ChannelCount());
     }
 
     StreamTime pos = mDestination->GraphTimeToStreamTime(aFrom);
     mCompressor->setParameterValue(DynamicsCompressor::ParamThreshold,
                                    mThreshold.GetValueAtTime(pos));
     mCompressor->setParameterValue(DynamicsCompressor::ParamKnee,
                                    mKnee.GetValueAtTime(pos));
--- a/dom/media/webaudio/OscillatorNode.cpp
+++ b/dom/media/webaudio/OscillatorNode.cpp
@@ -126,17 +126,17 @@ class OscillatorNodeEngine final : publi
     // End index switch.
   }
 
   void SetBuffer(AudioChunk&& aBuffer) override {
     MOZ_ASSERT(aBuffer.ChannelCount() == 2,
                "PeriodicWave should have sent two channels");
     MOZ_ASSERT(aBuffer.mVolume == 1.0f);
     mPeriodicWave = WebCore::PeriodicWave::create(
-        mSource->SampleRate(), aBuffer.ChannelData<float>()[0],
+        mSource->mSampleRate, aBuffer.ChannelData<float>()[0],
         aBuffer.ChannelData<float>()[1], aBuffer.mDuration,
         mCustomDisableNormalization);
   }
 
   void IncrementPhase() {
     const float twoPiFloat = float(2 * M_PI);
     mPhase += mPhaseIncrement;
     if (mPhase > twoPiFloat) {
@@ -167,17 +167,17 @@ class OscillatorNodeEngine final : publi
     }
     if (simpleDetune) {
       detune = mDetune.GetValue();
     } else {
       detune = mDetune.GetValueAtTime(ticks, count);
     }
 
     float finalFrequency = frequency * exp2(detune / 1200.);
-    float signalPeriod = mSource->SampleRate() / finalFrequency;
+    float signalPeriod = mSource->mSampleRate / finalFrequency;
     mRecomputeParameters = false;
 
     mPhaseIncrement = 2 * M_PI / signalPeriod;
 
     if (finalFrequency != mFinalFrequency) {
       mFinalFrequency = finalFrequency;
       return true;
     }
--- a/dom/media/webaudio/ScriptProcessorNode.cpp
+++ b/dom/media/webaudio/ScriptProcessorNode.cpp
@@ -239,17 +239,17 @@ class SharedBuffers final {
 class ScriptProcessorNodeEngine final : public AudioNodeEngine {
  public:
   ScriptProcessorNodeEngine(ScriptProcessorNode* aNode,
                             AudioDestinationNode* aDestination,
                             uint32_t aBufferSize,
                             uint32_t aNumberOfInputChannels)
       : AudioNodeEngine(aNode),
         mDestination(aDestination->Stream()),
-        mSharedBuffers(new SharedBuffers(mDestination->SampleRate())),
+        mSharedBuffers(new SharedBuffers(mDestination->mSampleRate)),
         mBufferSize(aBufferSize),
         mInputChannelCount(aNumberOfInputChannels),
         mInputWriteIndex(0) {}
 
   SharedBuffers* GetSharedBuffers() const { return mSharedBuffers; }
 
   enum {
     IS_CONNECTED,
--- a/dom/media/webaudio/WaveShaperNode.cpp
+++ b/dom/media/webaudio/WaveShaperNode.cpp
@@ -237,29 +237,29 @@ class WaveShaperNodeEngine final : publi
         PodZero(alignedScaledInput, WEBAUDIO_BLOCK_SIZE);
         inputSamples = alignedScaledInput;
       }
       float* outputBuffer = aOutput->ChannelFloatsForWrite(i);
       float* sampleBuffer;
 
       switch (mType) {
         case OverSampleType::None:
-          mResampler.Reset(channelCount, aStream->SampleRate(),
+          mResampler.Reset(channelCount, aStream->mSampleRate,
                            OverSampleType::None);
           ProcessCurve<1>(inputSamples, outputBuffer);
           break;
         case OverSampleType::_2x:
-          mResampler.Reset(channelCount, aStream->SampleRate(),
+          mResampler.Reset(channelCount, aStream->mSampleRate,
                            OverSampleType::_2x);
           sampleBuffer = mResampler.UpSample(i, inputSamples, 2);
           ProcessCurve<2>(sampleBuffer, sampleBuffer);
           mResampler.DownSample(i, outputBuffer, 2);
           break;
         case OverSampleType::_4x:
-          mResampler.Reset(channelCount, aStream->SampleRate(),
+          mResampler.Reset(channelCount, aStream->mSampleRate,
                            OverSampleType::_4x);
           sampleBuffer = mResampler.UpSample(i, inputSamples, 4);
           ProcessCurve<4>(sampleBuffer, sampleBuffer);
           mResampler.DownSample(i, outputBuffer, 4);
           break;
         default:
           MOZ_ASSERT_UNREACHABLE("We should never reach here");
       }
--- a/dom/media/webaudio/WebAudioUtils.cpp
+++ b/dom/media/webaudio/WebAudioUtils.cpp
@@ -18,18 +18,18 @@ namespace mozilla {
 LazyLogModule gWebAudioAPILog("WebAudioAPI");
 
 namespace dom {
 
 void WebAudioUtils::ConvertAudioTimelineEventToTicks(AudioTimelineEvent& aEvent,
                                                      AudioNodeStream* aDest) {
   aEvent.SetTimeInTicks(
       aDest->SecondsToNearestStreamTime(aEvent.Time<double>()));
-  aEvent.mTimeConstant *= aDest->SampleRate();
-  aEvent.mDuration *= aDest->SampleRate();
+  aEvent.mTimeConstant *= aDest->mSampleRate;
+  aEvent.mDuration *= aDest->mSampleRate;
 }
 
 void WebAudioUtils::Shutdown() { WebCore::HRTFDatabaseLoader::shutdown(); }
 
 int WebAudioUtils::SpeexResamplerProcess(SpeexResamplerState* aResampler,
                                          uint32_t aChannel, const float* aIn,
                                          uint32_t* aInLen, float* aOut,
                                          uint32_t* aOutLen) {