author | Ehsan Akhgari <ehsan@mozilla.com> |
Sun, 05 May 2013 11:48:45 -0400 | |
changeset 141843 | d6307d661d41c7657b17300b777ede8e9e9f6b2d |
parent 141842 | c619b0cb55f0c694a4fc79d99eadecd27ad68c59 |
child 141844 | ab91fc927a9fdb5d19e003df90e3903f3f986d96 |
push id | 2579 |
push user | akeybl@mozilla.com |
push date | Mon, 24 Jun 2013 18:52:47 +0000 |
treeherder | mozilla-beta@b69b7de8a05a [default view] [failures only] |
perfherder | [talos] [build metrics] [platform microbench] (compared to previous push) |
reviewers | roc |
bugs | 865247 |
milestone | 23.0a1 |
first release with | nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
|
last release without | nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
|
--- a/content/media/AudioNodeEngine.h +++ b/content/media/AudioNodeEngine.h @@ -140,19 +140,24 @@ AudioBlockPanStereoToStereo(const float float aOutputR[WEBAUDIO_BLOCK_SIZE]); /** * All methods of this class and its subclasses are called on the * MediaStreamGraph thread. */ class AudioNodeEngine { public: + // This should be compatible with AudioNodeStream::OutputChunks. + typedef nsAutoTArray<AudioChunk, 1> OutputChunks; + explicit AudioNodeEngine(dom::AudioNode* aNode) : mNode(aNode) , mNodeMutex("AudioNodeEngine::mNodeMutex") + , mInputCount(aNode ? aNode->NumberOfInputs() : 1) + , mOutputCount(aNode ? aNode->NumberOfOutputs() : 0) { MOZ_COUNT_CTOR(AudioNodeEngine); } virtual ~AudioNodeEngine() { MOZ_ASSERT(!mNode, "The node reference must be already cleared"); MOZ_COUNT_DTOR(AudioNodeEngine); } @@ -182,31 +187,56 @@ public: virtual void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer) { NS_ERROR("SetBuffer called on engine that doesn't support it"); } /** * Produce the next block of audio samples, given input samples aInput * (the mixed data for input 0). - * By default, simply returns the mixed input. * aInput is guaranteed to have float sample format (if it has samples at all) * and to have been resampled to IdealAudioRate(), and to have exactly * WEBAUDIO_BLOCK_SIZE samples. * *aFinished is set to false by the caller. If the callee sets it to true, * we'll finish the stream and not call this again. */ virtual void ProduceAudioBlock(AudioNodeStream* aStream, const AudioChunk& aInput, AudioChunk* aOutput, bool* aFinished) { + MOZ_ASSERT(mInputCount <= 1 && mOutputCount <= 1); *aOutput = aInput; } + /** + * Produce the next block of audio samples, given input samples in the aInput + * array. There is one input sample per active port in aInput, in order. + * This is the multi-input/output version of ProduceAudioBlock. 
Only one kind + * of ProduceAudioBlock is called on each node, depending on whether the + * number of inputs and outputs are both 1 or not. + * + * aInput is always guaranteed to not contain more input AudioChunks than the + * maximum number of inputs for the node. It is the responsibility of the + * overrides of this function to make sure they will only add a maximum number + * of AudioChunks to aOutput as advertised by the AudioNode implementation. + * An engine may choose to produce fewer outputs than advertised by the + * corresponding AudioNode, in which case it will be interpreted as a channel + * of silence. + */ + virtual void ProduceAudioBlocksOnPorts(AudioNodeStream* aStream, + const OutputChunks& aInput, + OutputChunks& aOutput, + bool* aFinished) + { + MOZ_ASSERT(mInputCount > 1 || mOutputCount > 1); + // Only produce one output port, and drop all other input ports. + aOutput[0] = aInput[0]; + } + Mutex& NodeMutex() { return mNodeMutex;} bool HasNode() const { return !!mNode; } dom::AudioNode* Node() const @@ -224,16 +254,21 @@ public: void ClearNode() { MOZ_ASSERT(NS_IsMainThread()); MOZ_ASSERT(mNode != nullptr); mNodeMutex.AssertCurrentThreadOwns(); mNode = nullptr; } + uint16_t InputCount() const { return mInputCount; } + uint16_t OutputCount() const { return mOutputCount; } + private: dom::AudioNode* mNode; Mutex mNodeMutex; + const uint16_t mInputCount; + const uint16_t mOutputCount; }; } #endif /* MOZILLA_AUDIONODEENGINE_H_ */
--- a/content/media/AudioNodeStream.cpp +++ b/content/media/AudioNodeStream.cpp @@ -232,31 +232,35 @@ AudioNodeStream::AllInputsFinished() con for (uint32_t i = 0; i < inputCount; ++i) { if (!mInputs[i]->GetSource()->IsFinishedOnGraphThread()) { return false; } } return !!inputCount; } -AudioChunk* -AudioNodeStream::ObtainInputBlock(AudioChunk* aTmpChunk) +void +AudioNodeStream::ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex) { uint32_t inputCount = mInputs.Length(); uint32_t outputChannelCount = 1; nsAutoTArray<AudioChunk*,250> inputChunks; for (uint32_t i = 0; i < inputCount; ++i) { + if (aPortIndex != mInputs[i]->InputNumber()) { + // This input is connected to a different port + continue; + } MediaStream* s = mInputs[i]->GetSource(); AudioNodeStream* a = static_cast<AudioNodeStream*>(s); MOZ_ASSERT(a == s->AsAudioNodeStream()); if (a->IsFinishedOnGraphThread() || a->IsAudioParamStream()) { continue; } - AudioChunk* chunk = &a->mLastChunk; + AudioChunk* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()]; MOZ_ASSERT(chunk); if (chunk->IsNull()) { continue; } inputChunks.AppendElement(chunk); outputChannelCount = GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length()); @@ -274,26 +278,27 @@ AudioNodeStream::ObtainInputBlock(AudioC break; case ChannelCountMode::Max: // Nothing to do here, just shut up the compiler warning. 
break; } uint32_t inputChunkCount = inputChunks.Length(); if (inputChunkCount == 0) { - aTmpChunk->SetNull(WEBAUDIO_BLOCK_SIZE); - return aTmpChunk; + aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE); + return; } if (inputChunkCount == 1 && inputChunks[0]->mChannelData.Length() == outputChannelCount) { - return inputChunks[0]; + aTmpChunk = *inputChunks[0]; + return; } - AllocateAudioBlock(outputChannelCount, aTmpChunk); + AllocateAudioBlock(outputChannelCount, &aTmpChunk); float silenceChannel[WEBAUDIO_BLOCK_SIZE] = {0.f}; // The static storage here should be 1KB, so it's fine nsAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer; for (uint32_t i = 0; i < inputChunkCount; ++i) { AudioChunk* chunk = inputChunks[i]; nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channels; channels.AppendElements(chunk->mChannelData); @@ -328,76 +333,82 @@ AudioNodeStream::ObtainInputBlock(AudioC // Drop the remaining channels channels.RemoveElementsAt(outputChannelCount, channels.Length() - outputChannelCount); } } for (uint32_t c = 0; c < channels.Length(); ++c) { const float* inputData = static_cast<const float*>(channels[c]); - float* outputData = static_cast<float*>(const_cast<void*>(aTmpChunk->mChannelData[c])); + float* outputData = static_cast<float*>(const_cast<void*>(aTmpChunk.mChannelData[c])); if (inputData) { if (i == 0) { AudioBlockCopyChannelWithScale(inputData, chunk->mVolume, outputData); } else { AudioBlockAddChannelWithScale(inputData, chunk->mVolume, outputData); } } else { if (i == 0) { memset(outputData, 0, WEBAUDIO_BLOCK_SIZE*sizeof(float)); } } } } - - return aTmpChunk; } // The MediaStreamGraph guarantees that this is actually one block, for // AudioNodeStreams. 
void AudioNodeStream::ProduceOutput(GraphTime aFrom, GraphTime aTo) { if (mMarkAsFinishedAfterThisBlock) { // This stream was finished the last time that we looked at it, and all // of the depending streams have finished their output as well, so now // it's time to mark this stream as finished. FinishOutput(); } StreamBuffer::Track* track = EnsureTrack(); - AudioChunk outputChunk; AudioSegment* segment = track->Get<AudioSegment>(); - outputChunk.SetNull(0); + mLastChunks.SetLength(1); + mLastChunks[0].SetNull(0); if (mInCycle) { // XXX DelayNode not supported yet so just produce silence - outputChunk.SetNull(WEBAUDIO_BLOCK_SIZE); + mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE); } else { - AudioChunk tmpChunk; - AudioChunk* inputChunk = ObtainInputBlock(&tmpChunk); + // We need to generate at least one input + uint16_t maxInputs = std::max(uint16_t(1), mEngine->InputCount()); + OutputChunks inputChunks; + inputChunks.SetLength(maxInputs); + for (uint16_t i = 0; i < maxInputs; ++i) { + ObtainInputBlock(inputChunks[i], i); + } bool finished = false; - mEngine->ProduceAudioBlock(this, *inputChunk, &outputChunk, &finished); + if (maxInputs <= 1 && mEngine->OutputCount() <= 1) { + mEngine->ProduceAudioBlock(this, inputChunks[0], &mLastChunks[0], &finished); + } else { + mEngine->ProduceAudioBlocksOnPorts(this, inputChunks, mLastChunks, &finished); + } if (finished) { mMarkAsFinishedAfterThisBlock = true; } } - mLastChunk = outputChunk; if (mKind == MediaStreamGraph::EXTERNAL_STREAM) { - segment->AppendAndConsumeChunk(&outputChunk); + segment->AppendAndConsumeChunk(&mLastChunks[0]); } else { - segment->AppendNullData(outputChunk.GetDuration()); + segment->AppendNullData(mLastChunks[0].GetDuration()); } for (uint32_t j = 0; j < mListeners.Length(); ++j) { MediaStreamListener* l = mListeners[j]; - AudioChunk copyChunk = outputChunk; + AudioChunk copyChunk = mLastChunks[0]; AudioSegment tmpSegment; tmpSegment.AppendAndConsumeChunk(&copyChunk); l->NotifyQueuedTrackChanges(Graph(), 
AUDIO_NODE_STREAM_TRACK_ID, IdealAudioRate(), segment->GetDuration(), 0, tmpSegment); } }
--- a/content/media/AudioNodeStream.h +++ b/content/media/AudioNodeStream.h @@ -36,16 +36,18 @@ class ThreadSharedFloatArrayBufferList; * An AudioNodeStream has an AudioNodeEngine plugged into it that does the * actual audio processing. AudioNodeStream contains the glue code that * integrates audio processing with the MediaStreamGraph. */ class AudioNodeStream : public ProcessedMediaStream { public: enum { AUDIO_TRACK = 1 }; + typedef nsAutoTArray<AudioChunk, 1> OutputChunks; + /** * Transfers ownership of aEngine to the new AudioNodeStream. */ AudioNodeStream(AudioNodeEngine* aEngine, MediaStreamGraph::AudioNodeStreamKind aKind) : ProcessedMediaStream(nullptr), mEngine(aEngine), mKind(aKind), @@ -92,34 +94,34 @@ public: dom::ChannelInterpretation aChannelInterpretation); virtual void ProduceOutput(GraphTime aFrom, GraphTime aTo); TrackTicks GetCurrentPosition(); bool AllInputsFinished() const; bool IsAudioParamStream() const { return mAudioParamStream; } - const AudioChunk& LastChunk() const + const OutputChunks& LastChunks() const { - return mLastChunk; + return mLastChunks; } // Any thread AudioNodeEngine* Engine() { return mEngine; } protected: void FinishOutput(); StreamBuffer::Track* EnsureTrack(); - AudioChunk* ObtainInputBlock(AudioChunk* aTmpChunk); + void ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex); // The engine that will generate output for this node. nsAutoPtr<AudioNodeEngine> mEngine; // The last block produced by this node. - AudioChunk mLastChunk; + OutputChunks mLastChunks; // Whether this is an internal or external stream MediaStreamGraph::AudioNodeStreamKind mKind; // The number of input channels that this stream requires. 0 means don't care. uint32_t mNumberOfInputChannels; // The mixing modes struct { dom::ChannelCountMode mChannelCountMode : 16; dom::ChannelInterpretation mChannelInterpretation : 16;
--- a/content/media/webaudio/AnalyserNode.cpp +++ b/content/media/webaudio/AnalyserNode.cpp @@ -55,17 +55,17 @@ public: : AudioNodeEngine(aNode) { MOZ_ASSERT(NS_IsMainThread()); } virtual void ProduceAudioBlock(AudioNodeStream* aStream, const AudioChunk& aInput, AudioChunk* aOutput, - bool* aFinished) + bool* aFinished) MOZ_OVERRIDE { *aOutput = aInput; MutexAutoLock lock(NodeMutex()); if (Node() && aInput.mChannelData.Length() > 0) { nsRefPtr<TransferBuffer> transfer = new TransferBuffer(aStream, aInput);
--- a/content/media/webaudio/AudioBufferSourceNode.h +++ b/content/media/webaudio/AudioBufferSourceNode.h @@ -24,17 +24,17 @@ public: virtual void DestroyMediaStream() MOZ_OVERRIDE { if (mStream) { mStream->RemoveMainThreadListener(this); } AudioNode::DestroyMediaStream(); } - virtual uint32_t NumberOfInputs() const MOZ_FINAL MOZ_OVERRIDE + virtual uint16_t NumberOfInputs() const MOZ_FINAL MOZ_OVERRIDE { return 0; } virtual AudioBufferSourceNode* AsAudioBufferSourceNode() MOZ_OVERRIDE { return this; } NS_DECL_ISUPPORTS_INHERITED
--- a/content/media/webaudio/AudioDestinationNode.h +++ b/content/media/webaudio/AudioDestinationNode.h @@ -19,17 +19,17 @@ class AudioDestinationNode : public Audi public: AudioDestinationNode(AudioContext* aContext, MediaStreamGraph* aGraph); NS_DECL_ISUPPORTS_INHERITED virtual JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aScope) MOZ_OVERRIDE; - virtual uint32_t NumberOfOutputs() const MOZ_FINAL MOZ_OVERRIDE + virtual uint16_t NumberOfOutputs() const MOZ_FINAL MOZ_OVERRIDE { return 0; } }; } }
--- a/content/media/webaudio/AudioNode.h +++ b/content/media/webaudio/AudioNode.h @@ -150,18 +150,18 @@ public: virtual void Connect(AudioParam& aDestination, uint32_t aOutput, ErrorResult& aRv); virtual void Disconnect(uint32_t aOutput, ErrorResult& aRv); // The following two virtual methods must be implemented by each node type // to provide their number of input and output ports. These numbers are // constant for the lifetime of the node. Both default to 1. - virtual uint32_t NumberOfInputs() const { return 1; } - virtual uint32_t NumberOfOutputs() const { return 1; } + virtual uint16_t NumberOfInputs() const { return 1; } + virtual uint16_t NumberOfOutputs() const { return 1; } uint32_t ChannelCount() const { return mChannelCount; } void SetChannelCount(uint32_t aChannelCount) { mChannelCount = aChannelCount; SendChannelMixingParametersToStream(); } ChannelCountMode ChannelCountModeValue() const
--- a/content/media/webaudio/AudioParam.cpp +++ b/content/media/webaudio/AudioParam.cpp @@ -125,17 +125,17 @@ AudioParamTimeline::AudioNodeInputValue( { MOZ_ASSERT(mStream); // If we have a chunk produced by the AudioNode inputs to the AudioParam, // get its value now. We use aCounter to tell us which frame of the last // AudioChunk to look at. float audioNodeInputValue = 0.0f; const AudioChunk& lastAudioNodeChunk = - static_cast<AudioNodeStream*>(mStream.get())->LastChunk(); + static_cast<AudioNodeStream*>(mStream.get())->LastChunks()[0]; if (!lastAudioNodeChunk.IsNull()) { MOZ_ASSERT(lastAudioNodeChunk.GetDuration() == WEBAUDIO_BLOCK_SIZE); audioNodeInputValue = static_cast<const float*>(lastAudioNodeChunk.mChannelData[0])[aCounter]; } return audioNodeInputValue; }