author | Boris Zbarsky <bzbarsky@mit.edu>
date | Tue, 11 Feb 2020 16:58:33 +0000
changeset 513370 | c4ae1e6ed447e79fe58e0536f0c9248c2e837210 |
parent 513369 | 9a0b9cc394b028cdd7609a11d271e0589f841255 |
child 513371 | 92dd7149719c1537792107db865bedbc5f87688c |
push id | 37112 |
push user | btara@mozilla.com |
push date | Tue, 11 Feb 2020 21:47:36 +0000 |
treeherder | mozilla-central@79d5ac0c4227
reviewers | padenot |
bugs | 1613827 |
milestone | 75.0a1 |
first release with | nightly linux32, nightly linux64, nightly mac, nightly win32, nightly win64
last release without | nightly linux32, nightly linux64, nightly mac, nightly win32, nightly win64
--- a/dom/media/MediaData.cpp
+++ b/dom/media/MediaData.cpp
@@ -113,18 +113,20 @@ AudioDataValue* AudioData::GetAdjustedDa
   return mAudioData.Data() + mDataOffset;
 }
 
 void AudioData::EnsureAudioBuffer() {
   if (mAudioBuffer || !mAudioData) {
     return;
   }
   const AudioDataValue* srcData = GetAdjustedData();
-  mAudioBuffer =
-      SharedBuffer::Create(mFrames * mChannels * sizeof(AudioDataValue));
+  CheckedInt<size_t> bufferSize(sizeof(AudioDataValue));
+  bufferSize *= mFrames;
+  bufferSize *= mChannels;
+  mAudioBuffer = SharedBuffer::Create(bufferSize);
 
   AudioDataValue* destData = static_cast<AudioDataValue*>(mAudioBuffer->Data());
   for (uint32_t i = 0; i < mFrames; ++i) {
     for (uint32_t j = 0; j < mChannels; ++j) {
       destData[j * mFrames + i] = srcData[i * mChannels + j];
     }
   }
 }
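Every call site in this patch follows the same pattern: seed a CheckedInt<size_t> with the element size, multiply in one dimension at a time, and hand the still-checked value to SharedBuffer::Create, which rejects it if any step overflowed. As a rough sketch of the arithmetic CheckedInt<size_t> tracks per step (CheckedMul and AudioBufferSize below are hypothetical illustrations, not mfbt or Gecko APIs):

// Sketch only: mfbt's CheckedInt<size_t> is the real mechanism; this
// shows the per-step overflow test it effectively performs.
#include <cstddef>
#include <limits>
#include <optional>

static std::optional<size_t> CheckedMul(size_t aA, size_t aB) {
  // aA * aB wraps around iff aB != 0 and aA > SIZE_MAX / aB.
  if (aB != 0 && aA > std::numeric_limits<size_t>::max() / aB) {
    return std::nullopt;
  }
  return aA * aB;
}

// Equivalent of:
//   CheckedInt<size_t> bufferSize(sizeof(AudioDataValue));
//   bufferSize *= mFrames;
//   bufferSize *= mChannels;
static std::optional<size_t> AudioBufferSize(size_t aFrames, size_t aChannels) {
  // float stands in for AudioDataValue here.
  std::optional<size_t> size = CheckedMul(sizeof(float), aFrames);
  return size ? CheckedMul(*size, aChannels) : std::nullopt;
}

With the old code, an oversized mFrames * mChannels could wrap around to a small allocation that the copy loop above then overruns; with CheckedInt the invalid size propagates into SharedBuffer::Create and is refused there.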
--- a/dom/media/SharedBuffer.h
+++ b/dom/media/SharedBuffer.h
@@ -52,40 +52,55 @@ class ThreadSharedObject {
  * This only guarantees 4-byte alignment of the data. For alignment we simply
  * assume that the memory from malloc is at least 4-byte aligned and the
  * refcount's size is large enough that SharedBuffer's size is divisible by 4.
  */
 class SharedBuffer : public ThreadSharedObject {
  public:
   void* Data() { return this + 1; }
 
-  static already_AddRefed<SharedBuffer> Create(size_t aSize,
+  // Ensure that the caller has a CheckedInt. We have to take one by
+  // non-const reference to do that, because if we take one by const
+  // reference or value or rvalue reference the implicit constructor on
+  // CheckedInt will helpfully synthesize one for us at the callsite
+  // even if the caller passes a raw size_t.
+  static already_AddRefed<SharedBuffer> Create(CheckedInt<size_t>& aSize,
                                                const fallible_t&) {
-    void* m = operator new(AllocSize(aSize), fallible);
+    CheckedInt<size_t> allocSize = AllocSize(aSize, fallible);
+    if (!allocSize.isValid()) {
+      return nullptr;
+    }
+    void* m = operator new(allocSize.value(), fallible);
     if (!m) {
       return nullptr;
     }
     RefPtr<SharedBuffer> p = new (m) SharedBuffer();
     return p.forget();
   }
 
-  static already_AddRefed<SharedBuffer> Create(size_t aSize) {
+  static already_AddRefed<SharedBuffer> Create(CheckedInt<size_t>& aSize) {
     void* m = operator new(AllocSize(aSize));
     RefPtr<SharedBuffer> p = new (m) SharedBuffer();
     return p.forget();
   }
 
   size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override {
     return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
   }
 
  private:
-  static size_t AllocSize(size_t aDataSize) {
+  static CheckedInt<size_t> AllocSize(CheckedInt<size_t> aDataSize,
+                                      const fallible_t&) {
     CheckedInt<size_t> size = sizeof(SharedBuffer);
     size += aDataSize;
+    return size;
+  }
+
+  static size_t AllocSize(CheckedInt<size_t> aDataSize) {
+    CheckedInt<size_t> size = AllocSize(aDataSize, fallible);
     if (!size.isValid()) {
       MOZ_CRASH();
     }
     return size.value();
   }
 
   SharedBuffer() {
     NS_ASSERTION(
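The non-const reference parameter on Create is doing real work. CheckedInt has an implicit converting constructor, so a by-value, const-reference, or rvalue-reference parameter would let a caller pass a raw (possibly already overflowed) size_t and have a temporary CheckedInt synthesized at the callsite; a non-const lvalue reference cannot bind to that temporary, so the caller is forced to do its size arithmetic in CheckedInt form. A self-contained sketch of the effect, with illustrative names (Checked, TakesBy*, Caller are not Gecko APIs):

#include <cstddef>

// Stand-in for mfbt's CheckedInt: note the implicit converting constructor.
template <typename T>
struct Checked {
  Checked(T aValue) : mValue(aValue) {}  // implicit, as on CheckedInt
  T mValue;
};

void TakesByValue(Checked<size_t>) {}            // temporary synthesized silently
void TakesByConstRef(const Checked<size_t>&) {}  // temporary binds here too
void TakesByRef(Checked<size_t>&) {}             // demands a real lvalue

void Caller(size_t aRawSize) {
  TakesByValue(aRawSize);     // compiles: implicit conversion at the callsite
  TakesByConstRef(aRawSize);  // compiles: const ref binds the temporary
  // TakesByRef(aRawSize);    // error: non-const ref won't bind a temporary
  Checked<size_t> checked(aRawSize);
  TakesByRef(checked);        // OK: the caller had to construct a Checked
}

Splitting AllocSize into a fallible overload, which returns the still-checked CheckedInt, and an infallible one, which MOZ_CRASHes on overflow, keeps both Create paths honest: the fallible path can report failure with nullptr, while the infallible path turns a would-be undersized allocation into a safe crash.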
--- a/dom/media/gtest/AudioGenerator.cpp
+++ b/dom/media/gtest/AudioGenerator.cpp
@@ -9,18 +9,19 @@
 #include "AudioSegment.h"
 
 using namespace mozilla;
 
 AudioGenerator::AudioGenerator(int32_t aChannels, int32_t aSampleRate)
     : mGenerator(aSampleRate, 1000), mChannels(aChannels) {}
 
 void AudioGenerator::Generate(AudioSegment& aSegment, const int32_t& aSamples) {
-  RefPtr<SharedBuffer> buffer =
-      SharedBuffer::Create(aSamples * sizeof(int16_t));
+  CheckedInt<size_t> bufferSize(sizeof(int16_t));
+  bufferSize *= aSamples;
+  RefPtr<SharedBuffer> buffer = SharedBuffer::Create(bufferSize);
   int16_t* dest = static_cast<int16_t*>(buffer->Data());
   mGenerator.generate(dest, aSamples);
   AutoTArray<const int16_t*, 1> channels;
   for (int32_t i = 0; i < mChannels; i++) {
     channels.AppendElement(dest);
   }
   aSegment.AppendFrames(buffer.forget(), channels, aSamples,
                         PRINCIPAL_HANDLE_NONE);
--- a/dom/media/webaudio/ConvolverNode.cpp
+++ b/dom/media/webaudio/ConvolverNode.cpp
@@ -413,18 +413,21 @@ void ConvolverNode::SetBuffer(JSContext*
   // Reverb expects data in float format.
   // Convert on the main thread so as to minimize allocations on the audio
   // thread.
   // Reverb will dispose of the buffer once initialized, so convert here
   // and leave the smaller arrays in the AudioBuffer.
   // There is currently no value in providing 16/32-byte aligned data
   // because PadAndMakeScaledDFT() will copy the data (without SIMD
   // instructions) to aligned arrays for the FFT.
-  RefPtr<SharedBuffer> floatBuffer = SharedBuffer::Create(
-      sizeof(float) * data.mDuration * data.ChannelCount());
+  CheckedInt<size_t> bufferSize(sizeof(float));
+  bufferSize *= data.mDuration;
+  bufferSize *= data.ChannelCount();
+  RefPtr<SharedBuffer> floatBuffer =
+      SharedBuffer::Create(bufferSize, fallible);
   if (!floatBuffer) {
     aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
     return;
   }
   auto floatData = static_cast<float*>(floatBuffer->Data());
   for (size_t i = 0; i < data.ChannelCount(); ++i) {
     ConvertAudioSamples(data.ChannelData<int16_t>()[i], floatData,
                         data.mDuration);
--- a/dom/media/webaudio/MediaBufferDecoder.cpp
+++ b/dom/media/webaudio/MediaBufferDecoder.cpp
@@ -485,18 +485,20 @@ void MediaDecodeTask::FinishDecode() {
   if (!buffer) {
     ReportFailureOnMainThread(WebAudioDecodeJob::UnknownError);
     return;
   }
   for (uint32_t i = 0; i < channelCount; ++i) {
     mDecodeJob.mBuffer.mChannelData[i] = buffer->GetData(i);
   }
 #else
-  RefPtr<SharedBuffer> buffer = SharedBuffer::Create(
-      sizeof(AudioDataValue) * resampledFrames * channelCount);
+  CheckedInt<size_t> bufferSize(sizeof(AudioDataValue));
+  bufferSize *= resampledFrames;
+  bufferSize *= channelCount;
+  RefPtr<SharedBuffer> buffer = SharedBuffer::Create(bufferSize);
   if (!buffer) {
     ReportFailureOnMainThread(WebAudioDecodeJob::UnknownError);
     return;
   }
   auto data = static_cast<AudioDataValue*>(buffer->Data());
   for (uint32_t i = 0; i < channelCount; ++i) {
     mDecodeJob.mBuffer.mChannelData[i] = data;
     data += resampledFrames;
--- a/dom/media/webaudio/PeriodicWave.cpp
+++ b/dom/media/webaudio/PeriodicWave.cpp
@@ -24,18 +24,20 @@ PeriodicWave::PeriodicWave(AudioContext*
   MOZ_ASSERT((aRealData || aImagData) || aLength == 2);
 
   // Caller should have checked this and thrown.
   MOZ_ASSERT(aLength > 0);
   mCoefficients.mDuration = aLength;
 
   // Copy coefficient data.
   // The SharedBuffer and two arrays share a single allocation.
-  RefPtr<SharedBuffer> buffer =
-      SharedBuffer::Create(sizeof(float) * aLength * 2, fallible);
+  CheckedInt<size_t> bufferSize(sizeof(float));
+  bufferSize *= aLength;
+  bufferSize *= 2;
+  RefPtr<SharedBuffer> buffer = SharedBuffer::Create(bufferSize, fallible);
   if (!buffer) {
     aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
     return;
   }
 
   auto data = static_cast<float*>(buffer->Data());
   mCoefficients.mBuffer = std::move(buffer);
--- a/dom/media/webrtc/MediaEngineDefault.cpp
+++ b/dom/media/webrtc/MediaEngineDefault.cpp
@@ -493,17 +493,19 @@ nsresult MediaEngineDefaultAudioSource::
 }
 
 void AudioSourcePullListener::NotifyPull(MediaTrackGraph* aGraph,
                                          TrackTime aEndOfAppendedData,
                                          TrackTime aDesiredTime) {
   TRACE_AUDIO_CALLBACK_COMMENT("SourceMediaTrack %p", mTrack.get());
   AudioSegment segment;
   TrackTicks delta = aDesiredTime - aEndOfAppendedData;
-  RefPtr<SharedBuffer> buffer = SharedBuffer::Create(delta * sizeof(int16_t));
+  CheckedInt<size_t> bufferSize(sizeof(int16_t));
+  bufferSize *= delta;
+  RefPtr<SharedBuffer> buffer = SharedBuffer::Create(bufferSize);
   int16_t* dest = static_cast<int16_t*>(buffer->Data());
 
   mSineGenerator->generate(dest, delta);
   AutoTArray<const int16_t*, 1> channels;
   channels.AppendElement(dest);
   segment.AppendFrames(buffer.forget(), channels, delta, mPrincipalHandle);
   mTrack->AppendData(&segment);
 }
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -928,18 +928,20 @@ void AudioInputProcessing::PacketizeAndP
   StreamConfig inputConfig(aRate, aChannels,
                            false /* we don't use typing detection*/);
   StreamConfig outputConfig = inputConfig;
 
   // Bug 1404965: Get the right delay here, it saves some work down the line.
   mAudioProcessing->set_stream_delay_ms(0);
 
   // Bug 1414837: find a way to not allocate here.
-  RefPtr<SharedBuffer> buffer = SharedBuffer::Create(
-      mPacketizerInput->PacketSize() * aChannels * sizeof(float));
+  CheckedInt<size_t> bufferSize(sizeof(float));
+  bufferSize *= mPacketizerInput->PacketSize();
+  bufferSize *= aChannels;
+  RefPtr<SharedBuffer> buffer = SharedBuffer::Create(bufferSize);
 
   // Prepare channel pointers to the SharedBuffer created above.
   AutoTArray<float*, 8> processedOutputChannelPointers;
   AutoTArray<const float*, 8> processedOutputChannelPointersConst;
   processedOutputChannelPointers.SetLength(aChannels);
   processedOutputChannelPointersConst.SetLength(aChannels);
   offset = 0;
@@ -996,18 +998,20 @@ void AudioInputProcessing::InsertInGraph
 #ifdef DEBUG
   mLastCallbackAppendTime = mTrack->GraphImpl()->IterationEnd();
 #endif
   mLiveFramesAppended = true;
 
   MOZ_ASSERT(aChannels >= 1 && aChannels <= 8, "Support up to 8 channels");
 
   AudioSegment segment;
-  RefPtr<SharedBuffer> buffer =
-      SharedBuffer::Create(aFrames * aChannels * sizeof(T));
+  CheckedInt<size_t> bufferSize(sizeof(T));
+  bufferSize *= aFrames;
+  bufferSize *= aChannels;
+  RefPtr<SharedBuffer> buffer = SharedBuffer::Create(bufferSize);
   AutoTArray<const T*, 8> channels;
   if (aChannels == 1) {
     PodCopy(static_cast<T*>(buffer->Data()), aBuffer, aFrames);
     channels.AppendElement(static_cast<T*>(buffer->Data()));
   } else {
     channels.SetLength(aChannels);
     AutoTArray<T*, 8> write_channels;
     write_channels.SetLength(aChannels);
--- a/dom/media/webspeech/recognition/SpeechRecognition.cpp
+++ b/dom/media/webspeech/recognition/SpeechRecognition.cpp
@@ -1008,18 +1008,19 @@ uint32_t SpeechRecognition::FillSamplesB
  * created.
  */
 uint32_t SpeechRecognition::SplitSamplesBuffer(
     const int16_t* aSamplesBuffer, uint32_t aSampleCount,
     nsTArray<RefPtr<SharedBuffer>>& aResult) {
   uint32_t chunkStart = 0;
 
   while (chunkStart + mAudioSamplesPerChunk <= aSampleCount) {
-    RefPtr<SharedBuffer> chunk =
-        SharedBuffer::Create(mAudioSamplesPerChunk * sizeof(int16_t));
+    CheckedInt<size_t> bufferSize(sizeof(int16_t));
+    bufferSize *= mAudioSamplesPerChunk;
+    RefPtr<SharedBuffer> chunk = SharedBuffer::Create(bufferSize);
 
     PodCopy(static_cast<short*>(chunk->Data()), aSamplesBuffer + chunkStart,
             mAudioSamplesPerChunk);
 
     aResult.AppendElement(chunk.forget());
     chunkStart += mAudioSamplesPerChunk;
   }
@@ -1076,18 +1077,19 @@ void SpeechRecognition::FeedAudioData(al
   if (samplesIndex < aDuration) {
     samplesIndex += SplitSamplesBuffer(samples + samplesIndex,
                                        aDuration - samplesIndex, chunksToSend);
   }
 
   // buffer remaining samples
   if (samplesIndex < aDuration) {
     mBufferedSamples = 0;
-    mAudioSamplesBuffer =
-        SharedBuffer::Create(mAudioSamplesPerChunk * sizeof(int16_t));
+    CheckedInt<size_t> bufferSize(sizeof(int16_t));
+    bufferSize *= mAudioSamplesPerChunk;
+    mAudioSamplesBuffer = SharedBuffer::Create(bufferSize);
 
     FillSamplesBuffer(samples + samplesIndex, aDuration - samplesIndex);
   }
 
   AudioSegment* segment = CreateAudioSegment(chunksToSend);
   RefPtr<SpeechEvent> event = new SpeechEvent(this, EVENT_AUDIO_DATA);
   event->mAudioSegment = segment;
   event->mProvider = aProvider;
--- a/dom/media/webspeech/recognition/SpeechTrackListener.cpp
+++ b/dom/media/webspeech/recognition/SpeechTrackListener.cpp
@@ -64,18 +64,20 @@ void SpeechTrackListener::NotifyQueuedCh
   }
 }
 
 template <typename SampleFormatType>
 void SpeechTrackListener::ConvertAndDispatchAudioChunk(int aDuration,
                                                        float aVolume,
                                                        SampleFormatType* aData,
                                                        TrackRate aTrackRate) {
-  RefPtr<SharedBuffer> samples(SharedBuffer::Create(aDuration * 1 *  // channel
-                                                    sizeof(int16_t)));
+  CheckedInt<size_t> bufferSize(sizeof(int16_t));
+  bufferSize *= aDuration;
+  bufferSize *= 1;  // channel
+  RefPtr<SharedBuffer> samples(SharedBuffer::Create(bufferSize));
 
   int16_t* to = static_cast<int16_t*>(samples->Data());
   ConvertAudioSamplesWithScale(aData, to, aDuration, aVolume);
 
   mRecognition->FeedAudioData(samples.forget(), aDuration, this, aTrackRate);
 }
 
 void SpeechTrackListener::NotifyEnded(MediaTrackGraph* aGraph) {
--- a/media/webrtc/signaling/gtest/mediapipeline_unittest.cpp
+++ b/media/webrtc/signaling/gtest/mediapipeline_unittest.cpp
@@ -103,18 +103,21 @@ class FakeAudioTrack : public mozilla::P
     const int AUDIO_BUFFER_SIZE = 1600;
     const int NUM_CHANNELS = 2;
 
     mozilla::MutexAutoLock lock(t->mMutex);
     if (t->mSuspended) {
       return;
     }
 
-    RefPtr<mozilla::SharedBuffer> samples = mozilla::SharedBuffer::Create(
-        AUDIO_BUFFER_SIZE * NUM_CHANNELS * sizeof(int16_t));
+    CheckedInt<size_t> bufferSize(sizeof(int16_t));
+    bufferSize *= NUM_CHANNELS;
+    bufferSize *= AUDIO_BUFFER_SIZE;
+    RefPtr<mozilla::SharedBuffer> samples =
+        mozilla::SharedBuffer::Create(bufferSize);
     int16_t* data = reinterpret_cast<int16_t*>(samples->Data());
     for (int i = 0; i < (AUDIO_BUFFER_SIZE * NUM_CHANNELS); i++) {
       // saw tooth audio sample
       data[i] = ((t->mCount % 8) * 4000) - (7 * 4000) / 2;
       t->mCount++;
     }
 
     mozilla::AudioSegment segment;
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
@@ -1479,18 +1479,19 @@ class MediaPipelineReceiveAudio::Pipelin
       PodArrayZero(scratchBuffer);
     }
 
     MOZ_RELEASE_ASSERT(samplesLength <= scratchBufferLength);
 
     MOZ_LOG(gMediaPipelineLog, LogLevel::Debug,
             ("Audio conduit returned buffer of length %zu", samplesLength));
 
-    RefPtr<SharedBuffer> samples =
-        SharedBuffer::Create(samplesLength * sizeof(uint16_t));
+    CheckedInt<size_t> bufferSize(sizeof(uint16_t));
+    bufferSize *= samplesLength;
+    RefPtr<SharedBuffer> samples = SharedBuffer::Create(bufferSize);
     int16_t* samplesData = static_cast<int16_t*>(samples->Data());
     AudioSegment segment;
 
     size_t frames = samplesLength / channelCount;
     if (mForceSilence) {
       segment.AppendNullData(frames);
     } else {
       AutoTArray<int16_t*, 2> channels;
       AutoTArray<const int16_t*, 2> outputChannels;