--- a/dom/media/AudioCompactor.h
+++ b/dom/media/AudioCompactor.h
@@ -36,33 +36,33 @@ public:
uint32_t aFrames, uint32_t aChannels, CopyFunc aCopyFunc)
{
// If we are losing more than a reasonable amount to padding, try to chunk
// the data.
size_t maxSlop = AudioDataSize(aFrames, aChannels) / MAX_SLOP_DIVISOR;
while (aFrames > 0) {
uint32_t samples = GetChunkSamples(aFrames, aChannels, maxSlop);
- nsAutoArrayPtr<AudioDataValue> buffer(new AudioDataValue[samples]);
+ auto buffer = MakeUnique<AudioDataValue[]>(samples);
// Copy audio data to buffer using caller-provided functor.
- uint32_t framesCopied = aCopyFunc(buffer, samples);
+ uint32_t framesCopied = aCopyFunc(buffer.get(), samples);
NS_ASSERTION(framesCopied <= aFrames, "functor copied too many frames");
CheckedInt64 duration = FramesToUsecs(framesCopied, aSampleRate);
if (!duration.isValid()) {
return false;
}
mQueue.Push(new AudioData(aOffset,
aTime,
duration.value(),
framesCopied,
- buffer.forget(),
+ Move(buffer),
aChannels,
aSampleRate));
// Remove the frames we just pushed into the queue and loop if there is
// more to be done.
aTime += duration.value();
aFrames -= framesCopied;
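Note: the change above is the template for the rest of this patch: a raw `new AudioDataValue[n]` owned by nsAutoArrayPtr becomes MakeUnique<AudioDataValue[]>(n), raw-pointer call sites go through .get(), and ownership transfer switches from forget() to Move(). A minimal standalone sketch of the same pattern, assuming std::unique_ptr behaves like mozilla::UniquePtr (it is the model for it); the Sink type is hypothetical:

#include <cstddef>
#include <cstring>
#include <memory>
#include <utility>

using AudioDataValue = float;  // assumption: a float-sample build

// Hypothetical stand-in for AudioData: takes ownership by value.
struct Sink {
  std::unique_ptr<AudioDataValue[]> mData;
  explicit Sink(std::unique_ptr<AudioDataValue[]> aData)
    : mData(std::move(aData)) {}
};

int main() {
  const std::size_t samples = 1024;
  auto buffer = std::make_unique<AudioDataValue[]>(samples);        // MakeUnique
  std::memset(buffer.get(), 0, samples * sizeof(AudioDataValue));   // raw access: get()
  Sink sink(std::move(buffer));  // ownership handoff: Move() replaces forget()
  return buffer ? 1 : 0;         // buffer is null after the move
}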
--- a/dom/media/MediaData.cpp
+++ b/dom/media/MediaData.cpp
@@ -41,40 +41,38 @@ AudioData::EnsureAudioBuffer()
data[j*mFrames + i] = mAudioData[i*mChannels + j];
}
}
}
size_t
AudioData::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
{
- size_t size = aMallocSizeOf(this) + aMallocSizeOf(mAudioData);
+ size_t size = aMallocSizeOf(this) + aMallocSizeOf(mAudioData.get());
if (mAudioBuffer) {
size += mAudioBuffer->SizeOfIncludingThis(aMallocSizeOf);
}
return size;
}
/* static */
already_AddRefed<AudioData>
AudioData::TransferAndUpdateTimestampAndDuration(AudioData* aOther,
int64_t aTimestamp,
int64_t aDuration)
{
NS_ENSURE_TRUE(aOther, nullptr);
RefPtr<AudioData> v = new AudioData(aOther->mOffset,
- aTimestamp,
- aDuration,
- aOther->mFrames,
- aOther->mAudioData,
- aOther->mChannels,
- aOther->mRate);
+ aTimestamp,
+ aDuration,
+ aOther->mFrames,
+ Move(aOther->mAudioData),
+ aOther->mChannels,
+ aOther->mRate);
v->mDiscontinuity = aOther->mDiscontinuity;
- // Remove aOther's AudioData as it can't be shared across two targets.
- aOther->mAudioData.forget();
return v.forget();
}
static bool
ValidatePlane(const VideoData::YCbCrBuffer::Plane& aPlane)
{
return aPlane.mWidth <= PlanarYCbCrImage::MAX_DIMENSION &&
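Note: the explicit aOther->mAudioData.forget() (and its comment) can be dropped above because Move(aOther->mAudioData) already leaves the source pointer null; the "can't be shared across two targets" invariant now falls out of move semantics. A minimal sketch of that guarantee, assuming std::unique_ptr semantics:

#include <cassert>
#include <memory>
#include <utility>

int main() {
  auto src = std::make_unique<int[]>(8);
  auto dst = std::move(src);  // what Move(aOther->mAudioData) does
  assert(!src);               // the move itself nulls the source...
  assert(dst);                // ...so no manual forget() is needed
  return 0;
}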
--- a/dom/media/MediaData.h
+++ b/dom/media/MediaData.h
@@ -8,16 +8,17 @@
#include "nsSize.h"
#include "mozilla/gfx/Rect.h"
#include "nsRect.h"
#include "AudioSampleFormat.h"
#include "nsIMemoryReporter.h"
#include "SharedBuffer.h"
#include "mozilla/RefPtr.h"
+#include "mozilla/UniquePtr.h"
#include "nsTArray.h"
namespace mozilla {
namespace layers {
class Image;
class ImageContainer;
} // namespace layers
@@ -118,23 +119,23 @@ protected:
// Holds a chunk of decoded audio frames.
class AudioData : public MediaData {
public:
AudioData(int64_t aOffset,
int64_t aTime,
int64_t aDuration,
uint32_t aFrames,
- AudioDataValue* aData,
+ UniquePtr<AudioDataValue[]> aData,
uint32_t aChannels,
uint32_t aRate)
: MediaData(sType, aOffset, aTime, aDuration, aFrames)
, mChannels(aChannels)
, mRate(aRate)
- , mAudioData(aData) {}
+ , mAudioData(Move(aData)) {}
static const Type sType = AUDIO_DATA;
static const char* sTypeName;
// Creates a new AudioData identical to aOther, but with a different
// specified timestamp and duration. All data from aOther is copied
// into the new AudioData except the audio data, which is transferred.
// After such a call, the original aOther is unusable.
@@ -149,17 +150,17 @@ public:
void EnsureAudioBuffer();
const uint32_t mChannels;
const uint32_t mRate;
// At least one of mAudioBuffer/mAudioData must be non-null.
// mChannels channels, each with mFrames frames
RefPtr<SharedBuffer> mAudioBuffer;
// mFrames frames, each with mChannels values
- nsAutoArrayPtr<AudioDataValue> mAudioData;
+ UniquePtr<AudioDataValue[]> mAudioData;
protected:
~AudioData() {}
};
namespace layers {
class TextureClient;
class PlanarYCbCrImage;
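Note: taking the buffer by value (UniquePtr<AudioDataValue[]> aData) and Move()-ing it into mAudioData makes every call site spell out the ownership handoff. A compilable sketch of the constructor idiom; PackedAudio and its fields are a hypothetical reduction of AudioData:

#include <cstdint>
#include <memory>
#include <utility>

using AudioDataValue = float;  // assumption: a float-sample build

// Hypothetical reduction of AudioData: the by-value UniquePtr parameter forces
// callers to write Move(...) (std::move here), documenting the handoff.
class PackedAudio {
public:
  PackedAudio(uint32_t aFrames, uint32_t aChannels,
              std::unique_ptr<AudioDataValue[]> aData)
    : mFrames(aFrames), mChannels(aChannels), mData(std::move(aData)) {}
  const uint32_t mFrames;
  const uint32_t mChannels;
  std::unique_ptr<AudioDataValue[]> mData;
};

int main() {
  auto buf = std::make_unique<AudioDataValue[]>(2 * 441);
  PackedAudio audio(441, 2, std::move(buf));  // ownership moves at the call site
  return audio.mData ? 0 : 1;
}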
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -2648,31 +2648,31 @@ MediaDecoderStateMachine::DropAudioUpToS
if (framesToPrune.value() > audio->mFrames) {
// We've messed up somehow. Don't try to trim frames, the |frames|
// variable below will overflow.
DECODER_WARN("Can't prune more frames than we have!");
return NS_ERROR_FAILURE;
}
uint32_t frames = audio->mFrames - static_cast<uint32_t>(framesToPrune.value());
uint32_t channels = audio->mChannels;
- nsAutoArrayPtr<AudioDataValue> audioData(new AudioDataValue[frames * channels]);
+ auto audioData = MakeUnique<AudioDataValue[]>(frames * channels);
memcpy(audioData.get(),
audio->mAudioData.get() + (framesToPrune.value() * channels),
frames * channels * sizeof(AudioDataValue));
CheckedInt64 duration = FramesToUsecs(frames, mInfo.mAudio.mRate);
if (!duration.isValid()) {
return NS_ERROR_FAILURE;
}
RefPtr<AudioData> data(new AudioData(audio->mOffset,
- mCurrentSeek.mTarget.mTime,
- duration.value(),
- frames,
- audioData.forget(),
- channels,
- audio->mRate));
+ mCurrentSeek.mTarget.mTime,
+ duration.value(),
+ frames,
+ Move(audioData),
+ channels,
+ audio->mRate));
PushFront(data, MediaData::AUDIO_DATA);
return NS_OK;
}
void MediaDecoderStateMachine::UpdateNextFrameStatus()
{
MOZ_ASSERT(OnTaskQueue());
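Note on the memcpy above: the buffer is interleaved (frame-major), so pruning framesToPrune frames means skipping framesToPrune * channels values from the front. A small worked sketch with hypothetical numbers:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <memory>

using AudioDataValue = float;  // assumption: a float-sample build

int main() {
  // Hypothetical numbers: prune the first 2 frames of a 5-frame stereo chunk.
  const uint32_t total = 5, prune = 2, channels = 2;
  auto src = std::make_unique<AudioDataValue[]>(total * channels);
  for (uint32_t i = 0; i < total * channels; ++i) {
    src[i] = static_cast<AudioDataValue>(i);
  }
  const uint32_t frames = total - prune;
  auto dst = std::make_unique<AudioDataValue[]>(frames * channels);
  // Interleaved layout: frame f occupies indices [f*channels, (f+1)*channels).
  std::memcpy(dst.get(), src.get() + prune * channels,
              frames * channels * sizeof(AudioDataValue));
  assert(dst[0] == prune * channels);  // first kept value is sample index 4
  return 0;
}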
--- a/dom/media/apple/AppleMP3Reader.cpp
+++ b/dom/media/apple/AppleMP3Reader.cpp
@@ -203,17 +203,17 @@ AppleMP3Reader::AudioSampleCallback(UInt
// This API insists on having MP3 packets spoon-fed to it from a callback.
// This structure exists only to pass our state and the result of the parser
// on to the callback above.
PassthroughUserData userData = { this, aNumPackets, aNumBytes, aData, aPackets, false };
do {
// Decompressed audio buffer
- nsAutoArrayPtr<uint8_t> decoded(new uint8_t[decodedSize]);
+ auto decoded = MakeUnique<uint8_t[]>(decodedSize);
AudioBufferList decBuffer;
decBuffer.mNumberBuffers = 1;
decBuffer.mBuffers[0].mNumberChannels = mAudioChannels;
decBuffer.mBuffers[0].mDataByteSize = decodedSize;
decBuffer.mBuffers[0].mData = decoded.get();
// in: the max number of packets we can handle from the decoder.
@@ -241,19 +241,21 @@ AppleMP3Reader::AudioSampleCallback(UInt
}
int64_t time = FramesToUsecs(mCurrentAudioFrame, mAudioSampleRate).value();
int64_t duration = FramesToUsecs(numFrames, mAudioSampleRate).value();
LOGD("pushed audio at time %lfs; duration %lfs\n",
(double)time / USECS_PER_S, (double)duration / USECS_PER_S);
+ auto samples = UniquePtr<AudioDataValue[]>(
+     reinterpret_cast<AudioDataValue*>(decoded.release()));
AudioData *audio = new AudioData(mResource.Tell(),
time, duration, numFrames,
- reinterpret_cast<AudioDataValue *>(decoded.forget()),
+ Move(samples),
mAudioChannels, mAudioSampleRate);
mAudioQueue.Push(audio);
mCurrentAudioFrame += numFrames;
if (rv == kNeedMoreData) {
// No error; we just need more data.
LOGD("FillComplexBuffer out of data\n");
--- a/dom/media/mediasink/DecodedAudioDataSink.cpp
+++ b/dom/media/mediasink/DecodedAudioDataSink.cpp
@@ -468,17 +468,17 @@ DecodedAudioDataSink::PlayFromAudioQueue
AssertOnAudioThread();
NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
RefPtr<AudioData> audio =
dont_AddRef(AudioQueue().PopFront().take()->As<AudioData>());
SINK_LOG_V("playing %u frames of audio at time %lld",
audio->mFrames, audio->mTime);
if (audio->mRate == mInfo.mRate && audio->mChannels == mInfo.mChannels) {
- mAudioStream->Write(audio->mAudioData, audio->mFrames);
+ mAudioStream->Write(audio->mAudioData.get(), audio->mFrames);
} else {
SINK_LOG_V("mismatched sample format mInfo=[%uHz/%u channels] audio=[%uHz/%u channels]",
mInfo.mRate, mInfo.mChannels, audio->mRate, audio->mChannels);
PlaySilence(audio->mFrames);
}
StartAudioStreamPlaybackIfNeeded();
--- a/dom/media/ogg/OggReader.cpp
+++ b/dom/media/ogg/OggReader.cpp
@@ -510,17 +510,17 @@ nsresult OggReader::DecodeVorbis(ogg_pac
}
VorbisPCMValue** pcm = 0;
int32_t frames = 0;
uint32_t channels = mVorbisState->mInfo.channels;
ogg_int64_t endFrame = aPacket->granulepos;
while ((frames = vorbis_synthesis_pcmout(&mVorbisState->mDsp, &pcm)) > 0) {
mVorbisState->ValidateVorbisPacketSamples(aPacket, frames);
- nsAutoArrayPtr<AudioDataValue> buffer(new AudioDataValue[frames * channels]);
+ auto buffer = MakeUnique<AudioDataValue[]>(frames * channels);
for (uint32_t j = 0; j < channels; ++j) {
VorbisPCMValue* channel = pcm[j];
for (uint32_t i = 0; i < uint32_t(frames); ++i) {
buffer[i*channels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]);
}
}
// No channel mapping for more than 8 channels.
@@ -529,17 +529,17 @@ nsresult OggReader::DecodeVorbis(ogg_pac
}
int64_t duration = mVorbisState->Time((int64_t)frames);
int64_t startTime = mVorbisState->Time(endFrame - frames);
mAudioQueue.Push(new AudioData(mResource.Tell(),
startTime,
duration,
frames,
- buffer.forget(),
+ Move(buffer),
channels,
mVorbisState->mInfo.rate));
mDecodedAudioFrames += frames;
endFrame -= frames;
if (vorbis_synthesis_read(&mVorbisState->mDsp, frames) != 0) {
return NS_ERROR_FAILURE;
@@ -559,27 +559,27 @@ nsresult OggReader::DecodeOpus(ogg_packe
int32_t samples = opus_packet_get_samples_per_frame(aPacket->packet,
(opus_int32) mOpusState->mRate);
int32_t frames = frames_number*samples;
// A valid Opus packet must be between 2.5 and 120 ms long.
if (frames < 120 || frames > 5760)
return NS_ERROR_FAILURE;
uint32_t channels = mOpusState->mChannels;
- nsAutoArrayPtr<AudioDataValue> buffer(new AudioDataValue[frames * channels]);
+ auto buffer = MakeUnique<AudioDataValue[]>(frames * channels);
// Decode to the appropriate sample type.
#ifdef MOZ_SAMPLE_TYPE_FLOAT32
int ret = opus_multistream_decode_float(mOpusState->mDecoder,
aPacket->packet, aPacket->bytes,
- buffer, frames, false);
+ buffer.get(), frames, false);
#else
int ret = opus_multistream_decode(mOpusState->mDecoder,
aPacket->packet, aPacket->bytes,
- buffer, frames, false);
+ buffer.get(), frames, false);
#endif
if (ret < 0)
return NS_ERROR_FAILURE;
NS_ASSERTION(ret == frames, "Opus decoded too few audio samples");
int64_t endFrame = aPacket->granulepos;
int64_t startFrame;
// If this is the last packet, perform end trimming.
@@ -599,23 +599,23 @@ nsresult OggReader::DecodeOpus(ogg_packe
// discard the whole packet
mOpusState->mSkip -= frames;
LOG(LogLevel::Debug, ("Opus decoder skipping %d frames"
" (whole packet)", frames));
return NS_OK;
}
int32_t keepFrames = frames - skipFrames;
int samples = keepFrames * channels;
- nsAutoArrayPtr<AudioDataValue> trimBuffer(new AudioDataValue[samples]);
+ auto trimBuffer = MakeUnique<AudioDataValue[]>(samples);
for (int i = 0; i < samples; i++)
trimBuffer[i] = buffer[skipFrames*channels + i];
startFrame = endFrame - keepFrames;
frames = keepFrames;
- buffer = trimBuffer;
+ buffer = Move(trimBuffer);
mOpusState->mSkip -= skipFrames;
LOG(LogLevel::Debug, ("Opus decoder skipping %d frames", skipFrames));
}
// Save this packet's granule position in case we need to perform end
// trimming on the next packet.
mOpusState->mPrevPacketGranulepos = endFrame;
@@ -646,17 +646,17 @@ nsresult OggReader::DecodeOpus(ogg_packe
LOG(LogLevel::Debug, ("Opus decoder pushing %d frames", frames));
int64_t startTime = mOpusState->Time(startFrame);
int64_t endTime = mOpusState->Time(endFrame);
mAudioQueue.Push(new AudioData(mResource.Tell(),
startTime,
endTime - startTime,
frames,
- buffer.forget(),
+ Move(buffer),
channels,
mOpusState->mRate));
mDecodedAudioFrames += frames;
return NS_OK;
}
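Note on the frames < 120 || frames > 5760 guard above: Opus timestamps everything at the 48 kHz reference rate, so 2.5 ms corresponds to 120 frames and 120 ms to 5760 frames. A trivial arithmetic check:

#include <cassert>

int main() {
  const int rate = 48000;             // Opus reference rate (48 kHz)
  assert(rate * 25 / 10000 == 120);   // 2.5 ms -> 120 frames (lower bound)
  assert(rate * 120 / 1000 == 5760);  // 120 ms -> 5760 frames (upper bound)
  return 0;
}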
--- a/dom/media/platforms/agnostic/BlankDecoderModule.cpp
+++ b/dom/media/platforms/agnostic/BlankDecoderModule.cpp
@@ -178,32 +178,32 @@ public:
CheckedInt64 frames =
UsecsToFrames(aDuration.ToMicroseconds()+1, mSampleRate);
if (!frames.isValid() ||
!mChannelCount ||
!mSampleRate ||
frames.value() > (UINT32_MAX / mChannelCount)) {
return nullptr;
}
- AudioDataValue* samples = new AudioDataValue[frames.value() * mChannelCount];
+ auto samples = MakeUnique<AudioDataValue[]>(frames.value() * mChannelCount);
// Fill the sound buffer with an A4 tone.
static const float pi = 3.14159265f;
static const float noteHz = 440.0f;
for (int i = 0; i < frames.value(); i++) {
float f = sin(2 * pi * noteHz * mFrameSum / mSampleRate);
for (unsigned c = 0; c < mChannelCount; c++) {
samples[i * mChannelCount + c] = AudioDataValue(f);
}
mFrameSum++;
}
return new AudioData(aOffsetInStream,
aDTS.ToMicroseconds(),
aDuration.ToMicroseconds(),
uint32_t(frames.value()),
- samples,
+ Move(samples),
mChannelCount,
mSampleRate);
}
private:
int64_t mFrameSum;
uint32_t mChannelCount;
uint32_t mSampleRate;
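Note on the frames.value() > (UINT32_MAX / mChannelCount) guard above: it proves frames * mChannelCount fits in the allocation size before the multiplication is performed. A minimal sketch of the same check, with hypothetical values:

#include <cassert>
#include <cstdint>

// Mirrors the guard above: reject before frames * channels can overflow.
static bool CanAllocate(uint64_t aFrames, uint32_t aChannels) {
  return aChannels != 0 && aFrames <= UINT32_MAX / aChannels;
}

int main() {
  assert(CanAllocate(4096, 2));
  assert(!CanAllocate(UINT32_MAX, 2));  // would overflow a 32-bit sample count
  return 0;
}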
--- a/dom/media/platforms/agnostic/OpusDecoder.cpp
+++ b/dom/media/platforms/agnostic/OpusDecoder.cpp
@@ -163,27 +163,27 @@ OpusDataDecoder::DoDecode(MediaRawData*
// A valid Opus packet must be between 2.5 and 120 ms long (48kHz).
int32_t frames = frames_number*samples;
if (frames < 120 || frames > 5760) {
OPUS_DEBUG("Invalid packet frames: %ld", frames);
return -1;
}
- nsAutoArrayPtr<AudioDataValue> buffer(new AudioDataValue[frames * channels]);
+ auto buffer = MakeUnique<AudioDataValue[]>(frames * channels);
// Decode to the appropriate sample type.
#ifdef MOZ_SAMPLE_TYPE_FLOAT32
int ret = opus_multistream_decode_float(mOpusDecoder,
aSample->Data(), aSample->Size(),
- buffer, frames, false);
+ buffer.get(), frames, false);
#else
int ret = opus_multistream_decode(mOpusDecoder,
aSample->Data(), aSample->Size(),
- buffer, frames, false);
+ buffer.get(), frames, false);
#endif
if (ret < 0) {
return -1;
}
NS_ASSERTION(ret == frames, "Opus decoded too few audio samples");
CheckedInt64 startTime = aSample->mTime;
// Trim the initial frames while the decoder is settling.
@@ -259,17 +259,17 @@ OpusDataDecoder::DoDecode(MediaRawData*
NS_WARNING("OpusDataDecoder: Int overflow shifting tstamp by codec delay");
return -1;
};
mCallback->Output(new AudioData(aSample->mOffset,
time.value(),
duration.value(),
frames,
- buffer.forget(),
+ Move(buffer),
mOpusParser->mChannels,
mOpusParser->mRate));
mFrames += frames;
return frames;
}
void
OpusDataDecoder::DoDrain()
--- a/dom/media/platforms/agnostic/VorbisDecoder.cpp
+++ b/dom/media/platforms/agnostic/VorbisDecoder.cpp
@@ -179,17 +179,17 @@ VorbisDataDecoder::DoDecode(MediaRawData
0,
0,
nullptr,
mVorbisDsp.vi->channels,
mVorbisDsp.vi->rate));
}
while (frames > 0) {
uint32_t channels = mVorbisDsp.vi->channels;
- nsAutoArrayPtr<AudioDataValue> buffer(new AudioDataValue[frames*channels]);
+ auto buffer = MakeUnique<AudioDataValue[]>(frames*channels);
for (uint32_t j = 0; j < channels; ++j) {
VorbisPCMValue* channel = pcm[j];
for (uint32_t i = 0; i < uint32_t(frames); ++i) {
buffer[i*channels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]);
}
}
CheckedInt64 duration = FramesToUsecs(frames, mVorbisDsp.vi->rate);
@@ -210,17 +210,17 @@ VorbisDataDecoder::DoDecode(MediaRawData
return -1;
};
aTotalFrames += frames;
mCallback->Output(new AudioData(aOffset,
time.value(),
duration.value(),
frames,
- buffer.forget(),
+ Move(buffer),
mVorbisDsp.vi->channels,
mVorbisDsp.vi->rate));
mFrames += aTotalFrames;
if (vorbis_synthesis_read(&mVorbisDsp, frames) != 0) {
return -1;
}
frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);
--- a/dom/media/platforms/agnostic/gmp/GMPAudioDecoder.cpp
+++ b/dom/media/platforms/agnostic/gmp/GMPAudioDecoder.cpp
@@ -32,17 +32,17 @@ AudioCallbackAdapter::Decoded(const nsTA
if (aRate == 0 || aChannels == 0) {
NS_WARNING("Invalid rate or num channels returned on GMP audio samples");
mCallback->Error();
return;
}
size_t numFrames = aPCM.Length() / aChannels;
MOZ_ASSERT((aPCM.Length() % aChannels) == 0);
- nsAutoArrayPtr<AudioDataValue> audioData(new AudioDataValue[aPCM.Length()]);
+ auto audioData = MakeUnique<AudioDataValue[]>(aPCM.Length());
for (size_t i = 0; i < aPCM.Length(); ++i) {
audioData[i] = AudioSampleToFloat(aPCM[i]);
}
if (mMustRecaptureAudioPosition) {
mAudioFrameSum = 0;
auto timestamp = UsecsToFrames(aTimeStamp, aRate);
@@ -66,22 +66,22 @@ AudioCallbackAdapter::Decoded(const nsTA
auto duration = FramesToUsecs(numFrames, aRate);
if (!duration.isValid()) {
NS_WARNING("Invalid duration on audio samples");
mCallback->Error();
return;
}
RefPtr<AudioData> audio(new AudioData(mLastStreamOffset,
- timestamp.value(),
- duration.value(),
- numFrames,
- audioData.forget(),
- aChannels,
- aRate));
+ timestamp.value(),
+ duration.value(),
+ numFrames,
+ Move(audioData),
+ aChannels,
+ aRate));
#ifdef LOG_SAMPLE_DECODE
LOG("Decoded audio sample! timestamp=%lld duration=%lld currentLength=%u",
timestamp, duration, currentLength);
#endif
mCallback->Output(audio);
}
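Note: the loop above widens GMP's 16-bit PCM to floats via AudioSampleToFloat. A sketch of that normalization, assuming the usual divide-by-32768 convention from AudioSampleFormat.h:

#include <cassert>
#include <cstdint>

// Assumed shape of AudioSampleToFloat for 16-bit input: normalize by 32768.
static inline float SampleToFloat(int16_t aValue) {
  return aValue / 32768.0f;
}

int main() {
  assert(SampleToFloat(0) == 0.0f);
  assert(SampleToFloat(-32768) == -1.0f);  // full-scale negative -> -1.0
  assert(SampleToFloat(16384) == 0.5f);
  return 0;
}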
--- a/dom/media/platforms/android/AndroidDecoderModule.cpp
+++ b/dom/media/platforms/android/AndroidDecoderModule.cpp
@@ -252,28 +252,28 @@ public:
#ifdef MOZ_SAMPLE_TYPE_S16
int32_t numSamples = size / 2;
#else
#error We only support 16-bit integer PCM
#endif
const int32_t numFrames = numSamples / numChannels;
- AudioDataValue* audio = new AudioDataValue[numSamples];
+ auto audio = MakeUnique<AudioDataValue[]>(numSamples);
uint8_t* bufferStart = static_cast<uint8_t*>(aBuffer) + offset;
- PodCopy(audio, reinterpret_cast<AudioDataValue*>(bufferStart), numSamples);
+ PodCopy(audio.get(), reinterpret_cast<AudioDataValue*>(bufferStart), numSamples);
int64_t presentationTimeUs;
NS_ENSURE_SUCCESS(rv = aInfo->PresentationTimeUs(&presentationTimeUs), rv);
RefPtr<AudioData> data = new AudioData(0, presentationTimeUs,
aDuration.ToMicroseconds(),
numFrames,
- audio,
+ Move(audio),
numChannels,
sampleRate);
INVOKE_CALLBACK(Output, data);
return NS_OK;
}
};
--- a/dom/media/platforms/apple/AppleATDecoder.cpp
+++ b/dom/media/platforms/apple/AppleATDecoder.cpp
@@ -268,25 +268,25 @@ AppleATDecoder::DecodeSample(MediaRawDat
}
#ifdef LOG_SAMPLE_DECODE
LOG("pushed audio at time %lfs; duration %lfs\n",
(double)aSample->mTime / USECS_PER_S,
duration.ToSeconds());
#endif
- nsAutoArrayPtr<AudioDataValue> data(new AudioDataValue[outputData.Length()]);
+ auto data = MakeUnique<AudioDataValue[]>(outputData.Length());
PodCopy(data.get(), &outputData[0], outputData.Length());
RefPtr<AudioData> audio = new AudioData(aSample->mOffset,
- aSample->mTime,
- duration.ToMicroseconds(),
- numFrames,
- data.forget(),
- channels,
- rate);
+ aSample->mTime,
+ duration.ToMicroseconds(),
+ numFrames,
+ Move(data),
+ channels,
+ rate);
mCallback->Output(audio);
return NS_OK;
}
nsresult
AppleATDecoder::GetInputAudioDescription(AudioStreamBasicDescription& aDesc,
const nsTArray<uint8_t>& aExtraData)
{
--- a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
@@ -30,60 +30,59 @@ RefPtr<MediaDataDecoder::InitPromise>
FFmpegAudioDecoder<LIBAV_VER>::Init()
{
nsresult rv = InitDecoder();
return rv == NS_OK ? InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__)
: InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__);
}
-static AudioDataValue*
+static UniquePtr<AudioDataValue[]>
CopyAndPackAudio(AVFrame* aFrame, uint32_t aNumChannels, uint32_t aNumAFrames)
{
MOZ_ASSERT(aNumChannels <= MAX_CHANNELS);
- nsAutoArrayPtr<AudioDataValue> audio(
- new AudioDataValue[aNumChannels * aNumAFrames]);
+ auto audio = MakeUnique<AudioDataValue[]>(aNumChannels * aNumAFrames);
if (aFrame->format == AV_SAMPLE_FMT_FLT) {
// Audio data already packed. No need to do anything other than copy it
// into a buffer we own.
- memcpy(audio, aFrame->data[0],
+ memcpy(audio.get(), aFrame->data[0],
aNumChannels * aNumAFrames * sizeof(AudioDataValue));
} else if (aFrame->format == AV_SAMPLE_FMT_FLTP) {
// Planar audio data. Pack it into something we can understand.
- AudioDataValue* tmp = audio;
+ AudioDataValue* tmp = audio.get();
AudioDataValue** data = reinterpret_cast<AudioDataValue**>(aFrame->data);
for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
for (uint32_t channel = 0; channel < aNumChannels; channel++) {
*tmp++ = data[channel][frame];
}
}
} else if (aFrame->format == AV_SAMPLE_FMT_S16) {
// Audio data already packed. Need to convert from S16 to 32 bits Float
- AudioDataValue* tmp = audio;
+ AudioDataValue* tmp = audio.get();
int16_t* data = reinterpret_cast<int16_t**>(aFrame->data)[0];
for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
for (uint32_t channel = 0; channel < aNumChannels; channel++) {
*tmp++ = AudioSampleToFloat(*data++);
}
}
} else if (aFrame->format == AV_SAMPLE_FMT_S16P) {
// Planar audio data. Convert it from S16 to 32 bits float
// and pack it into something we can understand.
- AudioDataValue* tmp = audio;
+ AudioDataValue* tmp = audio.get();
int16_t** data = reinterpret_cast<int16_t**>(aFrame->data);
for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
for (uint32_t channel = 0; channel < aNumChannels; channel++) {
*tmp++ = AudioSampleToFloat(data[channel][frame]);
}
}
}
- return audio.forget();
+ return audio;
}
void
FFmpegAudioDecoder<LIBAV_VER>::DecodePacket(MediaRawData* aSample)
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
AVPacket packet;
av_init_packet(&packet);
@@ -110,34 +109,34 @@ FFmpegAudioDecoder<LIBAV_VER>::DecodePac
mCallback->Error();
return;
}
if (decoded) {
uint32_t numChannels = mCodecContext->channels;
uint32_t samplingRate = mCodecContext->sample_rate;
- nsAutoArrayPtr<AudioDataValue> audio(
- CopyAndPackAudio(mFrame, numChannels, mFrame->nb_samples));
+ UniquePtr<AudioDataValue[]> audio =
+ CopyAndPackAudio(mFrame, numChannels, mFrame->nb_samples);
media::TimeUnit duration =
FramesToTimeUnit(mFrame->nb_samples, samplingRate);
if (!duration.IsValid()) {
NS_WARNING("Invalid count of accumulated audio samples");
mCallback->Error();
return;
}
RefPtr<AudioData> data = new AudioData(samplePosition,
- pts.ToMicroseconds(),
- duration.ToMicroseconds(),
- mFrame->nb_samples,
- audio.forget(),
- numChannels,
- samplingRate);
+ pts.ToMicroseconds(),
+ duration.ToMicroseconds(),
+ mFrame->nb_samples,
+ Move(audio),
+ numChannels,
+ samplingRate);
mCallback->Output(data);
pts += duration;
if (!pts.IsValid()) {
NS_WARNING("Invalid count of accumulated audio samples");
mCallback->Error();
return;
}
}
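Note: CopyAndPackAudio now returns UniquePtr<AudioDataValue[]> by value, the idiomatic replacement for returning audio.forget(); the move (or copy elision) makes this free. A standalone sketch of the planar-to-interleaved branch, assuming std::unique_ptr semantics; PackPlanar is a hypothetical reduction:

#include <cassert>
#include <cstdint>
#include <memory>

using AudioDataValue = float;  // assumption: FLT/FLTP sample format

// Planar -> packed (interleaved) copy, returned by value like the patched
// CopyAndPackAudio.
static std::unique_ptr<AudioDataValue[]>
PackPlanar(AudioDataValue** aPlanes, uint32_t aChannels, uint32_t aFrames) {
  auto audio = std::make_unique<AudioDataValue[]>(aChannels * aFrames);
  AudioDataValue* tmp = audio.get();
  for (uint32_t frame = 0; frame < aFrames; frame++) {
    for (uint32_t channel = 0; channel < aChannels; channel++) {
      *tmp++ = aPlanes[channel][frame];  // order: f0c0 f0c1 f1c0 f1c1 ...
    }
  }
  return audio;  // moved (or elided) out; no forget() needed
}

int main() {
  AudioDataValue left[2] = {1, 2}, right[2] = {3, 4};
  AudioDataValue* planes[2] = {left, right};
  auto packed = PackPlanar(planes, 2, 2);
  assert(packed[0] == 1 && packed[1] == 3 && packed[2] == 2 && packed[3] == 4);
  return 0;
}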
--- a/dom/media/platforms/wmf/WMFAudioMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFAudioMFTManager.cpp
@@ -275,17 +275,17 @@ WMFAudioMFTManager::Output(int64_t aStre
MOZ_ASSERT(numFrames >= 0);
MOZ_ASSERT(numSamples >= 0);
if (numFrames == 0) {
// All data from this chunk stripped, loop back and try to output the next
// frame, if possible.
return S_OK;
}
- nsAutoArrayPtr<AudioDataValue> audioData(new AudioDataValue[numSamples]);
+ auto audioData = MakeUnique<AudioDataValue[]>(numSamples);
int16_t* pcm = (int16_t*)data;
for (int32_t i = 0; i < numSamples; ++i) {
audioData[i] = AudioSampleToFloat(pcm[i]);
}
buffer->Unlock();
@@ -297,17 +297,17 @@ WMFAudioMFTManager::Output(int64_t aStre
media::TimeUnit duration = FramesToTimeUnit(numFrames, mAudioRate);
NS_ENSURE_TRUE(duration.IsValid(), E_FAIL);
aOutData = new AudioData(aStreamOffset,
timestamp.ToMicroseconds(),
duration.ToMicroseconds(),
numFrames,
- audioData.forget(),
+ Move(audioData),
mAudioChannels,
mAudioRate);
#ifdef LOG_SAMPLE_DECODE
LOG("Decoded audio sample! timestamp=%lld duration=%lld currentLength=%u",
timestamp.ToMicroseconds(), duration.ToMicroseconds(), currentLength);
#endif
--- a/dom/media/wave/WaveReader.cpp
+++ b/dom/media/wave/WaveReader.cpp
@@ -189,17 +189,17 @@ bool WaveReader::DecodeAudioData()
static const int64_t BLOCK_SIZE = 4096;
int64_t readSize = std::min(BLOCK_SIZE, remaining);
int64_t frames = readSize / mFrameSize;
static_assert(uint64_t(BLOCK_SIZE) < UINT_MAX /
sizeof(AudioDataValue) / MAX_CHANNELS,
"bufferSize calculation could overflow.");
const size_t bufferSize = static_cast<size_t>(frames * mChannels);
- nsAutoArrayPtr<AudioDataValue> sampleBuffer(new AudioDataValue[bufferSize]);
+ auto sampleBuffer = MakeUnique<AudioDataValue[]>(bufferSize);
static_assert(uint64_t(BLOCK_SIZE) < UINT_MAX / sizeof(char),
"BLOCK_SIZE too large for enumerator.");
nsAutoArrayPtr<char> dataBuffer(new char[static_cast<size_t>(readSize)]);
if (!ReadAll(dataBuffer, readSize)) {
return false;
}
@@ -224,17 +224,17 @@ bool WaveReader::DecodeAudioData()
NS_ASSERTION(posTime <= INT64_MAX / USECS_PER_S, "posTime overflow");
NS_ASSERTION(readSizeTime <= INT64_MAX / USECS_PER_S, "readSizeTime overflow");
NS_ASSERTION(frames < INT32_MAX, "frames overflow");
mAudioQueue.Push(new AudioData(pos,
static_cast<int64_t>(posTime * USECS_PER_S),
static_cast<int64_t>(readSizeTime * USECS_PER_S),
static_cast<int32_t>(frames),
- sampleBuffer.forget(),
+ Move(sampleBuffer),
mChannels,
mSampleRate));
return true;
}
bool WaveReader::DecodeVideoFrame(bool &aKeyframeSkip,
int64_t aTimeThreshold)
--- a/dom/media/webm/AudioDecoder.cpp
+++ b/dom/media/webm/AudioDecoder.cpp
@@ -175,17 +175,17 @@ VorbisDecoder::Decode(const unsigned cha
// data.
if (frames == 0 && first_packet) {
mReader->AudioQueue().Push(new AudioData(aOffset, aTstampUsecs, 0, 0, nullptr,
mVorbisDsp.vi->channels,
mVorbisDsp.vi->rate));
}
while (frames > 0) {
uint32_t channels = mVorbisDsp.vi->channels;
- nsAutoArrayPtr<AudioDataValue> buffer(new AudioDataValue[frames*channels]);
+ auto buffer = MakeUnique<AudioDataValue[]>(frames*channels);
for (uint32_t j = 0; j < channels; ++j) {
VorbisPCMValue* channel = pcm[j];
for (uint32_t i = 0; i < uint32_t(frames); ++i) {
buffer[i*channels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]);
}
}
CheckedInt64 duration = FramesToUsecs(frames, mVorbisDsp.vi->rate);
@@ -206,17 +206,17 @@ VorbisDecoder::Decode(const unsigned cha
return false;
};
*aTotalFrames += frames;
mReader->AudioQueue().Push(new AudioData(aOffset,
time.value(),
duration.value(),
frames,
- buffer.forget(),
+ Move(buffer),
mVorbisDsp.vi->channels,
mVorbisDsp.vi->rate));
if (vorbis_synthesis_read(&mVorbisDsp, frames)) {
return false;
}
frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);
}
@@ -366,27 +366,27 @@ OpusDecoder::Decode(const unsigned char*
int32_t samples =
opus_packet_get_samples_per_frame(aData, opus_int32(mOpusParser->mRate));
// A valid Opus packet must be between 2.5 and 120 ms long (48kHz).
int32_t frames = frames_number*samples;
if (frames < 120 || frames > 5760)
return false;
- nsAutoArrayPtr<AudioDataValue> buffer(new AudioDataValue[frames * channels]);
+ auto buffer = MakeUnique<AudioDataValue[]>(frames * channels);
// Decode to the appropriate sample type.
#ifdef MOZ_SAMPLE_TYPE_FLOAT32
int ret = opus_multistream_decode_float(mOpusDecoder,
aData, aLength,
- buffer, frames, false);
+ buffer.get(), frames, false);
#else
int ret = opus_multistream_decode(mOpusDecoder,
aData, aLength,
- buffer, frames, false);
+ buffer.get(), frames, false);
#endif
if (ret < 0)
return false;
NS_ASSERTION(ret == frames, "Opus decoded too few audio samples");
CheckedInt64 startTime = aTstampUsecs;
// Trim the initial frames while the decoder is settling.
if (mSkip > 0) {
@@ -458,15 +458,15 @@ OpusDecoder::Decode(const unsigned char*
if (!time.isValid()) {
NS_WARNING("Int overflow shifting tstamp by codec delay");
return false;
};
mReader->AudioQueue().Push(new AudioData(aOffset,
time.value(),
duration.value(),
frames,
- buffer.forget(),
+ Move(buffer),
mOpusParser->mChannels,
mOpusParser->mRate));
return true;
}
} // namespace mozilla