Bug 1248861: P3. Use AlignedAudioBuffer object with AudioData. r=cpearce
author: Jean-Yves Avenard <jyavenard@mozilla.com>
Sun, 03 Apr 2016 23:09:45 +1000
changeset 330575 6aa7d5d7d5c325070402d9df3b86538ab6ae3ed1
parent 330574 b52c07a5009a19b605f5434f9e3847108e9414b1
child 330576 45e19952805dbbded2845cbaa21f1abaf83d0f69
push id: 6048
push user: kmoir@mozilla.com
push date: Mon, 06 Jun 2016 19:02:08 +0000
treeherder: mozilla-beta@46d72a56c57d [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: cpearce
bugs: 1248861
milestone: 48.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1248861: P3. Use AlignedAudioBuffer object with AudioData. r=cpearce MozReview-Commit-ID: 7HiF4eHlRwB
dom/media/AudioCompactor.h
dom/media/MediaData.cpp
dom/media/MediaData.h
dom/media/MediaDecoderStateMachine.cpp
dom/media/ogg/OggReader.cpp
dom/media/platforms/agnostic/BlankDecoderModule.cpp
dom/media/platforms/agnostic/OpusDecoder.cpp
dom/media/platforms/agnostic/VorbisDecoder.cpp
dom/media/platforms/agnostic/WAVDecoder.cpp
dom/media/platforms/agnostic/gmp/GMPAudioDecoder.cpp
dom/media/platforms/android/AndroidDecoderModule.cpp
dom/media/platforms/apple/AppleATDecoder.cpp
dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
dom/media/platforms/wmf/WMFAudioMFTManager.cpp
dom/media/wave/WaveReader.cpp
--- a/dom/media/AudioCompactor.h
+++ b/dom/media/AudioCompactor.h
@@ -36,17 +36,20 @@ public:
             uint32_t aFrames, uint32_t aChannels, CopyFunc aCopyFunc)
   {
     // If we are losing more than a reasonable amount to padding, try to chunk
     // the data.
     size_t maxSlop = AudioDataSize(aFrames, aChannels) / MAX_SLOP_DIVISOR;
 
     while (aFrames > 0) {
       uint32_t samples = GetChunkSamples(aFrames, aChannels, maxSlop);
-      auto buffer = MakeUnique<AudioDataValue[]>(samples);
+      AlignedAudioBuffer buffer(samples);
+      if (!buffer) {
+        return false;
+      }
 
       // Copy audio data to buffer using caller-provided functor.
       uint32_t framesCopied = aCopyFunc(buffer.get(), samples);
 
       NS_ASSERTION(framesCopied <= aFrames, "functor copied too many frames");
 
       CheckedInt64 duration = FramesToUsecs(framesCopied, aSampleRate);
       if (!duration.isValid()) {
--- a/dom/media/MediaData.cpp
+++ b/dom/media/MediaData.cpp
@@ -41,17 +41,18 @@ AudioData::EnsureAudioBuffer()
       data[j*mFrames + i] = mAudioData[i*mChannels + j];
     }
   }
 }
 
 size_t
 AudioData::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
 {
-  size_t size = aMallocSizeOf(this) + aMallocSizeOf(mAudioData.get());
+  size_t size =
+    aMallocSizeOf(this) + mAudioData.SizeOfExcludingThis(aMallocSizeOf);
   if (mAudioBuffer) {
     size += mAudioBuffer->SizeOfIncludingThis(aMallocSizeOf);
   }
   return size;
 }
 
 bool
 AudioData::IsAudible() const
--- a/dom/media/MediaData.h
+++ b/dom/media/MediaData.h
@@ -347,17 +347,17 @@ protected:
 // Holds chunk a decoded audio frames.
 class AudioData : public MediaData {
 public:
 
   AudioData(int64_t aOffset,
             int64_t aTime,
             int64_t aDuration,
             uint32_t aFrames,
-            UniquePtr<AudioDataValue[]> aData,
+            AlignedAudioBuffer&& aData,
             uint32_t aChannels,
             uint32_t aRate)
     : MediaData(sType, aOffset, aTime, aDuration, aFrames)
     , mChannels(aChannels)
     , mRate(aRate)
     , mAudioData(Move(aData)) {}
 
   static const Type sType = AUDIO_DATA;
@@ -382,17 +382,17 @@ public:
   bool IsAudible() const;
 
   const uint32_t mChannels;
   const uint32_t mRate;
   // At least one of mAudioBuffer/mAudioData must be non-null.
   // mChannels channels, each with mFrames frames
   RefPtr<SharedBuffer> mAudioBuffer;
   // mFrames frames, each with mChannels values
-  UniquePtr<AudioDataValue[]> mAudioData;
+  AlignedAudioBuffer mAudioData;
 
 protected:
   ~AudioData() {}
 };
 
 namespace layers {
 class TextureClient;
 class PlanarYCbCrImage;
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -2506,17 +2506,20 @@ MediaDecoderStateMachine::DropAudioUpToS
   if (framesToPrune.value() > audio->mFrames) {
     // We've messed up somehow. Don't try to trim frames, the |frames|
     // variable below will overflow.
     DECODER_WARN("Can't prune more frames that we have!");
     return NS_ERROR_FAILURE;
   }
   uint32_t frames = audio->mFrames - static_cast<uint32_t>(framesToPrune.value());
   uint32_t channels = audio->mChannels;
-  auto audioData = MakeUnique<AudioDataValue[]>(frames * channels);
+  AlignedAudioBuffer audioData(frames * channels);
+  if (!audioData) {
+    return NS_ERROR_OUT_OF_MEMORY;
+  }
   memcpy(audioData.get(),
          audio->mAudioData.get() + (framesToPrune.value() * channels),
          frames * channels * sizeof(AudioDataValue));
   CheckedInt64 duration = FramesToUsecs(frames, mInfo.mAudio.mRate);
   if (!duration.isValid()) {
     return NS_ERROR_FAILURE;
   }
   RefPtr<AudioData> data(new AudioData(audio->mOffset,
--- a/dom/media/ogg/OggReader.cpp
+++ b/dom/media/ogg/OggReader.cpp
@@ -523,17 +523,20 @@ nsresult OggReader::DecodeVorbis(ogg_pac
   }
 
   VorbisPCMValue** pcm = 0;
   int32_t frames = 0;
   uint32_t channels = mVorbisState->mInfo.channels;
   ogg_int64_t endFrame = aPacket->granulepos;
   while ((frames = vorbis_synthesis_pcmout(&mVorbisState->mDsp, &pcm)) > 0) {
     mVorbisState->ValidateVorbisPacketSamples(aPacket, frames);
-    auto buffer = MakeUnique<AudioDataValue[]>(frames * channels);
+    AlignedAudioBuffer buffer(frames * channels);
+    if (!buffer) {
+      return NS_ERROR_OUT_OF_MEMORY;
+    }
     for (uint32_t j = 0; j < channels; ++j) {
       VorbisPCMValue* channel = pcm[j];
       for (uint32_t i = 0; i < uint32_t(frames); ++i) {
         buffer[i*channels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]);
       }
     }
 
     // No channel mapping for more than 8 channels.
@@ -572,17 +575,20 @@ nsresult OggReader::DecodeOpus(ogg_packe
   int32_t samples = opus_packet_get_samples_per_frame(aPacket->packet,
                                                       (opus_int32) mOpusState->mRate);
   int32_t frames = frames_number*samples;
 
   // A valid Opus packet must be between 2.5 and 120 ms long.
   if (frames < 120 || frames > 5760)
     return NS_ERROR_FAILURE;
   uint32_t channels = mOpusState->mChannels;
-  auto buffer = MakeUnique<AudioDataValue[]>(frames * channels);
+  AlignedAudioBuffer buffer(frames * channels);
+  if (!buffer) {
+    return NS_ERROR_OUT_OF_MEMORY;
+  }
 
   // Decode to the appropriate sample type.
 #ifdef MOZ_SAMPLE_TYPE_FLOAT32
   int ret = opus_multistream_decode_float(mOpusState->mDecoder,
                                           aPacket->packet, aPacket->bytes,
                                           buffer.get(), frames, false);
 #else
   int ret = opus_multistream_decode(mOpusState->mDecoder,
@@ -612,17 +618,20 @@ nsresult OggReader::DecodeOpus(ogg_packe
       // discard the whole packet
       mOpusState->mSkip -= frames;
       LOG(LogLevel::Debug, ("Opus decoder skipping %d frames"
                          " (whole packet)", frames));
       return NS_OK;
     }
     int32_t keepFrames = frames - skipFrames;
     int samples = keepFrames * channels;
-    auto trimBuffer = MakeUnique<AudioDataValue[]>(samples);
+    AlignedAudioBuffer trimBuffer(samples);
+    if (!trimBuffer) {
+      return NS_ERROR_OUT_OF_MEMORY;
+    }
     for (int i = 0; i < samples; i++)
       trimBuffer[i] = buffer[skipFrames*channels + i];
 
     startFrame = endFrame - keepFrames;
     frames = keepFrames;
     buffer = Move(trimBuffer);
 
     mOpusState->mSkip -= skipFrames;
--- a/dom/media/platforms/agnostic/BlankDecoderModule.cpp
+++ b/dom/media/platforms/agnostic/BlankDecoderModule.cpp
@@ -6,18 +6,16 @@
 
 #include "ImageContainer.h"
 #include "MediaDecoderReader.h"
 #include "MediaInfo.h"
 #include "mozilla/CheckedInt.h"
 #include "mozilla/mozalloc.h" // for operator new, and new (fallible)
 #include "mozilla/RefPtr.h"
 #include "mozilla/TaskQueue.h"
-#include "mozilla/UniquePtr.h"
-#include "mozilla/UniquePtrExtensions.h"
 #include "nsRect.h"
 #include "PlatformDecoderModule.h"
 #include "TimeUnits.h"
 #include "VideoUtils.h"
 
 namespace mozilla {
 
 // Decoder that uses a passed in object's Create function to create blank
@@ -189,18 +187,17 @@ public:
     CheckedInt64 frames =
       UsecsToFrames(aDuration.ToMicroseconds()+1, mSampleRate);
     if (!frames.isValid() ||
         !mChannelCount ||
         !mSampleRate ||
         frames.value() > (UINT32_MAX / mChannelCount)) {
       return nullptr;
     }
-    auto samples =
-      MakeUniqueFallible<AudioDataValue[]>(frames.value() * mChannelCount);
+    AlignedAudioBuffer samples(frames.value() * mChannelCount);
     if (!samples) {
       return nullptr;
     }
     // Fill the sound buffer with an A4 tone.
     static const float pi = 3.14159265f;
     static const float noteHz = 440.0f;
     for (int i = 0; i < frames.value(); i++) {
       float f = sin(2 * pi * noteHz * mFrameSum / mSampleRate);
--- a/dom/media/platforms/agnostic/OpusDecoder.cpp
+++ b/dom/media/platforms/agnostic/OpusDecoder.cpp
@@ -169,17 +169,20 @@ OpusDataDecoder::DoDecode(MediaRawData* 
 
   // A valid Opus packet must be between 2.5 and 120 ms long (48kHz).
   int32_t frames = frames_number*samples;
   if (frames < 120 || frames > 5760) {
     OPUS_DEBUG("Invalid packet frames: %ld", frames);
     return -1;
   }
 
-  auto buffer = MakeUnique<AudioDataValue[]>(frames * channels);
+  AlignedAudioBuffer buffer(frames * channels);
+  if (!buffer) {
+    return -1;
+  }
 
   // Decode to the appropriate sample type.
 #ifdef MOZ_SAMPLE_TYPE_FLOAT32
   int ret = opus_multistream_decode_float(mOpusDecoder,
                                           aSample->Data(), aSample->Size(),
                                           buffer.get(), frames, false);
 #else
   int ret = opus_multistream_decode(mOpusDecoder,
--- a/dom/media/platforms/agnostic/VorbisDecoder.cpp
+++ b/dom/media/platforms/agnostic/VorbisDecoder.cpp
@@ -179,23 +179,26 @@ VorbisDataDecoder::DoDecode(MediaRawData
   // start time is calculated.  Otherwise we'd end up with a media start
   // time derived from the timecode of the first packet that produced
   // data.
   if (frames == 0 && first_packet) {
     mCallback->Output(new AudioData(aOffset,
                                     aTstampUsecs,
                                     0,
                                     0,
-                                    nullptr,
+                                    AlignedAudioBuffer(),
                                     mVorbisDsp.vi->channels,
                                     mVorbisDsp.vi->rate));
   }
   while (frames > 0) {
     uint32_t channels = mVorbisDsp.vi->channels;
-    auto buffer = MakeUnique<AudioDataValue[]>(frames*channels);
+    AlignedAudioBuffer buffer(frames*channels);
+    if (!buffer) {
+      return -1;
+    }
     for (uint32_t j = 0; j < channels; ++j) {
       VorbisPCMValue* channel = pcm[j];
       for (uint32_t i = 0; i < uint32_t(frames); ++i) {
         buffer[i*channels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]);
       }
     }
 
     CheckedInt64 duration = FramesToUsecs(frames, mVorbisDsp.vi->rate);
--- a/dom/media/platforms/agnostic/WAVDecoder.cpp
+++ b/dom/media/platforms/agnostic/WAVDecoder.cpp
@@ -94,17 +94,20 @@ WaveDataDecoder::DoDecode(MediaRawData* 
 {
   size_t aLength = aSample->Size();
   ByteReader aReader = ByteReader(aSample->Data(), aLength);
   int64_t aOffset = aSample->mOffset;
   uint64_t aTstampUsecs = aSample->mTime;
 
   int32_t frames = aLength * 8 / mInfo.mBitDepth / mInfo.mChannels;
 
-  auto buffer = MakeUnique<AudioDataValue[]>(frames * mInfo.mChannels);
+  AlignedAudioBuffer buffer(frames * mInfo.mChannels);
+  if (!buffer) {
+    return false;
+  }
   for (int i = 0; i < frames; ++i) {
     for (unsigned int j = 0; j < mInfo.mChannels; ++j) {
       if (mInfo.mProfile == 6) {                              //ALAW Data
         uint8_t v = aReader.ReadU8();
         int16_t decoded = DecodeALawSample(v);
         buffer[i * mInfo.mChannels + j] =
             IntegerToAudioSample<AudioDataValue>(decoded);
       } else if (mInfo.mProfile == 7) {                       //ULAW Data
--- a/dom/media/platforms/agnostic/gmp/GMPAudioDecoder.cpp
+++ b/dom/media/platforms/agnostic/gmp/GMPAudioDecoder.cpp
@@ -32,17 +32,21 @@ AudioCallbackAdapter::Decoded(const nsTA
   if (aRate == 0 || aChannels == 0) {
     NS_WARNING("Invalid rate or num channels returned on GMP audio samples");
     mCallback->Error();
     return;
   }
 
   size_t numFrames = aPCM.Length() / aChannels;
   MOZ_ASSERT((aPCM.Length() % aChannels) == 0);
-  auto audioData = MakeUnique<AudioDataValue[]>(aPCM.Length());
+  AlignedAudioBuffer audioData(aPCM.Length());
+  if (!audioData) {
+    mCallback->Error();
+    return;
+  }
 
   for (size_t i = 0; i < aPCM.Length(); ++i) {
     audioData[i] = AudioSampleToFloat(aPCM[i]);
   }
 
   if (mMustRecaptureAudioPosition) {
     mAudioFrameSum = 0;
     auto timestamp = UsecsToFrames(aTimeStamp, aRate);
--- a/dom/media/platforms/android/AndroidDecoderModule.cpp
+++ b/dom/media/platforms/android/AndroidDecoderModule.cpp
@@ -213,17 +213,20 @@ public:
 
 #ifdef MOZ_SAMPLE_TYPE_S16
     const int32_t numSamples = size / 2;
 #else
 #error We only support 16-bit integer PCM
 #endif
 
     const int32_t numFrames = numSamples / numChannels;
-    auto audio = MakeUnique<AudioDataValue[]>(numSamples);
+    AlignedAudioBuffer audio(numSamples);
+    if (!audio) {
+      return NS_ERROR_OUT_OF_MEMORY;
+    }
 
     const uint8_t* bufferStart = static_cast<uint8_t*>(aBuffer) + offset;
     PodCopy(audio.get(), reinterpret_cast<const AudioDataValue*>(bufferStart),
             numSamples);
 
     int64_t presentationTimeUs;
     NS_ENSURE_SUCCESS(rv = aInfo->PresentationTimeUs(&presentationTimeUs), rv);
 
--- a/dom/media/platforms/apple/AppleATDecoder.cpp
+++ b/dom/media/platforms/apple/AppleATDecoder.cpp
@@ -215,17 +215,20 @@ AppleATDecoder::DecodeSample(MediaRawDat
   auto packets = MakeUnique<AudioStreamPacketDescription[]>(MAX_AUDIO_FRAMES);
 
   // This API insists on having packets spoon-fed to it from a callback.
   // This structure exists only to pass our state.
   PassthroughUserData userData =
     { channels, (UInt32)aSample->Size(), aSample->Data() };
 
   // Decompressed audio buffer
-  auto decoded = MakeUnique<AudioDataValue[]>(maxDecodedSamples);
+  AlignedAudioBuffer decoded(maxDecodedSamples);
+  if (!decoded) {
+    return NS_ERROR_OUT_OF_MEMORY;
+  }
 
   do {
     AudioBufferList decBuffer;
     decBuffer.mNumberBuffers = 1;
     decBuffer.mBuffers[0].mNumberChannels = channels;
     decBuffer.mBuffers[0].mDataByteSize =
       maxDecodedSamples * sizeof(AudioDataValue);
     decBuffer.mBuffers[0].mData = decoded.get();
@@ -268,17 +271,20 @@ AppleATDecoder::DecodeSample(MediaRawDat
   }
 
 #ifdef LOG_SAMPLE_DECODE
   LOG("pushed audio at time %lfs; duration %lfs\n",
       (double)aSample->mTime / USECS_PER_S,
       duration.ToSeconds());
 #endif
 
-  auto data = MakeUnique<AudioDataValue[]>(outputData.Length());
+  AlignedAudioBuffer data(outputData.Length());
+  if (!data) {
+    return NS_ERROR_OUT_OF_MEMORY;
+  }
   PodCopy(data.get(), &outputData[0], outputData.Length());
   RefPtr<AudioData> audio = new AudioData(aSample->mOffset,
                                           aSample->mTime,
                                           duration.ToMicroseconds(),
                                           numFrames,
                                           Move(data),
                                           channels,
                                           rate);
--- a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
@@ -43,22 +43,25 @@ FFmpegAudioDecoder<LIBAV_VER>::InitCodec
   // isn't implemented.
   mCodecContext->thread_count = 1;
   // FFmpeg takes this as a suggestion for what format to use for audio samples.
   // LibAV 0.8 produces rubbish float interleaved samples, request 16 bits audio.
   mCodecContext->request_sample_fmt =
     (mLib->mVersion == 53) ? AV_SAMPLE_FMT_S16 : AV_SAMPLE_FMT_FLT;
 }
 
-static UniquePtr<AudioDataValue[]>
+static AlignedAudioBuffer
 CopyAndPackAudio(AVFrame* aFrame, uint32_t aNumChannels, uint32_t aNumAFrames)
 {
   MOZ_ASSERT(aNumChannels <= MAX_CHANNELS);
 
-  auto audio = MakeUnique<AudioDataValue[]>(aNumChannels * aNumAFrames);
+  AlignedAudioBuffer audio(aNumChannels * aNumAFrames);
+  if (!audio) {
+    return audio;
+  }
 
   if (aFrame->format == AV_SAMPLE_FMT_FLT) {
     // Audio data already packed. No need to do anything other than copy it
     // into a buffer we own.
     memcpy(audio.get(), aFrame->data[0],
            aNumChannels * aNumAFrames * sizeof(AudioDataValue));
   } else if (aFrame->format == AV_SAMPLE_FMT_FLTP) {
     // Planar audio data. Pack it into something we can understand.
@@ -122,22 +125,22 @@ FFmpegAudioDecoder<LIBAV_VER>::DecodePac
       mCallback->Error();
       return;
     }
 
     if (decoded) {
       uint32_t numChannels = mCodecContext->channels;
       uint32_t samplingRate = mCodecContext->sample_rate;
 
-      UniquePtr<AudioDataValue[]> audio =
+      AlignedAudioBuffer audio =
         CopyAndPackAudio(mFrame, numChannels, mFrame->nb_samples);
 
       media::TimeUnit duration =
         FramesToTimeUnit(mFrame->nb_samples, samplingRate);
-      if (!duration.IsValid()) {
+      if (!audio || !duration.IsValid()) {
         NS_WARNING("Invalid count of accumulated audio samples");
         mCallback->Error();
         return;
       }
 
       RefPtr<AudioData> data = new AudioData(samplePosition,
                                              pts.ToMicroseconds(),
                                              duration.ToMicroseconds(),
--- a/dom/media/platforms/wmf/WMFAudioMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFAudioMFTManager.cpp
@@ -285,17 +285,20 @@ WMFAudioMFTManager::Output(int64_t aStre
   MOZ_ASSERT(numFrames >= 0);
   MOZ_ASSERT(numSamples >= 0);
   if (numFrames == 0) {
     // All data from this chunk stripped, loop back and try to output the next
     // frame, if possible.
     return S_OK;
   }
 
-  auto audioData = MakeUnique<AudioDataValue[]>(numSamples);
+  AlignedAudioBuffer audioData(numSamples);
+  if (!audioData) {
+    return E_OUTOFMEMORY;
+  }
 
   int16_t* pcm = (int16_t*)data;
   for (int32_t i = 0; i < numSamples; ++i) {
     audioData[i] = AudioSampleToFloat(pcm[i]);
   }
 
   buffer->Unlock();
 
--- a/dom/media/wave/WaveReader.cpp
+++ b/dom/media/wave/WaveReader.cpp
@@ -213,17 +213,20 @@ bool WaveReader::DecodeAudioData()
 
   MOZ_ASSERT(BLOCK_SIZE % 3 == 0);
   MOZ_ASSERT(BLOCK_SIZE % 2 == 0);
 
   static_assert(uint64_t(BLOCK_SIZE) < UINT_MAX /
                 sizeof(AudioDataValue) / MAX_CHANNELS,
                 "bufferSize calculation could overflow.");
   const size_t bufferSize = static_cast<size_t>(frames * mChannels);
-  auto sampleBuffer = MakeUnique<AudioDataValue[]>(bufferSize);
+  AlignedAudioBuffer sampleBuffer(bufferSize);
+  if (!sampleBuffer) {
+    return false;
+  }
 
   static_assert(uint64_t(BLOCK_SIZE) < UINT_MAX / sizeof(char),
                 "BLOCK_SIZE too large for enumerator.");
   auto dataBuffer = MakeUnique<char[]>(static_cast<size_t>(readSize));
 
   if (!ReadAll(dataBuffer.get(), readSize)) {
     return false;
   }