Bug 1524890 - P8. Rely on buffer length to calculate the number of frames. r=bryce
author: Jean-Yves Avenard <jyavenard@mozilla.com>
Fri, 22 Feb 2019 09:19:00 +0000
changeset 518678 731a0f1e7f215d04e6bf777e9fc3358cb406693c
parent 518677 926ec4aa54291ce0a7ba86342c53a246d3e7833b
child 518679 ddc76517c3366c8651038ad3df94ca5eb00d1058
push id: 10862
push user: ffxbld-merge
push date: Mon, 11 Mar 2019 13:01:11 +0000
treeherder: mozilla-beta@a2e7f5c935da [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: bryce
bugs: 1524890
milestone: 67.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1524890 - P8. Rely on buffer length to calculate the number of frames. r=bryce Differential Revision: https://phabricator.services.mozilla.com/D20166
dom/media/AudioCompactor.h
dom/media/MediaData.h
dom/media/MediaDecoderStateMachine.cpp
dom/media/ipc/PRemoteDecoder.ipdl
dom/media/ipc/RemoteAudioDecoder.cpp
dom/media/mediasink/AudioSink.cpp
dom/media/platforms/agnostic/BlankDecoderModule.cpp
dom/media/platforms/agnostic/OpusDecoder.cpp
dom/media/platforms/agnostic/VorbisDecoder.cpp
dom/media/platforms/agnostic/WAVDecoder.cpp
dom/media/platforms/android/RemoteDataDecoder.cpp
dom/media/platforms/apple/AppleATDecoder.cpp
dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
dom/media/platforms/wmf/WMFAudioMFTManager.cpp
--- a/dom/media/AudioCompactor.h
+++ b/dom/media/AudioCompactor.h
@@ -61,18 +61,18 @@ class AudioCompactor {
       NS_ASSERTION(framesCopied <= aFrames, "functor copied too many frames");
       buffer.SetLength(size_t(framesCopied) * aChannels);
 
       auto duration = FramesToTimeUnit(framesCopied, aSampleRate);
       if (!duration.IsValid()) {
         return false;
       }
 
-      mQueue.Push(new AudioData(aOffset, time, duration, framesCopied,
-                                std::move(buffer), aChannels, aSampleRate));
+      mQueue.Push(new AudioData(aOffset, time, duration, std::move(buffer),
+                                aChannels, aSampleRate));
 
       // Remove the frames we just pushed into the queue and loop if there is
       // more to be done.
       time += duration;
       aFrames -= framesCopied;
 
       // NOTE: No need to update aOffset as its only an approximation anyway.
     }
--- a/dom/media/MediaData.h
+++ b/dom/media/MediaData.h
@@ -329,27 +329,25 @@ class NullData : public MediaData {
 
   static const Type sType = Type::NULL_DATA;
 };
 
 // Holds chunk a decoded audio frames.
 class AudioData : public MediaData {
  public:
   AudioData(int64_t aOffset, const media::TimeUnit& aTime,
-            const media::TimeUnit& aDuration, uint32_t aFrames,
-            AlignedAudioBuffer&& aData, uint32_t aChannels, uint32_t aRate,
+            const media::TimeUnit& aDuration, AlignedAudioBuffer&& aData,
+            uint32_t aChannels, uint32_t aRate,
             uint32_t aChannelMap = AudioConfig::ChannelLayout::UNKNOWN_MAP)
       : MediaData(sType, aOffset, aTime, aDuration),
         mChannels(aChannels),
         mChannelMap(aChannelMap),
         mRate(aRate),
         mAudioData(std::move(aData)),
-        mFrames(mAudioData.Length() / aChannels) {
-    MOZ_DIAGNOSTIC_ASSERT(mFrames == aFrames);
-  }
+        mFrames(mAudioData.Length() / aChannels) {}
 
   static const Type sType = Type::AUDIO_DATA;
   static const char* sTypeName;
 
   // Access the buffer as a Span.
   Span<AudioDataValue> Data() const {
     return MakeSpan(mAudioData.Data(), mAudioData.Length());
   }
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -1348,17 +1348,17 @@ class MediaDecoderStateMachine::Accurate
     memcpy(audioData.get(),
            audioDataRange.Elements() + (framesToPrune.value() * channels),
            frames * channels * sizeof(AudioDataValue));
     auto duration = FramesToTimeUnit(frames, Info().mAudio.mRate);
     if (!duration.IsValid()) {
       return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
     }
     RefPtr<AudioData> data(new AudioData(
-        aAudio->mOffset, mSeekJob.mTarget->GetTime(), duration, frames,
+        aAudio->mOffset, mSeekJob.mTarget->GetTime(), duration,
         std::move(audioData), channels, aAudio->mRate, aAudio->mChannelMap));
     MOZ_ASSERT(AudioQueue().GetSize() == 0,
                "Should be the 1st sample after seeking");
     mMaster->PushAudio(data);
     mDoneAudioSeeking = true;
 
     return NS_OK;
   }
--- a/dom/media/ipc/PRemoteDecoder.ipdl
+++ b/dom/media/ipc/PRemoteDecoder.ipdl
@@ -22,17 +22,16 @@ struct RemoteVideoDataIPDL
   int32_t frameID;
 };
 
 struct RemoteAudioDataIPDL
 {
   MediaDataIPDL base;
   uint32_t channels;
   uint32_t rate;
-  uint32_t frames;
   uint32_t channelMap;
   Shmem buffer;
 };
 
 union DecodedOutputIPDL
 {
   RemoteAudioDataIPDL;
   RemoteVideoDataIPDL;
--- a/dom/media/ipc/RemoteAudioDecoder.cpp
+++ b/dom/media/ipc/RemoteAudioDecoder.cpp
@@ -25,18 +25,18 @@ mozilla::ipc::IPCResult RemoteAudioDecod
           alignedAudioBuffer.Length());
 
   DeallocShmem(aData.buffer());
 
   RefPtr<AudioData> audio =
       new AudioData(aData.base().offset(),
                     media::TimeUnit::FromMicroseconds(aData.base().time()),
                     media::TimeUnit::FromMicroseconds(aData.base().duration()),
-                    aData.frames(), std::move(alignedAudioBuffer),
-                    aData.channels(), aData.rate(), aData.channelMap());
+                    std::move(alignedAudioBuffer), aData.channels(),
+                    aData.rate(), aData.channelMap());
 
   mDecodedData.AppendElement(std::move(audio));
   return IPC_OK();
 }
 
 MediaResult RemoteAudioDecoderChild::InitIPDL(
     const AudioInfo& aAudioInfo,
     const CreateDecoderParams::OptionSet& aOptions) {
@@ -113,16 +113,15 @@ void RemoteAudioDecoderParent::ProcessDe
       PodCopy(buffer.get<AudioDataValue>(), audio->Data().Elements(),
               audio->Data().Length());
     }
 
     RemoteAudioDataIPDL output(
         MediaDataIPDL(data->mOffset, data->mTime.ToMicroseconds(),
                       data->mTimecode.ToMicroseconds(),
                       data->mDuration.ToMicroseconds(), data->mKeyframe),
-        audio->mChannels, audio->mRate, audio->Frames(), audio->mChannelMap,
-        buffer);
+        audio->mChannels, audio->mRate, audio->mChannelMap, buffer);
 
     Unused << SendOutput(output);
   }
 }
 
 }  // namespace mozilla
--- a/dom/media/mediasink/AudioSink.cpp
+++ b/dom/media/mediasink/AudioSink.cpp
@@ -454,17 +454,17 @@ already_AddRefed<AudioData> AudioSink::C
   }
   auto duration = FramesToTimeUnit(frames, mOutputRate);
   if (!duration.IsValid()) {
     NS_WARNING("Int overflow in AudioSink");
     mErrored = true;
     return nullptr;
   }
   RefPtr<AudioData> data =
-      new AudioData(aReference->mOffset, aReference->mTime, duration, frames,
+      new AudioData(aReference->mOffset, aReference->mTime, duration,
                     std::move(aBuffer), mOutputChannels, mOutputRate);
   return data.forget();
 }
 
 uint32_t AudioSink::DrainConverter(uint32_t aMaxFrames) {
   MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
 
   if (!mConverter || !mLastProcessedPacket || !aMaxFrames) {
--- a/dom/media/platforms/agnostic/BlankDecoderModule.cpp
+++ b/dom/media/platforms/agnostic/BlankDecoderModule.cpp
@@ -98,20 +98,19 @@ already_AddRefed<MediaData> BlankAudioDa
   static const float noteHz = 440.0f;
   for (int i = 0; i < frames.value(); i++) {
     float f = sin(2 * pi * noteHz * mFrameSum / mSampleRate);
     for (unsigned c = 0; c < mChannelCount; c++) {
       samples[i * mChannelCount + c] = AudioDataValue(f);
     }
     mFrameSum++;
   }
-  RefPtr<AudioData> data(
-      new AudioData(aSample->mOffset, aSample->mTime, aSample->mDuration,
-                    uint32_t(frames.value()), std::move(samples), mChannelCount,
-                    mSampleRate));
+  RefPtr<AudioData> data(new AudioData(aSample->mOffset, aSample->mTime,
+                                       aSample->mDuration, std::move(samples),
+                                       mChannelCount, mSampleRate));
   return data.forget();
 }
 
 already_AddRefed<MediaDataDecoder> BlankDecoderModule::CreateVideoDecoder(
     const CreateDecoderParams& aParams) {
   const VideoInfo& config = aParams.VideoConfig();
   UniquePtr<DummyDataCreator> creator = MakeUnique<BlankVideoDataCreator>(
       config.mDisplay.width, config.mDisplay.height, aParams.mImageContainer);
--- a/dom/media/platforms/agnostic/OpusDecoder.cpp
+++ b/dom/media/platforms/agnostic/OpusDecoder.cpp
@@ -328,17 +328,17 @@ RefPtr<MediaDataDecoder::DecodePromise> 
   if (!frames) {
     return DecodePromise::CreateAndResolve(DecodedData(), __func__);
   }
 
   // Trim extra allocated frames.
   buffer.SetLength(frames * channels);
 
   return DecodePromise::CreateAndResolve(
-      DecodedData{new AudioData(aSample->mOffset, time, duration, frames,
+      DecodedData{new AudioData(aSample->mOffset, time, duration,
                                 std::move(buffer), mOpusParser->mChannels,
                                 mOpusParser->mRate, mChannelMap)},
       __func__);
 }
 
 RefPtr<MediaDataDecoder::DecodePromise> OpusDataDecoder::Drain() {
   RefPtr<OpusDataDecoder> self = this;
   // InvokeAsync dispatches a task that will be run after any pending decode
--- a/dom/media/platforms/agnostic/VorbisDecoder.cpp
+++ b/dom/media/platforms/agnostic/VorbisDecoder.cpp
@@ -229,18 +229,18 @@ RefPtr<MediaDataDecoder::DecodePromise> 
                       channels, rate);
       mAudioConverter = MakeUnique<AudioConverter>(in, out);
     }
     MOZ_ASSERT(mAudioConverter->CanWorkInPlace());
     AudioSampleBuffer data(std::move(buffer));
     data = mAudioConverter->Process(std::move(data));
 
     results.AppendElement(
-        new AudioData(aOffset, time, duration, frames, data.Forget(), channels,
-                      rate, mAudioConverter->OutputConfig().Layout().Map()));
+        new AudioData(aOffset, time, duration, data.Forget(), channels, rate,
+                      mAudioConverter->OutputConfig().Layout().Map()));
     mFrames += frames;
     err = vorbis_synthesis_read(&mVorbisDsp, frames);
     if (err) {
       return DecodePromise::CreateAndReject(
           MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                       RESULT_DETAIL("vorbis_synthesis_read:%d", err)),
           __func__);
     }
--- a/dom/media/platforms/agnostic/WAVDecoder.cpp
+++ b/dom/media/platforms/agnostic/WAVDecoder.cpp
@@ -122,17 +122,17 @@ RefPtr<MediaDataDecoder::DecodePromise> 
         }
       }
     }
   }
 
   auto duration = FramesToTimeUnit(frames, mInfo.mRate);
 
   return DecodePromise::CreateAndResolve(
-      DecodedData{new AudioData(aOffset, aSample->mTime, duration, frames,
+      DecodedData{new AudioData(aOffset, aSample->mTime, duration,
                                 std::move(buffer), mInfo.mChannels,
                                 mInfo.mRate)},
       __func__);
 }
 
 RefPtr<MediaDataDecoder::DecodePromise> WaveDataDecoder::Drain() {
   return InvokeAsync(mTaskQueue, __func__, [] {
     return DecodePromise::CreateAndResolve(DecodedData(), __func__);
--- a/dom/media/platforms/android/RemoteDataDecoder.cpp
+++ b/dom/media/platforms/android/RemoteDataDecoder.cpp
@@ -437,20 +437,20 @@ class RemoteAudioDecoder : public Remote
       if (!audio) {
         Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__));
         return;
       }
 
       jni::ByteBuffer::LocalRef dest = jni::ByteBuffer::New(audio.get(), size);
       aSample->WriteToByteBuffer(dest);
 
-      RefPtr<AudioData> data = new AudioData(
-          0, TimeUnit::FromMicroseconds(presentationTimeUs),
-          FramesToTimeUnit(numFrames, mOutputSampleRate), numFrames,
-          std::move(audio), mOutputChannels, mOutputSampleRate);
+      RefPtr<AudioData> data =
+          new AudioData(0, TimeUnit::FromMicroseconds(presentationTimeUs),
+                        FramesToTimeUnit(numFrames, mOutputSampleRate),
+                        std::move(audio), mOutputChannels, mOutputSampleRate);
 
       UpdateOutputStatus(std::move(data));
     }
 
     if ((flags & MediaCodec::BUFFER_FLAG_END_OF_STREAM) != 0) {
       DrainComplete();
     }
   }
--- a/dom/media/platforms/apple/AppleATDecoder.cpp
+++ b/dom/media/platforms/apple/AppleATDecoder.cpp
@@ -295,22 +295,21 @@ MediaResult AppleATDecoder::DecodeSample
                     channels, rate);
     mAudioConverter = MakeUnique<AudioConverter>(in, out);
   }
   if (mAudioConverter && mChannelLayout && mChannelLayout->IsValid()) {
     MOZ_ASSERT(mAudioConverter->CanWorkInPlace());
     data = mAudioConverter->Process(std::move(data));
   }
 
-  RefPtr<AudioData> audio =
-      new AudioData(aSample->mOffset, aSample->mTime, duration, numFrames,
-                    data.Forget(), channels, rate,
-                    mChannelLayout && mChannelLayout->IsValid()
-                        ? mChannelLayout->Map()
-                        : AudioConfig::ChannelLayout::UNKNOWN_MAP);
+  RefPtr<AudioData> audio = new AudioData(
+      aSample->mOffset, aSample->mTime, duration, data.Forget(), channels, rate,
+      mChannelLayout && mChannelLayout->IsValid()
+          ? mChannelLayout->Map()
+          : AudioConfig::ChannelLayout::UNKNOWN_MAP);
   mDecodedSamples.AppendElement(std::move(audio));
   return NS_OK;
 }
 
 MediaResult AppleATDecoder::GetInputAudioDescription(
     AudioStreamBasicDescription& aDesc, const nsTArray<uint8_t>& aExtraData) {
   MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
 
--- a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
@@ -238,18 +238,18 @@ MediaResult FFmpegAudioDecoder<LIBAV_VER
       media::TimeUnit newpts = pts + duration;
       if (!newpts.IsValid()) {
         return MediaResult(
             NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
             RESULT_DETAIL("Invalid count of accumulated audio samples"));
       }
 
       aResults.AppendElement(new AudioData(
-          samplePosition, pts, duration, mFrame->nb_samples, std::move(audio),
-          numChannels, samplingRate, mCodecContext->channel_layout));
+          samplePosition, pts, duration, std::move(audio), numChannels,
+          samplingRate, mCodecContext->channel_layout));
 
       pts = newpts;
 
       if (aGotFrame) {
         *aGotFrame = true;
       }
     }
     packet.data += bytesConsumed;
--- a/dom/media/platforms/wmf/WMFAudioMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFAudioMFTManager.cpp
@@ -321,19 +321,19 @@ WMFAudioMFTManager::Output(int64_t aStre
       mAudioTimeOffset + FramesToTimeUnit(mAudioFrameSum, mAudioRate);
   NS_ENSURE_TRUE(timestamp.IsValid(), E_FAIL);
 
   mAudioFrameSum += numFrames;
 
   media::TimeUnit duration = FramesToTimeUnit(numFrames, mAudioRate);
   NS_ENSURE_TRUE(duration.IsValid(), E_FAIL);
 
-  aOutData = new AudioData(aStreamOffset, timestamp, duration, numFrames,
-                           std::move(audioData), mAudioChannels, mAudioRate,
-                           mChannelsMap);
+  aOutData =
+      new AudioData(aStreamOffset, timestamp, duration, std::move(audioData),
+                    mAudioChannels, mAudioRate, mChannelsMap);
 
 #ifdef LOG_SAMPLE_DECODE
   LOG("Decoded audio sample! timestamp=%lld duration=%lld currentLength=%u",
       timestamp.ToMicroseconds(), duration.ToMicroseconds(), currentLength);
 #endif
 
   return S_OK;
 }