Bug 1404997 - P23. Strongly enforced that our destination buffer is big enough. r=padenot
authorJean-Yves Avenard <jyavenard@mozilla.com>
Wed, 13 Dec 2017 18:44:30 +0100
changeset 448424 a695b9c49eda544cef788728505b4a0e985d1fce
parent 448423 09a1076a2a9fa173923d293b6496ca812e49d142
child 448425 32c31f7924afe3a8bf441a5da89634af218b7e63
push id: 8527
push user: Callek@gmail.com
push date: Thu, 11 Jan 2018 21:05:50 +0000
treeherder: mozilla-beta@95342d212a7a
reviewers: padenot
bugs: 1404997
milestone: 59.0a1
Bug 1404997 - P23. Strongly enforced that our destination buffer is big enough. r=padenot MozReview-Commit-ID: A1kLsH75SzX
media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
media/webrtc/signaling/src/media-conduit/AudioConduit.h
media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
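
The pattern introduced across these files is a capacity-in / length-out contract: the caller seeds the in/out length argument with the destination buffer's capacity, and the callee release-asserts that the decoded frame fits before copying. A minimal self-contained sketch of that contract, with std::copy_n and a plain assert standing in for mozilla::PodCopy and MOZ_RELEASE_ASSERT, and a hypothetical DecodeOneFrame() in place of the WebRTC voice engine:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Hypothetical stand-in for the decoded WebRTC AudioFrame: one 10 ms
    // frame of interleaved samples (480 = 10 ms of stereo at 24 kHz).
    static std::vector<int16_t> DecodeOneFrame() {
      return std::vector<int16_t>(480, 0);
    }

    // aLengthSamples is in/out: capacity of aSpeechData on entry, samples
    // actually copied on return. The assert fails hard instead of overflowing.
    void GetAudioFrame(int16_t aSpeechData[], int& aLengthSamples)
    {
      const int lengthSamplesAllowed = aLengthSamples; // capacity from caller
      std::vector<int16_t> frame = DecodeOneFrame();
      aLengthSamples = static_cast<int>(frame.size());
      assert(aLengthSamples <= lengthSamplesAllowed);  // MOZ_RELEASE_ASSERT in the patch
      std::copy_n(frame.data(), aLengthSamples, aSpeechData); // PodCopy in the patch
    }

    int main()
    {
      int16_t buffer[960];
      int lengthSamples = 960; // in: capacity of buffer; out: samples written
      GetAudioFrame(buffer, lengthSamples);
      return lengthSamples == 480 ? 0 : 1;
    }
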
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
@@ -710,28 +710,27 @@ WebrtcAudioConduit::SendAudioFrame(const
 MediaConduitErrorCode
 WebrtcAudioConduit::GetAudioFrame(int16_t speechData[],
                                   int32_t samplingFreqHz,
                                   int32_t capture_delay,
                                   int& lengthSamples)
 {
 
   CSFLogDebug(LOGTAG,  "%s ", __FUNCTION__);
-  unsigned int numSamples = 0;
 
   //validate params
   if(!speechData )
   {
     CSFLogError(LOGTAG,"%s Null Audio Buffer Pointer", __FUNCTION__);
     MOZ_ASSERT(PR_FALSE);
     return kMediaConduitMalformedArgument;
   }
 
   // Validate sample length
-  if((numSamples = GetNum10msSamplesForFrequency(samplingFreqHz)) == 0  )
+  if(GetNum10msSamplesForFrequency(samplingFreqHz) == 0)
   {
     CSFLogError(LOGTAG,"%s Invalid Sampling Frequency ", __FUNCTION__);
     MOZ_ASSERT(PR_FALSE);
     return kMediaConduitMalformedArgument;
   }
 
   //validate capture time
   if(capture_delay < 0 )
@@ -744,35 +743,35 @@ WebrtcAudioConduit::GetAudioFrame(int16_
   //Conduit should have reception enabled before we ask for decoded
   // samples
   if(!mEngineReceiving)
   {
     CSFLogError(LOGTAG, "%s Engine not Receiving ", __FUNCTION__);
     return kMediaConduitSessionNotInited;
   }
 
-
+  int lengthSamplesAllowed = lengthSamples;
   lengthSamples = 0;  //output parameter
 
   if (mPtrVoEXmedia->GetAudioFrame(mChannel,
                                    samplingFreqHz,
                                    &mAudioFrame) != 0) {
     int error = mPtrVoEBase->LastError();
     CSFLogError(LOGTAG,  "%s Getting audio data Failed %d", __FUNCTION__, error);
     if(error == VE_RUNTIME_PLAY_ERROR)
     {
       return kMediaConduitPlayoutError;
     }
     return kMediaConduitUnknownError;
   }
 
   // XXX Annoying, have to copy to our buffers -- refactor?
   lengthSamples = mAudioFrame.samples_per_channel_ * mAudioFrame.num_channels_;
-  PodCopy(speechData, mAudioFrame.data_,
-          lengthSamples);
+  MOZ_RELEASE_ASSERT(lengthSamples <= lengthSamplesAllowed);
+  PodCopy(speechData, mAudioFrame.data_, lengthSamples);
 
   // Not #ifdef DEBUG or on a log module so we can use it for about:webrtc/etc
   mSamples += lengthSamples;
   if (mSamples >= mLastSyncLog + samplingFreqHz) {
     int jitter_buffer_delay_ms;
     int playout_buffer_delay_ms;
     int avsync_offset_ms;
     if (GetAVStats(&jitter_buffer_delay_ms,
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.h
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.h
@@ -135,16 +135,17 @@ public:
 
   /**
    * Function to grab a decoded audio-sample from the media engine for rendering
   * / playout of length 10 milliseconds.
    *
   * @param speechData [in]: Pointer to an array to which a 10ms frame of audio will be copied
    * @param samplingFreqHz [in]: Frequency of the sampling for playback in Hertz (16000, 32000,..)
    * @param capture_delay [in]: Estimated Time between reading of the samples to rendering/playback
+   * @param lengthSamples [in]: Contains the maximum length of the speechData array.
   * @param lengthSamples [out]: Will contain the length of the audio frame in samples on return.
                                 Ex: A value of 160 implies 160 samples, each of 16 bits, were copied
                                     into speechData
    * NOTE: This function should be invoked every 10 milliseconds for the best
   *          performance
    * NOTE: ConfigureRecvMediaCodec() SHOULD be called before this function can be invoked
    *       This ensures the decoded samples are ready for reading and playout is enabled.
    *
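
With the new [in] meaning of lengthSamples, callers must seed it with the capacity of speechData before the call, as the MediaPipeline hunk below does. A sketch of the intended call pattern; the 'conduit' pointer and the chosen sampling rate are placeholders, not code from this patch:

    // Sketch of the caller contract after this patch; 'conduit' stands in
    // for the real AudioSessionConduit instance used by MediaPipeline.
    constexpr int kMaxSamples = AUDIO_SAMPLE_BUFFER_MAX_BYTES / sizeof(int16_t);
    int16_t speechData[kMaxSamples];
    int lengthSamples = kMaxSamples;      // [in]: capacity of speechData
    MediaConduitErrorCode err =
      conduit->GetAudioFrame(speechData,
                             16000,       // sampling frequency in Hz
                             0,           // capture delay estimate
                             lengthSamples);
    if (err == kMediaConduitNoError) {
      // [out]: lengthSamples is now samples_per_channel_ * num_channels_.
    }
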
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
@@ -2216,19 +2216,21 @@ private:
   {
     uint32_t samplesPer10ms = mRate / 100;
     // Determine how many frames we need.
     // As we get frames from conduit_ at the same rate as the graph's rate,
      // the number of frames needed is straightforwardly determined.
     TrackTicks framesNeeded = aDesiredTime - mPlayedTicks;
 
     while (framesNeeded >= 0) {
-      int16_t scratchBuffer[AUDIO_SAMPLE_BUFFER_MAX_BYTES / sizeof(int16_t)];
+      const int scratchBufferLength =
+        AUDIO_SAMPLE_BUFFER_MAX_BYTES / sizeof(int16_t);
+      int16_t scratchBuffer[scratchBufferLength];
 
-      int samplesLength;
+      int samplesLength = scratchBufferLength;
 
       // This fetches 10ms of data, either mono or stereo
       MediaConduitErrorCode err =
         static_cast<AudioSessionConduit*>(mConduit.get())
           ->GetAudioFrame(scratchBuffer,
                           mRate,
                           0, // TODO(ekr@rtfm.com): better estimate of "capture"
                              // (really playout) delay
@@ -2243,18 +2245,17 @@ private:
                     mPlayedTicks,
                     aDesiredTime,
                     mSource->StreamTimeToSeconds(aDesiredTime));
         // if this is not enough we'll loop and provide more
         samplesLength = samplesPer10ms;
         PodArrayZero(scratchBuffer);
       }
 
-      MOZ_ASSERT(samplesLength * sizeof(uint16_t) <=
-                 AUDIO_SAMPLE_BUFFER_MAX_BYTES);
+      MOZ_RELEASE_ASSERT(samplesLength <= scratchBufferLength);
 
       CSFLogDebug(
         LOGTAG, "Audio conduit returned buffer of length %u", samplesLength);
 
       RefPtr<SharedBuffer> samples =
         SharedBuffer::Create(samplesLength * sizeof(uint16_t));
       int16_t* samplesData = static_cast<int16_t*>(samples->Data());
       AudioSegment segment;
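
One note on the assertion strength: MOZ_ASSERT compiles away outside of debug builds, while MOZ_RELEASE_ASSERT stays in shipped builds, so an oversized frame now aborts safely instead of silently overflowing the stack buffer. A rough standalone approximation of the two macros' semantics (the real definitions live in mfbt/Assertions.h and emit much richer diagnostics):

    #include <cstdio>
    #include <cstdlib>

    #ifdef DEBUG
    #  define MY_ASSERT(expr) ((expr) ? (void)0 : std::abort())
    #else
    #  define MY_ASSERT(expr) ((void)0)  // compiled out in release builds
    #endif
    #define MY_RELEASE_ASSERT(expr) \
      ((expr) ? (void)0 \
              : (std::fprintf(stderr, "assertion failed: %s\n", #expr), \
                 std::abort()))

    int main()
    {
      const int scratchBufferLength = 480;
      int samplesLength = 480;  // a frame that fits: both checks pass
      MY_ASSERT(samplesLength <= scratchBufferLength);          // no-op in release
      MY_RELEASE_ASSERT(samplesLength <= scratchBufferLength);  // checked in every build
      return 0;
    }
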