Bug 1089478 - Apply r5630, r5692 to fix Clang 3.5 warnings in WebRTC. r=rjesup
author: Jan Beich <jbeich@vfemail.net>
Mon, 27 Oct 2014 08:18:00 +0100
changeset 212591 2a1404167fb6207b2c0ef7a0b12e4bbe1de8449b
parent 212590 79eace8a91537a576409ac8bf2c33e621dacaad0
child 212592 c410756cb38621425fea6bd7fcfde718e84eb520
push id: 51011
push user: cbook@mozilla.com
push date: Tue, 28 Oct 2014 09:06:47 +0000
treeherder: mozilla-inbound@2a1404167fb6 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: rjesup
bugs: 1089478
milestone: 36.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1089478 - Apply r5630, r5692 to fix Clang 3.5 warnings in WebRTC. r=rjesup
media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core.c
media/webrtc/trunk/webrtc/modules/audio_processing/agc/digital_agc.c
media/webrtc/trunk/webrtc/modules/remote_bitrate_estimator/overuse_detector.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/jitter_estimator.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/receiver.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/rtt_filter.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/video_sender_unittest.cc
media/webrtc/trunk/webrtc/video/call_perf_tests.cc
--- a/media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core.c
+++ b/media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core.c
@@ -731,17 +731,17 @@ int WebRtcAec_GetDelayMetricsCore(AecCor
       break;
     }
   }
   // Account for lookahead.
   *median = (my_median - kLookaheadBlocks) * kMsPerBlock;
 
   // Calculate the L1 norm, with median value as central moment.
   for (i = 0; i < kHistorySizeBlocks; i++) {
-    l1_norm += (float)(fabs(i - my_median) * self->delay_histogram[i]);
+    l1_norm += (float)abs(i - my_median) * self->delay_histogram[i];
   }
   *std = (int)(l1_norm / (float)num_delay_values + 0.5f) * kMsPerBlock;
 
   // Reset histogram.
   memset(self->delay_histogram, 0, sizeof(self->delay_histogram));
 
   return 0;
 }
--- a/media/webrtc/trunk/webrtc/modules/audio_processing/agc/digital_agc.c
+++ b/media/webrtc/trunk/webrtc/modules/audio_processing/agc/digital_agc.c
@@ -284,17 +284,17 @@ int32_t WebRtcAgc_InitDigital(DigitalAgc
 
     return 0;
 }
 
 int32_t WebRtcAgc_AddFarendToDigital(DigitalAgc_t *stt, const int16_t *in_far,
                                      int16_t nrSamples)
 {
     // Check for valid pointer
-    if (&stt->vadFarend == NULL)
+    if (stt == NULL)
     {
         return -1;
     }
 
     // VAD for far end
     WebRtcAgc_ProcessVad(&stt->vadFarend, in_far, nrSamples);
 
     return 0;
--- a/media/webrtc/trunk/webrtc/modules/remote_bitrate_estimator/overuse_detector.cc
+++ b/media/webrtc/trunk/webrtc/modules/remote_bitrate_estimator/overuse_detector.cc
@@ -244,20 +244,20 @@ void OveruseDetector::UpdateKalman(int64
 
   const double h[2] = {fs_delta, 1.0};
   const double Eh[2] = {E_[0][0]*h[0] + E_[0][1]*h[1],
                         E_[1][0]*h[0] + E_[1][1]*h[1]};
 
   const double residual = t_ts_delta - slope_*h[0] - offset_;
 
   const bool stable_state =
-      (BWE_MIN(num_of_deltas_, 60) * fabsf(offset_) < threshold_);
+      (BWE_MIN(num_of_deltas_, 60) * fabs(offset_) < threshold_);
   // We try to filter out very late frames. For instance periodic key
   // frames doesn't fit the Gaussian model well.
-  if (fabsf(residual) < 3 * sqrt(var_noise_)) {
+  if (fabs(residual) < 3 * sqrt(var_noise_)) {
     UpdateNoiseEstimate(residual, min_frame_period, stable_state);
   } else {
     UpdateNoiseEstimate(3 * sqrt(var_noise_), min_frame_period, stable_state);
   }
 
   const double denom = var_noise_ + h[0]*Eh[0] + h[1]*Eh[1];
 
   const double K[2] = {Eh[0] / denom,
@@ -353,17 +353,17 @@ void OveruseDetector::UpdateNoiseEstimat
   }
 }
 
 BandwidthUsage OveruseDetector::Detect(double ts_delta) {
   if (num_of_deltas_ < 2) {
     return kBwNormal;
   }
   const double T = BWE_MIN(num_of_deltas_, 60) * offset_;
-  if (fabsf(T) > threshold_) {
+  if (fabs(T) > threshold_) {
     if (offset_ > 0) {
       if (time_over_using_ == -1) {
         // Initialize the timer. Assume that we've been
         // over-using half of the time since the previous
         // sample.
         time_over_using_ = ts_delta / 2;
       } else {
         // Increment timer
--- a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/jitter_estimator.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/jitter_estimator.cc
@@ -157,17 +157,17 @@ VCMJitterEstimator::UpdateEstimate(int64
     _prevFrameSize = frameSizeBytes;
 
     // Only update the Kalman filter if the sample is not considered
     // an extreme outlier. Even if it is an extreme outlier from a
     // delay point of view, if the frame size also is large the
     // deviation is probably due to an incorrect line slope.
     double deviation = DeviationFromExpectedDelay(frameDelayMS, deltaFS);
 
-    if (abs(deviation) < _numStdDevDelayOutlier * sqrt(_varNoise) ||
+    if (fabs(deviation) < _numStdDevDelayOutlier * sqrt(_varNoise) ||
         frameSizeBytes > _avgFrameSize + _numStdDevFrameSizeOutlier * sqrt(_varFrameSize))
     {
         // Update the variance of the deviation from the
         // line given by the Kalman filter
         EstimateRandomJitter(deviation, incompleteFrame);
         // Prevent updating with frames which have been congested by a large
         // frame, and therefore arrives almost at the same time as that frame.
         // This can occur when we receive a large frame (key frame) which
@@ -252,17 +252,17 @@ VCMJitterEstimator::KalmanEstimateChanne
     Mh[0] = _thetaCov[0][0] * deltaFSBytes + _thetaCov[0][1];
     Mh[1] = _thetaCov[1][0] * deltaFSBytes + _thetaCov[1][1];
     // sigma weights measurements with a small deltaFS as noisy and
     // measurements with large deltaFS as good
     if (_maxFrameSize < 1.0)
     {
         return;
     }
-    double sigma = (300.0 * exp(-abs(static_cast<double>(deltaFSBytes)) /
+    double sigma = (300.0 * exp(-fabs(static_cast<double>(deltaFSBytes)) /
                    (1e0 * _maxFrameSize)) + 1) * sqrt(_varNoise);
     if (sigma < 1.0)
     {
         sigma = 1.0;
     }
     hMh_sigma = deltaFSBytes * Mh[0] + Mh[1] + sigma;
     if ((hMh_sigma < 1e-9 && hMh_sigma >= 0) || (hMh_sigma > -1e-9 && hMh_sigma <= 0))
     {
--- a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/receiver.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/receiver.cc
@@ -154,22 +154,22 @@ VCMEncodedFrame* VCMReceiver::FrameForDe
   const int64_t now_ms = clock_->TimeInMilliseconds();
   timing_->UpdateCurrentDelay(frame_timestamp);
   next_render_time_ms = timing_->RenderTimeMs(frame_timestamp, now_ms);
   // Check render timing.
   bool timing_error = false;
   // Assume that render timing errors are due to changes in the video stream.
   if (next_render_time_ms < 0) {
     timing_error = true;
-  } else if (abs(next_render_time_ms - now_ms) > max_video_delay_ms_) {
+  } else if (std::abs(next_render_time_ms - now_ms) > max_video_delay_ms_) {
     WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
                  VCMId(vcm_id_, receiver_id_),
                  "This frame is out of our delay bounds, resetting jitter "
                  "buffer: %d > %d",
-                 static_cast<int>(abs(next_render_time_ms - now_ms)),
+                 static_cast<int>(std::abs(next_render_time_ms - now_ms)),
                  max_video_delay_ms_);
     timing_error = true;
   } else if (static_cast<int>(timing_->TargetVideoDelay()) >
              max_video_delay_ms_) {
     WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
                  VCMId(vcm_id_, receiver_id_),
                  "More than %u ms target delay. Flushing jitter buffer and"
                  "resetting timing.", max_video_delay_ms_);
--- a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/rtt_filter.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/rtt_filter.cc
@@ -109,17 +109,17 @@ VCMRttFilter::Update(uint32_t rttMs)
                "RttFilter Update: sample=%u avgRtt=%f varRtt=%f maxRtt=%u",
                rttMs, _avgRtt, _varRtt, _maxRtt);
 }
 
 bool
 VCMRttFilter::JumpDetection(uint32_t rttMs)
 {
     double diffFromAvg = _avgRtt - rttMs;
-    if (abs(diffFromAvg) > _jumpStdDevs * sqrt(_varRtt))
+    if (fabs(diffFromAvg) > _jumpStdDevs * sqrt(_varRtt))
     {
         int diffSign = (diffFromAvg >= 0) ? 1 : -1;
         int jumpCountSign = (_jumpCount >= 0) ? 1 : -1;
         if (diffSign != jumpCountSign)
         {
             // Since the signs differ the samples currently
             // in the buffer is useless as they represent a
             // jump in a different direction.
--- a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/video_sender_unittest.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/video_sender_unittest.cc
@@ -47,17 +47,17 @@ enum {
 struct Vp8StreamInfo {
   float framerate_fps[kMaxNumberOfTemporalLayers];
   int bitrate_kbps[kMaxNumberOfTemporalLayers];
 };
 
 MATCHER_P(MatchesVp8StreamInfo, expected, "") {
   bool res = true;
   for (int tl = 0; tl < kMaxNumberOfTemporalLayers; ++tl) {
-    if (abs(expected.framerate_fps[tl] - arg.framerate_fps[tl]) > 0.5) {
+    if (fabs(expected.framerate_fps[tl] - arg.framerate_fps[tl]) > 0.5) {
       *result_listener << " framerate_fps[" << tl
                        << "] = " << arg.framerate_fps[tl] << " (expected "
                        << expected.framerate_fps[tl] << ") ";
       res = false;
     }
     if (abs(expected.bitrate_kbps[tl] - arg.bitrate_kbps[tl]) > 10) {
       *result_listener << " bitrate_kbps[" << tl
                        << "] = " << arg.bitrate_kbps[tl] << " (expected "
--- a/media/webrtc/trunk/webrtc/video/call_perf_tests.cc
+++ b/media/webrtc/trunk/webrtc/video/call_perf_tests.cc
@@ -179,17 +179,17 @@ class VideoRtcpAndSyncObserver : public 
     ss << stream_offset;
     webrtc::test::PrintResult(
         "stream_offset", "", "synchronization", ss.str(), "ms", false);
     int64_t time_since_creation = now_ms - creation_time_ms_;
     // During the first couple of seconds audio and video can falsely be
     // estimated as being synchronized. We don't want to trigger on those.
     if (time_since_creation < kStartupTimeMs)
       return;
-    if (abs(latest_audio_ntp - latest_video_ntp) < kInSyncThresholdMs) {
+    if (std::abs(latest_audio_ntp - latest_video_ntp) < kInSyncThresholdMs) {
       if (first_time_in_sync_ == -1) {
         first_time_in_sync_ = now_ms;
         webrtc::test::PrintResult("sync_convergence_time",
                                   "",
                                   "synchronization",
                                   time_since_creation,
                                   "ms",
                                   false);