Bug 783953 - Rename MOZ_SAMPLE_TYPE_S16LE to MOZ_SAMPLE_TYPE_S16. r=kinetik,roc
author: Paul Adenot <paul@paul.cx>
Sat, 01 Sep 2012 11:35:56 -0400
changeset 104095 978761f9b84ec03c0a651e6033058bbae4a11f01
parent 104094 72e7dbce3872824401ae05e614b039191459b1d3
child 104096 dff1055c2f505ee9428b8809daf78648fdf7b95b
push id: 14332
push user: ryanvm@gmail.com
push date: Sat, 01 Sep 2012 15:36:01 +0000
treeherder: mozilla-inbound@304f86e12b60 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: kinetik, roc
bugs: 783953
milestone: 18.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 783953 - Rename MOZ_SAMPLE_TYPE_S16LE to MOZ_SAMPLE_TYPE_S16. r=kinetik,roc
configure.in
content/html/content/src/nsHTMLAudioElement.cpp
content/media/AudioSegment.cpp
content/media/AudioSegment.h
content/media/nsAudioStream.cpp
content/media/nsAudioStream.h
content/media/nsBuiltinDecoderReader.h
content/media/wave/nsWaveReader.cpp
content/media/webrtc/MediaEngineWebRTCAudio.cpp
--- a/configure.in
+++ b/configure.in
@@ -4136,17 +4136,17 @@ MOZ_OGG=1
 MOZ_RAW=
 MOZ_SYDNEYAUDIO=
 MOZ_SPEEX_RESAMPLER=1
 MOZ_CUBEB=
 MOZ_VORBIS=
 MOZ_TREMOR=
 MOZ_WAVE=1
 MOZ_SAMPLE_TYPE_FLOAT32=
-MOZ_SAMPLE_TYPE_S16LE=
+MOZ_SAMPLE_TYPE_S16=
 MOZ_MEDIA=
 MOZ_OPUS=1
 MOZ_WEBM=1
 MOZ_WEBRTC=1
 MOZ_WEBRTC_SIGNALING=
 MOZ_MEDIA_PLUGINS=
 MOZ_MEDIA_NAVIGATOR=
 MOZ_OMX_PLUGIN=
@@ -5149,19 +5149,19 @@ if test -n "$MOZ_WEBRTC"; then
     MOZ_VP8_ENCODER=1
     MOZ_VP8_ERROR_CONCEALMENT=1
 fi
 
 AC_SUBST(MOZ_WEBRTC)
 
 case "$target_cpu" in
 arm*)
-    MOZ_SAMPLE_TYPE_S16LE=1
-    AC_DEFINE(MOZ_SAMPLE_TYPE_S16LE)
-    AC_SUBST(MOZ_SAMPLE_TYPE_S16LE)
+    MOZ_SAMPLE_TYPE_S16=1
+    AC_DEFINE(MOZ_SAMPLE_TYPE_S16)
+    AC_SUBST(MOZ_SAMPLE_TYPE_S16)
 ;;
 *)
     MOZ_SAMPLE_TYPE_FLOAT32=1
     AC_DEFINE(MOZ_SAMPLE_TYPE_FLOAT32)
     AC_SUBST(MOZ_SAMPLE_TYPE_FLOAT32)
 ;;
 esac
 
--- a/content/html/content/src/nsHTMLAudioElement.cpp
+++ b/content/html/content/src/nsHTMLAudioElement.cpp
@@ -164,17 +164,17 @@ nsHTMLAudioElement::MozWriteAudio(const 
   if (dataLength % mChannels != 0) {
     return NS_ERROR_DOM_INDEX_SIZE_ERR;
   }
 
   // Don't write more than can be written without blocking.
   uint32_t writeLen = NS_MIN(mAudioStream->Available(), dataLength / mChannels);
 
   float* frames = JS_GetFloat32ArrayData(tsrc, aCx);
-#ifdef MOZ_SAMPLE_TYPE_S16LE
+#ifdef MOZ_SAMPLE_TYPE_S16
   // Convert the samples back to integers as we are using fixed point audio in
   // the nsAudioStream.
   nsAutoArrayPtr<short> shortsArray(new short[writeLen * mChannels]);
   // Hard clip the samples.
   for (uint32_t i = 0; i <  writeLen * mChannels; ++i) {
     float scaled_value = floorf(0.5 + 32768 * frames[i]);
     if (frames[i] < 0.0) {
       shortsArray[i] = (scaled_value < -32768.0) ?
--- a/content/media/AudioSegment.cpp
+++ b/content/media/AudioSegment.cpp
@@ -2,26 +2,16 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "AudioSegment.h"
 
 namespace mozilla {
 
-static uint16_t
-FlipByteOrderIfBigEndian(uint16_t aValue)
-{
-  uint16_t s = aValue;
-#if defined(IS_BIG_ENDIAN)
-  s = (s << 8) | (s >> 8);
-#endif
-  return s;
-}
-
 /*
  * Use "2^N" conversion since it's simple, fast, "bit transparent", used by
  * many other libraries and apparently behaves reasonably.
  * http://blog.bjornroche.com/2009/12/int-float-int-its-jungle-out-there.html
  * http://blog.bjornroche.com/2009/12/linearity-and-dynamic-range-in-int.html
  */
 static float
 SampleToFloat(float aValue)
@@ -31,17 +21,17 @@ SampleToFloat(float aValue)
 static float
 SampleToFloat(uint8_t aValue)
 {
   return (aValue - 128)/128.0f;
 }
 static float
 SampleToFloat(int16_t aValue)
 {
-  return int16_t(FlipByteOrderIfBigEndian(aValue))/32768.0f;
+  return aValue/32768.0f;
 }
 
 static void
 FloatToSample(float aValue, float* aOut)
 {
   *aOut = aValue;
 }
 static void
@@ -51,17 +41,17 @@ FloatToSample(float aValue, uint8_t* aOu
   float clamped = NS_MAX(0.0f, NS_MIN(255.0f, v));
   *aOut = uint8_t(clamped);
 }
 static void
 FloatToSample(float aValue, int16_t* aOut)
 {
   float v = aValue*32768.0f;
   float clamped = NS_MAX(-32768.0f, NS_MIN(32767.0f, v));
-  *aOut = int16_t(FlipByteOrderIfBigEndian(int16_t(clamped)));
+  *aOut = int16_t(clamped);
 }
 
 template <class SrcT, class DestT>
 static void
 InterleaveAndConvertBuffer(const SrcT* aSource, int32_t aSourceLength,
                            int32_t aLength,
                            float aVolume,
                            int32_t aChannels,
@@ -84,18 +74,18 @@ InterleaveAndConvertBuffer(const int16_t
                            int32_t aChannels,
                            int16_t* aOutput)
 {
   int16_t* output = aOutput;
   float v = NS_MAX(NS_MIN(aVolume, 1.0f), -1.0f);
   int32_t volume = int32_t((1 << 16) * v);
   for (int32_t i = 0; i < aLength; ++i) {
     for (int32_t channel = 0; channel < aChannels; ++channel) {
-      int16_t s = FlipByteOrderIfBigEndian(aSource[channel*aSourceLength + i]);
-      *output = FlipByteOrderIfBigEndian(int16_t((int32_t(s) * volume) >> 16));
+      int16_t s = aSource[channel*aSourceLength + i];
+      *output = int16_t((int32_t(s) * volume) >> 16);
       ++output;
     }
   }
 }
 
 template <class SrcT>
 static void
 InterleaveAndConvertBuffer(const SrcT* aSource, int32_t aSourceLength,
@@ -104,17 +94,17 @@ InterleaveAndConvertBuffer(const SrcT* a
                            int32_t aChannels,
                            void* aOutput, nsAudioStream::SampleFormat aOutputFormat)
 {
   switch (aOutputFormat) {
   case nsAudioStream::FORMAT_FLOAT32:
     InterleaveAndConvertBuffer(aSource, aSourceLength, aLength, aVolume,
                                aChannels, static_cast<float*>(aOutput));
     break;
-  case nsAudioStream::FORMAT_S16_LE:
+  case nsAudioStream::FORMAT_S16:
     InterleaveAndConvertBuffer(aSource, aSourceLength, aLength, aVolume,
                                aChannels, static_cast<int16_t*>(aOutput));
     break;
   case nsAudioStream::FORMAT_U8:
     InterleaveAndConvertBuffer(aSource, aSourceLength, aLength, aVolume,
                                aChannels, static_cast<uint8_t*>(aOutput));
     break;
   }
@@ -131,17 +121,17 @@ InterleaveAndConvertBuffer(const void* a
   switch (aSourceFormat) {
   case nsAudioStream::FORMAT_FLOAT32:
     InterleaveAndConvertBuffer(static_cast<const float*>(aSource) + aOffset, aSourceLength,
                                aLength,
                                aVolume,
                                aChannels,
                                aOutput, aOutputFormat);
     break;
-  case nsAudioStream::FORMAT_S16_LE:
+  case nsAudioStream::FORMAT_S16:
     InterleaveAndConvertBuffer(static_cast<const int16_t*>(aSource) + aOffset, aSourceLength,
                                aLength,
                                aVolume,
                                aChannels,
                                aOutput, aOutputFormat);
     break;
   case nsAudioStream::FORMAT_U8:
     InterleaveAndConvertBuffer(static_cast<const uint8_t*>(aSource) + aOffset, aSourceLength,
--- a/content/media/AudioSegment.h
+++ b/content/media/AudioSegment.h
@@ -63,17 +63,17 @@ struct AudioChunk {
 class AudioSegment : public MediaSegmentBase<AudioSegment, AudioChunk> {
 public:
   typedef nsAudioStream::SampleFormat SampleFormat;
 
   static int GetSampleSize(SampleFormat aFormat)
   {
     switch (aFormat) {
     case nsAudioStream::FORMAT_U8: return 1;
-    case nsAudioStream::FORMAT_S16_LE: return 2;
+    case nsAudioStream::FORMAT_S16: return 2;
     case nsAudioStream::FORMAT_FLOAT32: return 4;
     }
     NS_ERROR("Bad format");
     return 0;
   }
 
   AudioSegment() : MediaSegmentBase<AudioSegment, AudioChunk>(AUDIO), mChannels(0) {}
 
--- a/content/media/nsAudioStream.cpp
+++ b/content/media/nsAudioStream.cpp
@@ -469,25 +469,21 @@ nsresult nsNativeAudioStream::Write(cons
   if (mInError)
     return NS_ERROR_FAILURE;
 
   uint32_t samples = aFrames * mChannels;
   nsAutoArrayPtr<short> s_data(new short[samples]);
 
   if (s_data) {
     double scaled_volume = GetVolumeScale() * mVolume;
-#ifdef MOZ_SAMPLE_TYPE_S16LE
+#ifdef MOZ_SAMPLE_TYPE_S16
     const short* buf = static_cast<const short*>(aBuf);
     int32_t volume = int32_t((1 << 16) * scaled_volume);
     for (uint32_t i = 0; i < samples; ++i) {
-      short s = buf[i];
-#if defined(IS_BIG_ENDIAN)
-      s = ((s & 0x00ff) << 8) | ((s & 0xff00) >> 8);
-#endif
-      s_data[i] = short((int32_t(s) * volume) >> 16);
+      s_data[i] = short((int32_t(buf[i]) * volume) >> 16);
     }
 #else /* MOZ_SAMPLE_TYPE_FLOAT32 */
     const SampleType* buf = static_cast<const SampleType*>(aBuf);
     for (uint32_t i = 0; i <  samples; ++i) {
       float scaled_value = floorf(0.5 + 32768 * buf[i] * scaled_volume);
       if (buf[i] < 0.0) {
         s_data[i] = (scaled_value < -32768.0) ?
           -32768 :
@@ -946,17 +942,17 @@ nsBufferedAudioStream::Init(int32_t aNum
 
   mRate = aRate;
   mChannels = aNumChannels;
   mFormat = MOZ_AUDIO_DATA_FORMAT;
 
   cubeb_stream_params params;
   params.rate = aRate;
   params.channels = aNumChannels;
-#ifdef MOZ_SAMPLE_TYPE_S16LE
+#ifdef MOZ_SAMPLE_TYPE_S16
   params.format =  CUBEB_SAMPLE_S16NE;
   mBytesPerFrame = sizeof(int16_t) * aNumChannels;
 #else /* MOZ_SAMPLE_TYPE_FLOAT32 */
   params.format = CUBEB_SAMPLE_FLOAT32NE;
   mBytesPerFrame = sizeof(float) * aNumChannels;
 #endif
 
   {
@@ -1184,17 +1180,17 @@ nsBufferedAudioStream::DataCallback(void
     uint8_t* output = reinterpret_cast<uint8_t*>(aBuffer);
     for (int i = 0; i < 2; ++i) {
       // Fast path for unity volume case.
       if (scaled_volume == 1.0) {
         memcpy(output, input[i], input_size[i]);
         output += input_size[i];
       } else {
         // Adjust volume as each sample is copied out.
-#ifdef MOZ_SAMPLE_TYPE_S16LE
+#ifdef MOZ_SAMPLE_TYPE_S16
         int32_t volume = int32_t(1 << 16) * scaled_volume;
 
         const short* src = static_cast<const short*>(input[i]);
         short* dst = reinterpret_cast<short*>(output);
         for (uint32_t j = 0; j < input_size[i] / (mBytesPerFrame / mChannels); ++j) {
           dst[j] = short((int32_t(src[j]) * volume) >> 16);
         }
 #else /* MOZ_SAMPLE_TYPE_FLOAT32 */
--- a/content/media/nsAudioStream.h
+++ b/content/media/nsAudioStream.h
@@ -6,18 +6,18 @@
 #if !defined(nsAudioStream_h_)
 #define nsAudioStream_h_
 
 #include "nscore.h"
 #include "nsISupportsImpl.h"
 #include "nsIThread.h"
 #include "nsAutoPtr.h"
 
-#ifdef MOZ_SAMPLE_TYPE_S16LE
-#define MOZ_AUDIO_DATA_FORMAT (nsAudioStream::FORMAT_S16_LE)
+#ifdef MOZ_SAMPLE_TYPE_S16
+#define MOZ_AUDIO_DATA_FORMAT (nsAudioStream::FORMAT_S16)
 typedef short SampleType;
 #else
 #define MOZ_AUDIO_DATA_FORMAT (nsAudioStream::FORMAT_FLOAT32)
 typedef float SampleType;
 #endif
 
 // Access to a single instance of this class must be synchronized by
 // callers, or made from a single thread.  One exception is that access to
@@ -25,17 +25,17 @@ typedef float SampleType;
 // is thread-safe without external synchronization.
 class nsAudioStream : public nsISupports
 {
 public:
 
   enum SampleFormat
   {
     FORMAT_U8,
-    FORMAT_S16_LE,
+    FORMAT_S16,
     FORMAT_FLOAT32
   };
 
   nsAudioStream()
     : mRate(0),
       mChannels(0)
   {}
 
--- a/content/media/nsBuiltinDecoderReader.h
+++ b/content/media/nsBuiltinDecoderReader.h
@@ -48,17 +48,17 @@ public:
 
   // True if we have an active audio bitstream.
   bool mHasAudio;
 
   // True if we have an active video bitstream.
   bool mHasVideo;
 };
 
-#ifdef MOZ_SAMPLE_TYPE_S16LE
+#ifdef MOZ_SAMPLE_TYPE_S16
 #include <ogg/os_types.h>
 typedef ogg_int32_t VorbisPCMValue;
 typedef short AudioDataValue;
 
 #define MOZ_CLIP_TO_15(x) ((x)<-32768?-32768:(x)<=32767?(x):32767)
 // Convert the output of vorbis_synthesis_pcmout to a AudioDataValue
 #define MOZ_CONVERT_VORBIS_SAMPLE(x) \
  (static_cast<AudioDataValue>(MOZ_CLIP_TO_15((x)>>9)))
--- a/content/media/wave/nsWaveReader.cpp
+++ b/content/media/wave/nsWaveReader.cpp
@@ -173,25 +173,25 @@ bool nsWaveReader::DecodeAudioData()
 
   // convert data to samples
   const char* d = dataBuffer.get();
   AudioDataValue* s = sampleBuffer.get();
   for (int i = 0; i < frames; ++i) {
     for (unsigned int j = 0; j < mChannels; ++j) {
       if (mSampleFormat == nsAudioStream::FORMAT_U8) {
         uint8_t v =  ReadUint8(&d);
-#if defined(MOZ_SAMPLE_TYPE_S16LE)
+#if defined(MOZ_SAMPLE_TYPE_S16)
         *s++ = (v * (1.F/PR_UINT8_MAX)) * PR_UINT16_MAX + PR_INT16_MIN;
 #elif defined(MOZ_SAMPLE_TYPE_FLOAT32)
         *s++ = (v * (1.F/PR_UINT8_MAX)) * 2.F - 1.F;
 #endif
       }
-      else if (mSampleFormat == nsAudioStream::FORMAT_S16_LE) {
+      else if (mSampleFormat == nsAudioStream::FORMAT_S16) {
         int16_t v =  ReadInt16LE(&d);
-#if defined(MOZ_SAMPLE_TYPE_S16LE)
+#if defined(MOZ_SAMPLE_TYPE_S16)
         *s++ = v;
 #elif defined(MOZ_SAMPLE_TYPE_FLOAT32)
         *s++ = (int32_t(v) - PR_INT16_MIN) / float(PR_UINT16_MAX) * 2.F - 1.F;
 #endif
       }
     }
   }
 
@@ -448,17 +448,17 @@ nsWaveReader::LoadFormatChunk()
 
   ReentrantMonitorAutoEnter monitor(mDecoder->GetReentrantMonitor());
   mSampleRate = rate;
   mChannels = channels;
   mFrameSize = frameSize;
   if (sampleFormat == 8) {
     mSampleFormat = nsAudioStream::FORMAT_U8;
   } else {
-    mSampleFormat = nsAudioStream::FORMAT_S16_LE;
+    mSampleFormat = nsAudioStream::FORMAT_S16;
   }
   return true;
 }
 
 bool
 nsWaveReader::FindDataOffset()
 {
   // RIFF chunks are always word (two byte) aligned.
--- a/content/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/content/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -209,16 +209,16 @@ MediaEngineWebRTCAudioSource::Process(co
   sample* dest = static_cast<sample*>(buffer->Data());
   for (int i = 0; i < length; i++) {
     dest[i] = audio10ms[i];
   }
 
   AudioSegment segment;
   segment.Init(CHANNELS);
   segment.AppendFrames(
-    buffer.forget(), length, 0, length, nsAudioStream::FORMAT_S16_LE
+    buffer.forget(), length, 0, length, nsAudioStream::FORMAT_S16
   );
   mSource->AppendToTrack(mTrackID, &segment);
 
   return;
 }
 
 }