Bug 805254. Part 8: Consolidate audio sample processing code using templates over the format types. r=kinetik
author: Robert O'Callahan <robert@ocallahan.org>
Thu, 25 Oct 2012 23:09:40 +1300
changeset 111624 882cfaba69c25be5368542dbd10675c69401c6f4
parent 111623 569b5cf142851ddc79c169a205c21a2dc84f3acd
child 111625 caff86361591bc5c4f899298ad278b9b7972203e
push id: 93
push user: nmatsakis@mozilla.com
push date: Wed, 31 Oct 2012 21:26:57 +0000
reviewers: kinetik
bugs: 805254
milestone: 19.0a1
Bug 805254. Part 8: Consolidate audio sample processing code using templates over the format types. r=kinetik Replace nsAudioStream::Format with an AUDIO_OUTPUT_FORMAT enum value so we can use it as a template parameter. Introduce AudioSampleTraits<AudioSampleFormat> to give us access to the C++ type corresponding to an enum value. Move SampleToFloat/FloatToSample to AudioSampleFormat.h. Introduce ConvertAudioSamples and ConvertAudioSamplesWithScale functions and use them from various places. Moves AudioDataValue to AudioSampleFormat.h. The name isn't great, but it'll do.
content/html/content/src/nsHTMLAudioElement.cpp
content/media/AudioSampleFormat.h
content/media/AudioSegment.cpp
content/media/nsAudioStream.cpp
content/media/nsAudioStream.h
content/media/nsBuiltinDecoderReader.h
content/media/nsBuiltinDecoderStateMachine.cpp
media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
--- a/content/html/content/src/nsHTMLAudioElement.cpp
+++ b/content/html/content/src/nsHTMLAudioElement.cpp
@@ -9,16 +9,17 @@
 #include "nsGenericHTMLElement.h"
 #include "nsGkAtoms.h"
 #include "nsIDocument.h"
 #include "jsfriendapi.h"
 #include "nsContentUtils.h"
 #include "nsJSUtils.h"
 #include "AudioSampleFormat.h"
 
+using namespace mozilla;
 using namespace mozilla::dom;
 
 nsGenericHTMLElement*
 NS_NewHTMLAudioElement(already_AddRefed<nsINodeInfo> aNodeInfo,
                        FromParser aFromParser)
 {
   /*
    * nsHTMLAudioElement's will be created without a nsINodeInfo passed in
@@ -176,34 +177,22 @@ nsHTMLAudioElement::MozWriteAudio(const 
     return NS_ERROR_DOM_INDEX_SIZE_ERR;
   }
 
   // Don't write more than can be written without blocking.
   uint32_t writeLen = NS_MIN(mAudioStream->Available(), dataLength / mChannels);
 
   float* frames = JS_GetFloat32ArrayData(tsrc, aCx);
   nsresult rv;
-  if (nsAudioStream::Format() == AUDIO_FORMAT_S16) {
+  if (AUDIO_OUTPUT_FORMAT == AUDIO_FORMAT_S16) {
     // Convert the samples back to integers as we are using fixed point audio in
     // the nsAudioStream.
-    nsAutoArrayPtr<short> shortsArray(new short[writeLen * mChannels]);
-    // Hard clip the samples.
-    for (uint32_t i = 0; i <  writeLen * mChannels; ++i) {
-      float scaled_value = floorf(0.5 + 32768 * frames[i]);
-      if (frames[i] < 0.0) {
-        shortsArray[i] = (scaled_value < -32768.0) ?
-          -32768 :
-          short(scaled_value);
-      } else {
-        shortsArray[i] = (scaled_value > 32767.0) ?
-          32767 :
-          short(scaled_value);
-      }
-    }
-    rv = mAudioStream->Write(shortsArray, writeLen);
+    nsAutoArrayPtr<AudioDataValue> shortsArray(new AudioDataValue[writeLen * mChannels]);
+    ConvertAudioSamples(frames, shortsArray.get(), writeLen * mChannels);
+    rv = mAudioStream->Write(shortsArray.get(), writeLen);
   } else {
     rv = mAudioStream->Write(frames, writeLen);
   }
 
   if (NS_FAILED(rv)) {
     return rv;
   }
 
--- a/content/media/AudioSampleFormat.h
+++ b/content/media/AudioSampleFormat.h
@@ -1,28 +1,135 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim:set ts=2 sw=2 sts=2 et cindent: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 #ifndef MOZILLA_AUDIOSAMPLEFORMAT_H_
 #define MOZILLA_AUDIOSAMPLEFORMAT_H_
 
+#include "nsAlgorithm.h"
+
 namespace mozilla {
 
 /**
  * Audio formats supported in MediaStreams and media elements.
  *
  * Only one of these is supported by nsAudioStream, and that is determined
- * at compile time (roughly, FLOAT32 on desktops, S16 on mobile). That format
- * is returned by nsAudioStream::Format().
+ * at compile time (roughly, FLOAT32 on desktops, S16 on mobile). Media decoders
+ * produce that format only; queued AudioData always uses that format.
  */
 enum AudioSampleFormat
 {
   // Native-endian signed 16-bit audio samples
   AUDIO_FORMAT_S16,
   // Signed 32-bit float samples
-  AUDIO_FORMAT_FLOAT32
+  AUDIO_FORMAT_FLOAT32,
+  // The format used for output by nsAudioStream.
+#ifdef MOZ_SAMPLE_TYPE_S16
+  AUDIO_OUTPUT_FORMAT = AUDIO_FORMAT_S16
+#else
+  AUDIO_OUTPUT_FORMAT = AUDIO_FORMAT_FLOAT32
+#endif
+};
+
+template <AudioSampleFormat Format> class AudioSampleTraits;
+
+template <> class AudioSampleTraits<AUDIO_FORMAT_FLOAT32> {
+public:
+  typedef float Type;
+};
+template <> class AudioSampleTraits<AUDIO_FORMAT_S16> {
+public:
+  typedef int16_t Type;
 };
 
+typedef AudioSampleTraits<AUDIO_OUTPUT_FORMAT>::Type AudioDataValue;
+
+// Single-sample conversion
+
+/*
+ * Use "2^N" conversion since it's simple, fast, "bit transparent", used by
+ * many other libraries and apparently behaves reasonably.
+ * http://blog.bjornroche.com/2009/12/int-float-int-its-jungle-out-there.html
+ * http://blog.bjornroche.com/2009/12/linearity-and-dynamic-range-in-int.html
+ */
+inline float
+AudioSampleToFloat(float aValue)
+{
+  return aValue;
+}
+inline float
+AudioSampleToFloat(int16_t aValue)
+{
+  return aValue/32768.0f;
+}
+
+template <typename T> T FloatToAudioSample(float aValue);
+
+template <> inline float
+FloatToAudioSample<float>(float aValue)
+{
+  return aValue;
+}
+template <> inline int16_t
+FloatToAudioSample<int16_t>(float aValue)
+{
+  float v = aValue*32768.0f;
+  float clamped = NS_MAX(-32768.0f, NS_MIN(32767.0f, v));
+  return int16_t(clamped);
+}
+
+// Sample buffer conversion
+
+template <typename From, typename To> inline void
+ConvertAudioSamples(const From* aFrom, To* aTo, int aCount)
+{
+  for (int i = 0; i < aCount; ++i) {
+    aTo[i] = FloatToAudioSample<To>(AudioSampleToFloat(aFrom[i]));
+  }
+}
+inline void
+ConvertAudioSamples(const int16_t* aFrom, int16_t* aTo, int aCount)
+{
+  memcpy(aTo, aFrom, sizeof(*aTo)*aCount);
+}
+inline void
+ConvertAudioSamples(const float* aFrom, float* aTo, int aCount)
+{
+  memcpy(aTo, aFrom, sizeof(*aTo)*aCount);
+}
+
+// Sample buffer conversion with scale
+
+template <typename From, typename To> inline void
+ConvertAudioSamplesWithScale(const From* aFrom, To* aTo, int aCount, float aScale)
+{
+  if (aScale == 1.0f) {
+    ConvertAudioSamples(aFrom, aTo, aCount);
+    return;
+  }
+  for (int i = 0; i < aCount; ++i) {
+    aTo[i] = FloatToAudioSample<To>(AudioSampleToFloat(aFrom[i])*aScale);
+  }
+}
+inline void
+ConvertAudioSamplesWithScale(const int16_t* aFrom, int16_t* aTo, int aCount, float aScale)
+{
+  if (aScale == 1.0f) {
+    ConvertAudioSamples(aFrom, aTo, aCount);
+    return;
+  }
+  if (0.0f <= aScale && aScale < 1.0f) {
+    int32_t scale = int32_t((1 << 16) * aScale);
+    for (int i = 0; i < aCount; ++i) {
+      aTo[i] = int16_t((int32_t(aFrom[i]) * scale) >> 16);
+    }
+    return;
+  }
+  for (int i = 0; i < aCount; ++i) {
+    aTo[i] = FloatToAudioSample<int16_t>(AudioSampleToFloat(aFrom[i])*aScale);
+  }
+}
+
 }
 
 #endif /* MOZILLA_AUDIOSAMPLEFORMAT_H_ */
--- a/content/media/AudioSegment.cpp
+++ b/content/media/AudioSegment.cpp
@@ -4,59 +4,29 @@
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "AudioSegment.h"
 
 #include "nsAudioStream.h"
 
 namespace mozilla {
 
-/*
- * Use "2^N" conversion since it's simple, fast, "bit transparent", used by
- * many other libraries and apparently behaves reasonably.
- * http://blog.bjornroche.com/2009/12/int-float-int-its-jungle-out-there.html
- * http://blog.bjornroche.com/2009/12/linearity-and-dynamic-range-in-int.html
- */
-static float
-SampleToFloat(float aValue)
-{
-  return aValue;
-}
-static float
-SampleToFloat(int16_t aValue)
-{
-  return aValue/32768.0f;
-}
-
-static void
-FloatToSample(float aValue, float* aOut)
-{
-  *aOut = aValue;
-}
-static void
-FloatToSample(float aValue, int16_t* aOut)
-{
-  float v = aValue*32768.0f;
-  float clamped = NS_MAX(-32768.0f, NS_MIN(32767.0f, v));
-  *aOut = int16_t(clamped);
-}
-
 template <class SrcT, class DestT>
 static void
 InterleaveAndConvertBuffer(const SrcT* aSource, int32_t aSourceLength,
                            int32_t aLength,
                            float aVolume,
                            int32_t aChannels,
                            DestT* aOutput)
 {
   DestT* output = aOutput;
   for (int32_t i = 0; i < aLength; ++i) {
     for (int32_t channel = 0; channel < aChannels; ++channel) {
-      float v = SampleToFloat(aSource[channel*aSourceLength + i])*aVolume;
-      FloatToSample(v, output);
+      float v = AudioSampleToFloat(aSource[channel*aSourceLength + i])*aVolume;
+      *output = FloatToAudioSample<DestT>(v);
       ++output;
     }
   }
 }
 
 static void
 InterleaveAndConvertBuffer(const int16_t* aSource, int32_t aSourceLength,
                            int32_t aLength,
@@ -132,30 +102,30 @@ AudioSegment::ApplyVolume(float aVolume)
 
 static const int STATIC_AUDIO_BUFFER_BYTES = 50000;
 
 void
 AudioSegment::WriteTo(nsAudioStream* aOutput)
 {
   NS_ASSERTION(mChannels == aOutput->GetChannels(), "Wrong number of channels");
   nsAutoTArray<uint8_t,STATIC_AUDIO_BUFFER_BYTES> buf;
-  uint32_t frameSize = GetSampleSize(nsAudioStream::Format())*mChannels;
+  uint32_t frameSize = GetSampleSize(AUDIO_OUTPUT_FORMAT)*mChannels;
   for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) {
     AudioChunk& c = *ci;
     if (frameSize*c.mDuration > UINT32_MAX) {
       NS_ERROR("Buffer overflow");
       return;
     }
     buf.SetLength(int32_t(frameSize*c.mDuration));
     if (c.mBuffer) {
       InterleaveAndConvertBuffer(c.mBuffer->Data(), c.mBufferFormat, c.mBufferLength,
                                  c.mOffset, int32_t(c.mDuration),
                                  c.mVolume,
                                  aOutput->GetChannels(),
-                                 buf.Elements(), nsAudioStream::Format());
+                                 buf.Elements(), AUDIO_OUTPUT_FORMAT);
     } else {
       // Assumes that a bit pattern of zeroes == 0.0f
       memset(buf.Elements(), 0, buf.Length());
     }
     aOutput->Write(buf.Elements(), int32_t(c.mDuration));
   }
 }
 
--- a/content/media/nsAudioStream.cpp
+++ b/content/media/nsAudioStream.cpp
@@ -469,40 +469,19 @@ nsresult nsNativeAudioStream::Write(cons
   NS_ASSERTION(!mPaused, "Don't write audio when paused, you'll block");
 
   if (mInError)
     return NS_ERROR_FAILURE;
 
   uint32_t samples = aFrames * mChannels;
   nsAutoArrayPtr<short> s_data(new short[samples]);
 
-  if (s_data) {
-    double scaled_volume = GetVolumeScale() * mVolume;
-    if (Format() == AUDIO_FORMAT_S16) {
-      const short* buf = static_cast<const short*>(aBuf);
-      int32_t volume = int32_t((1 << 16) * scaled_volume);
-      for (uint32_t i = 0; i < samples; ++i) {
-        s_data[i] = short((int32_t(buf[i]) * volume) >> 16);
-      }
-    } else {
-      const float* buf = static_cast<const float*>(aBuf);
-      for (uint32_t i = 0; i <  samples; ++i) {
-        float scaled_value = floorf(0.5 + 32768 * buf[i] * scaled_volume);
-        if (buf[i] < 0.0) {
-          s_data[i] = (scaled_value < -32768.0) ?
-            -32768 :
-            short(scaled_value);
-        } else {
-          s_data[i] = (scaled_value > 32767.0) ?
-            32767 :
-            short(scaled_value);
-        }
-      }
-    }
-  }
+  float scaled_volume = float(GetVolumeScale() * mVolume);
+  const AudioDataValue* buf = static_cast<const AudioDataValue*>(aBuf);
+  ConvertAudioSamplesWithScale(buf, s_data.get(), samples, scaled_volume);
 
   if (sa_stream_write(static_cast<sa_stream_t*>(mAudioHandle),
                       s_data.get(),
                       samples * sizeof(short)) != SA_SUCCESS)
   {
     PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsNativeAudioStream: sa_stream_write error"));
     mInError = true;
     return NS_ERROR_FAILURE;
@@ -628,17 +607,17 @@ nsRemotedAudioStream::~nsRemotedAudioStr
 NS_IMPL_THREADSAFE_ISUPPORTS0(nsRemotedAudioStream)
 
 nsresult
 nsRemotedAudioStream::Init(int32_t aNumChannels,
                            int32_t aRate)
 {
   mRate = aRate;
   mChannels = aNumChannels;
-  mBytesPerFrame = (Format() == FORMAT_FLOAT32 ? 4 : 2) * mChannels;
+  mBytesPerFrame = sizeof(AudioDataValue) * mChannels;
 
   nsCOMPtr<nsIRunnable> event = new AudioInitEvent(this);
   NS_DispatchToMainThread(event, NS_DISPATCH_SYNC);
   return NS_OK;
 }
 
 void
 nsRemotedAudioStream::Shutdown()
@@ -942,23 +921,22 @@ nsBufferedAudioStream::Init(int32_t aNum
   }
 
   mRate = aRate;
   mChannels = aNumChannels;
 
   cubeb_stream_params params;
   params.rate = aRate;
   params.channels = aNumChannels;
-  if (Format() == AUDIO_FORMAT_S16) {
-    params.format =  CUBEB_SAMPLE_S16NE;
-    mBytesPerFrame = sizeof(int16_t) * aNumChannels;
+  if (AUDIO_OUTPUT_FORMAT == AUDIO_FORMAT_S16) {
+    params.format = CUBEB_SAMPLE_S16NE;
   } else {
     params.format = CUBEB_SAMPLE_FLOAT32NE;
-    mBytesPerFrame = sizeof(float) * aNumChannels;
   }
+  mBytesPerFrame = sizeof(AudioDataValue) * aNumChannels;
 
   {
     cubeb_stream* stream;
     if (cubeb_stream_init(cubebContext, &stream, "nsBufferedAudioStream", params,
                           GetCubebLatency(), DataCallback_S, StateCallback_S, this) == CUBEB_OK) {
       mCubebStream.own(stream);
     }
   }
@@ -1165,47 +1143,31 @@ nsBufferedAudioStream::DataCallback(void
   uint32_t bytesWanted = aFrames * mBytesPerFrame;
 
   // Adjust bytesWanted to fit what is available in mBuffer.
   uint32_t available = NS_MIN(bytesWanted, mBuffer.Length());
   NS_ABORT_IF_FALSE(available % mBytesPerFrame == 0, "Must copy complete frames");
 
   if (available > 0) {
     // Copy each sample from mBuffer to aBuffer, adjusting the volume during the copy.
-    double scaled_volume = GetVolumeScale() * mVolume;
+    float scaled_volume = float(GetVolumeScale() * mVolume);
 
     // Fetch input pointers from the ring buffer.
     void* input[2];
     uint32_t input_size[2];
     mBuffer.PopElements(available, &input[0], &input_size[0], &input[1], &input_size[1]);
 
-    uint8_t* output = reinterpret_cast<uint8_t*>(aBuffer);
+    uint8_t* output = static_cast<uint8_t*>(aBuffer);
     for (int i = 0; i < 2; ++i) {
-      // Fast path for unity volume case.
-      if (scaled_volume == 1.0) {
-        memcpy(output, input[i], input_size[i]);
-        output += input_size[i];
-      } else if (Format() == AUDIO_FORMAT_S16) {
-        // Adjust volume as each sample is copied out.
-        int32_t volume = int32_t(1 << 16) * scaled_volume;
+      const AudioDataValue* src = static_cast<const AudioDataValue*>(input[i]);
+      AudioDataValue* dst = reinterpret_cast<AudioDataValue*>(output);
 
-        const short* src = static_cast<const short*>(input[i]);
-        short* dst = reinterpret_cast<short*>(output);
-        for (uint32_t j = 0; j < input_size[i] / (mBytesPerFrame / mChannels); ++j) {
-          dst[j] = short((int32_t(src[j]) * volume) >> 16);
-        }
-        output += input_size[i];
-      } else {
-        const float* src = static_cast<const float*>(input[i]);
-        float* dst = reinterpret_cast<float*>(output);
-        for (uint32_t j = 0; j < input_size[i] / (mBytesPerFrame / mChannels); ++j) {
-          dst[j] = src[j] * scaled_volume;
-        }
-        output += input_size[i];
-      }
+      ConvertAudioSamplesWithScale(src, dst, input_size[i]/sizeof(AudioDataValue),
+                                   scaled_volume);
+      output += input_size[i];
     }
 
     NS_ABORT_IF_FALSE(mBuffer.Length() % mBytesPerFrame == 0, "Must copy complete frames");
 
     // Notify any blocked Write() call that more space is available in mBuffer.
     mon.NotifyAll();
 
     // Calculate remaining bytes requested by caller.  If the stream is not
--- a/content/media/nsAudioStream.h
+++ b/content/media/nsAudioStream.h
@@ -94,23 +94,15 @@ public:
   // you can be sure that something will be played.
   // Unsafe to call with a monitor held due to synchronous event execution
   // on the main thread, which may attempt to acquire any held monitor.
   virtual int32_t GetMinWriteSize() = 0;
 
   int GetRate() { return mRate; }
   int GetChannels() { return mChannels; }
 
-  static mozilla::AudioSampleFormat Format() {
-#ifdef MOZ_SAMPLE_TYPE_S16
-    return mozilla::AUDIO_FORMAT_S16;
-#else
-    return mozilla::AUDIO_FORMAT_FLOAT32;
-#endif
-  }
-
 protected:
   nsCOMPtr<nsIThread> mAudioPlaybackThread;
   int mRate;
   int mChannels;
 };
 
 #endif
--- a/content/media/nsBuiltinDecoderReader.h
+++ b/content/media/nsBuiltinDecoderReader.h
@@ -7,16 +7,17 @@
 #define nsBuiltinDecoderReader_h_
 
 #include <nsDeque.h>
 #include "nsSize.h"
 #include "mozilla/ReentrantMonitor.h"
 #include "MediaStreamGraph.h"
 #include "SharedBuffer.h"
 #include "ImageLayers.h"
+#include "AudioSampleFormat.h"
 
 // Stores info relevant to presenting media frames.
 class nsVideoInfo {
 public:
   nsVideoInfo()
     : mAudioRate(44100),
       mAudioChannels(2),
       mDisplay(0,0),
@@ -51,39 +52,38 @@ public:
 
   // True if we have an active video bitstream.
   bool mHasVideo;
 };
 
 #ifdef MOZ_SAMPLE_TYPE_S16
 #include <ogg/os_types.h>
 typedef ogg_int32_t VorbisPCMValue;
-typedef short AudioDataValue;
 
 #define MOZ_CLIP_TO_15(x) ((x)<-32768?-32768:(x)<=32767?(x):32767)
 // Convert the output of vorbis_synthesis_pcmout to a AudioDataValue
 #define MOZ_CONVERT_VORBIS_SAMPLE(x) \
  (static_cast<AudioDataValue>(MOZ_CLIP_TO_15((x)>>9)))
 // Convert a AudioDataValue to a float for the Audio API
 #define MOZ_CONVERT_AUDIO_SAMPLE(x) ((x)*(1.F/32768))
 
 #else /* MOZ_SAMPLE_TYPE_FLOAT32 */
 
 typedef float VorbisPCMValue;
-typedef float AudioDataValue;
 
 #define MOZ_CONVERT_VORBIS_SAMPLE(x) (x)
 #define MOZ_CONVERT_AUDIO_SAMPLE(x) (x)
 
 #endif
 
 // Holds chunk a decoded audio frames.
 class AudioData {
 public:
   typedef mozilla::SharedBuffer SharedBuffer;
+  typedef mozilla::AudioDataValue AudioDataValue;
 
   AudioData(int64_t aOffset,
             int64_t aTime,
             int64_t aDuration,
             uint32_t aFrames,
             AudioDataValue* aData,
             uint32_t aChannels)
   : mOffset(aOffset),
@@ -374,16 +374,17 @@ private:
 // calling into this class. Unless otherwise specified, methods and fields of
 // this class can only be accessed on the decode thread.
 class nsBuiltinDecoderReader : public nsRunnable {
 public:
   typedef mozilla::ReentrantMonitor ReentrantMonitor;
   typedef mozilla::ReentrantMonitorAutoEnter ReentrantMonitorAutoEnter;
   typedef mozilla::VideoFrameContainer VideoFrameContainer;
   typedef mozilla::MediaByteRange MediaByteRange;
+  typedef mozilla::AudioDataValue AudioDataValue;
 
   nsBuiltinDecoderReader(nsBuiltinDecoder* aDecoder);
   virtual ~nsBuiltinDecoderReader();
 
   // Initializes the reader, returns NS_OK on success, or NS_ERROR_FAILURE
   // on failure.
   virtual nsresult Init(nsBuiltinDecoderReader* aCloneDonor) = 0;
 
--- a/content/media/nsBuiltinDecoderStateMachine.cpp
+++ b/content/media/nsBuiltinDecoderStateMachine.cpp
@@ -556,17 +556,17 @@ void nsBuiltinDecoderStateMachine::SendS
   }
 
   if (offset >= aAudio->mFrames)
     return;
 
   aAudio->EnsureAudioBuffer();
   nsRefPtr<SharedBuffer> buffer = aAudio->mAudioBuffer;
   aOutput->AppendFrames(buffer.forget(), aAudio->mFrames, int32_t(offset), aAudio->mFrames,
-                        nsAudioStream::Format());
+                        AUDIO_OUTPUT_FORMAT);
   LOG(PR_LOG_DEBUG, ("%p Decoder writing %d frames of data to MediaStream for AudioData at %lld",
                      mDecoder.get(), aAudio->mFrames - int32_t(offset), aAudio->mTime));
   aStream->mAudioFramesWritten += aAudio->mFrames - int32_t(offset);
 }
 
 static void WriteVideoToMediaStream(mozilla::layers::Image* aImage,
                                     int64_t aDuration, const gfxIntSize& aIntrinsicSize,
                                     VideoSegment* aOutput)
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
@@ -554,27 +554,18 @@ void MediaPipelineTransmit::ProcessAudio
     switch(chunk.mBufferFormat) {
       case AUDIO_FORMAT_FLOAT32:
         MOZ_MTLOG(PR_LOG_ERROR, "Can't process audio except in 16-bit PCM yet");
         MOZ_ASSERT(PR_FALSE);
         return;
         break;
       case AUDIO_FORMAT_S16:
         {
-          // Code based on nsAudioStream
           const short* buf = static_cast<const short *>(chunk.mBuffer->Data());
-
-          int32_t volume = int32_t((1 << 16) * chunk.mVolume);
-          for (uint32_t i = 0; i < chunk.mDuration; ++i) {
-            int16_t s = buf[i];
-#if defined(IS_BIG_ENDIAN)
-            s = ((s & 0x00ff) << 8) | ((s & 0xff00) >> 8);
-#endif
-            samples[i] = short((int32_t(s) * volume) >> 16);
-          }
+          ConvertAudioSamplesWithScale(buf, samples, chunk.mDuration, chunk.mVolume);
         }
         break;
       default:
         MOZ_ASSERT(PR_FALSE);
         return;
         break;
     }
   } else {