Bug 805254. Part 12: Simplify AudioSegment::WriteTo and related code now that the output format is known statically. r=kinetik
author Robert O'Callahan <robert@ocallahan.org>
Thu, 25 Oct 2012 23:09:41 +1300
changeset 111628 1eb3bd54b7bd6ed60456e790540917cad9fa7b4b
parent 111627 c3fcdbf501049aec59009d7804e05d793abaeacc
child 111629 2b496b04c1831346a898d288f0c03ab23b5c076f
push id 93
push user nmatsakis@mozilla.com
push date Wed, 31 Oct 2012 21:26:57 +0000
reviewers kinetik
bugs 805254
milestone 19.0a1
Bug 805254. Part 12: Simplify AudioSegment::WriteTo and related code now that the output format is known statically. r=kinetik Also fixes what I think is a bug in InterleaveAndConvertBuffer converting S16 to S16. Instead of clamping the volume, we should handle arbitrary volumes by falling back to the float conversion path.
content/media/AudioSegment.cpp
content/media/AudioSegment.h
--- a/content/media/AudioSegment.cpp
+++ b/content/media/AudioSegment.cpp
@@ -22,111 +22,102 @@ InterleaveAndConvertBuffer(const SrcT* a
     for (int32_t channel = 0; channel < aChannels; ++channel) {
       float v = AudioSampleToFloat(aSource[channel*aSourceLength + i])*aVolume;
       *output = FloatToAudioSample<DestT>(v);
       ++output;
     }
   }
 }
 
-static void
+static inline void
 InterleaveAndConvertBuffer(const int16_t* aSource, int32_t aSourceLength,
                            int32_t aLength,
                            float aVolume,
                            int32_t aChannels,
                            int16_t* aOutput)
 {
   int16_t* output = aOutput;
-  float v = NS_MAX(NS_MIN(aVolume, 1.0f), -1.0f);
-  int32_t volume = int32_t((1 << 16) * v);
+  if (0.0f <= aVolume && aVolume <= 1.0f) {
+    int32_t scale = int32_t((1 << 16) * aVolume);
+    for (int32_t i = 0; i < aLength; ++i) {
+      for (int32_t channel = 0; channel < aChannels; ++channel) {
+        int16_t s = aSource[channel*aSourceLength + i];
+        *output = int16_t((int32_t(s) * scale) >> 16);
+        ++output;
+      }
+    }
+    return;
+  }
+
   for (int32_t i = 0; i < aLength; ++i) {
     for (int32_t channel = 0; channel < aChannels; ++channel) {
-      int16_t s = aSource[channel*aSourceLength + i];
-      *output = int16_t((int32_t(s) * volume) >> 16);
+      float v = AudioSampleToFloat(aSource[channel*aSourceLength + i])*aVolume;
+      *output = FloatToAudioSample<int16_t>(v);
       ++output;
     }
   }
 }
 
-template <class SrcT>
-static void
-InterleaveAndConvertBuffer(const SrcT* aSource, int32_t aSourceLength,
-                           int32_t aLength,
-                           float aVolume,
-                           int32_t aChannels,
-                           void* aOutput, AudioSampleFormat aOutputFormat)
-{
-  switch (aOutputFormat) {
-  case AUDIO_FORMAT_FLOAT32:
-    InterleaveAndConvertBuffer(aSource, aSourceLength, aLength, aVolume,
-                               aChannels, static_cast<float*>(aOutput));
-    break;
-  case AUDIO_FORMAT_S16:
-    InterleaveAndConvertBuffer(aSource, aSourceLength, aLength, aVolume,
-                               aChannels, static_cast<int16_t*>(aOutput));
-    break;
-  }
-}
-
 static void
 InterleaveAndConvertBuffer(const void* aSource, AudioSampleFormat aSourceFormat,
                            int32_t aSourceLength,
                            int32_t aOffset, int32_t aLength,
                            float aVolume,
                            int32_t aChannels,
-                           void* aOutput, AudioSampleFormat aOutputFormat)
+                           AudioDataValue* aOutput)
 {
   switch (aSourceFormat) {
   case AUDIO_FORMAT_FLOAT32:
-    InterleaveAndConvertBuffer(static_cast<const float*>(aSource) + aOffset, aSourceLength,
+    InterleaveAndConvertBuffer(static_cast<const float*>(aSource) + aOffset,
+                               aSourceLength,
                                aLength,
                                aVolume,
                                aChannels,
-                               aOutput, aOutputFormat);
+                               aOutput);
     break;
   case AUDIO_FORMAT_S16:
-    InterleaveAndConvertBuffer(static_cast<const int16_t*>(aSource) + aOffset, aSourceLength,
+    InterleaveAndConvertBuffer(static_cast<const int16_t*>(aSource) + aOffset,
+                               aSourceLength,
                                aLength,
                                aVolume,
                                aChannels,
-                               aOutput, aOutputFormat);
+                               aOutput);
     break;
   }
 }
 
 void
 AudioSegment::ApplyVolume(float aVolume)
 {
   for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) {
     ci->mVolume *= aVolume;
   }
 }
 
-static const int STATIC_AUDIO_BUFFER_BYTES = 50000;
+static const int STATIC_AUDIO_SAMPLES = 10000;
 
 void
 AudioSegment::WriteTo(nsAudioStream* aOutput)
 {
   NS_ASSERTION(mChannels == aOutput->GetChannels(), "Wrong number of channels");
-  nsAutoTArray<uint8_t,STATIC_AUDIO_BUFFER_BYTES> buf;
-  uint32_t frameSize = GetSampleSize(AUDIO_OUTPUT_FORMAT)*mChannels;
+  nsAutoTArray<AudioDataValue,STATIC_AUDIO_SAMPLES> buf;
   for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) {
     AudioChunk& c = *ci;
-    if (frameSize*c.mDuration > UINT32_MAX) {
+    if (uint64_t(mChannels)*c.mDuration > INT32_MAX) {
       NS_ERROR("Buffer overflow");
       return;
     }
-    buf.SetLength(int32_t(frameSize*c.mDuration));
+    buf.SetLength(int32_t(mChannels*c.mDuration));
     if (c.mBuffer) {
       InterleaveAndConvertBuffer(c.mBuffer->Data(), c.mBufferFormat, c.mBufferLength,
                                  c.mOffset, int32_t(c.mDuration),
                                  c.mVolume,
                                  aOutput->GetChannels(),
-                                 buf.Elements(), AUDIO_OUTPUT_FORMAT);
+                                 buf.Elements());
     } else {
       // Assumes that a bit pattern of zeroes == 0.0f
-      memset(buf.Elements(), 0, buf.Length());
+      memset(buf.Elements(), 0, buf.Length()*sizeof(AudioDataValue));
     }
     aOutput->Write(buf.Elements(), int32_t(c.mDuration));
   }
 }
 
 }
--- a/content/media/AudioSegment.h
+++ b/content/media/AudioSegment.h
@@ -61,26 +61,16 @@ struct AudioChunk {
 /**
  * A list of audio samples consisting of a sequence of slices of SharedBuffers.
  * The audio rate is determined by the track, not stored in this class.
  */
 class AudioSegment : public MediaSegmentBase<AudioSegment, AudioChunk> {
 public:
   typedef mozilla::AudioSampleFormat SampleFormat;
 
-  static int GetSampleSize(SampleFormat aFormat)
-  {
-    switch (aFormat) {
-    case AUDIO_FORMAT_S16: return 2;
-    case AUDIO_FORMAT_FLOAT32: return 4;
-    }
-    NS_ERROR("Bad format");
-    return 0;
-  }
-
   AudioSegment() : MediaSegmentBase<AudioSegment, AudioChunk>(AUDIO), mChannels(0) {}
 
   bool IsInitialized()
   {
     return mChannels > 0;
   }
   void Init(int32_t aChannels)
   {
@@ -88,29 +78,16 @@ public:
     NS_ASSERTION(!IsInitialized(), "Already initialized");
     mChannels = aChannels;
   }
   int32_t GetChannels()
   {
     NS_ASSERTION(IsInitialized(), "Not initialized");
     return mChannels;
   }
-  /**
-   * Returns the format of the first audio frame that has data, or
-   * AUDIO_FORMAT_FLOAT32 if there is none.
-   */
-  SampleFormat GetFirstFrameFormat()
-  {
-    for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) {
-      if (ci->mBuffer) {
-        return ci->mBufferFormat;
-      }
-    }
-    return AUDIO_FORMAT_FLOAT32;
-  }
   void AppendFrames(already_AddRefed<SharedBuffer> aBuffer, int32_t aBufferLength,
                     int32_t aStart, int32_t aEnd, SampleFormat aFormat)
   {
     NS_ASSERTION(mChannels > 0, "Not initialized");
     AudioChunk* chunk = AppendChunk(aEnd - aStart);
     chunk->mBuffer = aBuffer;
     chunk->mBufferFormat = aFormat;
     chunk->mBufferLength = aBufferLength;