bug 1391482 accept int16_t sample buffers in ConvolverNode r?padenot draft
author      Karl Tomlinson <karlt+@karlt.net>
date        Thu, 10 Aug 2017 19:55:36 +1200
changeset   654565 740dbfc1e33a1a00a59fc5795403e5894ad58dca
parent      654564 8d0b7ad3ef11e1c47e9589d1c65f6ed2b64a09e6
child       654566 108e46a68476c95af583b27e2030dd402dbe34bb
push id     76601
push user   ktomlinson@mozilla.com
push date   Mon, 28 Aug 2017 23:35:34 +0000
reviewers   padenot
bugs        1391482
milestone   57.0a1
bug 1391482 accept int16_t sample buffers in ConvolverNode r?padenot

MozReview-Commit-ID: LvYkiSvhQdP
dom/media/webaudio/ConvolverNode.cpp
--- a/dom/media/webaudio/ConvolverNode.cpp
+++ b/dom/media/webaudio/ConvolverNode.cpp
@@ -246,29 +246,55 @@ ConvolverNode::SetBuffer(JSContext* aCx,
       // Supported number of channels
       break;
     default:
       aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
       return;
     }
   }
 
-  mBuffer = aBuffer;
-
   // Send the buffer to the stream
   AudioNodeStream* ns = mStream;
   MOZ_ASSERT(ns, "Why don't we have a stream here?");
-  if (mBuffer) {
-    AudioChunk data = mBuffer->GetThreadSharedChannelsForRate(aCx);
+  if (aBuffer) {
+    AudioChunk data = aBuffer->GetThreadSharedChannelsForRate(aCx);
+    if (data.mBufferFormat == AUDIO_FORMAT_S16) {
+      // Reverb expects data in float format.
+      // Convert on the main thread so as to minimize allocations on the audio
+      // thread.
+      // Reverb will dispose of the buffer once initialized, so convert here
+      // and leave the smaller arrays in the AudioBuffer.
+      // There is currently no value in providing 16/32-byte aligned data
+      // because PadAndMakeScaledDFT() will copy the data (without SIMD
+      // instructions) to aligned arrays for the FFT.
+      RefPtr<SharedBuffer> floatBuffer =
+        SharedBuffer::Create(sizeof(float) *
+                             data.mDuration * data.ChannelCount());
+      if (!floatBuffer) {
+        aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
+        return;
+      }
+      auto floatData = static_cast<float*>(floatBuffer->Data());
+      for (size_t i = 0; i < data.ChannelCount(); ++i) {
+        ConvertAudioSamples(data.ChannelData<int16_t>()[i],
+                            floatData, data.mDuration);
+        data.mChannelData[i] = floatData;
+        floatData += data.mDuration;
+      }
+      data.mBuffer = Move(floatBuffer);
+      data.mBufferFormat = AUDIO_FORMAT_FLOAT32;
+    }
     SendDoubleParameterToStream(ConvolverNodeEngine::SAMPLE_RATE,
-                                mBuffer->SampleRate());
+                                aBuffer->SampleRate());
     ns->SetBuffer(Move(data));
   } else {
     ns->SetBuffer(AudioChunk());
   }
+
+  mBuffer = aBuffer;
 }
 
 void
 ConvolverNode::SetNormalize(bool aNormalize)
 {
   mNormalize = aNormalize;
   SendInt32ParameterToStream(ConvolverNodeEngine::NORMALIZE, aNormalize);
 }
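
For context, the new code path expands each int16_t channel into the freshly
allocated float SharedBuffer via ConvertAudioSamples() before handing the chunk
to the stream. A minimal sketch of that per-channel conversion, assuming the
conventional 1/32768 scaling for 16-bit PCM (the helper name below is
hypothetical and for illustration only, not part of the patch):

  #include <cstddef>
  #include <cstdint>

  // Convert 16-bit PCM samples to float in the range [-1, 1).
  static void
  ConvertInt16ChannelToFloat(const int16_t* aSource, float* aDest, size_t aCount)
  {
    for (size_t i = 0; i < aCount; ++i) {
      aDest[i] = aSource[i] * (1.0f / 32768.0f);
    }
  }

Doing the conversion once on the main thread keeps allocations off the audio
thread; the audio thread then receives an AudioChunk that is already in
AUDIO_FORMAT_FLOAT32, which is what Reverb expects.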