Backed out 15 changesets (bug 1391482) for rooting hazard
author Phil Ringnalda <philringnalda@gmail.com>
Mon, 28 Aug 2017 20:40:11 -0700
changeset 377361 ca5dcf6d7ff1a53b938e7b2272f3a4872740a1c2
parent 377360 9b9102b84ebc0a86b3a1f4f63a0c51b17c78cb09
child 377362 a36884ff708dd3242451775027e06fbf28d69934
push id 32407
push user archaeopteryx@coole-files.de
push date Tue, 29 Aug 2017 18:28:36 +0000
treeherder mozilla-central@d814f791de3b
bugs 1391482
milestone 57.0a1
backs out 4d4ed9b64bdbb1127341abd3d55fc04904ed8869
c02da061fc56ad9f325a3146f7c056ea71050687
7f096b0d1d0e4d9868ba9ef5f97a40ba78b4c654
497e04031fc3918667eb09ba71f376fa4429c8c9
cb6ac4267562689889cb10779e8ac2c09d1aec1d
b9a522cc88c967701c4280a4d9bf1a4bf56997f9
6feba222e86b431b0ef87ff76a8388eec6041ef1
ee13302be6c8518acf5215e3d9c3161b457dbc20
0e12a1bdb2faba15ffc7c0f29f08ab853a19cf4b
7ffc044e742adcae35c501c6ee399ef2d97a6b5d
fc35c12c815f9b6aa804ac573d470b2d502bbe2e
527ea972cdf36fe336a5a30eb86a84f153433c39
a9c7abf924553c47fc9f3d7bf0351e5c4395cd67
fea3e5cd3590403a758b399da46fc9fc9bee6a7f
ca07d3a43b11208d5b3646f1a0eeb34b38e576cd
Backed out 15 changesets (bug 1391482) for rooting hazard

Backed out changeset 4d4ed9b64bdb (bug 1391482)
Backed out changeset c02da061fc56 (bug 1391482)
Backed out changeset 7f096b0d1d0e (bug 1391482)
Backed out changeset 497e04031fc3 (bug 1391482)
Backed out changeset cb6ac4267562 (bug 1391482)
Backed out changeset b9a522cc88c9 (bug 1391482)
Backed out changeset 6feba222e86b (bug 1391482)
Backed out changeset ee13302be6c8 (bug 1391482)
Backed out changeset 0e12a1bdb2fa (bug 1391482)
Backed out changeset 7ffc044e742a (bug 1391482)
Backed out changeset fc35c12c815f (bug 1391482)
Backed out changeset 527ea972cdf3 (bug 1391482)
Backed out changeset a9c7abf92455 (bug 1391482)
Backed out changeset fea3e5cd3590 (bug 1391482)
Backed out changeset ca07d3a43b11 (bug 1391482)

MozReview-Commit-ID: 9Cq8e7pr2SP
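
The "rooting hazard" named in the backout reason comes from Gecko's static analysis of SpiderMonkey heap pointers: it rejects any code path that keeps an unrooted pointer to a GC thing live across a call that may trigger garbage collection. The sketch below illustrates the general shape of such a hazard using public JSAPI names only; it is not the code that tripped the analysis in bug 1391482.

    // Hazardous: `arr` is a raw JSObject* held across JS_GC(), so a moving GC
    // can relocate or collect the array while the stale pointer is still live.
    // This is the pattern the static rooting-hazard analysis flags.
    #include "jsapi.h"
    #include "jsfriendapi.h"  // JS_NewFloat32Array

    JSObject* MakeArrayHazardous(JSContext* cx) {
      JSObject* arr = JS_NewFloat32Array(cx, 128);
      JS_GC(cx);   // stands in for any call that can GC
      return arr;  // may point at moved or freed memory
    }

    // Fixed: JS::Rooted registers the slot with the context, so the GC traces
    // it and rewrites the pointer if the object moves.
    JSObject* MakeArrayRooted(JSContext* cx) {
      JS::Rooted<JSObject*> arr(cx, JS_NewFloat32Array(cx, 128));
      JS_GC(cx);
      return arr;
    }
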
dom/media/AudioSegment.h
dom/media/SharedBuffer.h
dom/media/webaudio/AudioBuffer.cpp
dom/media/webaudio/AudioBuffer.h
dom/media/webaudio/AudioBufferSourceNode.cpp
dom/media/webaudio/AudioNodeEngine.h
dom/media/webaudio/AudioNodeStream.cpp
dom/media/webaudio/AudioNodeStream.h
dom/media/webaudio/ConvolverNode.cpp
dom/media/webaudio/MediaBufferDecoder.cpp
dom/media/webaudio/MediaBufferDecoder.h
dom/media/webaudio/OscillatorNode.cpp
dom/media/webaudio/PeriodicWave.cpp
dom/media/webaudio/PeriodicWave.h
dom/media/webaudio/ScriptProcessorNode.cpp
dom/media/webaudio/blink/Reverb.cpp
dom/media/webaudio/blink/Reverb.h
--- a/dom/media/AudioSegment.h
+++ b/dom/media/AudioSegment.h
@@ -228,28 +228,16 @@ struct AudioChunk {
   template<typename T>
   const nsTArray<const T*>& ChannelData() const
   {
     MOZ_ASSERT(AudioSampleTypeToFormat<T>::Format == mBufferFormat);
     return *reinterpret_cast<const AutoTArray<const T*,GUESS_AUDIO_CHANNELS>*>
       (&mChannelData);
   }
 
-  /**
-   * ChannelFloatsForWrite() should be used only when mBuffer is owned solely
-   * by the calling thread.
-   */
-  template<typename T>
-  T* ChannelDataForWrite(size_t aChannel)
-  {
-    MOZ_ASSERT(AudioSampleTypeToFormat<T>::Format == mBufferFormat);
-    MOZ_ASSERT(!mBuffer->IsShared());
-    return static_cast<T*>(const_cast<void*>(mChannelData[aChannel]));
-  }
-
   PrincipalHandle GetPrincipalHandle() const { return mPrincipalHandle; }
 
   StreamTime mDuration; // in frames within the buffer
   RefPtr<ThreadSharedObject> mBuffer; // the buffer object whose lifetime is managed; null means data is all zeroes
   // one pointer per channel; empty if and only if mBuffer is null
   AutoTArray<const void*,GUESS_AUDIO_CHANNELS> mChannelData;
   float mVolume; // volume multiplier to apply (1.0f if mBuffer is nonnull)
   SampleFormat mBufferFormat; // format of frames in mBuffer (only meaningful if mBuffer is nonnull)
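
The ChannelData<T>() accessor kept above depends on an array of const void* and an array of const T* sharing one memory layout, so the whole container can be reinterpreted at once instead of casting each element. A minimal stand-alone model of the same trick; as in the Gecko original, it leans on implementation guarantees rather than strict standard C++.

    #include <cassert>
    #include <type_traits>
    #include <vector>

    struct Chunk {
      bool mIsFloat = true;
      std::vector<const void*> mChannelData;  // type-erased, one per channel

      template <typename T>
      const std::vector<const T*>& ChannelData() const {
        // Mirrors the MOZ_ASSERT format check in AudioChunk::ChannelData.
        assert(mIsFloat == std::is_same<T, float>::value);
        // All object-pointer types share a representation in practice, so the
        // container of const void* can be viewed as a container of const T*.
        return *reinterpret_cast<const std::vector<const T*>*>(&mChannelData);
      }
    };
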
--- a/dom/media/SharedBuffer.h
+++ b/dom/media/SharedBuffer.h
@@ -8,33 +8,28 @@
 
 #include "mozilla/CheckedInt.h"
 #include "mozilla/mozalloc.h"
 #include "nsCOMPtr.h"
 
 namespace mozilla {
 
 class AudioBlockBuffer;
-class ThreadSharedFloatArrayBufferList;
 
 /**
  * Base class for objects with a thread-safe refcount and a virtual
  * destructor.
  */
 class ThreadSharedObject {
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ThreadSharedObject)
 
   bool IsShared() { return mRefCnt.get() > 1; }
 
   virtual AudioBlockBuffer* AsAudioBlockBuffer() { return nullptr; };
-  virtual ThreadSharedFloatArrayBufferList* AsThreadSharedFloatArrayBufferList()
-  {
-    return nullptr;
-  };
 
   virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
   {
     return 0;
   }
 
   virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
   {
@@ -52,50 +47,34 @@ protected:
  * This only guarantees 4-byte alignment of the data. For alignment we simply
  * assume that the memory from malloc is at least 4-byte aligned and the
  * refcount's size is large enough that SharedBuffer's size is divisible by 4.
  */
 class SharedBuffer : public ThreadSharedObject {
 public:
   void* Data() { return this + 1; }
 
-  static already_AddRefed<SharedBuffer> Create(size_t aSize, const fallible_t&)
-  {
-    return InternalCreate(&malloc, aSize);
-  }
-
   static already_AddRefed<SharedBuffer> Create(size_t aSize)
   {
-    // Use moz_xmalloc() to include its diagnostic message indicating the
-    // size of any failed allocations.
-    return InternalCreate(&moz_xmalloc, aSize);
+    CheckedInt<size_t> size = sizeof(SharedBuffer);
+    size += aSize;
+    if (!size.isValid()) {
+      MOZ_CRASH();
+    }
+    void* m = moz_xmalloc(size.value());
+    RefPtr<SharedBuffer> p = new (m) SharedBuffer();
+    NS_ASSERTION((reinterpret_cast<char*>(p.get() + 1) - reinterpret_cast<char*>(p.get())) % 4 == 0,
+                 "SharedBuffers should be at least 4-byte aligned");
+    return p.forget();
   }
 
   size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override
   {
     return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
   }
 
 private:
-  static already_AddRefed<SharedBuffer>
-  InternalCreate(void* aMalloc(size_t), size_t aSize)
-  {
-    CheckedInt<size_t> size = sizeof(SharedBuffer);
-    size += aSize;
-    if (!size.isValid()) {
-      MOZ_CRASH();
-    }
-    void* m = (*aMalloc)(size.value());
-    if (!m) {
-      return nullptr;
-    }
-    RefPtr<SharedBuffer> p = new (m) SharedBuffer();
-    NS_ASSERTION((reinterpret_cast<char*>(p.get() + 1) - reinterpret_cast<char*>(p.get())) % 4 == 0,
-                 "SharedBuffers should be at least 4-byte aligned");
-    return p.forget();
-  }
-
   SharedBuffer() {}
 };
 
 } // namespace mozilla
 
 #endif /* MOZILLA_SHAREDBUFFER_H_ */
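
The restored Create() above packs the refcount header and the sample payload into a single allocation: Data() is simply the memory immediately past the object, the requested size is overflow-checked before the header size is added, and moz_xmalloc aborts rather than returning null. A stand-alone sketch of the same pattern with plain malloc/abort stand-ins (refcounting omitted):

    #include <cstdint>
    #include <cstdlib>
    #include <new>

    class Block {
    public:
      void* Data() { return this + 1; }  // payload starts right after the header

      static Block* Create(size_t aSize) {
        if (aSize > SIZE_MAX - sizeof(Block)) {  // CheckedInt<size_t> in Gecko
          abort();                               // MOZ_CRASH() in Gecko
        }
        void* m = malloc(sizeof(Block) + aSize);
        if (!m) {
          abort();  // moz_xmalloc likewise never returns null
        }
        return new (m) Block();  // placement-new into the shared allocation
      }

    private:
      Block() {}
    };

    // Usage: the samples live in the same heap block as the header.
    //   Block* b = Block::Create(256 * sizeof(float));
    //   float* samples = static_cast<float*>(b->Data());
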
--- a/dom/media/webaudio/AudioBuffer.cpp
+++ b/dom/media/webaudio/AudioBuffer.cpp
@@ -155,32 +155,25 @@ AudioBufferMemoryTracker::CollectReports
 
   return NS_OK;
 }
 
 AudioBuffer::AudioBuffer(nsPIDOMWindowInner* aWindow,
                          uint32_t aNumberOfChannels,
                          uint32_t aLength,
                          float aSampleRate,
-                         ErrorResult& aRv)
+                         already_AddRefed<ThreadSharedFloatArrayBufferList>
+                           aInitialContents)
   : mOwnerWindow(do_GetWeakReference(aWindow)),
+    mSharedChannels(aInitialContents),
+    mLength(aLength),
     mSampleRate(aSampleRate)
 {
-  // Note that a buffer with zero channels is permitted here for the sake of
-  // AudioProcessingEvent, where channel counts must match parameters passed
-  // to createScriptProcessor(), one of which may be zero.
-  if (aSampleRate < WebAudioUtils::MinSampleRate ||
-      aSampleRate > WebAudioUtils::MaxSampleRate ||
-      aNumberOfChannels > WebAudioUtils::MaxChannelCount ||
-      !aLength || aLength > INT32_MAX) {
-    aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
-    return;
-  }
-
-  mSharedChannels.mDuration = aLength;
+  MOZ_ASSERT(!mSharedChannels ||
+             mSharedChannels->GetChannels() == aNumberOfChannels);
   mJSChannels.SetLength(aNumberOfChannels);
   mozilla::HoldJSObjects(this);
   AudioBufferMemoryTracker::RegisterAudioBuffer(this);
 }
 
 AudioBuffer::~AudioBuffer()
 {
   AudioBufferMemoryTracker::UnregisterAudioBuffer(this);
@@ -206,194 +199,146 @@ AudioBuffer::Constructor(const GlobalObj
 }
 
 void
 AudioBuffer::ClearJSChannels()
 {
   mJSChannels.Clear();
 }
 
-void
-AudioBuffer::SetSharedChannels(
-  already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer)
-{
-  RefPtr<ThreadSharedFloatArrayBufferList> buffer = aBuffer;
-  uint32_t channelCount = buffer->GetChannels();
-  mSharedChannels.mChannelData.SetLength(channelCount);
-  for (uint32_t i = 0; i < channelCount; ++i) {
-    mSharedChannels.mChannelData[i] = buffer->GetData(i);
-  }
-  mSharedChannels.mBuffer = buffer.forget();
-  mSharedChannels.mVolume = 1.0f;
-  mSharedChannels.mBufferFormat = AUDIO_FORMAT_FLOAT32;
-}
-
 /* static */ already_AddRefed<AudioBuffer>
 AudioBuffer::Create(nsPIDOMWindowInner* aWindow, uint32_t aNumberOfChannels,
                     uint32_t aLength, float aSampleRate,
                     already_AddRefed<ThreadSharedFloatArrayBufferList>
                       aInitialContents,
                     ErrorResult& aRv)
 {
-  RefPtr<ThreadSharedFloatArrayBufferList> initialContents = aInitialContents;
-  RefPtr<AudioBuffer> buffer =
-    new AudioBuffer(aWindow, aNumberOfChannels, aLength, aSampleRate, aRv);
-  if (aRv.Failed()) {
+  // Note that a buffer with zero channels is permitted here for the sake of
+  // AudioProcessingEvent, where channel counts must match parameters passed
+  // to createScriptProcessor(), one of which may be zero.
+  if (aSampleRate < WebAudioUtils::MinSampleRate ||
+      aSampleRate > WebAudioUtils::MaxSampleRate ||
+      aNumberOfChannels > WebAudioUtils::MaxChannelCount ||
+      !aLength || aLength > INT32_MAX) {
+    aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
     return nullptr;
   }
 
-  if (initialContents) {
-    MOZ_ASSERT(initialContents->GetChannels() == aNumberOfChannels);
-    buffer->SetSharedChannels(initialContents.forget());
-  }
-
-  return buffer.forget();
-}
-
-/* static */ already_AddRefed<AudioBuffer>
-AudioBuffer::Create(nsPIDOMWindowInner* aWindow, float aSampleRate,
-                    AudioChunk&& aInitialContents)
-{
-  AudioChunk initialContents = aInitialContents;
-  ErrorResult rv;
   RefPtr<AudioBuffer> buffer =
-    new AudioBuffer(aWindow, initialContents.ChannelCount(),
-                    initialContents.mDuration, aSampleRate, rv);
-  if (rv.Failed()) {
-    return nullptr;
-  }
-  buffer->mSharedChannels = Move(aInitialContents);
+    new AudioBuffer(aWindow, aNumberOfChannels, aLength, aSampleRate,
+                    Move(aInitialContents));
 
   return buffer.forget();
 }
 
 JSObject*
 AudioBuffer::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
 {
   return AudioBufferBinding::Wrap(aCx, this, aGivenProto);
 }
 
-static void
-CopyChannelDataToFloat(const AudioChunk& aChunk, uint32_t aChannel,
-                       uint32_t aSrcOffset, float* aOutput, uint32_t aLength)
-{
-  MOZ_ASSERT(aChunk.mVolume == 1.0f);
-  if (aChunk.mBufferFormat == AUDIO_FORMAT_FLOAT32) {
-    mozilla::PodCopy(aOutput,
-                     aChunk.ChannelData<float>()[aChannel] + aSrcOffset,
-                     aLength);
-  } else {
-    MOZ_ASSERT(aChunk.mBufferFormat == AUDIO_FORMAT_S16);
-    ConvertAudioSamples(aChunk.ChannelData<int16_t>()[aChannel] + aSrcOffset,
-                        aOutput, aLength);
-  }
-}
-
 bool
 AudioBuffer::RestoreJSChannelData(JSContext* aJSContext)
 {
   for (uint32_t i = 0; i < mJSChannels.Length(); ++i) {
     if (mJSChannels[i]) {
       // Already have data in JS array.
       continue;
     }
 
     // The following code first zeroes the array and then copies our data
     // into it. We could avoid this with additional JS APIs to construct
     // an array (or ArrayBuffer) containing initial data.
     JS::Rooted<JSObject*> array(aJSContext,
-                                JS_NewFloat32Array(aJSContext, Length()));
+                                JS_NewFloat32Array(aJSContext, mLength));
     if (!array) {
       return false;
     }
-    if (!mSharedChannels.IsNull()) {
+    if (mSharedChannels) {
       // "4. Attach ArrayBuffers containing copies of the data to the
       // AudioBuffer, to be returned by the next call to getChannelData."
+      const float* data = mSharedChannels->GetData(i);
       JS::AutoCheckCannotGC nogc;
       bool isShared;
-      float* jsData = JS_GetFloat32ArrayData(array, &isShared, nogc);
+      mozilla::PodCopy(JS_GetFloat32ArrayData(array, &isShared, nogc), data, mLength);
       MOZ_ASSERT(!isShared); // Was created as unshared above
-      CopyChannelDataToFloat(mSharedChannels, i, 0, jsData, Length());
     }
     mJSChannels[i] = array;
   }
 
-  mSharedChannels.mBuffer = nullptr;
-  mSharedChannels.mChannelData.Clear();
+  mSharedChannels = nullptr;
 
   return true;
 }
 
 void
 AudioBuffer::CopyFromChannel(const Float32Array& aDestination, uint32_t aChannelNumber,
                              uint32_t aStartInChannel, ErrorResult& aRv)
 {
   aDestination.ComputeLengthAndData();
 
   uint32_t length = aDestination.Length();
   CheckedInt<uint32_t> end = aStartInChannel;
   end += length;
   if (aChannelNumber >= NumberOfChannels() ||
-      !end.isValid() || end.value() > Length()) {
+      !end.isValid() || end.value() > mLength) {
     aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
     return;
   }
 
   JS::AutoCheckCannotGC nogc;
   JSObject* channelArray = mJSChannels[aChannelNumber];
+  const float* sourceData = nullptr;
   if (channelArray) {
-    if (JS_GetTypedArrayLength(channelArray) != Length()) {
+    if (JS_GetTypedArrayLength(channelArray) != mLength) {
       // The array's buffer was detached.
       aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
       return;
     }
 
     bool isShared = false;
-    const float* sourceData =
-      JS_GetFloat32ArrayData(channelArray, &isShared, nogc);
+    sourceData = JS_GetFloat32ArrayData(channelArray, &isShared, nogc);
     // The sourceData arrays should all have originated in
     // RestoreJSChannelData, where they are created unshared.
     MOZ_ASSERT(!isShared);
-    PodMove(aDestination.Data(), sourceData + aStartInChannel, length);
-    return;
+  } else if (mSharedChannels) {
+    sourceData = mSharedChannels->GetData(aChannelNumber);
   }
 
-  if (!mSharedChannels.IsNull()) {
-    CopyChannelDataToFloat(mSharedChannels, aChannelNumber, aStartInChannel,
-                           aDestination.Data(), length);
-    return;
+  if (sourceData) {
+    PodMove(aDestination.Data(), sourceData + aStartInChannel, length);
+  } else {
+    PodZero(aDestination.Data(), length);
   }
-
-  PodZero(aDestination.Data(), length);
 }
 
 void
 AudioBuffer::CopyToChannel(JSContext* aJSContext, const Float32Array& aSource,
                            uint32_t aChannelNumber, uint32_t aStartInChannel,
                            ErrorResult& aRv)
 {
   aSource.ComputeLengthAndData();
 
   uint32_t length = aSource.Length();
   CheckedInt<uint32_t> end = aStartInChannel;
   end += length;
   if (aChannelNumber >= NumberOfChannels() ||
-      !end.isValid() || end.value() > Length()) {
+      !end.isValid() || end.value() > mLength) {
     aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
     return;
   }
 
   if (!RestoreJSChannelData(aJSContext)) {
     aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
     return;
   }
 
   JS::AutoCheckCannotGC nogc;
   JSObject* channelArray = mJSChannels[aChannelNumber];
-  if (JS_GetTypedArrayLength(channelArray) != Length()) {
+  if (JS_GetTypedArrayLength(channelArray) != mLength) {
     // The array's buffer was detached.
     aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
     return;
   }
 
   bool isShared = false;
   float* channelData = JS_GetFloat32ArrayData(channelArray, &isShared, nogc);
   // The channelData arrays should all have originated in
@@ -423,17 +368,17 @@ AudioBuffer::GetChannelData(JSContext* a
 already_AddRefed<ThreadSharedFloatArrayBufferList>
 AudioBuffer::StealJSArrayDataIntoSharedChannels(JSContext* aJSContext)
 {
   // "1. If any of the AudioBuffer's ArrayBuffer have been detached, abort
   // these steps, and return a zero-length channel data buffers to the
   // invoker."
   for (uint32_t i = 0; i < mJSChannels.Length(); ++i) {
     JSObject* channelArray = mJSChannels[i];
-    if (!channelArray || Length() != JS_GetTypedArrayLength(channelArray)) {
+    if (!channelArray || mLength != JS_GetTypedArrayLength(channelArray)) {
       // Either empty buffer or one of the arrays' buffers was detached.
       return nullptr;
     }
   }
 
   // "2. Detach all ArrayBuffers for arrays previously returned by
   // getChannelData on this AudioBuffer."
   // "3. Retain the underlying data buffers from those ArrayBuffers and return
@@ -464,35 +409,31 @@ AudioBuffer::StealJSArrayDataIntoSharedC
 
   for (uint32_t i = 0; i < mJSChannels.Length(); ++i) {
     mJSChannels[i] = nullptr;
   }
 
   return result.forget();
 }
 
-const AudioChunk&
+ThreadSharedFloatArrayBufferList*
 AudioBuffer::GetThreadSharedChannelsForRate(JSContext* aJSContext)
 {
-  if (mSharedChannels.IsNull()) {
-    // mDuration is set in constructor
-    RefPtr<ThreadSharedFloatArrayBufferList> buffer =
-      StealJSArrayDataIntoSharedChannels(aJSContext);
-
-    if (buffer) {
-      SetSharedChannels(buffer.forget());
-    }
+  if (!mSharedChannels) {
+    mSharedChannels = StealJSArrayDataIntoSharedChannels(aJSContext);
   }
 
   return mSharedChannels;
 }
 
 size_t
 AudioBuffer::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
 {
   size_t amount = aMallocSizeOf(this);
   amount += mJSChannels.ShallowSizeOfExcludingThis(aMallocSizeOf);
-  amount += mSharedChannels.SizeOfExcludingThis(aMallocSizeOf, false);
+  if (mSharedChannels) {
+    amount += mSharedChannels->SizeOfIncludingThis(aMallocSizeOf);
+  }
   return amount;
 }
 
 } // namespace dom
 } // namespace mozilla
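
The restored RestoreJSChannelData() above hinges on one step: creating an unshared Float32Array and filling it from the shared float data while JS::AutoCheckCannotGC guarantees the raw view pointer cannot be invalidated by a GC. A sketch of just that step, using the same JSAPI calls that appear in the hunk (the helper itself is illustrative):

    #include <cstring>        // memcpy; the Gecko code uses mozilla::PodCopy
    #include "jsapi.h"
    #include "jsfriendapi.h"  // JS_NewFloat32Array, JS_GetFloat32ArrayData

    static JSObject* CopyToFloat32Array(JSContext* cx, const float* aData,
                                        uint32_t aLength) {
      JS::Rooted<JSObject*> array(cx, JS_NewFloat32Array(cx, aLength));
      if (!array) {
        return nullptr;  // allocation failure; caller reports OOM
      }
      {
        JS::AutoCheckCannotGC nogc;  // raw pointer below must not span a GC
        bool isShared;
        float* dest = JS_GetFloat32ArrayData(array, &isShared, nogc);
        // Created as an unshared array above, so isShared is false.
        memcpy(dest, aData, aLength * sizeof(float));
      }
      return array;
    }
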
--- a/dom/media/webaudio/AudioBuffer.h
+++ b/dom/media/webaudio/AudioBuffer.h
@@ -2,17 +2,16 @@
 /* vim:set ts=2 sw=2 sts=2 et cindent: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef AudioBuffer_h_
 #define AudioBuffer_h_
 
-#include "AudioSegment.h"
 #include "nsWrapperCache.h"
 #include "nsCycleCollectionParticipant.h"
 #include "mozilla/Attributes.h"
 #include "mozilla/StaticPtr.h"
 #include "mozilla/StaticMutex.h"
 #include "nsTArray.h"
 #include "js/TypeDecls.h"
 #include "mozilla/MemoryReporting.h"
@@ -49,21 +48,16 @@ public:
   Create(nsPIDOMWindowInner* aWindow, uint32_t aNumberOfChannels,
          uint32_t aLength, float aSampleRate,
          ErrorResult& aRv)
   {
     return Create(aWindow, aNumberOfChannels, aLength, aSampleRate,
                   nullptr, aRv);
   }
 
-  // Non-unit AudioChunk::mVolume is not supported
-  static already_AddRefed<AudioBuffer>
-  Create(nsPIDOMWindowInner* aWindow, float aSampleRate,
-         AudioChunk&& aInitialContents);
-
   size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
 
   NS_INLINE_DECL_CYCLE_COLLECTING_NATIVE_REFCOUNTING(AudioBuffer)
   NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_NATIVE_CLASS(AudioBuffer)
 
   static already_AddRefed<AudioBuffer>
   Constructor(const GlobalObject& aGlobal,
               const AudioBufferOptions& aOptions, ErrorResult& aRv);
@@ -78,22 +72,22 @@ public:
 
   float SampleRate() const
   {
     return mSampleRate;
   }
 
   uint32_t Length() const
   {
-    return mSharedChannels.mDuration;
+    return mLength;
   }
 
   double Duration() const
   {
-    return Length() / static_cast<double> (mSampleRate);
+    return mLength / static_cast<double> (mSampleRate);
   }
 
   uint32_t NumberOfChannels() const
   {
     return mJSChannels.Length();
   }
 
   /**
@@ -106,44 +100,43 @@ public:
 
   void CopyFromChannel(const Float32Array& aDestination, uint32_t aChannelNumber,
                        uint32_t aStartInChannel, ErrorResult& aRv);
   void CopyToChannel(JSContext* aJSContext, const Float32Array& aSource,
                      uint32_t aChannelNumber, uint32_t aStartInChannel,
                      ErrorResult& aRv);
 
   /**
-   * Returns a reference to an AudioChunk containing the sample data.
-   * The AudioChunk can have a null buffer if there is no data.
+   * Returns a ThreadSharedFloatArrayBufferList containing the sample data.
+   * Can return null if there is no data.
    */
-  const AudioChunk& GetThreadSharedChannelsForRate(JSContext* aContext);
+  ThreadSharedFloatArrayBufferList* GetThreadSharedChannelsForRate(JSContext* aContext);
 
 protected:
   AudioBuffer(nsPIDOMWindowInner* aWindow, uint32_t aNumberOfChannels,
-              uint32_t aLength, float aSampleRate, ErrorResult& aRv);
+              uint32_t aLength, float aSampleRate,
+              already_AddRefed<ThreadSharedFloatArrayBufferList>
+                aInitialContents);
   ~AudioBuffer();
 
-  void
-  SetSharedChannels(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer);
-
   bool RestoreJSChannelData(JSContext* aJSContext);
 
   already_AddRefed<ThreadSharedFloatArrayBufferList>
   StealJSArrayDataIntoSharedChannels(JSContext* aJSContext);
 
   void ClearJSChannels();
 
+  nsWeakPtr mOwnerWindow;
   // Float32Arrays
   AutoTArray<JS::Heap<JSObject*>, 2> mJSChannels;
+
   // mSharedChannels aggregates the data from mJSChannels. This is non-null
-  // if and only if the mJSChannels' buffers are detached, but its mDuration
-  // member keeps the buffer length regardless of whether the buffer is
-  // provided by mJSChannels or mSharedChannels.
-  AudioChunk mSharedChannels;
+  // if and only if the mJSChannels' buffers are detached.
+  RefPtr<ThreadSharedFloatArrayBufferList> mSharedChannels;
 
-  nsWeakPtr mOwnerWindow;
+  uint32_t mLength;
   float mSampleRate;
 };
 
 } // namespace dom
 } // namespace mozilla
 
 #endif
--- a/dom/media/webaudio/AudioBufferSourceNode.cpp
+++ b/dom/media/webaudio/AudioBufferSourceNode.cpp
@@ -139,17 +139,17 @@ public:
     case AudioBufferSourceNode::LOOPEND:
       MOZ_ASSERT(aParam >= 0);
       mLoopEnd = aParam;
       break;
     default:
       NS_ERROR("Bad AudioBufferSourceNodeEngine Int32Parameter");
     }
   }
-  void SetBuffer(AudioChunk&& aBuffer) override
+  void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer) override
   {
     mBuffer = aBuffer;
   }
 
   bool BegunResampling()
   {
     return mBeginProcessing == -STREAM_TIME_MAX;
   }
@@ -210,40 +210,36 @@ public:
     }
   }
 
   // Borrow a full buffer of size WEBAUDIO_BLOCK_SIZE from the source buffer
   // at offset aSourceOffset.  This avoids copying memory.
   void BorrowFromInputBuffer(AudioBlock* aOutput,
                              uint32_t aChannels)
   {
-    aOutput->SetBuffer(mBuffer.mBuffer);
+    aOutput->SetBuffer(mBuffer);
     aOutput->mChannelData.SetLength(aChannels);
     for (uint32_t i = 0; i < aChannels; ++i) {
-      aOutput->mChannelData[i] =
-        mBuffer.ChannelData<float>()[i] + mBufferPosition;
+      aOutput->mChannelData[i] = mBuffer->GetData(i) + mBufferPosition;
     }
-    aOutput->mVolume = mBuffer.mVolume;
+    aOutput->mVolume = 1.0f;
     aOutput->mBufferFormat = AUDIO_FORMAT_FLOAT32;
   }
 
   // Copy aNumberOfFrames frames from the source buffer at offset aSourceOffset
   // and put it at offset aBufferOffset in the destination buffer.
-  template <typename T> void
-  CopyFromInputBuffer(AudioBlock* aOutput,
-                      uint32_t aChannels,
-                      uintptr_t aOffsetWithinBlock,
-                      uint32_t aNumberOfFrames)
-  {
-    MOZ_ASSERT(mBuffer.mVolume == 1.0f);
+  void CopyFromInputBuffer(AudioBlock* aOutput,
+                           uint32_t aChannels,
+                           uintptr_t aOffsetWithinBlock,
+                           uint32_t aNumberOfFrames) {
     for (uint32_t i = 0; i < aChannels; ++i) {
       float* baseChannelData = aOutput->ChannelFloatsForWrite(i);
-      ConvertAudioSamples(mBuffer.ChannelData<T>()[i] + mBufferPosition,
-                          baseChannelData + aOffsetWithinBlock,
-                          aNumberOfFrames);
+      memcpy(baseChannelData + aOffsetWithinBlock,
+             mBuffer->GetData(i) + mBufferPosition,
+             aNumberOfFrames * sizeof(float));
     }
   }
 
   // Resamples input data to an output buffer, according to |mBufferSampleRate| and
   // the playbackRate/detune.
   // The number of frames consumed/produced depends on the amount of space
   // remaining in both the input and output buffer, and the playback rate (that
   // is, the ratio between the output samplerate and the input samplerate).
@@ -289,38 +285,27 @@ public:
         }
         speex_resampler_set_skip_frac_num(resampler,
                                   std::min<int64_t>(skipFracNum, UINT32_MAX));
 
         mBeginProcessing = -STREAM_TIME_MAX;
       }
       inputLimit = std::min(inputLimit, availableInInputBuffer);
 
-      MOZ_ASSERT(mBuffer.mVolume == 1.0f);
       for (uint32_t i = 0; true; ) {
         uint32_t inSamples = inputLimit;
+        const float* inputData = mBuffer->GetData(i) + mBufferPosition;
 
         uint32_t outSamples = aAvailableInOutput;
         float* outputData =
           aOutput->ChannelFloatsForWrite(i) + *aOffsetWithinBlock;
 
-        if (mBuffer.mBufferFormat == AUDIO_FORMAT_FLOAT32) {
-          const float* inputData =
-            mBuffer.ChannelData<float>()[i] + mBufferPosition;
-          WebAudioUtils::SpeexResamplerProcess(resampler, i,
-                                               inputData, &inSamples,
-                                               outputData, &outSamples);
-        } else {
-          MOZ_ASSERT(mBuffer.mBufferFormat == AUDIO_FORMAT_S16);
-          const int16_t* inputData =
-            mBuffer.ChannelData<int16_t>()[i] + mBufferPosition;
-          WebAudioUtils::SpeexResamplerProcess(resampler, i,
-                                               inputData, &inSamples,
-                                               outputData, &outSamples);
-        }
+        WebAudioUtils::SpeexResamplerProcess(resampler, i,
+                                             inputData, &inSamples,
+                                             outputData, &outSamples);
         if (++i == aChannels) {
           mBufferPosition += inSamples;
           MOZ_ASSERT(mBufferPosition <= mBufferEnd || mLoop);
           *aOffsetWithinBlock += outSamples;
           *aCurrentPosition += outSamples;
           if (inSamples == availableInInputBuffer && !mLoop) {
             // We'll feed in enough zeros to empty out the resampler's memory.
             // This handles the output latency as well as capturing the low
@@ -428,42 +413,32 @@ public:
         mBufferSampleRate / mResamplerOutRate;
       mBufferPosition += end - start;
       return;
     }
 
     uint32_t numFrames = std::min(aBufferMax - mBufferPosition,
                                   availableInOutput);
 
-    bool shouldBorrow = false;
-    if (numFrames == WEBAUDIO_BLOCK_SIZE &&
-        mBuffer.mBufferFormat == AUDIO_FORMAT_FLOAT32) {
-      shouldBorrow = true;
-      for (uint32_t i = 0; i < aChannels; ++i) {
-        if (!IS_ALIGNED16(mBuffer.ChannelData<float>()[i] + mBufferPosition)) {
-          shouldBorrow = false;
-          break;
-        }
+    bool inputBufferAligned = true;
+    for (uint32_t i = 0; i < aChannels; ++i) {
+      if (!IS_ALIGNED16(mBuffer->GetData(i) + mBufferPosition)) {
+        inputBufferAligned = false;
       }
     }
-    MOZ_ASSERT(mBufferPosition < aBufferMax);
-    if (shouldBorrow) {
+
+    if (numFrames == WEBAUDIO_BLOCK_SIZE && inputBufferAligned) {
+      MOZ_ASSERT(mBufferPosition < aBufferMax);
       BorrowFromInputBuffer(aOutput, aChannels);
     } else {
       if (*aOffsetWithinBlock == 0) {
         aOutput->AllocateChannels(aChannels);
       }
-      if (mBuffer.mBufferFormat == AUDIO_FORMAT_FLOAT32) {
-        CopyFromInputBuffer<float>(aOutput, aChannels,
-                                   *aOffsetWithinBlock, numFrames);
-      } else {
-        MOZ_ASSERT(mBuffer.mBufferFormat == AUDIO_FORMAT_S16);
-        CopyFromInputBuffer<int16_t>(aOutput, aChannels,
-                                     *aOffsetWithinBlock, numFrames);
-      }
+      MOZ_ASSERT(mBufferPosition < aBufferMax);
+      CopyFromInputBuffer(aOutput, aChannels, *aOffsetWithinBlock, numFrames);
     }
     *aOffsetWithinBlock += numFrames;
     *aCurrentPosition += numFrames;
     mBufferPosition += numFrames;
   }
 
   int32_t ComputeFinalOutSampleRate(float aPlaybackRate, float aDetune)
   {
@@ -509,17 +484,17 @@ public:
   {
     if (mBufferSampleRate == 0) {
       // start() has not yet been called or no buffer has yet been set
       aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
       return;
     }
 
     StreamTime streamPosition = mDestination->GraphTimeToStreamTime(aFrom);
-    uint32_t channels = mBuffer.ChannelCount();
+    uint32_t channels = mBuffer ? mBuffer->GetChannels() : 0;
 
     UpdateSampleRateIfNeeded(channels, streamPosition);
 
     uint32_t written = 0;
     while (written < WEBAUDIO_BLOCK_SIZE) {
       if (mStop != STREAM_TIME_MAX &&
           streamPosition >= mStop) {
         FillWithZeroes(aOutput, channels, &written, &streamPosition, STREAM_TIME_MAX);
@@ -589,17 +564,17 @@ public:
 
   double mStart; // including the fractional position between ticks
   // Low pass filter effects from the resampler mean that samples before the
   // start time are influenced by resampling the buffer.  mBeginProcessing
   // includes the extent of this filter.  The special value of -STREAM_TIME_MAX
   // indicates that the resampler has begun processing.
   StreamTime mBeginProcessing;
   StreamTime mStop;
-  AudioChunk mBuffer;
+  RefPtr<ThreadSharedFloatArrayBufferList> mBuffer;
   SpeexResamplerState* mResampler;
   // mRemainingResamplerTail, like mBufferPosition, and
   // mBufferEnd, is measured in input buffer samples.
   uint32_t mRemainingResamplerTail;
   uint32_t mBufferEnd;
   uint32_t mLoopStart;
   uint32_t mLoopEnd;
   uint32_t mBufferPosition;
@@ -750,25 +725,26 @@ void
 AudioBufferSourceNode::SendBufferParameterToStream(JSContext* aCx)
 {
   AudioNodeStream* ns = mStream;
   if (!ns) {
     return;
   }
 
   if (mBuffer) {
-    AudioChunk data = mBuffer->GetThreadSharedChannelsForRate(aCx);
-    ns->SetBuffer(Move(data));
+    RefPtr<ThreadSharedFloatArrayBufferList> data =
+      mBuffer->GetThreadSharedChannelsForRate(aCx);
+    ns->SetBuffer(data.forget());
 
     if (mStartCalled) {
       SendOffsetAndDurationParametersToStream(ns);
     }
   } else {
     ns->SetInt32Parameter(BUFFEREND, 0);
-    ns->SetBuffer(AudioChunk());
+    ns->SetBuffer(nullptr);
 
     MarkInactive();
   }
 }
 
 void
 AudioBufferSourceNode::SendOffsetAndDurationParametersToStream(AudioNodeStream* aStream)
 {
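
The AudioBufferSourceNode hunk above restores a zero-copy fast path: a full 128-frame block whose channel pointers are 16-byte aligned is handed to the output by reference (BorrowFromInputBuffer), and everything else is copied. A stand-alone model of the alignment gate, with simplified types in place of AudioBlock:

    #include <cstdint>

    constexpr uint32_t kBlockSize = 128;  // WEBAUDIO_BLOCK_SIZE

    inline bool IsAligned16(const float* aPtr) {
      // Matches the IS_ALIGNED16 test: SIMD loads want 16-byte alignment.
      return (reinterpret_cast<uintptr_t>(aPtr) & 15) == 0;
    }

    // True when every channel at the current read position can be exposed
    // zero-copy for a whole block instead of memcpy'd into the output.
    bool CanBorrow(const float* const* aChannels, uint32_t aChannelCount,
                   uint32_t aPosition, uint32_t aFramesWanted) {
      if (aFramesWanted != kBlockSize) {
        return false;
      }
      for (uint32_t i = 0; i < aChannelCount; ++i) {
        if (!IsAligned16(aChannels[i] + aPosition)) {
          return false;
        }
      }
      return true;
    }
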
--- a/dom/media/webaudio/AudioNodeEngine.h
+++ b/dom/media/webaudio/AudioNodeEngine.h
@@ -43,22 +43,16 @@ public:
   /**
    * Create with buffers suitable for transfer to
    * JS_NewArrayBufferWithContents().  The buffer contents are uninitialized
    * and so should be set using GetDataForWrite().
    */
   static already_AddRefed<ThreadSharedFloatArrayBufferList>
   Create(uint32_t aChannelCount, size_t aLength, const mozilla::fallible_t&);
 
-  ThreadSharedFloatArrayBufferList*
-  AsThreadSharedFloatArrayBufferList() override
-  {
-    return this;
-  };
-
   struct Storage final
   {
     Storage() :
       mDataToFree(nullptr),
       mFree(nullptr),
       mSampleData(nullptr)
     {}
     ~Storage() {
@@ -289,17 +283,17 @@ public:
   {
     NS_ERROR("Invalid RecvTimelineEvent index");
   }
   virtual void SetThreeDPointParameter(uint32_t aIndex,
                                        const dom::ThreeDPoint& aValue)
   {
     NS_ERROR("Invalid SetThreeDPointParameter index");
   }
-  virtual void SetBuffer(AudioChunk&& aBuffer)
+  virtual void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer)
   {
     NS_ERROR("SetBuffer called on engine that doesn't support it");
   }
   // This consumes the contents of aData.  aData will be emptied after this returns.
   virtual void SetRawArrayData(nsTArray<float>& aData)
   {
     NS_ERROR("SetRawArrayData called on an engine that doesn't support it");
   }
--- a/dom/media/webaudio/AudioNodeStream.cpp
+++ b/dom/media/webaudio/AudioNodeStream.cpp
@@ -249,33 +249,34 @@ AudioNodeStream::SetThreeDPointParameter
     ThreeDPoint mValue;
     uint32_t mIndex;
   };
 
   GraphImpl()->AppendMessage(MakeUnique<Message>(this, aIndex, aValue));
 }
 
 void
-AudioNodeStream::SetBuffer(AudioChunk&& aBuffer)
+AudioNodeStream::SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList>&& aBuffer)
 {
   class Message final : public ControlMessage
   {
   public:
-    Message(AudioNodeStream* aStream, AudioChunk&& aBuffer)
+    Message(AudioNodeStream* aStream,
+            already_AddRefed<ThreadSharedFloatArrayBufferList>& aBuffer)
       : ControlMessage(aStream), mBuffer(aBuffer)
     {}
     void Run() override
     {
       static_cast<AudioNodeStream*>(mStream)->Engine()->
-        SetBuffer(Move(mBuffer));
+          SetBuffer(mBuffer.forget());
     }
-    AudioChunk mBuffer;
+    RefPtr<ThreadSharedFloatArrayBufferList> mBuffer;
   };
 
-  GraphImpl()->AppendMessage(MakeUnique<Message>(this, Move(aBuffer)));
+  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aBuffer));
 }
 
 void
 AudioNodeStream::SetRawArrayData(nsTArray<float>& aData)
 {
   class Message final : public ControlMessage
   {
   public:
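
SetBuffer() above is built on the graph's ControlMessage pattern: the main thread never touches engine state directly, it queues a message that owns the payload, and the media-graph thread later calls Run() to apply it. A minimal thread-safe model of that handoff with stand-in types (the real queue lives in MediaStreamGraph):

    #include <memory>
    #include <mutex>
    #include <queue>
    #include <utility>
    #include <vector>

    using Buffer = std::shared_ptr<const std::vector<float>>;

    struct Engine {
      Buffer mBuffer;
      void SetBuffer(Buffer aBuffer) {
        mBuffer = std::move(aBuffer);  // runs on the graph thread only
      }
    };

    struct SetBufferMessage {  // plays the role of the Message class above
      Engine* mEngine;
      Buffer mBuffer;
      void Run() { mEngine->SetBuffer(std::move(mBuffer)); }
    };

    class Graph {
    public:
      void AppendMessage(SetBufferMessage aMsg) {  // main thread
        std::lock_guard<std::mutex> lock(mMutex);
        mQueue.push(std::move(aMsg));
      }
      void ProcessMessages() {  // graph thread, between audio callbacks
        std::lock_guard<std::mutex> lock(mMutex);
        while (!mQueue.empty()) {
          mQueue.front().Run();
          mQueue.pop();
        }
      }
    private:
      std::mutex mMutex;
      std::queue<SetBufferMessage> mQueue;
    };
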
--- a/dom/media/webaudio/AudioNodeStream.h
+++ b/dom/media/webaudio/AudioNodeStream.h
@@ -86,17 +86,17 @@ public:
    * Sets a parameter that's a time relative to some stream's played time.
    * This time is converted to a time relative to this stream when it's set.
    */
   void SetStreamTimeParameter(uint32_t aIndex, AudioContext* aContext,
                               double aStreamTime);
   void SetDoubleParameter(uint32_t aIndex, double aValue);
   void SetInt32Parameter(uint32_t aIndex, int32_t aValue);
   void SetThreeDPointParameter(uint32_t aIndex, const dom::ThreeDPoint& aValue);
-  void SetBuffer(AudioChunk&& aBuffer);
+  void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList>&& aBuffer);
   // This sends a single event to the timeline on the MSG thread side.
   void SendTimelineEvent(uint32_t aIndex, const dom::AudioTimelineEvent& aEvent);
   // This consumes the contents of aData.  aData will be emptied after this returns.
   void SetRawArrayData(nsTArray<float>& aData);
   void SetChannelMixingParameters(uint32_t aNumberOfChannels,
                                   ChannelCountMode aChannelCountMoe,
                                   ChannelInterpretation aChannelInterpretation);
   void SetPassThrough(bool aPassThrough);
--- a/dom/media/webaudio/ConvolverNode.cpp
+++ b/dom/media/webaudio/ConvolverNode.cpp
@@ -25,30 +25,39 @@ NS_IMPL_ADDREF_INHERITED(ConvolverNode, 
 NS_IMPL_RELEASE_INHERITED(ConvolverNode, AudioNode)
 
 class ConvolverNodeEngine final : public AudioNodeEngine
 {
   typedef PlayingRefChangeHandler PlayingRefChanged;
 public:
   ConvolverNodeEngine(AudioNode* aNode, bool aNormalize)
     : AudioNodeEngine(aNode)
+    , mBufferLength(0)
     , mLeftOverData(INT32_MIN)
     , mSampleRate(0.0f)
     , mUseBackgroundThreads(!aNode->Context()->IsOffline())
     , mNormalize(aNormalize)
   {
   }
 
   enum Parameters {
+    BUFFER_LENGTH,
     SAMPLE_RATE,
     NORMALIZE
   };
   void SetInt32Parameter(uint32_t aIndex, int32_t aParam) override
   {
     switch (aIndex) {
+    case BUFFER_LENGTH:
+      // BUFFER_LENGTH is the first parameter that we set when setting a new buffer,
+      // so we should be careful to invalidate the rest of our state here.
+      mSampleRate = 0.0f;
+      mBufferLength = aParam;
+      mLeftOverData = INT32_MIN;
+      break;
     case NORMALIZE:
       mNormalize = !!aParam;
       break;
     default:
       NS_ERROR("Bad ConvolverNodeEngine Int32Parameter");
     }
   }
   void SetDoubleParameter(uint32_t aIndex, double aParam) override
@@ -58,34 +67,36 @@ public:
       mSampleRate = aParam;
       // The buffer is passed after the sample rate.
       // mReverb will be set using this sample rate when the buffer is received.
       break;
     default:
       NS_ERROR("Bad ConvolverNodeEngine DoubleParameter");
     }
   }
-  void SetBuffer(AudioChunk&& aBuffer) override
+  void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer) override
   {
+    RefPtr<ThreadSharedFloatArrayBufferList> buffer = aBuffer;
+
     // Note about empirical tuning (this is copied from Blink)
     // The maximum FFT size affects reverb performance and accuracy.
     // If the reverb is single-threaded and processes entirely in the real-time audio thread,
     // it's important not to make this too high.  In this case 8192 is a good value.
     // But, the Reverb object is multi-threaded, so we want this as high as possible without losing too much accuracy.
     // Very large FFTs will have worse phase errors. Given these constraints 32768 is a good compromise.
     const size_t MaxFFTSize = 32768;
 
-    mLeftOverData = INT32_MIN; // reset
-
-    if (aBuffer.IsNull() || !mSampleRate) {
+    if (!buffer || !mBufferLength || !mSampleRate) {
       mReverb = nullptr;
+      mLeftOverData = INT32_MIN;
       return;
     }
 
-    mReverb = new WebCore::Reverb(aBuffer, MaxFFTSize, mUseBackgroundThreads,
+    mReverb = new WebCore::Reverb(buffer, mBufferLength,
+                                  MaxFFTSize, mUseBackgroundThreads,
                                   mNormalize, mSampleRate);
   }
 
   void ProcessBlock(AudioNodeStream* aStream,
                     GraphTime aFrom,
                     const AudioBlock& aInput,
                     AudioBlock* aOutput,
                     bool* aFinished) override
@@ -126,17 +137,17 @@ public:
       }
 
       if (mLeftOverData <= 0) {
         RefPtr<PlayingRefChanged> refchanged =
           new PlayingRefChanged(aStream, PlayingRefChanged::ADDREF);
         aStream->Graph()->DispatchToMainThreadAfterStreamStateUpdate(
           refchanged.forget());
       }
-      mLeftOverData = mReverb->impulseResponseLength();
+      mLeftOverData = mBufferLength;
       MOZ_ASSERT(mLeftOverData > 0);
     }
     aOutput->AllocateChannels(2);
 
     mReverb->process(&input, aOutput);
   }
 
   bool IsActive() const override
@@ -157,16 +168,17 @@ public:
 
   size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override
   {
     return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
   }
 
 private:
   nsAutoPtr<WebCore::Reverb> mReverb;
+  int32_t mBufferLength;
   int32_t mLeftOverData;
   float mSampleRate;
   bool mUseBackgroundThreads;
   bool mNormalize;
 };
 
 ConvolverNode::ConvolverNode(AudioContext* aContext)
   : AudioNode(aContext,
@@ -246,55 +258,32 @@ ConvolverNode::SetBuffer(JSContext* aCx,
       // Supported number of channels
       break;
     default:
       aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
       return;
     }
   }
 
+  mBuffer = aBuffer;
+
   // Send the buffer to the stream
   AudioNodeStream* ns = mStream;
   MOZ_ASSERT(ns, "Why don't we have a stream here?");
-  if (aBuffer) {
-    AudioChunk data = aBuffer->GetThreadSharedChannelsForRate(aCx);
-    if (data.mBufferFormat == AUDIO_FORMAT_S16) {
-      // Reverb expects data in float format.
-      // Convert on the main thread so as to minimize allocations on the audio
-      // thread.
-      // Reverb will dispose of the buffer once initialized, so convert here
-      // and leave the smaller arrays in the AudioBuffer.
-      // There is currently no value in providing 16/32-byte aligned data
-      // because PadAndMakeScaledDFT() will copy the data (without SIMD
-      // instructions) to aligned arrays for the FFT.
-      RefPtr<SharedBuffer> floatBuffer =
-        SharedBuffer::Create(sizeof(float) *
-                             data.mDuration * data.ChannelCount());
-      if (!floatBuffer) {
-        aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
-        return;
-      }
-      auto floatData = static_cast<float*>(floatBuffer->Data());
-      for (size_t i = 0; i < data.ChannelCount(); ++i) {
-        ConvertAudioSamples(data.ChannelData<int16_t>()[i],
-                            floatData, data.mDuration);
-        data.mChannelData[i] = floatData;
-        floatData += data.mDuration;
-      }
-      data.mBuffer = Move(floatBuffer);
-      data.mBufferFormat = AUDIO_FORMAT_FLOAT32;
-    }
+  if (mBuffer) {
+    uint32_t length = mBuffer->Length();
+    RefPtr<ThreadSharedFloatArrayBufferList> data =
+      mBuffer->GetThreadSharedChannelsForRate(aCx);
+    SendInt32ParameterToStream(ConvolverNodeEngine::BUFFER_LENGTH, length);
     SendDoubleParameterToStream(ConvolverNodeEngine::SAMPLE_RATE,
-                                aBuffer->SampleRate());
-    ns->SetBuffer(Move(data));
+                                mBuffer->SampleRate());
+    ns->SetBuffer(data.forget());
   } else {
-    ns->SetBuffer(AudioChunk());
+    ns->SetBuffer(nullptr);
   }
-
-  mBuffer = aBuffer;
 }
 
 void
 ConvolverNode::SetNormalize(bool aNormalize)
 {
   mNormalize = aNormalize;
   SendInt32ParameterToStream(ConvolverNodeEngine::NORMALIZE, aNormalize);
 }
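
The ConvolverNodeEngine changes above reintroduce an ordering contract: BUFFER_LENGTH arrives first and deliberately invalidates the dependent state, SAMPLE_RATE follows, and the buffer itself arrives last, at which point the reverb can be rebuilt. A simplified model of the engine side of that protocol (BuildReverb() stands in for constructing WebCore::Reverb):

    #include <cstdint>

    class ConvolverEngineModel {
    public:
      // First message: a new buffer is coming, so stale state must die here.
      void SetBufferLength(int32_t aLength) {
        mSampleRate = 0.0f;
        mBufferLength = aLength;
        mHaveReverb = false;
      }
      // Second message.
      void SetSampleRate(double aRate) { mSampleRate = float(aRate); }
      // Last message: only now is everything present to build the reverb.
      void SetBuffer(const float* aBuffer) {
        if (!aBuffer || !mBufferLength || mSampleRate == 0.0f) {
          mHaveReverb = false;
          return;
        }
        // BuildReverb(aBuffer, mBufferLength, mSampleRate) would run here.
        mHaveReverb = true;
      }
    private:
      int32_t mBufferLength = 0;
      float mSampleRate = 0.0f;
      bool mHaveReverb = false;
    };
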
--- a/dom/media/webaudio/MediaBufferDecoder.cpp
+++ b/dom/media/webaudio/MediaBufferDecoder.cpp
@@ -333,114 +333,86 @@ MediaDecodeTask::FinishDecode()
     resampler = speex_resampler_init(channelCount,
                                      sampleRate,
                                      destSampleRate,
                                      SPEEX_RESAMPLER_QUALITY_DEFAULT, nullptr);
     speex_resampler_skip_zeros(resampler);
     resampledFrames += speex_resampler_get_output_latency(resampler);
   }
 
-  // Allocate contiguous channel buffers.  Note that if we end up resampling,
-  // we may write fewer bytes than mResampledFrames to the output buffer, in
-  // which case writeIndex will tell us how many valid samples we have.
-  mDecodeJob.mBuffer.mChannelData.SetLength(channelCount);
-#if AUDIO_OUTPUT_FORMAT == AUDIO_FORMAT_FLOAT32
-  // This buffer has separate channel arrays that could be transferred to
-  // JS_NewArrayBufferWithContents(), but AudioBuffer::RestoreJSChannelData()
-  // does not yet take advantage of this.
-  RefPtr<ThreadSharedFloatArrayBufferList> buffer =
-    ThreadSharedFloatArrayBufferList::
+  // Allocate the channel buffers.  Note that if we end up resampling, we may
+  // write fewer bytes than mResampledFrames to the output buffer, in which
+  // case mWriteIndex will tell us how many valid samples we have.
+  mDecodeJob.mBuffer = ThreadSharedFloatArrayBufferList::
     Create(channelCount, resampledFrames, fallible);
-  if (!buffer) {
+  if (!mDecodeJob.mBuffer) {
     ReportFailureOnMainThread(WebAudioDecodeJob::UnknownError);
     return;
   }
-  for (uint32_t i = 0; i < channelCount; ++i) {
-    mDecodeJob.mBuffer.mChannelData[i] = buffer->GetData(i);
-  }
-#else
-  RefPtr<SharedBuffer> buffer =
-    SharedBuffer::Create(sizeof(AudioDataValue) *
-                         resampledFrames * channelCount);
-  if (!buffer) {
-    ReportFailureOnMainThread(WebAudioDecodeJob::UnknownError);
-    return;
-  }
-  auto data = static_cast<AudioDataValue*>(floatBuffer->Data());
-  for (uint32_t i = 0; i < channelCount; ++i) {
-    mDecodeJob.mBuffer.mChannelData[i] = data;
-    data += resampledFrames;
-  }
-#endif
-  mDecodeJob.mBuffer.mBuffer = buffer.forget();
-  mDecodeJob.mBuffer.mVolume = 1.0f;
-  mDecodeJob.mBuffer.mBufferFormat = AUDIO_OUTPUT_FORMAT;
 
-  uint32_t writeIndex = 0;
   RefPtr<AudioData> audioData;
   while ((audioData = mAudioQueue.PopFront())) {
     audioData->EnsureAudioBuffer(); // could lead to a copy :(
-    const AudioDataValue* bufferData = static_cast<AudioDataValue*>
+    AudioDataValue* bufferData = static_cast<AudioDataValue*>
       (audioData->mAudioBuffer->Data());
 
     if (sampleRate != destSampleRate) {
-      const uint32_t maxOutSamples = resampledFrames - writeIndex;
+      const uint32_t maxOutSamples = resampledFrames - mDecodeJob.mWriteIndex;
 
       for (uint32_t i = 0; i < audioData->mChannels; ++i) {
         uint32_t inSamples = audioData->mFrames;
         uint32_t outSamples = maxOutSamples;
-        AudioDataValue* outData = mDecodeJob.mBuffer.
-          ChannelDataForWrite<AudioDataValue>(i) + writeIndex;
+        float* outData =
+          mDecodeJob.mBuffer->GetDataForWrite(i) + mDecodeJob.mWriteIndex;
 
         WebAudioUtils::SpeexResamplerProcess(
             resampler, i, &bufferData[i * audioData->mFrames], &inSamples,
             outData, &outSamples);
 
         if (i == audioData->mChannels - 1) {
-          writeIndex += outSamples;
-          MOZ_ASSERT(writeIndex <= resampledFrames);
+          mDecodeJob.mWriteIndex += outSamples;
+          MOZ_ASSERT(mDecodeJob.mWriteIndex <= resampledFrames);
           MOZ_ASSERT(inSamples == audioData->mFrames);
         }
       }
     } else {
       for (uint32_t i = 0; i < audioData->mChannels; ++i) {
-        AudioDataValue* outData = mDecodeJob.mBuffer.
-          ChannelDataForWrite<AudioDataValue>(i) + writeIndex;
-        PodCopy(outData, &bufferData[i * audioData->mFrames],
-                audioData->mFrames);
+        float* outData =
+          mDecodeJob.mBuffer->GetDataForWrite(i) + mDecodeJob.mWriteIndex;
+        ConvertAudioSamples(&bufferData[i * audioData->mFrames],
+                            outData, audioData->mFrames);
 
         if (i == audioData->mChannels - 1) {
-          writeIndex += audioData->mFrames;
+          mDecodeJob.mWriteIndex += audioData->mFrames;
         }
       }
     }
   }
 
   if (sampleRate != destSampleRate) {
     uint32_t inputLatency = speex_resampler_get_input_latency(resampler);
-    const uint32_t maxOutSamples = resampledFrames - writeIndex;
+    const uint32_t maxOutSamples = resampledFrames - mDecodeJob.mWriteIndex;
     for (uint32_t i = 0; i < channelCount; ++i) {
       uint32_t inSamples = inputLatency;
       uint32_t outSamples = maxOutSamples;
-      AudioDataValue* outData =
-        mDecodeJob.mBuffer.ChannelDataForWrite<AudioDataValue>(i) + writeIndex;
+      float* outData =
+        mDecodeJob.mBuffer->GetDataForWrite(i) + mDecodeJob.mWriteIndex;
 
       WebAudioUtils::SpeexResamplerProcess(
           resampler, i, (AudioDataValue*)nullptr, &inSamples,
           outData, &outSamples);
 
       if (i == channelCount - 1) {
-        writeIndex += outSamples;
-        MOZ_ASSERT(writeIndex <= resampledFrames);
+        mDecodeJob.mWriteIndex += outSamples;
+        MOZ_ASSERT(mDecodeJob.mWriteIndex <= resampledFrames);
         MOZ_ASSERT(inSamples == inputLatency);
       }
     }
   }
 
-  mDecodeJob.mBuffer.mDuration = writeIndex;
   mPhase = PhaseEnum::AllocateBuffer;
   mMainThread->Dispatch(do_AddRef(this));
 }
 
 void
 MediaDecodeTask::AllocateBuffer()
 {
   MOZ_ASSERT(NS_IsMainThread());
@@ -467,19 +439,22 @@ MediaDecodeTask::CallbackTheResult()
 
 bool
 WebAudioDecodeJob::AllocateBuffer()
 {
   MOZ_ASSERT(!mOutput);
   MOZ_ASSERT(NS_IsMainThread());
 
   // Now create the AudioBuffer
-  mOutput = AudioBuffer::Create(mContext->GetOwner(),
-                                mContext->SampleRate(), Move(mBuffer));
-  return mOutput != nullptr;
+  ErrorResult rv;
+  uint32_t channelCount = mBuffer->GetChannels();
+  mOutput = AudioBuffer::Create(mContext->GetOwner(), channelCount,
+                                mWriteIndex, mContext->SampleRate(),
+                                mBuffer.forget(), rv);
+  return !rv.Failed();
 }
 
 void
 AsyncDecodeWebAudio(const char* aContentType, uint8_t* aBuffer,
                     uint32_t aLength, WebAudioDecodeJob& aDecodeJob)
 {
   Maybe<MediaContainerType> containerType = MakeMediaContainerType(aContentType);
   // Do not attempt to decode the media if we were not successful at sniffing
@@ -515,16 +490,17 @@ AsyncDecodeWebAudio(const char* aContent
 }
 
 WebAudioDecodeJob::WebAudioDecodeJob(const nsACString& aContentType,
                                      AudioContext* aContext,
                                      Promise* aPromise,
                                      DecodeSuccessCallback* aSuccessCallback,
                                      DecodeErrorCallback* aFailureCallback)
   : mContentType(aContentType)
+  , mWriteIndex(0)
   , mContext(aContext)
   , mPromise(aPromise)
   , mSuccessCallback(aSuccessCallback)
   , mFailureCallback(aFailureCallback)
 {
   MOZ_ASSERT(aContext);
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_COUNT_CTOR(WebAudioDecodeJob);
@@ -614,17 +590,19 @@ WebAudioDecodeJob::SizeOfExcludingThis(M
     amount += mSuccessCallback->SizeOfIncludingThis(aMallocSizeOf);
   }
   if (mFailureCallback) {
     amount += mFailureCallback->SizeOfIncludingThis(aMallocSizeOf);
   }
   if (mOutput) {
     amount += mOutput->SizeOfIncludingThis(aMallocSizeOf);
   }
-  amount += mBuffer.SizeOfExcludingThis(aMallocSizeOf, false);
+  if (mBuffer) {
+    amount += mBuffer->SizeOfIncludingThis(aMallocSizeOf);
+  }
   return amount;
 }
 
 size_t
 WebAudioDecodeJob::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
 {
   return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
 }
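
The decode path above drives the resampler one channel at a time and advances the shared write index only after the last channel, since every channel consumes the same number of input frames. A sketch of that loop against the public speexdsp API (the Gecko code goes through the WebAudioUtils::SpeexResamplerProcess wrapper instead):

    #include <cstdint>
    #include <speex/speex_resampler.h>

    void ResampleChunk(SpeexResamplerState* aResampler,
                       const float* const* aInput, uint32_t aChannels,
                       uint32_t aInFrames,
                       float* const* aOutput, uint32_t aOutCapacity,
                       uint32_t* aWriteIndex) {
      for (uint32_t i = 0; i < aChannels; ++i) {
        spx_uint32_t inSamples = aInFrames;                     // frames offered
        spx_uint32_t outSamples = aOutCapacity - *aWriteIndex;  // space left
        speex_resampler_process_float(aResampler, i,
                                      aInput[i], &inSamples,
                                      aOutput[i] + *aWriteIndex, &outSamples);
        if (i == aChannels - 1) {
          *aWriteIndex += outSamples;  // advance once per chunk, not per channel
        }
      }
    }
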
--- a/dom/media/webaudio/MediaBufferDecoder.h
+++ b/dom/media/webaudio/MediaBufferDecoder.h
@@ -2,17 +2,16 @@
 /* vim:set ts=2 sw=2 sts=2 et cindent: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef MediaBufferDecoder_h_
 #define MediaBufferDecoder_h_
 
-#include "AudioSegment.h"
 #include "nsWrapperCache.h"
 #include "nsCOMPtr.h"
 #include "nsString.h"
 #include "nsTArray.h"
 #include "mozilla/dom/TypedArray.h"
 #include "mozilla/MemoryReporting.h"
 
 namespace mozilla {
@@ -51,23 +50,24 @@ struct WebAudioDecodeJob final
   void OnSuccess(ErrorCode /* ignored */);
   void OnFailure(ErrorCode aErrorCode);
 
   bool AllocateBuffer();
 
   size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
   size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
 
-  AudioChunk mBuffer;
   nsCString mContentType;
+  uint32_t mWriteIndex;
   RefPtr<dom::AudioContext> mContext;
   RefPtr<dom::Promise> mPromise;
   RefPtr<dom::DecodeSuccessCallback> mSuccessCallback;
   RefPtr<dom::DecodeErrorCallback> mFailureCallback; // can be null
   RefPtr<dom::AudioBuffer> mOutput;
+  RefPtr<ThreadSharedFloatArrayBufferList> mBuffer;
 };
 
 void AsyncDecodeWebAudio(const char* aContentType, uint8_t* aBuffer,
                          uint32_t aLength, WebAudioDecodeJob& aDecodeJob);
 
 } // namespace mozilla
 
 #endif
--- a/dom/media/webaudio/OscillatorNode.cpp
+++ b/dom/media/webaudio/OscillatorNode.cpp
@@ -36,31 +36,33 @@ public:
     // Keep the default values in sync with OscillatorNode::OscillatorNode.
     , mFrequency(440.f)
     , mDetune(0.f)
     , mType(OscillatorType::Sine)
     , mPhase(0.)
     , mFinalFrequency(0.)
     , mPhaseIncrement(0.)
     , mRecomputeParameters(true)
+    , mCustomLength(0)
     , mCustomDisableNormalization(false)
   {
     MOZ_ASSERT(NS_IsMainThread());
     mBasicWaveFormCache = aDestination->Context()->GetBasicWaveFormCache();
   }
 
   void SetSourceStream(AudioNodeStream* aSource)
   {
     mSource = aSource;
   }
 
   enum Parameters {
     FREQUENCY,
     DETUNE,
     TYPE,
+    PERIODICWAVE_LENGTH,
     DISABLE_NORMALIZATION,
     START,
     STOP,
   };
   void RecvTimelineEvent(uint32_t aIndex,
                          AudioTimelineEvent& aEvent) override
   {
     mRecomputeParameters = true;
@@ -98,17 +100,19 @@ public:
   void SetInt32Parameter(uint32_t aIndex, int32_t aParam) override
   {
     switch (aIndex) {
       case TYPE:
         // Set the new type.
         mType = static_cast<OscillatorType>(aParam);
         if (mType == OscillatorType::Sine) {
           // Forget any previous custom data.
+          mCustomLength = 0;
           mCustomDisableNormalization = false;
+          mCustom = nullptr;
           mPeriodicWave = nullptr;
           mRecomputeParameters = true;
         }
         switch (mType) {
           case OscillatorType::Sine:
             mPhase = 0.0;
             break;
           case OscillatorType::Square:
@@ -118,37 +122,41 @@ public:
             break;
           case OscillatorType::Custom:
             break;
           default:
             NS_ERROR("Bad OscillatorNodeEngine type parameter.");
         }
         // End type switch.
         break;
+      case PERIODICWAVE_LENGTH:
+        MOZ_ASSERT(aParam >= 0, "negative custom array length");
+        mCustomLength = static_cast<uint32_t>(aParam);
+        break;
       case DISABLE_NORMALIZATION:
         MOZ_ASSERT(aParam >= 0, "negative custom array length");
         mCustomDisableNormalization = static_cast<uint32_t>(aParam);
         break;
       default:
         NS_ERROR("Bad OscillatorNodeEngine Int32Parameter.");
     }
     // End index switch.
   }
 
-  void SetBuffer(AudioChunk&& aBuffer) override
+  void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer) override
   {
-    MOZ_ASSERT(aBuffer.ChannelCount() == 2,
+    MOZ_ASSERT(mCustomLength, "Custom buffer sent before length");
+    mCustom = aBuffer;
+    MOZ_ASSERT(mCustom->GetChannels() == 2,
                "PeriodicWave should have sent two channels");
-    MOZ_ASSERT(aBuffer.mVolume == 1.0f);
-    mPeriodicWave =
-      WebCore::PeriodicWave::create(mSource->SampleRate(),
-                                    aBuffer.ChannelData<float>()[0],
-                                    aBuffer.ChannelData<float>()[1],
-                                    aBuffer.mDuration,
-                                    mCustomDisableNormalization);
+    mPeriodicWave = WebCore::PeriodicWave::create(mSource->SampleRate(),
+                                                  mCustom->GetData(0),
+                                                  mCustom->GetData(1),
+                                                  mCustomLength,
+                                                  mCustomDisableNormalization);
   }
 
   void IncrementPhase()
   {
     const float twoPiFloat = float(2 * M_PI);
     mPhase += mPhaseIncrement;
     if (mPhase > twoPiFloat) {
       mPhase -= twoPiFloat;
@@ -360,16 +368,20 @@ public:
     size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf);
 
     // Not owned:
     // - mSource
     // - mDestination
     // - mFrequency (internal ref owned by node)
     // - mDetune (internal ref owned by node)
 
+    if (mCustom) {
+      amount += mCustom->SizeOfIncludingThis(aMallocSizeOf);
+    }
+
     if (mPeriodicWave) {
       amount += mPeriodicWave->sizeOfIncludingThis(aMallocSizeOf);
     }
 
     return amount;
   }
 
   size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override
@@ -384,17 +396,19 @@ public:
   StreamTime mStop;
   AudioParamTimeline mFrequency;
   AudioParamTimeline mDetune;
   OscillatorType mType;
   float mPhase;
   float mFinalFrequency;
   float mPhaseIncrement;
   bool mRecomputeParameters;
+  RefPtr<ThreadSharedFloatArrayBufferList> mCustom;
   RefPtr<BasicWaveFormCache> mBasicWaveFormCache;
+  uint32_t mCustomLength;
   bool mCustomDisableNormalization;
   RefPtr<WebCore::PeriodicWave> mPeriodicWave;
 };
 
 OscillatorNode::OscillatorNode(AudioContext* aContext)
   : AudioScheduledSourceNode(aContext,
                              2,
                              ChannelCountMode::Max,
@@ -495,20 +509,23 @@ OscillatorNode::SendTypeToStream()
 }
 
 void OscillatorNode::SendPeriodicWaveToStream()
 {
   NS_ASSERTION(mType == OscillatorType::Custom,
                "Sending custom waveform to engine thread with non-custom type");
   MOZ_ASSERT(mStream, "Missing node stream.");
   MOZ_ASSERT(mPeriodicWave, "Send called without PeriodicWave object.");
+  SendInt32ParameterToStream(OscillatorNodeEngine::PERIODICWAVE_LENGTH,
+                             mPeriodicWave->DataLength());
   SendInt32ParameterToStream(OscillatorNodeEngine::DISABLE_NORMALIZATION,
                              mPeriodicWave->DisableNormalization());
-  AudioChunk data = mPeriodicWave->GetThreadSharedBuffer();
-  mStream->SetBuffer(Move(data));
+  RefPtr<ThreadSharedFloatArrayBufferList> data =
+    mPeriodicWave->GetThreadSharedBuffer();
+  mStream->SetBuffer(data.forget());
 }
 
 void
 OscillatorNode::Start(double aWhen, ErrorResult& aRv)
 {
   if (!WebAudioUtils::IsTimeValid(aWhen)) {
     aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
     return;
--- a/dom/media/webaudio/PeriodicWave.cpp
+++ b/dom/media/webaudio/PeriodicWave.cpp
@@ -25,47 +25,41 @@ PeriodicWave::PeriodicWave(AudioContext*
   : mContext(aContext)
   , mDisableNormalization(aDisableNormalization)
 {
   MOZ_ASSERT(aContext);
   MOZ_ASSERT(aRealData || aImagData);
 
   // Caller should have checked this and thrown.
   MOZ_ASSERT(aLength > 0);
-  mCoefficients.mDuration = aLength;
+  mLength = aLength;
 
-  // Copy coefficient data.
-  // The SharedBuffer and two arrays share a single allocation.
-  RefPtr<SharedBuffer> buffer =
-    SharedBuffer::Create(sizeof(float) * aLength * 2, fallible);
-  if (!buffer) {
+  // Copy coefficient data. The two arrays share an allocation.
+  mCoefficients = new ThreadSharedFloatArrayBufferList(2);
+  float* buffer = static_cast<float*>(malloc(aLength * sizeof(float) * 2));
+  if (buffer == nullptr) {
     aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
     return;
   }
 
-  auto data = static_cast<float*>(buffer->Data());
-  mCoefficients.mBuffer = Move(buffer);
+  if (aRealData) {
+    PodCopy(buffer, aRealData, aLength);
+  } else {
+    PodZero(buffer, aLength);
+  }
 
-  if (aRealData) {
-    PodCopy(data, aRealData, aLength);
-  } else {
-    PodZero(data, aLength);
-  }
-  mCoefficients.mChannelData.AppendElement(data);
+  mCoefficients->SetData(0, buffer, free, buffer);
 
-  data += aLength;
   if (aImagData) {
-    PodCopy(data, aImagData, aLength);
+    PodCopy(buffer+aLength, aImagData, aLength);
   } else {
-    PodZero(data, aLength);
+    PodZero(buffer+aLength, aLength);
   }
-  mCoefficients.mChannelData.AppendElement(data);
 
-  mCoefficients.mVolume = 1.0f;
-  mCoefficients.mBufferFormat = AUDIO_FORMAT_FLOAT32;
+  mCoefficients->SetData(1, nullptr, free, buffer+aLength);
 }
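 
 // A sketch of the allocation built above, assuming SetData(aChannel,
 // aDataToFree, aFreeFunc, aData) takes ownership of aDataToFree so the
 // shared block is freed exactly once:
 //
 //   buffer --> [ real[0..aLength) | imag[0..aLength) ]
 //   channel 0: data = buffer,           owns the allocation
 //   channel 1: data = buffer + aLength, non-owning (aDataToFree is null)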
 
 /* static */ already_AddRefed<PeriodicWave>
 PeriodicWave::Constructor(const GlobalObject& aGlobal,
                           AudioContext& aAudioContext,
                           const PeriodicWaveOptions& aOptions,
                           ErrorResult& aRv)
 {
@@ -103,17 +97,19 @@ PeriodicWave::Constructor(const GlobalOb
 }
 
 size_t
 PeriodicWave::SizeOfExcludingThisIfNotShared(MallocSizeOf aMallocSizeOf) const
 {
   // Not owned:
   // - mContext
   size_t amount = 0;
-  amount += mCoefficients.SizeOfExcludingThisIfUnshared(aMallocSizeOf);
+  if (!mCoefficients->IsShared()) {
+    amount += mCoefficients->SizeOfIncludingThis(aMallocSizeOf);
+  }
 
   return amount;
 }
 
 size_t
 PeriodicWave::SizeOfIncludingThisIfNotShared(MallocSizeOf aMallocSizeOf) const
 {
   return aMallocSizeOf(this) + SizeOfExcludingThisIfNotShared(aMallocSizeOf);
--- a/dom/media/webaudio/PeriodicWave.h
+++ b/dom/media/webaudio/PeriodicWave.h
@@ -41,36 +41,37 @@ public:
   {
     return mContext;
   }
 
   JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override;
 
   uint32_t DataLength() const
   {
-    return mCoefficients.mDuration;
+    return mLength;
   }
 
   bool DisableNormalization() const
   {
     return mDisableNormalization;
   }
 
-  const AudioChunk& GetThreadSharedBuffer() const
+  ThreadSharedFloatArrayBufferList* GetThreadSharedBuffer() const
   {
     return mCoefficients;
   }
 
   size_t SizeOfExcludingThisIfNotShared(MallocSizeOf aMallocSizeOf) const;
   size_t SizeOfIncludingThisIfNotShared(MallocSizeOf aMallocSizeOf) const;
 
 private:
   ~PeriodicWave() = default;
 
-  AudioChunk mCoefficients;
   RefPtr<AudioContext> mContext;
+  RefPtr<ThreadSharedFloatArrayBufferList> mCoefficients;
+  uint32_t mLength;
   bool mDisableNormalization;
 };
 
 } // namespace dom
 } // namespace mozilla
 
 #endif
--- a/dom/media/webaudio/ScriptProcessorNode.cpp
+++ b/dom/media/webaudio/ScriptProcessorNode.cpp
@@ -116,17 +116,18 @@ public:
       MutexAutoLock lock(mOutputQueue.Lock());
       amount += mOutputQueue.SizeOfExcludingThis(aMallocSizeOf);
     }
 
     return amount;
   }
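 
   // mOutputQueue is shared between the main thread (producer, in
   // FinishProducingOutputBuffer() below) and the graph thread (consumer,
   // in GetOutputBuffer()), hence the Lock() taken above.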
 
   // main thread
-  void FinishProducingOutputBuffer(const AudioChunk& aBuffer)
+  void FinishProducingOutputBuffer(ThreadSharedFloatArrayBufferList* aBuffer,
+                                   uint32_t aBufferSize)
   {
     MOZ_ASSERT(NS_IsMainThread());
 
     TimeStamp now = TimeStamp::Now();
 
     if (mLastEventTime.IsNull()) {
       mLastEventTime = now;
     } else {
@@ -136,38 +137,47 @@ public:
       // latency is also reset to 0.
       // It could happen that the output queue becomes empty before the input
       // node has fully caught up. In this case there will be events where
       // |(now - mLastEventTime)| is very short, making mLatency negative.
       // As this happens and the magnitude of |mLatency| becomes greater
       // than MAX_LATENCY_S, frame dropping starts again to keep the
       // output queue as short as possible.
       float latency = (now - mLastEventTime).ToSeconds();
-      float bufferDuration = aBuffer.mDuration / mSampleRate;
+      float bufferDuration = aBufferSize / mSampleRate;
       mLatency += latency - bufferDuration;
       mLastEventTime = now;
       if (fabs(mLatency) > MAX_LATENCY_S) {
         mDroppingBuffers = true;
       }
     }
 
     MutexAutoLock lock(mOutputQueue.Lock());
     if (mDroppingBuffers) {
       if (mOutputQueue.ReadyToConsume()) {
         return;
       }
       mDroppingBuffers = false;
       mLatency = 0;
     }
 
-    for (uint32_t offset = 0; offset < aBuffer.mDuration;
-         offset += WEBAUDIO_BLOCK_SIZE) {
+    for (uint32_t offset = 0; offset < aBufferSize; offset += WEBAUDIO_BLOCK_SIZE) {
       AudioChunk& chunk = mOutputQueue.Produce();
-      chunk = aBuffer;
-      chunk.SliceTo(offset, offset + WEBAUDIO_BLOCK_SIZE);
+      if (aBuffer) {
+        chunk.mDuration = WEBAUDIO_BLOCK_SIZE;
+        chunk.mBuffer = aBuffer;
+        chunk.mChannelData.SetLength(aBuffer->GetChannels());
+        for (uint32_t i = 0; i < aBuffer->GetChannels(); ++i) {
+          chunk.mChannelData[i] = aBuffer->GetData(i) + offset;
+        }
+        chunk.mVolume = 1.0f;
+        chunk.mBufferFormat = AUDIO_FORMAT_FLOAT32;
+      } else {
+        chunk.SetNull(WEBAUDIO_BLOCK_SIZE);
+      }
     }
   }
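 
   // A worked example of the bookkeeping above, assuming a 4096-frame
   // buffer at 44100 Hz: bufferDuration = 4096 / 44100 ~= 92.9 ms. If
   // main-thread events arrive every 100 ms, each call adds roughly
   // 7.1 ms to mLatency; once |mLatency| exceeds MAX_LATENCY_S, buffers
   // are dropped until the queue drains and the measurement resets.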
 
   // graph thread
   AudioChunk GetOutputBuffer()
   {
     MOZ_ASSERT(!NS_IsMainThread());
     AudioChunk buffer;
@@ -369,67 +379,67 @@ private:
         , mStream(aStream)
         , mInputBuffer(aInputBuffer)
         , mPlaybackTime(aPlaybackTime)
       {
       }
 
       NS_IMETHOD Run() override
       {
+        RefPtr<ThreadSharedFloatArrayBufferList> output;
 
         auto engine =
           static_cast<ScriptProcessorNodeEngine*>(mStream->Engine());
-        AudioChunk output;
-        output.SetNull(engine->mBufferSize);
         {
           auto node = static_cast<ScriptProcessorNode*>
             (engine->NodeMainThread());
           if (!node) {
             return NS_OK;
           }
 
           if (node->HasListenersFor(nsGkAtoms::onaudioprocess)) {
-            DispatchAudioProcessEvent(node, &output);
+            output = DispatchAudioProcessEvent(node);
           }
           // The node may have been destroyed during event dispatch.
         }
 
         // Append it to our output buffer queue
-        engine->GetSharedBuffers()->FinishProducingOutputBuffer(output);
+        engine->GetSharedBuffers()->
+          FinishProducingOutputBuffer(output, engine->mBufferSize);
 
         return NS_OK;
       }
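 
       // In outline: Run() executes on the main thread and hands |output|
       // to SharedBuffers, which slices it into WEBAUDIO_BLOCK_SIZE chunks
       // on mOutputQueue; GetOutputBuffer() consumes those chunks on the
       // graph thread.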
 
-      // Sets up |output| iff buffers are set in event handlers.
-      void DispatchAudioProcessEvent(ScriptProcessorNode* aNode,
-                                     AudioChunk* aOutput)
+      // Returns the output buffers if set in event handlers.
+      ThreadSharedFloatArrayBufferList*
+        DispatchAudioProcessEvent(ScriptProcessorNode* aNode)
       {
         AudioContext* context = aNode->Context();
         if (!context) {
-          return;
+          return nullptr;
         }
 
         AutoJSAPI jsapi;
         if (NS_WARN_IF(!jsapi.Init(aNode->GetOwner()))) {
-          return;
+          return nullptr;
         }
         JSContext* cx = jsapi.cx();
         uint32_t inputChannelCount = aNode->ChannelCount();
 
         // Create the input buffer
         RefPtr<AudioBuffer> inputBuffer;
         if (mInputBuffer) {
           ErrorResult rv;
           inputBuffer =
             AudioBuffer::Create(context->GetOwner(), inputChannelCount,
                                 aNode->BufferSize(), context->SampleRate(),
                                 mInputBuffer.forget(), rv);
           if (rv.Failed()) {
             rv.SuppressException();
-            return;
+            return nullptr;
           }
         }
 
         // Ask content to produce data in the output buffer
         // Note that we always avoid creating the output buffer here, and we try to
         // avoid creating the input buffer as well.  The AudioProcessingEvent class
         // knows how to lazily create them if needed once the script tries to access
         // them.  Otherwise, we may be able to get away without creating them!
@@ -443,21 +453,20 @@ private:
         // FinishProducingOutputBuffer() treats a null output as silence.
         // GetThreadSharedChannelsForRate() may also return null after OOM.
         if (event->HasOutputBuffer()) {
           ErrorResult rv;
           AudioBuffer* buffer = event->GetOutputBuffer(rv);
           // HasOutputBuffer() returning true means that GetOutputBuffer()
           // will not fail.
           MOZ_ASSERT(!rv.Failed());
-          *aOutput = buffer->GetThreadSharedChannelsForRate(cx);
-          MOZ_ASSERT(aOutput->IsNull() ||
-                     aOutput->mBufferFormat == AUDIO_FORMAT_FLOAT32,
-                     "AudioBuffers initialized from JS have float data");
+          return buffer->GetThreadSharedChannelsForRate(cx);
         }
+
+        return nullptr;
       }
     private:
       RefPtr<AudioNodeStream> mStream;
       RefPtr<ThreadSharedFloatArrayBufferList> mInputBuffer;
       double mPlaybackTime;
     };
 
     RefPtr<Command> command = new Command(aStream, mInputBuffer.forget(),
--- a/dom/media/webaudio/blink/Reverb.cpp
+++ b/dom/media/webaudio/blink/Reverb.cpp
@@ -39,25 +39,25 @@ namespace WebCore {
 
 // Empirical gain calibration tested across many impulse responses to ensure perceived volume is the same as the dry (unprocessed) signal
 const float GainCalibration = 0.00125f;
 const float GainCalibrationSampleRate = 44100;
 
 // A minimum power value to use when normalizing a silent (or very quiet) impulse response
 const float MinPower = 0.000125f;
 
-static float calculateNormalizationScale(const nsTArray<const float*>& response, size_t aLength, float sampleRate)
+static float calculateNormalizationScale(ThreadSharedFloatArrayBufferList* response, size_t aLength, float sampleRate)
 {
     // Normalize by RMS power
-    size_t numberOfChannels = response.Length();
+    size_t numberOfChannels = response->GetChannels();
 
     float power = 0;
 
     for (size_t i = 0; i < numberOfChannels; ++i) {
-        float channelPower = AudioBufferSumOfSquares(response[i], aLength);
+        float channelPower = AudioBufferSumOfSquares(static_cast<const float*>(response->GetData(i)), aLength);
         power += channelPower;
     }
 
     power = sqrt(power / (numberOfChannels * aLength));
 
     // Protect against accidental overload
     if (!IsFinite(power) || IsNaN(power) || power < MinPower)
         power = MinPower;
@@ -66,41 +66,43 @@ static float calculateNormalizationScale
 
     scale *= GainCalibration; // calibrate to make perceived volume same as unprocessed
 
     // Scale depends on sample-rate.
     if (sampleRate)
         scale *= GainCalibrationSampleRate / sampleRate;
 
     // True-stereo compensation
-    if (numberOfChannels == 4)
+    if (response->GetChannels() == 4)
         scale *= 0.5f;
 
     return scale;
 }
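 
 // A rough worked example with hypothetical numbers: a mono response with
 // RMS power 0.05 at 44100 Hz gives scale = (1 / 0.05) * 0.00125 = 0.025,
 // so quiet impulse responses are boosted and loud ones attenuated toward
 // the calibrated level.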
 
-Reverb::Reverb(const AudioChunk& impulseResponse, size_t maxFFTSize, bool useBackgroundThreads, bool normalize, float sampleRate)
+Reverb::Reverb(ThreadSharedFloatArrayBufferList* impulseResponse, size_t impulseResponseBufferLength, size_t maxFFTSize, bool useBackgroundThreads, bool normalize, float sampleRate)
 {
-    size_t impulseResponseBufferLength = impulseResponse.mDuration;
-    float scale = impulseResponse.mVolume;
+    float scale = 1;
 
-    AutoTArray<const float*,4> irChannels(impulseResponse.ChannelData<float>());
+    AutoTArray<const float*,4> irChannels;
+    for (size_t i = 0; i < impulseResponse->GetChannels(); ++i) {
+        irChannels.AppendElement(impulseResponse->GetData(i));
+    }
     AutoTArray<float,1024> tempBuf;
 
     if (normalize) {
-        scale = calculateNormalizationScale(irChannels, impulseResponseBufferLength, sampleRate);
-    }
+        scale = calculateNormalizationScale(impulseResponse, impulseResponseBufferLength, sampleRate);
 
-    if (scale != 1.0f) {
-        tempBuf.SetLength(irChannels.Length()*impulseResponseBufferLength);
-        for (uint32_t i = 0; i < irChannels.Length(); ++i) {
-            float* buf = &tempBuf[i*impulseResponseBufferLength];
-            AudioBufferCopyWithScale(irChannels[i], scale, buf,
-                                     impulseResponseBufferLength);
-            irChannels[i] = buf;
+        if (scale) {
+            tempBuf.SetLength(irChannels.Length()*impulseResponseBufferLength);
+            for (uint32_t i = 0; i < irChannels.Length(); ++i) {
+                float* buf = &tempBuf[i*impulseResponseBufferLength];
+                AudioBufferCopyWithScale(irChannels[i], scale, buf,
+                                         impulseResponseBufferLength);
+                irChannels[i] = buf;
+            }
         }
     }
 
     initialize(irChannels, impulseResponseBufferLength,
                maxFFTSize, useBackgroundThreads);
 }
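 
 // A minimal caller sketch under the restored signature (variable names
 // hypothetical):
 //
 //   RefPtr<ThreadSharedFloatArrayBufferList> ir = ...;  // impulse response
 //   size_t irFrames = ...;                              // frames per channel
 //   nsAutoPtr<Reverb> reverb(new Reverb(ir, irFrames, maxFFTSize,
 //                                       useBackgroundThreads, normalize,
 //                                       sampleRate));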
 
 size_t Reverb::sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
--- a/dom/media/webaudio/blink/Reverb.h
+++ b/dom/media/webaudio/blink/Reverb.h
@@ -30,26 +30,31 @@
 #define Reverb_h
 
 #include "ReverbConvolver.h"
 #include "nsAutoPtr.h"
 #include "nsTArray.h"
 #include "AudioBlock.h"
 #include "mozilla/MemoryReporting.h"
 
+namespace mozilla {
+class ThreadSharedFloatArrayBufferList;
+} // namespace mozilla
+
 namespace WebCore {
 
 // Multi-channel convolution reverb with channel matrixing - one or more ReverbConvolver objects are used internally.
 
 class Reverb {
 public:
     enum { MaxFrameSize = 256 };
 
     // renderSliceSize is a rendering hint, so the FFTs can be optimized to not all occur at the same time (very bad when rendering on a real-time thread).
-    Reverb(const mozilla::AudioChunk& impulseResponseBuffer, size_t maxFFTSize,
+    Reverb(mozilla::ThreadSharedFloatArrayBufferList* impulseResponseBuffer,
+           size_t impulseResponseBufferLength, size_t maxFFTSize,
            bool useBackgroundThreads, bool normalize, float sampleRate);
 
     void process(const mozilla::AudioBlock* sourceBus,
                  mozilla::AudioBlock* destinationBus);
 
     size_t impulseResponseLength() const { return m_impulseResponseLength; }
 
     size_t sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;