Bug 1329744 - AudioBuffer ctor updated, r=padenot
author: Andrea Marchesini <amarchesini@mozilla.com>
Tue, 10 Jan 2017 21:30:28 +0000
changeset 356881 44518b9776d40c85da56f486f16a3a6d26337c3f
parent 356880 bd533dd0ca2a44161eba477f33c6b99873e52ce6
child 356882 9edd6d03c95308727054e6c868acbe166eca17e4
push id: 10621
push user: jlund@mozilla.com
push date: Mon, 23 Jan 2017 16:02:43 +0000
treeherder: mozilla-aurora@dca7b42e6c67 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: padenot
bugs: 1329744
milestone: 53.0a1
Bug 1329744 - AudioBuffer ctor updated, r=padenot
dom/events/test/test_all_synthetic_events.html
dom/media/webaudio/AudioBuffer.cpp
dom/media/webaudio/AudioBuffer.h
dom/media/webaudio/AudioContext.cpp
dom/media/webaudio/AudioDestinationNode.cpp
dom/media/webaudio/AudioProcessingEvent.cpp
dom/media/webaudio/MediaBufferDecoder.cpp
dom/media/webaudio/ScriptProcessorNode.cpp
dom/media/webaudio/test/test_stereoPannerNode.html
dom/webidl/AudioBuffer.webidl
testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/idl-test.html
--- a/dom/events/test/test_all_synthetic_events.html
+++ b/dom/events/test/test_all_synthetic_events.html
@@ -287,17 +287,17 @@ const kEventConstructors = {
                                                          var e = document.createEvent("notifypaintevent");
                                                          e.initEvent(aName, aProps.bubbles, aProps.cancelable);
                                                          return e;
                                                        },
                                              },
   OfflineAudioCompletionEvent:               { create: "AudioContext" in self
                                                         ? function (aName, aProps) {
                                                             var ac = new AudioContext();
-                                                            var ab = new AudioBuffer(ac, { length: 42 });
+                                                            var ab = new AudioBuffer({ length: 42, sampleRate: ac.sampleRate });
                                                             aProps.renderedBuffer = ab;
                                                             return new OfflineAudioCompletionEvent(aName, aProps);
                                                           }
                                                         : null,
                                              },
   PageTransitionEvent:                       { create: function (aName, aProps) {
                                                          return new PageTransitionEvent(aName, aProps);
                                                        },
--- a/dom/media/webaudio/AudioBuffer.cpp
+++ b/dom/media/webaudio/AudioBuffer.cpp
@@ -151,21 +151,23 @@ AudioBufferMemoryTracker::CollectReports
 
   MOZ_COLLECT_REPORT(
     "explicit/webaudio/audiobuffer", KIND_HEAP, UNITS_BYTES, amount,
     "Memory used by AudioBuffer objects (Web Audio).");
 
   return NS_OK;
 }
 
-AudioBuffer::AudioBuffer(AudioContext* aContext, uint32_t aNumberOfChannels,
-                         uint32_t aLength, float aSampleRate,
+AudioBuffer::AudioBuffer(nsPIDOMWindowInner* aWindow,
+                         uint32_t aNumberOfChannels,
+                         uint32_t aLength,
+                         float aSampleRate,
                          already_AddRefed<ThreadSharedFloatArrayBufferList>
                            aInitialContents)
-  : mOwnerWindow(do_GetWeakReference(aContext->GetOwner())),
+  : mOwnerWindow(do_GetWeakReference(aWindow)),
     mSharedChannels(aInitialContents),
     mLength(aLength),
     mSampleRate(aSampleRate)
 {
   MOZ_ASSERT(!mSharedChannels ||
              mSharedChannels->GetChannels() == aNumberOfChannels);
   mJSChannels.SetLength(aNumberOfChannels);
   mozilla::HoldJSObjects(this);
@@ -176,40 +178,39 @@ AudioBuffer::~AudioBuffer()
 {
   AudioBufferMemoryTracker::UnregisterAudioBuffer(this);
   ClearJSChannels();
   mozilla::DropJSObjects(this);
 }
 
 /* static */ already_AddRefed<AudioBuffer>
 AudioBuffer::Constructor(const GlobalObject& aGlobal,
-                         AudioContext& aAudioContext,
                          const AudioBufferOptions& aOptions,
                          ErrorResult& aRv)
 {
   if (!aOptions.mNumberOfChannels) {
     aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
     return nullptr;
   }
 
-  float sampleRate = aOptions.mSampleRate.WasPassed()
-                       ? aOptions.mSampleRate.Value()
-                       : aAudioContext.SampleRate();
-  return Create(&aAudioContext, aOptions.mNumberOfChannels, aOptions.mLength,
-                sampleRate, aRv);
+  nsCOMPtr<nsPIDOMWindowInner> window =
+    do_QueryInterface(aGlobal.GetAsSupports());
+
+  return Create(window, aOptions.mNumberOfChannels, aOptions.mLength,
+                aOptions.mSampleRate, aRv);
 }
 
 void
 AudioBuffer::ClearJSChannels()
 {
   mJSChannels.Clear();
 }
 
 /* static */ already_AddRefed<AudioBuffer>
-AudioBuffer::Create(AudioContext* aContext, uint32_t aNumberOfChannels,
+AudioBuffer::Create(nsPIDOMWindowInner* aWindow, uint32_t aNumberOfChannels,
                     uint32_t aLength, float aSampleRate,
                     already_AddRefed<ThreadSharedFloatArrayBufferList>
                       aInitialContents,
                     ErrorResult& aRv)
 {
   // Note that a buffer with zero channels is permitted here for the sake of
   // AudioProcessingEvent, where channel counts must match parameters passed
   // to createScriptProcessor(), one of which may be zero.
@@ -217,17 +218,17 @@ AudioBuffer::Create(AudioContext* aConte
       aSampleRate > WebAudioUtils::MaxSampleRate ||
       aNumberOfChannels > WebAudioUtils::MaxChannelCount ||
       !aLength || aLength > INT32_MAX) {
     aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
     return nullptr;
   }
 
   RefPtr<AudioBuffer> buffer =
-    new AudioBuffer(aContext, aNumberOfChannels, aLength, aSampleRate,
+    new AudioBuffer(aWindow, aNumberOfChannels, aLength, aSampleRate,
                     Move(aInitialContents));
 
   return buffer.forget();
 }
 
 JSObject*
 AudioBuffer::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
 {
--- a/dom/media/webaudio/AudioBuffer.h
+++ b/dom/media/webaudio/AudioBuffer.h
@@ -8,62 +8,60 @@
 #define AudioBuffer_h_
 
 #include "nsWrapperCache.h"
 #include "nsCycleCollectionParticipant.h"
 #include "mozilla/Attributes.h"
 #include "mozilla/StaticPtr.h"
 #include "mozilla/StaticMutex.h"
 #include "nsTArray.h"
-#include "AudioContext.h"
 #include "js/TypeDecls.h"
 #include "mozilla/MemoryReporting.h"
 
 namespace mozilla {
 
 class ErrorResult;
 class ThreadSharedFloatArrayBufferList;
 
 namespace dom {
 
 struct AudioBufferOptions;
-class AudioContext;
 
 /**
  * An AudioBuffer keeps its data either in the mJSChannels objects, which
  * are Float32Arrays, or in mSharedChannels if the mJSChannels objects' buffers
  * are detached.
  */
 class AudioBuffer final : public nsWrapperCache
 {
 public:
   // If non-null, aInitialContents must have number of channels equal to
   // aNumberOfChannels and their lengths must be at least aLength.
   static already_AddRefed<AudioBuffer>
-  Create(AudioContext* aContext, uint32_t aNumberOfChannels,
+  Create(nsPIDOMWindowInner* aWindow, uint32_t aNumberOfChannels,
          uint32_t aLength, float aSampleRate,
          already_AddRefed<ThreadSharedFloatArrayBufferList> aInitialContents,
          ErrorResult& aRv);
 
   static already_AddRefed<AudioBuffer>
-  Create(AudioContext* aContext, uint32_t aNumberOfChannels,
+  Create(nsPIDOMWindowInner* aWindow, uint32_t aNumberOfChannels,
          uint32_t aLength, float aSampleRate,
          ErrorResult& aRv)
   {
-    return Create(aContext, aNumberOfChannels, aLength, aSampleRate,
+    return Create(aWindow, aNumberOfChannels, aLength, aSampleRate,
                   nullptr, aRv);
   }
 
   size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
 
   NS_INLINE_DECL_CYCLE_COLLECTING_NATIVE_REFCOUNTING(AudioBuffer)
   NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_NATIVE_CLASS(AudioBuffer)
 
   static already_AddRefed<AudioBuffer>
-  Constructor(const GlobalObject& aGlobal, AudioContext& aAudioContext,
+  Constructor(const GlobalObject& aGlobal,
               const AudioBufferOptions& aOptions, ErrorResult& aRv);
 
   nsPIDOMWindowInner* GetParentObject() const
   {
     nsCOMPtr<nsPIDOMWindowInner> parentObject = do_QueryReferent(mOwnerWindow);
     return parentObject;
   }
 
@@ -105,17 +103,17 @@ public:
 
   /**
    * Returns a ThreadSharedFloatArrayBufferList containing the sample data.
    * Can return null if there is no data.
    */
   ThreadSharedFloatArrayBufferList* GetThreadSharedChannelsForRate(JSContext* aContext);
 
 protected:
-  AudioBuffer(AudioContext* aContext, uint32_t aNumberOfChannels,
+  AudioBuffer(nsPIDOMWindowInner* aWindow, uint32_t aNumberOfChannels,
               uint32_t aLength, float aSampleRate,
               already_AddRefed<ThreadSharedFloatArrayBufferList>
                 aInitialContents);
   ~AudioBuffer();
 
   bool RestoreJSChannelData(JSContext* aJSContext);
 
   already_AddRefed<ThreadSharedFloatArrayBufferList>
--- a/dom/media/webaudio/AudioContext.cpp
+++ b/dom/media/webaudio/AudioContext.cpp
@@ -281,17 +281,17 @@ AudioContext::CreateBuffer(uint32_t aNum
                            float aSampleRate,
                            ErrorResult& aRv)
 {
   if (!aNumberOfChannels) {
     aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
     return nullptr;
   }
 
-  return AudioBuffer::Create(this, aNumberOfChannels, aLength,
+  return AudioBuffer::Create(GetOwner(), aNumberOfChannels, aLength,
                              aSampleRate, aRv);
 }
 
 namespace {
 
 bool IsValidBufferSize(uint32_t aBufferSize) {
   switch (aBufferSize) {
   case 0:       // let the implementation choose the buffer size
--- a/dom/media/webaudio/AudioDestinationNode.cpp
+++ b/dom/media/webaudio/AudioDestinationNode.cpp
@@ -153,18 +153,18 @@ public:
     context->Shutdown();
     // Shutdown drops self reference, but the context is still referenced by aNode,
     // which is strongly referenced by the runnable that called
     // AudioDestinationNode::FireOfflineCompletionEvent.
 
     // Create the input buffer
     ErrorResult rv;
     RefPtr<AudioBuffer> renderedBuffer =
-      AudioBuffer::Create(context, mNumberOfChannels, mLength, mSampleRate,
-                          mBuffer.forget(), rv);
+      AudioBuffer::Create(context->GetOwner(), mNumberOfChannels, mLength,
+                          mSampleRate, mBuffer.forget(), rv);
     if (rv.Failed()) {
       rv.SuppressException();
       return;
     }
 
     aNode->ResolvePromise(renderedBuffer);
 
     RefPtr<OnCompleteTask> onCompleteTask =
--- a/dom/media/webaudio/AudioProcessingEvent.cpp
+++ b/dom/media/webaudio/AudioProcessingEvent.cpp
@@ -40,17 +40,17 @@ AudioProcessingEvent::WrapObjectInternal
   return AudioProcessingEventBinding::Wrap(aCx, this, aGivenProto);
 }
 
 already_AddRefed<AudioBuffer>
 AudioProcessingEvent::LazilyCreateBuffer(uint32_t aNumberOfChannels,
                                          ErrorResult& aRv)
 {
   RefPtr<AudioBuffer> buffer =
-    AudioBuffer::Create(mNode->Context(), aNumberOfChannels,
+    AudioBuffer::Create(mNode->Context()->GetOwner(), aNumberOfChannels,
                         mNode->BufferSize(),
                         mNode->Context()->SampleRate(), aRv);
   MOZ_ASSERT(buffer || aRv.ErrorCodeIs(NS_ERROR_OUT_OF_MEMORY));
   return buffer.forget();
 }
 
 } // namespace dom
 } // namespace mozilla
--- a/dom/media/webaudio/MediaBufferDecoder.cpp
+++ b/dom/media/webaudio/MediaBufferDecoder.cpp
@@ -486,17 +486,17 @@ bool
 WebAudioDecodeJob::AllocateBuffer()
 {
   MOZ_ASSERT(!mOutput);
   MOZ_ASSERT(NS_IsMainThread());
 
   // Now create the AudioBuffer
   ErrorResult rv;
   uint32_t channelCount = mBuffer->GetChannels();
-  mOutput = AudioBuffer::Create(mContext, channelCount,
+  mOutput = AudioBuffer::Create(mContext->GetOwner(), channelCount,
                                 mWriteIndex, mContext->SampleRate(),
                                 mBuffer.forget(), rv);
   return !rv.Failed();
 }
 
 void
 AsyncDecodeWebAudio(const char* aContentType, uint8_t* aBuffer,
                     uint32_t aLength, WebAudioDecodeJob& aDecodeJob)
--- a/dom/media/webaudio/ScriptProcessorNode.cpp
+++ b/dom/media/webaudio/ScriptProcessorNode.cpp
@@ -423,17 +423,17 @@ private:
         JSContext* cx = jsapi.cx();
         uint32_t inputChannelCount = aNode->ChannelCount();
 
         // Create the input buffer
         RefPtr<AudioBuffer> inputBuffer;
         if (mInputBuffer) {
           ErrorResult rv;
           inputBuffer =
-            AudioBuffer::Create(context, inputChannelCount,
+            AudioBuffer::Create(context->GetOwner(), inputChannelCount,
                                 aNode->BufferSize(), context->SampleRate(),
                                 mInputBuffer.forget(), rv);
           if (rv.Failed()) {
             rv.SuppressException();
             return nullptr;
           }
         }
 
--- a/dom/media/webaudio/test/test_stereoPannerNode.html
+++ b/dom/media/webaudio/test/test_stereoPannerNode.html
@@ -65,62 +65,63 @@ expectException(function() {
 
 // A sine to be used to fill the buffers
 function sine(t) {
   return Math.sin(440 * 2 * Math.PI * t / context.sampleRate);
 }
 
 // A couple mono and stereo buffers: the StereoPannerNode equation is different
 // if the input is mono or stereo
-var stereoBuffer = new AudioBuffer(context, { numberOfChannels: 2,
-                                              length: BUF_SIZE });
-var monoBuffer = new AudioBuffer(context, { numberOfChannels: 1,
-                                            length: BUF_SIZE,
-                                            sampleRate: context.sampleRate });
+var stereoBuffer = new AudioBuffer({ numberOfChannels: 2,
+                                     length: BUF_SIZE,
+                                     sampleRate: context.sampleRate });
+var monoBuffer = new AudioBuffer({ numberOfChannels: 1,
+                                   length: BUF_SIZE,
+                                   sampleRate: context.sampleRate });
 for (var i = 0; i < BUF_SIZE; ++i) {
   monoBuffer.getChannelData(0)[i]   =
   stereoBuffer.getChannelData(0)[i] =
   stereoBuffer.getChannelData(1)[i] = sine(i);
 }
 
 // Expected test vectors
 function expectedBufferNoop(gain) {
   gain = gain || 1.0;
-  var expectedBuffer = new AudioBuffer(context, { numberOfChannels: 2,
-                                                  length: BUF_SIZE,
-                                                  sampleRate: SR });
+  var expectedBuffer = new AudioBuffer({ numberOfChannels: 2,
+                                         length: BUF_SIZE,
+                                         sampleRate: SR });
   for (var i = 0; i < BUF_SIZE; i++) {
     expectedBuffer.getChannelData(0)[i] = gain * sine(i);
     expectedBuffer.getChannelData(1)[i] = gain * sine(i);
   }
   return expectedBuffer;
 }
 
 function expectedBufferForStereo(gain) {
   gain = gain || 1.0;
-  var expectedBuffer = new AudioBuffer(context, { numberOfChannels: 2,
-                                                  length: BUF_SIZE,
-                                                  sampleRate: SR });
+  var expectedBuffer = new AudioBuffer({ numberOfChannels: 2,
+                                         length: BUF_SIZE,
+                                         sampleRate: SR });
   var gainPanning = gainForPanningStereoToStereo(PANNING);
   gainPanning[0] *= gain;
   gainPanning[1] *= gain;
   for (var i = 0; i < BUF_SIZE; i++) {
     var values = [ sine(i), sine(i) ];
     var processed = applyStereoToStereoPanning(values[0], values[1], gainPanning, PANNING);
     expectedBuffer.getChannelData(0)[i] = processed[0];
     expectedBuffer.getChannelData(1)[i] = processed[1];
   }
   return expectedBuffer;
 }
 
 function expectedBufferForMono(gain) {
   gain = gain || 1.0;
-  var expectedBuffer = new AudioBuffer(context, { numberOfChannels: 2,
-                                                  length: BUF_SIZE,
-                                                  sampleRate: SR });
+  var expectedBuffer = new AudioBuffer({ numberOfChannels: 2,
+                                         length: BUF_SIZE,
+                                         sampleRate: SR });
   var gainPanning = gainForPanningMonoToStereo(PANNING);
   gainPanning[0] *= gain;
   gainPanning[1] *= gain;
   for (var i = 0; i < BUF_SIZE; i++) {
     var value = sine(i);
     var processed = applyMonoToStereoPanning(value, gainPanning);
     expectedBuffer.getChannelData(0)[i] = processed[0];
     expectedBuffer.getChannelData(1)[i] = processed[1];
--- a/dom/webidl/AudioBuffer.webidl
+++ b/dom/webidl/AudioBuffer.webidl
@@ -8,21 +8,21 @@
  *
  * Copyright © 2012 W3C® (MIT, ERCIM, Keio), All Rights Reserved. W3C
  * liability, trademark and document use rules apply.
  */
 
 dictionary AudioBufferOptions {
              unsigned long numberOfChannels = 1;
     required unsigned long length;
-             float         sampleRate;
+    required float         sampleRate;
 };
 
 [Pref="dom.webaudio.enabled",
- Constructor(BaseAudioContext context, AudioBufferOptions options)]
+ Constructor(AudioBufferOptions options)]
 interface AudioBuffer {
 
     readonly attribute float sampleRate;
     readonly attribute unsigned long length;
 
     // in seconds 
     readonly attribute double duration;
 
--- a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/idl-test.html
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/idl-test.html
@@ -61,20 +61,20 @@ interface BaseAudioContext : EventTarget
     DynamicsCompressorNode createDynamicsCompressor ();
     OscillatorNode         createOscillator ();
     PeriodicWave           createPeriodicWave (Float32Array real, Float32Array imag, optional PeriodicWaveConstraints constraints);
 };</pre>
 
    <pre id="audio-buffer-idl">dictionary AudioBufferOptions {
              unsigned long numberOfChannels = 1;
     required unsigned long length;
-             float         sampleRate;
+    required float         sampleRate;
 };
 
-[Constructor(BaseAudioContext context, AudioBufferOptions options)]
+[Constructor(AudioBufferOptions options)]
 interface AudioBuffer {
     readonly        attribute float         sampleRate;
     readonly        attribute unsigned long length;
     readonly        attribute double        duration;
     readonly        attribute unsigned long numberOfChannels;
     Float32Array getChannelData (unsigned long channel);
     void         copyFromChannel (Float32Array destination, unsigned long channelNumber, optional unsigned long startInChannel = 0
               );