Bug 991533 - Limit AudioBuffer channel counts and sample rate range. r=padenot, r=ehsan, r=bz, a=abillings
author Karl Tomlinson <karlt+@karlt.net>
Tue, 20 May 2014 09:21:31 -0400
changeset 192309 ece051d029de
parent 192308 09a9c1859b6b
child 192310 20143f06748b
push id 3566
push user ryanvm@gmail.com
push date 2014-05-20 13:28 +0000
treeherder mozilla-beta@7d6a74b90622
reviewers padenot, ehsan, bz, abillings
bugs 991533
milestone 30.0
content/media/webaudio/AudioBuffer.cpp
content/media/webaudio/AudioBuffer.h
content/media/webaudio/AudioContext.cpp
content/media/webaudio/AudioDestinationNode.cpp
content/media/webaudio/AudioProcessingEvent.cpp
content/media/webaudio/AudioProcessingEvent.h
content/media/webaudio/MediaBufferDecoder.cpp
content/media/webaudio/ScriptProcessorNode.cpp
content/media/webaudio/WebAudioUtils.h
content/media/webaudio/blink/DynamicsCompressorKernel.cpp
dom/webidl/AudioProcessingEvent.webidl
--- a/content/media/webaudio/AudioBuffer.cpp
+++ b/content/media/webaudio/AudioBuffer.cpp
@@ -36,54 +36,69 @@ NS_IMPL_CYCLE_COLLECTION_TRACE_BEGIN(Aud
   for (uint32_t i = 0; i < tmp->mJSChannels.Length(); ++i) {
     NS_IMPL_CYCLE_COLLECTION_TRACE_JS_MEMBER_CALLBACK(mJSChannels[i])
   }
 NS_IMPL_CYCLE_COLLECTION_TRACE_END
 
 NS_IMPL_CYCLE_COLLECTION_ROOT_NATIVE(AudioBuffer, AddRef)
 NS_IMPL_CYCLE_COLLECTION_UNROOT_NATIVE(AudioBuffer, Release)
 
-AudioBuffer::AudioBuffer(AudioContext* aContext, uint32_t aLength,
-                         float aSampleRate)
+AudioBuffer::AudioBuffer(AudioContext* aContext, uint32_t aNumberOfChannels,
+                         uint32_t aLength, float aSampleRate)
   : mContext(aContext),
     mLength(aLength),
     mSampleRate(aSampleRate)
 {
+  mJSChannels.SetCapacity(aNumberOfChannels);
   SetIsDOMBinding();
   mozilla::HoldJSObjects(this);
 }
 
 AudioBuffer::~AudioBuffer()
 {
   ClearJSChannels();
 }
 
 void
 AudioBuffer::ClearJSChannels()
 {
   mJSChannels.Clear();
   mozilla::DropJSObjects(this);
 }
 
-bool
-AudioBuffer::InitializeBuffers(uint32_t aNumberOfChannels, JSContext* aJSContext)
+/* static */ already_AddRefed<AudioBuffer>
+AudioBuffer::Create(AudioContext* aContext, uint32_t aNumberOfChannels,
+                    uint32_t aLength, float aSampleRate,
+                    JSContext* aJSContext, ErrorResult& aRv)
 {
-  if (!mJSChannels.SetCapacity(aNumberOfChannels)) {
-    return false;
+  // Note that a buffer with zero channels is permitted here for the sake of
+  // AudioProcessingEvent, where channel counts must match parameters passed
+  // to createScriptProcessor(), one of which may be zero.
+  if (aSampleRate < WebAudioUtils::MinSampleRate ||
+      aSampleRate > WebAudioUtils::MaxSampleRate ||
+      aNumberOfChannels > WebAudioUtils::MaxChannelCount ||
+      !aLength || aLength > INT32_MAX) {
+    aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
+    return nullptr;
   }
+
+  nsRefPtr<AudioBuffer> buffer =
+    new AudioBuffer(aContext, aNumberOfChannels, aLength, aSampleRate);
+
   for (uint32_t i = 0; i < aNumberOfChannels; ++i) {
     JS::Rooted<JSObject*> array(aJSContext,
-                                JS_NewFloat32Array(aJSContext, mLength));
+                                JS_NewFloat32Array(aJSContext, aLength));
     if (!array) {
-      return false;
+      aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
+      return nullptr;
     }
-    mJSChannels.AppendElement(array.get());
+    buffer->mJSChannels.AppendElement(array.get());
   }
 
-  return true;
+  return buffer.forget();
 }
 
 JSObject*
 AudioBuffer::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aScope)
 {
   return AudioBufferBinding::Wrap(aCx, aScope, this);
 }
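
The new factory folds the old construct-then-InitializeBuffers() dance into one
fallible call. A minimal sketch of the caller side under the new API (the
surrounding variables are hypothetical):

    ErrorResult rv;
    nsRefPtr<AudioBuffer> buffer =
      AudioBuffer::Create(aContext, 2 /* channels */, 44100 /* frames */,
                          44100.0f /* Hz */, aJSContext, rv);
    if (rv.Failed()) {
      // Out-of-range arguments throw NS_ERROR_DOM_INDEX_SIZE_ERR; a failed
      // Float32Array allocation throws NS_ERROR_OUT_OF_MEMORY.
      return;
    }
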
 
--- a/content/media/webaudio/AudioBuffer.h
+++ b/content/media/webaudio/AudioBuffer.h
@@ -28,27 +28,23 @@ class AudioContext;
 /**
  * An AudioBuffer keeps its data either in the mJSChannels objects, which
  * are Float32Arrays, or in mSharedChannels if the mJSChannels objects have
  * been neutered.
  */
 class AudioBuffer MOZ_FINAL : public nsWrapperCache
 {
 public:
-  AudioBuffer(AudioContext* aContext, uint32_t aLength,
-              float aSampleRate);
-  ~AudioBuffer();
+  static already_AddRefed<AudioBuffer>
+  Create(AudioContext* aContext, uint32_t aNumberOfChannels,
+         uint32_t aLength, float aSampleRate,
+         JSContext* aJSContext, ErrorResult& aRv);
 
   size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
 
-  // This function needs to be called in order to allocate
-  // all of the channels.  It is fallible!
-  bool InitializeBuffers(uint32_t aNumberOfChannels,
-                         JSContext* aJSContext);
-
   NS_INLINE_DECL_CYCLE_COLLECTING_NATIVE_REFCOUNTING(AudioBuffer)
   NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_NATIVE_CLASS(AudioBuffer)
 
   AudioContext* GetParentObject() const
   {
     return mContext;
   }
 
@@ -100,22 +96,26 @@ public:
   // initialized.
   void SetRawChannelContents(JSContext* aJSContext,
                              uint32_t aChannel,
                              float* aContents);
 
   void MixToMono(JSContext* aJSContext);
 
 protected:
+  AudioBuffer(AudioContext* aContext, uint32_t aNumberOfChannels,
+              uint32_t aLength, float aSampleRate);
+  ~AudioBuffer();
+
   bool RestoreJSChannelData(JSContext* aJSContext);
   void ClearJSChannels();
 
   nsRefPtr<AudioContext> mContext;
   // Float32Arrays
-  AutoFallibleTArray<JS::Heap<JSObject*>, 2> mJSChannels;
+  nsAutoTArray<JS::Heap<JSObject*>, 2> mJSChannels;
 
   // mSharedChannels aggregates the data from mJSChannels. This is non-null
   // if and only if the mJSChannels are neutered.
   nsRefPtr<ThreadSharedFloatArrayBufferList> mSharedChannels;
 
   uint32_t mLength;
   float mSampleRate;
 };
--- a/content/media/webaudio/AudioContext.cpp
+++ b/content/media/webaudio/AudioContext.cpp
@@ -147,18 +147,18 @@ AudioContext::Constructor(const GlobalOb
   if (!window) {
     aRv.Throw(NS_ERROR_FAILURE);
     return nullptr;
   }
 
   if (aNumberOfChannels == 0 ||
       aNumberOfChannels > WebAudioUtils::MaxChannelCount ||
       aLength == 0 ||
-      aSampleRate <= 1.0f ||
-      aSampleRate >= TRACK_RATE_MAX) {
+      aSampleRate < WebAudioUtils::MinSampleRate ||
+      aSampleRate > WebAudioUtils::MaxSampleRate) {
     // The DOM binding protects us against infinity and NaN
     aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
     return nullptr;
   }
 
   nsRefPtr<AudioContext> object = new AudioContext(window,
                                                    true,
                                                    aNumberOfChannels,
@@ -178,34 +178,23 @@ AudioContext::CreateBufferSource()
   return bufferNode.forget();
 }
 
 already_AddRefed<AudioBuffer>
 AudioContext::CreateBuffer(JSContext* aJSContext, uint32_t aNumberOfChannels,
                            uint32_t aLength, float aSampleRate,
                            ErrorResult& aRv)
 {
-  if (aSampleRate < 8000 || aSampleRate > 192000 || !aLength || !aNumberOfChannels) {
+  if (!aNumberOfChannels) {
     aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
     return nullptr;
   }
 
-  if (aLength > INT32_MAX) {
-    aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
-    return nullptr;
-  }
-
-  nsRefPtr<AudioBuffer> buffer =
-    new AudioBuffer(this, int32_t(aLength), aSampleRate);
-  if (!buffer->InitializeBuffers(aNumberOfChannels, aJSContext)) {
-    aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
-    return nullptr;
-  }
-
-  return buffer.forget();
+  return AudioBuffer::Create(this, aNumberOfChannels, aLength,
+                             aSampleRate, aJSContext, aRv);
 }
 
 already_AddRefed<AudioBuffer>
 AudioContext::CreateBuffer(JSContext* aJSContext, const ArrayBuffer& aBuffer,
                           bool aMixToMono, ErrorResult& aRv)
 {
   // Do not accept this method unless the legacy pref has been set.
   if (!Preferences::GetBool("media.webaudio.legacy.AudioContext")) {
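
Taken together, the validation that used to be duplicated at each call site now
lives in one place. A sketch of the predicate AudioBuffer::Create() enforces
(IsValidBufferRequest is a hypothetical name, not part of the patch;
CreateBuffer() additionally rejects a zero channel count before delegating):

    static bool IsValidBufferRequest(uint32_t aNumberOfChannels, uint32_t aLength,
                                     float aSampleRate)
    {
      // Zero channels are allowed here for AudioProcessingEvent's sake.
      return aNumberOfChannels <= WebAudioUtils::MaxChannelCount &&  // 32
             aLength != 0 && aLength <= INT32_MAX &&
             aSampleRate >= WebAudioUtils::MinSampleRate &&          // 8000 Hz
             aSampleRate <= WebAudioUtils::MaxSampleRate;            // 192000 Hz
    }
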
--- a/content/media/webaudio/AudioDestinationNode.cpp
+++ b/content/media/webaudio/AudioDestinationNode.cpp
@@ -125,20 +125,21 @@ public:
 
     AutoPushJSContext cx(context->GetJSContext());
     if (!cx) {
       return;
     }
     JSAutoRequest ar(cx);
 
     // Create the input buffer
-    nsRefPtr<AudioBuffer> renderedBuffer = new AudioBuffer(context,
-                                                           mLength,
-                                                           mSampleRate);
-    if (!renderedBuffer->InitializeBuffers(mInputChannels.Length(), cx)) {
+    ErrorResult rv;
+    nsRefPtr<AudioBuffer> renderedBuffer =
+      AudioBuffer::Create(context, mInputChannels.Length(),
+                          mLength, mSampleRate, cx, rv);
+    if (rv.Failed()) {
       return;
     }
     for (uint32_t i = 0; i < mInputChannels.Length(); ++i) {
       renderedBuffer->SetRawChannelContents(cx, i, mInputChannels[i]);
     }
 
     nsRefPtr<OfflineAudioCompletionEvent> event =
         new OfflineAudioCompletionEvent(context, nullptr, nullptr);
--- a/content/media/webaudio/AudioProcessingEvent.cpp
+++ b/content/media/webaudio/AudioProcessingEvent.cpp
@@ -31,22 +31,25 @@ AudioProcessingEvent::AudioProcessingEve
 }
 
 JSObject*
 AudioProcessingEvent::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aScope)
 {
   return AudioProcessingEventBinding::Wrap(aCx, aScope, this);
 }
 
-void
-AudioProcessingEvent::LazilyCreateBuffer(nsRefPtr<AudioBuffer>& aBuffer,
-                                         uint32_t aNumberOfChannels)
+already_AddRefed<AudioBuffer>
+AudioProcessingEvent::LazilyCreateBuffer(uint32_t aNumberOfChannels,
+                                         ErrorResult& aRv)
 {
   AutoPushJSContext cx(mNode->Context()->GetJSContext());
 
-  aBuffer = new AudioBuffer(mNode->Context(), mNode->BufferSize(),
-                            mNode->Context()->SampleRate());
-  aBuffer->InitializeBuffers(aNumberOfChannels, cx);
+  nsRefPtr<AudioBuffer> buffer =
+    AudioBuffer::Create(mNode->Context(), aNumberOfChannels,
+                        mNode->BufferSize(),
+                        mNode->Context()->SampleRate(), cx, aRv);
+  MOZ_ASSERT(buffer || aRv.ErrorCode() == NS_ERROR_OUT_OF_MEMORY);
+  return buffer.forget();
 }
 
 }
 }
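
The assertion encodes why only one failure mode remains here: the buffer size
and channel counts were validated when createScriptProcessor() was invoked, and
the context's sample rate is always within the supported range, so Create() can
only fail on the Float32Array allocations. Spelled out as comments (illustrative,
not part of the patch):

    // mNode->BufferSize()      -- range-checked at createScriptProcessor() time
    // aNumberOfChannels        -- capped at WebAudioUtils::MaxChannelCount, ditto
    // Context()->SampleRate()  -- always within [MinSampleRate, MaxSampleRate]
    // => a null buffer implies JS_NewFloat32Array() failed, i.e. OOM:
    MOZ_ASSERT(buffer || aRv.ErrorCode() == NS_ERROR_OUT_OF_MEMORY);
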
 
--- a/content/media/webaudio/AudioProcessingEvent.h
+++ b/content/media/webaudio/AudioProcessingEvent.h
@@ -38,40 +38,40 @@ public:
     mPlaybackTime = aPlaybackTime;
   }
 
   double PlaybackTime() const
   {
     return mPlaybackTime;
   }
 
-  AudioBuffer* InputBuffer()
+  AudioBuffer* GetInputBuffer(ErrorResult& aRv)
   {
     if (!mInputBuffer) {
-      LazilyCreateBuffer(mInputBuffer, mNumberOfInputChannels);
+      mInputBuffer = LazilyCreateBuffer(mNumberOfInputChannels, aRv);
     }
     return mInputBuffer;
   }
 
-  AudioBuffer* OutputBuffer()
+  AudioBuffer* GetOutputBuffer(ErrorResult& aRv)
   {
     if (!mOutputBuffer) {
-      LazilyCreateBuffer(mOutputBuffer, mNode->NumberOfOutputChannels());
+      mOutputBuffer = LazilyCreateBuffer(mNode->NumberOfOutputChannels(), aRv);
     }
     return mOutputBuffer;
   }
 
   bool HasOutputBuffer() const
   {
     return !!mOutputBuffer;
   }
 
 private:
-  void LazilyCreateBuffer(nsRefPtr<AudioBuffer>& aBuffer,
-                          uint32_t aNumberOfChannels);
+  already_AddRefed<AudioBuffer>
+  LazilyCreateBuffer(uint32_t aNumberOfChannels, ErrorResult& aRv);
 
 private:
   double mPlaybackTime;
   nsRefPtr<AudioBuffer> mInputBuffer;
   nsRefPtr<AudioBuffer> mOutputBuffer;
   nsRefPtr<ScriptProcessorNode> mNode;
   uint32_t mNumberOfInputChannels;
 };
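
The rename from InputBuffer()/OutputBuffer() to GetInputBuffer()/GetOutputBuffer()
follows the Gecko convention that accessors which can fail or return null take a
Get prefix. A caller sketch (hypothetical surrounding code):

    ErrorResult rv;
    AudioBuffer* input = event->GetInputBuffer(rv);
    if (rv.Failed()) {
      return;  // lazy creation hit OOM; the [Throws] binding rethrows to script
    }
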
--- a/content/media/webaudio/MediaBufferDecoder.cpp
+++ b/content/media/webaudio/MediaBufferDecoder.cpp
@@ -431,18 +431,20 @@ WebAudioDecodeJob::AllocateBuffer()
 
   // First, get a JSContext
   AutoPushJSContext cx(mContext->GetJSContext());
   if (!cx) {
     return false;
   }
 
   // Now create the AudioBuffer
-  mOutput = new AudioBuffer(mContext, mWriteIndex, mContext->SampleRate());
-  if (!mOutput->InitializeBuffers(mChannelBuffers.Length(), cx)) {
+  ErrorResult rv;
+  mOutput = AudioBuffer::Create(mContext, mChannelBuffers.Length(),
+                                mWriteIndex, mContext->SampleRate(), cx, rv);
+  if (rv.Failed()) {
     return false;
   }
 
   for (uint32_t i = 0; i < mChannelBuffers.Length(); ++i) {
     mOutput->SetRawChannelContents(cx, i, mChannelBuffers[i]);
   }
 
   return true;
--- a/content/media/webaudio/ScriptProcessorNode.cpp
+++ b/content/media/webaudio/ScriptProcessorNode.cpp
@@ -359,20 +359,22 @@ private:
         }
 
         AutoPushJSContext cx(node->Context()->GetJSContext());
         if (cx) {
 
           // Create the input buffer
           nsRefPtr<AudioBuffer> inputBuffer;
           if (!mNullInput) {
-            inputBuffer = new AudioBuffer(node->Context(),
-                                          node->BufferSize(),
-                                          node->Context()->SampleRate());
-            if (!inputBuffer->InitializeBuffers(mInputChannels.Length(), cx)) {
+            ErrorResult rv;
+            inputBuffer =
+              AudioBuffer::Create(node->Context(), mInputChannels.Length(),
+                                  node->BufferSize(),
+                                  node->Context()->SampleRate(), cx, rv);
+            if (rv.Failed()) {
               return NS_OK;
             }
             // Put the channel data inside it
             for (uint32_t i = 0; i < mInputChannels.Length(); ++i) {
               inputBuffer->SetRawChannelContents(cx, i, mInputChannels[i]);
             }
           }
 
@@ -382,20 +384,28 @@ private:
           // knows how to lazily create them if needed once the script tries to access
           // them.  Otherwise, we may be able to get away without creating them!
           nsRefPtr<AudioProcessingEvent> event = new AudioProcessingEvent(node, nullptr, nullptr);
           event->InitEvent(inputBuffer,
                            mInputChannels.Length(),
                            mPlaybackTime);
           node->DispatchTrustedEvent(event);
 
-          // Steal the output buffers
+          // Steal the output buffers if they have been set.  Don't create a
+          // buffer if the event handler never used one to return output;
+          // FinishProducingOutputBuffer() optimizes the output == null case.
+          // GetThreadSharedChannelsForRate() may also return null after OOM.
           nsRefPtr<ThreadSharedFloatArrayBufferList> output;
           if (event->HasOutputBuffer()) {
-            output = event->OutputBuffer()->GetThreadSharedChannelsForRate(cx);
+            ErrorResult rv;
+            AudioBuffer* buffer = event->GetOutputBuffer(rv);
+            // HasOutputBuffer() returning true means that GetOutputBuffer()
+            // will not fail.
+            MOZ_ASSERT(!rv.Failed());
+            output = buffer->GetThreadSharedChannelsForRate(cx);
           }
 
           // Append it to our output buffer queue
           node->GetSharedBuffers()->FinishProducingOutputBuffer(output, node->BufferSize());
         }
         return NS_OK;
       }
     private:
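
The MOZ_ASSERT(!rv.Failed()) above rests on the lazy-getter structure shown in
AudioProcessingEvent.h: HasOutputBuffer() is true only if mOutputBuffer was
already created, so the second call takes the cached path and runs no fallible
code. As comments (illustrative):

    // GetOutputBuffer(rv) expands to roughly:
    //   if (!mOutputBuffer) {                       // skipped: HasOutputBuffer()
    //     mOutputBuffer = LazilyCreateBuffer(...);  // the only fallible step
    //   }
    //   return mOutputBuffer;
    // so rv cannot have failed when the buffer already exists.
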
--- a/content/media/webaudio/WebAudioUtils.h
+++ b/content/media/webaudio/WebAudioUtils.h
@@ -20,92 +20,97 @@ namespace mozilla {
 
 class AudioNodeStream;
 class MediaStream;
 
 namespace dom {
 
 class AudioParamTimeline;
 
-struct WebAudioUtils {
-  // This is an arbitrary large number used to protect against OOMs.
-  // We can adjust it later if needed.
-  static const uint32_t MaxChannelCount = 32;
+namespace WebAudioUtils {
+  // 32 is the minimum required by the spec for createBuffer() and
+  // createScriptProcessor() and matches what is used by Blink.  The limit
+  // protects against large memory allocations.
+  const uint32_t MaxChannelCount = 32;
+  // AudioContext::CreateBuffer() "must support sample-rates in at least the
+  // range 22050 to 96000."
+  const uint32_t MinSampleRate = 8000;
+  const uint32_t MaxSampleRate = 192000;
 
-  static bool FuzzyEqual(float v1, float v2)
+  inline bool FuzzyEqual(float v1, float v2)
   {
     using namespace std;
     return fabsf(v1 - v2) < 1e-7f;
   }
-  static bool FuzzyEqual(double v1, double v2)
+  inline bool FuzzyEqual(double v1, double v2)
   {
     using namespace std;
     return fabs(v1 - v2) < 1e-7;
   }
 
   /**
    * Computes an exponential smoothing rate for a time based variable
    * over aDuration seconds.
    */
-  static double ComputeSmoothingRate(double aDuration, double aSampleRate)
+  inline double ComputeSmoothingRate(double aDuration, double aSampleRate)
   {
     return 1.0 - std::exp(-1.0 / (aDuration * aSampleRate));
   }
 
   /**
    * Converts AudioParamTimeline floating point time values to tick values
    * with respect to a source and a destination AudioNodeStream.
    *
    * This needs to be called for each AudioParamTimeline that gets sent to an
    * AudioNodeEngine on the engine side where the AudioParamTimeline is
    * received.  This means that such engines need to be aware of their source
    * and destination streams as well.
    */
-  static void ConvertAudioParamToTicks(AudioParamTimeline& aParam,
-                                       AudioNodeStream* aSource,
-                                       AudioNodeStream* aDest);
+  void ConvertAudioParamToTicks(AudioParamTimeline& aParam,
+                                AudioNodeStream* aSource,
+                                AudioNodeStream* aDest);
 
   /**
    * Converts a linear value to decibels.  Returns aMinDecibels if the linear
    * value is 0.
    */
-  static float ConvertLinearToDecibels(float aLinearValue, float aMinDecibels)
+  inline float ConvertLinearToDecibels(float aLinearValue, float aMinDecibels)
   {
     return aLinearValue ? 20.0f * std::log10(aLinearValue) : aMinDecibels;
   }
 
   /**
    * Converts a decibel value to a linear value.
    */
-  static float ConvertDecibelsToLinear(float aDecibels)
+  inline float ConvertDecibelsToLinear(float aDecibels)
   {
     return std::pow(10.0f, 0.05f * aDecibels);
   }
 
   /**
    * Converts a decibel to a linear value.
    */
-  static float ConvertDecibelToLinear(float aDecibel)
+  inline float ConvertDecibelToLinear(float aDecibel)
   {
     return std::pow(10.0f, 0.05f * aDecibel);
   }
 
-  static void FixNaN(double& aDouble)
+  inline void FixNaN(double& aDouble)
   {
     if (IsNaN(aDouble) || IsInfinite(aDouble)) {
       aDouble = 0.0;
     }
   }
 
-  static double DiscreteTimeConstantForSampleRate(double timeConstant, double sampleRate)
+  inline double DiscreteTimeConstantForSampleRate(double timeConstant, double sampleRate)
   {
     return 1.0 - std::exp(-1.0 / (sampleRate * timeConstant));
   }
 
-  static bool IsTimeValid(double aTime)
+  inline bool IsTimeValid(double aTime)
   {
     return aTime >= 0 &&  aTime <= (MEDIA_TIME_MAX >> MEDIA_TIME_FRAC_BITS);
   }
 
   /**
    * Converts a floating point value to an integral type in a safe and
    * platform agnostic way.  The following program demonstrates the kinds
    * of ways things can go wrong depending on the CPU architecture you're
@@ -163,17 +168,17 @@ struct WebAudioUtils {
    *  999999995904.000000 2147483647
    *  nan 0
    *
    * Note that the caller is responsible to make sure that the value
    * passed to this function is not a NaN.  This function will abort if
    * it sees a NaN.
    */
   template <typename IntType, typename FloatType>
-  static IntType TruncateFloatToInt(FloatType f)
+  IntType TruncateFloatToInt(FloatType f)
   {
     using namespace std;
 
     static_assert(mozilla::IsIntegral<IntType>::value == true,
                   "IntType must be an integral type");
     static_assert(mozilla::IsFloatingPoint<FloatType>::value == true,
                   "FloatType must be a floating point type");
 
@@ -194,28 +199,28 @@ struct WebAudioUtils {
       // integral value for this type, just clamp to the minimum value.
       return numeric_limits<IntType>::min();
     }
 
     // Otherwise, this conversion must be well defined.
     return IntType(f);
   }
 
-  static void Shutdown();
+  void Shutdown();
 
-  static int
+  int
   SpeexResamplerProcess(SpeexResamplerState* aResampler,
                         uint32_t aChannel,
                         const float* aIn, uint32_t* aInLen,
                         float* aOut, uint32_t* aOutLen);
 
-  static int
+  int
   SpeexResamplerProcess(SpeexResamplerState* aResampler,
                         uint32_t aChannel,
                         const int16_t* aIn, uint32_t* aInLen,
                         float* aOut, uint32_t* aOutLen);
-};
+}
 
 }
 }
 
 #endif
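
A side effect of turning the struct of static members into a namespace: functions
defined inside a class body are implicitly inline, while free functions defined
in a header are not, so every definition that stays in the header gains an
explicit inline to avoid ODR violations when the header is included from multiple
translation units. For example (a sketch, not the patch text):

    // As a static member, implicitly inline:
    //   struct WebAudioUtils { static bool FuzzyEqual(float v1, float v2) {...} };
    // As a namespace-scope function in a header, 'inline' must be spelled out:
    namespace WebAudioUtils {
      inline bool FuzzyEqual(float v1, float v2)
      {
        return std::fabs(v1 - v2) < 1e-7f;
      }
    }
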
 
--- a/content/media/webaudio/blink/DynamicsCompressorKernel.cpp
+++ b/content/media/webaudio/blink/DynamicsCompressorKernel.cpp
@@ -32,17 +32,17 @@
 #include <algorithm>
 
 #include "mozilla/FloatingPoint.h"
 #include "mozilla/Constants.h"
 #include "WebAudioUtils.h"
 
 using namespace std;
 
-using mozilla::dom::WebAudioUtils;
+using namespace mozilla::dom; // for WebAudioUtils
 using mozilla::IsInfinite;
 using mozilla::IsNaN;
 
 namespace WebCore {
 
 
 // Metering hits peaks instantly, but releases this fast (in seconds).
 const float meteringReleaseTimeConstant = 0.325f;
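
The switch from a using-declaration to a using-directive here is forced by the
namespace conversion above: a using-declaration can name a class, but it is
ill-formed for a namespace. Illustration (the alias line is a hypothetical
alternative, not used by the patch):

    using mozilla::dom::WebAudioUtils;            // OK while WebAudioUtils was a
                                                  // struct; ill-formed for a namespace
    using namespace mozilla::dom;                 // what the patch uses instead
    namespace WAU = mozilla::dom::WebAudioUtils;  // a namespace alias would also work
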
--- a/dom/webidl/AudioProcessingEvent.webidl
+++ b/dom/webidl/AudioProcessingEvent.webidl
@@ -7,14 +7,17 @@
  * https://dvcs.w3.org/hg/audio/raw-file/tip/webaudio/specification.html
  *
  * Copyright © 2012 W3C® (MIT, ERCIM, Keio), All Rights Reserved. W3C
  * liability, trademark and document use rules apply.
  */
 
 interface AudioProcessingEvent : Event {
 
-    readonly attribute double playbackTime;
-    readonly attribute AudioBuffer inputBuffer;
-    readonly attribute AudioBuffer outputBuffer;
+  readonly attribute double playbackTime;
+
+  [Throws]
+  readonly attribute AudioBuffer inputBuffer;
+  [Throws]
+  readonly attribute AudioBuffer outputBuffer;
 
 };
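
In Gecko's WebIDL bindings, marking an attribute [Throws] makes the generated
getter pass an ErrorResult through to the native method, which is exactly the
signature change made in AudioProcessingEvent.h above:

    // Native getters the binding now expects (mirrors AudioProcessingEvent.h):
    AudioBuffer* GetInputBuffer(ErrorResult& aRv);
    AudioBuffer* GetOutputBuffer(ErrorResult& aRv);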