Bug 999908 - Remove support for the Web Audio legacy prefs for AudioBufferSourceNode, AudioContext, and AudioParam; r=padenot
authorEhsan Akhgari <ehsan@mozilla.com>
Wed, 23 Apr 2014 08:56:42 -0400
changeset 180117 215537ea2189f250cc695da40737350f51ffec5b
parent 180116 691d410b7f59dff294f91c3fd34e5688fea154e9
child 180149 794f8b74baaa20b1c799811aa64e81b5309cc117
push id272
push userpvanderbeken@mozilla.com
push dateMon, 05 May 2014 16:31:18 +0000
reviewerspadenot
bugs999908
milestone31.0a1
Bug 999908 - Remove support for the Web Audio legacy prefs for AudioBufferSourceNode, AudioContext, and AudioParam; r=padenot
content/media/webaudio/AudioBuffer.cpp
content/media/webaudio/AudioBuffer.h
content/media/webaudio/AudioBufferSourceNode.h
content/media/webaudio/AudioContext.cpp
content/media/webaudio/AudioContext.h
content/media/webaudio/AudioParam.h
content/media/webaudio/MediaBufferDecoder.cpp
content/media/webaudio/MediaBufferDecoder.h
content/media/webaudio/test/mochitest.ini
content/media/webaudio/test/test_AudioParam.html
content/media/webaudio/test/test_delayNode.html
content/media/webaudio/test/test_gainNode.html
content/media/webaudio/test/test_mediaDecoding.html
content/media/webaudio/test/test_scriptProcessorNode.html
content/media/webaudio/test/test_singleSourceDest.html
dom/webidl/AudioBufferSourceNode.webidl
dom/webidl/AudioContext.webidl
dom/webidl/AudioParam.webidl
testing/profiles/prefs_general.js
--- a/content/media/webaudio/AudioBuffer.cpp
+++ b/content/media/webaudio/AudioBuffer.cpp
@@ -223,43 +223,16 @@ AudioBuffer::GetThreadSharedChannelsForR
 
     mSharedChannels =
       StealJSArrayDataIntoThreadSharedFloatArrayBufferList(aJSContext, mJSChannels);
   }
 
   return mSharedChannels;
 }
 
-void
-AudioBuffer::MixToMono(JSContext* aJSContext)
-{
-  if (mJSChannels.Length() == 1) {
-    // The buffer is already mono
-    return;
-  }
-
-  // Prepare the input channels
-  nsAutoTArray<const void*, GUESS_AUDIO_CHANNELS> channels;
-  channels.SetLength(mJSChannels.Length());
-  for (uint32_t i = 0; i < mJSChannels.Length(); ++i) {
-    channels[i] = JS_GetFloat32ArrayData(mJSChannels[i]);
-  }
-
-  // Prepare the output channels
-  float* downmixBuffer = new float[mLength];
-
-  // Perform the down-mix
-  AudioChannelsDownMix(channels, &downmixBuffer, 1, mLength);
-
-  // Truncate the shared channels and copy the downmixed data over
-  mJSChannels.SetLength(1);
-  SetRawChannelContents(aJSContext, 0, downmixBuffer);
-  delete[] downmixBuffer;
-}
-
 size_t
 AudioBuffer::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
 {
   size_t amount = aMallocSizeOf(this);
   amount += mJSChannels.SizeOfExcludingThis(aMallocSizeOf);
   if (mSharedChannels) {
     amount += mSharedChannels->SizeOfExcludingThis(aMallocSizeOf);
   }
--- a/content/media/webaudio/AudioBuffer.h
+++ b/content/media/webaudio/AudioBuffer.h
@@ -96,18 +96,16 @@ public:
   // This replaces the contents of the JS array for the given channel.
   // This function needs to be called on an AudioBuffer which has not been
   // handed off to the content yet, and right after the object has been
   // initialized.
   void SetRawChannelContents(JSContext* aJSContext,
                              uint32_t aChannel,
                              float* aContents);
 
-  void MixToMono(JSContext* aJSContext);
-
 protected:
   bool RestoreJSChannelData(JSContext* aJSContext);
   void ClearJSChannels();
 
   nsRefPtr<AudioContext> mContext;
   // Float32Arrays
   AutoFallibleTArray<JS::Heap<JSObject*>, 2> mJSChannels;
 
--- a/content/media/webaudio/AudioBufferSourceNode.h
+++ b/content/media/webaudio/AudioBufferSourceNode.h
@@ -39,32 +39,17 @@ public:
   }
   NS_DECL_ISUPPORTS_INHERITED
   NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(AudioBufferSourceNode, AudioNode)
 
   virtual JSObject* WrapObject(JSContext* aCx) MOZ_OVERRIDE;
 
   void Start(double aWhen, double aOffset,
              const Optional<double>& aDuration, ErrorResult& aRv);
-  void NoteOn(double aWhen, ErrorResult& aRv)
-  {
-    Start(aWhen, 0.0, Optional<double>(), aRv);
-  }
-  void NoteGrainOn(double aWhen, double aOffset,
-                   double aDuration, ErrorResult& aRv)
-  {
-    Optional<double> duration;
-    duration.Construct(aDuration);
-    Start(aWhen, aOffset, duration, aRv);
-  }
   void Stop(double aWhen, ErrorResult& aRv);
-  void NoteOff(double aWhen, ErrorResult& aRv)
-  {
-    Stop(aWhen, aRv);
-  }
 
   AudioBuffer* GetBuffer(JSContext* aCx) const
   {
     return mBuffer;
   }
   void SetBuffer(JSContext* aCx, AudioBuffer* aBuffer)
   {
     mBuffer = aBuffer;
--- a/content/media/webaudio/AudioContext.cpp
+++ b/content/media/webaudio/AudioContext.cpp
@@ -199,49 +199,16 @@ AudioContext::CreateBuffer(JSContext* aJ
   if (!buffer->InitializeBuffers(aNumberOfChannels, aJSContext)) {
     aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
     return nullptr;
   }
 
   return buffer.forget();
 }
 
-already_AddRefed<AudioBuffer>
-AudioContext::CreateBuffer(JSContext* aJSContext, const ArrayBuffer& aBuffer,
-                          bool aMixToMono, ErrorResult& aRv)
-{
-  // Do not accept this method unless the legacy pref has been set.
-  if (!Preferences::GetBool("media.webaudio.legacy.AudioContext")) {
-    aRv.ThrowNotEnoughArgsError();
-    return nullptr;
-  }
-
-  // Sniff the content of the media.
-  // Failed type sniffing will be handled by SyncDecodeMedia.
-  nsAutoCString contentType;
-  NS_SniffContent(NS_DATA_SNIFFER_CATEGORY, nullptr,
-                  aBuffer.Data(), aBuffer.Length(),
-                  contentType);
-
-  nsRefPtr<WebAudioDecodeJob> job =
-    new WebAudioDecodeJob(contentType, this, aBuffer);
-
-  if (mDecoder.SyncDecodeMedia(contentType.get(),
-                               aBuffer.Data(), aBuffer.Length(), *job) &&
-      job->mOutput) {
-    nsRefPtr<AudioBuffer> buffer = job->mOutput.forget();
-    if (aMixToMono) {
-      buffer->MixToMono(aJSContext);
-    }
-    return buffer.forget();
-  }
-
-  return nullptr;
-}
-
 namespace {
 
 bool IsValidBufferSize(uint32_t aBufferSize) {
   switch (aBufferSize) {
   case 0:       // let the implementation choose the buffer size
   case 256:
   case 512:
   case 1024:
--- a/content/media/webaudio/AudioContext.h
+++ b/content/media/webaudio/AudioContext.h
@@ -121,68 +121,42 @@ public:
 
   already_AddRefed<AudioBufferSourceNode> CreateBufferSource();
 
   already_AddRefed<AudioBuffer>
   CreateBuffer(JSContext* aJSContext, uint32_t aNumberOfChannels,
                uint32_t aLength, float aSampleRate,
                ErrorResult& aRv);
 
-  already_AddRefed<AudioBuffer>
-  CreateBuffer(JSContext* aJSContext, const ArrayBuffer& aBuffer,
-               bool aMixToMono, ErrorResult& aRv);
-
   already_AddRefed<MediaStreamAudioDestinationNode>
   CreateMediaStreamDestination(ErrorResult& aRv);
 
   already_AddRefed<ScriptProcessorNode>
   CreateScriptProcessor(uint32_t aBufferSize,
                         uint32_t aNumberOfInputChannels,
                         uint32_t aNumberOfOutputChannels,
                         ErrorResult& aRv);
 
-  already_AddRefed<ScriptProcessorNode>
-  CreateJavaScriptNode(uint32_t aBufferSize,
-                       uint32_t aNumberOfInputChannels,
-                       uint32_t aNumberOfOutputChannels,
-                       ErrorResult& aRv)
-  {
-    return CreateScriptProcessor(aBufferSize, aNumberOfInputChannels,
-                                 aNumberOfOutputChannels, aRv);
-  }
-
   already_AddRefed<AnalyserNode>
   CreateAnalyser();
 
   already_AddRefed<GainNode>
   CreateGain();
 
   already_AddRefed<WaveShaperNode>
   CreateWaveShaper();
 
-  already_AddRefed<GainNode>
-  CreateGainNode()
-  {
-    return CreateGain();
-  }
-
   already_AddRefed<MediaElementAudioSourceNode>
   CreateMediaElementSource(HTMLMediaElement& aMediaElement, ErrorResult& aRv);
   already_AddRefed<MediaStreamAudioSourceNode>
   CreateMediaStreamSource(DOMMediaStream& aMediaStream, ErrorResult& aRv);
 
   already_AddRefed<DelayNode>
   CreateDelay(double aMaxDelayTime, ErrorResult& aRv);
 
-  already_AddRefed<DelayNode>
-  CreateDelayNode(double aMaxDelayTime, ErrorResult& aRv)
-  {
-    return CreateDelay(aMaxDelayTime, aRv);
-  }
-
   already_AddRefed<PannerNode>
   CreatePanner();
 
   already_AddRefed<ConvolverNode>
   CreateConvolver();
 
   already_AddRefed<ChannelSplitterNode>
   CreateChannelSplitter(uint32_t aNumberOfOutputs, ErrorResult& aRv);
--- a/content/media/webaudio/AudioParam.h
+++ b/content/media/webaudio/AudioParam.h
@@ -104,20 +104,16 @@ public:
     if (!WebAudioUtils::IsTimeValid(aStartTime) ||
         !WebAudioUtils::IsTimeValid(aTimeConstant)) {
       aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
       return;
     }
     AudioParamTimeline::SetTargetAtTime(aTarget, DOMTimeToStreamTime(aStartTime), aTimeConstant, aRv);
     mCallback(mNode);
   }
-  void SetTargetValueAtTime(float aTarget, double aStartTime, double aTimeConstant, ErrorResult& aRv)
-  {
-    SetTargetAtTime(aTarget, aStartTime, aTimeConstant, aRv);
-  }
   void CancelScheduledValues(double aStartTime, ErrorResult& aRv)
   {
     if (!WebAudioUtils::IsTimeValid(aStartTime)) {
       aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
       return;
     }
     AudioParamTimeline::CancelScheduledValues(DOMTimeToStreamTime(aStartTime));
     mCallback(mNode);
--- a/content/media/webaudio/MediaBufferDecoder.cpp
+++ b/content/media/webaudio/MediaBufferDecoder.cpp
@@ -127,18 +127,16 @@ private:
                               NS_DISPATCH_NORMAL);
 
       nsCOMPtr<nsIRunnable> event =
         new ReportResultTask(mDecodeJob, &WebAudioDecodeJob::OnFailure, aErrorCode);
       NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
     }
   }
 
-  void RunNextPhase();
-
   void Decode();
   void AllocateBuffer();
   void CallbackTheResult();
 
   void Cleanup()
   {
     MOZ_ASSERT(NS_IsMainThread());
     // MediaDecoderReader expects that BufferDecoder is alive.
@@ -202,40 +200,16 @@ MediaDecodeTask::CreateReader()
   nsresult rv = mDecoderReader->Init(nullptr);
   if (NS_FAILED(rv)) {
     return false;
   }
 
   return true;
 }
 
-void
-MediaDecodeTask::RunNextPhase()
-{
-  // This takes care of handling the logic of where to run the next phase.
-  // If we were invoked synchronously, we do not have a thread pool and
-  // everything happens on the main thread. Just invoke Run() in that case.
-  // Otherwise, some things happen on the main thread and others are run
-  // in the thread pool.
-  if (!mThreadPool) {
-    Run();
-    return;
-  }
-
-  switch (mPhase) {
-  case PhaseEnum::AllocateBuffer:
-    MOZ_ASSERT(!NS_IsMainThread());
-    NS_DispatchToMainThread(this);
-    break;
-  case PhaseEnum::Decode:
-  case PhaseEnum::Done:
-    MOZ_CRASH("Invalid phase Decode");
-  }
-}
-
 class AutoResampler {
 public:
   AutoResampler()
     : mResampler(nullptr)
   {}
   ~AutoResampler()
   {
     if (mResampler) {
@@ -254,18 +228,17 @@ public:
 
 private:
   SpeexResamplerState* mResampler;
 };
 
 void
 MediaDecodeTask::Decode()
 {
-  MOZ_ASSERT(!mThreadPool == NS_IsMainThread(),
-             "We should be on the main thread only if we don't have a thread pool");
+  MOZ_ASSERT(!NS_IsMainThread());
 
   mBufferDecoder->BeginDecoding(NS_GetCurrentThread());
 
   // Tell the decoder reader that we are not going to play the data directly,
   // and that we should not reject files with more channels than the audio
  // backend supports.
   mDecoderReader->SetIgnoreAudioOutputFormat();
 
@@ -390,17 +363,17 @@ MediaDecodeTask::Decode()
         mDecodeJob.mWriteIndex += outSamples;
         MOZ_ASSERT(mDecodeJob.mWriteIndex <= resampledFrames);
         MOZ_ASSERT(inSamples == inputLatency);
       }
     }
   }
 
   mPhase = PhaseEnum::AllocateBuffer;
-  RunNextPhase();
+  NS_DispatchToMainThread(this);
 }
 
 void
 MediaDecodeTask::AllocateBuffer()
 {
   MOZ_ASSERT(NS_IsMainThread());
 
   if (!mDecodeJob.AllocateBuffer()) {
@@ -485,39 +458,16 @@ MediaBufferDecoder::AsyncDecodeMedia(con
                            WebAudioDecodeJob::UnknownError);
     NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
   } else {
     mThreadPool->Dispatch(task, nsIThreadPool::DISPATCH_NORMAL);
   }
 }
 
 bool
-MediaBufferDecoder::SyncDecodeMedia(const char* aContentType, uint8_t* aBuffer,
-                                    uint32_t aLength,
-                                    WebAudioDecodeJob& aDecodeJob)
-{
-  // Do not attempt to decode the media if we were not successful at sniffing
-  // the content type.
-  if (!*aContentType ||
-      strcmp(aContentType, APPLICATION_OCTET_STREAM) == 0) {
-    return false;
-  }
-
-  nsRefPtr<MediaDecodeTask> task =
-    new MediaDecodeTask(aContentType, aBuffer, aLength, aDecodeJob, nullptr);
-  if (!task->CreateReader()) {
-    return false;
-  }
-
-  task->Run();
-  return true;
-}
-
-
-bool
 MediaBufferDecoder::EnsureThreadPoolInitialized()
 {
   if (!mThreadPool) {
     mThreadPool = SharedThreadPool::Get(NS_LITERAL_CSTRING("MediaBufferDecoder"));
     if (!mThreadPool) {
       return false;
     }
   }
@@ -531,25 +481,22 @@ WebAudioDecodeJob::WebAudioDecodeJob(con
                                      DecodeErrorCallback* aFailureCallback)
   : mContentType(aContentType)
   , mWriteIndex(0)
   , mContext(aContext)
   , mSuccessCallback(aSuccessCallback)
   , mFailureCallback(aFailureCallback)
 {
   MOZ_ASSERT(aContext);
+  MOZ_ASSERT(aSuccessCallback);
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_COUNT_CTOR(WebAudioDecodeJob);
 
   mArrayBuffer = aBuffer.Obj();
 
-  MOZ_ASSERT(aSuccessCallback ||
-             (!aSuccessCallback && !aFailureCallback),
-             "If a success callback is not passed, no failure callback should be passed either");
-
   mozilla::HoldJSObjects(this);
 }
 
 WebAudioDecodeJob::~WebAudioDecodeJob()
 {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_COUNT_DTOR(WebAudioDecodeJob);
   mArrayBuffer = nullptr;
@@ -559,20 +506,18 @@ WebAudioDecodeJob::~WebAudioDecodeJob()
 void
 WebAudioDecodeJob::OnSuccess(ErrorCode aErrorCode)
 {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(aErrorCode == NoError);
 
   // Ignore errors in calling the callback, since there is not much that we can
   // do about it here.
-  if (mSuccessCallback) {
-    ErrorResult rv;
-    mSuccessCallback->Call(*mOutput, rv);
-  }
+  ErrorResult rv;
+  mSuccessCallback->Call(*mOutput, rv);
 
   mContext->RemoveFromDecodeQueue(this);
 }
 
 void
 WebAudioDecodeJob::OnFailure(ErrorCode aErrorCode)
 {
   MOZ_ASSERT(NS_IsMainThread());
--- a/content/media/webaudio/MediaBufferDecoder.h
+++ b/content/media/webaudio/MediaBufferDecoder.h
@@ -75,19 +75,16 @@ struct WebAudioDecodeJob MOZ_FINAL
  * thread-pool) and provides a clean external interface.
  */
 class MediaBufferDecoder
 {
 public:
   void AsyncDecodeMedia(const char* aContentType, uint8_t* aBuffer,
                         uint32_t aLength, WebAudioDecodeJob& aDecodeJob);
 
-  bool SyncDecodeMedia(const char* aContentType, uint8_t* aBuffer,
-                       uint32_t aLength, WebAudioDecodeJob& aDecodeJob);
-
   size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
   {
     return 0;
   }
 
 private:
   bool EnsureThreadPoolInitialized();
 
--- a/content/media/webaudio/test/mochitest.ini
+++ b/content/media/webaudio/test/mochitest.ini
@@ -21,17 +21,16 @@ support-files =
   ting-44.1k-2ch.wav
   ting-48k-1ch.wav
   ting-48k-2ch.wav
   webaudio.js
 
 [test_AudioBuffer.html]
 [test_AudioContext.html]
 [test_AudioListener.html]
-[test_AudioParam.html]
 [test_OfflineAudioContext.html]
 [test_analyserNode.html]
 [test_audioBufferSourceNode.html]
 [test_audioBufferSourceNodeEnded.html]
 [test_audioBufferSourceNodeLazyLoopParam.html]
 [test_audioBufferSourceNodeLoop.html]
 [test_audioBufferSourceNodeLoopStartEnd.html]
 [test_audioBufferSourceNodeLoopStartEndSame.html]
deleted file mode 100644
--- a/content/media/webaudio/test/test_AudioParam.html
+++ /dev/null
@@ -1,36 +0,0 @@
-<!DOCTYPE HTML>
-<html>
-<head>
-  <title>Test AudioParam</title>
-  <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
-  <script type="text/javascript" src="webaudio.js"></script>
-  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
-</head>
-<body>
-<pre id="test">
-<script class="testbody" type="text/javascript">
-
-var context = new AudioContext();
-var gain = context.createGain().gain;
-
-ok("value" in gain, "The value attr must exist");
-gain.value = 0.5;
-ok("defaultValue" in gain, "The defaultValue attr must exist");
-(function() {
-  "use strict"; // in order to get the readOnly setter to throw
-  expectTypeError(function() {
-    gain.defaultValue = 0.5;
-  });
-})();
-
-gain.setValueAtTime(1, 0.25);
-gain.linearRampToValueAtTime(0.75, 0.5);
-gain.exponentialRampToValueAtTime(0.1, 0.75);
-gain.setTargetAtTime(0.2, 1, 0.5);
-gain.setTargetValueAtTime(0.3, 1.25, 0.5);
-gain.cancelScheduledValues(1.5);
-
-</script>
-</pre>
-</body>
-</html>
--- a/content/media/webaudio/test/test_delayNode.html
+++ b/content/media/webaudio/test/test_delayNode.html
@@ -18,40 +18,30 @@ var gTest = {
     for (var i = 0; i < 2048; ++i) {
       buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
     }
 
     var source = context.createBufferSource();
 
     var delay = context.createDelay();
 
-    var delay2 = context.createDelayNode();
-    isnot(delay, delay2, "createDelayNode should create a different delay node");
-
     source.buffer = buffer;
 
     source.connect(delay);
 
     ok(delay.delayTime, "The audioparam member must exist");
     is(delay.delayTime.value, 0, "Correct initial value");
     is(delay.delayTime.defaultValue, 0, "Correct default value");
     delay.delayTime.value = 0.5;
     is(delay.delayTime.value, 0.5, "Correct initial value");
     is(delay.delayTime.defaultValue, 0, "Correct default value");
     is(delay.channelCount, 2, "delay node has 2 input channels by default");
     is(delay.channelCountMode, "max", "Correct channelCountMode for the delay node");
     is(delay.channelInterpretation, "speakers", "Correct channelCountInterpretation for the delay node");
 
-    var delay2 = context.createDelay(2);
-    is(delay2.delayTime.value, 0, "Correct initial value");
-    is(delay2.delayTime.defaultValue, 0, "Correct default value");
-    delay2.delayTime.value = 0.5;
-    is(delay2.delayTime.value, 0.5, "Correct initial value");
-    is(delay2.delayTime.defaultValue, 0, "Correct default value");
-
     expectException(function() {
       context.createDelay(0);
     }, DOMException.NOT_SUPPORTED_ERR);
     expectException(function() {
       context.createDelay(180);
     }, DOMException.NOT_SUPPORTED_ERR);
     expectTypeError(function() {
       context.createDelay(NaN);
--- a/content/media/webaudio/test/test_gainNode.html
+++ b/content/media/webaudio/test/test_gainNode.html
@@ -18,19 +18,16 @@ var gTest = {
     for (var i = 0; i < 2048; ++i) {
       buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
     }
 
     var source = context.createBufferSource();
 
     var gain = context.createGain();
 
-    var gain2 = context.createGainNode();
-    isnot(gain, gain2, "createGainNode should create a different gain node");
-
     source.buffer = buffer;
 
     source.connect(gain);
 
     ok(gain.gain, "The audioparam member must exist");
     is(gain.gain.value, 1.0, "Correct initial value");
     is(gain.gain.defaultValue, 1.0, "Correct default value");
     gain.gain.value = 0.5;
--- a/content/media/webaudio/test/test_mediaDecoding.html
+++ b/content/media/webaudio/test/test_mediaDecoding.html
@@ -297,36 +297,32 @@ function checkResampledBuffer(buffer, te
   cx.startRendering();
 }
 
 function runResampling(test, response, callback) {
   var sampleRate = test.sampleRate == 44100 ? 48000 : 44100;
   var cx = new OfflineAudioContext(1, 1, sampleRate);
   cx.decodeAudioData(response, function onSuccess(asyncResult) {
     is(asyncResult.sampleRate, sampleRate, "Correct sample rate");
-    syncResult = cx.createBuffer(response, false);
-    compareBuffers(syncResult, asyncResult);
 
     checkResampledBuffer(asyncResult, test, callback);
   }, function onFailure() {
     ok(false, "Expected successful decode with resample");
     callback();
   });
 }
 
 function runTest(test, response, callback) {
   var expectCallback = false;
   var cx = new OfflineAudioContext(test.numberOfChannels || 1,
                                    test.frames || 1, test.sampleRate);
   cx.decodeAudioData(response, function onSuccess(asyncResult) {
     ok(expectCallback, "Success callback should fire asynchronously");
     ok(test.valid, "Did expect success for test " + test.url);
 
-    syncResult = cx.createBuffer(response, false);
-    compareBuffers(syncResult, asyncResult);
     checkAudioBuffer(asyncResult, test);
 
     test.expectedBuffer = asyncResult;
     test.nativeContext = cx;
     runResampling(test, response, callback);
   }, function onFailure() {
     ok(expectCallback, "Failure callback should fire asynchronously");
     ok(!test.valid, "Did expect failure for test " + test.url);
--- a/content/media/webaudio/test/test_scriptProcessorNode.html
+++ b/content/media/webaudio/test/test_scriptProcessorNode.html
@@ -15,17 +15,17 @@
 // not be easy to map to OfflineAudioContext, as ScriptProcessorNodes
 // can experience delays.
 
 SimpleTest.waitForExplicitFinish();
 addLoadEvent(function() {
   var context = new AudioContext();
   var buffer = null;
 
-  var sourceSP = context.createJavaScriptNode(2048);
+  var sourceSP = context.createScriptProcessor(2048);
   sourceSP.addEventListener("audioprocess", function(e) {
     // generate the audio
     for (var i = 0; i < 2048; ++i) {
       // Make sure our first sample won't be zero
       e.outputBuffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * (i + 1) / context.sampleRate);
       e.outputBuffer.getChannelData(0)[i] = Math.sin(880 * 2 * Math.PI * (i + 1) / context.sampleRate);
     }
     // Remember our generated audio
--- a/content/media/webaudio/test/test_singleSourceDest.html
+++ b/content/media/webaudio/test/test_singleSourceDest.html
@@ -23,64 +23,16 @@ addLoadEvent(function() {
   is(destination.context, context, "Destination node has proper context");
   is(destination.numberOfInputs, 1, "Destination node has 1 inputs");
   is(destination.numberOfOutputs, 0, "Destination node has 0 outputs");
   is(destination.channelCount, 2, "Destination node has 2 input channels by default");
   is(destination.channelCountMode, "explicit", "Correct channelCountMode for the destination node");
   is(destination.channelInterpretation, "speakers", "Correct channelCountInterpretation for the destination node");
   ok(destination instanceof EventTarget, "AudioNodes must be EventTargets");
 
-  testWith(context, buffer, destination, function(source) {
-    source.start(0);
-  }, function(source) {
-    source.stop();
-  }, function() {
-    testWith(context, buffer, destination, function(source) {
-      source.start(0, 1);
-    }, function(source) {
-      expectTypeError(function() {
-        source.noteOff();
-      });
-      source.noteOff(0);
-    }, function() {
-      testWith(context, buffer, destination, function(source) {
-        source.start(0, 1, 0.5);
-      }, function(source) {
-        source.stop(0);
-      }, function() {
-        testWith(context, buffer, destination, function(source) {
-          source.noteOn(0);
-        }, function(source) {
-          source.noteOff(0);
-        }, function() {
-          testWith(context, buffer, destination, function(source) {
-            source.noteGrainOn(0, 1, 0.5);
-          }, function(source) {
-            source.stop();
-          }, function() {
-            SimpleTest.finish();
-          });
-        });
-      });
-    });
-  });
-});
-
-function testWith(context, buffer, destination, start, stop, callback)
-{
-  var source = createNode(context, buffer, destination);
-  start(source);
-  SimpleTest.executeSoon(function() {
-    stop(source);
-    callback();
-    source.disconnect();
-  });
-}
-
-function createNode(context, buffer, destination) {
   var source = context.createBufferSource();
   is(source.context, context, "Source node has proper context");
   is(source.numberOfInputs, 0, "Source node has 0 inputs");
   is(source.numberOfOutputs, 1, "Source node has 1 outputs");
   is(source.loop, false, "Source node is not looping");
   is(source.loopStart, 0, "Correct default value for loopStart");
   is(source.loopEnd, 0, "Correct default value for loopEnd");
   ok(!source.buffer, "Source node should not have a buffer when it's created");
@@ -97,15 +49,22 @@ function createNode(context, buffer, des
 
   source.connect(destination);
 
   is(source.numberOfInputs, 0, "Source node has 0 inputs");
   is(source.numberOfOutputs, 1, "Source node has 0 outputs");
   is(destination.numberOfInputs, 1, "Destination node has 0 inputs");
   is(destination.numberOfOutputs, 0, "Destination node has 0 outputs");
 
-  return source;
-}
+  source.start(0);
+  SimpleTest.executeSoon(function() {
+    source.stop(0);
+    source.disconnect();
+
+    SpecialPowers.clearUserPref("media.webaudio.enabled");
+    SimpleTest.finish();
+  });
+});
 
 </script>
 </pre>
 </body>
 </html>
--- a/dom/webidl/AudioBufferSourceNode.webidl
+++ b/dom/webidl/AudioBufferSourceNode.webidl
@@ -23,25 +23,8 @@ interface AudioBufferSourceNode : AudioN
     [Throws]
     void start(optional double when = 0, optional double grainOffset = 0,
                optional double grainDuration);
     [Throws]
     void stop(optional double when = 0);
 
     attribute EventHandler onended;
 };
-
-/*
- * The origin of this IDL file is
- * https://dvcs.w3.org/hg/audio/raw-file/tip/webaudio/specification.html#AlternateNames
- */
-partial interface AudioBufferSourceNode {
-    // Same as start()
-    [Throws,Pref="media.webaudio.legacy.AudioBufferSourceNode"]
-    void noteOn(double when);
-    [Throws,Pref="media.webaudio.legacy.AudioBufferSourceNode"]
-    void noteGrainOn(double when, double grainOffset, double grainDuration);
-    
-    [Throws,Pref="media.webaudio.legacy.AudioBufferSourceNode"]
-    // Same as stop()
-    void noteOff(double when);
-};
-
--- a/dom/webidl/AudioContext.webidl
+++ b/dom/webidl/AudioContext.webidl
@@ -69,37 +69,14 @@ interface AudioContext : EventTarget {
 
     [NewObject]
     OscillatorNode createOscillator();
     [NewObject, Throws]
     PeriodicWave createPeriodicWave(Float32Array real, Float32Array imag);
 
 };
 
-/*
- * The origin of this IDL file is
- * https://dvcs.w3.org/hg/audio/raw-file/tip/webaudio/specification.html#AlternateNames
- */
-partial interface AudioContext {
-    [NewObject, Throws]
-    AudioBuffer? createBuffer(ArrayBuffer buffer, boolean mixToMono);
-
-    // Same as createGain()
-    [NewObject,Pref="media.webaudio.legacy.AudioContext"]
-    GainNode createGainNode();
-
-    // Same as createDelay()
-    [NewObject, Throws, Pref="media.webaudio.legacy.AudioContext"]
-    DelayNode createDelayNode(optional double maxDelayTime = 1);
-
-    // Same as createScriptProcessor()
-    [NewObject, Throws, Pref="media.webaudio.legacy.AudioContext"]
-    ScriptProcessorNode createJavaScriptNode(optional unsigned long bufferSize = 0,
-                                             optional unsigned long numberOfInputChannels = 2,
-                                             optional unsigned long numberOfOutputChannels = 2);
-};
-
 // Mozilla extensions
 partial interface AudioContext {
   // Read AudioChannel.webidl for more information about this attribute.
   [Pref="media.useAudioChannelService", SetterThrows]
   attribute AudioChannel mozAudioChannelType;
 };
--- a/dom/webidl/AudioParam.webidl
+++ b/dom/webidl/AudioParam.webidl
@@ -32,19 +32,8 @@ interface AudioParam {
     [Throws]
     void setValueCurveAtTime(Float32Array values, double startTime, double duration);
 
     // Cancels all scheduled parameter changes with times greater than or equal to startTime. 
     [Throws]
     void cancelScheduledValues(double startTime);
 
 };
-
-/*
- * The origin of this IDL file is
- * https://dvcs.w3.org/hg/audio/raw-file/tip/webaudio/specification.html#AlternateNames
- */
-partial interface AudioParam {
-    // Same as setTargetAtTime()
-    [Throws,Pref="media.webaudio.legacy.AudioParam"]
-    void setTargetValueAtTime(float target, double startTime, double timeConstant);
-};
-
--- a/testing/profiles/prefs_general.js
+++ b/testing/profiles/prefs_general.js
@@ -139,19 +139,16 @@ user_pref("dom.mozSettings.enabled", tru
 // Make sure the disk cache doesn't get auto disabled
 user_pref("network.http.bypass-cachelock-threshold", 200000);
 
 // Enable Gamepad
 user_pref("dom.gamepad.enabled", true);
 user_pref("dom.gamepad.non_standard_events.enabled", true);
 
 // Enable Web Audio legacy APIs
-user_pref("media.webaudio.legacy.AudioBufferSourceNode", true);
-user_pref("media.webaudio.legacy.AudioContext", true);
-user_pref("media.webaudio.legacy.AudioParam", true);
 user_pref("media.webaudio.legacy.BiquadFilterNode", true);
 user_pref("media.webaudio.legacy.PannerNode", true);
 user_pref("media.webaudio.legacy.OscillatorNode", true);
 
 // Always use network provider for geolocation tests
 // so we bypass the OSX dialog raised by the corelocation provider
 user_pref("geo.provider.testing", true);