Backed out changeset bf22c9e5c5a3 (bug 1094764) for crashtest failures on a CLOSED TREE
author Carsten "Tomcat" Book <cbook@mozilla.com>
date Mon, 30 Mar 2015 15:14:09 +0200
changeset 236617 f8ccc1fed2047893ee058f7d95eec56c21741fa9
parent 236616 fbd6031f29580de6f43b9208a48cf35abdefdfb3
child 236618 814e2c0ad479ae72ee152a456e1e73c9e9010426
push id 28514
push user cbook@mozilla.com
push date Tue, 31 Mar 2015 12:46:33 +0000
treeherder mozilla-central@c20f8549d631
bugs 1094764
milestone 39.0a1
backs out bf22c9e5c5a3a082fe72cbbca3f871d7d388e4ab
dom/base/nsGlobalWindow.cpp
dom/media/GraphDriver.cpp
dom/media/GraphDriver.h
dom/media/MediaStreamGraph.cpp
dom/media/MediaStreamGraph.h
dom/media/MediaStreamGraphImpl.h
dom/media/TrackUnionStream.cpp
dom/media/webaudio/AudioContext.cpp
dom/media/webaudio/AudioContext.h
dom/media/webaudio/AudioDestinationNode.cpp
dom/media/webaudio/AudioNodeExternalInputStream.cpp
dom/media/webaudio/AudioNodeExternalInputStream.h
dom/media/webaudio/AudioNodeStream.cpp
dom/media/webaudio/AudioNodeStream.h
dom/media/webaudio/MediaStreamAudioSourceNode.h
dom/media/webaudio/moz.build
dom/media/webaudio/test/mochitest.ini
dom/media/webaudio/test/test_audioContextSuspendResumeClose.html
dom/media/webaudio/test/webaudio.js
dom/webidl/AudioContext.webidl
--- a/dom/base/nsGlobalWindow.cpp
+++ b/dom/base/nsGlobalWindow.cpp
@@ -12963,18 +12963,17 @@ nsGlobalWindow::SuspendTimeouts(uint32_t
         // passing null for the context, since this shouldn't actually release this
         // timeout.
         t->Release();
       }
     }
 
     // Suspend all of the AudioContexts for this window
     for (uint32_t i = 0; i < mAudioContexts.Length(); ++i) {
-      ErrorResult dummy;
-      unused << mAudioContexts[i]->Suspend(dummy);
+      mAudioContexts[i]->Suspend();
     }
   }
 
   // Suspend our children as well.
   nsCOMPtr<nsIDocShell> docShell = GetDocShell();
   if (docShell) {
     int32_t childCount = 0;
     docShell->GetChildCount(&childCount);
@@ -13024,18 +13023,17 @@ nsGlobalWindow::ResumeTimeouts(bool aTha
     if (ac) {
       for (uint32_t i = 0; i < mEnabledSensors.Length(); i++)
         ac->AddWindowListener(mEnabledSensors[i], this);
     }
     EnableGamepadUpdates();
 
     // Resume all of the AudioContexts for this window
     for (uint32_t i = 0; i < mAudioContexts.Length(); ++i) {
-      ErrorResult dummy;
-      unused << mAudioContexts[i]->Resume(dummy);
+      mAudioContexts[i]->Resume();
     }
 
     // Resume all of the workers for this window.
     mozilla::dom::workers::ResumeWorkersForWindow(this);
 
     // Restore all of the timeouts, using the stored time remaining
     // (stored in timeout->mTimeRemaining).
 
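The two nsGlobalWindow hunks above revert the freeze/thaw paths to the plain void Suspend()/Resume() calls. In the API being backed out, these methods took an ErrorResult and returned a promise that this code path had no use for; a minimal sketch of the discarded-promise idiom being removed (unused comes from mozilla/unused.h):

    // Sketch of the backed-out call shape: the window freeze path only wants
    // the side effect, so the returned promise is deliberately dropped.
    ErrorResult dummy;
    unused << mAudioContexts[i]->Suspend(dummy);
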
--- a/dom/media/GraphDriver.cpp
+++ b/dom/media/GraphDriver.cpp
@@ -18,17 +18,17 @@ extern PRLogModuleInfo* gMediaStreamGrap
 #endif
 
 // We don't use NSPR log here because we want this interleaved with adb logcat
 // on Android/B2G
 // #define ENABLE_LIFECYCLE_LOG
 #ifdef ENABLE_LIFECYCLE_LOG
 #ifdef ANDROID
 #include "android/log.h"
-#define LIFECYCLE_LOG(...)  __android_log_print(ANDROID_LOG_INFO, "Gecko - MSG" , __VA_ARGS__); printf(__VA_ARGS__);printf("\n");
+#define LIFECYCLE_LOG(args...)  __android_log_print(ANDROID_LOG_INFO, "Gecko - MSG" , ## __VA_ARGS__); printf(__VA_ARGS__);printf("\n");
 #else
 #define LIFECYCLE_LOG(...) printf(__VA_ARGS__);printf("\n");
 #endif
 #else
 #define LIFECYCLE_LOG(...)
 #endif
 
 namespace mozilla {
@@ -90,16 +90,19 @@ void GraphDriver::SwitchAtNextIteration(
       mPreviousDriver &&
       mPreviousDriver->AsAudioCallbackDriver()->IsSwitchingDevice() &&
       mPreviousDriver != aNextDriver) {
     return;
   }
   LIFECYCLE_LOG("Switching to new driver: %p (%s)",
       aNextDriver, aNextDriver->AsAudioCallbackDriver() ?
       "AudioCallbackDriver" : "SystemClockDriver");
+  // Sometimes we switch twice to a new driver per iteration, this is probably a
+  // bug.
+  MOZ_ASSERT(!mNextDriver || mNextDriver->AsAudioCallbackDriver());
   mNextDriver = aNextDriver;
 }
 
 void GraphDriver::EnsureImmediateWakeUpLocked()
 {
   mGraphImpl->GetMonitor().AssertCurrentThreadOwns();
   mWaitState = WAITSTATE_WAKING_UP;
   mGraphImpl->mGraphDriverAsleep = false; // atomic
@@ -137,17 +140,17 @@ public:
     LIFECYCLE_LOG("MediaStreamGraphShutdownThreadRunnable for graph %p",
         mDriver->GraphImpl());
     // We can't release an audio driver on the main thread, because it can be
     // blocking.
     if (mDriver->AsAudioCallbackDriver()) {
       LIFECYCLE_LOG("Releasing audio driver off main thread.");
       nsRefPtr<AsyncCubebTask> releaseEvent =
         new AsyncCubebTask(mDriver->AsAudioCallbackDriver(),
-                           AsyncCubebOperation::SHUTDOWN);
+                           AsyncCubebTask::SHUTDOWN);
       mDriver = nullptr;
       releaseEvent->Dispatch();
     } else {
       LIFECYCLE_LOG("Dropping driver reference for SystemClockDriver.");
       mDriver = nullptr;
     }
     return NS_OK;
   }
@@ -155,17 +158,17 @@ private:
   nsRefPtr<GraphDriver> mDriver;
 };
 
 void GraphDriver::Shutdown()
 {
   if (AsAudioCallbackDriver()) {
     LIFECYCLE_LOG("Releasing audio driver off main thread (GraphDriver::Shutdown).\n");
     nsRefPtr<AsyncCubebTask> releaseEvent =
-      new AsyncCubebTask(AsAudioCallbackDriver(), AsyncCubebOperation::SHUTDOWN);
+      new AsyncCubebTask(AsAudioCallbackDriver(), AsyncCubebTask::SHUTDOWN);
     releaseEvent->Dispatch();
   } else {
     Stop();
   }
 }
 
 ThreadedDriver::ThreadedDriver(MediaStreamGraphImpl* aGraphImpl)
   : GraphDriver(aGraphImpl)
@@ -196,17 +199,17 @@ public:
                     mDriver->mPreviousDriver.get(),
                     mDriver->GraphImpl());
       MOZ_ASSERT(!mDriver->AsAudioCallbackDriver());
       // Stop and release the previous driver off-main-thread, but only if we're
       // not in the situation where we've fallen back to a system clock driver
       // because the osx audio stack is currently switching output device.
       if (!mDriver->mPreviousDriver->AsAudioCallbackDriver()->IsSwitchingDevice()) {
         nsRefPtr<AsyncCubebTask> releaseEvent =
-          new AsyncCubebTask(mDriver->mPreviousDriver->AsAudioCallbackDriver(), AsyncCubebOperation::SHUTDOWN);
+          new AsyncCubebTask(mDriver->mPreviousDriver->AsAudioCallbackDriver(), AsyncCubebTask::SHUTDOWN);
         mDriver->mPreviousDriver = nullptr;
         releaseEvent->Dispatch();
       }
     } else {
       MonitorAutoLock mon(mDriver->mGraphImpl->GetMonitor());
       MOZ_ASSERT(mDriver->mGraphImpl->MessagesQueued(), "Don't start a graph without messages queued.");
       mDriver->mGraphImpl->SwapMessageQueues();
     }
@@ -497,70 +500,73 @@ AsyncCubebTask::Run()
     // the Dispatch(), and Dispatch/PutEvent itself doesn't hold a ref; it
     // assumes the caller does.
     return NS_OK;
   }
 
   MOZ_ASSERT(mDriver);
 
   switch(mOperation) {
-    case AsyncCubebOperation::INIT: {
+    case AsyncCubebOperation::INIT:
       LIFECYCLE_LOG("AsyncCubebOperation::INIT\n");
       mDriver->Init();
-      mDriver->CompleteAudioContextOperations(mOperation);
       break;
-    }
-    case AsyncCubebOperation::SHUTDOWN: {
+    case AsyncCubebOperation::SHUTDOWN:
       LIFECYCLE_LOG("AsyncCubebOperation::SHUTDOWN\n");
       mDriver->Stop();
-
-      mDriver->CompleteAudioContextOperations(mOperation);
-
       mDriver = nullptr;
       mShutdownGrip = nullptr;
       break;
+    case AsyncCubebOperation::SLEEP: {
+      {
+        LIFECYCLE_LOG("AsyncCubebOperation::SLEEP\n");
+        MonitorAutoLock mon(mDriver->mGraphImpl->GetMonitor());
+        // We might just have been awoken
+        if (mDriver->mGraphImpl->mNeedAnotherIteration) {
+          mDriver->mPauseRequested = false;
+          mDriver->mWaitState = AudioCallbackDriver::WAITSTATE_RUNNING;
+          mDriver->mGraphImpl->mGraphDriverAsleep = false; // atomic
+          break;
+        }
+        mDriver->Stop();
+        mDriver->mGraphImpl->mGraphDriverAsleep = true; // atomic
+        mDriver->mWaitState = AudioCallbackDriver::WAITSTATE_WAITING_INDEFINITELY;
+        mDriver->mPauseRequested = false;
+        mDriver->mGraphImpl->GetMonitor().Wait(PR_INTERVAL_NO_TIMEOUT);
+      }
+      STREAM_LOG(PR_LOG_DEBUG, ("Restarting audio stream from sleep."));
+      mDriver->StartStream();
+      break;
     }
     default:
       MOZ_CRASH("Operation not implemented.");
   }
 
   // and now kill this thread
   NS_DispatchToMainThread(this);
 
   return NS_OK;
 }
 
-StreamAndPromiseForOperation::StreamAndPromiseForOperation(MediaStream* aStream,
-                                          void* aPromise,
-                                          dom::AudioContextOperation aOperation)
-  : mStream(aStream)
-  , mPromise(aPromise)
-  , mOperation(aOperation)
-{
-  MOZ_ASSERT(aPromise);
-}
-
 AudioCallbackDriver::AudioCallbackDriver(MediaStreamGraphImpl* aGraphImpl, dom::AudioChannel aChannel)
   : GraphDriver(aGraphImpl)
   , mIterationDurationMS(MEDIA_GRAPH_TARGET_PERIOD_MS)
   , mStarted(false)
   , mAudioChannel(aChannel)
   , mInCallback(false)
   , mPauseRequested(false)
 #ifdef XP_MACOSX
   , mCallbackReceivedWhileSwitching(0)
 #endif
 {
   STREAM_LOG(PR_LOG_DEBUG, ("AudioCallbackDriver ctor for graph %p", aGraphImpl));
 }
 
 AudioCallbackDriver::~AudioCallbackDriver()
-{
-  MOZ_ASSERT(mPromisesForOperation.IsEmpty());
-}
+{}
 
 void
 AudioCallbackDriver::Init()
 {
   cubeb_stream_params params;
   uint32_t latency;
 
   MOZ_ASSERT(!NS_IsMainThread(),
@@ -640,28 +646,22 @@ AudioCallbackDriver::Resume()
 void
 AudioCallbackDriver::Start()
 {
   // If this is running on the main thread, we can't open the stream directly,
   // because it is a blocking operation.
   if (NS_IsMainThread()) {
     STREAM_LOG(PR_LOG_DEBUG, ("Starting audio threads for MediaStreamGraph %p from a new thread.", mGraphImpl));
     nsRefPtr<AsyncCubebTask> initEvent =
-      new AsyncCubebTask(this, AsyncCubebOperation::INIT);
+      new AsyncCubebTask(this, AsyncCubebTask::INIT);
     initEvent->Dispatch();
   } else {
     STREAM_LOG(PR_LOG_DEBUG, ("Starting audio threads for MediaStreamGraph %p from the previous driver's thread", mGraphImpl));
     Init();
 
-    // Check if we need to resolve promises because the driver just got switched
-    // because of a resuming AudioContext
-    if (!mPromisesForOperation.IsEmpty()) {
-      CompleteAudioContextOperations(AsyncCubebOperation::INIT);
-    }
-
     if (mPreviousDriver) {
       nsCOMPtr<nsIRunnable> event =
         new MediaStreamGraphShutdownThreadRunnable(mPreviousDriver);
       mPreviousDriver = nullptr;
       NS_DispatchToMainThread(event);
     }
   }
 }
@@ -699,17 +699,17 @@ AudioCallbackDriver::Revive()
   if (mNextDriver) {
     mNextDriver->SetGraphTime(this, mIterationStart, mIterationEnd,
                               mStateComputedTime, mNextStateComputedTime);
     mGraphImpl->SetCurrentDriver(mNextDriver);
     mNextDriver->Start();
   } else {
     STREAM_LOG(PR_LOG_DEBUG, ("Starting audio threads for MediaStreamGraph %p from a new thread.", mGraphImpl));
     nsRefPtr<AsyncCubebTask> initEvent =
-      new AsyncCubebTask(this, AsyncCubebOperation::INIT);
+      new AsyncCubebTask(this, AsyncCubebTask::INIT);
     initEvent->Dispatch();
   }
 }
 
 void
 AudioCallbackDriver::GetIntervalForIteration(GraphTime& aFrom,
                                              GraphTime& aTo)
 {
@@ -724,16 +724,30 @@ AudioCallbackDriver::GetCurrentTime()
     NS_WARNING("Could not get current time from cubeb.");
   }
 
   return mSampleRate * position;
 }
 
 void AudioCallbackDriver::WaitForNextIteration()
 {
+#if 0
+  mGraphImpl->GetMonitor().AssertCurrentThreadOwns();
+
+  // We can't block on the monitor in the audio callback, so we kick off a new
+  // thread that will pause the audio stream, and restart it when unblocked.
+  // We don't want to sleep when we haven't started the driver yet.
+  if (!mGraphImpl->mNeedAnotherIteration && mAudioStream && mGraphImpl->Running()) {
+    STREAM_LOG(PR_LOG_DEBUG+1, ("AudioCallbackDriver going to sleep"));
+    mPauseRequested = true;
+    nsRefPtr<AsyncCubebTask> sleepEvent =
+      new AsyncCubebTask(this, AsyncCubebTask::SLEEP);
+    sleepEvent->Dispatch();
+  }
+#endif
 }
 
 void
 AudioCallbackDriver::WakeUp()
 {
   mGraphImpl->GetMonitor().AssertCurrentThreadOwns();
   mGraphImpl->GetMonitor().Notify();
 }
@@ -1055,52 +1069,10 @@ AudioCallbackDriver::IterationDuration()
 }
 
 bool
 AudioCallbackDriver::IsStarted() {
   mGraphImpl->GetMonitor().AssertCurrentThreadOwns();
   return mStarted;
 }
 
-void
-AudioCallbackDriver::EnqueueStreamAndPromiseForOperation(MediaStream* aStream,
-                                          void* aPromise,
-                                          dom::AudioContextOperation aOperation)
-{
-  MonitorAutoLock mon(mGraphImpl->GetMonitor());
-  mPromisesForOperation.AppendElement(StreamAndPromiseForOperation(aStream,
-                                                                   aPromise,
-                                                                   aOperation));
-}
-
-void AudioCallbackDriver::CompleteAudioContextOperations(AsyncCubebOperation aOperation)
-{
-  nsAutoTArray<StreamAndPromiseForOperation, 1> array;
-
-  // We can't lock for the whole function because AudioContextOperationCompleted
-  // will grab the monitor
-  {
-    MonitorAutoLock mon(GraphImpl()->GetMonitor());
-    array.SwapElements(mPromisesForOperation);
-  }
-
-  for (int32_t i = array.Length() - 1; i >= 0; i--) {
-    StreamAndPromiseForOperation& s = array[i];
-    if ((aOperation == AsyncCubebOperation::INIT &&
-         s.mOperation == AudioContextOperation::Resume) ||
-        (aOperation == AsyncCubebOperation::SHUTDOWN &&
-         s.mOperation != AudioContextOperation::Resume)) {
-
-      GraphImpl()->AudioContextOperationCompleted(s.mStream,
-                                                  s.mPromise,
-                                                  s.mOperation);
-      array.RemoveElementAt(i);
-    }
-  }
-
-  if (!array.IsEmpty()) {
-    MonitorAutoLock mon(GraphImpl()->GetMonitor());
-    mPromisesForOperation.AppendElements(array);
-  }
-}
-
 
 } // namespace mozilla
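For reference, the CompleteAudioContextOperations() removed above resolved queued AudioContext promises only once the matching cubeb operation had completed. A condensed sketch of that pairing rule, using the names from the removed code:

    // Resume promises resolve when the stream (re)starts (INIT);
    // Suspend/Close promises resolve when it stops (SHUTDOWN).
    static bool ShouldResolve(AsyncCubebOperation aCubebOp,
                              dom::AudioContextOperation aContextOp)
    {
      return (aCubebOp == AsyncCubebOperation::INIT &&
              aContextOp == dom::AudioContextOperation::Resume) ||
             (aCubebOp == AsyncCubebOperation::SHUTDOWN &&
              aContextOp != dom::AudioContextOperation::Resume);
    }
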
--- a/dom/media/GraphDriver.h
+++ b/dom/media/GraphDriver.h
@@ -8,17 +8,16 @@
 
 #include "nsAutoPtr.h"
 #include "nsAutoRef.h"
 #include "AudioBufferUtils.h"
 #include "AudioMixer.h"
 #include "AudioSegment.h"
 #include "SelfRef.h"
 #include "mozilla/Atomics.h"
-#include "AudioContext.h"
 
 struct cubeb_stream;
 
 template <>
 class nsAutoRefTraits<cubeb_stream> : public nsPointerRefTraits<cubeb_stream>
 {
 public:
   static void Release(cubeb_stream* aStream) { cubeb_stream_destroy(aStream); }
@@ -317,31 +316,16 @@ public:
     return this;
   }
 
 private:
   // Time, in GraphTime, for each iteration
   GraphTime mSlice;
 };
 
-struct StreamAndPromiseForOperation
-{
-  StreamAndPromiseForOperation(MediaStream* aStream,
-                               void* aPromise,
-                               dom::AudioContextOperation aOperation);
-  nsRefPtr<MediaStream> mStream;
-  void* mPromise;
-  dom::AudioContextOperation mOperation;
-};
-
-enum AsyncCubebOperation {
-  INIT,
-  SHUTDOWN
-};
-
 /**
  * This is a graph driver that is based on callback functions called by the
  * audio api. This ensures minimal audio latency, because it means there is no
  * buffering happening: the audio is generated inside the callback.
  *
  * This design is less flexible than running our own thread:
  * - We have no control over the thread:
  * - It cannot block, and it has to run for a shorter amount of time than the
@@ -403,22 +387,16 @@ public:
                              uint32_t aChannels,
                              uint32_t aFrames,
                              uint32_t aSampleRate) override;
 
   virtual AudioCallbackDriver* AsAudioCallbackDriver() override {
     return this;
   }
 
-  /* Enqueue a promise that is going to be resolved when a specific operation
-   * occurs on the cubeb stream. */
-  void EnqueueStreamAndPromiseForOperation(MediaStream* aStream,
-                                         void* aPromise,
-                                         dom::AudioContextOperation aOperation);
-
   bool IsSwitchingDevice() {
 #ifdef XP_MACOSX
     return mSelfReference;
 #else
     return false;
 #endif
   }
 
@@ -431,18 +409,16 @@ public:
 
   /* Whether the underlying cubeb stream has been started. See comment for
    * mStarted for details. */
   bool IsStarted();
 
   /* Tell the driver whether this process is using a microphone or not. This is
    * thread safe. */
   void SetMicrophoneActive(bool aActive);
-
-  void CompleteAudioContextOperations(AsyncCubebOperation aOperation);
 private:
   /**
    * On certain MacBookPro, the microphone is located near the left speaker.
    * We need to pan the sound output to the right speaker if we are using the
    * mic and the built-in speaker, or we will have terrible echo.  */
   void PanOutputIfNeeded(bool aMicrophoneActive);
   /**
    * This is called when the output device used by the cubeb stream changes. */
@@ -490,17 +466,16 @@ private:
     explicit AutoInCallback(AudioCallbackDriver* aDriver);
     ~AutoInCallback();
     AudioCallbackDriver* mDriver;
   };
 
   /* Thread for off-main-thread initialization and
    * shutdown of the audio stream. */
   nsCOMPtr<nsIThread> mInitShutdownThread;
-  nsAutoTArray<StreamAndPromiseForOperation, 1> mPromisesForOperation;
   dom::AudioChannel mAudioChannel;
   Atomic<bool> mInCallback;
   /* A thread has been created to be able to pause and restart the audio thread,
    * but has not done so yet. This indicates that the callback should return
    * early */
   bool mPauseRequested;
   /**
    * True if microphone is being used by this process. This is synchronized by
@@ -518,16 +493,22 @@ private:
    * since OSX seems to still call us _sometimes_. */
   uint32_t mCallbackReceivedWhileSwitching;
 #endif
 };
 
 class AsyncCubebTask : public nsRunnable
 {
 public:
+  enum AsyncCubebOperation {
+    INIT,
+    SHUTDOWN,
+    SLEEP
+  };
+
 
   AsyncCubebTask(AudioCallbackDriver* aDriver, AsyncCubebOperation aOperation);
 
   nsresult Dispatch()
   {
     // Can't add 'this' as the event to run, since mThread may not be set yet
     nsresult rv = NS_NewNamedThread("CubebOperation", getter_AddRefs(mThread));
     if (NS_SUCCEEDED(rv)) {
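The GraphDriver.h hunks above delete StreamAndPromiseForOperation, drop the AudioContext.h dependency, and move AsyncCubebOperation back to being a nested enum of AsyncCubebTask, restoring the SLEEP value (its trigger in WaitForNextIteration comes back under #if 0 in GraphDriver.cpp). A usage sketch matching the restored call sites:

    // Off-main-thread shutdown of a (blocking) cubeb stream; Dispatch()
    // spawns the "CubebOperation" thread and runs the operation there.
    nsRefPtr<AsyncCubebTask> releaseEvent =
      new AsyncCubebTask(driver->AsAudioCallbackDriver(),
                         AsyncCubebTask::SHUTDOWN);
    releaseEvent->Dispatch();
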
--- a/dom/media/MediaStreamGraph.cpp
+++ b/dom/media/MediaStreamGraph.cpp
@@ -19,17 +19,16 @@
 #include "prlog.h"
 #include "mozilla/Attributes.h"
 #include "TrackUnionStream.h"
 #include "ImageContainer.h"
 #include "AudioChannelService.h"
 #include "AudioNodeEngine.h"
 #include "AudioNodeStream.h"
 #include "AudioNodeExternalInputStream.h"
-#include "mozilla/dom/AudioContextBinding.h"
 #include <algorithm>
 #include "DOMMediaStream.h"
 #include "GeckoProfiler.h"
 #include "mozilla/unused.h"
 #ifdef MOZ_WEBRTC
 #include "AudioOutputObserver.h"
 #endif
 
@@ -98,41 +97,22 @@ MediaStreamGraphImpl::FinishStream(Media
   // Force at least one more iteration of the control loop, since we rely
   // on UpdateCurrentTimeForStreams to notify our listeners once the stream end
   // has been reached.
   EnsureNextIteration();
 
   SetStreamOrderDirty();
 }
 
-static const GraphTime START_TIME_DELAYED = -1;
-
 void
 MediaStreamGraphImpl::AddStream(MediaStream* aStream)
 {
-  // Check if we're adding a stream to a suspended context, in which case, we
-  // add it to mSuspendedStreams, and delay setting mBufferStartTime
-  bool contextSuspended = false;
-  if (aStream->AsAudioNodeStream()) {
-    for (uint32_t i = 0; i < mSuspendedStreams.Length(); i++) {
-      if (aStream->AudioContextId() == mSuspendedStreams[i]->AudioContextId()) {
-        contextSuspended = true;
-      }
-    }
-  }
-
-  if (contextSuspended) {
-    aStream->mBufferStartTime = START_TIME_DELAYED;
-    mSuspendedStreams.AppendElement(aStream);
-    STREAM_LOG(PR_LOG_DEBUG, ("Adding media stream %p to the graph, in the suspended stream array", aStream));
-  } else {
-    aStream->mBufferStartTime = IterationEnd();
-    mStreams.AppendElement(aStream);
-    STREAM_LOG(PR_LOG_DEBUG, ("Adding media stream %p to the graph", aStream));
-  }
+  aStream->mBufferStartTime = IterationEnd();
+  mStreams.AppendElement(aStream);
+  STREAM_LOG(PR_LOG_DEBUG, ("Adding media stream %p to the graph", aStream));
 
   SetStreamOrderDirty();
 }
 
 void
 MediaStreamGraphImpl::RemoveStream(MediaStream* aStream)
 {
   // Remove references in mStreamUpdates before we allow aStream to die.
@@ -146,18 +126,16 @@ MediaStreamGraphImpl::RemoveStream(Media
       }
     }
   }
 
   // Ensure that mFirstCycleBreaker and mMixer are updated when necessary.
   SetStreamOrderDirty();
 
   mStreams.RemoveElement(aStream);
-  mSuspendedStreams.RemoveElement(aStream);
-
   NS_RELEASE(aStream); // probably destroying it
 
   STREAM_LOG(PR_LOG_DEBUG, ("Removing media stream %p from the graph", aStream));
 }
 
 void
 MediaStreamGraphImpl::UpdateConsumptionState(SourceMediaStream* aStream)
 {
@@ -393,74 +371,59 @@ MediaStreamGraphImpl::IterationEnd()
   return CurrentDriver()->IterationEnd();
 }
 
 void
 MediaStreamGraphImpl::UpdateCurrentTimeForStreams(GraphTime aPrevCurrentTime, GraphTime aNextCurrentTime)
 {
   nsTArray<MediaStream*> streamsReadyToFinish;
   nsAutoTArray<bool,800> streamHasOutput;
-
-  nsTArray<MediaStream*>* runningAndSuspendedPair[2];
-  runningAndSuspendedPair[0] = &mStreams;
-  runningAndSuspendedPair[1] = &mSuspendedStreams;
-
   streamHasOutput.SetLength(mStreams.Length());
-
-  for (uint32_t array = 0; array < 2; array++) {
-    for (uint32_t i = 0; i < runningAndSuspendedPair[array]->Length(); ++i) {
-      MediaStream* stream = (*runningAndSuspendedPair[array])[i];
+  for (uint32_t i = 0; i < mStreams.Length(); ++i) {
+    MediaStream* stream = mStreams[i];
 
-      // Calculate blocked time and fire Blocked/Unblocked events
-      GraphTime blockedTime = 0;
-      GraphTime t = aPrevCurrentTime;
-      // include |nextCurrentTime| to ensure NotifyBlockingChanged() is called
-      // before NotifyEvent(this, EVENT_FINISHED) when |nextCurrentTime ==
-      // stream end time|
-      while (t <= aNextCurrentTime) {
-        GraphTime end;
-        bool blocked = stream->mBlocked.GetAt(t, &end);
-        if (blocked) {
-          blockedTime += std::min(end, aNextCurrentTime) - t;
+    // Calculate blocked time and fire Blocked/Unblocked events
+    GraphTime blockedTime = 0;
+    GraphTime t = aPrevCurrentTime;
+    // include |nextCurrentTime| to ensure NotifyBlockingChanged() is called
+    // before NotifyEvent(this, EVENT_FINISHED) when |nextCurrentTime == stream end time|
+    while (t <= aNextCurrentTime) {
+      GraphTime end;
+      bool blocked = stream->mBlocked.GetAt(t, &end);
+      if (blocked) {
+        blockedTime += std::min(end, aNextCurrentTime) - t;
+      }
+      if (blocked != stream->mNotifiedBlocked) {
+        for (uint32_t j = 0; j < stream->mListeners.Length(); ++j) {
+          MediaStreamListener* l = stream->mListeners[j];
+          l->NotifyBlockingChanged(this,
+              blocked ? MediaStreamListener::BLOCKED : MediaStreamListener::UNBLOCKED);
         }
-        if (blocked != stream->mNotifiedBlocked) {
-          for (uint32_t j = 0; j < stream->mListeners.Length(); ++j) {
-            MediaStreamListener* l = stream->mListeners[j];
-            l->NotifyBlockingChanged(this, blocked
-                                             ? MediaStreamListener::BLOCKED
-                                             : MediaStreamListener::UNBLOCKED);
-          }
-          stream->mNotifiedBlocked = blocked;
-        }
-        t = end;
+        stream->mNotifiedBlocked = blocked;
       }
+      t = end;
+    }
+
+
+    stream->AdvanceTimeVaryingValuesToCurrentTime(aNextCurrentTime, blockedTime);
+    // Advance mBlocked last so that implementations of
+    // AdvanceTimeVaryingValuesToCurrentTime can rely on the value of mBlocked.
+    stream->mBlocked.AdvanceCurrentTime(aNextCurrentTime);
 
-      stream->AdvanceTimeVaryingValuesToCurrentTime(aNextCurrentTime,
-                                                    blockedTime);
-      // Advance mBlocked last so that implementations of
-      // AdvanceTimeVaryingValuesToCurrentTime can rely on the value of
-      // mBlocked.
-      stream->mBlocked.AdvanceCurrentTime(aNextCurrentTime);
+    streamHasOutput[i] = blockedTime < aNextCurrentTime - aPrevCurrentTime;
+    // Make this an assertion when bug 957832 is fixed.
+    NS_WARN_IF_FALSE(!streamHasOutput[i] || !stream->mNotifiedFinished,
+      "Shouldn't have already notified of finish *and* have output!");
 
-      if (runningAndSuspendedPair[array] == &mStreams) {
-        streamHasOutput[i] = blockedTime < aNextCurrentTime - aPrevCurrentTime;
-        // Make this an assertion when bug 957832 is fixed.
-        NS_WARN_IF_FALSE(
-          !streamHasOutput[i] || !stream->mNotifiedFinished,
-          "Shouldn't have already notified of finish *and* have output!");
-
-        if (stream->mFinished && !stream->mNotifiedFinished) {
-          streamsReadyToFinish.AppendElement(stream);
-        }
-      }
-      STREAM_LOG(PR_LOG_DEBUG + 1,
-                 ("MediaStream %p bufferStartTime=%f blockedTime=%f", stream,
-                  MediaTimeToSeconds(stream->mBufferStartTime),
-                  MediaTimeToSeconds(blockedTime)));
+    if (stream->mFinished && !stream->mNotifiedFinished) {
+      streamsReadyToFinish.AppendElement(stream);
     }
+    STREAM_LOG(PR_LOG_DEBUG+1, ("MediaStream %p bufferStartTime=%f blockedTime=%f",
+                                stream, MediaTimeToSeconds(stream->mBufferStartTime),
+                                MediaTimeToSeconds(blockedTime)));
   }
 
 
   for (uint32_t i = 0; i < streamHasOutput.Length(); ++i) {
     if (!streamHasOutput[i]) {
       continue;
     }
     MediaStream* stream = mStreams[i];
@@ -548,38 +511,28 @@ MediaStreamGraphImpl::MarkConsumed(Media
     return;
   }
   // Mark all the inputs to this stream as consumed
   for (uint32_t i = 0; i < ps->mInputs.Length(); ++i) {
     MarkConsumed(ps->mInputs[i]->mSource);
   }
 }
 
-bool
-MediaStreamGraphImpl::StreamSuspended(MediaStream* aStream)
-{
-  // Only AudioNodeStreams can be suspended, so we can shortcut here.
-  return aStream->AsAudioNodeStream() &&
-         mSuspendedStreams.IndexOf(aStream) != mSuspendedStreams.NoIndex;
-}
-
-namespace {
-  // Value of mCycleMarker for unvisited streams in cycle detection.
-  const uint32_t NOT_VISITED = UINT32_MAX;
-  // Value of mCycleMarker for ordered streams in muted cycles.
-  const uint32_t IN_MUTED_CYCLE = 1;
-}
-
 void
 MediaStreamGraphImpl::UpdateStreamOrder()
 {
 #ifdef MOZ_WEBRTC
   bool shouldAEC = false;
 #endif
   bool audioTrackPresent = false;
+  // Value of mCycleMarker for unvisited streams in cycle detection.
+  const uint32_t NOT_VISITED = UINT32_MAX;
+  // Value of mCycleMarker for ordered streams in muted cycles.
+  const uint32_t IN_MUTED_CYCLE = 1;
+
   for (uint32_t i = 0; i < mStreams.Length(); ++i) {
     MediaStream* stream = mStreams[i];
     stream->mIsConsumed = false;
     stream->mInBlockingSet = false;
 #ifdef MOZ_WEBRTC
     if (stream->AsSourceStream() &&
         stream->AsSourceStream()->NeedsMixing()) {
       shouldAEC = true;
@@ -685,45 +638,35 @@ MediaStreamGraphImpl::UpdateStreamOrder(
     if (ps->mCycleMarker == NOT_VISITED) {
       // Record the position on the visited stack, so that any searches
       // finding this stream again know how much of the stack is in the cycle.
       ps->mCycleMarker = nextStackMarker;
       --nextStackMarker;
       // Not-visited input streams should be processed first.
       // SourceMediaStreams have already been ordered.
       for (uint32_t i = inputs.Length(); i--; ) {
-        if (StreamSuspended(inputs[i]->mSource)) {
-          continue;
-        }
         auto input = inputs[i]->mSource->AsProcessedStream();
         if (input && input->mCycleMarker == NOT_VISITED) {
-          // It can be that this stream has an input which is from a suspended
-          // AudioContext.
-          if (input->isInList()) {
-            input->remove();
-            dfsStack.insertFront(input);
-          }
+          input->remove();
+          dfsStack.insertFront(input);
         }
       }
       continue;
     }
 
     // Returning from DFS.  Pop from dfsStack.
     ps->remove();
 
     // cycleStackMarker keeps track of the highest marker value on any
     // upstream stream, if any, found receiving input, directly or indirectly,
     // from the visited stack (and so from |ps|, making a cycle).  In a
     // variation from Tarjan's SCC algorithm, this does not include |ps|
     // unless it is part of the cycle.
     uint32_t cycleStackMarker = 0;
     for (uint32_t i = inputs.Length(); i--; ) {
-      if (StreamSuspended(inputs[i]->mSource)) {
-        continue;
-      }
       auto input = inputs[i]->mSource->AsProcessedStream();
       if (input) {
         cycleStackMarker = std::max(cycleStackMarker, input->mCycleMarker);
       }
     }
 
     if (cycleStackMarker <= IN_MUTED_CYCLE) {
       // All inputs have been ordered and their stack markers have been removed.
@@ -809,46 +752,39 @@ MediaStreamGraphImpl::UpdateStreamOrder(
 
 void
 MediaStreamGraphImpl::RecomputeBlocking(GraphTime aEndBlockingDecisions)
 {
   bool blockingDecisionsWillChange = false;
 
   STREAM_LOG(PR_LOG_DEBUG+1, ("Media graph %p computing blocking for time %f",
                               this, MediaTimeToSeconds(CurrentDriver()->StateComputedTime())));
-  nsTArray<MediaStream*>* runningAndSuspendedPair[2];
-  runningAndSuspendedPair[0] = &mStreams;
-  runningAndSuspendedPair[1] = &mSuspendedStreams;
+  for (uint32_t i = 0; i < mStreams.Length(); ++i) {
+    MediaStream* stream = mStreams[i];
+    if (!stream->mInBlockingSet) {
+      // Compute a partition of the streams containing 'stream' such that we can
+      // compute the blocking status of each subset independently.
+      nsAutoTArray<MediaStream*,10> streamSet;
+      AddBlockingRelatedStreamsToSet(&streamSet, stream);
 
-  for (uint32_t array = 0; array < 2; array++) {
-    for (uint32_t i = 0; i < (*runningAndSuspendedPair[array]).Length(); ++i) {
-      MediaStream* stream = (*runningAndSuspendedPair[array])[i];
-      if (!stream->mInBlockingSet) {
-        // Compute a partition of the streams containing 'stream' such that we
-        // can
-        // compute the blocking status of each subset independently.
-        nsAutoTArray<MediaStream*, 10> streamSet;
-        AddBlockingRelatedStreamsToSet(&streamSet, stream);
-
-        GraphTime end;
-        for (GraphTime t = CurrentDriver()->StateComputedTime();
-             t < aEndBlockingDecisions; t = end) {
-          end = GRAPH_TIME_MAX;
-          RecomputeBlockingAt(streamSet, t, aEndBlockingDecisions, &end);
-          if (end < GRAPH_TIME_MAX) {
-            blockingDecisionsWillChange = true;
-          }
+      GraphTime end;
+      for (GraphTime t = CurrentDriver()->StateComputedTime();
+           t < aEndBlockingDecisions; t = end) {
+        end = GRAPH_TIME_MAX;
+        RecomputeBlockingAt(streamSet, t, aEndBlockingDecisions, &end);
+        if (end < GRAPH_TIME_MAX) {
+          blockingDecisionsWillChange = true;
         }
       }
+    }
 
-      GraphTime end;
-      stream->mBlocked.GetAt(IterationEnd(), &end);
-      if (end < GRAPH_TIME_MAX) {
-        blockingDecisionsWillChange = true;
-      }
+    GraphTime end;
+    stream->mBlocked.GetAt(IterationEnd(), &end);
+    if (end < GRAPH_TIME_MAX) {
+      blockingDecisionsWillChange = true;
     }
   }
   STREAM_LOG(PR_LOG_DEBUG+1, ("Media graph %p computed blocking for interval %f to %f",
                               this, MediaTimeToSeconds(CurrentDriver()->StateComputedTime()),
                               MediaTimeToSeconds(aEndBlockingDecisions)));
 
   CurrentDriver()->UpdateStateComputedTime(aEndBlockingDecisions);
 
@@ -1053,16 +989,24 @@ MediaStreamGraphImpl::PlayAudio(MediaStr
     AudioSegment* audio = track->Get<AudioSegment>();
     AudioSegment output;
 
     // offset and audioOutput.mLastTickWritten can differ by at most one sample,
     // because of the rounding issue. We track that to ensure we don't skip a
     // sample. One sample may be played twice, but this should not happen
     // again during an unblocked sequence of track samples.
     StreamTime offset = GraphTimeToStreamTime(aStream, aFrom);
+    if (audioOutput.mLastTickWritten &&
+        audioOutput.mLastTickWritten != offset) {
+      // If there is a global underrun of the MSG, this property won't hold, and
+      // we reset the sample count tracking.
+      if (offset - audioOutput.mLastTickWritten == 1) {
+        offset = audioOutput.mLastTickWritten;
+      }
+    }
 
     // We don't update aStream->mBufferStartTime here to account for time spent
     // blocked. Instead, we'll update it in UpdateCurrentTimeForStreams after
     // the blocked period has completed. But we do need to make sure we play
     // from the right offsets in the stream buffer, even if we've already
     // written silence for some amount of blocked time after the current time.
     GraphTime t = aFrom;
     while (ticksNeeded) {
@@ -1084,49 +1028,37 @@ MediaStreamGraphImpl::PlayAudio(MediaStr
         output.InsertNullDataAtStart(toWrite);
         ticksWritten += toWrite;
         STREAM_LOG(PR_LOG_DEBUG+1, ("MediaStream %p writing %ld blocking-silence samples for %f to %f (%ld to %ld)\n",
                                     aStream, toWrite, MediaTimeToSeconds(t), MediaTimeToSeconds(end),
                                     offset, offset + toWrite));
       } else {
         StreamTime endTicksNeeded = offset + toWrite;
         StreamTime endTicksAvailable = audio->GetDuration();
+        STREAM_LOG(PR_LOG_DEBUG+1, ("MediaStream %p writing %ld samples for %f to %f (samples %ld to %ld)\n",
+                                     aStream, toWrite, MediaTimeToSeconds(t), MediaTimeToSeconds(end),
+                                     offset, endTicksNeeded));
 
         if (endTicksNeeded <= endTicksAvailable) {
-          STREAM_LOG(PR_LOG_DEBUG + 1,
-                     ("MediaStream %p writing %ld samples for %f to %f "
-                      "(samples %ld to %ld)\n",
-                      aStream, toWrite, MediaTimeToSeconds(t),
-                      MediaTimeToSeconds(end), offset, endTicksNeeded));
           output.AppendSlice(*audio, offset, endTicksNeeded);
           ticksWritten += toWrite;
           offset = endTicksNeeded;
         } else {
           // MOZ_ASSERT(track->IsEnded(), "Not enough data, and track not ended.");
           // If we are at the end of the track, maybe write the remaining
           // samples, and pad with/output silence.
           if (endTicksNeeded > endTicksAvailable &&
               offset < endTicksAvailable) {
             output.AppendSlice(*audio, offset, endTicksAvailable);
-            STREAM_LOG(PR_LOG_DEBUG + 1,
-                       ("MediaStream %p writing %ld samples for %f to %f "
-                        "(samples %ld to %ld)\n",
-                        aStream, toWrite, MediaTimeToSeconds(t),
-                        MediaTimeToSeconds(end), offset, endTicksNeeded));
             uint32_t available = endTicksAvailable - offset;
             ticksWritten += available;
             toWrite -= available;
             offset = endTicksAvailable;
           }
           output.AppendNullData(toWrite);
-          STREAM_LOG(PR_LOG_DEBUG + 1,
-                     ("MediaStream %p writing %ld padding slsamples for %f to "
-                      "%f (samples %ld to %ld)\n",
-                      aStream, toWrite, MediaTimeToSeconds(t),
-                      MediaTimeToSeconds(end), offset, endTicksNeeded));
           ticksWritten += toWrite;
         }
         output.ApplyVolume(volume);
       }
       t = end;
     }
     audioOutput.mLastTickWritten = offset;
 
@@ -1848,17 +1780,17 @@ MediaStreamGraphImpl::EnsureStableStateE
   mPostedRunInStableStateEvent = true;
   nsCOMPtr<nsIRunnable> event = new MediaStreamGraphStableStateRunnable(this, true);
   NS_DispatchToMainThread(event);
 }
 
 void
 MediaStreamGraphImpl::AppendMessage(ControlMessage* aMessage)
 {
-  MOZ_ASSERT(NS_IsMainThread(), "main thread only");
+  NS_ASSERTION(NS_IsMainThread(), "main thread only");
   NS_ASSERTION(!aMessage->GetStream() ||
                !aMessage->GetStream()->IsDestroyed(),
                "Stream already destroyed");
 
   if (mDetectedNotRunning &&
       mLifecycleState > LIFECYCLE_WAITING_FOR_MAIN_THREAD_CLEANUP) {
     // The graph control loop is not running and main thread cleanup has
     // happened. From now on we can't append messages to mCurrentTaskMessageQueue,
@@ -2207,56 +2139,16 @@ MediaStream::ChangeExplicitBlockerCount(
   // stream has been destroyed since then.
   if (mMainThreadDestroyed) {
     return;
   }
   GraphImpl()->AppendMessage(new Message(this, aDelta));
 }
 
 void
-MediaStream::BlockStreamIfNeeded()
-{
-  class Message : public ControlMessage {
-  public:
-    explicit Message(MediaStream* aStream) : ControlMessage(aStream)
-    { }
-    virtual void Run()
-    {
-      mStream->BlockStreamIfNeededImpl(
-          mStream->GraphImpl()->CurrentDriver()->StateComputedTime());
-    }
-  };
-
-  if (mMainThreadDestroyed) {
-    return;
-  }
-  GraphImpl()->AppendMessage(new Message(this));
-}
-
-void
-MediaStream::UnblockStreamIfNeeded()
-{
-  class Message : public ControlMessage {
-  public:
-    explicit Message(MediaStream* aStream) : ControlMessage(aStream)
-    { }
-    virtual void Run()
-    {
-      mStream->UnblockStreamIfNeededImpl(
-          mStream->GraphImpl()->CurrentDriver()->StateComputedTime());
-    }
-  };
-
-  if (mMainThreadDestroyed) {
-    return;
-  }
-  GraphImpl()->AppendMessage(new Message(this));
-}
-
-void
 MediaStream::AddListenerImpl(already_AddRefed<MediaStreamListener> aListener)
 {
   MediaStreamListener* listener = *mListeners.AppendElement() = aListener;
   listener->NotifyBlockingChanged(GraphImpl(),
     mNotifiedBlocked ? MediaStreamListener::BLOCKED : MediaStreamListener::UNBLOCKED);
   if (mNotifiedFinished) {
     listener->NotifyEvent(GraphImpl(), MediaStreamListener::EVENT_FINISHED);
   }
@@ -3127,319 +3019,46 @@ MediaStreamGraph::CreateTrackUnionStream
 
 AudioNodeExternalInputStream*
 MediaStreamGraph::CreateAudioNodeExternalInputStream(AudioNodeEngine* aEngine, TrackRate aSampleRate)
 {
   MOZ_ASSERT(NS_IsMainThread());
   if (!aSampleRate) {
     aSampleRate = aEngine->NodeMainThread()->Context()->SampleRate();
   }
-  AudioNodeExternalInputStream* stream = new AudioNodeExternalInputStream(
-      aEngine, aSampleRate, aEngine->NodeMainThread()->Context()->Id());
+  AudioNodeExternalInputStream* stream = new AudioNodeExternalInputStream(aEngine, aSampleRate);
   NS_ADDREF(stream);
   MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(this);
   stream->SetGraphImpl(graph);
   graph->AppendMessage(new CreateMessage(stream));
   return stream;
 }
 
 AudioNodeStream*
 MediaStreamGraph::CreateAudioNodeStream(AudioNodeEngine* aEngine,
                                         AudioNodeStreamKind aKind,
                                         TrackRate aSampleRate)
 {
   MOZ_ASSERT(NS_IsMainThread());
   if (!aSampleRate) {
     aSampleRate = aEngine->NodeMainThread()->Context()->SampleRate();
   }
-  // MediaRecorders use an AudioNodeStream, but no AudioNode
-  AudioNode* node = aEngine->NodeMainThread();
-  dom::AudioContext::AudioContextId contextIdForStream = node ? node->Context()->Id() :
-                                                                NO_AUDIO_CONTEXT;
-  AudioNodeStream* stream = new AudioNodeStream(aEngine, aKind, aSampleRate,
-                                                contextIdForStream);
+  AudioNodeStream* stream = new AudioNodeStream(aEngine, aKind, aSampleRate);
   NS_ADDREF(stream);
   MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(this);
   stream->SetGraphImpl(graph);
   if (aEngine->HasNode()) {
     stream->SetChannelMixingParametersImpl(aEngine->NodeMainThread()->ChannelCount(),
                                            aEngine->NodeMainThread()->ChannelCountModeValue(),
                                            aEngine->NodeMainThread()->ChannelInterpretationValue());
   }
   graph->AppendMessage(new CreateMessage(stream));
   return stream;
 }
 
-class GraphStartedRunnable final : public nsRunnable
-{
-public:
-  GraphStartedRunnable(AudioNodeStream* aStream, MediaStreamGraph* aGraph)
-  : mStream(aStream)
-  , mGraph(aGraph)
-  { }
-
-  NS_IMETHOD Run() {
-    mGraph->NotifyWhenGraphStarted(mStream);
-    return NS_OK;
-  }
-
-private:
-  nsRefPtr<AudioNodeStream> mStream;
-  MediaStreamGraph* mGraph;
-};
-
-void
-MediaStreamGraph::NotifyWhenGraphStarted(AudioNodeStream* aStream)
-{
-  class GraphStartedNotificationControlMessage : public ControlMessage
-  {
-  public:
-    explicit GraphStartedNotificationControlMessage(AudioNodeStream* aStream)
-      : ControlMessage(aStream)
-    {
-    }
-    virtual void Run()
-    {
-      // This runs on the graph thread, so when this runs, and the current
-      // driver is an AudioCallbackDriver, we know the audio hardware is
-      // started. If not, we are going to switch soon, keep reposting this
-      // ControlMessage.
-      MediaStreamGraphImpl* graphImpl = mStream->GraphImpl();
-      if (graphImpl->CurrentDriver()->AsAudioCallbackDriver()) {
-        nsCOMPtr<nsIRunnable> event = new dom::StateChangeTask(
-            mStream->AsAudioNodeStream(), nullptr, AudioContextState::Running);
-        NS_DispatchToMainThread(event);
-      } else {
-        nsCOMPtr<nsIRunnable> event = new GraphStartedRunnable(
-            mStream->AsAudioNodeStream(), mStream->Graph());
-        NS_DispatchToMainThread(event);
-      }
-    }
-    virtual void RunDuringShutdown()
-    {
-      MOZ_ASSERT(false, "We should be reviving the graph?");
-    }
-  };
-
-  MediaStreamGraphImpl* graphImpl = static_cast<MediaStreamGraphImpl*>(this);
-  graphImpl->AppendMessage(new GraphStartedNotificationControlMessage(aStream));
-}
-
-void
-MediaStreamGraphImpl::ResetVisitedStreamState()
-{
-  // Reset the visited/consumed/blocked state of the streams.
-  nsTArray<MediaStream*>* runningAndSuspendedPair[2];
-  runningAndSuspendedPair[0] = &mStreams;
-  runningAndSuspendedPair[1] = &mSuspendedStreams;
-
-  for (uint32_t array = 0; array < 2; array++) {
-    for (uint32_t i = 0; i < runningAndSuspendedPair[array]->Length(); ++i) {
-      ProcessedMediaStream* ps =
-        (*runningAndSuspendedPair[array])[i]->AsProcessedStream();
-      if (ps) {
-        ps->mCycleMarker = NOT_VISITED;
-        ps->mIsConsumed = false;
-        ps->mInBlockingSet = false;
-      }
-    }
-  }
-}
-
-void
-MediaStreamGraphImpl::StreamSetForAudioContext(dom::AudioContext::AudioContextId aAudioContextId,
-                                  mozilla::LinkedList<MediaStream>& aStreamSet)
-{
-   nsTArray<MediaStream*>* runningAndSuspendedPair[2];
-   runningAndSuspendedPair[0] = &mStreams;
-   runningAndSuspendedPair[1] = &mSuspendedStreams;
-
-  for (uint32_t array = 0; array < 2; array++) {
-    for (uint32_t i = 0; i < runningAndSuspendedPair[array]->Length(); ++i) {
-      MediaStream* stream = (*runningAndSuspendedPair[array])[i];
-      if (aAudioContextId == stream->AudioContextId()) {
-        aStreamSet.insertFront(stream);
-      }
-    }
-  }
-}
-
-void
-MediaStreamGraphImpl::MoveStreams(AudioContextOperation aAudioContextOperation,
-                                  mozilla::LinkedList<MediaStream>& aStreamSet)
-{
-  // For our purpose, Suspend and Close are equivalent: we want to remove the
-  // streams from the set of streams that are going to be processed.
-  nsTArray<MediaStream*>& from =
-    aAudioContextOperation == AudioContextOperation::Resume ? mSuspendedStreams
-                                                            : mStreams;
-  nsTArray<MediaStream*>& to =
-    aAudioContextOperation == AudioContextOperation::Resume ? mStreams
-                                                            : mSuspendedStreams;
-
-  MediaStream* stream;
-  while ((stream = aStreamSet.getFirst())) {
-    // It is posible to not find the stream here, if there has been two
-    // suspend/resume/close calls in a row.
-    auto i = from.IndexOf(stream);
-    if (i != from.NoIndex) {
-      from.RemoveElementAt(i);
-      to.AppendElement(stream);
-    }
-
-    // If streams got added during a period where an AudioContext was suspended,
-    // set their buffer start time to the appropriate value now:
-    if (aAudioContextOperation == AudioContextOperation::Resume &&
-        stream->mBufferStartTime == START_TIME_DELAYED) {
-      stream->mBufferStartTime = IterationEnd();
-    }
-
-    stream->remove();
-  }
-  STREAM_LOG(PR_LOG_DEBUG, ("Moving streams between suspended and running"
-      "state: mStreams: %d, mSuspendedStreams: %d\n", mStreams.Length(),
-      mSuspendedStreams.Length()));
-#ifdef DEBUG
-  // The intersection of the two arrays should be null.
-  for (uint32_t i = 0; i < mStreams.Length(); i++) {
-    for (uint32_t j = 0; j < mSuspendedStreams.Length(); j++) {
-      MOZ_ASSERT(
-        mStreams[i] != mSuspendedStreams[j],
-        "The suspended stream set and running stream set are not disjoint.");
-    }
-  }
-#endif
-}
-
-void
-MediaStreamGraphImpl::AudioContextOperationCompleted(MediaStream* aStream,
-                                                     void* aPromise,
-                                                     AudioContextOperation aOperation)
-{
-  // This can be called from the thread created to do cubeb operation, or the
-  // MSG thread. The pointers passed back here are refcounted, so are still
-  // alive.
-  MonitorAutoLock lock(mMonitor);
-
-  AudioContextState state;
-  switch (aOperation) {
-    case Suspend: state = AudioContextState::Suspended; break;
-    case Resume: state = AudioContextState::Running; break;
-    case Close: state = AudioContextState::Closed; break;
-    default: MOZ_CRASH("Not handled.");
-  }
-
-  nsCOMPtr<nsIRunnable> event = new dom::StateChangeTask(
-      aStream->AsAudioNodeStream(), aPromise, state);
-  NS_DispatchToMainThread(event);
-}
-
-void
-MediaStreamGraphImpl::ApplyAudioContextOperationImpl(AudioNodeStream* aStream,
-                                               AudioContextOperation aOperation,
-                                               void* aPromise)
-{
-  MOZ_ASSERT(CurrentDriver()->OnThread());
-  mozilla::LinkedList<MediaStream> streamSet;
-
-  SetStreamOrderDirty();
-
-  ResetVisitedStreamState();
-
-  StreamSetForAudioContext(aStream->AudioContextId(), streamSet);
-
-  MoveStreams(aOperation, streamSet);
-  MOZ_ASSERT(!streamSet.getFirst(),
-      "Streams should be removed from the list after having been moved.");
-
-  // If we have suspended the last AudioContext, and we don't have other
-  // streams that have audio, this graph will automatically switch to a
-  // SystemCallbackDriver, because it can't find a MediaStream that has an audio
-  // track. When resuming, force switching to an AudioCallbackDriver. It would
-  // have happened at the next iteration anyways, but doing this now save
-  // some time.
-  if (aOperation == AudioContextOperation::Resume) {
-    if (!CurrentDriver()->AsAudioCallbackDriver()) {
-      AudioCallbackDriver* driver = new AudioCallbackDriver(this);
-      driver->EnqueueStreamAndPromiseForOperation(aStream, aPromise, aOperation);
-      mMixer.AddCallback(driver);
-      CurrentDriver()->SwitchAtNextIteration(driver);
-    } else {
-      // We are resuming a context, but we are already using an
-      // AudioCallbackDriver, we can resolve the promise now.
-      AudioContextOperationCompleted(aStream, aPromise, aOperation);
-    }
-  }
-  // Close, suspend: check if we are going to switch to a
-  // SystemAudioCallbackDriver, and pass the promise to the AudioCallbackDriver
-  // if that's the case, so it can notify the content.
-  // This is the same logic as in UpdateStreamOrder, but it's simpler to have it
-  // here as well so we don't have to store the Promise(s) on the Graph.
-  if (aOperation != AudioContextOperation::Resume) {
-    bool audioTrackPresent = false;
-    for (uint32_t i = 0; i < mStreams.Length(); ++i) {
-      MediaStream* stream = mStreams[i];
-      if (stream->AsAudioNodeStream()) {
-        audioTrackPresent = true;
-      }
-      for (StreamBuffer::TrackIter tracks(stream->GetStreamBuffer(), MediaSegment::AUDIO);
-          !tracks.IsEnded(); tracks.Next()) {
-        audioTrackPresent = true;
-      }
-    }
-    if (!audioTrackPresent && CurrentDriver()->AsAudioCallbackDriver()) {
-      CurrentDriver()->AsAudioCallbackDriver()->
-        EnqueueStreamAndPromiseForOperation(aStream, aPromise, aOperation);
-
-      SystemClockDriver* driver = new SystemClockDriver(this);
-      CurrentDriver()->SwitchAtNextIteration(driver);
-    } else {
-      // We are closing or suspending an AudioContext, but something else is
-      // using the audio stream, we can resolve the promise now.
-      AudioContextOperationCompleted(aStream, aPromise, aOperation);
-    }
-  }
-}
-
-void
-MediaStreamGraph::ApplyAudioContextOperation(AudioNodeStream* aNodeStream,
-                                             AudioContextOperation aOperation,
-                                             void* aPromise)
-{
-  class AudioContextOperationControlMessage : public ControlMessage
-  {
-  public:
-    AudioContextOperationControlMessage(AudioNodeStream* aStream,
-                                        AudioContextOperation aOperation,
-                                        void* aPromise)
-      : ControlMessage(aStream)
-      , mAudioContextOperation(aOperation)
-      , mPromise(aPromise)
-    {
-    }
-    virtual void Run()
-    {
-      mStream->GraphImpl()->ApplyAudioContextOperationImpl(
-        mStream->AsAudioNodeStream(), mAudioContextOperation, mPromise);
-    }
-    virtual void RunDuringShutdown()
-    {
-      MOZ_ASSERT(false, "We should be reviving the graph?");
-    }
-
-  private:
-    AudioContextOperation mAudioContextOperation;
-    void* mPromise;
-  };
-
-  MediaStreamGraphImpl* graphImpl = static_cast<MediaStreamGraphImpl*>(this);
-  graphImpl->AppendMessage(
-    new AudioContextOperationControlMessage(aNodeStream, aOperation, aPromise));
-}
-
 bool
 MediaStreamGraph::IsNonRealtime() const
 {
   const MediaStreamGraphImpl* impl = static_cast<const MediaStreamGraphImpl*>(this);
   MediaStreamGraphImpl* graph;
 
   return !gGraphs.Get(impl->AudioChannel(), &graph) || graph != impl;
 }
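The bulk of the MediaStreamGraph.cpp removals is the suspend/resume plumbing: MoveStreams() shuffles streams between mStreams and mSuspendedStreams, and ApplyAudioContextOperationImpl() decides whether the content-visible promise resolves immediately or waits for a driver switch. A condensed sketch of that decision, simplified from the removed code (the mMixer bookkeeping is omitted):

    if (aOperation == AudioContextOperation::Resume) {
      if (!CurrentDriver()->AsAudioCallbackDriver()) {
        // Audio hardware must start first: park the promise on the new
        // driver; it is resolved on AsyncCubebOperation::INIT.
        AudioCallbackDriver* driver = new AudioCallbackDriver(this);
        driver->EnqueueStreamAndPromiseForOperation(aStream, aPromise, aOperation);
        CurrentDriver()->SwitchAtNextIteration(driver);
      } else {
        AudioContextOperationCompleted(aStream, aPromise, aOperation); // now
      }
    } else { // Suspend or Close
      if (!audioTrackPresent && CurrentDriver()->AsAudioCallbackDriver()) {
        // Hardware is about to stop: the promise is resolved on SHUTDOWN.
        CurrentDriver()->AsAudioCallbackDriver()->
          EnqueueStreamAndPromiseForOperation(aStream, aPromise, aOperation);
        CurrentDriver()->SwitchAtNextIteration(new SystemClockDriver(this));
      } else {
        AudioContextOperationCompleted(aStream, aPromise, aOperation); // now
      }
    }
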
--- a/dom/media/MediaStreamGraph.h
+++ b/dom/media/MediaStreamGraph.h
@@ -17,17 +17,16 @@
 #include "VideoSegment.h"
 #include "MainThreadUtils.h"
 #include "MediaTaskQueue.h"
 #include "nsAutoRef.h"
 #include "GraphDriver.h"
 #include <speex/speex_resampler.h>
 #include "mozilla/dom/AudioChannelBinding.h"
 #include "DOMMediaStream.h"
-#include "AudioContext.h"
 
 class nsIRunnable;
 
 template <>
 class nsAutoRefTraits<SpeexResamplerState> : public nsPointerRefTraits<SpeexResamplerState>
 {
   public:
   static void Release(SpeexResamplerState* aState) { speex_resampler_destroy(aState); }
@@ -314,17 +313,16 @@ class CameraPreviewMediaStream;
  * for those objects in arbitrary order and the MediaStreamGraph has to be able
  * to handle this.
  */
 class MediaStream : public mozilla::LinkedListElement<MediaStream> {
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaStream)
 
   explicit MediaStream(DOMMediaStream* aWrapper);
-  virtual dom::AudioContext::AudioContextId AudioContextId() const { return 0; }
 
 protected:
   // Protected destructor, to discourage deletion outside of Release():
   virtual ~MediaStream()
   {
     MOZ_COUNT_DTOR(MediaStream);
     NS_ASSERTION(mMainThreadDestroyed, "Should have been destroyed already");
     NS_ASSERTION(mMainThreadListeners.IsEmpty(),
@@ -361,18 +359,16 @@ public:
   // Since a stream can be played multiple ways, we need to be able to
   // play to multiple VideoFrameContainers.
   // Only the first enabled video track is played.
   virtual void AddVideoOutput(VideoFrameContainer* aContainer);
   virtual void RemoveVideoOutput(VideoFrameContainer* aContainer);
   // Explicitly block. Useful for example if a media element is pausing
   // and we need to stop its stream emitting its buffered data.
   virtual void ChangeExplicitBlockerCount(int32_t aDelta);
-  void BlockStreamIfNeeded();
-  void UnblockStreamIfNeeded();
   // Events will be dispatched by calling methods of aListener.
   virtual void AddListener(MediaStreamListener* aListener);
   virtual void RemoveListener(MediaStreamListener* aListener);
   // A disabled track has video replaced by black, and audio replaced by
   // silence.
   void SetTrackEnabled(TrackID aTrackID, bool aEnabled);
   // Events will be dispatched by calling methods of aListener. It is the
   // responsibility of the caller to remove aListener before it is destroyed.
@@ -464,32 +460,16 @@ public:
   void RemoveVideoOutputImpl(VideoFrameContainer* aContainer)
   {
     mVideoOutputs.RemoveElement(aContainer);
   }
   void ChangeExplicitBlockerCountImpl(GraphTime aTime, int32_t aDelta)
   {
     mExplicitBlockerCount.SetAtAndAfter(aTime, mExplicitBlockerCount.GetAt(aTime) + aDelta);
   }
-  void BlockStreamIfNeededImpl(GraphTime aTime)
-  {
-    bool blocked = mExplicitBlockerCount.GetAt(aTime) > 0;
-    if (blocked) {
-      return;
-    }
-    ChangeExplicitBlockerCountImpl(aTime, 1);
-  }
-  void UnblockStreamIfNeededImpl(GraphTime aTime)
-  {
-    bool blocked = mExplicitBlockerCount.GetAt(aTime) > 0;
-    if (!blocked) {
-      return;
-    }
-    ChangeExplicitBlockerCountImpl(aTime, -1);
-  }
   void AddListenerImpl(already_AddRefed<MediaStreamListener> aListener);
   void RemoveListenerImpl(MediaStreamListener* aListener);
   void RemoveAllListenersImpl();
   virtual void SetTrackEnabledImpl(TrackID aTrackID, bool aEnabled);
   /**
    * Returns true when this stream requires the contents of its inputs even if
    * its own outputs are not being consumed. This is used to signal inputs to
    * this stream that they are being consumed; when they're not being consumed,
@@ -1242,31 +1222,16 @@ public:
   AudioNodeStream* CreateAudioNodeStream(AudioNodeEngine* aEngine,
                                          AudioNodeStreamKind aKind,
                                          TrackRate aSampleRate = 0);
 
   AudioNodeExternalInputStream*
   CreateAudioNodeExternalInputStream(AudioNodeEngine* aEngine,
                                      TrackRate aSampleRate = 0);
 
-  /* From the main thread, ask the MSG to send back an event when the graph
-   * thread is running, and audio is being processed. */
-  void NotifyWhenGraphStarted(AudioNodeStream* aNodeStream);
-  /* From the main thread, suspend, resume or close an AudioContext.
-   * aNodeStream is the stream of the DestinationNode of the AudioContext.
-   *
-   * This can possibly pause the graph thread, releasing system resources, if
-   * all streams have been suspended/closed.
-   *
-   * When the operation is complete, aPromise is resolved.
-   */
-  void ApplyAudioContextOperation(AudioNodeStream* aNodeStream,
-                                  dom::AudioContextOperation aState,
-                                  void * aPromise);
-
   bool IsNonRealtime() const;
   /**
    * Start processing non-realtime for a specific number of ticks.
    */
   void StartNonRealtimeProcessing(uint32_t aTicksToProcess);
 
   /**
    * Media graph thread only.
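MediaStreamGraph.h above drops the public entry points NotifyWhenGraphStarted() and ApplyAudioContextOperation(). A hypothetical caller-side sketch; the real caller lives in the dom/media/webaudio/AudioContext.cpp hunks, which this page does not show, and destinationStream and promise are assumed locals. Note that the removed signature carried the promise as a void*:

    // Illustrative only, under the assumptions stated above.
    graph->ApplyAudioContextOperation(destinationStream->AsAudioNodeStream(),
                                      dom::AudioContextOperation::Suspend,
                                      promise); // resolved once streams stop
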
--- a/dom/media/MediaStreamGraphImpl.h
+++ b/dom/media/MediaStreamGraphImpl.h
@@ -243,59 +243,16 @@ public:
   /**
    * Update "have enough data" flags in aStream.
    */
   void UpdateBufferSufficiencyState(SourceMediaStream* aStream);
   /**
    * Mark aStream and all its inputs (recursively) as consumed.
    */
   static void MarkConsumed(MediaStream* aStream);
-
-  /**
-   * Given the Id of an AudioContext, return the set of all MediaStreams that
-   * are part of this context.
-   */
-  void StreamSetForAudioContext(dom::AudioContext::AudioContextId aAudioContextId,
-                                mozilla::LinkedList<MediaStream>& aStreamSet);
-
-  /**
-   * Called when a suspend/resume/close operation has been completed, on the
-   * graph thread.
-   */
-  void AudioContextOperationCompleted(MediaStream* aStream,
-                                      void* aPromise,
-                                      dom::AudioContextOperation aOperation);
-
-  /**
-   * Apply an AudioContext operation (suspend/resume/close) on the graph
-   * thread.
-   */
-  void ApplyAudioContextOperationImpl(AudioNodeStream* aStream,
-                                      dom::AudioContextOperation aOperation,
-                                      void* aPromise);
-
-  /*
-   * Move streams from mStreams to mSuspendedStreams when suspending/closing an
-   * AudioContext, or the inverse when resuming it.
-   */
-  void MoveStreams(dom::AudioContextOperation aAudioContextOperation,
-                   mozilla::LinkedList<MediaStream>& aStreamSet);
-
-  /*
-   * Reset some per-stream state before suspending or resuming
-   * streams.
-   */
-  void ResetVisitedStreamState();
-
-  /*
-   * True if a stream is suspended, that is, not in mStreams but in
-   * mSuspendedStreams.
-   */
-  bool StreamSuspended(MediaStream* aStream);
-
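// A plausible one-line implementation of StreamSuspended, inferred from the
// comment above (an assumption, not the backed-out body):
//
//   bool MediaStreamGraphImpl::StreamSuspended(MediaStream* aStream)
//   {
//     return mSuspendedStreams.Contains(aStream);
//   }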
   /**
    * Sort mStreams so that every stream not in a cycle is after any streams
    * it depends on, and every stream in a cycle is marked as being in a cycle.
    * Also sets mIsConsumed on every stream.
    */
   void UpdateStreamOrder();
   /**
    * Compute the blocking states of streams from mStateComputedTime
@@ -406,20 +363,17 @@ public:
   void FinishStream(MediaStream* aStream);
   /**
    * Compute how much stream data we would like to buffer for aStream.
    */
   StreamTime GetDesiredBufferEnd(MediaStream* aStream);
   /**
    * Returns true when there are no active streams.
    */
-  bool IsEmpty()
-  {
-    return mStreams.IsEmpty() && mSuspendedStreams.IsEmpty() && mPortCount == 0;
-  }
+  bool IsEmpty() { return mStreams.IsEmpty() && mPortCount == 0; }
 
   // For use by control messages, on graph thread only.
   /**
    * Identify which graph update index we are currently processing.
    */
   int64_t GetProcessingGraphUpdateIndex() { return mProcessingGraphUpdateIndex; }
   /**
   * Add aStream to the graph and initialize its graph-specific state.
@@ -529,23 +483,16 @@ public:
 
   /**
    * The graph keeps a reference to each stream.
    * References are maintained manually to simplify reordering without
    * unnecessary thread-safe refcount changes.
    */
   nsTArray<MediaStream*> mStreams;
   /**
-   * This stores MediaStreams that are part of suspended AudioContexts.
-   * mStreams and mSuspendedStreams are disjoint sets: a stream is either
-   * suspended or not. Suspended streams are not ordered in UpdateStreamOrder,
-   * and therefore do no processing.
-   */
-  nsTArray<MediaStream*> mSuspendedStreams;
-  /**
    * Streams from mFirstCycleBreaker to the end of mStreams produce output
    * before they receive input.  They correspond to DelayNodes that are in
    * cycles.
    */
   uint32_t mFirstCycleBreaker;
   /**
    * Date of the last time we updated the main thread with the graph state.
    */
--- a/dom/media/TrackUnionStream.cpp
+++ b/dom/media/TrackUnionStream.cpp
@@ -19,20 +19,20 @@
 #include "prlog.h"
 #include "mozilla/Attributes.h"
 #include "TrackUnionStream.h"
 #include "ImageContainer.h"
 #include "AudioChannelService.h"
 #include "AudioNodeEngine.h"
 #include "AudioNodeStream.h"
 #include "AudioNodeExternalInputStream.h"
-#include "webaudio/MediaStreamAudioDestinationNode.h"
 #include <algorithm>
 #include "DOMMediaStream.h"
 #include "GeckoProfiler.h"
+#include "mozilla/unused.h"
 #ifdef MOZ_WEBRTC
 #include "AudioOutputObserver.h"
 #endif
 
 using namespace mozilla::layers;
 using namespace mozilla::dom;
 using namespace mozilla::gfx;
 
@@ -270,26 +270,22 @@ TrackUnionStream::TrackUnionStream(DOMMe
       if (interval.mInputIsBlocked) {
         // Maybe the input track ended?
         segment->AppendNullData(ticks);
         STREAM_LOG(PR_LOG_DEBUG+1, ("TrackUnionStream %p appending %lld ticks of null data to track %d",
                    this, (long long)ticks, outputTrack->GetID()));
       } else if (InMutedCycle()) {
         segment->AppendNullData(ticks);
       } else {
-        if (GraphImpl()->StreamSuspended(source)) {
-          segment->AppendNullData(aTo - aFrom);
-        } else {
-          MOZ_ASSERT(outputTrack->GetEnd() == GraphTimeToStreamTime(interval.mStart),
-                     "Samples missing");
-          StreamTime inputStart = source->GraphTimeToStreamTime(interval.mStart);
-          segment->AppendSlice(*aInputTrack->GetSegment(),
-                               std::min(inputTrackEndPoint, inputStart),
-                               std::min(inputTrackEndPoint, inputEnd));
-        }
+        MOZ_ASSERT(outputTrack->GetEnd() == GraphTimeToStreamTime(interval.mStart),
+                   "Samples missing");
+        StreamTime inputStart = source->GraphTimeToStreamTime(interval.mStart);
+        segment->AppendSlice(*aInputTrack->GetSegment(),
+                             std::min(inputTrackEndPoint, inputStart),
+                             std::min(inputTrackEndPoint, inputEnd));
       }
       ApplyTrackDisabling(outputTrack->GetID(), segment);
       for (uint32_t j = 0; j < mListeners.Length(); ++j) {
         MediaStreamListener* l = mListeners[j];
         l->NotifyQueuedTrackChanges(Graph(), outputTrack->GetID(),
                                     outputStart, 0, *segment);
       }
       outputTrack->GetSegment()->AppendFrom(segment);
--- a/dom/media/webaudio/AudioContext.cpp
+++ b/dom/media/webaudio/AudioContext.cpp
@@ -4,18 +4,18 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "AudioContext.h"
 
 #include "nsPIDOMWindow.h"
 #include "mozilla/ErrorResult.h"
 #include "mozilla/dom/AnalyserNode.h"
+#include "mozilla/dom/AudioContextBinding.h"
 #include "mozilla/dom/HTMLMediaElement.h"
-#include "mozilla/dom/AudioContextBinding.h"
 #include "mozilla/dom/OfflineAudioContextBinding.h"
 #include "mozilla/dom/OwningNonNull.h"
 #include "MediaStreamGraph.h"
 #include "AudioChannelService.h"
 #include "AudioDestinationNode.h"
 #include "AudioBufferSourceNode.h"
 #include "AudioBuffer.h"
 #include "GainNode.h"
@@ -37,20 +37,16 @@
 #include "OscillatorNode.h"
 #include "nsNetUtil.h"
 #include "AudioStream.h"
 #include "mozilla/dom/Promise.h"
 
 namespace mozilla {
 namespace dom {
 
-// 0 is a special value that MediaStreams use to denote they are not part of an
-// AudioContext.
-static dom::AudioContext::AudioContextId gAudioContextId = 1;
-
 NS_IMPL_CYCLE_COLLECTION_CLASS(AudioContext)
 
 NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(AudioContext)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mDestination)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mListener)
   if (!tmp->mIsStarted) {
     NS_IMPL_CYCLE_COLLECTION_UNLINK(mActiveNodes)
   }
@@ -84,25 +80,22 @@ static float GetSampleRateForAudioContex
 
 AudioContext::AudioContext(nsPIDOMWindow* aWindow,
                            bool aIsOffline,
                            AudioChannel aChannel,
                            uint32_t aNumberOfChannels,
                            uint32_t aLength,
                            float aSampleRate)
   : DOMEventTargetHelper(aWindow)
-  , mId(gAudioContextId++)
   , mSampleRate(GetSampleRateForAudioContext(aIsOffline, aSampleRate))
-  , mAudioContextState(AudioContextState::Suspended)
   , mNumberOfChannels(aNumberOfChannels)
   , mNodeCount(0)
   , mIsOffline(aIsOffline)
   , mIsStarted(!aIsOffline)
   , mIsShutDown(false)
-  , mCloseCalled(false)
 {
   aWindow->AddAudioContext(this);
 
   // Note: AudioDestinationNode needs an AudioContext that must already be
   // bound to the window.
   mDestination = new AudioDestinationNode(this, aIsOffline, aChannel,
                                           aNumberOfChannels, aLength, aSampleRate);
   // We skip calling SetIsOnlyNodeForContext and the creation of the
@@ -199,32 +192,19 @@ AudioContext::Constructor(const GlobalOb
                                                    aLength,
                                                    aSampleRate);
 
   RegisterWeakMemoryReporter(object);
 
   return object.forget();
 }
 
-bool AudioContext::CheckClosed(ErrorResult& aRv)
+already_AddRefed<AudioBufferSourceNode>
+AudioContext::CreateBufferSource()
 {
-  if (mAudioContextState == AudioContextState::Closed) {
-    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
-    return true;
-  }
-  return false;
-}
-
-already_AddRefed<AudioBufferSourceNode>
-AudioContext::CreateBufferSource(ErrorResult& aRv)
-{
-  if (CheckClosed(aRv)) {
-    return nullptr;
-  }
-
   nsRefPtr<AudioBufferSourceNode> bufferNode =
     new AudioBufferSourceNode(this);
   return bufferNode.forget();
 }
 
 already_AddRefed<AudioBuffer>
 AudioContext::CreateBuffer(JSContext* aJSContext, uint32_t aNumberOfChannels,
                            uint32_t aLength, float aSampleRate,
@@ -262,20 +242,16 @@ bool IsValidBufferSize(uint32_t aBufferS
 already_AddRefed<MediaStreamAudioDestinationNode>
 AudioContext::CreateMediaStreamDestination(ErrorResult& aRv)
 {
   if (mIsOffline) {
     aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
     return nullptr;
   }
 
-  if (CheckClosed(aRv)) {
-    return nullptr;
-  }
-
   nsRefPtr<MediaStreamAudioDestinationNode> node =
       new MediaStreamAudioDestinationNode(this);
   return node.forget();
 }
 
 already_AddRefed<ScriptProcessorNode>
 AudioContext::CreateScriptProcessor(uint32_t aBufferSize,
                                     uint32_t aNumberOfInputChannels,
@@ -285,44 +261,32 @@ AudioContext::CreateScriptProcessor(uint
   if ((aNumberOfInputChannels == 0 && aNumberOfOutputChannels == 0) ||
       aNumberOfInputChannels > WebAudioUtils::MaxChannelCount ||
       aNumberOfOutputChannels > WebAudioUtils::MaxChannelCount ||
       !IsValidBufferSize(aBufferSize)) {
     aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
     return nullptr;
   }
 
-  if (CheckClosed(aRv)) {
-    return nullptr;
-  }
-
   nsRefPtr<ScriptProcessorNode> scriptProcessor =
     new ScriptProcessorNode(this, aBufferSize, aNumberOfInputChannels,
                             aNumberOfOutputChannels);
   return scriptProcessor.forget();
 }
 
 already_AddRefed<AnalyserNode>
-AudioContext::CreateAnalyser(ErrorResult& aRv)
+AudioContext::CreateAnalyser()
 {
-  if (CheckClosed(aRv)) {
-    return nullptr;
-  }
-
   nsRefPtr<AnalyserNode> analyserNode = new AnalyserNode(this);
   return analyserNode.forget();
 }
 
 already_AddRefed<StereoPannerNode>
-AudioContext::CreateStereoPanner(ErrorResult& aRv)
+AudioContext::CreateStereoPanner()
 {
-  if (CheckClosed(aRv)) {
-    return nullptr;
-  }
-
   nsRefPtr<StereoPannerNode> stereoPannerNode = new StereoPannerNode(this);
   return stereoPannerNode.forget();
 }
 
 already_AddRefed<MediaElementAudioSourceNode>
 AudioContext::CreateMediaElementSource(HTMLMediaElement& aMediaElement,
                                        ErrorResult& aRv)
 {
@@ -331,21 +295,16 @@ AudioContext::CreateMediaElementSource(H
     return nullptr;
   }
 #ifdef MOZ_EME
   if (aMediaElement.ContainsRestrictedContent()) {
     aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
     return nullptr;
   }
 #endif
-
-  if (CheckClosed(aRv)) {
-    return nullptr;
-  }
-
   nsRefPtr<DOMMediaStream> stream = aMediaElement.MozCaptureStream(aRv);
   if (aRv.Failed()) {
     return nullptr;
   }
   nsRefPtr<MediaElementAudioSourceNode> mediaElementAudioSourceNode =
     new MediaElementAudioSourceNode(this, stream);
   return mediaElementAudioSourceNode.forget();
 }
@@ -353,154 +312,108 @@ AudioContext::CreateMediaElementSource(H
 already_AddRefed<MediaStreamAudioSourceNode>
 AudioContext::CreateMediaStreamSource(DOMMediaStream& aMediaStream,
                                       ErrorResult& aRv)
 {
   if (mIsOffline) {
     aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
     return nullptr;
   }
-
-  if (CheckClosed(aRv)) {
-    return nullptr;
-  }
-
   nsRefPtr<MediaStreamAudioSourceNode> mediaStreamAudioSourceNode =
     new MediaStreamAudioSourceNode(this, &aMediaStream);
   return mediaStreamAudioSourceNode.forget();
 }
 
 already_AddRefed<GainNode>
-AudioContext::CreateGain(ErrorResult& aRv)
+AudioContext::CreateGain()
 {
-  if (CheckClosed(aRv)) {
-    return nullptr;
-  }
-
   nsRefPtr<GainNode> gainNode = new GainNode(this);
   return gainNode.forget();
 }
 
 already_AddRefed<WaveShaperNode>
-AudioContext::CreateWaveShaper(ErrorResult& aRv)
+AudioContext::CreateWaveShaper()
 {
-  if (CheckClosed(aRv)) {
-    return nullptr;
-  }
-
   nsRefPtr<WaveShaperNode> waveShaperNode = new WaveShaperNode(this);
   return waveShaperNode.forget();
 }
 
 already_AddRefed<DelayNode>
 AudioContext::CreateDelay(double aMaxDelayTime, ErrorResult& aRv)
 {
-  if (CheckClosed(aRv)) {
-    return nullptr;
-  }
-
   if (aMaxDelayTime > 0. && aMaxDelayTime < 180.) {
     nsRefPtr<DelayNode> delayNode = new DelayNode(this, aMaxDelayTime);
     return delayNode.forget();
   }
-
   aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
   return nullptr;
 }
 
 already_AddRefed<PannerNode>
-AudioContext::CreatePanner(ErrorResult& aRv)
+AudioContext::CreatePanner()
 {
-  if (CheckClosed(aRv)) {
-    return nullptr;
-  }
-
   nsRefPtr<PannerNode> pannerNode = new PannerNode(this);
   mPannerNodes.PutEntry(pannerNode);
   return pannerNode.forget();
 }
 
 already_AddRefed<ConvolverNode>
-AudioContext::CreateConvolver(ErrorResult& aRv)
+AudioContext::CreateConvolver()
 {
-  if (CheckClosed(aRv)) {
-    return nullptr;
-  }
-
   nsRefPtr<ConvolverNode> convolverNode = new ConvolverNode(this);
   return convolverNode.forget();
 }
 
 already_AddRefed<ChannelSplitterNode>
 AudioContext::CreateChannelSplitter(uint32_t aNumberOfOutputs, ErrorResult& aRv)
 {
   if (aNumberOfOutputs == 0 ||
       aNumberOfOutputs > WebAudioUtils::MaxChannelCount) {
     aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
     return nullptr;
   }
 
-  if (CheckClosed(aRv)) {
-    return nullptr;
-  }
-
   nsRefPtr<ChannelSplitterNode> splitterNode =
     new ChannelSplitterNode(this, aNumberOfOutputs);
   return splitterNode.forget();
 }
 
 already_AddRefed<ChannelMergerNode>
 AudioContext::CreateChannelMerger(uint32_t aNumberOfInputs, ErrorResult& aRv)
 {
   if (aNumberOfInputs == 0 ||
       aNumberOfInputs > WebAudioUtils::MaxChannelCount) {
     aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
     return nullptr;
   }
 
-  if (CheckClosed(aRv)) {
-    return nullptr;
-  }
-
   nsRefPtr<ChannelMergerNode> mergerNode =
     new ChannelMergerNode(this, aNumberOfInputs);
   return mergerNode.forget();
 }
 
 already_AddRefed<DynamicsCompressorNode>
-AudioContext::CreateDynamicsCompressor(ErrorResult& aRv)
+AudioContext::CreateDynamicsCompressor()
 {
-  if (CheckClosed(aRv)) {
-    return nullptr;
-  }
-
   nsRefPtr<DynamicsCompressorNode> compressorNode =
     new DynamicsCompressorNode(this);
   return compressorNode.forget();
 }
 
 already_AddRefed<BiquadFilterNode>
-AudioContext::CreateBiquadFilter(ErrorResult& aRv)
+AudioContext::CreateBiquadFilter()
 {
-  if (CheckClosed(aRv)) {
-    return nullptr;
-  }
-
   nsRefPtr<BiquadFilterNode> filterNode =
     new BiquadFilterNode(this);
   return filterNode.forget();
 }
 
 already_AddRefed<OscillatorNode>
-AudioContext::CreateOscillator(ErrorResult& aRv)
+AudioContext::CreateOscillator()
 {
-  if (CheckClosed(aRv)) {
-    return nullptr;
-  }
-
   nsRefPtr<OscillatorNode> oscillatorNode =
     new OscillatorNode(this);
   return oscillatorNode.forget();
 }
 
 already_AddRefed<PeriodicWave>
 AudioContext::CreatePeriodicWave(const Float32Array& aRealData,
                                  const Float32Array& aImagData,
@@ -678,249 +591,32 @@ AudioContext::Shutdown()
   mActiveNodes.Clear();
 
   // For offline contexts, we can destroy the MediaStreamGraph at this point.
   if (mIsOffline && mDestination) {
     mDestination->OfflineShutdown();
   }
 }
 
-AudioContextState AudioContext::State() const
-{
-  return mAudioContextState;
-}
-
-StateChangeTask::StateChangeTask(AudioContext* aAudioContext,
-                                 void* aPromise,
-                                 AudioContextState aNewState)
-  : mAudioContext(aAudioContext)
-  , mPromise(aPromise)
-  , mAudioNodeStream(nullptr)
-  , mNewState(aNewState)
+void
+AudioContext::Suspend()
 {
-  MOZ_ASSERT(NS_IsMainThread(),
-             "This constructor should be used from the main thread.");
-}
-
-StateChangeTask::StateChangeTask(AudioNodeStream* aStream,
-                                 void* aPromise,
-                                 AudioContextState aNewState)
-  : mAudioContext(nullptr)
-  , mPromise(aPromise)
-  , mAudioNodeStream(aStream)
-  , mNewState(aNewState)
-{
-  MOZ_ASSERT(!NS_IsMainThread(),
-             "This constructor should be used from the graph thread.");
-}
-
-NS_IMETHODIMP
-StateChangeTask::Run()
-{
-  MOZ_ASSERT(NS_IsMainThread());
-
-  if (!mAudioContext && !mAudioNodeStream) {
-    return NS_OK;
+  MediaStream* ds = DestinationStream();
+  if (ds) {
+    ds->ChangeExplicitBlockerCount(1);
   }
-  if (mAudioNodeStream) {
-    AudioNode* node = mAudioNodeStream->Engine()->NodeMainThread();
-    if (!node) {
-      return NS_OK;
-    }
-    mAudioContext = node->Context();
-    if (!mAudioContext) {
-      return NS_OK;
-    }
-  }
-
-  mAudioContext->OnStateChanged(mPromise, mNewState);
-  // We can't call Release() on the AudioContext on the MSG thread, so we
-  // unref it here, on the main thread.
-  mAudioContext = nullptr;
-
-  return NS_OK;
 }
 
-/* This runnable fires the "statechange" event. */
-class OnStateChangeTask final : public nsRunnable
-{
-public:
-  explicit OnStateChangeTask(AudioContext* aAudioContext)
-    : mAudioContext(aAudioContext)
-  {}
-
-  NS_IMETHODIMP
-  Run() override
-  {
-    nsCOMPtr<nsPIDOMWindow> parent = do_QueryInterface(mAudioContext->GetParentObject());
-    if (!parent) {
-      return NS_ERROR_FAILURE;
-    }
-
-    nsIDocument* doc = parent->GetExtantDoc();
-    if (!doc) {
-      return NS_ERROR_FAILURE;
-    }
-
-    return nsContentUtils::DispatchTrustedEvent(doc,
-                                static_cast<DOMEventTargetHelper*>(mAudioContext),
-                                NS_LITERAL_STRING("statechange"),
-                                false, false);
-  }
-
-private:
-  nsRefPtr<AudioContext> mAudioContext;
-};
-
-
-
 void
-AudioContext::OnStateChanged(void* aPromise, AudioContextState aNewState)
+AudioContext::Resume()
 {
-  MOZ_ASSERT(NS_IsMainThread());
-
-  MOZ_ASSERT((mAudioContextState == AudioContextState::Suspended &&
-              aNewState == AudioContextState::Running)   ||
-             (mAudioContextState == AudioContextState::Running   &&
-              aNewState == AudioContextState::Suspended) ||
-             (mAudioContextState == AudioContextState::Running   &&
-              aNewState == AudioContextState::Closed)    ||
-             (mAudioContextState == AudioContextState::Suspended &&
-              aNewState == AudioContextState::Closed)    ||
-             (mAudioContextState == aNewState),
-             "Invalid AudioContextState transition");
-
-  MOZ_ASSERT(
-    mIsOffline || aPromise || aNewState == AudioContextState::Running,
-    "We should have a promise here if this is a real-time AudioContext."
-    "Or this is the first time we switch to \"running\".");
-
-  if (aPromise) {
-    Promise* promise = reinterpret_cast<Promise*>(aPromise);
-    promise->MaybeResolve(JS::UndefinedHandleValue);
-    DebugOnly<bool> rv = mPromiseGripArray.RemoveElement(promise);
-    MOZ_ASSERT(rv, "Promise wasn't in the grip array?");
-  }
-
-  if (mAudioContextState != aNewState) {
-    nsRefPtr<OnStateChangeTask> onStateChangeTask =
-      new OnStateChangeTask(this);
-    NS_DispatchToMainThread(onStateChangeTask);
-  }
-
-  mAudioContextState = aNewState;
-}
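// For reference, the transitions the assertion above allows:
//   suspended -> running, running -> suspended,
//   running -> closed, suspended -> closed,
//   or any state to itself.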
-
-already_AddRefed<Promise>
-AudioContext::Suspend(ErrorResult& aRv)
-{
-  nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject());
-  nsRefPtr<Promise> promise;
-  promise = Promise::Create(parentObject, aRv);
-  if (aRv.Failed()) {
-    return nullptr;
-  }
-  if (mIsOffline) {
-    promise->MaybeReject(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
-    return promise.forget();
-  }
-
-  if (mAudioContextState == AudioContextState::Closed ||
-      mCloseCalled) {
-    promise->MaybeReject(NS_ERROR_DOM_INVALID_STATE_ERR);
-    return promise.forget();
-  }
-
-  if (mAudioContextState == AudioContextState::Suspended) {
-    promise->MaybeResolve(JS::UndefinedHandleValue);
-    return promise.forget();
-  }
-
   MediaStream* ds = DestinationStream();
   if (ds) {
-    ds->BlockStreamIfNeeded();
-  }
-
-  mPromiseGripArray.AppendElement(promise);
-  Graph()->ApplyAudioContextOperation(DestinationStream()->AsAudioNodeStream(),
-                                      AudioContextOperation::Suspend, promise);
-
-  return promise.forget();
-}
-
-already_AddRefed<Promise>
-AudioContext::Resume(ErrorResult& aRv)
-{
-  nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject());
-  nsRefPtr<Promise> promise;
-  promise = Promise::Create(parentObject, aRv);
-  if (aRv.Failed()) {
-    return nullptr;
-  }
-
-  if (mIsOffline) {
-    promise->MaybeReject(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
-    return promise.forget();
-  }
-
-  if (mAudioContextState == AudioContextState::Closed ||
-      mCloseCalled) {
-    promise->MaybeReject(NS_ERROR_DOM_INVALID_STATE_ERR);
-    return promise.forget();
-  }
-
-  if (mAudioContextState == AudioContextState::Running) {
-    promise->MaybeResolve(JS::UndefinedHandleValue);
-    return promise.forget();
-  }
-
-  MediaStream* ds = DestinationStream();
-  if (ds) {
-    ds->UnblockStreamIfNeeded();
+    ds->ChangeExplicitBlockerCount(-1);
   }
-
-  mPromiseGripArray.AppendElement(promise);
-  Graph()->ApplyAudioContextOperation(DestinationStream()->AsAudioNodeStream(),
-                                      AudioContextOperation::Resume, promise);
-
-  return promise.forget();
-}
-
-already_AddRefed<Promise>
-AudioContext::Close(ErrorResult& aRv)
-{
-  nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject());
-  nsRefPtr<Promise> promise;
-  promise = Promise::Create(parentObject, aRv);
-  if (aRv.Failed()) {
-    return nullptr;
-  }
-
-  if (mIsOffline) {
-    promise->MaybeReject(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
-    return promise.forget();
-  }
-
-  if (mAudioContextState == AudioContextState::Closed) {
-    promise->MaybeResolve(NS_ERROR_DOM_INVALID_STATE_ERR);
-    return promise.forget();
-  }
-
-  mCloseCalled = true;
-
-  mPromiseGripArray.AppendElement(promise);
-  Graph()->ApplyAudioContextOperation(DestinationStream()->AsAudioNodeStream(),
-                                      AudioContextOperation::Close, promise);
-
-  MediaStream* ds = DestinationStream();
-  if (ds) {
-    ds->BlockStreamIfNeeded();
-  }
-
-  return promise.forget();
 }
 
 void
 AudioContext::UpdateNodeCount(int32_t aDelta)
 {
   bool firstNode = mNodeCount == 0;
   mNodeCount += aDelta;
   MOZ_ASSERT(mNodeCount >= 0);
@@ -951,19 +647,16 @@ AudioContext::StartRendering(ErrorResult
   if (mIsStarted) {
     aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
     return nullptr;
   }
 
   mIsStarted = true;
   nsRefPtr<Promise> promise = Promise::Create(parentObject, aRv);
   mDestination->StartRendering(promise);
-
-  OnStateChanged(nullptr, AudioContextState::Running);
-
   return promise.forget();
 }
 
 void
 AudioContext::Mute() const
 {
   MOZ_ASSERT(!mIsOffline);
   if (mDestination) {
--- a/dom/media/webaudio/AudioContext.h
+++ b/dom/media/webaudio/AudioContext.h
@@ -30,22 +30,19 @@
 class nsPIDOMWindow;
 
 namespace mozilla {
 
 class DOMMediaStream;
 class ErrorResult;
 class MediaStream;
 class MediaStreamGraph;
-class AudioNodeEngine;
-class AudioNodeStream;
 
 namespace dom {
 
-enum class AudioContextState : uint32_t;
 class AnalyserNode;
 class AudioBuffer;
 class AudioBufferSourceNode;
 class AudioDestinationNode;
 class AudioListener;
 class AudioNode;
 class BiquadFilterNode;
 class ChannelMergerNode;
@@ -62,65 +59,41 @@ class MediaStreamAudioSourceNode;
 class OscillatorNode;
 class PannerNode;
 class ScriptProcessorNode;
 class StereoPannerNode;
 class WaveShaperNode;
 class PeriodicWave;
 class Promise;
 
-/* This runnable allows the MSG to notify the main thread when audio is actually
- * flowing */
-class StateChangeTask final : public nsRunnable
-{
-public:
-  /* This constructor should be used when this event is sent from the main
-   * thread. */
-  StateChangeTask(AudioContext* aAudioContext, void* aPromise, AudioContextState aNewState);
-
-  /* This constructor should be used when this event is sent from the audio
-   * thread. */
-  StateChangeTask(AudioNodeStream* aStream, void* aPromise, AudioContextState aNewState);
-
-  NS_IMETHOD Run() override;
-
-private:
-  nsRefPtr<AudioContext> mAudioContext;
-  void* mPromise;
-  nsRefPtr<AudioNodeStream> mAudioNodeStream;
-  AudioContextState mNewState;
-};
-
-enum AudioContextOperation { Suspend, Resume, Close };
-
 class AudioContext final : public DOMEventTargetHelper,
                            public nsIMemoryReporter
 {
   AudioContext(nsPIDOMWindow* aParentWindow,
                bool aIsOffline,
                AudioChannel aChannel,
                uint32_t aNumberOfChannels = 0,
                uint32_t aLength = 0,
                float aSampleRate = 0.0f);
   ~AudioContext();
 
 public:
-  typedef uint64_t AudioContextId;
-
   NS_DECL_ISUPPORTS_INHERITED
   NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(AudioContext,
                                            DOMEventTargetHelper)
   MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)
 
   nsPIDOMWindow* GetParentObject() const
   {
     return GetOwner();
   }
 
   void Shutdown(); // idempotent
+  void Suspend();
+  void Resume();
 
   virtual JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override;
 
   using DOMEventTargetHelper::DispatchTrustedEvent;
 
   // Constructor for regular AudioContext
   static already_AddRefed<AudioContext>
   Constructor(const GlobalObject& aGlobal, ErrorResult& aRv);
@@ -146,96 +119,76 @@ public:
     return mDestination;
   }
 
   float SampleRate() const
   {
     return mSampleRate;
   }
 
-  AudioContextId Id() const
-  {
-    return mId;
-  }
-
   double CurrentTime() const;
 
   AudioListener* Listener();
 
-  AudioContextState State() const;
-  // These three methods return a promise to content that is resolved when a
-  // (possibly long) operation completes on the MSG (and possibly other)
-  // thread(s). To avoid having to match calls with their asynchronous results
-  // when the operation completes, we keep a reference to the promises on the
-  // main thread, and then send the promise pointers down to the MSG thread as
-  // a void* (to make it very clear that the pointer is merely to be treated as
-  // an ID). Once back on the main thread, we can resolve or reject the promise
-  // by casting it back to a `Promise*`, asserting we're back on the main
-  // thread and removing the reference we added.
-  already_AddRefed<Promise> Suspend(ErrorResult& aRv);
-  already_AddRefed<Promise> Resume(ErrorResult& aRv);
-  already_AddRefed<Promise> Close(ErrorResult& aRv);
-  IMPL_EVENT_HANDLER(statechange)
-
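// A minimal sketch of that round trip, stitched together from the backed-out
// AudioContext.cpp (illustrative ordering, not a verbatim excerpt):
//
//   mPromiseGripArray.AppendElement(promise);           // main thread: grip
//   Graph()->ApplyAudioContextOperation(stream, op,
//                                       promise);       // crosses as void*
//   // ... later, back on the main thread (OnStateChanged):
//   Promise* p = reinterpret_cast<Promise*>(aPromise);
//   p->MaybeResolve(JS::UndefinedHandleValue);
//   mPromiseGripArray.RemoveElement(p);                 // drop the grip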
-  already_AddRefed<AudioBufferSourceNode> CreateBufferSource(ErrorResult& aRv);
+  already_AddRefed<AudioBufferSourceNode> CreateBufferSource();
 
   already_AddRefed<AudioBuffer>
   CreateBuffer(JSContext* aJSContext, uint32_t aNumberOfChannels,
                uint32_t aLength, float aSampleRate,
                ErrorResult& aRv);
 
   already_AddRefed<MediaStreamAudioDestinationNode>
   CreateMediaStreamDestination(ErrorResult& aRv);
 
   already_AddRefed<ScriptProcessorNode>
   CreateScriptProcessor(uint32_t aBufferSize,
                         uint32_t aNumberOfInputChannels,
                         uint32_t aNumberOfOutputChannels,
                         ErrorResult& aRv);
 
   already_AddRefed<StereoPannerNode>
-  CreateStereoPanner(ErrorResult& aRv);
+  CreateStereoPanner();
 
   already_AddRefed<AnalyserNode>
-  CreateAnalyser(ErrorResult& aRv);
+  CreateAnalyser();
 
   already_AddRefed<GainNode>
-  CreateGain(ErrorResult& aRv);
+  CreateGain();
 
   already_AddRefed<WaveShaperNode>
-  CreateWaveShaper(ErrorResult& aRv);
+  CreateWaveShaper();
 
   already_AddRefed<MediaElementAudioSourceNode>
   CreateMediaElementSource(HTMLMediaElement& aMediaElement, ErrorResult& aRv);
   already_AddRefed<MediaStreamAudioSourceNode>
   CreateMediaStreamSource(DOMMediaStream& aMediaStream, ErrorResult& aRv);
 
   already_AddRefed<DelayNode>
   CreateDelay(double aMaxDelayTime, ErrorResult& aRv);
 
   already_AddRefed<PannerNode>
-  CreatePanner(ErrorResult& aRv);
+  CreatePanner();
 
   already_AddRefed<ConvolverNode>
-  CreateConvolver(ErrorResult& aRv);
+  CreateConvolver();
 
   already_AddRefed<ChannelSplitterNode>
   CreateChannelSplitter(uint32_t aNumberOfOutputs, ErrorResult& aRv);
 
   already_AddRefed<ChannelMergerNode>
   CreateChannelMerger(uint32_t aNumberOfInputs, ErrorResult& aRv);
 
   already_AddRefed<DynamicsCompressorNode>
-  CreateDynamicsCompressor(ErrorResult& aRv);
+  CreateDynamicsCompressor();
 
   already_AddRefed<BiquadFilterNode>
-  CreateBiquadFilter(ErrorResult& aRv);
+  CreateBiquadFilter();
 
   already_AddRefed<OscillatorNode>
-  CreateOscillator(ErrorResult& aRv);
+  CreateOscillator();
 
   already_AddRefed<PeriodicWave>
   CreatePeriodicWave(const Float32Array& aRealData, const Float32Array& aImagData,
                      ErrorResult& aRv);
 
   already_AddRefed<Promise>
   DecodeAudioData(const ArrayBuffer& aBuffer,
                   const Optional<OwningNonNull<DecodeSuccessCallback> >& aSuccessCallback,
@@ -286,18 +239,16 @@ public:
     return aTime - ExtraCurrentTime();
   }
 
   double StreamTimeToDOMTime(double aTime) const
   {
     return aTime + ExtraCurrentTime();
   }
 
-  void OnStateChanged(void* aPromise, AudioContextState aNewState);
-
   IMPL_EVENT_HANDLER(mozinterruptbegin)
   IMPL_EVENT_HANDLER(mozinterruptend)
 
 private:
   /**
    * Returns the amount of extra time added to the current time of the
    * AudioDestinationNode's MediaStream to get this AudioContext's currentTime.
    * Must be subtracted from all DOM API parameter times that are on the same
@@ -310,49 +261,35 @@ private:
   void ShutdownDecoder();
 
   size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
   NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                             nsISupports* aData, bool aAnonymize) override;
 
   friend struct ::mozilla::WebAudioDecodeJob;
 
-  bool CheckClosed(ErrorResult& aRv);
-
 private:
-  // Each AudioContext has an id that is passed down to the MediaStreams that
-  // back the AudioNodes, so we can easily compute the set of all the
-  // MediaStreams for a given context, on the MediaStreamGraph side.
-  const AudioContextId mId;
   // Note that it's important for mSampleRate to be initialized before
   // mDestination, as mDestination's constructor needs to access it!
   const float mSampleRate;
-  AudioContextState mAudioContextState;
   nsRefPtr<AudioDestinationNode> mDestination;
   nsRefPtr<AudioListener> mListener;
   nsTArray<nsRefPtr<WebAudioDecodeJob> > mDecodeJobs;
-  // This array is used to keep the suspend/resume/close promises alive until
-  // they are resolved, so we can safely pass them across threads.
-  nsTArray<nsRefPtr<Promise>> mPromiseGripArray;
   // See RegisterActiveNode.  These will keep the AudioContext alive while it
   // is rendering and the window remains alive.
   nsTHashtable<nsRefPtrHashKey<AudioNode> > mActiveNodes;
   // Hashsets containing all the PannerNodes, to compute the doppler shift.
   // These are weak pointers.
   nsTHashtable<nsPtrHashKey<PannerNode> > mPannerNodes;
   // Number of channels passed in the OfflineAudioContext ctor.
   uint32_t mNumberOfChannels;
   // Number of nodes that currently exist for this AudioContext
   int32_t mNodeCount;
   bool mIsOffline;
   bool mIsStarted;
   bool mIsShutDown;
-  // Close has been called; reject subsequent suspend and resume calls.
-  bool mCloseCalled;
 };
 
-static const dom::AudioContext::AudioContextId NO_AUDIO_CONTEXT = 0;
-
 }
 }
 
 #endif
 
--- a/dom/media/webaudio/AudioDestinationNode.cpp
+++ b/dom/media/webaudio/AudioDestinationNode.cpp
@@ -1,16 +1,15 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim:set ts=2 sw=2 sts=2 et cindent: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "AudioDestinationNode.h"
-#include "AudioContext.h"
 #include "mozilla/dom/AudioDestinationNodeBinding.h"
 #include "mozilla/dom/ScriptSettings.h"
 #include "mozilla/Preferences.h"
 #include "mozilla/Services.h"
 #include "AudioChannelAgent.h"
 #include "AudioChannelService.h"
 #include "AudioNodeEngine.h"
 #include "AudioNodeStream.h"
@@ -172,21 +171,19 @@ public:
       return;
     }
     for (uint32_t i = 0; i < mInputChannels.Length(); ++i) {
       renderedBuffer->SetRawChannelContents(i, mInputChannels[i]);
     }
 
     aNode->ResolvePromise(renderedBuffer);
 
-    nsRefPtr<OnCompleteTask> onCompleteTask =
+    nsRefPtr<OnCompleteTask> task =
       new OnCompleteTask(context, renderedBuffer);
-    NS_DispatchToMainThread(onCompleteTask);
-
-    context->OnStateChanged(nullptr, AudioContextState::Closed);
+    NS_DispatchToMainThread(task);
   }
 
   virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override
   {
     size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf);
     amount += mInputChannels.SizeOfExcludingThis(aMallocSizeOf);
     return amount;
   }
@@ -365,20 +362,16 @@ AudioDestinationNode::AudioDestinationNo
                             new OfflineDestinationNodeEngine(this, aNumberOfChannels,
                                                              aLength, aSampleRate) :
                             static_cast<AudioNodeEngine*>(new DestinationNodeEngine(this));
 
   mStream = graph->CreateAudioNodeStream(engine, MediaStreamGraph::EXTERNAL_STREAM);
   mStream->AddMainThreadListener(this);
   mStream->AddAudioOutput(&gWebAudioOutputKey);
 
-  if (!aIsOffline) {
-    graph->NotifyWhenGraphStarted(mStream->AsAudioNodeStream());
-  }
-
   if (aChannel != AudioChannel::Normal) {
     ErrorResult rv;
     SetMozAudioChannelType(aChannel, rv);
   }
 }
 
 AudioDestinationNode::~AudioDestinationNode()
 {
--- a/dom/media/webaudio/AudioNodeExternalInputStream.cpp
+++ b/dom/media/webaudio/AudioNodeExternalInputStream.cpp
@@ -7,18 +7,18 @@
 #include "AudioNodeExternalInputStream.h"
 #include "AudioChannelFormat.h"
 #include "mozilla/dom/MediaStreamAudioSourceNode.h"
 
 using namespace mozilla::dom;
 
 namespace mozilla {
 
-AudioNodeExternalInputStream::AudioNodeExternalInputStream(AudioNodeEngine* aEngine, TrackRate aSampleRate, uint32_t aContextId)
-  : AudioNodeStream(aEngine, MediaStreamGraph::INTERNAL_STREAM, aSampleRate, aContextId)
+AudioNodeExternalInputStream::AudioNodeExternalInputStream(AudioNodeEngine* aEngine, TrackRate aSampleRate)
+  : AudioNodeStream(aEngine, MediaStreamGraph::INTERNAL_STREAM, aSampleRate)
 {
   MOZ_COUNT_CTOR(AudioNodeExternalInputStream);
 }
 
 AudioNodeExternalInputStream::~AudioNodeExternalInputStream()
 {
   MOZ_COUNT_DTOR(AudioNodeExternalInputStream);
 }
--- a/dom/media/webaudio/AudioNodeExternalInputStream.h
+++ b/dom/media/webaudio/AudioNodeExternalInputStream.h
@@ -15,17 +15,17 @@ namespace mozilla {
 /**
  * This is a MediaStream implementation that acts for a Web Audio node but
  * unlike other AudioNodeStreams, supports any kind of MediaStream as an
  * input --- handling any number of audio tracks and handling blocking of
  * the input MediaStream.
  */
 class AudioNodeExternalInputStream : public AudioNodeStream {
 public:
-  AudioNodeExternalInputStream(AudioNodeEngine* aEngine, TrackRate aSampleRate, uint32_t aContextId);
+  AudioNodeExternalInputStream(AudioNodeEngine* aEngine, TrackRate aSampleRate);
 protected:
   ~AudioNodeExternalInputStream();
 
 public:
   virtual void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) override;
 
 private:
   /**
--- a/dom/media/webaudio/AudioNodeStream.cpp
+++ b/dom/media/webaudio/AudioNodeStream.cpp
@@ -22,22 +22,20 @@ namespace mozilla {
  * for regular audio contexts, and the rate requested by the web content
  * for offline audio contexts.
  * Each chunk in the track is a single block of WEBAUDIO_BLOCK_SIZE samples.
  * Note: This must be a different value than MEDIA_STREAM_DEST_TRACK_ID
  */
 
 AudioNodeStream::AudioNodeStream(AudioNodeEngine* aEngine,
                                  MediaStreamGraph::AudioNodeStreamKind aKind,
-                                 TrackRate aSampleRate,
-                                 AudioContext::AudioContextId aContextId)
+                                 TrackRate aSampleRate)
   : ProcessedMediaStream(nullptr),
     mEngine(aEngine),
     mSampleRate(aSampleRate),
-    mAudioContextId(aContextId),
     mKind(aKind),
     mNumberOfInputChannels(2),
     mMarkAsFinishedAfterThisBlock(false),
     mAudioParamStream(false),
     mPassThrough(false)
 {
   MOZ_ASSERT(NS_IsMainThread());
   mChannelCountMode = ChannelCountMode::Max;
--- a/dom/media/webaudio/AudioNodeStream.h
+++ b/dom/media/webaudio/AudioNodeStream.h
@@ -42,18 +42,17 @@ public:
 
   typedef nsAutoTArray<AudioChunk, 1> OutputChunks;
 
   /**
    * Transfers ownership of aEngine to the new AudioNodeStream.
    */
   AudioNodeStream(AudioNodeEngine* aEngine,
                   MediaStreamGraph::AudioNodeStreamKind aKind,
-                  TrackRate aSampleRate,
-                  AudioContext::AudioContextId aContextId);
+                  TrackRate aSampleRate);
 
 protected:
   ~AudioNodeStream();
 
 public:
   // Control API
   /**
    * Sets a parameter that's a time relative to some stream's played time.
@@ -117,17 +116,16 @@ public:
   virtual bool IsIntrinsicallyConsumed() const override
   {
     return true;
   }
 
   // Any thread
   AudioNodeEngine* Engine() { return mEngine; }
   TrackRate SampleRate() const { return mSampleRate; }
-  AudioContext::AudioContextId AudioContextId() const override { return mAudioContextId; }
 
   /**
    * Convert a time in seconds on the destination stream to ticks
    * on this stream, including fractional position between ticks.
    */
   double FractionalTicksFromDestinationTime(AudioNodeStream* aDestination,
                                             double aSeconds);
   /**
@@ -144,17 +142,16 @@ public:
                                   StreamTime aPosition);
 
   size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override;
   size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override;
 
   void SizeOfAudioNodesIncludingThis(MallocSizeOf aMallocSizeOf,
                                      AudioNodeSizes& aUsage) const;
 
-
 protected:
   void AdvanceOutputSegment();
   void FinishOutput();
   void AccumulateInputChunk(uint32_t aInputIndex, const AudioChunk& aChunk,
                             AudioChunk* aBlock,
                             nsTArray<float>* aDownmixBuffer);
   void UpMixDownMixChunk(const AudioChunk* aChunk, uint32_t aOutputChannelCount,
                          nsTArray<const void*>& aOutputChannels,
@@ -164,21 +161,18 @@ protected:
   void ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex);
 
   // The engine that will generate output for this node.
   nsAutoPtr<AudioNodeEngine> mEngine;
   // The last block produced by this node.
   OutputChunks mLastChunks;
   // The stream's sampling rate
   const TrackRate mSampleRate;
-  // This is necessary to be able to find all the nodes for a given
-  // AudioContext. It is set on the main thread, in the constructor.
-  const AudioContext::AudioContextId mAudioContextId;
   // Whether this is an internal or external stream
-  const MediaStreamGraph::AudioNodeStreamKind mKind;
+  MediaStreamGraph::AudioNodeStreamKind mKind;
   // The number of input channels that this stream requires. 0 means don't care.
   uint32_t mNumberOfInputChannels;
   // The mixing modes
   ChannelCountMode mChannelCountMode;
   ChannelInterpretation mChannelInterpretation;
   // Whether the stream should be marked as finished as soon
   // as the current time range has been computed block by block.
   bool mMarkAsFinishedAfterThisBlock;
--- a/dom/media/webaudio/MediaStreamAudioSourceNode.h
+++ b/dom/media/webaudio/MediaStreamAudioSourceNode.h
@@ -30,17 +30,16 @@ public:
     switch (aIndex) {
     case ENABLE:
       mEnabled = !!aValue;
       break;
     default:
       NS_ERROR("MediaStreamAudioSourceNodeEngine bad parameter index");
     }
   }
-
 private:
   bool mEnabled;
 };
 
 class MediaStreamAudioSourceNode : public AudioNode,
                                    public DOMMediaStream::PrincipalChangeObserver
 {
 public:
--- a/dom/media/webaudio/moz.build
+++ b/dom/media/webaudio/moz.build
@@ -26,17 +26,16 @@ EXPORTS += [
     'AudioParamTimeline.h',
     'MediaBufferDecoder.h',
     'ThreeDPoint.h',
     'WebAudioUtils.h',
 ]
 
 EXPORTS.mozilla += [
     'FFTBlock.h',
-    'MediaStreamAudioDestinationNode.h',
 ]
 
 EXPORTS.mozilla.dom += [
     'AnalyserNode.h',
     'AudioBuffer.h',
     'AudioBufferSourceNode.h',
     'AudioContext.h',
     'AudioDestinationNode.h',
--- a/dom/media/webaudio/test/mochitest.ini
+++ b/dom/media/webaudio/test/mochitest.ini
@@ -39,17 +39,16 @@ support-files =
 [test_audioBufferSourceNodeNeutered.html]
 skip-if = (toolkit == 'android' && (processor == 'x86' || debug)) || os == 'win' # bug 1127845, bug 1138468
 [test_audioBufferSourceNodeNoStart.html]
 [test_audioBufferSourceNodeNullBuffer.html]
 [test_audioBufferSourceNodeOffset.html]
 skip-if = (toolkit == 'gonk') || (toolkit == 'android') || debug #bug 906752
 [test_audioBufferSourceNodePassThrough.html]
 [test_AudioContext.html]
-[test_audioContextSuspendResumeClose.html]
 [test_audioDestinationNode.html]
 [test_AudioListener.html]
 [test_audioParamExponentialRamp.html]
 [test_audioParamGain.html]
 [test_audioParamLinearRamp.html]
 [test_audioParamSetCurveAtTime.html]
 [test_audioParamSetCurveAtTimeZeroDuration.html]
 [test_audioParamSetTargetAtTime.html]
deleted file mode 100644
--- a/dom/media/webaudio/test/test_audioContextSuspendResumeClose.html
+++ /dev/null
@@ -1,393 +0,0 @@
-<!DOCTYPE HTML>
-<html>
-<head>
-  <title>Test suspend, resume and close methods of the AudioContext</title>
-  <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
-  <script type="text/javascript" src="webaudio.js"></script>
-  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
-</head>
-<body>
-<pre id="test">
-<script class="testbody" type="text/javascript">
-
-function tryToCreateNodeOnClosedContext(ctx) {
-  is(ctx.state, "closed", "The context is in closed state");
-
-  [ { name: "createBufferSource" },
-    { name: "createMediaStreamDestination",
-      onOfflineAudioContext: false},
-    { name: "createScriptProcessor" },
-    { name: "createStereoPanner" },
-    { name: "createAnalyser" },
-    { name: "createGain" },
-    { name: "createDelay" },
-    { name: "createBiquadFilter" },
-    { name: "createWaveShaper" },
-    { name: "createPanner" },
-    { name: "createConvolver" },
-    { name: "createChannelSplitter" },
-    { name: "createChannelMerger" },
-    { name: "createDynamicsCompressor" },
-    { name: "createOscillator" },
-    { name: "createMediaElementSource",
-      args: [new Audio()],
-      onOfflineAudioContext: false },
-    { name: "createMediaStreamSource",
-      args: [new Audio().mozCaptureStream()],
-      onOfflineAudioContext: false } ].forEach(function(e) {
-
-      if (e.onOfflineAudioContext == false &&
-          ctx instanceof OfflineAudioContext) {
-        return;
-      }
-
-      expectException(function() {
-        ctx[e.name].apply(ctx, e.args);
-      }, DOMException.INVALID_STATE_ERR);
-    });
-}
-
-function loadFile(url, callback) {
-  var xhr = new XMLHttpRequest();
-  xhr.open("GET", url, true);
-  xhr.responseType = "arraybuffer";
-  xhr.onload = function() {
-    callback(xhr.response);
-  };
-  xhr.send();
-}
-
-// createBuffer, createPeriodicWave and decodeAudioData should work on a context
-// that has `state` == "closed"
-function tryLegalOperationsOnClosedContext(ctx) {
-  is(ctx.state, "closed", "The context is in closed state");
-
-  [ { name: "createBuffer",
-      args: [1, 44100, 44100] },
-    { name: "createPeriodicWave",
-      args: [new Float32Array(10), new Float32Array(10)] }
-  ].forEach(function(e) {
-    expectNoException(function() {
-      ctx[e.name].apply(ctx, e.args);
-    });
-  });
-  loadFile("ting-44.1k-1ch.ogg", function(buf) {
-    ctx.decodeAudioData(buf).then(function(decodedBuf) {
-      ok(true, "decodeAudioData on a closed context should work, it did.")
-      finish();
-    }).catch(function(e){
-      ok(false, "decodeAudioData on a closed context should work, it did not");
-      finish();
-    });
-  });
-}
-
-// Test that MediaStreams that are the output of a suspended AudioContext
-// produce silence.
-// ac1 produces a sine fed to a MediaStreamAudioDestinationNode.
-// ac2 is connected to ac1 with a MediaStreamAudioSourceNode, and we check
-// that there is silence when ac1 is suspended.
-function testMultiContextOutput() {
-  var ac1 = new AudioContext(),
-      ac2 = new AudioContext();
-
-  var osc1 = ac1.createOscillator(),
-      mediaStreamDestination1 = ac1.createMediaStreamDestination();
-
-  var mediaStreamAudioSourceNode2 =
-    ac2.createMediaStreamSource(mediaStreamDestination1.stream),
-    sp2 = ac2.createScriptProcessor(),
-    suspendCalled = false,
-    silentBuffersInARow = 0;
-
-
-  sp2.onaudioprocess = function(e) {
-    if (!suspendCalled) {
-      ac1.suspend();
-      suspendCalled = true;
-    } else {
-      // Wait until the context that produces the tone is actually suspended.
-      // The second context can receive a small amount of data because of the
-      // buffering between the two contexts.
-      if (ac1.state == "suspended") {
-        var input = e.inputBuffer.getChannelData(0);
-        var silent = true;
-        for (var i = 0; i < input.length; i++) {
-          if (input[i] != 0.0) {
-            silent = false;
-          }
-        }
-
-        if (silent) {
-          silentBuffersInARow++;
-          if (silentBuffersInARow == 10) {
-            ok(true,
-                "MediaStreams produce silence when their input is blocked.");
-            sp2.onaudioprocess = null;
-            ac1.close();
-            ac2.close();
-            finish();
-          }
-        } else {
-          is(silentBuffersInARow, 0,
-              "No non silent buffer inbetween silent buffers.");
-        }
-      }
-    }
-  }
-
-  osc1.connect(mediaStreamDestination1);
-
-  mediaStreamAudioSourceNode2.connect(sp2);
-  osc1.start();
-}
-
-
-// Test that there is no buffering between contexts when connecting a running
-// AudioContext to a suspended AudioContext. Our ScriptProcessorNode does some
-// buffering internally, so we ensure this by using a very low frequency
-// sine, and observe that the phase has changed by a big enough margin.
-function testMultiContextInput() {
-  var ac1 = new AudioContext(),
-      ac2 = new AudioContext();
-
-  var osc1 = ac1.createOscillator(),
-      mediaStreamDestination1 = ac1.createMediaStreamDestination(),
-      sp1 = ac1.createScriptProcessor();
-
-  var mediaStreamAudioSourceNode2 =
-    ac2.createMediaStreamSource(mediaStreamDestination1.stream),
-    sp2 = ac2.createScriptProcessor(),
-    resumed = false,
-    suspended = false,
-    countEventOnFirstSP = true,
-    eventReceived = 0;
-
-
-  osc1.frequency.value = 0.0001;
-
-  // We keep a first ScriptProcessor to get a periodic callback, since we can't
-  // use setTimeout anymore.
-  sp1.onaudioprocess = function(e) {
-    if (countEventOnFirstSP) {
-      eventReceived++;
-    }
-    if (eventReceived > 3 && suspended) {
-      countEventOnFirstSP = false;
-      eventReceived = 0;
-      ac2.resume().then(function() {
-        resumed = true;
-      });
-    }
-  }
-
-  sp2.onaudioprocess = function(e) {
-    var inputBuffer = e.inputBuffer.getChannelData(0);
-    if (!resumed) {
-      // save the last value of the buffer before suspending.
-      sp2.value = inputBuffer[inputBuffer.length - 1];
-      ac2.suspend().then(function() {
-        suspended = true;
-      });
-    } else {
-      eventReceived++;
-      if (eventReceived == 3) {
-        var delta = Math.abs(inputBuffer[1] - sp2.value),
-            theoreticalIncrement = 2048 * 3 * Math.PI * 2 * osc1.frequency.value / ac1.sampleRate;
-        ok(delta >= theoreticalIncrement,
-            "Buffering did not occur when the context was suspended (delta:" + delta + " increment: " + theoreticalIncrement+")");
-        ac1.close();
-        ac2.close();
-        finish();
-      }
-    }
-  }
-
-  osc1.connect(mediaStreamDestination1);
-  osc1.connect(sp1);
-
-  mediaStreamAudioSourceNode2.connect(sp2);
-  osc1.start();
-}
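// (For reference, theoreticalIncrement above is samples-elapsed times
// radians-per-sample: (2048 * 3) * (2 * Math.PI * osc1.frequency.value /
// ac1.sampleRate), that is, three 2048-frame buffers of a sine advancing
// at the oscillator's frequency.)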
-
-// Test that ScriptProcessorNode's onaudioprocess doesn't get called while the
-// context is suspended/closed. It is possible that we get the handler called
-// exactly once after suspend, because the event has already been sent to the
-// event loop.
-function testScriptProcessNodeSuspended() {
-  var ac = new AudioContext();
-  var sp = ac.createScriptProcessor();
-  var remainingIterations = 3;
-  var afterResume = false;
-  sp.onaudioprocess = function() {
-    ok(ac.state == "running" || remainingIterations == 3, "If onaudioprocess is called, the context" +
-                              " must be running (was " + ac.state + ", remainingIterations:" + remainingIterations +")");
-    remainingIterations--;
-    if (!afterResume) {
-      if (remainingIterations == 0) {
-        ac.suspend().then(function() {
-          ac.resume().then(function() {
-            remainingIterations = 3;
-            afterResume = true;
-          });
-        });
-      }
-    } else {
-      ac.close().then(function() {
-        finish();
-      });
-    }
-  }
-  sp.connect(ac.destination);
-}
-
-// Take an AudioContext, make sure it switches to running when the audio starts
-// flowing, and then call suspend, resume and close on it, tracking its state.
-function testAudioContext() {
-  var ac = new AudioContext();
-  is(ac.state, "suspended", "AudioContext should start in suspended state.");
-  var stateTracker = {
-    previous: ac.state,
-     // no promise for the initial suspended -> running
-    initial: {  handler: false },
-    suspend: { promise: false, handler: false },
-    resume: { promise: false, handler: false },
-    close: { promise: false, handler: false }
-  };
-
-  function initialSuspendToRunning() {
-    ok(stateTracker.previous == "suspended" &&
-       ac.state == "running",
-       "AudioContext should switch to \"running\" when the audio hardware is" +
-       " ready.");
-
-    stateTracker.previous = ac.state;
-    ac.onstatechange = afterSuspend;
-    stateTracker.initial.handler = true;
-
-    ac.suspend().then(function() {
-      ok(!stateTracker.suspend.promise && !stateTracker.suspend.handler,
-        "Promise should be resolved before the callback, and only once.")
-      stateTracker.suspend.promise = true;
-    });
-  }
-
-  function afterSuspend() {
-    ok(stateTracker.previous == "running" &&
-       ac.state == "suspended",
-       "AudioContext should switch to \"suspend\" when the audio stream is" +
-       "suspended.");
-    ok(stateTracker.suspend.promise && !stateTracker.suspend.handler,
-        "Handler should be called after the callback, and only once");
-
-    stateTracker.suspend.handler = true;
-    stateTracker.previous = ac.state;
-    ac.onstatechange = afterResume;
-
-    ac.resume().then(function() {
-      ok(!stateTracker.resume.promise && !stateTracker.resume.handler,
-        "Promise should be called before the callback, and only once");
-      stateTracker.resume.promise = true;
-    });
-  }
-
-  function afterResume() {
-    ok(stateTracker.previous == "suspended" &&
-       ac.state == "running",
-   "AudioContext should switch to \"running\" when the audio stream resumes.");
-
-    ok(stateTracker.resume.promise && !stateTracker.resume.handler,
-       "Handler should be called after the callback, and only once");
-
-    stateTracker.resume.handler = true;
-    stateTracker.previous = ac.state;
-    ac.onstatechange = afterClose;
-
-    ac.close().then(function() {
-      ok(!stateTracker.close.promise && !stateTracker.close.handler,
-        "Promise should be called before the callback, and only once");
-      stateTracker.close.promise = true;
-      tryToCreateNodeOnClosedContext(ac);
-      tryLegalOperationsOnClosedContext(ac);
-    });
-  }
-
-  function afterClose() {
-    ok(stateTracker.previous == "running" &&
-       ac.state == "closed",
-       "AudioContext should switch to \"closed\" when the audio stream is" +
-       " closed.");
-    ok(stateTracker.close.promise && !stateTracker.close.handler,
-       "Handler should be called after the callback, and only once");
-  }
-
-  ac.onstatechange = initialSuspendToRunning;
-}
-
-function testOfflineAudioContext() {
-  var o = new OfflineAudioContext(1, 44100, 44100);
-  is(o.state, "suspended", "OfflineAudioContext should start in suspended state.");
-
-  expectRejectedPromise(o, "suspend", "NotSupportedError");
-  expectRejectedPromise(o, "resume", "NotSupportedError");
-  expectRejectedPromise(o, "close", "NotSupportedError");
-
-  var previousState = o.state,
-      finishedRendering = false;
-  function beforeStartRendering() {
-    ok(previousState == "suspended" && o.state == "running", "onstatechanged" +
-        "handler is called on state changed, and the new state is running");
-    previousState = o.state;
-    o.onstatechange = onRenderingFinished;
-  }
-
-  function onRenderingFinished() {
-    ok(previousState == "running" && o.state == "closed",
-        "onstatechanged handler is called when rendering finishes, " +
-        "and the new state is closed");
-    ok(finishedRendering, "The Promise that is resolved when the rendering is" +
-                    "done should be resolved earlier than the state change.");
-    previousState = o.state;
-    o.onstatechange = afterRenderingFinished;
-
-    tryToCreateNodeOnClosedContext(o);
-    tryLegalOperationsOnClosedContext(o);
-  }
-
-  function afterRenderingFinished() {
-    ok(false, "There should be no transition out of the closed state.");
-  }
-
-  o.onstatechange = beforeStartRendering;
-
-  o.startRendering().then(function(buffer) {
-    finishedRendering = true;
-  });
-}
-
-var remaining = 0;
-function finish() {
-  remaining--;
-  if (remaining == 0) {
-    SimpleTest.finish();
-  }
-}
-
-
-SimpleTest.waitForExplicitFinish();
-addLoadEvent(function() {
-  var tests = [
-    testAudioContext,
-    testOfflineAudioContext,
-    testScriptProcessNodeSuspended,
-    testMultiContextOutput,
-    testMultiContextInput
-  ];
-  remaining = tests.length;
-  tests.forEach(function(f) { f() });
-});
-
-</script>
-</pre>
-</body>
-</html>
--- a/dom/media/webaudio/test/webaudio.js
+++ b/dom/media/webaudio/test/webaudio.js
@@ -28,28 +28,16 @@ function expectTypeError(func) {
     func();
   } catch (ex) {
     threw = true;
     ok(ex instanceof TypeError, "Expect a TypeError");
   }
   ok(threw, "The exception was thrown");
 }
 
-function expectRejectedPromise(that, func, exceptionName) {
-  var promise = that[func]();
-
-  ok(promise instanceof Promise, "Expect a Promise");
-
-  promise.then(function(res) {
-    ok(false, "Promise resolved when it should have been rejected.");
-  }).catch(function(err) {
-    is(err.name, exceptionName, "Promise correctly reject with " + exceptionName);
-  });
-}
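// It was exercised by the removed test above, e.g.:
//   expectRejectedPromise(o, "suspend", "NotSupportedError");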
-
 function fuzzyCompare(a, b) {
   return Math.abs(a - b) < 9e-3;
 }
 
 function compareChannels(buf1, buf2,
                         /*optional*/ length,
                         /*optional*/ sourceOffset,
                         /*optional*/ destOffset,
--- a/dom/webidl/AudioContext.webidl
+++ b/dom/webidl/AudioContext.webidl
@@ -8,89 +8,75 @@
  *
  * Copyright © 2012 W3C® (MIT, ERCIM, Keio), All Rights Reserved. W3C
  * liability, trademark and document use rules apply.
  */
 
 callback DecodeSuccessCallback = void (AudioBuffer decodedData);
 callback DecodeErrorCallback = void ();
 
-enum AudioContextState {
-    "suspended",
-    "running",
-    "closed"
-};
-
 [Constructor,
  Constructor(AudioChannel audioChannelType)]
 interface AudioContext : EventTarget {
 
     readonly attribute AudioDestinationNode destination;
     readonly attribute float sampleRate;
     readonly attribute double currentTime;
     readonly attribute AudioListener listener;
-    readonly attribute AudioContextState state;
-    [Throws]
-    Promise<void> suspend();
-    [Throws]
-    Promise<void> resume();
-    [Throws]
-    Promise<void> close();
-    attribute EventHandler onstatechange;
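// Content-side shape of the removed API, as exercised by the removed test:
//   ac.suspend().then(function() { /* ac.state becomes "suspended" */ });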
 
     [NewObject, Throws]
     AudioBuffer createBuffer(unsigned long numberOfChannels, unsigned long length, float sampleRate);
 
     [Throws]
     Promise<AudioBuffer> decodeAudioData(ArrayBuffer audioData,
                                          optional DecodeSuccessCallback successCallback,
                                          optional DecodeErrorCallback errorCallback);
 
     // AudioNode creation
-    [NewObject, Throws]
+    [NewObject]
     AudioBufferSourceNode createBufferSource();
 
     [NewObject, Throws]
     MediaStreamAudioDestinationNode createMediaStreamDestination();
 
     [NewObject, Throws]
     ScriptProcessorNode createScriptProcessor(optional unsigned long bufferSize = 0,
                                               optional unsigned long numberOfInputChannels = 2,
                                               optional unsigned long numberOfOutputChannels = 2);
 
-    [NewObject, Throws]
+    [NewObject]
     StereoPannerNode createStereoPanner();
-    [NewObject, Throws]
+    [NewObject]
     AnalyserNode createAnalyser();
     [NewObject, Throws, UnsafeInPrerendering]
     MediaElementAudioSourceNode createMediaElementSource(HTMLMediaElement mediaElement);
     [NewObject, Throws, UnsafeInPrerendering]
     MediaStreamAudioSourceNode createMediaStreamSource(MediaStream mediaStream);
-    [NewObject, Throws]
+    [NewObject]
     GainNode createGain();
     [NewObject, Throws]
     DelayNode createDelay(optional double maxDelayTime = 1);
-    [NewObject, Throws]
+    [NewObject]
     BiquadFilterNode createBiquadFilter();
-    [NewObject, Throws]
+    [NewObject]
     WaveShaperNode createWaveShaper();
-    [NewObject, Throws]
+    [NewObject]
     PannerNode createPanner();
-    [NewObject, Throws]
+    [NewObject]
     ConvolverNode createConvolver();
 
     [NewObject, Throws]
     ChannelSplitterNode createChannelSplitter(optional unsigned long numberOfOutputs = 6);
     [NewObject, Throws]
     ChannelMergerNode createChannelMerger(optional unsigned long numberOfInputs = 6);
 
-    [NewObject, Throws]
+    [NewObject]
     DynamicsCompressorNode createDynamicsCompressor();
 
-    [NewObject, Throws]
+    [NewObject]
     OscillatorNode createOscillator();
     [NewObject, Throws]
     PeriodicWave createPeriodicWave(Float32Array real, Float32Array imag);
 
 };
 
 // Mozilla extensions
 partial interface AudioContext {