Backed out 5 changesets (bug 1413098) for frequent failure in toolkit/content/tests/browser/browser_autoplay_policy_user_gestures.js on a CLOSED TREE
author: Margareta Eliza Balazs <ebalazs@mozilla.com>
date: Thu, 31 May 2018 15:21:15 +0300
changeset: 420699 4024c45ceac838156c1a38b8d2dfb932cf3cec88
parent: 420698 d09813f5a6a52abc4ae14e32a80e865087a93bd4
child: 420700 05126faafa98d99fa8d5d7deb7a59188d6a4794a
push id: 34077
push user: nerli@mozilla.com
push date: Thu, 31 May 2018 21:51:59 +0000
treeherder: mozilla-central@42880a726964
bugs: 1413098
milestone: 62.0a1
backs out: 3c186b3bb90961748566b7c39af27beebb3ec855
           b12730d42016a38c7869dfc50a4646198c9002df
           49ef875bd65ef3776cde50c223c4d2f6d1699a09
           747764af7143667f473fb1853aa50ba943b4b12d
           5031770d70fd643230cb4caf6a5106616adaf0fd
Backed out 5 changesets (bug 1413098) for frequent failure in toolkit/content/tests/browser/browser_autoplay_policy_user_gestures.js on a CLOSED TREE

Backed out changeset 3c186b3bb909 (bug 1413098)
Backed out changeset b12730d42016 (bug 1413098)
Backed out changeset 49ef875bd65e (bug 1413098)
Backed out changeset 747764af7143 (bug 1413098)
Backed out changeset 5031770d70fd (bug 1413098)
dom/media/AutoplayPolicy.cpp
dom/media/AutoplayPolicy.h
dom/media/webaudio/AudioContext.cpp
dom/media/webaudio/AudioContext.h
dom/media/webaudio/AudioDestinationNode.cpp
dom/media/webaudio/AudioDestinationNode.h
dom/media/webaudio/test/mochitest.ini
dom/media/webaudio/test/test_notAllowedToStartAudioContextGC.html
testing/specialpowers/content/specialpowersAPI.js
toolkit/content/tests/browser/browser_autoplay_policy_user_gestures.js
--- a/dom/media/AutoplayPolicy.cpp
+++ b/dom/media/AutoplayPolicy.cpp
@@ -3,17 +3,16 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "AutoplayPolicy.h"
 
 #include "mozilla/EventStateManager.h"
 #include "mozilla/Preferences.h"
-#include "mozilla/dom/AudioContext.h"
 #include "mozilla/dom/HTMLMediaElement.h"
 #include "mozilla/dom/HTMLMediaElementBinding.h"
 #include "nsContentUtils.h"
 #include "nsIDocument.h"
 #include "MediaManager.h"
 
 namespace mozilla {
 namespace dom {
@@ -64,56 +63,10 @@ AutoplayPolicy::IsMediaElementAllowedToP
   // Activated by user gesture.
   if (aElement->OwnerDoc()->HasBeenUserActivated()) {
     return true;
   }
 
   return false;
 }
 
-/* static */ bool
-AutoplayPolicy::IsAudioContextAllowedToPlay(NotNull<AudioContext*> aContext)
-{
-  if (Preferences::GetBool("media.autoplay.enabled")) {
-    return true;
-  }
-
-  if (!Preferences::GetBool("media.autoplay.enabled.user-gestures-needed", false)) {
-    return true;
-  }
-
-  // Offline context won't directly output sound to audio devices.
-  if (aContext->IsOffline()) {
-    return true;
-  }
-
-  nsPIDOMWindowInner* window = aContext->GetOwner();
-  if (!window) {
-    return false;
-  }
-
-  // Pages which have been granted permission to capture WebRTC camera or
-  // microphone are assumed to be trusted, and are allowed to autoplay.
-  MediaManager* manager = MediaManager::GetIfExists();
-  if (manager) {
-    if (manager->IsActivelyCapturingOrHasAPermission(window->WindowID())) {
-      return true;
-    }
-  }
-
-  nsCOMPtr<nsIPrincipal> principal = aContext->GetParentObject()->AsGlobal()->PrincipalOrNull();
-
-  // Whitelisted.
-  if (principal &&
-      nsContentUtils::IsExactSitePermAllow(principal, "autoplay-media")) {
-    return true;
-  }
-
-  // Activated by user gesture.
-  if (window->GetExtantDoc()->HasBeenUserActivated()) {
-    return true;
-  }
-
-  return false;
-}
-
 } // namespace dom
 } // namespace mozilla
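
The removed IsAudioContextAllowedToPlay() mirrors the media-element check kept above: playback is permitted when the media.autoplay.enabled pref is on, when gesture gating is off, for offline contexts, for pages actively capturing via WebRTC, for origins holding the "autoplay-media" permission, or after user activation. From a page's point of view, the observable effect of the backed-out gating was that an AudioContext created without a gesture stayed "suspended". A minimal web-facing sketch, illustrative only and assuming the gating prefs from the patch are set:

// Illustrative sketch, not part of the patch: observable behaviour of the
// removed gating with media.autoplay.enabled=false and
// media.autoplay.enabled.user-gestures-needed=true.
const ctx = new AudioContext();
console.log(ctx.state); // "suspended": the page has no user activation yet

// Mouse clicks and key presses count as activation gestures.
document.addEventListener("click", async () => {
  await ctx.resume();     // now allowed; the promise resolves
  console.log(ctx.state); // "running"
}, { once: true });
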
--- a/dom/media/AutoplayPolicy.h
+++ b/dom/media/AutoplayPolicy.h
@@ -10,17 +10,16 @@
 #include "mozilla/NotNull.h"
 
 class nsIDocument;
 
 namespace mozilla {
 namespace dom {
 
 class HTMLMediaElement;
-class AudioContext;
 
 /**
  * AutoplayPolicy is used to manage autoplay logic for all kinds of media,
  * including MediaElement, Web Audio and Web Speech.
  *
  * Autoplay could be disable by turn off the pref "media.autoplay.enabled".
  * Once user disable autoplay, media could only be played if one of following
  * conditions is true.
@@ -28,17 +27,14 @@ class AudioContext;
  *    We restrict user gestures to "mouse click", "keyboard press" and "touch".
  * 2) Muted media content or video without audio content.
  * 3) Document's origin has the "autoplay-media" permission.
  */
 class AutoplayPolicy
 {
 public:
   static bool IsMediaElementAllowedToPlay(NotNull<HTMLMediaElement*> aElement);
-  static bool IsAudioContextAllowedToPlay(NotNull<AudioContext*> aContext);
-private:
-  static bool IsDocumentAllowedToPlay(nsIDocument* aDoc);
 };
 
 } // namespace dom
 } // namespace mozilla
 
-#endif
+#endif
\ No newline at end of file
--- a/dom/media/webaudio/AudioContext.cpp
+++ b/dom/media/webaudio/AudioContext.cpp
@@ -4,17 +4,16 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "AudioContext.h"
 
 #include "blink/PeriodicWave.h"
 
 #include "mozilla/ErrorResult.h"
-#include "mozilla/NotNull.h"
 #include "mozilla/OwningNonNull.h"
 #include "mozilla/RefPtr.h"
 #include "mozilla/Preferences.h"
 
 #include "mozilla/dom/AnalyserNode.h"
 #include "mozilla/dom/AnalyserNodeBinding.h"
 #include "mozilla/dom/AudioBufferSourceNodeBinding.h"
 #include "mozilla/dom/AudioContextBinding.h"
@@ -40,17 +39,16 @@
 
 #include "AudioBuffer.h"
 #include "AudioBufferSourceNode.h"
 #include "AudioChannelService.h"
 #include "AudioDestinationNode.h"
 #include "AudioListener.h"
 #include "AudioNodeStream.h"
 #include "AudioStream.h"
-#include "AutoplayPolicy.h"
 #include "BiquadFilterNode.h"
 #include "ChannelMergerNode.h"
 #include "ChannelSplitterNode.h"
 #include "ConstantSourceNode.h"
 #include "ConvolverNode.h"
 #include "DelayNode.h"
 #include "DynamicsCompressorNode.h"
 #include "GainNode.h"
@@ -80,17 +78,16 @@ namespace dom {
 static dom::AudioContext::AudioContextId gAudioContextId = 1;
 
 NS_IMPL_CYCLE_COLLECTION_CLASS(AudioContext)
 
 NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(AudioContext)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mDestination)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mListener)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mPromiseGripArray)
-  NS_IMPL_CYCLE_COLLECTION_UNLINK(mPendingResumePromises)
   if (!tmp->mIsStarted) {
     NS_IMPL_CYCLE_COLLECTION_UNLINK(mActiveNodes)
   }
   // mDecodeJobs owns the WebAudioDecodeJob objects whose lifetime is managed explicitly.
   // mAllNodes is an array of weak pointers, ignore it here.
   // mPannerNodes is an array of weak pointers, ignore it here.
   // mBasicWaveFormCache cannot participate in cycles, ignore it here.
 
@@ -99,17 +96,16 @@ NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(Au
   tmp->DisconnectFromWindow();
 NS_IMPL_CYCLE_COLLECTION_UNLINK_END_INHERITED(DOMEventTargetHelper)
 
 NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(AudioContext,
                                                   DOMEventTargetHelper)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mDestination)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mListener)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPromiseGripArray)
-  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPendingResumePromises)
   if (!tmp->mIsStarted) {
     MOZ_ASSERT(tmp->mIsOffline,
                "Online AudioContexts should always be started");
     NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mActiveNodes)
   }
   // mDecodeJobs owns the WebAudioDecodeJob objects whose lifetime is managed explicitly.
   // mAllNodes is an array of weak pointers, ignore it here.
   // mPannerNodes is an array of weak pointers, ignore it here.
@@ -148,38 +144,23 @@ AudioContext::AudioContext(nsPIDOMWindow
   , mCloseCalled(false)
   , mSuspendCalled(false)
   , mIsDisconnecting(false)
 {
   bool mute = aWindow->AddAudioContext(this);
 
   // Note: AudioDestinationNode needs an AudioContext that must already be
   // bound to the window.
-  bool allowedToStart = AutoplayPolicy::IsAudioContextAllowedToPlay(WrapNotNull(this));
-  mDestination = new AudioDestinationNode(this,
-                                          aIsOffline,
-                                          allowedToStart,
-                                          aNumberOfChannels,
-                                          aLength,
-                                          aSampleRate);
+  mDestination = new AudioDestinationNode(this, aIsOffline,
+                                          aNumberOfChannels, aLength, aSampleRate);
 
   // The context can't be muted until it has a destination.
   if (mute) {
     Mute();
   }
-
-  // If we won't allow audio context to start, we need to suspend all its stream
-  // in order to delay the state changing from 'suspend' to 'start'.
-  if (!allowedToStart) {
-    ErrorResult rv;
-    RefPtr<Promise> dummy = Suspend(rv);
-    MOZ_ASSERT(!rv.Failed(), "can't create promise");
-    MOZ_ASSERT(dummy->State() != Promise::PromiseState::Rejected,
-               "suspend failed");
-  }
 }
 
 nsresult
 AudioContext::Init()
 {
   if (!mIsOffline) {
     nsresult rv = mDestination->CreateAudioChannelAgent();
     if (NS_WARN_IF(NS_FAILED(rv))) {
@@ -740,21 +721,16 @@ AudioContext::Shutdown()
       RefPtr<Promise> ignored = Close(IgnoreErrors());
     }
 
     for (auto p : mPromiseGripArray) {
       p->MaybeReject(NS_ERROR_DOM_INVALID_STATE_ERR);
     }
 
     mPromiseGripArray.Clear();
-
-    for (const auto& p : mPendingResumePromises) {
-      p->MaybeReject(NS_ERROR_DOM_INVALID_STATE_ERR);
-    }
-    mPendingResumePromises.Clear();
   }
 
   // Release references to active nodes.
   // Active AudioNodes don't unregister in destructors, at which point the
   // Node is already unregistered.
   mActiveNodes.Clear();
 
   // For offline contexts, we can destroy the MediaStreamGraph at this point.
@@ -924,26 +900,16 @@ AudioContext::OnStateChanged(void* aProm
     // already freed memory.
     if (mPromiseGripArray.Contains(promise)) {
       promise->MaybeResolveWithUndefined();
       DebugOnly<bool> rv = mPromiseGripArray.RemoveElement(promise);
       MOZ_ASSERT(rv, "Promise wasn't in the grip array?");
     }
   }
 
-  // Resolve all pending promises once the audio context has been allowed to
-  // start.
-  if (mAudioContextState == AudioContextState::Suspended &&
-      aNewState == AudioContextState::Running) {
-    for (const auto& p : mPendingResumePromises) {
-      p->MaybeResolveWithUndefined();
-    }
-    mPendingResumePromises.Clear();
-  }
-
   if (mAudioContextState != aNewState) {
     RefPtr<OnStateChangeTask> task = new OnStateChangeTask(this);
     Dispatch(task.forget());
   }
 
   mAudioContextState = aNewState;
 }
 
@@ -1017,34 +983,32 @@ AudioContext::Resume(ErrorResult& aRv)
   }
 
   if (mAudioContextState == AudioContextState::Closed ||
       mCloseCalled) {
     promise->MaybeReject(NS_ERROR_DOM_INVALID_STATE_ERR);
     return promise.forget();
   }
 
-  mPendingResumePromises.AppendElement(promise);
-
-  if (AutoplayPolicy::IsAudioContextAllowedToPlay(WrapNotNull(this))) {
-    Destination()->Resume();
+  Destination()->Resume();
 
-    nsTArray<MediaStream*> streams;
-    // If mSuspendCalled is false then we already resumed all our streams,
-    // so don't resume them again (since suspend(); resume(); resume(); should
-    // be OK). But we still need to do ApplyAudioContextOperation
-    // to ensure our new promise is resolved.
-    if (mSuspendCalled) {
-      streams = GetAllStreams();
-    }
-    Graph()->ApplyAudioContextOperation(DestinationStream()->AsAudioNodeStream(),
-                                        streams,
-                                        AudioContextOperation::Resume, promise);
-    mSuspendCalled = false;
+  nsTArray<MediaStream*> streams;
+  // If mSuspendCalled is false then we already resumed all our streams,
+  // so don't resume them again (since suspend(); resume(); resume(); should
+  // be OK). But we still need to do ApplyAudioContextOperation
+  // to ensure our new promise is resolved.
+  if (mSuspendCalled) {
+    streams = GetAllStreams();
   }
+  mPromiseGripArray.AppendElement(promise);
+  Graph()->ApplyAudioContextOperation(DestinationStream()->AsAudioNodeStream(),
+                                      streams,
+                                      AudioContextOperation::Resume, promise);
+
+  mSuspendCalled = false;
 
   return promise.forget();
 }
 
 already_AddRefed<Promise>
 AudioContext::Close(ErrorResult& aRv)
 {
   nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject());
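
For context on the Resume() hunk above: the backed-out patch held resume() promises in a dedicated mPendingResumePromises queue, so calls made while the context was still blocked settled only once the state actually flipped to Running; the backout returns them to mPromiseGripArray and resumes unconditionally. A hedged sketch of the queued behaviour being removed:

// Illustrative sketch of the pre-backout contract: resume() promises
// issued while the context was blocked all resolved together on the
// Suspended -> Running transition.
const ctx = new AudioContext();             // blocked => "suspended"
const early = [ctx.resume(), ctx.resume()]; // both stay pending

document.addEventListener("click", async () => {
  await ctx.resume();       // a real gesture unblocks the context
  await Promise.all(early); // the queued promises now settle too
  console.log(ctx.state);   // "running"
}, { once: true });
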
--- a/dom/media/webaudio/AudioContext.h
+++ b/dom/media/webaudio/AudioContext.h
@@ -348,24 +348,19 @@ private:
   const AudioContextId mId;
   // Note that it's important for mSampleRate to be initialized before
   // mDestination, as mDestination's constructor needs to access it!
   const float mSampleRate;
   AudioContextState mAudioContextState;
   RefPtr<AudioDestinationNode> mDestination;
   RefPtr<AudioListener> mListener;
   nsTArray<UniquePtr<WebAudioDecodeJob> > mDecodeJobs;
-  // This array is used to keep the suspend/close promises alive until
+  // This array is used to keep the suspend/resume/close promises alive until
   // they are resolved, so we can safely pass them accross threads.
   nsTArray<RefPtr<Promise>> mPromiseGripArray;
-  // This array is used to onlly keep the resume promises alive until they are
-  // resolved, so we can safely pass them accross threads. If the audio context
-  // is not allowed to play, the promise would be pending in this array and be
-  // resolved until audio context has been allowed and user call resume() again.
-  nsTArray<RefPtr<Promise>> mPendingResumePromises;
   // See RegisterActiveNode.  These will keep the AudioContext alive while it
   // is rendering and the window remains alive.
   nsTHashtable<nsRefPtrHashKey<AudioNode> > mActiveNodes;
   // Raw (non-owning) references to all AudioNodes for this AudioContext.
   nsTHashtable<nsPtrHashKey<AudioNode> > mAllNodes;
   // Hashsets containing all the PannerNodes, to compute the doppler shift.
   // These are weak pointers.
   nsTHashtable<nsPtrHashKey<PannerNode> > mPannerNodes;
--- a/dom/media/webaudio/AudioDestinationNode.cpp
+++ b/dom/media/webaudio/AudioDestinationNode.cpp
@@ -318,20 +318,18 @@ NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(
   NS_INTERFACE_MAP_ENTRY(nsIAudioChannelAgentCallback)
 NS_INTERFACE_MAP_END_INHERITING(AudioNode)
 
 NS_IMPL_ADDREF_INHERITED(AudioDestinationNode, AudioNode)
 NS_IMPL_RELEASE_INHERITED(AudioDestinationNode, AudioNode)
 
 AudioDestinationNode::AudioDestinationNode(AudioContext* aContext,
                                            bool aIsOffline,
-                                           bool aAllowedToStart,
                                            uint32_t aNumberOfChannels,
-                                           uint32_t aLength,
-                                           float aSampleRate)
+                                           uint32_t aLength, float aSampleRate)
   : AudioNode(aContext, aNumberOfChannels,
               ChannelCountMode::Explicit, ChannelInterpretation::Speakers)
   , mFramesToProduce(aLength)
   , mIsOffline(aIsOffline)
   , mAudioChannelSuspended(false)
   , mCaptured(false)
   , mAudible(AudioChannelService::AudibleState::eAudible)
 {
@@ -349,17 +347,17 @@ AudioDestinationNode::AudioDestinationNo
   AudioNodeStream::Flags flags =
     AudioNodeStream::NEED_MAIN_THREAD_CURRENT_TIME |
     AudioNodeStream::NEED_MAIN_THREAD_FINISHED |
     AudioNodeStream::EXTERNAL_OUTPUT;
   mStream = AudioNodeStream::Create(aContext, engine, flags, graph);
   mStream->AddMainThreadListener(this);
   mStream->AddAudioOutput(&gWebAudioOutputKey);
 
-  if (!aIsOffline && aAllowedToStart) {
+  if (!aIsOffline) {
     graph->NotifyWhenGraphStarted(mStream);
   }
 }
 
 AudioDestinationNode::~AudioDestinationNode()
 {
 }
 
--- a/dom/media/webaudio/AudioDestinationNode.h
+++ b/dom/media/webaudio/AudioDestinationNode.h
@@ -20,17 +20,16 @@ class AudioDestinationNode final : publi
                                  , public nsIAudioChannelAgentCallback
                                  , public MainThreadMediaStreamListener
 {
 public:
   // This node type knows what MediaStreamGraph to use based on
   // whether it's in offline mode.
   AudioDestinationNode(AudioContext* aContext,
                        bool aIsOffline,
-                       bool aAllowedToStart,
                        uint32_t aNumberOfChannels = 0,
                        uint32_t aLength = 0,
                        float aSampleRate = 0.0f);
 
   void DestroyMediaStream() override;
 
   NS_DECL_ISUPPORTS_INHERITED
   NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(AudioDestinationNode, AudioNode)
--- a/dom/media/webaudio/test/mochitest.ini
+++ b/dom/media/webaudio/test/mochitest.ini
@@ -181,17 +181,16 @@ tags=capturestream
 [test_mediaStreamAudioSourceNodeNoGC.html]
 [test_mediaStreamAudioSourceNodePassThrough.html]
 [test_mediaStreamAudioSourceNodeResampling.html]
 tags=capturestream
 [test_mixingRules.html]
 skip-if = toolkit == 'android' # bug 1091965
 [test_nodeToParamConnection.html]
 [test_nodeCreationDocumentGone.html]
-[test_notAllowedToStartAudioContextGC.html]
 [test_OfflineAudioContext.html]
 [test_offlineDestinationChannelCountLess.html]
 [test_offlineDestinationChannelCountMore.html]
 [test_oscillatorNode.html]
 [test_oscillatorNode2.html]
 [test_oscillatorNodeNegativeFrequency.html]
 [test_oscillatorNodePassThrough.html]
 [test_oscillatorNodeStart.html]
deleted file mode 100644
--- a/dom/media/webaudio/test/test_notAllowedToStartAudioContextGC.html
+++ /dev/null
@@ -1,57 +0,0 @@
-<!DOCTYPE HTML>
-<html>
-<head>
-  <title>Test GC for not-allow-to-start audio context</title>
-  <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
-  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
-</head>
-<body>
-<pre id="test">
-<script class="testbody" type="text/javascript">
-
-SimpleTest.requestFlakyTimeout(`Checking that something does not happen`);
-
-SimpleTest.waitForExplicitFinish();
-
-var destId;
-
-function observer(subject, topic, data) {
-  let id = parseInt(data);
-  ok(id != destId, "dropping another node, not the context's destination");
-}
-
-SpecialPowers.addAsyncObserver(observer, "webaudio-node-demise", false);
-SimpleTest.registerCleanupFunction(function() {
-  SpecialPowers.removeAsyncObserver(observer, "webaudio-node-demise");
-});
-
-SpecialPowers.pushPrefEnv({"set": [["media.autoplay.enabled", false],
-                                   ["media.autoplay.enabled.user-gestures-needed", true]]},
-                          startTest);
-
-function startTest() {
-  info("- create audio context -");
-  let ac = new AudioContext();
-
-  info("- get node Id -");
-  destId = SpecialPowers.getPrivilegedProps(ac.destination, "id");
-
-  info("- trigger GCs -");
-  SpecialPowers.forceGC();
-  SpecialPowers.forceCC();
-  SpecialPowers.forceGC();
-
-  info("- after three GCs -");
-
-  // We're doing this async so that we can receive observerservice messages.
-  setTimeout(function() {
-    ok(true, `AudioContext that has been prevented
-              from starting has correctly survived GC`)
-    SimpleTest.finish();
-  }, 1);
-}
-
-</script>
-</pre>
-</body>
-</html>
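
The deleted mochitest above guards against a GC hazard in the backed-out feature: a context blocked at construction has no running stream yet, so without the Suspend() bookkeeping it could be collected before a gesture ever arrives. The test watches "webaudio-node-demise" notifications and asserts the destination node's id is never among them. A minimal sketch of the hazard, with createBlockedContext a hypothetical helper for illustration:

// Illustrative sketch of the hazard the deleted test covered.
function createBlockedContext() {
  const ctx = new AudioContext(); // gating keeps it "suspended"
  ctx.resume();                   // promise pends until activation
  // no reference to ctx escapes this scope
}
createBlockedContext();
// If GC could collect the context here, its pending resume() promise
// would never settle and no "statechange" event would ever fire.
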
--- a/testing/specialpowers/content/specialpowersAPI.js
+++ b/testing/specialpowers/content/specialpowersAPI.js
@@ -1712,17 +1712,17 @@ SpecialPowersAPI.prototype = {
     syncXHR.send();
   },
 
   // :jdm gets credit for this.  ex: getPrivilegedProps(window, 'location.href');
   getPrivilegedProps(obj, props) {
     var parts = props.split(".");
     for (var i = 0; i < parts.length; i++) {
       var p = parts[i];
-      if (obj[p] != undefined) {
+      if (obj[p]) {
         obj = obj[p];
       } else {
         return null;
       }
     }
     return obj;
   },
 
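
Note on the getPrivilegedProps() hunk: the backout reverts an `obj[p] != undefined` comparison to a plain truthiness test, so the property walk again bails out on present-but-falsy values (0, "", false, null). A standalone reimplementation of the reverted logic, hypothetical and for illustration only:

// Hypothetical standalone version of the reverted check.
function getProps(obj, props) {
  for (const p of props.split(".")) {
    if (obj[p]) {    // truthiness test, as restored by the backout
      obj = obj[p];
    } else {
      return null;   // also taken for falsy-but-present values
    }
  }
  return obj;
}

console.log(getProps({ node: { id: 0 } }, "node.id")); // null, not 0
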
--- a/toolkit/content/tests/browser/browser_autoplay_policy_user_gestures.js
+++ b/toolkit/content/tests/browser/browser_autoplay_policy_user_gestures.js
@@ -1,10 +1,8 @@
-/* eslint-disable mozilla/no-arbitrary-setTimeout */
-
 const VIDEO_PAGE = "https://example.com/browser/toolkit/content/tests/browser/file_video.html";
 
 var UserGestures = {
   MOUSE_CLICK: "mouse-click",
   MOUSE_MOVE: "mouse-move",
   KEYBOARD_PRESS: "keyboard-press"
 };
 
@@ -12,18 +10,17 @@ var UserGestureTests = [
   {type: UserGestures.MOUSE_CLICK, isActivationGesture: true},
   {type: UserGestures.MOUSE_MOVE, isActivationGesture: false},
   {type: UserGestures.KEYBOARD_PRESS, isActivationGesture: true}
 ];
 
 function setup_test_preference() {
   return SpecialPowers.pushPrefEnv({"set": [
     ["media.autoplay.enabled", false],
-    ["media.autoplay.enabled.user-gestures-needed", true],
-    ["media.navigator.permission.fake", true]
+    ["media.autoplay.enabled.user-gestures-needed", true]
   ]});
 }
 
 function simulateUserGesture(gesture, targetBrowser) {
   info(`- simulate ${gesture.type} event -`);
   switch (gesture.type) {
     case UserGestures.MOUSE_CLICK:
       return BrowserTestUtils.synthesizeMouseAtCenter("body", {button: 0},
@@ -94,191 +91,26 @@ async function test_play_with_user_gestu
       await video.play();
       ok(gesture.isActivationGesture, "user gesture can activate the page");
       ok(!video.paused, "video starts playing.");
     } catch (e) {
       ok(!gesture.isActivationGesture, "user gesture can not activate the page");
       ok(video.paused, "video can not start playing.");
     }
   }
-
   await ContentTask.spawn(tab.linkedBrowser, gesture, play_video);
 
   info("- remove tab -");
   BrowserTestUtils.removeTab(tab);
 }
 
-function createAudioContext() {
-  content.ac = new content.AudioContext();
-  let ac = content.ac;
-  ac.resumePromises = [];
-  ac.stateChangePromise = new Promise(resolve => {
-    ac.addEventListener("statechange", function() {
-      resolve();
-    }, {once: true});
-  });
-}
-
-async function checking_audio_context_running_state() {
-  let ac = content.ac;
-  await new Promise(r => setTimeout(r, 2000));
-  is(ac.state, "suspended", "audio context is still suspended");
-}
-
-function resume_without_expected_success() {
-  let ac = content.ac;
-  let promise = ac.resume();
-  ac.resumePromises.push(promise);
-  return new Promise((resolve, reject) => {
-    setTimeout(() => {
-      if (ac.state == "suspended") {
-        ok(true, "audio context is still suspended");
-        resolve();
-      } else {
-        reject("audio context should not be allowed to start");
-      }
-    }, 2000);
-  });
-}
-
-function resume_with_expected_success() {
-  let ac = content.ac;
-  ac.resumePromises.push(ac.resume());
-  return Promise.all(ac.resumePromises).then(() => {
-    ok(ac.state == "running", "audio context starts running");
-  });
-}
-
-function callGUM(testParameters) {
-  info("- calling gum with " + JSON.stringify(testParameters.constraints));
-  if (testParameters.shouldAllowStartingContext) {
-    // Because of the prefs we've set and passed, this is going to allow the
-    // window to start an AudioContext synchronously.
-    testParameters.constraints.fake = true;
-    return content.navigator.mediaDevices.getUserMedia(testParameters.constraints);
-  }
-
-  // Call gUM, without sucess: we've made it so that only fake requests
-  // succeed without permission, and this is requesting non-fake-devices. Return
-  // a resolved promise so that the test continues, but the getUserMedia Promise
-  // will never be resolved.
-  // We do this to check that it's not merely calling gUM that allows starting
-  // an AudioContext, it's having the Promise it return resolved successfuly,
-  // because of saved permissions for an origin or explicit user consent using
-  // the prompt.
-  content.navigator.mediaDevices.getUserMedia(testParameters.constraints);
-  return Promise.resolve();
-}
-
-
-async function test_webaudio_with_user_gesture(gesture) {
-  info("- open new tab -");
-  let tab = await BrowserTestUtils.openNewForegroundTab(window.gBrowser,
-                                                        "about:blank");
-  info("- create audio context -");
-  // We want the same audio context to be used across different content
-  // tasks, so it needs to be loaded by a frame script.
-  let frameScript = createAudioContext;
-  let mm = tab.linkedBrowser.messageManager;
-  mm.loadFrameScript("data:,(" + frameScript.toString() + ")();", false);
-
-  info("- check whether audio context starts running -");
-  try {
-    await ContentTask.spawn(tab.linkedBrowser, null,
-                            checking_audio_context_running_state);
-  } catch (error) {
-    ok(false, error.toString());
-  }
-
-  info("- calling resume() -");
-  try {
-    await ContentTask.spawn(tab.linkedBrowser, null,
-                            resume_without_expected_success);
-  } catch (error) {
-    ok(false, error.toString());
-  }
-
-  info("- simulate user gesture -");
-  await simulateUserGesture(gesture, tab.linkedBrowser);
-
-  info("- calling resume() again");
-  try {
-    let resumeFunc = gesture.isActivationGesture ?
-      resume_with_expected_success :
-      resume_without_expected_success;
-    await ContentTask.spawn(tab.linkedBrowser, null, resumeFunc);
-  } catch (error) {
-    ok(false, error.toString());
-  }
-
-  info("- remove tab -");
-  await BrowserTestUtils.removeTab(tab);
-}
-
-async function test_webaudio_with_gum(testParameters) {
-  info("- open new tab -");
-  let tab = await BrowserTestUtils.openNewForegroundTab(window.gBrowser,
-                                                        "about:blank");
-  info("- create audio context -");
-  // We want the same audio context be used between different content
-  // tasks, so it *must* be loaded by frame script.
-  let frameScript = createAudioContext;
-  let mm = tab.linkedBrowser.messageManager;
-  mm.loadFrameScript("data:,(" + frameScript.toString() + ")();", false);
-
-  info("- check whether audio context starts running -");
-  try {
-    await ContentTask.spawn(tab.linkedBrowser, null,
-                            checking_audio_context_running_state);
-  } catch (error) {
-    ok(false, error.toString());
-  }
-
-  try {
-    await ContentTask.spawn(tab.linkedBrowser, testParameters, callGUM);
-  } catch (error) {
-    ok(false, error.toString());
-  }
-
-  info("- calling resume() again");
-  try {
-    let resumeFunc = testParameters.shouldAllowStartingContext ?
-      resume_with_expected_success :
-      resume_without_expected_success;
-    await ContentTask.spawn(tab.linkedBrowser, null, resumeFunc);
-  } catch (error) {
-    ok(false, error.toString());
-  }
-
-  info("- remove tab -");
-  await BrowserTestUtils.removeTab(tab);
-}
-
 add_task(async function start_test() {
   info("- setup test preference -");
   await setup_test_preference();
 
   info("- test play when page doesn't be activated -");
   await test_play_without_user_gesture();
 
   info("- test play after page got user gesture -");
   for (let idx = 0; idx < UserGestureTests.length; idx++) {
-    info("- test play after page got user gesture -");
     await test_play_with_user_gesture(UserGestureTests[idx]);
-
-    info("- test web audio with user gesture -");
-    await test_webaudio_with_user_gesture(UserGestureTests[idx]);
   }
-
-  await test_webaudio_with_gum({constraints: { audio: true },
-                                shouldAllowStartingContext: true});
-  await test_webaudio_with_gum({constraints: { video: true },
-                                shouldAllowStartingContext: true});
-  await test_webaudio_with_gum({constraints: { video: true,
-                                               audio: true },
-                                shouldAllowStartingContext: true});
-  await test_webaudio_with_gum({constraints: { video: true },
-                                shouldAllowStartingContext: false});
-  await test_webaudio_with_gum({constraints: { audio: true },
-                                shouldAllowStartingContext: false});
-  await test_webaudio_with_gum({constraints: { video: true, audio: true },
-                                shouldAllowStartingContext: false});
 });