Bug 1530220 - part 4: separate testing web audio with gUM into another test. r=cpearce a=lizzard
author: Alastor Wu <alwu@mozilla.com>
date: Tue, 05 Mar 2019 02:13:55 +0000
changeset: 516288 3b0bc8cad66fcf18a00a9185ff534aad7b45f7db
parent: 516287 71e48c6607e6c53eed1de2256c5760fa6c1b18aa
child: 516289 1639f365cc4cf6034fca350ca332a5d2bf32c56b
push id: 1953
push user: ffxbld-merge
push date: Mon, 11 Mar 2019 12:10:20 +0000
treeherder: mozilla-release@9c35dcbaa899
reviewers: cpearce, lizzard
bugs: 1530220
milestone: 66.0
Bug 1530220 - part 4: separate testing web audio with gUM into another test. r=cpearce a=lizzard Testing web audio with gUM is not related to the original purpose of this test. To reduce the complexity of this test, split that part out into a separate new test. Also rename the functions, since the Mozilla coding style uses camelCase for function names. Differential Revision: https://phabricator.services.mozilla.com/D21257
toolkit/content/tests/browser/browser.ini
toolkit/content/tests/browser/browser_autoplay_policy_user_gestures.js
toolkit/content/tests/browser/browser_autoplay_policy_web_audio_with_gum.js
--- a/toolkit/content/tests/browser/browser.ini
+++ b/toolkit/content/tests/browser/browser.ini
@@ -52,16 +52,17 @@ support-files =
 [browser_autoplay_policy_user_gestures.js]
 support-files =
   gizmo.mp4
   file_video.html
 [browser_autoplay_policy_touchScroll.js]
 [browser_autoplay_policy_web_audio.js]
 support-files =
   file_empty.html
+[browser_autoplay_policy_web_audio_with_gum.js]
 [browser_autoplay_policy_webRTC_permission.js]
 skip-if = true # Bug 1518429
 support-files =
   file_empty.html
   gizmo.mp4
 [browser_autoplay_videoDocument.js]
 [browser_autoscroll_disabled.js]
 skip-if = true # Bug 1312652
--- a/toolkit/content/tests/browser/browser_autoplay_policy_user_gestures.js
+++ b/toolkit/content/tests/browser/browser_autoplay_policy_user_gestures.js
@@ -1,30 +1,55 @@
 /* eslint-disable mozilla/no-arbitrary-setTimeout */
 
 const VIDEO_PAGE = "https://example.com/browser/toolkit/content/tests/browser/file_video.html";
 
-var UserGestures = {
+const UserGestures = {
   MOUSE_CLICK: "mouse-click",
   MOUSE_MOVE: "mouse-move",
   KEYBOARD_PRESS: "keyboard-press",
 };
 
-var UserGestureTests = [
+const UserGestureTests = [
   {type: UserGestures.MOUSE_CLICK, isActivationGesture: true},
   {type: UserGestures.MOUSE_MOVE, isActivationGesture: false},
   // test different keycode here. printable key, non-printable key and other
   // special keys.
   {type: UserGestures.KEYBOARD_PRESS, isActivationGesture: true, keyCode: "a"},
   {type: UserGestures.KEYBOARD_PRESS, isActivationGesture: false, keyCode: "VK_ESCAPE"},
   {type: UserGestures.KEYBOARD_PRESS, isActivationGesture: true, keyCode: "VK_RETURN"},
   {type: UserGestures.KEYBOARD_PRESS, isActivationGesture: true, keyCode: "VK_SPACE"},
 ];
 
-function setup_test_preference() {
+/**
+ * This test ensures that we stop blocking autoplay once the document has been
+ * activated by a user gesture. Mouse clicks and keyboard presses (printable
+ * keys or carriage return) are treated as valid user gesture input.
+ */
+add_task(async function startTestUserGestureInput() {
+  info("- setup test preference -");
+  await setupTestPreferences();
+
+  info("- test play when page doesn't be activated -");
+  await testPlayWithoutUserGesture();
+
+  info("- test play after page got user gesture -");
+  for (let idx = 0; idx < UserGestureTests.length; idx++) {
+    info("- test play after page got user gesture -");
+    await testPlayWithUserGesture(UserGestureTests[idx]);
+
+    info("- test web audio with user gesture -");
+    await testWebAudioWithUserGesture(UserGestureTests[idx]);
+  }
+});
+
+/**
+ * testing helper functions
+ */
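+// Block autoplay and web audio by default, so that playback should only be
+// able to start after the page has been activated by a user gesture.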
+function setupTestPreferences() {
   return SpecialPowers.pushPrefEnv({"set": [
     ["media.autoplay.default", SpecialPowers.Ci.nsIAutoplay.BLOCKED],
     ["media.autoplay.enabled.user-gestures-needed", true],
     ["media.autoplay.block-event.enabled", true],
     ["media.autoplay.block-webaudio", true],
     ["media.navigator.permission.fake", true],
   ]});
 }
@@ -34,85 +59,86 @@ function simulateUserGesture(gesture, ta
   switch (gesture.type) {
     case UserGestures.MOUSE_CLICK:
       return BrowserTestUtils.synthesizeMouseAtCenter("body", {button: 0},
                                                       targetBrowser);
     case UserGestures.MOUSE_MOVE:
       return BrowserTestUtils.synthesizeMouseAtCenter("body", {type: "mousemove"},
                                                       targetBrowser);
     case UserGestures.KEYBOARD_PRESS:
+      info(`- keycode=${gesture.keyCode} -`);
       return BrowserTestUtils.synthesizeKey(gesture.keyCode, {}, targetBrowser);
     default:
       ok(false, "undefined user gesture");
       return false;
   }
 }
 
-async function test_play_without_user_gesture() {
+async function testPlayWithoutUserGesture() {
   info("- open new tab -");
   let tab = await BrowserTestUtils.openNewForegroundTab(window.gBrowser,
                                                         "about:blank");
   BrowserTestUtils.loadURI(tab.linkedBrowser, VIDEO_PAGE);
   await BrowserTestUtils.browserLoaded(tab.linkedBrowser);
 
-  async function check_autoplay_keyword() {
+  async function checkAutoplayKeyword() {
     info("- create an new autoplay video -");
     let video = content.document.createElement("video");
     video.src = "gizmo.mp4";
     video.autoplay = true;
     let canplayPromise = new Promise(function(resolve) {
       video.addEventListener("canplaythrough", function() {
         resolve();
       }, {once: true});
     });
     content.document.body.appendChild(video);
 
     info("- can't autoplay without user activation -");
     await canplayPromise;
     ok(video.paused, "video can't start without user input.");
   }
-  await ContentTask.spawn(tab.linkedBrowser, null, check_autoplay_keyword);
+  await ContentTask.spawn(tab.linkedBrowser, null, checkAutoplayKeyword);
 
-  async function play_video() {
+  async function playVideo() {
     let video = content.document.getElementById("v");
     info("- call play() without user activation -");
     await video.play().catch(function() {
       ok(video.paused, "video can't start play without user input.");
     });
   }
-  await ContentTask.spawn(tab.linkedBrowser, null, play_video);
+  await ContentTask.spawn(tab.linkedBrowser, null, playVideo);
 
   info("- remove tab -");
   BrowserTestUtils.removeTab(tab);
 }
 
-async function test_play_with_user_gesture(gesture) {
+async function testPlayWithUserGesture(gesture) {
   info("- open new tab -");
   let tab = await BrowserTestUtils.openNewForegroundTab(window.gBrowser,
                                                         "about:blank");
   BrowserTestUtils.loadURI(tab.linkedBrowser, VIDEO_PAGE);
   await BrowserTestUtils.browserLoaded(tab.linkedBrowser);
 
   info("- simulate user gesture -");
   await simulateUserGesture(gesture, tab.linkedBrowser);
 
   info("- call play() -");
-  async function play_video(gesture) {
+  async function playVideo(gesture) {
     let video = content.document.getElementById("v");
     try {
       await video.play();
       ok(gesture.isActivationGesture, "user gesture can activate the page");
       ok(!video.paused, "video starts playing.");
     } catch (e) {
       ok(!gesture.isActivationGesture, "user gesture can not activate the page");
       ok(video.paused, "video can not start playing.");
     }
   }
 
-  await ContentTask.spawn(tab.linkedBrowser, gesture, play_video);
+  await ContentTask.spawn(tab.linkedBrowser, gesture, playVideo);
 
   info("- remove tab -");
   BrowserTestUtils.removeTab(tab);
 }
 
 function createAudioContext() {
   content.ac = new content.AudioContext();
   let ac = content.ac;
@@ -124,182 +150,80 @@ function createAudioContext() {
   });
   ac.notAllowedToStart = new Promise(resolve => {
     ac.addEventListener("blocked", function() {
       resolve();
     }, {once: true});
   });
 }
 
-async function checking_audio_context_running_state() {
+async function checkingAudioContextRunningState() {
   let ac = content.ac;
   await ac.notAllowedToStart;
   ok(ac.state === "suspended", `AudioContext is not started yet.`);
 }
 
-function resume_without_expected_success() {
+function resumeWithoutExpectedSuccess() {
   let ac = content.ac;
   let promise = ac.resume();
   ac.resumePromises.push(promise);
   return new Promise((resolve, reject) => {
     setTimeout(() => {
       if (ac.state == "suspended") {
         ok(true, "audio context is still suspended");
         resolve();
       } else {
         reject("audio context should not be allowed to start");
       }
     }, 2000);
   });
 }
 
-function resume_with_expected_success() {
+function resumeWithExpectedSuccess() {
   let ac = content.ac;
   ac.resumePromises.push(ac.resume());
   return Promise.all(ac.resumePromises).then(() => {
     ok(ac.state == "running", "audio context starts running");
   });
 }
 
-function callGUM(testParameters) {
-  info("- calling gum with " + JSON.stringify(testParameters.constraints));
-  if (testParameters.shouldAllowStartingContext) {
-    // Because of the prefs we've set and passed, this is going to allow the
-    // window to start an AudioContext synchronously.
-    testParameters.constraints.fake = true;
-    return content.navigator.mediaDevices.getUserMedia(testParameters.constraints);
-  }
-
-  // Call gUM, without sucess: we've made it so that only fake requests
-  // succeed without permission, and this is requesting non-fake-devices. Return
-  // a resolved promise so that the test continues, but the getUserMedia Promise
-  // will never be resolved.
-  // We do this to check that it's not merely calling gUM that allows starting
-  // an AudioContext, it's having the Promise it return resolved successfuly,
-  // because of saved permissions for an origin or explicit user consent using
-  // the prompt.
-  content.navigator.mediaDevices.getUserMedia(testParameters.constraints);
-  return Promise.resolve();
-}
-
-
-async function test_webaudio_with_user_gesture(gesture) {
+async function testWebAudioWithUserGesture(gesture) {
   info("- open new tab -");
   let tab = await BrowserTestUtils.openNewForegroundTab(window.gBrowser,
                                                         "about:blank");
   info("- create audio context -");
   // We want the same audio context to be used across different content
   // tasks, so it needs to be loaded by a frame script.
-  let frameScript = createAudioContext;
-  let mm = tab.linkedBrowser.messageManager;
-  mm.loadFrameScript("data:,(" + frameScript.toString() + ")();", false);
+  const mm = tab.linkedBrowser.messageManager;
+  mm.loadFrameScript("data:,(" + createAudioContext.toString() + ")();", false);
 
   info("- check whether audio context starts running -");
   try {
     await ContentTask.spawn(tab.linkedBrowser, null,
-                            checking_audio_context_running_state);
+                            checkingAudioContextRunningState);
   } catch (error) {
     ok(false, error.toString());
   }
 
   info("- calling resume() -");
   try {
     await ContentTask.spawn(tab.linkedBrowser, null,
-                            resume_without_expected_success);
+                            resumeWithoutExpectedSuccess);
   } catch (error) {
     ok(false, error.toString());
   }
 
   info("- simulate user gesture -");
   await simulateUserGesture(gesture, tab.linkedBrowser);
 
   info("- calling resume() again");
   try {
     let resumeFunc = gesture.isActivationGesture ?
-      resume_with_expected_success :
-      resume_without_expected_success;
+      resumeWithExpectedSuccess :
+      resumeWithoutExpectedSuccess;
     await ContentTask.spawn(tab.linkedBrowser, null, resumeFunc);
   } catch (error) {
     ok(false, error.toString());
   }
 
   info("- remove tab -");
   await BrowserTestUtils.removeTab(tab);
 }
-
-async function test_webaudio_with_gum(testParameters) {
-  info("- open new tab -");
-  let tab = await BrowserTestUtils.openNewForegroundTab(window.gBrowser,
-                                                        "https://example.com");
-  info("- create audio context -");
-  // We want the same audio context be used between different content
-  // tasks, so it *must* be loaded by frame script.
-  let frameScript = createAudioContext;
-  let mm = tab.linkedBrowser.messageManager;
-  mm.loadFrameScript("data:,(" + frameScript.toString() + ")();", false);
-
-  info("- check whether audio context starts running -");
-  try {
-    await ContentTask.spawn(tab.linkedBrowser, null,
-                            checking_audio_context_running_state);
-  } catch (error) {
-    ok(false, error.toString());
-  }
-
-  try {
-    await ContentTask.spawn(tab.linkedBrowser, testParameters, callGUM);
-  } catch (error) {
-    ok(false, error.toString());
-  }
-
-  info("- calling resume() again");
-  try {
-    let resumeFunc = testParameters.shouldAllowStartingContext ?
-      resume_with_expected_success :
-      resume_without_expected_success;
-    await ContentTask.spawn(tab.linkedBrowser, null, resumeFunc);
-  } catch (error) {
-    ok(false, error.toString());
-  }
-
-  info("- remove tab -");
-  await BrowserTestUtils.removeTab(tab);
-}
-
-add_task(async function start_test() {
-  info("- setup test preference -");
-  await setup_test_preference();
-
-  info("- test play when page doesn't be activated -");
-  await test_play_without_user_gesture();
-
-  info("- test play after page got user gesture -");
-  for (let idx = 0; idx < UserGestureTests.length; idx++) {
-    info("- test play after page got user gesture -");
-    await test_play_with_user_gesture(UserGestureTests[idx]);
-
-    info("- test web audio with user gesture -");
-    await test_webaudio_with_user_gesture(UserGestureTests[idx]);
-  }
-
-  info("- test web audio with gUM success -");
-
-  await test_webaudio_with_gum({constraints: { audio: true },
-                                shouldAllowStartingContext: true});
-  await test_webaudio_with_gum({constraints: { video: true },
-                                shouldAllowStartingContext: true});
-  await test_webaudio_with_gum({constraints: { video: true,
-                                               audio: true },
-                                shouldAllowStartingContext: true});
-
-  await SpecialPowers.pushPrefEnv({"set": [
-    ["media.navigator.permission.force", true],
-  ]}).then(async function() {
-    info("- test web audio with gUM denied -");
-    await test_webaudio_with_gum({constraints: { video: true },
-                                  shouldAllowStartingContext: false});
-    await test_webaudio_with_gum({constraints: { audio: true },
-                                  shouldAllowStartingContext: false});
-    await test_webaudio_with_gum({constraints: { video: true,
-                                                 audio: true },
-                                  shouldAllowStartingContext: false});
-  });
-});
new file mode 100644
--- /dev/null
+++ b/toolkit/content/tests/browser/browser_autoplay_policy_web_audio_with_gum.js
@@ -0,0 +1,150 @@
+/* eslint-disable mozilla/no-arbitrary-setTimeout */
+
+/**
+ * This test ensures that web audio is allowed to start when we have been
+ * granted gUM (getUserMedia) permission.
+ */
+add_task(async function startTestingWebAudioWithGUM() {
+  info("- setup test preference -");
+  await setupTestPreferences();
+
+  info("- test web audio with gUM success -");
+  await testWebAudioWithGUM({constraints: { audio: true },
+                             shouldAllowStartingContext: true});
+  await testWebAudioWithGUM({constraints: { video: true },
+                             shouldAllowStartingContext: true});
+  await testWebAudioWithGUM({constraints: { video: true,
+                                            audio: true },
+                             shouldAllowStartingContext: true});
+
+  await SpecialPowers.pushPrefEnv({"set": [
+    ["media.navigator.permission.force", true],
+  ]}).then(async function() {
+    info("- test web audio with gUM denied -");
+    await testWebAudioWithGUM({constraints: { video: true },
+                               shouldAllowStartingContext: false});
+    await testWebAudioWithGUM({constraints: { audio: true },
+                               shouldAllowStartingContext: false});
+    await testWebAudioWithGUM({constraints: { video: true,
+                                              audio: true },
+                               shouldAllowStartingContext: false});
+  });
+});
+
+/**
+ * testing helper functions
+ */
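+// Block autoplay and web audio by default; with these prefs only fake gUM
+// requests can succeed without an explicit permission prompt.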
+function setupTestPreferences() {
+  return SpecialPowers.pushPrefEnv({"set": [
+    ["media.autoplay.default", SpecialPowers.Ci.nsIAutoplay.BLOCKED],
+    ["media.autoplay.enabled.user-gestures-needed", true],
+    ["media.autoplay.block-event.enabled", true],
+    ["media.autoplay.block-webaudio", true],
+    ["media.navigator.permission.fake", true],
+  ]});
+}
+
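+// Runs in the content process as a frame script. It stores the AudioContext
+// on `content.ac`, together with promises that resolve on its "statechange"
+// and "blocked" events, so that later content tasks can await them.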
+function createAudioContext() {
+  content.ac = new content.AudioContext();
+  let ac = content.ac;
+  ac.resumePromises = [];
+  ac.stateChangePromise = new Promise(resolve => {
+    ac.addEventListener("statechange", function() {
+      resolve();
+    }, {once: true});
+  });
+  ac.notAllowedToStart = new Promise(resolve => {
+    ac.addEventListener("blocked", function() {
+      resolve();
+    }, {once: true});
+  });
+}
+
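+// Autoplay is blocked, so the context should fire "blocked" and stay in the
+// "suspended" state until something unblocks it.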
+async function checkingAudioContextRunningState() {
+  let ac = content.ac;
+  await ac.notAllowedToStart;
+  ok(ac.state === "suspended", `AudioContext is not started yet.`);
+}
+
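+// Calls resume() and waits to confirm the context stays "suspended"; the
+// pending resume promise is kept so it can be awaited once the context is
+// eventually allowed to start.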
+function resumeWithoutExpectedSuccess() {
+  let ac = content.ac;
+  let promise = ac.resume();
+  ac.resumePromises.push(promise);
+  return new Promise((resolve, reject) => {
+    setTimeout(() => {
+      if (ac.state == "suspended") {
+        ok(true, "audio context is still suspended");
+        resolve();
+      } else {
+        reject("audio context should not be allowed to start");
+      }
+    }, 2000);
+  });
+}
+
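+// Once the context is allowed to start, every previously queued resume()
+// promise should resolve and the context should report "running".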
+function resumeWithExpectedSuccess() {
+  let ac = content.ac;
+  ac.resumePromises.push(ac.resume());
+  return Promise.all(ac.resumePromises).then(() => {
+    ok(ac.state == "running", "audio context starts running");
+  });
+}
+
+function callGUM(testParameters) {
+  info("- calling gum with " + JSON.stringify(testParameters.constraints));
+  if (testParameters.shouldAllowStartingContext) {
+    // Because of the prefs we've set and passed, this is going to allow the
+    // window to start an AudioContext synchronously.
+    testParameters.constraints.fake = true;
+    return content.navigator.mediaDevices.getUserMedia(testParameters.constraints);
+  }
+
+  // Call gUM without success: we've made it so that only fake requests
+  // succeed without permission, and this is requesting non-fake devices.
+  // Return a resolved promise so that the test continues, but the
+  // getUserMedia Promise will never be resolved.
+  // We do this to check that it's not merely calling gUM that allows starting
+  // an AudioContext, it's having the Promise it returns resolve successfully,
+  // because of saved permissions for an origin or explicit user consent via
+  // the prompt.
+  content.navigator.mediaDevices.getUserMedia(testParameters.constraints);
+  return Promise.resolve();
+}
+
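+// Opens a tab, creates a blocked AudioContext via a frame script, calls gUM
+// with the given constraints, and then checks whether resume() succeeds
+// depending on `shouldAllowStartingContext`.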
+async function testWebAudioWithGUM(testParameters) {
+  info("- open new tab -");
+  let tab = await BrowserTestUtils.openNewForegroundTab(window.gBrowser,
+                                                        "https://example.com");
+  info("- create audio context -");
+  // We want the same audio context to be used across different content
+  // tasks, so it *must* be loaded by a frame script.
+  const mm = tab.linkedBrowser.messageManager;
+  mm.loadFrameScript("data:,(" + createAudioContext.toString() + ")();", false);
+
+  info("- check whether audio context starts running -");
+  try {
+    await ContentTask.spawn(tab.linkedBrowser, null,
+                            checkingAudioContextRunningState);
+  } catch (error) {
+    ok(false, error.toString());
+  }
+
+  try {
+    await ContentTask.spawn(tab.linkedBrowser, testParameters, callGUM);
+  } catch (error) {
+    ok(false, error.toString());
+  }
+
+  info("- calling resume() again");
+  try {
+    let resumeFunc = testParameters.shouldAllowStartingContext ?
+      resumeWithExpectedSuccess :
+      resumeWithoutExpectedSuccess;
+    await ContentTask.spawn(tab.linkedBrowser, null, resumeFunc);
+  } catch (error) {
+    ok(false, error.toString());
+  }
+
+  info("- remove tab -");
+  await BrowserTestUtils.removeTab(tab);
+}