Bug 1505370 [wpt PR 13966] - Make testharness tests run in a top-level browsing context, a=testonly
author: moz-wptsync-bot <wptsync@mozilla.com>
date: Wed, 19 Dec 2018 10:43:53 +0000
changeset: 514985 9b149504cdfacae7f3c773121cf8b75b2890a71c
parent: 514984 6d5ffb2fa1803a40a3e95c857fa9b018ee9b8821
child: 514986 eb308a03c89719a689ada807dbeb401663871be1
push id: 1953
push user: ffxbld-merge
push date: Mon, 11 Mar 2019 12:10:20 +0000
treeherder: mozilla-release@9c35dcbaa899
reviewers: testonly
bugs: 1505370, 13966
milestone: 66.0a1
Bug 1505370 [wpt PR 13966] - Make testharness tests run in a top-level browsing context, a=testonly

Automatic update from web-platform-tests

Make testharness tests run in a top-level browsing context

Traditionally testharness tests ran in an auxiliary browsing context opened using window.open, with access to the opener. This works well because the long-lived nature of the opener helps to avoid some of the race conditions that would otherwise occur. But it doesn't work *that* well; the recent refactor to stop continually focusing the opener broke tests that alter document.domain or otherwise prevent the opener from being same-domain with the test window. And future platform features may cause the opener to be nulled out entirely, so even a postMessage-based fix wouldn't work.

To solve all of this, this patch refactors things so that the initial window doesn't contain any logic at all and is just used to keep the browser alive between tests. Most of the logic moves to testharnessreport.js, which is loaded once per test.

In order to get the right timeout when timeout_multiplier is set, this requires an addition to the product API in wptrunner to expose a function for getting the timeout multiplier. This allows us to get the timeout_multiplier for testharness tests upfront and avoids the need to change the content of testharnessreport.js when we start running testharness tests, or to restart the server for each test type.

The main issue with the single-window implementation is that we need to start injecting script once the test page has loaded testharnessreport.js. For most browsers we are able to use pageLoadStrategy=eager to control this; in that case we can start running tests once DOMContentLoaded is reached. Chrome doesn't support this pageLoadStrategy, however, so we have to fake support with custom script.

-- Fix document.write tests to not assume globals exist after document.open
-- Remove reference to missing run() function in CSS tests
--

wpt-commits: 74522a275bea481821e789145578e9e16fd27be3, ecf5c9a1b75d5a5901bf123a55e1ed0d5c99e73f, c522b884f77d869a9048e4246a9ff1da165a014f
wpt-pr: 13966
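The product-API change described above amounts to each product module in wptrunner exporting a timeout-multiplier hook alongside its existing entry points, with a shared default in browsers/base.py. A minimal sketch of what a product module registers, assuming a made-up product name (the __wptrunner__ keys and the default behaviour mirror the diffs below):

    # Hypothetical product module illustrating the new hook; only the
    # "timeout_multiplier" key is new relative to the pre-existing product API.
    def get_timeout_multiplier(test_type, run_info_data, **kwargs):
        # Honour an explicit --timeout-multiplier, otherwise fall back to 1.
        if kwargs["timeout_multiplier"] is not None:
            return kwargs["timeout_multiplier"]
        return 1

    __wptrunner__ = {"product": "exampleproduct",  # illustrative name
                     # ... "check_args", "browser", "executor", "executor_kwargs",
                     #     "env_extras", "env_options" as before ...
                     "timeout_multiplier": "get_timeout_multiplier"}

The test environment then formats the resolved multiplier, together with an explicit_timeout flag, into testharnessreport.js when it is served, so each test page knows its effective timeout up front instead of having it injected by the runner window.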
testing/web-platform/tests/css/css-conditional/test_group_insertRule.html
testing/web-platform/tests/css/css-transitions/currentcolor-animation-001.html
testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/document-write/047-1.html
testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/document-write/047.html
testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/document-write/048.html
testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/base.py
testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/chrome.py
testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/chrome_android.py
testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/edge.py
testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/fennec.py
testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/firefox.py
testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/ie.py
testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/opera.py
testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/safari.py
testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/sauce.py
testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/servo.py
testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/servodriver.py
testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/webkit.py
testing/web-platform/tests/tools/wptrunner/wptrunner/environment.py
testing/web-platform/tests/tools/wptrunner/wptrunner/executors/base.py
testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executormarionette.py
testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorselenium.py
testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorwebdriver.py
testing/web-platform/tests/tools/wptrunner/wptrunner/executors/runner.js
testing/web-platform/tests/tools/wptrunner/wptrunner/executors/testharness_webdriver.js
testing/web-platform/tests/tools/wptrunner/wptrunner/executors/testharness_webdriver_resume.js
testing/web-platform/tests/tools/wptrunner/wptrunner/products.py
testing/web-platform/tests/tools/wptrunner/wptrunner/testdriver-extra.js
testing/web-platform/tests/tools/wptrunner/wptrunner/testharnessreport.js
testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_products.py
testing/web-platform/tests/tools/wptrunner/wptrunner/wptrunner.py
testing/web-platform/tests/tools/wptserve/wptserve/stash.py
--- a/testing/web-platform/tests/css/css-conditional/test_group_insertRule.html
+++ b/testing/web-platform/tests/css/css-conditional/test_group_insertRule.html
@@ -7,17 +7,17 @@
   <link rel="help" href="http://www.w3.org/TR/css3-conditional/#the-cssgroupingrule-interface">
   <meta name="assert" content="requirements in definition of insertRule">
   <script src="/resources/testharness.js"></script>
   <script src="/resources/testharnessreport.js"></script>
 <style id="style">
 @media print {}
 </style>
 </head>
-<body onload="run()">
+<body>
 <div id=log></div>
 <div id="test"></div>
 <script>
 
   var sheet = document.getElementById("style").sheet;
 
   var grouping_rule = sheet.cssRules[0];
 
--- a/testing/web-platform/tests/css/css-transitions/currentcolor-animation-001.html
+++ b/testing/web-platform/tests/css/css-transitions/currentcolor-animation-001.html
@@ -5,17 +5,17 @@
   <link rel="author" title="L. David Baron" href="https://dbaron.org/">
   <link rel="author" title="Mozilla Corporation" href="http://mozilla.com/" />
   <link rel="help" href="http://www.w3.org/TR/css3-transitions/#animatable-types">
   <!-- also see https://www.w3.org/Bugs/Public/show_bug.cgi?id=14605 -->
   <script src="/resources/testharness.js"></script>
   <script src="/resources/testharnessreport.js"></script>
 <style id="style"></style>
 </head>
-<body onload="run()">
+<body>
 <div id=log></div>
 <div id="test"></div>
 <script>
 
  // Transition does not occur when the value is currentColor and color changes
 test(function() {
        var div = document.getElementById("test");
        var cs = getComputedStyle(div, "");
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/document-write/047-1.html
@@ -0,0 +1,7 @@
+<script>
+onload = opener.t.step_func_done(function() {
+    document.write("<body>Filler Text<div id='log'></div>");
+    opener.assert_equals(document.body.textContent, "Filler Text");
+});
+</script>
+<body>FAIL
--- a/testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/document-write/047.html
+++ b/testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/document-write/047.html
@@ -1,15 +1,11 @@
 <!doctype html>
 <title>document.write</title>
-<script src="/resources/testharness.js"></script><script src="/resources/testharnessreport.js"></script>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
 <script>
-var t = async_test();
-onload = function() {
-  t.step(function() {
-    document.write("<body>Filler Text<div id='log'></div>");
-    assert_equals(document.body.textContent, "Filler Text");
-  });
-  t.done();
-};
+var win;
+var t = async_test(() => {
+  win = window.open("047-1.html");
+});
+t.add_cleanup(() => win.close());
 </script>
-<body>FAIL
-<div id="log"></div>
deleted file mode 100644
--- a/testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/document-write/048.html
+++ /dev/null
@@ -1,15 +0,0 @@
-<!doctype html>
-<title>document.write</title>
-<script src="/resources/testharness.js"></script><script src="/resources/testharnessreport.js"></script>
-<script>
-var t = async_test();
-onload = function() {
-  t.step(function() {
-    document.write("<body>Filler Text<div id='log'></div>");
-    assert_equals(document.body.textContent, "Filler Text");
-  });
-  t.done();
-};
-</script>
-<body>FAIL
-<div id="log"></div>
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/base.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/base.py
@@ -11,17 +11,17 @@ here = os.path.split(__file__)[0]
 
 def inherit(super_module, child_globals, product_name):
     super_wptrunner = super_module.__wptrunner__
     child_globals["__wptrunner__"] = child_wptrunner = deepcopy(super_wptrunner)
 
     child_wptrunner["product"] = product_name
 
     for k in ("check_args", "browser", "browser_kwargs", "executor_kwargs",
-              "env_extras", "env_options"):
+              "env_extras", "env_options", "timeout_multiplier"):
         attr = super_wptrunner[k]
         child_globals[attr] = getattr(super_module, attr)
 
     for v in super_module.__wptrunner__["executor"].values():
         child_globals[v] = getattr(super_module, v)
 
     if "run_info_extras" in super_wptrunner:
         attr = super_wptrunner["run_info_extras"]
@@ -76,16 +76,23 @@ def get_free_port(start_port, exclude=No
             s.bind(("127.0.0.1", port))
         except socket.error:
             port += 1
         else:
             return port
         finally:
             s.close()
 
+
+def get_timeout_multiplier(test_type, run_info_data, **kwargs):
+    if kwargs["timeout_multiplier"] is not None:
+        return kwargs["timeout_multiplier"]
+    return 1
+
+
 def browser_command(binary, args, debug_info):
     if debug_info:
         if debug_info.requiresEscapedArgs:
             args = [item.replace("&", "\\&") for item in args]
         debug_args = [debug_info.path] + debug_info.args
     else:
         debug_args = []
 
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/chrome.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/chrome.py
@@ -1,26 +1,28 @@
 from .base import Browser, ExecutorBrowser, require_arg
+from .base import get_timeout_multiplier   # noqa: F401
 from ..webdriver_server import ChromeDriverServer
 from ..executors import executor_kwargs as base_executor_kwargs
 from ..executors.executorwebdriver import (WebDriverTestharnessExecutor,  # noqa: F401
                                            WebDriverRefTestExecutor)  # noqa: F401
 from ..executors.executorchrome import ChromeDriverWdspecExecutor  # noqa: F401
 
 
 __wptrunner__ = {"product": "chrome",
                  "check_args": "check_args",
                  "browser": "ChromeBrowser",
                  "executor": {"testharness": "WebDriverTestharnessExecutor",
                               "reftest": "WebDriverRefTestExecutor",
                               "wdspec": "ChromeDriverWdspecExecutor"},
                  "browser_kwargs": "browser_kwargs",
                  "executor_kwargs": "executor_kwargs",
                  "env_extras": "env_extras",
-                 "env_options": "env_options"}
+                 "env_options": "env_options",
+                 "timeout_multiplier": "get_timeout_multiplier",}
 
 
 def check_args(**kwargs):
     require_arg(kwargs, "webdriver_binary")
 
 
 def browser_kwargs(test_type, run_info_data, config, **kwargs):
     return {"binary": kwargs["binary"],
@@ -29,30 +31,34 @@ def browser_kwargs(test_type, run_info_d
 
 
 def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
                     **kwargs):
     executor_kwargs = base_executor_kwargs(test_type, server_config,
                                            cache_manager, run_info_data,
                                            **kwargs)
     executor_kwargs["close_after_done"] = True
+    executor_kwargs["supports_eager_pageload"] = False
 
     capabilities = {
         "goog:chromeOptions": {
             "prefs": {
                 "profile": {
                     "default_content_setting_values": {
                         "popups": 1
                     }
                 }
             },
             "w3c": True
         }
     }
 
+    if test_type == "testharness":
+        capabilities["pageLoadStrategy"] = "none"
+
     for (kwarg, capability) in [("binary", "binary"), ("binary_args", "args")]:
         if kwargs[kwarg] is not None:
             capabilities["goog:chromeOptions"][capability] = kwargs[kwarg]
 
     if kwargs["headless"]:
         if "args" not in capabilities["goog:chromeOptions"]:
             capabilities["goog:chromeOptions"]["args"] = []
         if "--headless" not in capabilities["goog:chromeOptions"]["args"]:
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/chrome_android.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/chrome_android.py
@@ -1,28 +1,30 @@
 import subprocess
 
 from .base import Browser, ExecutorBrowser, require_arg
+from .base import get_timeout_multiplier   # noqa: F401
 from ..webdriver_server import ChromeDriverServer
 from ..executors import executor_kwargs as base_executor_kwargs
 from ..executors.executorselenium import (SeleniumTestharnessExecutor,  # noqa: F401
                                           SeleniumRefTestExecutor)  # noqa: F401
 from ..executors.executorchrome import ChromeDriverWdspecExecutor  # noqa: F401
 
 
 __wptrunner__ = {"product": "chrome_android",
                  "check_args": "check_args",
                  "browser": "ChromeAndroidBrowser",
                  "executor": {"testharness": "SeleniumTestharnessExecutor",
                               "reftest": "SeleniumRefTestExecutor",
                               "wdspec": "ChromeDriverWdspecExecutor"},
                  "browser_kwargs": "browser_kwargs",
                  "executor_kwargs": "executor_kwargs",
                  "env_extras": "env_extras",
-                 "env_options": "env_options"}
+                 "env_options": "env_options",
+                 "timeout_multiplier": "get_timeout_multiplier"}
 
 _wptserve_ports = set()
 
 
 def check_args(**kwargs):
     require_arg(kwargs, "webdriver_binary")
 
 
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/edge.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/edge.py
@@ -9,52 +9,62 @@ from ..executors.executoredge import Edg
                  "check_args": "check_args",
                  "browser": "EdgeBrowser",
                  "executor": {"testharness": "SeleniumTestharnessExecutor",
                               "reftest": "SeleniumRefTestExecutor",
                               "wdspec": "EdgeDriverWdspecExecutor"},
                  "browser_kwargs": "browser_kwargs",
                  "executor_kwargs": "executor_kwargs",
                  "env_extras": "env_extras",
-                 "env_options": "env_options"}
+                 "env_options": "env_options",
+                 "timeout_multiplier": "get_timeout_multiplier"}
+
 
 def get_timeout_multiplier(test_type, run_info_data, **kwargs):
     if kwargs["timeout_multiplier"] is not None:
         return kwargs["timeout_multiplier"]
     if test_type == "wdspec":
         return 10
     return 1
 
+
 def check_args(**kwargs):
     require_arg(kwargs, "webdriver_binary")
 
+
 def browser_kwargs(test_type, run_info_data, config, **kwargs):
     return {"webdriver_binary": kwargs["webdriver_binary"],
             "webdriver_args": kwargs.get("webdriver_args"),
             "timeout_multiplier": get_timeout_multiplier(test_type,
                                                          run_info_data,
                                                          **kwargs)}
 
+
 def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
                     **kwargs):
     executor_kwargs = base_executor_kwargs(test_type, server_config,
                                            cache_manager, run_info_data, **kwargs)
     executor_kwargs["close_after_done"] = True
     executor_kwargs["timeout_multiplier"] = get_timeout_multiplier(test_type,
                                                                    run_info_data,
                                                                    **kwargs)
     executor_kwargs["capabilities"] = {}
+    if test_type == "testharness":
+        executor_kwargs["capabilities"]["pageLoadStrategy"] = "eager"
     return executor_kwargs
 
+
 def env_extras(**kwargs):
     return []
 
+
 def env_options():
     return {"supports_debugger": False}
 
+
 class EdgeBrowser(Browser):
     used_ports = set()
     init_timeout = 60
 
     def __init__(self, logger, webdriver_binary, timeout_multiplier=None, webdriver_args=None):
         Browser.__init__(self, logger)
         self.server = EdgeDriverServer(self.logger,
                                        binary=webdriver_binary,
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/fennec.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/fennec.py
@@ -24,17 +24,19 @@ from .firefox import (get_timeout_multip
                  "browser": "FennecBrowser",
                  "executor": {"testharness": "MarionetteTestharnessExecutor",
                               "reftest": "MarionetteRefTestExecutor"},
                  "browser_kwargs": "browser_kwargs",
                  "executor_kwargs": "executor_kwargs",
                  "env_extras": "env_extras",
                  "env_options": "env_options",
                  "run_info_extras": "run_info_extras",
-                 "update_properties": "update_properties"}
+                 "update_properties": "update_properties",
+                 "timeout_multiplier": "get_timeout_multiplier"}
+
 
 
 def check_args(**kwargs):
     pass
 
 
 def browser_kwargs(test_type, run_info_data, config, **kwargs):
     return {"package_name": kwargs["package_name"],
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/firefox.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/firefox.py
@@ -33,17 +33,18 @@ here = os.path.join(os.path.split(__file
                  "executor": {"testharness": "MarionetteTestharnessExecutor",
                               "reftest": "MarionetteRefTestExecutor",
                               "wdspec": "MarionetteWdspecExecutor"},
                  "browser_kwargs": "browser_kwargs",
                  "executor_kwargs": "executor_kwargs",
                  "env_extras": "env_extras",
                  "env_options": "env_options",
                  "run_info_extras": "run_info_extras",
-                 "update_properties": "update_properties"}
+                 "update_properties": "update_properties",
+                 "timeout_multiplier": "get_timeout_multiplier"}
 
 
 def get_timeout_multiplier(test_type, run_info_data, **kwargs):
     if kwargs["timeout_multiplier"] is not None:
         return kwargs["timeout_multiplier"]
     if test_type == "reftest":
         if run_info_data["debug"] or run_info_data.get("asan"):
             return 4
@@ -94,16 +95,18 @@ def executor_kwargs(test_type, server_co
                                            cache_manager, run_info_data,
                                            **kwargs)
     executor_kwargs["close_after_done"] = test_type != "reftest"
     executor_kwargs["timeout_multiplier"] = get_timeout_multiplier(test_type,
                                                                    run_info_data,
                                                                    **kwargs)
     executor_kwargs["e10s"] = run_info_data["e10s"]
     capabilities = {}
+    if test_type == "testharness":
+        capabilities["pageLoadStrategy"] = "eager"
     if test_type == "reftest":
         executor_kwargs["reftest_internal"] = kwargs["reftest_internal"]
         executor_kwargs["reftest_screenshot"] = kwargs["reftest_screenshot"]
     if test_type == "wdspec":
         options = {}
         if kwargs["binary"]:
             options["binary"] = kwargs["binary"]
         if kwargs["binary_args"]:
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/ie.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/ie.py
@@ -1,25 +1,27 @@
 from .base import Browser, ExecutorBrowser, require_arg
+from .base import get_timeout_multiplier   # noqa: F401
 from ..webdriver_server import InternetExplorerDriverServer
 from ..executors import executor_kwargs as base_executor_kwargs
 from ..executors.executorselenium import (SeleniumTestharnessExecutor,  # noqa: F401
                                           SeleniumRefTestExecutor)  # noqa: F401
 from ..executors.executorinternetexplorer import InternetExplorerDriverWdspecExecutor  # noqa: F401
 
 __wptrunner__ = {"product": "ie",
                  "check_args": "check_args",
                  "browser": "InternetExplorerBrowser",
                  "executor": {"testharness": "SeleniumTestharnessExecutor",
                               "reftest": "SeleniumRefTestExecutor",
                               "wdspec": "InternetExplorerDriverWdspecExecutor"},
                  "browser_kwargs": "browser_kwargs",
                  "executor_kwargs": "executor_kwargs",
                  "env_extras": "env_extras",
-                 "env_options": "env_options"}
+                 "env_options": "env_options",
+                 "timeout_multiplier": "get_timeout_multiplier"}
 
 
 def check_args(**kwargs):
     require_arg(kwargs, "webdriver_binary")
 
 def browser_kwargs(test_type, run_info_data, config, **kwargs):
     return {"webdriver_binary": kwargs["webdriver_binary"],
             "webdriver_args": kwargs.get("webdriver_args")}
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/opera.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/opera.py
@@ -1,26 +1,28 @@
 from .base import Browser, ExecutorBrowser, require_arg
+from .base import get_timeout_multiplier   # noqa: F401
 from ..webdriver_server import OperaDriverServer
 from ..executors import executor_kwargs as base_executor_kwargs
 from ..executors.executorselenium import (SeleniumTestharnessExecutor,  # noqa: F401
                                           SeleniumRefTestExecutor)  # noqa: F401
 from ..executors.executoropera import OperaDriverWdspecExecutor  # noqa: F401
 
 
 __wptrunner__ = {"product": "opera",
                  "check_args": "check_args",
                  "browser": "OperaBrowser",
                  "executor": {"testharness": "SeleniumTestharnessExecutor",
                               "reftest": "SeleniumRefTestExecutor",
                               "wdspec": "OperaDriverWdspecExecutor"},
                  "browser_kwargs": "browser_kwargs",
                  "executor_kwargs": "executor_kwargs",
                  "env_extras": "env_extras",
-                 "env_options": "env_options"}
+                 "env_options": "env_options",
+                 "timeout_multiplier": "get_timeout_multiplier"}
 
 
 def check_args(**kwargs):
     require_arg(kwargs, "webdriver_binary")
 
 
 def browser_kwargs(test_type, run_info_data, config, **kwargs):
     return {"binary": kwargs["binary"],
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/safari.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/safari.py
@@ -1,26 +1,28 @@
 from .base import Browser, ExecutorBrowser, require_arg
+from .base import get_timeout_multiplier   # noqa: F401
 from ..webdriver_server import SafariDriverServer
 from ..executors import executor_kwargs as base_executor_kwargs
 from ..executors.executorwebdriver import (WebDriverTestharnessExecutor,  # noqa: F401
                                            WebDriverRefTestExecutor)  # noqa: F401
 from ..executors.executorsafari import SafariDriverWdspecExecutor  # noqa: F401
 
 
 __wptrunner__ = {"product": "safari",
                  "check_args": "check_args",
                  "browser": "SafariBrowser",
                  "executor": {"testharness": "WebDriverTestharnessExecutor",
                               "reftest": "WebDriverRefTestExecutor",
                               "wdspec": "SafariDriverWdspecExecutor"},
                  "browser_kwargs": "browser_kwargs",
                  "executor_kwargs": "executor_kwargs",
                  "env_extras": "env_extras",
-                 "env_options": "env_options"}
+                 "env_options": "env_options",
+                 "timeout_multiplier": "get_timeout_multiplier"}
 
 
 def check_args(**kwargs):
     require_arg(kwargs, "webdriver_binary")
 
 
 def browser_kwargs(test_type, run_info_data, config, **kwargs):
     return {"webdriver_binary": kwargs["webdriver_binary"],
@@ -28,16 +30,18 @@ def browser_kwargs(test_type, run_info_d
 
 
 def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
                     **kwargs):
     executor_kwargs = base_executor_kwargs(test_type, server_config,
                                            cache_manager, run_info_data, **kwargs)
     executor_kwargs["close_after_done"] = True
     executor_kwargs["capabilities"] = {}
+    if test_type == "testharness":
+        executor_kwargs["capabilities"]["pageLoadStrategy"] = "eager"
     if kwargs["binary"] is not None:
         raise ValueError("Safari doesn't support setting executable location")
 
     return executor_kwargs
 
 
 def env_extras(**kwargs):
     return []
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/sauce.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/sauce.py
@@ -9,16 +9,17 @@ import subprocess
 import tarfile
 import tempfile
 import time
 from cStringIO import StringIO as CStringIO
 
 import requests
 
 from .base import Browser, ExecutorBrowser, require_arg
+from .base import get_timeout_multiplier   # noqa: F401
 from ..executors import executor_kwargs as base_executor_kwargs
 from ..executors.executorselenium import (SeleniumTestharnessExecutor,
                                           SeleniumRefTestExecutor)
 
 here = os.path.split(__file__)[0]
 # Number of seconds to wait between polling operations when detecting status of
 # Sauce Connect sub-process.
 sc_poll_period = 1
@@ -27,17 +28,18 @@ sc_poll_period = 1
 __wptrunner__ = {"product": "sauce",
                  "check_args": "check_args",
                  "browser": "SauceBrowser",
                  "executor": {"testharness": "SeleniumTestharnessExecutor",
                               "reftest": "SeleniumRefTestExecutor"},
                  "browser_kwargs": "browser_kwargs",
                  "executor_kwargs": "executor_kwargs",
                  "env_extras": "env_extras",
-                 "env_options": "env_options"}
+                 "env_options": "env_options",
+                 "timeout_multiplier": "get_timeout_multiplier"}
 
 
 def get_capabilities(**kwargs):
     browser_name = kwargs["sauce_browser"]
     platform = kwargs["sauce_platform"]
     version = kwargs["sauce_version"]
     build = kwargs["sauce_build"]
     tags = kwargs["sauce_tags"]
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/servo.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/servo.py
@@ -1,11 +1,12 @@
 import os
 
 from .base import NullBrowser, ExecutorBrowser, require_arg
+from .base import get_timeout_multiplier   # noqa: F401
 from ..executors import executor_kwargs as base_executor_kwargs
 from ..executors.executorservo import ServoTestharnessExecutor, ServoRefTestExecutor, ServoWdspecExecutor  # noqa: F401
 
 here = os.path.join(os.path.split(__file__)[0])
 
 __wptrunner__ = {
     "product": "servo",
     "check_args": "check_args",
@@ -14,16 +15,17 @@ here = os.path.join(os.path.split(__file
         "testharness": "ServoTestharnessExecutor",
         "reftest": "ServoRefTestExecutor",
         "wdspec": "ServoWdspecExecutor",
     },
     "browser_kwargs": "browser_kwargs",
     "executor_kwargs": "executor_kwargs",
     "env_extras": "env_extras",
     "env_options": "env_options",
+    "timeout_multiplier": "get_timeout_multiplier",
     "update_properties": "update_properties",
 }
 
 
 def check_args(**kwargs):
     require_arg(kwargs, "binary")
 
 
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/servodriver.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/servodriver.py
@@ -2,16 +2,17 @@ import os
 import subprocess
 import tempfile
 
 from mozprocess import ProcessHandler
 
 from tools.serve.serve import make_hosts_file
 
 from .base import Browser, require_arg, get_free_port, browser_command, ExecutorBrowser
+from .base import get_timeout_multiplier   # noqa: F401
 from ..executors import executor_kwargs as base_executor_kwargs
 from ..executors.executorservodriver import (ServoWebDriverTestharnessExecutor,  # noqa: F401
                                              ServoWebDriverRefTestExecutor)  # noqa: F401
 
 here = os.path.join(os.path.split(__file__)[0])
 
 __wptrunner__ = {
     "product": "servodriver",
@@ -20,16 +21,17 @@ here = os.path.join(os.path.split(__file
     "executor": {
         "testharness": "ServoWebDriverTestharnessExecutor",
         "reftest": "ServoWebDriverRefTestExecutor",
     },
     "browser_kwargs": "browser_kwargs",
     "executor_kwargs": "executor_kwargs",
     "env_extras": "env_extras",
     "env_options": "env_options",
+    "timeout_multiplier": "get_timeout_multiplier",
     "update_properties": "update_properties",
 }
 
 
 def check_args(**kwargs):
     require_arg(kwargs, "binary")
 
 
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/webkit.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/webkit.py
@@ -1,9 +1,10 @@
 from .base import Browser, ExecutorBrowser, require_arg
+from .base import get_timeout_multiplier   # noqa: F401
 from ..executors import executor_kwargs as base_executor_kwargs
 from ..executors.executorwebdriver import (WebDriverTestharnessExecutor,  # noqa: F401
                                            WebDriverRefTestExecutor)  # noqa: F401
 from ..executors.executorwebkit import WebKitDriverWdspecExecutor  # noqa: F401
 from ..webdriver_server import WebKitDriverServer
 
 
 __wptrunner__ = {"product": "webkit",
@@ -11,17 +12,18 @@ from ..webdriver_server import WebKitDri
                  "browser": "WebKitBrowser",
                  "browser_kwargs": "browser_kwargs",
                  "executor": {"testharness": "WebDriverTestharnessExecutor",
                               "reftest": "WebDriverRefTestExecutor",
                               "wdspec": "WebKitDriverWdspecExecutor"},
                  "executor_kwargs": "executor_kwargs",
                  "env_extras": "env_extras",
                  "env_options": "env_options",
-                 "run_info_extras": "run_info_extras"}
+                 "run_info_extras": "run_info_extras",
+                 "timeout_multiplier": "get_timeout_multiplier"}
 
 
 def check_args(**kwargs):
     require_arg(kwargs, "binary")
     require_arg(kwargs, "webdriver_binary")
     require_arg(kwargs, "webkit_port")
 
 
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/environment.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/environment.py
@@ -41,23 +41,24 @@ def serve_path(test_paths):
     return test_paths["/"]["tests_path"]
 
 
 class TestEnvironmentError(Exception):
     pass
 
 
 class TestEnvironment(object):
-    def __init__(self, test_paths, pause_after_test, debug_info, options, ssl_config, env_extras):
+    def __init__(self, test_paths, testharness_timeout_multipler, pause_after_test, debug_info, options, ssl_config, env_extras):
         """Context manager that owns the test environment i.e. the http and
         websockets servers"""
         self.test_paths = test_paths
         self.server = None
         self.config_ctx = None
         self.config = None
+        self.testharness_timeout_multipler = testharness_timeout_multipler
         self.pause_after_test = pause_after_test
         self.test_server_port = options.pop("test_server_port", True)
         self.debug_info = debug_info
         self.options = options if options is not None else {}
 
         self.cache_manager = multiprocessing.Manager()
         self.stash = serve.stash.StashServer()
         self.env_extras = env_extras
@@ -164,17 +165,20 @@ class TestEnvironment(object):
             pass
 
     def get_routes(self):
         route_builder = serve.RoutesBuilder()
 
         for path, format_args, content_type, route in [
                 ("testharness_runner.html", {}, "text/html", "/testharness_runner.html"),
                 (self.options.get("testharnessreport", "testharnessreport.js"),
-                 {"output": self.pause_after_test}, "text/javascript;charset=utf8",
+                 {"output": self.pause_after_test,
+                  "timeout_multiplier": self.testharness_timeout_multipler,
+                  "explicit_timeout": "true" if self.debug_info is not None else "false"},
+                 "text/javascript;charset=utf8",
                  "/resources/testharnessreport.js")]:
             path = os.path.normpath(os.path.join(here, path))
             # Note that .headers. files don't apply to static routes, so we need to
             # readd any static headers here.
             headers = {"Cache-Control": "max-age=3600"}
             route_builder.add_static(path, format_args, content_type, route,
                                      headers=headers)
 
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/base.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/base.py
@@ -517,17 +517,17 @@ class CallbackHandler(object):
         self.logger.debug("Got async callback: %s" % result[1])
         try:
             callback = self.callbacks[command]
         except KeyError:
             raise ValueError("Unknown callback type %r" % result[1])
         return callback(url, payload)
 
     def process_complete(self, url, payload):
-        rv = [url] + payload
+        rv = [strip_server(url)] + payload
         return True, rv
 
     def process_action(self, url, payload):
         action = payload["action"]
         self.logger.debug("Got action: %s" % action)
         try:
             action_handler = self.actions[action]
         except KeyError:
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executormarionette.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executormarionette.py
@@ -1,11 +1,12 @@
 import json
 import os
 import threading
+import time
 import traceback
 import urlparse
 import uuid
 
 errors = None
 marionette = None
 pytestrunner = None
 
@@ -181,39 +182,53 @@ class MarionetteTestharnessProtocolPart(
                 alert = self.marionette.switch_to_alert()
                 try:
                     alert.dismiss()
                 except errors.NoAlertPresentException:
                     pass
             else:
                 break
 
-    def get_test_window(self, window_id, parent):
-        test_window = None
-        if window_id:
-            try:
-                # Try this, it's in Level 1 but nothing supports it yet
-                win_s = self.parent.base.execute_script("return window['%s'];" % self.window_id)
-                win_obj = json.loads(win_s)
-                test_window = win_obj["window-fcc6-11e5-b4f8-330a88ab9d7f"]
-            except Exception:
-                pass
+    def get_test_window(self, window_id, parent, timeout=5):
+        """Find the test window amongst all the open windows.
+        This is assumed to be either the named window or the one after the parent in the list of
+        window handles
 
-        if test_window is None:
-            after = self.marionette.window_handles
-            if len(after) == 2:
-                test_window = next(iter(set(after) - set([parent])))
-            elif after[0] == parent and len(after) > 2:
-                # Hope the first one here is the test window
-                test_window = after[1]
-            else:
-                raise Exception("unable to find test window")
+        :param window_id: The DOM name of the Window
+        :param parent: The handle of the runner window
+        :param timeout: The time in seconds to wait for the window to appear. This is because in
+                        some implementations there's a race between calling window.open and the
+                        window being added to the list of WebDriver accessible windows."""
+        test_window = None
+        end_time = time.time() + timeout
+        while time.time() < end_time:
+            if window_id:
+                try:
+                    # Try this, it's in Level 1 but nothing supports it yet
+                    win_s = self.parent.base.execute_script("return window['%s'];" % self.window_id)
+                    win_obj = json.loads(win_s)
+                    test_window = win_obj["window-fcc6-11e5-b4f8-330a88ab9d7f"]
+                except Exception:
+                    pass
 
-        assert test_window != parent
-        return test_window
+            if test_window is None:
+                handles = self.marionette.window_handles
+                if len(handles) == 2:
+                    test_window = next(iter(set(handles) - set([parent])))
+                elif handles[0] == parent and len(handles) > 2:
+                    # Hope the first one here is the test window
+                    test_window = handles[1]
+
+            if test_window is not None:
+                assert test_window != parent
+                return test_window
+
+            time.sleep(0.1)
+
+        raise Exception("unable to find test window")
 
 
 class MarionettePrefsProtocolPart(PrefsProtocolPart):
     def setup(self):
         self.marionette = self.parent.marionette
 
     def set(self, name, value):
         if value.lower() not in ("true", "false"):
@@ -620,18 +635,18 @@ class MarionetteTestharnessExecutor(Test
                                      timeout_multiplier=timeout_multiplier,
                                      debug_info=debug_info)
         self.protocol = MarionetteProtocol(self,
                                            browser,
                                            capabilities,
                                            timeout_multiplier,
                                            kwargs["e10s"],
                                            ccov)
-        self.script = open(os.path.join(here, "testharness_webdriver.js")).read()
-        self.script_resume = open(os.path.join(here, "testharness_webdriver_resume.js")).read()
+        with open(os.path.join(here, "testharness_webdriver_resume.js")) as f:
+            self.script_resume = f.read()
         self.close_after_done = close_after_done
         self.window_id = str(uuid.uuid4())
         self.debug = debug
 
         self.original_pref_values = {}
 
         if marionette is None:
             do_delayed_imports()
@@ -674,39 +689,28 @@ class MarionetteTestharnessExecutor(Test
         if success:
             return self.convert_result(test, data, extra=extra)
 
         return (test.result_cls(extra=extra, *data), [])
 
     def do_testharness(self, protocol, url, timeout):
         parent_window = protocol.testharness.close_old_windows(protocol)
 
-        if timeout is not None:
-            timeout_ms = str(timeout * 1000)
-        else:
-            timeout_ms = "null"
-
         if self.protocol.coverage.is_enabled:
             self.protocol.coverage.reset()
 
-        format_map = {"abs_url": url,
-                      "url": strip_server(url),
-                      "window_id": self.window_id,
-                      "timeout_multiplier": self.timeout_multiplier,
-                      "timeout": timeout_ms,
-                      "explicit_timeout": timeout is None}
+        format_map = {"url": strip_server(url)}
 
-        script = self.script % format_map
-
-        protocol.base.execute_script(script, async=True)
-        test_window = protocol.testharness.get_test_window(self.window_id, parent_window)
-
+        protocol.base.execute_script("window.open('about:blank', '%s', 'noopener')" % self.window_id)
+        test_window = protocol.testharness.get_test_window(self.window_id, parent_window,
+                                                           timeout=10*self.timeout_multiplier)
+        self.protocol.base.set_window(test_window)
         handler = CallbackHandler(self.logger, protocol, test_window)
+        protocol.marionette.navigate(url)
         while True:
-            self.protocol.base.set_window(test_window)
             result = protocol.base.execute_script(
                 self.script_resume % format_map, async=True)
             if result is None:
                 # This can happen if we get an content process crash
                 return None
             done, rv = handler(result)
             if done:
                 break
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorselenium.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorselenium.py
@@ -1,12 +1,13 @@
 import json
 import os
 import socket
 import threading
+import time
 import traceback
 import urlparse
 import uuid
 
 from .base import (CallbackHandler,
                    RefTestExecutor,
                    RefTestImplementation,
                    TestharnessExecutor,
@@ -97,53 +98,53 @@ class SeleniumTestharnessProtocolPart(Te
             try:
                 self.webdriver.switch_to_window(handle)
                 self.webdriver.close()
             except exceptions.NoSuchWindowException:
                 pass
         self.webdriver.switch_to_window(self.runner_handle)
         return self.runner_handle
 
-    def get_test_window(self, window_id, parent):
-        test_window = None
-        try:
-            # Try using the JSON serialization of the WindowProxy object,
-            # it's in Level 1 but nothing supports it yet
-            win_s = self.webdriver.execute_script("return window['%s'];" % window_id)
-            win_obj = json.loads(win_s)
-            test_window = win_obj["window-fcc6-11e5-b4f8-330a88ab9d7f"]
-        except Exception:
-            pass
+    def get_test_window(self, window_id, parent, timeout=5):
+        """Find the test window amongst all the open windows.
+        This is assumed to be either the named window or the one after the parent in the list of
+        window handles
 
-        if test_window is None:
-            after = self.webdriver.window_handles
-
-    def get_test_window(self, window_id, parent):
+        :param window_id: The DOM name of the Window
+        :param parent: The handle of the runner window
+        :param timeout: The time in seconds to wait for the window to appear. This is because in
+                        some implementations there's a race between calling window.open and the
+                        window being added to the list of WebDriver accessible windows."""
         test_window = None
-        try:
-            # Try using the JSON serialization of the WindowProxy object,
-            # it's in Level 1 but nothing supports it yet
-            win_s = self.webdriver.execute_script("return window['%s'];" % window_id)
-            win_obj = json.loads(win_s)
-            test_window = win_obj["window-fcc6-11e5-b4f8-330a88ab9d7f"]
-        except Exception:
-            pass
+        end_time = time.time() + timeout
+        while time.time() < end_time:
+            try:
+                # Try using the JSON serialization of the WindowProxy object,
+                # it's in Level 1 but nothing supports it yet
+                win_s = self.webdriver.execute_script("return window['%s'];" % window_id)
+                win_obj = json.loads(win_s)
+                test_window = win_obj["window-fcc6-11e5-b4f8-330a88ab9d7f"]
+            except Exception:
+                pass
 
-        if test_window is None:
-            after = self.webdriver.window_handles
-            if len(after) == 2:
-                test_window = next(iter(set(after) - set([parent])))
-            elif after[0] == parent and len(after) > 2:
-                # Hope the first one here is the test window
-                test_window = after[1]
-            else:
-                raise Exception("unable to find test window")
+            if test_window is None:
+                after = self.webdriver.window_handles
+                if len(after) == 2:
+                    test_window = next(iter(set(after) - set([parent])))
+                elif after[0] == parent and len(after) > 2:
+                    # Hope the first one here is the test window
+                    test_window = after[1]
 
-        assert test_window != parent
-        return test_window
+            if test_window is not None:
+                assert test_window != parent
+                return test_window
+
+            time.sleep(0.1)
+
+        raise Exception("unable to find test window")
 
 
 class SeleniumSelectorProtocolPart(SelectorProtocolPart):
     def setup(self):
         self.webdriver = self.parent.webdriver
 
     def elements_by_selector(self, selector):
         return self.webdriver.find_elements_by_css_selector(selector)
@@ -291,18 +292,16 @@ class SeleniumTestharnessExecutor(Testha
     def __init__(self, browser, server_config, timeout_multiplier=1,
                  close_after_done=True, capabilities=None, debug_info=None,
                  **kwargs):
         """Selenium-based executor for testharness.js tests"""
         TestharnessExecutor.__init__(self, browser, server_config,
                                      timeout_multiplier=timeout_multiplier,
                                      debug_info=debug_info)
         self.protocol = SeleniumProtocol(self, browser, capabilities)
-        with open(os.path.join(here, "testharness_webdriver.js")) as f:
-            self.script = f.read()
         with open(os.path.join(here, "testharness_webdriver_resume.js")) as f:
             self.script_resume = f.read()
         self.close_after_done = close_after_done
         self.window_id = str(uuid.uuid4())
 
     def is_alive(self):
         return self.protocol.is_alive()
 
@@ -319,30 +318,27 @@ class SeleniumTestharnessExecutor(Testha
                                     test.timeout * self.timeout_multiplier).run()
 
         if success:
             return self.convert_result(test, data)
 
         return (test.result_cls(*data), [])
 
     def do_testharness(self, protocol, url, timeout):
-        format_map = {"abs_url": url,
-                      "url": strip_server(url),
-                      "window_id": self.window_id,
-                      "timeout_multiplier": self.timeout_multiplier,
-                      "timeout": timeout * 1000}
+        format_map = {"url": strip_server(url)}
 
         parent_window = protocol.testharness.close_old_windows()
         # Now start the test harness
-        protocol.base.execute_script(self.script % format_map, async=True)
-        test_window = protocol.testharness.get_test_window(self.window_id, parent_window)
-
+        protocol.base.execute_script("window.open('about:blank', '%s', 'noopener')" % self.window_id)
+        test_window = protocol.testharness.get_test_window(self.window_id, parent_window,
+                                                           timeout=5*self.timeout_multiplier)
+        self.protocol.base.set_window(test_window)
+        protocol.webdriver.get(url)
         handler = CallbackHandler(self.logger, protocol, test_window)
         while True:
-            self.protocol.base.set_window(test_window)
             result = protocol.base.execute_script(
                 self.script_resume % format_map, async=True)
             done, rv = handler(result)
             if done:
                 break
         return rv
 
 
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorwebdriver.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorwebdriver.py
@@ -1,12 +1,13 @@
 import json
 import os
 import socket
 import threading
+import time
 import traceback
 import urlparse
 import uuid
 
 from .base import (CallbackHandler,
                    RefTestExecutor,
                    RefTestImplementation,
                    TestharnessExecutor,
@@ -89,39 +90,53 @@ class WebDriverTestharnessProtocolPart(T
             try:
                 self.webdriver.window_handle = handle
                 self.webdriver.close()
             except client.NoSuchWindowException:
                 pass
         self.webdriver.window_handle = self.runner_handle
         return self.runner_handle
 
-    def get_test_window(self, window_id, parent):
-        test_window = None
-        try:
-            # Try using the JSON serialization of the WindowProxy object,
-            # it's in Level 1 but nothing supports it yet
-            win_s = self.webdriver.execute_script("return window['%s'];" % window_id)
-            win_obj = json.loads(win_s)
-            test_window = win_obj["window-fcc6-11e5-b4f8-330a88ab9d7f"]
-        except Exception:
-            pass
+    def get_test_window(self, window_id, parent, timeout=5):
+        """Find the test window amongst all the open windows.
+        This is assumed to be either the named window or the one after the parent in the list of
+        window handles
 
-        if test_window is None:
-            after = self.webdriver.handles
-            if len(after) == 2:
-                test_window = next(iter(set(after) - set([parent])))
-            elif after[0] == parent and len(after) > 2:
-                # Hope the first one here is the test window
-                test_window = after[1]
-            else:
-                raise Exception("unable to find test window")
+        :param window_id: The DOM name of the Window
+        :param parent: The handle of the runner window
+        :param timeout: The time in seconds to wait for the window to appear. This is because in
+                        some implementations there's a race between calling window.open and the
+                        window being added to the list of WebDriver accessible windows."""
+        test_window = None
+        end_time = time.time() + timeout
+        while time.time() < end_time:
+            try:
+                # Try using the JSON serialization of the WindowProxy object,
+                # it's in Level 1 but nothing supports it yet
+                win_s = self.webdriver.execute_script("return window['%s'];" % window_id)
+                win_obj = json.loads(win_s)
+                test_window = win_obj["window-fcc6-11e5-b4f8-330a88ab9d7f"]
+            except Exception:
+                pass
 
-        assert test_window != parent
-        return test_window
+            if test_window is None:
+                after = self.webdriver.handles
+                if len(after) == 2:
+                    test_window = next(iter(set(after) - set([parent])))
+                elif after[0] == parent and len(after) > 2:
+                    # Hope the first one here is the test window
+                    test_window = after[1]
+
+            if test_window is not None:
+                assert test_window != parent
+                return test_window
+
+            time.sleep(0.1)
+
+        raise Exception("unable to find test window")
 
 
 class WebDriverSelectorProtocolPart(SelectorProtocolPart):
     def setup(self):
         self.webdriver = self.parent.webdriver
 
     def elements_by_selector(self, selector):
         return self.webdriver.find.css(selector)
@@ -277,28 +292,27 @@ class WebDriverRun(object):
             self.result_flag.set()
 
 
 class WebDriverTestharnessExecutor(TestharnessExecutor):
     supports_testdriver = True
 
     def __init__(self, browser, server_config, timeout_multiplier=1,
                  close_after_done=True, capabilities=None, debug_info=None,
-                 **kwargs):
+                 supports_eager_pageload=True, **kwargs):
         """WebDriver-based executor for testharness.js tests"""
         TestharnessExecutor.__init__(self, browser, server_config,
                                      timeout_multiplier=timeout_multiplier,
                                      debug_info=debug_info)
         self.protocol = WebDriverProtocol(self, browser, capabilities)
-        with open(os.path.join(here, "testharness_webdriver.js")) as f:
-            self.script = f.read()
         with open(os.path.join(here, "testharness_webdriver_resume.js")) as f:
             self.script_resume = f.read()
         self.close_after_done = close_after_done
         self.window_id = str(uuid.uuid4())
+        self.supports_eager_pageload = supports_eager_pageload
 
     def is_alive(self):
         return self.protocol.is_alive()
 
     def on_environment_change(self, new_environment):
         if new_environment["protocol"] != self.last_environment["protocol"]:
             self.protocol.testharness.load_runner(new_environment["protocol"])
 
@@ -311,37 +325,62 @@ class WebDriverTestharnessExecutor(Testh
                                     test.timeout * self.timeout_multiplier).run()
 
         if success:
             return self.convert_result(test, data)
 
         return (test.result_cls(*data), [])
 
     def do_testharness(self, protocol, url, timeout):
-        format_map = {"abs_url": url,
-                      "url": strip_server(url),
-                      "window_id": self.window_id,
-                      "timeout_multiplier": self.timeout_multiplier,
-                      "timeout": timeout * 1000}
+        format_map = {"url": strip_server(url)}
 
         parent_window = protocol.testharness.close_old_windows()
         # Now start the test harness
-        protocol.base.execute_script(self.script % format_map, async=True)
-        test_window = protocol.testharness.get_test_window(self.window_id, parent_window)
+        protocol.base.execute_script("window.open('about:blank', '%s', 'noopener')" % self.window_id)
+        test_window = protocol.testharness.get_test_window(self.window_id,
+                                                           parent_window,
+                                                           timeout=5*self.timeout_multiplier)
+        self.protocol.base.set_window(test_window)
+        handler = CallbackHandler(self.logger, protocol, test_window)
+        protocol.webdriver.url = url
 
-        handler = CallbackHandler(self.logger, protocol, test_window)
+        if not self.supports_eager_pageload:
+            self.wait_for_load(protocol)
+
         while True:
-            self.protocol.base.set_window(test_window)
             result = protocol.base.execute_script(
                 self.script_resume % format_map, async=True)
             done, rv = handler(result)
             if done:
                 break
         return rv
 
+    def wait_for_load(self, protocol):
+        # pageLoadStrategy=eager doesn't work in Chrome so try to emulate in user script
+        loaded = False
+        seen_error = False
+        while not loaded:
+            try:
+                loaded = protocol.base.execute_script("""
+var callback = arguments[arguments.length - 1];
+if (location.href === "about:blank") {
+  callback(false);
+} else if (document.readyState !== "loading") {
+  callback(true);
+} else {
+  document.addEventListener("readystatechange", () => {if (document.readyState !== "loading") {callback(true)}});
+}""", async=True)
+            except client.JavascriptErrorException:
+                # We can get an error here if the script runs in the initial about:blank
+                # document before it has navigated, with the driver returning an error
+                # indicating that the document was unloaded
+                if seen_error:
+                    raise
+                seen_error = True
+
 
 class WebDriverRefTestExecutor(RefTestExecutor):
     def __init__(self, browser, server_config, timeout_multiplier=1,
                  screenshot_cache=None, close_after_done=True,
                  debug_info=None, capabilities=None, **kwargs):
         """WebDriver-based executor for reftests"""
         RefTestExecutor.__init__(self,
                                  browser,
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/runner.js
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/runner.js
@@ -1,59 +1,1 @@
 document.title = '%(title)s';
-
-window.addEventListener(
-  "message",
-  function(event) {
-    window.message_queue.push(event);
-    window.process_next_event();
-  },
-  false
-);
-
-
-window.process_next_event = function() {
-  /* This function handles the next testdriver event. The presence of
-     window.testdriver_callback is used as a switch; when that function
-     is present we are able to handle the next event and when is is not
-     present we must wait. Therefore to drive the event processing, this
-     function must be called in two circumstances:
-       * Every time there is a new event that we may be able to handle
-       * Every time we set the callback function
-     This function unsets the callback, so no further testdriver actions
-     will be run until it is reset, which wptrunner does after it has
-     completed handling the current action.
-   */
-  if (!window.testdriver_callback) {
-    return;
-  }
-  var event = window.message_queue.shift();
-  if (!event) {
-    return;
-  }
-  var data = event.data;
-
-  var payload = undefined;
-
-  switch(data.type) {
-  case "complete":
-    var tests = event.data.tests;
-    var status = event.data.status;
-
-    var subtest_results = tests.map(function(x) {
-      return [x.name, x.status, x.message, x.stack];
-    });
-    payload = [status.status,
-               status.message,
-               status.stack,
-               subtest_results];
-    clearTimeout(window.timer);
-    break;
-  case "action":
-    payload = data;
-    break;
-  default:
-    return;
-  }
-  var callback = window.testdriver_callback;
-  window.testdriver_callback = null;
-  callback([window.url, data.type, payload]);
-};
deleted file mode 100644
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/testharness_webdriver.js
+++ /dev/null
@@ -1,19 +0,0 @@
-var callback = arguments[arguments.length - 1];
-var loaded = false;
-
-window.timeout_multiplier = %(timeout_multiplier)d;
-window.url = "%(url)s";
-window.win = window.open("%(abs_url)s", "%(window_id)s");
-window.win.addEventListener('DOMContentLoaded', (e) => {
-  callback();
-});
-
-
-window.message_queue = [];
-window.testdriver_callback = null;
-
-if (%(timeout)s != null) {
-  window.timer = setTimeout(function() {
-    window.win.timeout();
-  }, %(timeout)s);
-}
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/testharness_webdriver_resume.js
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/testharness_webdriver_resume.js
@@ -1,21 +1,5 @@
-var callback = arguments[arguments.length - 1];
-window.opener.testdriver_callback = function(results) {
-  /**
-   * The current window and its opener belong to the same domain, making it
-   * technically possible for data structures to be shared directly.
-   * Unfortunately, some browser/WebDriver implementations incorrectly
-   * serialize Arrays from foreign realms [1]. This issue does not extend to
-   * the behavior of `JSON.stringify` and `JSON.parse` in these
-   * implementations. Use that API to re-create the data structure in the local
-   * realm to avoid the problem in the non-conforming browsers.
-   *
-   * [1] This has been observed in Edge version 17 and/or the corresponding
-   *     release of Edgedriver
-   */
-  try {
-    results = JSON.parse(JSON.stringify(results));
-  } catch (error) {}
-
-  callback(results);
-};
-window.opener.process_next_event();
+// We have to set the url here to ensure we get the same escaping as in the harness
+// and also to handle the case where the test changes the fragment
+window.__wptrunner_url = "%(url)s";
+window.__wptrunner_testdriver_callback = arguments[arguments.length - 1];
+window.__wptrunner_process_next_event();
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/products.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/products.py
@@ -1,58 +1,71 @@
 import importlib
 import imp
 
 from .browsers import product_list
 
+
 def products_enabled(config):
     names = config.get("products", {}).keys()
     if not names:
         return product_list
     else:
         return names
 
+
 def product_module(config, product):
     if product not in products_enabled(config):
         raise ValueError("Unknown product %s" % product)
 
     path = config.get("products", {}).get(product, None)
     if path:
         module = imp.load_source('wptrunner.browsers.' + product, path)
     else:
         module = importlib.import_module("wptrunner.browsers." + product)
 
     if not hasattr(module, "__wptrunner__"):
         raise ValueError("Product module does not define __wptrunner__ variable")
 
     return module
 
 
-def load_product(config, product):
-    module = product_module(config, product)
-    data = module.__wptrunner__
+class Product(object):
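+    """Wrapper exposing a browser product's entry points (browser class,
+    executor classes, kwargs helpers, etc.) as attributes."""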
+    def __init__(self, config, product):
+        module = product_module(config, product)
+        data = module.__wptrunner__
+        self.name = product
+        self.check_args = getattr(module, data["check_args"])
+        self.browser_cls = getattr(module, data["browser"])
+        self.get_browser_kwargs = getattr(module, data["browser_kwargs"])
+        self.get_executor_kwargs = getattr(module, data["executor_kwargs"])
+        self.env_options = getattr(module, data["env_options"])()
+        self.get_env_extras = getattr(module, data["env_extras"])
+        self.run_info_extras = (getattr(module, data["run_info_extras"])
+                           if "run_info_extras" in data else lambda **kwargs:{})
+        self.get_timeout_multiplier = getattr(module, data["timeout_multiplier"])
 
-    check_args = getattr(module, data["check_args"])
-    browser_cls = getattr(module, data["browser"])
-    browser_kwargs = getattr(module, data["browser_kwargs"])
-    executor_kwargs = getattr(module, data["executor_kwargs"])
-    env_options = getattr(module, data["env_options"])()
-    env_extras = getattr(module, data["env_extras"])
-    run_info_extras = (getattr(module, data["run_info_extras"])
-                       if "run_info_extras" in data else lambda **kwargs:{})
+        self.executor_classes = {}
+        for test_type, cls_name in data["executor"].iteritems():
+            cls = getattr(module, cls_name)
+            self.executor_classes[test_type] = cls
+
 
-    executor_classes = {}
-    for test_type, cls_name in data["executor"].iteritems():
-        cls = getattr(module, cls_name)
-        executor_classes[test_type] = cls
-
-    return (check_args,
-            browser_cls, browser_kwargs,
-            executor_classes, executor_kwargs,
-            env_options, env_extras, run_info_extras)
+def load_product(config, product, load_cls=False):
+    rv = Product(config, product)
+    if not load_cls:
+        return (rv.check_args,
+                rv.browser_cls,
+                rv.get_browser_kwargs,
+                rv.executor_classes,
+                rv.get_executor_kwargs,
+                rv.env_options,
+                rv.get_env_extras,
+                rv.run_info_extras)
+    return rv
 
 
 def load_product_update(config, product):
     """Return tuple of (property_order, boolean_properties) indicating the
     run_info properties to use when constructing the expectation data for
     this product. None for either key indicates that the default keys
     appropriate for distinguishing based on platform will be used."""
 
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/testdriver-extra.js
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/testdriver-extra.js
@@ -52,27 +52,27 @@
     };
 
     window.test_driver_internal.click = function(element) {
         const selector = get_selector(element);
         const pending_promise = new Promise(function(resolve, reject) {
             pending_resolve = resolve;
             pending_reject = reject;
         });
-        window.opener.postMessage({"type": "action", "action": "click", "selector": selector}, "*");
+        window.__wptrunner_message_queue.push({"type": "action", "action": "click", "selector": selector});
         return pending_promise;
     };
 
     window.test_driver_internal.send_keys = function(element, keys) {
         const selector = get_selector(element);
         const pending_promise = new Promise(function(resolve, reject) {
             pending_resolve = resolve;
             pending_reject = reject;
         });
-        window.opener.postMessage({"type": "action", "action": "send_keys", "selector": selector, "keys": keys}, "*");
+        window.__wptrunner_message_queue.push({"type": "action", "action": "send_keys", "selector": selector, "keys": keys});
         return pending_promise;
     };
 
     window.test_driver_internal.action_sequence = function(actions) {
         const pending_promise = new Promise(function(resolve, reject) {
             pending_resolve = resolve;
             pending_reject = reject;
         });
@@ -80,12 +80,12 @@
             if (actionSequence.type == "pointer") {
                 for (let action of actionSequence.actions) {
                     if (action.type == "pointerMove" && action.origin instanceof Element) {
                         action.origin = {selector: get_selector(action.origin)};
                     }
                 }
             }
         }
-        window.opener.postMessage({"type": "action", "action": "action_sequence", "actions": actions}, "*");
+        window.__wptrunner_message_queue.push({"type": "action", "action": "action_sequence", "actions": actions});
         return pending_promise;
     };
 })();
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/testharnessreport.js
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/testharnessreport.js
@@ -1,13 +1,68 @@
-var props = {output:%(output)d,
-             explicit_timeout: true,
-             message_events: ["completion"]};
+window.__wptrunner_testdriver_callback = null;
+window.__wptrunner_message_queue = [];
+window.__wptrunner_url = null;
+
+window.__wptrunner_process_next_event = function() {
+  /* This function handles the next testdriver event. The presence of
+     window.__wptrunner_testdriver_callback is used as a switch; when that
+     function is present we are able to handle the next event, and when it is
+     not present we must wait. Therefore, to drive the event processing, this
+     function must be called in two circumstances:
+       * Every time there is a new event that we may be able to handle
+       * Every time we set the callback function
+     This function unsets the callback, so no further testdriver actions
+     will be run until it is reset, which wptrunner does after it has
+     completed handling the current action.
+   */
+
+  if (!window.__wptrunner_testdriver_callback) {
+    return;
+  }
+  var data = window.__wptrunner_message_queue.shift();
+  if (!data) {
+    return;
+  }
+
+  var payload = undefined;
+
+  switch(data.type) {
+  case "complete":
+    var tests = data.tests;
+    var status = data.status;
 
-if (window.opener && "timeout_multiplier" in window.opener) {
-    props["timeout_multiplier"] = window.opener.timeout_multiplier;
-}
+    var subtest_results = tests.map(function(x) {
+      return [x.name, x.status, x.message, x.stack];
+    });
+    payload = [status.status,
+               status.message,
+               status.stack,
+               subtest_results];
+    clearTimeout(window.__wptrunner_timer);
+    break;
+  case "action":
+    payload = data;
+    break;
+  default:
+    return;
+  }
+  var callback = window.__wptrunner_testdriver_callback;
+  window.__wptrunner_testdriver_callback = null;
+  callback([window.__wptrunner_url, data.type, payload]);
+};
 
-if (window.opener && window.opener.explicit_timeout) {
-    props["explicit_timeout"] = window.opener.explicit_timeout;
-}
+(function() {
+  var props = {output: %(output)d,
+               timeout_multiplier: %(timeout_multiplier)s,
+               explicit_timeout: %(explicit_timeout)s,
+               message_events: ["completion"]};
 
-setup(props);
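+  // When the harness completes, queue the results for wptrunner to collect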
+  add_completion_callback(function(tests, harness_status) {
+    __wptrunner_message_queue.push({
+      "type": "complete",
+      "tests": tests,
+      "status": harness_status});
+    __wptrunner_process_next_event();
+  });
+  setup(props);
+})();
+
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_products.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_products.py
@@ -41,16 +41,17 @@ def test_server_start_config(product):
      target_browser_cls, get_browser_kwargs,
      executor_classes, get_executor_kwargs,
      env_options, get_env_extras, run_info_extras) = products.load_product({}, product)
 
     env_extras = get_env_extras()
 
     with mock.patch.object(environment.serve, "start") as start:
         with environment.TestEnvironment(test_paths,
+                                         1,
                                          False,
                                          None,
                                          env_options,
                                          {"type": "none"},
                                          env_extras):
             start.assert_called_once()
             args = start.call_args
             config = args[0][0]
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/wptrunner.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptrunner.py
@@ -133,34 +133,31 @@ def get_pause_after_test(test_loader, **
         return False
     return kwargs["pause_after_test"]
 
 
 def run_tests(config, test_paths, product, **kwargs):
     with wptlogging.CaptureIO(logger, not kwargs["no_capture_stdio"]):
         env.do_delayed_imports(logger, test_paths)
 
-        (check_args,
-         target_browser_cls, get_browser_kwargs,
-         executor_classes, get_executor_kwargs,
-         env_options, get_env_extras, run_info_extras) = products.load_product(config, product)
+        product = products.load_product(config, product, load_cls=True)
 
-        env_extras = get_env_extras(**kwargs)
+        env_extras = product.get_env_extras(**kwargs)
 
-        check_args(**kwargs)
+        product.check_args(**kwargs)
 
         if kwargs["install_fonts"]:
             env_extras.append(FontInstaller(
                 font_dir=kwargs["font_dir"],
                 ahem=os.path.join(test_paths["/"]["tests_path"], "fonts/Ahem.ttf")
             ))
 
         run_info, test_loader = get_loader(test_paths,
-                                           product,
-                                           run_info_extras=run_info_extras(**kwargs),
+                                           product.name,
+                                           run_info_extras=product.run_info_extras(**kwargs),
                                            **kwargs)
 
         test_source_kwargs = {"processes": kwargs["processes"]}
         if kwargs["run_by_dir"] is False:
             test_source_cls = testloader.SingleTestSource
         else:
             # A value of None indicates infinite depth
             test_source_cls = testloader.PathGroupedSource
@@ -181,20 +178,23 @@ def run_tests(config, test_paths, produc
         kwargs["pause_after_test"] = get_pause_after_test(test_loader, **kwargs)
 
         ssl_config = {"type": kwargs["ssl_type"],
                       "openssl": {"openssl_binary": kwargs["openssl_binary"]},
                       "pregenerated": {"host_key_path": kwargs["host_key_path"],
                                        "host_cert_path": kwargs["host_cert_path"],
                                        "ca_cert_path": kwargs["ca_cert_path"]}}
 
+        testharness_timeout_multiplier = product.get_timeout_multiplier("testharness", run_info, **kwargs)
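+        # Getting this up front lets testharnessreport.js be templated once for the whole run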
+
         with env.TestEnvironment(test_paths,
+                                 testharness_timeout_multiplier,
                                  kwargs["pause_after_test"],
                                  kwargs["debug_info"],
-                                 env_options,
+                                 product.env_options,
                                  ssl_config,
                                  env_extras) as test_environment:
             try:
                 test_environment.ensure_started()
             except env.TestEnvironmentError as e:
                 logger.critical("Error starting test environment: %s" % e.message)
                 raise
 
@@ -206,58 +206,60 @@ def run_tests(config, test_paths, produc
                 repeat_count += 1
                 if repeat_until_unexpected:
                     logger.info("Repetition %i" % (repeat_count))
                 elif repeat > 1:
                     logger.info("Repetition %i / %i" % (repeat_count, repeat))
 
                 test_count = 0
                 unexpected_count = 0
-                logger.suite_start(test_loader.test_ids, name='web-platform-test', run_info=run_info,
+                logger.suite_start(test_loader.test_ids,
+                                   name='web-platform-test',
+                                   run_info=run_info,
                                    extra={"run_by_dir": kwargs["run_by_dir"]})
                 for test_type in kwargs["test_types"]:
                     logger.info("Running %s tests" % test_type)
 
                     # WebDriver tests may create and destroy multiple browser
                     # processes as part of their expected behavior. These
                     # processes are managed by a WebDriver server binary. This
                     # obviates the need for wptrunner to provide a browser, so
                     # the NullBrowser is used in place of the "target" browser
                     if test_type == "wdspec":
                         browser_cls = NullBrowser
                     else:
-                        browser_cls = target_browser_cls
+                        browser_cls = product.browser_cls
 
-                    browser_kwargs = get_browser_kwargs(test_type,
-                                                        run_info,
-                                                        config=test_environment.config,
-                                                        **kwargs)
+                    browser_kwargs = product.get_browser_kwargs(test_type,
+                                                                run_info,
+                                                                config=test_environment.config,
+                                                                **kwargs)
 
-                    executor_cls = executor_classes.get(test_type)
-                    executor_kwargs = get_executor_kwargs(test_type,
-                                                          test_environment.config,
-                                                          test_environment.cache_manager,
-                                                          run_info,
-                                                          **kwargs)
+                    executor_cls = product.executor_classes.get(test_type)
+                    executor_kwargs = product.get_executor_kwargs(test_type,
+                                                                  test_environment.config,
+                                                                  test_environment.cache_manager,
+                                                                  run_info,
+                                                                  **kwargs)
 
                     if executor_cls is None:
                         logger.error("Unsupported test type %s for product %s" %
-                                     (test_type, product))
+                                     (test_type, product.name))
                         continue
 
                     for test in test_loader.disabled_tests[test_type]:
                         logger.test_start(test.id)
                         logger.test_end(test.id, status="SKIP")
                         skipped_tests += 1
 
                     if test_type == "testharness":
                         run_tests = {"testharness": []}
                         for test in test_loader.tests["testharness"]:
-                            if (test.testdriver and not executor_cls.supports_testdriver) or (
-                                    test.jsshell and not executor_cls.supports_jsshell):
+                            if ((test.testdriver and not executor_cls.supports_testdriver) or
+                                (test.jsshell and not executor_cls.supports_jsshell)):
                                 logger.test_start(test.id)
                                 logger.test_end(test.id, status="SKIP")
                                 skipped_tests += 1
                             else:
                                 run_tests["testharness"].append(test)
                     else:
                         run_tests = test_loader.tests
 
--- a/testing/web-platform/tests/tools/wptserve/wptserve/stash.py
+++ b/testing/web-platform/tests/tools/wptserve/wptserve/stash.py
@@ -1,60 +1,69 @@
 import base64
 import json
 import os
 import uuid
 import threading
 from multiprocessing.managers import AcquirerProxy, BaseManager, DictProxy
 from six import text_type
 
+
 class ServerDictManager(BaseManager):
     shared_data = {}
 
+
 def _get_shared():
     return ServerDictManager.shared_data
 
+
 ServerDictManager.register("get_dict",
                            callable=_get_shared,
                            proxytype=DictProxy)
 ServerDictManager.register('Lock', threading.Lock, AcquirerProxy)
 
+
 class ClientDictManager(BaseManager):
     pass
 
+
 ClientDictManager.register("get_dict")
 ClientDictManager.register("Lock")
 
+
 class StashServer(object):
     def __init__(self, address=None, authkey=None):
         self.address = address
         self.authkey = authkey
         self.manager = None
 
     def __enter__(self):
         self.manager, self.address, self.authkey = start_server(self.address, self.authkey)
         store_env_config(self.address, self.authkey)
 
     def __exit__(self, *args, **kwargs):
         if self.manager is not None:
             self.manager.shutdown()
 
+
 def load_env_config():
     address, authkey = json.loads(os.environ["WPT_STASH_CONFIG"])
     if isinstance(address, list):
         address = tuple(address)
     else:
         address = str(address)
     authkey = base64.b64decode(authkey)
     return address, authkey
 
+
 def store_env_config(address, authkey):
     authkey = base64.b64encode(authkey)
     os.environ["WPT_STASH_CONFIG"] = json.dumps((address, authkey.decode("ascii")))
 
+
 def start_server(address=None, authkey=None):
     if isinstance(authkey, text_type):
         authkey = authkey.encode("ascii")
     manager = ServerDictManager(address, authkey)
     manager.start()
 
     return (manager, manager._address, manager._authkey)
 
@@ -70,16 +79,17 @@ class LockWrapper(object):
         self.lock.release()
 
     def __enter__(self):
         self.acquire()
 
     def __exit__(self, *args, **kwargs):
         self.release()
 
+
 #TODO: Consider expiring values after some fixed time for long-running
 #servers
 
 class Stash(object):
     """Key-value store for persisting data across HTTP/S and WS/S requests.
 
     This data store is specifically designed for persisting data across server
     requests. The synchronization is achieved by using the BaseManager from
@@ -168,10 +178,11 @@ class Stash(object):
             try:
                 self.data.pop(internal_key)
             except KeyError:
                 # Silently continue when pop error occurs.
                 pass
 
         return value
 
+
 class StashError(Exception):
     pass