Bug 1363428 - Add per-test-queue metadata to wptrunner, r=maja_zf
This adds a metadata object associated with each test queue and uses
it to pass cache information into the Marionette internal reftest
implementation, so that we only cache canvases that will be reused.
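
For context, a minimal sketch (not part of this patch) of how an
executor could consult the per-queue metadata to decide whether a
reference canvas is worth caching. The helper name is hypothetical; it
only illustrates the shape of the url_count data built by
ReftestTest.update_metadata below.

    # Sketch only: not added by this patch.
    def should_cache_canvas(queue_metadata, protocol, url):
        # A reference URL is worth caching only if more than one test in
        # this queue will screenshot it.
        url_count = queue_metadata.get("url_count", {})
        return url_count.get((protocol, url), 0) > 1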
MozReview-Commit-ID: zASrlvnri3
--- a/testing/profiles/prefs_general.js
+++ b/testing/profiles/prefs_general.js
@@ -189,16 +189,19 @@ user_pref("layout.css.prefixes.device-pi
user_pref("layout.css.shape-outside.enabled", true);
// Enable CSS text-justify for testing
user_pref("layout.css.text-justify.enabled", true);
// Disable spammy layout warnings because they pollute test logs
user_pref("layout.spammy_warnings.enabled", false);
+// Disable interruptible reflow
+user_pref("layout.interruptible-reflow.enabled", false);
+
// Enable Media Source Extensions for testing
user_pref("media.mediasource.mp4.enabled", true);
user_pref("media.mediasource.webm.enabled", true);
// Enable Gamepad
user_pref("dom.gamepad.enabled", true);
user_pref("dom.gamepad.non_standard_events.enabled", true);
--- a/testing/web-platform/harness/wptrunner/executors/executormarionette.py
+++ b/testing/web-platform/harness/wptrunner/executors/executormarionette.py
@@ -499,33 +499,34 @@ class MarionetteTestharnessExecutor(Test
rv = marionette.execute_async_script(script, new_sandbox=False)
return rv
class MarionetteRefTestExecutor(RefTestExecutor):
def __init__(self, browser, server_config, timeout_multiplier=1,
screenshot_cache=None, close_after_done=True,
debug_info=None, internal_reftest=False,
- profile_path=None, **kwargs):
+ queue_metadata=None, profile_path=None, **kwargs):
"""Marionette-based executor for reftests"""
RefTestExecutor.__init__(self,
browser,
server_config,
screenshot_cache=screenshot_cache,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = MarionetteProtocol(self, browser)
self.implementation = (InternalRefTestImplementation
if internal_reftest
else RefTestImplementation)(self)
self.close_after_done = close_after_done
self.has_window = False
self.original_pref_values = {}
self.profile_path = profile_path
+ self.queue_metadata = queue_metadata
with open(os.path.join(here, "reftest.js")) as f:
self.script = f.read()
with open(os.path.join(here, "reftest-wait.js")) as f:
self.wait_script = f.read()
def setup(self, runner):
super(self.__class__, self).setup(runner)
--- a/testing/web-platform/harness/wptrunner/testloader.py
+++ b/testing/web-platform/harness/wptrunner/testloader.py
@@ -561,31 +561,32 @@ class TestLoader(object):
return groups
class TestSource(object):
__metaclass__ = ABCMeta
def __init__(self, test_queue):
self.test_queue = test_queue
self.current_queue = None
+ self.current_metadata = None
@abstractmethod
def queue_tests(self, test_queue, test_type, tests, **kwargs):
pass
def __enter__(self):
return self
def get_queue(self):
if not self.current_queue or len(self.current_queue) == 0:
try:
- self.current_queue = self.test_queue.get(block=False)
+ self.current_queue, self.current_metadata = self.test_queue.get(block=False)
except Empty:
return None, None
- return self.current_queue
+ return self.current_queue, self.current_metadata
def __exit__(self, *args, **kwargs):
pass
class GroupedSource(TestSource):
@classmethod
def new_group(cls, state, test, **kwargs):
@@ -594,32 +595,37 @@ class GroupedSource(TestSource):
@classmethod
def queue_tests(cls, test_queue, test_type, tests, **kwargs):
state = {}
group = None
for test in tests[test_type]:
if cls.new_group(state, test, **kwargs):
group = deque([])
- test_queue.put(group)
+ metadata = {}
+ test_queue.put((group, metadata))
group.append(test)
+ test.update_metadata(metadata)
class SingleTestSource(TestSource):
@classmethod
def queue_tests(cls, test_queue, test_type, tests, **kwargs):
processes = kwargs["processes"]
queues = [deque([]) for _ in xrange(processes)]
+ metadatas = [{} for _ in xrange(processes)]
for test in tests[test_type]:
idx = hash(test.id) % processes
group = queues[idx]
+ metadata = metadatas[idx]
group.append(test)
+ test.update_metadata(metadata)
- for item in queues:
+ for item in zip(queues, metadatas):
test_queue.put(item)
class PathGroupedSource(GroupedSource):
@classmethod
def new_group(cls, state, test, **kwargs):
depth = kwargs.get("depth")
if depth is True:
--- a/testing/web-platform/harness/wptrunner/testrunner.py
+++ b/testing/web-platform/harness/wptrunner/testrunner.py
@@ -234,19 +234,19 @@ class BrowserManager(object):
def is_alive(self):
return self.browser.is_alive()
class _RunnerManagerState(object):
before_init = namedtuple("before_init", [])
initializing = namedtuple("initializing_browser",
- ["test", "test_queue", "failure_count"])
+ ["test", "test_queue", "queue_metadata", "failure_count"])
running = namedtuple("running", ["test", "test_queue"])
- restarting = namedtuple("restarting", ["test", "test_queue"])
+ restarting = namedtuple("restarting", ["test", "test_queue", "queue_metadata"])
error = namedtuple("error", [])
stop = namedtuple("stop", [])
RunnerManagerState = _RunnerManagerState()
class TestRunnerManager(threading.Thread):
@@ -433,38 +433,40 @@ class TestRunnerManager(threading.Thread
return
return f(*data)
def should_stop(self):
return self.child_stop_flag.is_set() or self.parent_stop_flag.is_set()
def start_init(self):
- test, test_queue = self.get_next_test()
+ test, test_queue, queue_metadata = self.get_next_test()
if test is None:
return RunnerManagerState.stop()
else:
- return RunnerManagerState.initializing(test, test_queue, 0)
+ return RunnerManagerState.initializing(test, test_queue, queue_metadata, 0)
def init(self):
assert isinstance(self.state, RunnerManagerState.initializing)
if self.state.failure_count > self.max_restarts:
self.logger.error("Max restarts exceeded")
return RunnerManagerState.error()
self.browser.update_settings(self.state.test)
result = self.browser.init()
if result is Stop:
return RunnerManagerState.error()
elif not result:
return RunnerManagerState.initializing(self.state.test,
self.state.test_queue,
+ self.state.queue_metadata,
self.state.failure_count + 1)
else:
+ self.executor_kwargs["queue_metadata"] = self.state.queue_metadata
self.start_test_runner()
def start_test_runner(self):
# Note that we need to be careful to start the browser before the
# test runner to ensure that any state set when the browser is started
# can be passed in to the test runner.
assert isinstance(self.state, RunnerManagerState.initializing)
assert self.command_queue is not None
@@ -495,40 +497,42 @@ class TestRunnerManager(threading.Thread
self.state.test_queue)
def init_failed(self):
assert isinstance(self.state, RunnerManagerState.initializing)
self.browser.after_init()
self.stop_runner(force=True)
return RunnerManagerState.initializing(self.state.test,
self.state.test_queue,
+ self.state.queue_metadata,
self.state.failure_count + 1)
def get_next_test(self, test_queue=None):
test = None
while test is None:
if test_queue is None:
- test_queue = self.test_source.get_queue()
+ test_queue, queue_metadata = self.test_source.get_queue()
if test_queue is None:
self.logger.info("No more tests")
- return None, None
+ return None, None, None
if len(test_queue) == 0:
- test_queue = None
+ test_queue, queue_metadata = None, None
else:
test = test_queue.popleft()
- return test, test_queue
+ return test, test_queue, queue_metadata
def run_test(self):
assert isinstance(self.state, RunnerManagerState.running)
assert self.state.test is not None
if self.browser.update_settings(self.state.test):
self.logger.info("Restarting browser for new test environment")
return RunnerManagerState.restarting(self.state.test,
- self.state.test_queue)
+ self.state.test_queue,
+ self.state.queue_metadata)
self.logger.test_start(self.state.test.id)
self.send_message("run_test", self.state.test)
def test_ended(self, test, results):
"""Handle the end of a test.
Output the result of each subtest, and the result of the overall
@@ -591,32 +595,32 @@ class TestRunnerManager(threading.Thread
# The browser should be stopped already, but this ensures we do any post-stop
# processing
self.logger.debug("Wait finished")
return self.after_test_end(True)
def after_test_end(self, restart):
assert isinstance(self.state, RunnerManagerState.running)
- test, test_queue = self.get_next_test()
+ test, test_queue, queue_metadata = self.get_next_test()
if test is None:
return RunnerManagerState.stop()
if test_queue != self.state.test_queue:
# We are starting a new group of tests, so force a restart
restart = True
if restart:
- return RunnerManagerState.restarting(test, test_queue)
+ return RunnerManagerState.restarting(test, test_queue, queue_metadata)
else:
return RunnerManagerState.running(test, test_queue)
def restart_runner(self):
"""Stop and restart the TestRunner"""
assert isinstance(self.state, RunnerManagerState.restarting)
self.stop_runner()
- return RunnerManagerState.initializing(self.state.test, self.state.test_queue, 0)
+ return RunnerManagerState.initializing(self.state.test, self.state.test_queue, self.state.queue_metadata, 0)
def log(self, action, kwargs):
getattr(self.logger, action)(**kwargs)
def error(self, message):
self.logger.error(message)
self.restart_runner()
--- a/testing/web-platform/harness/wptrunner/wpttest.py
+++ b/testing/web-platform/harness/wptrunner/wpttest.py
@@ -1,13 +1,14 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
+from collections import defaultdict
import mozinfo
from wptmanifest.parser import atoms
atom_reset = atoms["Reset"]
enabled_tests = set(["testharness", "reftest", "wdspec"])
@@ -112,16 +113,21 @@ class Test(object):
self._test_metadata = test_metadata
self.timeout = timeout if timeout is not None else self.default_timeout
self.path = path
self.environment = {"protocol": protocol, "prefs": self.prefs}
def __eq__(self, other):
return self.id == other.id
+ def update_metadata(self, metadata=None):
+ if metadata is None:
+ metadata = {}
+ return metadata
+
@classmethod
def from_manifest(cls, manifest_item, inherit_metadata, test_metadata):
timeout = cls.long_timeout if manifest_item.timeout == "long" else cls.default_timeout
protocol = "https" if hasattr(manifest_item, "https") and manifest_item.https else "http"
return cls(manifest_item.source_file.tests_root,
manifest_item.url,
inherit_metadata,
test_metadata,
@@ -319,16 +325,27 @@ class ReftestTest(Test):
[],
None,
[])
node.references.append((reference, ref_type))
return node
+ def update_metadata(self, metadata):
+ if not "url_count" in metadata:
+ metadata["url_count"] = defaultdict(int)
+ for reference, _ in self.references:
+ # We assume a naive implementation in which a URL with multiple
+ # possible screenshots will need both the lhs and rhs screenshots
+ # taken for each possible match
+ metadata["url_count"][(self.environment["protocol"], reference.url)] += 1
+ reference.update_metadata(metadata)
+ return metadata
+
@property
def id(self):
return self.url
@property
def keys(self):
return ("reftype", "refurl")