Bug 1531365 [wpt PR 15488] - [wptrunner] Reset internal state during "rerun", a=testonly
author: jugglinmike <mike@mikepennisi.com>
Wed, 06 Mar 2019 16:53:23 +0000
changeset 464708 d0a58f55ff232c19ed79b376cceb341762275110
parent 464707 b58846c0e583fc323c5958484ad4d4909490f727
child 464709 0b9ff65fb560cbf087eaa9b23e3377db31772f0d
push id: 80669
push user: aciure@mozilla.com
push date: Sun, 17 Mar 2019 09:47:54 +0000
treeherder: autoland@e0861be8d6c0 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: testonly
bugs: 1531365, 15488
milestone: 67.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1531365 [wpt PR 15488] - [wptrunner] Reset internal state during "rerun", a=testonly Automatic update from web-platform-tests [wptrunner] Reset internal state during "rerun" (#15488) The "reftest" implementation uses an internal cache for screenshots as an optimization for running similar tests. That optimization is inappropriate for the CLI's "rerun" feature since in that context, repeatedly running the same tests is an explicit goal. Introduce a generic "reset" message that is emitted by the TestRunnerManager during "rerun", and extend the RefTestExecutor to handle this message by emptying its internal cache. -- wpt-commits: f650eb264890a42067f0703fa1e7350c4d8f31d2 wpt-pr: 15488
testing/web-platform/tests/tools/wptrunner/wptrunner/executors/base.py
testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executormarionette.py
testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorselenium.py
testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorservo.py
testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorservodriver.py
testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorwebdriver.py
testing/web-platform/tests/tools/wptrunner/wptrunner/testrunner.py
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/base.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/base.py
@@ -147,16 +147,21 @@ class TestExecutor(object):
         if self.protocol is not None:
             self.protocol.setup(runner)
 
     def teardown(self):
         """Run cleanup steps after tests have finished"""
         if self.protocol is not None:
             self.protocol.teardown()
 
+    def reset(self):
+        """Re-initialize internal state to facilitate repeated test execution
+        as implemented by the `--rerun` command-line argument."""
+        pass
+
     def run_test(self, test):
         """Run a particular test.
 
         :param test: The test to run"""
         if test.environment != self.last_environment:
             self.on_environment_change(test.environment)
         try:
             result = self.do_test(test)
@@ -262,16 +267,19 @@ class RefTestImplementation(object):
 
             rv = (hash_value, screenshot)
         else:
             rv = self.screenshot_cache[key]
 
         self.message.append("%s %s" % (test.url, rv[0]))
         return True, rv
 
+    def reset(self):
+        self.screenshot_cache.clear()
+
     def is_pass(self, lhs_hash, rhs_hash, relation):
         assert relation in ("==", "!=")
         self.message.append("Testing %s %s %s" % (lhs_hash, relation, rhs_hash))
         return ((relation == "==" and lhs_hash == rhs_hash) or
                 (relation == "!=" and lhs_hash != rhs_hash))
 
     def run_test(self, test):
         viewport_size = test.viewport_size
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executormarionette.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executormarionette.py
@@ -775,16 +775,19 @@ class MarionetteRefTestExecutor(RefTestE
                 if handles:
                     self.protocol.marionette.switch_to_window(handles[0])
             super(self.__class__, self).teardown()
         except Exception as e:
             # Ignore errors during teardown
             self.logger.warning("Exception during reftest teardown:\n%s" %
                                 traceback.format_exc(e))
 
+    def reset(self):
+        self.implementation.reset(**self.implementation_kwargs)
+
     def is_alive(self):
         return self.protocol.is_alive
 
     def on_environment_change(self, new_environment):
         self.protocol.on_environment_change(self.last_environment, new_environment)
 
     def do_test(self, test):
         if not isinstance(self.implementation, InternalRefTestImplementation):
@@ -856,16 +859,20 @@ class InternalRefTestImplementation(obje
         data = {"screenshot": screenshot}
         if self.executor.group_metadata is not None:
             data["urlCount"] = {urlparse.urljoin(self.executor.server_url(key[0]), key[1]):value
                                 for key, value in self.executor.group_metadata.get("url_count", {}).iteritems()
                                 if value > 1}
         self.executor.protocol.marionette.set_context(self.executor.protocol.marionette.CONTEXT_CHROME)
         self.executor.protocol.marionette._send_message("reftest:setup", data)
 
+    def reset(self, screenshot=None):
+        self.teardown()
+        self.setup(screenshot)
+
     def run_test(self, test):
         references = self.get_references(test)
         timeout = (test.timeout * 1000) * self.timeout_multiplier
         rv = self.executor.protocol.marionette._send_message("reftest:run",
                                                              {"test": self.executor.test_url(test),
                                                               "references": references,
                                                               "expected": test.expected(),
                                                               "timeout": timeout,
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorselenium.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorselenium.py
@@ -354,16 +354,19 @@ class SeleniumRefTestExecutor(RefTestExe
                                          capabilities=capabilities)
         self.implementation = RefTestImplementation(self)
         self.close_after_done = close_after_done
         self.has_window = False
 
         with open(os.path.join(here, "reftest-wait_webdriver.js")) as f:
             self.wait_script = f.read()
 
+    def reset(self):
+        self.implementation.reset()
+
     def is_alive(self):
         return self.protocol.is_alive()
 
     def do_test(self, test):
         self.logger.info("Test requires OS-level window focus")
 
         width_offset, height_offset = self.protocol.webdriver.execute_script(
             """return [window.outerWidth - window.innerWidth,
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorservo.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorservo.py
@@ -178,16 +178,19 @@ class ServoRefTestExecutor(ProcessTestEx
                                      debug_info=debug_info)
 
         self.protocol = ConnectionlessProtocol(self, browser)
         self.screenshot_cache = screenshot_cache
         self.implementation = RefTestImplementation(self)
         self.tempdir = tempfile.mkdtemp()
         self.hosts_path = write_hosts_file(server_config)
 
+    def reset(self):
+        self.implementation.reset()
+
     def teardown(self):
         try:
             os.unlink(self.hosts_path)
         except OSError:
             pass
         os.rmdir(self.tempdir)
         ProcessTestExecutor.teardown(self)
 
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorservodriver.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorservodriver.py
@@ -255,16 +255,19 @@ class ServoWebDriverRefTestExecutor(RefT
                                  debug_info=debug_info)
         self.protocol = ServoWebDriverProtocol(self, browser,
                                                capabilities=capabilities)
         self.implementation = RefTestImplementation(self)
         self.timeout = None
         with open(os.path.join(here, "reftest-wait_webdriver.js")) as f:
             self.wait_script = f.read()
 
+    def reset(self):
+        self.implementation.reset()
+
     def is_alive(self):
         return self.protocol.is_alive()
 
     def do_test(self, test):
         try:
             result = self.implementation.run_test(test)
             return self.convert_result(test, result)
         except IOError:
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorwebdriver.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorwebdriver.py
@@ -400,16 +400,19 @@ class WebDriverRefTestExecutor(RefTestEx
                                           capabilities=capabilities)
         self.implementation = RefTestImplementation(self)
         self.close_after_done = close_after_done
         self.has_window = False
 
         with open(os.path.join(here, "reftest-wait_webdriver.js")) as f:
             self.wait_script = f.read()
 
+    def reset(self):
+        self.implementation.reset()
+
     def is_alive(self):
         return self.protocol.is_alive()
 
     def do_test(self, test):
         width_offset, height_offset = self.protocol.webdriver.execute_script(
             """return [window.outerWidth - window.innerWidth,
                        window.outerHeight - window.innerHeight];"""
         )
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/testrunner.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/testrunner.py
@@ -84,16 +84,17 @@ class TestRunner(object):
         self.command_queue = None
         self.browser = None
 
     def run(self):
         """Main loop accepting commands over the pipe and triggering
         the associated methods"""
         self.setup()
         commands = {"run_test": self.run_test,
+                    "reset": self.reset,
                     "stop": self.stop,
                     "wait": self.wait}
         while True:
             command, args = self.command_queue.get()
             try:
                 rv = commands[command](*args)
             except Exception:
                 self.send_message("error",
@@ -101,16 +102,19 @@ class TestRunner(object):
                                   (command, args, traceback.format_exc()))
             else:
                 if rv is Stop:
                     break
 
     def stop(self):
         return Stop
 
+    def reset(self):
+        self.executor.reset()
+
     def run_test(self, test):
         try:
             return self.executor.run_test(test)
         except Exception:
             self.logger.critical(traceback.format_exc())
             raise
 
     def wait(self):
@@ -533,16 +537,17 @@ class TestRunnerManager(threading.Thread
             self.logger.info("Restarting browser for new test environment")
             return RunnerManagerState.restarting(self.state.test,
                                                  self.state.test_group,
                                                  self.state.group_metadata)
 
         self.logger.test_start(self.state.test.id)
         if self.rerun > 1:
             self.logger.info("Run %d/%d" % (self.run_count, self.rerun))
+            self.send_message("reset")
         self.run_count += 1
         self.send_message("run_test", self.state.test)
 
     def test_ended(self, test, results):
         """Handle the end of a test.
 
         Output the result of each subtest, and the result of the overall
         harness to the logs.