Backed out changeset 97a964bf7f99 (bug 1405141)
author Sebastian Hengst <archaeopteryx@coole-files.de>
Tue, 24 Oct 2017 14:56:56 +0200
changeset 685729 90a44d2859c76b5d6a152e81c330acda05d48e9d
parent 685728 fc51ba0a7e8de41cda1b82f3077b9bcaf7d4d903
child 685730 e4a7c6646eb4fb4f74a8c1295de35193646ca6fe
push id 86016
push user kgupta@mozilla.com
push date Wed, 25 Oct 2017 01:53:44 +0000
bugs 1405141
milestone 58.0a1
backs out 97a964bf7f990416e039fc9747e19e46f7a6dcea
Backed out changeset 97a964bf7f99 (bug 1405141)
testing/web-platform/tests/tools/wptrunner/wptrunner/stability.py
testing/web-platform/tests/tools/wptrunner/wptrunner/wptcommandline.py
testing/web-platform/tests/tools/wptrunner/wptrunner/wptrunner.py
deleted file mode 100644
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/stability.py
+++ /dev/null
@@ -1,279 +0,0 @@
-import copy
-import functools
-import imp
-import os
-import sys
-from collections import OrderedDict, defaultdict
-from datetime import datetime
-
-from mozlog import reader
-from mozlog.formatters import JSONFormatter, TbplFormatter
-from mozlog.handlers import BaseHandler, LogLevelFilter, StreamHandler
-
-here = os.path.dirname(__file__)
-localpaths = imp.load_source("localpaths", os.path.abspath(os.path.join(here, os.pardir, os.pardir, "localpaths.py")))
-from wpt.markdown import markdown_adjust, table
-
-
-class LogActionFilter(BaseHandler):
-
-    """Handler that filters out messages not of a given set of actions.
-
-    Subclasses BaseHandler.
-
-    :param inner: Handler to use for messages that pass this filter
-    :param actions: List of actions for which to fire the handler
-    """
-
-    def __init__(self, inner, actions):
-        """Extend BaseHandler and set inner and actions props on self."""
-        BaseHandler.__init__(self, inner)
-        self.inner = inner
-        self.actions = actions
-
-    def __call__(self, item):
-        """Invoke handler if action is in list passed as constructor param."""
-        if item["action"] in self.actions:
-            return self.inner(item)
-
-
-class LogHandler(reader.LogHandler):
-
-    """Handle updating test and subtest status in log.
-
-    Subclasses reader.LogHandler.
-    """
-    def __init__(self):
-        self.results = OrderedDict()
-
-    def find_or_create_test(self, data):
-        test_name = data["test"]
-        if self.results.get(test_name):
-            return self.results[test_name]
-
-        test = {
-            "subtests": OrderedDict(),
-            "status": defaultdict(int)
-        }
-        self.results[test_name] = test
-        return test
-
-    def find_or_create_subtest(self, data):
-        test = self.find_or_create_test(data)
-        subtest_name = data["subtest"]
-
-        if test["subtests"].get(subtest_name):
-            return test["subtests"][subtest_name]
-
-        subtest = {
-            "status": defaultdict(int),
-            "messages": set()
-        }
-        test["subtests"][subtest_name] = subtest
-
-        return subtest
-
-    def test_status(self, data):
-        subtest = self.find_or_create_subtest(data)
-        subtest["status"][data["status"]] += 1
-        if data.get("message"):
-            subtest["messages"].add(data["message"])
-
-    def test_end(self, data):
-        test = self.find_or_create_test(data)
-        test["status"][data["status"]] += 1
-
-
-def is_inconsistent(results_dict, iterations):
-    """Return whether or not a single test is inconsistent."""
-    return len(results_dict) > 1 or sum(results_dict.values()) != iterations
-
-
-def process_results(log, iterations):
-    """Process test log and return overall results and list of inconsistent tests."""
-    inconsistent = []
-    handler = LogHandler()
-    reader.handle_log(reader.read(log), handler)
-    results = handler.results
-    for test_name, test in results.iteritems():
-        if is_inconsistent(test["status"], iterations):
-            inconsistent.append((test_name, None, test["status"], []))
-        for subtest_name, subtest in test["subtests"].iteritems():
-            if is_inconsistent(subtest["status"], iterations):
-                inconsistent.append((test_name, subtest_name, subtest["status"], subtest["messages"]))
-    return results, inconsistent
-
-
-def err_string(results_dict, iterations):
-    """Create and return string with errors from test run."""
-    rv = []
-    total_results = sum(results_dict.values())
-    for key, value in sorted(results_dict.items()):
-        rv.append("%s%s" %
-                  (key, ": %s/%s" % (value, iterations) if value != iterations else ""))
-    if total_results < iterations:
-        rv.append("MISSING: %s/%s" % (iterations - total_results, iterations))
-    rv = ", ".join(rv)
-    if is_inconsistent(results_dict, iterations):
-        rv = "**%s**" % rv
-    return rv
-
-
-def write_inconsistent(log, inconsistent, iterations):
-    """Output inconsistent tests to logger.error."""
-    log("## Unstable results ##\n")
-    strings = [(
-        "`%s`" % markdown_adjust(test),
-        ("`%s`" % markdown_adjust(subtest)) if subtest else "",
-        err_string(results, iterations),
-        ("`%s`" % markdown_adjust(";".join(messages))) if len(messages) else "")
-        for test, subtest, results, messages in inconsistent]
-    table(["Test", "Subtest", "Results", "Messages"], strings, log)
-
-
-def write_results(log, results, iterations, pr_number=None, use_details=False):
-    log("## All results ##\n")
-    if use_details:
-        log("<details>\n")
-        log("<summary>%i %s ran</summary>\n\n" % (len(results),
-                                                  "tests" if len(results) > 1
-                                                  else "test"))
-
-    for test_name, test in results.iteritems():
-        baseurl = "http://w3c-test.org/submissions"
-        if "https" in os.path.splitext(test_name)[0].split(".")[1:]:
-            baseurl = "https://w3c-test.org/submissions"
-        title = test_name
-        if use_details:
-            log("<details>\n")
-            if pr_number:
-                title = "<a href=\"%s/%s%s\">%s</a>" % (baseurl, pr_number, test_name, title)
-            log('<summary>%s</summary>\n\n' % title)
-        else:
-            log("### %s ###" % title)
-        strings = [("", err_string(test["status"], iterations), "")]
-
-        strings.extend(((
-            ("`%s`" % markdown_adjust(subtest_name)) if subtest else "",
-            err_string(subtest["status"], iterations),
-            ("`%s`" % markdown_adjust(';'.join(subtest["messages"]))) if len(subtest["messages"]) else "")
-            for subtest_name, subtest in test["subtests"].items()))
-        table(["Subtest", "Results", "Messages"], strings, log)
-        if use_details:
-            log("</details>\n")
-
-    if use_details:
-        log("</details>\n")
-
-
-def run_step(logger, iterations, restart_after_iteration, kwargs_extras, **kwargs):
-    import wptrunner
-    kwargs = copy.deepcopy(kwargs)
-
-    if restart_after_iteration:
-        kwargs["repeat"] = iterations
-    else:
-        kwargs["rerun"] = iterations
-
-    kwargs["pause_after_test"] = False
-    kwargs.update(kwargs_extras)
-
-    handler = LogActionFilter(
-        LogLevelFilter(
-            StreamHandler(
-                sys.stdout,
-                TbplFormatter()
-            ),
-            "WARNING"),
-        ["log", "process_output"])
-
-    # There is a public API for this in the next mozlog
-    initial_handlers = logger._state.handlers
-    logger._state.handlers = []
-
-    with open("raw.log", "wb") as log:
-        # Setup logging for wptrunner that keeps process output and
-        # warning+ level logs only
-        logger.add_handler(handler)
-        logger.add_handler(StreamHandler(log, JSONFormatter()))
-
-        wptrunner.run_tests(**kwargs)
-
-    logger._state.handlers = initial_handlers
-
-    with open("raw.log", "rb") as log:
-        results, inconsistent = process_results(log, iterations)
-    return results, inconsistent, iterations
-
-
-def get_steps(logger, repeat_loop, repeat_restart, kwargs_extras):
-    steps = []
-    for kwargs_extra in kwargs_extras:
-        if kwargs_extra:
-            flags_string = " with flags %s" % " ".join(
-                "%s=%s" % item for item in kwargs_extra.iteritems())
-        else:
-            flags_string = ""
-
-        if repeat_loop:
-            desc = "Running tests in a loop %d times%s" % (repeat_loop,
-                                                           flags_string)
-            steps.append((desc, functools.partial(run_step, logger, repeat_loop, False, kwargs_extra)))
-
-        if repeat_restart:
-            desc = "Running tests in a loop with restarts %s times%s" % (repeat_restart,
-                                                                         flags_string)
-            steps.append((desc, functools.partial(run_step, logger, repeat_restart, True, kwargs_extra)))
-
-    return steps
-
-
-def write_summary(logger, step_results, final_result):
-    for desc, result in step_results:
-        logger.info('::: %s : %s' % (desc, result))
-    logger.info(':::')
-    if final_result == "PASS":
-        log = logger.info
-    elif final_result == "TIMEOUT":
-        log = logger.warning
-    else:
-        log = logger.error
-    log('::: Test verification %s' % final_result)
-
-    logger.info(':::')
-
-
-def check_stability(logger, repeat_loop=10, repeat_restart=5, chaos_mode=True, max_time=None,
-                    output_results=True, **kwargs):
-    kwargs_extras = [{}]
-    if chaos_mode and kwargs["product"] == "firefox":
-        kwargs_extras.append({"chaos_mode_flags": 3})
-
-    steps = get_steps(logger, repeat_loop, repeat_restart, kwargs_extras)
-
-    start_time = datetime.now()
-    step_results = []
-
-    for desc, step_func in steps:
-        if max_time and datetime.now() - start_time > max_time:
-            logger.info("::: Test verification is taking too long: Giving up!")
-            logger.info("::: So far, all checks passed, but not all checks were run.")
-            write_summary(logger, step_results, "TIMEOUT")
-            return 2
-
-        logger.info(':::')
-        logger.info('::: Running test verification step "%s"...' % desc)
-        logger.info(':::')
-        results, inconsistent, iterations = step_func(**kwargs)
-        if output_results:
-            write_results(logger.info, results, iterations)
-
-        if inconsistent:
-            step_results.append((desc, "FAIL"))
-            write_inconsistent(logger.info, inconsistent, iterations)
-            write_summary(logger, step_results, "FAIL")
-            return 1
-
-        step_results.append((desc,  "PASS"))
-
-    write_summary(logger, step_results, "PASS")
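The heart of the deleted module is the stability test in is_inconsistent(): a (sub)test is unstable if its iterations produced more than one distinct status, or if some iterations produced no result at all. A minimal standalone sketch of that check, lifted from the deleted code above (the status counts below are illustrative):

    from collections import defaultdict

    def is_inconsistent(results_dict, iterations):
        # More than one distinct status, or fewer results than iterations,
        # means the test did not behave the same way on every run.
        return len(results_dict) > 1 or sum(results_dict.values()) != iterations

    status_counts = defaultdict(int)
    for status in ["PASS"] * 9 + ["FAIL"]:   # 10 iterations, one failure
        status_counts[status] += 1

    assert is_inconsistent(status_counts, 10)     # flaky: PASS 9/10, FAIL 1/10
    assert not is_inconsistent({"PASS": 10}, 10)  # stable
    assert is_inconsistent({"PASS": 8}, 10)       # two results missing
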
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/wptcommandline.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptcommandline.py
@@ -69,19 +69,16 @@ scheme host and port.""")
                             default=False,
                             help="List the top level directories containing tests that will run.")
     mode_group.add_argument("--list-disabled", action="store_true",
                             default=False,
                             help="List the tests that are disabled on the current platform")
     mode_group.add_argument("--list-tests", action="store_true",
                             default=False,
                             help="List all tests that will run")
-    mode_group.add_argument("--verify", action="store_true",
-                            default=False,
-                            help="Run a stability check on the selected tests")
 
     test_selection_group = parser.add_argument_group("Test Selection")
     test_selection_group.add_argument("--test-types", action="store",
                                       nargs="*", default=wpttest.enabled_tests,
                                       choices=wpttest.enabled_tests,
                                       help="Test types to run")
     test_selection_group.add_argument("--include", action="append",
                                       help="URL prefix to include")
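For context, the removed --verify flag sat in the same mode group as the listing options and surfaced in the parsed arguments as kwargs["verify"], which start() dispatched on (see the wptrunner.py hunk below). A self-contained sketch of that argparse pattern; the parser and group here are illustrative stand-ins, and only the --verify option mirrors the deleted code:

    import argparse

    parser = argparse.ArgumentParser()             # hypothetical parser
    mode_group = parser.add_argument_group("Mode")
    # Mirrors the option deleted above:
    mode_group.add_argument("--verify", action="store_true",
                            default=False,
                            help="Run a stability check on the selected tests")

    kwargs = vars(parser.parse_args(["--verify"]))
    assert kwargs["verify"] is True                # what start() checked
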
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/wptrunner.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptrunner.py
@@ -117,17 +117,17 @@ def list_tests(test_paths, product, **kw
         print test
 
 
 def get_pause_after_test(test_loader, **kwargs):
     total_tests = sum(len(item) for item in test_loader.tests.itervalues())
     if kwargs["pause_after_test"] is None:
         if kwargs["repeat_until_unexpected"]:
             return False
-        if kwargs["repeat"] == 1 and kwargs["rerun"] == 1 and total_tests == 1:
+        if kwargs["repeat"] == 1 and total_tests == 1:
             return True
         return False
     return kwargs["pause_after_test"]
 
 
 def run_tests(config, test_paths, product, **kwargs):
     with wptlogging.CaptureIO(logger, not kwargs["no_capture_stdio"]):
         env.do_delayed_imports(logger, test_paths)
@@ -256,35 +256,26 @@ def run_tests(config, test_paths, produc
 
                 unexpected_total += unexpected_count
                 logger.info("Got %i unexpected results" % unexpected_count)
                 if repeat_until_unexpected and unexpected_total > 0:
                     break
                 logger.suite_end()
     return unexpected_total == 0
 
-
-def check_stability(**kwargs):
-    import stability
-    return stability.check_stability(logger, **kwargs)
-
-
 def start(**kwargs):
     if kwargs["list_test_groups"]:
         list_test_groups(**kwargs)
     elif kwargs["list_disabled"]:
         list_disabled(**kwargs)
     elif kwargs["list_tests"]:
         list_tests(**kwargs)
-    elif kwargs["verify"]:
-        check_stability(**kwargs)
     else:
         return not run_tests(**kwargs)
 
-
 def main():
     """Main entry point when calling from the command line"""
     kwargs = wptcommandline.parse_args()
 
     try:
         if kwargs["prefs_root"] is None:
             kwargs["prefs_root"] = os.path.abspath(os.path.join(here, "prefs"))
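The reverted get_pause_after_test() line ties into the two iteration modes that the deleted run_step() chose between: with restart_after_iteration it set kwargs["repeat"] (loop the whole suite, restarting between passes), otherwise kwargs["rerun"] (run each test several times in place). An illustrative sketch of the distinction, with hypothetical run_suite/run_one helpers rather than wptrunner's actual scheduler:

    def run_with_repeat(tests, iterations, run_suite):
        # repeat: N full passes over the suite, fresh browser each pass
        for _ in range(iterations):
            run_suite(tests, fresh_browser=True)

    def run_with_rerun(tests, iterations, run_one):
        # rerun: each test executes N times back to back in one session
        for test in tests:
            for _ in range(iterations):
                run_one(test)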