Bug 1506352 - Add a --log-wptreport option to jstests; r=bbouvier,jgraham
author    Ms2ger <Ms2ger@igalia.com>
date      Tue, 20 Nov 2018 14:00:23 +0100
changeset 503694 5931ccb19e50e7e40e53173b8caa4403c9b35b8e
parent    503693 f7cd1062ca6045ae2d8eb9c794818d3924f8a6d4
child     503695 55d9c869610101ebe70367d525e87c44f75efdc0
push id   10290
push user ffxbld-merge
push date Mon, 03 Dec 2018 16:23:23 +0000
reviewers bbouvier, jgraham
bugs      1506352
milestone 65.0a1
js/src/tests/jstests.py
js/src/tests/lib/results.py
js/src/tests/lib/wptreport.py
--- a/js/src/tests/jstests.py
+++ b/js/src/tests/jstests.py
@@ -198,16 +198,18 @@ def parse_args():
                          help='Do not show the progress bar.')
     output_og.add_option('--tinderbox', dest='format', action='store_const',
                          const='automation',
                          help='Use automation-parseable output format.')
     output_og.add_option('--format', dest='format', default='none',
                          type='choice', choices=['automation', 'none'],
                          help='Output format. Either automation or none'
                          ' (default %default).')
+    output_og.add_option('--log-wptreport', dest='wptreport', action='store',
+                         help='Path to write a Web Platform Tests report (wptreport)')
     op.add_option_group(output_og)
 
     special_og = OptionGroup(op, "Special",
                              "Special modes that do not run tests.")
     special_og.add_option('--make-manifests', metavar='BASE_TEST_PATH',
                           help='Generate reftest manifest files.')
     op.add_option_group(special_og)
     options, args = op.parse_args()
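
For context (not part of the patch): with the new flag, a jstests run writes a wptreport file alongside its normal output. A hypothetical invocation might look like the sketch below; the JS shell path and the report path are examples, not taken from this change.

# Hypothetical end-to-end use of the new option; paths are examples only.
import json
import subprocess

subprocess.check_call([
    "python", "js/src/tests/jstests.py",
    "--log-wptreport=/tmp/wptreport.json",  # option added by this patch
    "/path/to/objdir/dist/bin/js",          # JS shell under test (hypothetical path)
])

# The report is a JSON document in the wptreport format used by wptrunner.
with open("/tmp/wptreport.json") as fp:
    print(sorted(json.load(fp)))
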
--- a/js/src/tests/lib/results.py
+++ b/js/src/tests/lib/results.py
@@ -52,20 +52,21 @@ class NullTestOutput:
 
 class TestResult:
     PASS = 'PASS'
     FAIL = 'FAIL'
     CRASH = 'CRASH'
 
     """Classified result from a test run."""
 
-    def __init__(self, test, result, results):
+    def __init__(self, test, result, results, wpt_results=None):
         self.test = test
         self.result = result
         self.results = results
+        self.wpt_results = wpt_results  # Only used for wpt tests.
 
     @classmethod
     def from_wpt_output(cls, output):
         """Parse the output from a web-platform test that uses testharness.js.
         (The output is written to stdout in js/src/tests/testharnessreport.js.)
         """
         from wptrunner.executors.base import testharness_result_converter
 
@@ -92,17 +93,19 @@ class TestResult:
             else:
                 harness_status = "ERROR"
                 harness_message = "No harness output found"
                 tests = []
         stdout.append("Harness status: %s (%s)" % (harness_status, harness_message))
 
         result = cls.PASS
         results = []
-        if harness_status != output.test.wpt.expected():
+        subtests = []
+        expected_harness_status = output.test.wpt.expected()
+        if harness_status != expected_harness_status:
             if harness_status == "CRASH":
                 result = cls.CRASH
             else:
                 result = cls.FAIL
         else:
             for test in tests:
                 test_output = "Subtest \"%s\": " % (test.name,)
                 expected = output.test.wpt.expected(test.name)
@@ -110,22 +113,35 @@ class TestResult:
                     test_result = (cls.PASS, "")
                     test_output += "as expected: %s" % (test.status,)
                 else:
                     test_result = (cls.FAIL, test.message)
                     result = cls.FAIL
                     test_output += "expected %s, found %s" % (expected, test.status)
                     if test.message:
                         test_output += " (with message: \"%s\")" % (test.message,)
+                subtests.append({
+                    "test": output.test.wpt.id,
+                    "subtest": test.name,
+                    "status": test.status,
+                    "expected": expected,
+                })
                 results.append(test_result)
                 stdout.append(test_output)
 
         output.out = "\n".join(stdout) + "\n"
 
-        return cls(output.test, result, results)
+        wpt_results = {
+            "name": output.test.wpt.id,
+            "status": harness_status,
+            "expected": expected_harness_status,
+            "subtests": subtests,
+        }
+
+        return cls(output.test, result, results, wpt_results)
 
     @classmethod
     def from_output(cls, output):
         test = output.test
         result = None          # str:      overall result, see class-level variables
         results = []           # (str,str) list: subtest results (pass/fail, message)
 
         if test.wpt:
@@ -184,16 +200,25 @@ class TestDuration:
 class ResultsSink:
     def __init__(self, testsuite, options, testcount):
         self.options = options
         self.fp = options.output_fp
         if self.options.format == 'automation':
             self.slog = TestLogger(testsuite)
             self.slog.suite_start()
 
+        self.wptreport = None
+        if self.options.wptreport:
+            try:
+                from .wptreport import WptreportHandler
+                self.wptreport = WptreportHandler(self.options.wptreport)
+                self.wptreport.suite_start()
+            except ImportError:
+                pass  # wptrunner is unavailable; skip writing a wptreport.
+
         self.groups = {}
         self.output_dict = {}
         self.counts = {'PASS': 0, 'FAIL': 0, 'TIMEOUT': 0, 'SKIP': 0}
         self.slow_tests = []
         self.n = 0
 
         if options.hide_progress:
             self.pb = NullProgressBar()
@@ -215,16 +240,20 @@ class ResultsSink:
             if self.options.format == 'automation':
                 self.print_automation_result(
                     'TEST-KNOWN-FAIL', output.test, time=output.dt,
                     skip=True)
             self.counts['SKIP'] += 1
             self.n += 1
         else:
             result = TestResult.from_output(output)
+
+            if self.wptreport is not None and result.wpt_results:
+                self.wptreport.test(result.wpt_results, output.dt)
+
             tup = (result.result, result.test.expect, result.test.random)
             dev_label = self.LABELS[tup][1]
 
             if self.options.check_output:
                 if output.test.path in self.output_dict.keys():
                     if self.output_dict[output.test.path] != output:
                         self.counts['FAIL'] += 1
                         self.print_automation_result(
@@ -302,16 +331,19 @@ class ResultsSink:
 
     def finish(self, completed):
         self.pb.finish(completed)
         if self.options.format == 'automation':
             self.slog.suite_end()
         else:
             self.list(completed)
 
+        if self.wptreport is not None:
+            self.wptreport.suite_end()
+
     # Conceptually, this maps (test result x test expectation) to text labels.
     #      key   is (result, expect, random)
     #      value is (automation label, dev test category)
     LABELS = {
         (TestResult.CRASH, False, False): ('TEST-UNEXPECTED-FAIL',               'REGRESSIONS'),
         (TestResult.CRASH, False, True):  ('TEST-UNEXPECTED-FAIL',               'REGRESSIONS'),
         (TestResult.CRASH, True,  False): ('TEST-UNEXPECTED-FAIL',               'REGRESSIONS'),
         (TestResult.CRASH, True,  True):  ('TEST-UNEXPECTED-FAIL',               'REGRESSIONS'),
new file mode 100644
--- /dev/null
+++ b/js/src/tests/lib/wptreport.py
@@ -0,0 +1,75 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Integration between the jstests harness and `WptreportFormatter`.
+#
+# `WptreportFormatter` uses the data format specified in
+# <https://firefox-source-docs.mozilla.org/mozbase/mozlog.html>.
+
+from time import time
+
+from wptrunner.formatters import WptreportFormatter
+
+class WptreportHandler(object):
+    def __init__(self, out):
+        """
+        Initialize the wptreport handler.
+
+        :param str out: path to a file to write output to.
+        """
+        self.out = out
+        self.formatter = WptreportFormatter()
+
+    def suite_start(self):
+        """
+        Produce the "suite_start" message at the present time.
+        """
+        self.formatter.suite_start({
+            "time": time(),
+        })
+
+    def suite_end(self):
+        """
+        Produce the "suite_end" message at the present time and write the
+        results to the file path given in the constructor.
+        """
+        result = self.formatter.suite_end({
+            "time": time(),
+        })
+        with open(self.out, "w") as fp:
+            fp.write(result)
+
+    def test(self, result, duration):
+        """
+        Produce the "test_start", "test_status" and "test_end" messages, as
+        appropriate.
+
+        :param dict result: a dictionary with the test results. It should
+                            include the following keys:
+                            * "name": the ID of the test;
+                            * "status": the actual status of the whole test;
+                            * "expected": the expected status of the whole test;
+                            * "subtests": a list of dicts with keys "test",
+                              "subtest", "status" and "expected".
+        :param float duration: the runtime of the test, in seconds.
+        """
+        testname = result["name"]
+
+        end_time = time()
+        start_time = end_time - duration
+
+        self.formatter.test_start({
+            "test": testname,
+            "time": start_time,
+        })
+
+        for subtest in result["subtests"]:
+            self.formatter.test_status(subtest)
+
+        self.formatter.test_end({
+            "test": testname,
+            "time": end_time,
+            "status": result["status"],
+            "expected": result["expected"],
+        })
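
The dict that ResultsSink hands to WptreportHandler.test() has the shape built in TestResult.from_wpt_output() above. A minimal sketch of driving the handler directly, assuming wptrunner is installed and js/src/tests is the working directory; the test id, report path and duration below are made up:

# Minimal sketch of feeding WptreportHandler by hand; all values are hypothetical.
from lib.wptreport import WptreportHandler

handler = WptreportHandler("/tmp/wptreport.json")
handler.suite_start()
handler.test({
    "name": "/js/sample.any.js",   # test id, as output.test.wpt.id would provide
    "status": "OK",                # actual harness status
    "expected": "OK",              # expected harness status
    "subtests": [{
        "test": "/js/sample.any.js",
        "subtest": "sample subtest",
        "status": "PASS",
        "expected": "PASS",
    }],
}, 0.25)                           # duration in seconds (output.dt in the harness)
handler.suite_end()                # writes /tmp/wptreport.json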