Bug 1417267 - Output structured logs for jstests and jit-tests in automation, r=jonco
☠☠ backed out by 010374bce606 ☠☠
author: Steve Fink <sfink@mozilla.com>
date: Tue, 14 Nov 2017 15:36:47 -0800
changeset: 444040:a83b373d2d2ec45343f1612929881e00c099843e
parent: 444039:3059e975eef8c81c720d9450bd0792fdd501d6d6
child: 444041:3cb41a9d9526bfc21ff215b05d3ee0bfafec48d6
push id: 8527
push user: Callek@gmail.com
push date: Thu, 11 Jan 2018 21:05:50 +0000
treeherder: mozilla-beta@95342d212a7a
reviewers: jonco
bugs: 1417267
milestone: 59.0a1
js/src/jit-test/jit_test.py
js/src/tests/jstests.py
js/src/tests/lib/jittests.py
js/src/tests/lib/results.py
js/src/tests/lib/structuredlog.py
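
The patch below makes the runners print each structured record as a single JSON object per line on stdout, interleaved with the existing human-readable TEST-PASS / TEST-UNEXPECTED-FAIL output. As a rough sketch only (not part of the patch; the line-filtering heuristic is an assumption), an automation-side consumer could recover the structured records like this:

import json

def iter_structured_records(lines):
    # Yield the mozlog-style records emitted by TestLogger below, skipping
    # the human-readable TEST-* lines printed around them.
    for line in lines:
        line = line.strip()
        if not line.startswith('{'):
            continue
        try:
            record = json.loads(line)
        except ValueError:
            continue
        # Every TestLogger record carries 'source', 'thread', 'pid', 'time'
        # and an 'action' field.
        if 'action' in record and 'source' in record:
            yield record

# Example: list failing tests from a captured automation log.
# with open('jit-tests.log') as fh:
#     for rec in iter_structured_records(fh):
#         if rec.get('action') == 'test_end' and rec.get('status') == 'FAIL':
#             print(rec['test'], rec.get('message', ''))
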
--- a/js/src/jit-test/jit_test.py
+++ b/js/src/jit-test/jit_test.py
@@ -362,17 +362,17 @@ def main(argv):
             subprocess.call(debug_cmd + tc.command(prefix, jittests.LIB_DIR, jittests.MODULE_DIR))
             if options.debugger == 'rr':
                 subprocess.call(['rr', 'replay'])
         sys.exit()
 
     try:
         ok = None
         if options.remote:
-            ok = jittests.run_tests_remote(job_list, job_count, prefix, options)
+            ok = jittests.run_tests(job_list, job_count, prefix, options, remote=True)
         else:
             with change_env(test_environment):
                 ok = jittests.run_tests(job_list, job_count, prefix, options)
         if not ok:
             sys.exit(2)
     except OSError:
         if not os.path.exists(prefix[0]):
             print("JS shell argument: file does not exist:"
--- a/js/src/tests/jstests.py
+++ b/js/src/tests/jstests.py
@@ -372,17 +372,17 @@ def main():
         cmd = test_gen.next().get_command(prefix)
         if options.show_cmd:
             print(list2cmdline(cmd))
         with changedir(test_dir), change_env(test_environment):
             call(cmd)
         return 0
 
     with changedir(test_dir), change_env(test_environment):
-        results = ResultsSink(options, test_count)
+        results = ResultsSink('jstests', options, test_count)
         try:
             for out in run_all_tests(test_gen, prefix, results.pb, options):
                 results.push(out)
             results.finish(True)
         except KeyboardInterrupt:
             results.finish(False)
 
         return 0 if results.all_passed() else 1
--- a/js/src/tests/lib/jittests.py
+++ b/js/src/tests/lib/jittests.py
@@ -14,16 +14,17 @@ import StringIO
 
 if sys.platform.startswith('linux') or sys.platform.startswith('darwin'):
     from tasks_unix import run_all_tests
 else:
     from tasks_win import run_all_tests
 
 from progressbar import ProgressBar, NullProgressBar
 from results import TestOutput
+from structuredlog import TestLogger
 
 TESTS_LIB_DIR = os.path.dirname(os.path.abspath(__file__))
 JS_DIR = os.path.dirname(os.path.dirname(TESTS_LIB_DIR))
 TOP_SRC_DIR = os.path.dirname(os.path.dirname(JS_DIR))
 TEST_DIR = os.path.join(JS_DIR, 'jit-test', 'tests')
 LIB_DIR = os.path.join(JS_DIR, 'jit-test', 'lib') + os.path.sep
 MODULE_DIR = os.path.join(JS_DIR, 'jit-test', 'modules') + os.path.sep
 JS_CACHE_DIR = os.path.join(JS_DIR, 'jit-test', '.js-cache')
@@ -462,17 +463,17 @@ def check_output(out, err, rc, timed_out
         # forces the exit status.
         if test.expect_status != 0 and options.unusable_error_status:
             return True
 
         return False
 
     return True
 
-def print_automation_format(ok, res):
+def print_automation_format(ok, res, slog):
     # Output test failures in a parsable format suitable for automation, eg:
     # TEST-RESULT | filename.js | Failure description (code N, args "--foobar")
     #
     # Example:
     # TEST-PASS | foo/bar/baz.js | (code 0, args "--ion-eager")
     # TEST-UNEXPECTED-FAIL | foo/bar/baz.js | TypeError: or something (code -9, args "--no-ion")
     # INFO exit-status     : 3
     # INFO timed-out       : False
@@ -482,16 +483,24 @@ def print_automation_format(ok, res):
     # INFO stderr         2> TypeError: or something
     # TEST-UNEXPECTED-FAIL | jit_test.py: Test execution interrupted by user
     result = "TEST-PASS" if ok else "TEST-UNEXPECTED-FAIL"
     message = "Success" if ok else res.describe_failure()
     jitflags = " ".join(res.test.jitflags)
     print("{} | {} | {} (code {}, args \"{}\") [{:.1f} s]".format(
         result, res.test.relpath_top, message, res.rc, jitflags, res.dt))
 
+    details = {
+        'message': message,
+        'extra': {
+            'jitflags': jitflags,
+        }
+    }
+    slog.test(res.test.relpath_tests, 'PASS' if ok else 'FAIL', res.dt, **details)
+
     # For failed tests, print as much information as we have, to aid debugging.
     if ok:
         return
     print("INFO exit-status     : {}".format(res.rc))
     print("INFO timed-out       : {}".format(res.timed_out))
     for line in res.out.splitlines():
         print("INFO stdout          > " + line.strip())
     for line in res.err.splitlines():
@@ -556,17 +565,17 @@ def create_progressbar(num_tests, option
             {'value': 'PASS',    'color': 'green'},
             {'value': 'FAIL',    'color': 'red'},
             {'value': 'TIMEOUT', 'color': 'blue'},
             {'value': 'SKIP',    'color': 'brightgray'},
         ]
         return ProgressBar(num_tests, fmt)
     return NullProgressBar()
 
-def process_test_results(results, num_tests, pb, options):
+def process_test_results(results, num_tests, pb, options, slog):
     failures = []
     timeouts = 0
     complete = False
     output_dict = {}
     doing = 'before starting'
 
     if num_tests == 0:
         pb.finish(True)
@@ -605,50 +614,65 @@ def process_test_results(results, num_te
                 failures.append(res)
                 if res.timed_out:
                     pb.message("TIMEOUT - {}".format(res.test.relpath_tests))
                     timeouts += 1
                 else:
                     pb.message("FAIL - {}".format(res.test.relpath_tests))
 
             if options.format == 'automation':
-                print_automation_format(ok, res)
+                print_automation_format(ok, res, slog)
 
             n = i + 1
             pb.update(n, {
                 'PASS': n - len(failures),
                 'FAIL': len(failures),
                 'TIMEOUT': timeouts,
                 'SKIP': 0
             })
         complete = True
     except KeyboardInterrupt:
         print("TEST-UNEXPECTED-FAIL | jit_test.py" +
               " : Test execution interrupted by user")
 
     pb.finish(True)
     return print_test_summary(num_tests, failures, complete, doing, options)
 
-def run_tests(tests, num_tests, prefix, options):
+def run_tests(tests, num_tests, prefix, options, remote=False):
+    slog = None
+    if options.format == 'automation':
+        slog = TestLogger("jittests")
+        slog.suite_start()
+
+    if remote:
+        ok = run_tests_remote(tests, num_tests, prefix, options, slog)
+    else:
+        ok = run_tests_local(tests, num_tests, prefix, options, slog)
+
+    if slog:
+        slog.suite_end()
+    return ok
+
+def run_tests_local(tests, num_tests, prefix, options, slog):
     # The jstests tasks runner requires the following options. The names are
     # taken from the jstests options processing code, which are frequently
     # subtly different from the options jit-tests expects. As such, we wrap
     # them here, as needed.
     AdaptorOptions = namedtuple("AdaptorOptions", [
         "worker_count", "passthrough", "timeout", "output_fp",
         "hide_progress", "run_skipped", "show_cmd"])
     shim_options = AdaptorOptions(options.max_jobs, False, options.timeout,
                                   sys.stdout, False, True, options.show_cmd)
 
     # The test runner wants the prefix as a static on the Test class.
     JitTest.js_cmd_prefix = prefix
 
     pb = create_progressbar(num_tests, options)
     gen = run_all_tests(tests, prefix, pb, shim_options)
-    ok = process_test_results(gen, num_tests, pb, options)
+    ok = process_test_results(gen, num_tests, pb, options, slog)
     return ok
 
 def get_remote_results(tests, device, prefix, options):
     from mozdevice import devicemanager
 
     try:
         for i in xrange(0, options.repeat):
             for test in tests:
@@ -671,17 +695,17 @@ def push_libs(options, device):
             device.pushFile(os.path.join(options.local_lib, file), remote_file)
 
 def push_progs(options, device, progs):
     for local_file in progs:
         remote_file = posixpath.join(options.remote_test_root,
                                      os.path.basename(local_file))
         device.pushFile(local_file, remote_file)
 
-def run_tests_remote(tests, num_tests, prefix, options):
+def run_tests_remote(tests, num_tests, prefix, options, slog):
     # Setup device with everything needed to run our tests.
     from mozdevice import devicemanagerADB
 
     if options.device_ip:
         dm = devicemanagerADB.DeviceManagerADB(
             options.device_ip, options.device_port,
             deviceSerial=options.device_serial,
             packageName=None,
@@ -712,17 +736,17 @@ def run_tests_remote(tests, num_tests, p
 
     dm.pushDir(os.path.dirname(TEST_DIR), options.remote_test_root,
                timeout=600)
     prefix[0] = os.path.join(options.remote_test_root, 'js')
 
     # Run all tests.
     pb = create_progressbar(num_tests, options)
     gen = get_remote_results(tests, dm, prefix, options)
-    ok = process_test_results(gen, num_tests, pb, options)
+    ok = process_test_results(gen, num_tests, pb, options, slog)
     return ok
 
 def platform_might_be_android():
     try:
         # The python package for SL4A provides an |android| module.
         # If that module is present, we're likely in SL4A-python on
         # device.  False positives and negatives are possible,
         # however.
--- a/js/src/tests/lib/results.py
+++ b/js/src/tests/lib/results.py
@@ -1,13 +1,15 @@
 from __future__ import print_function
 
+import pipes
 import re
+
 from progressbar import NullProgressBar, ProgressBar
-import pipes
+from structuredlog import TestLogger
 
 # subprocess.list2cmdline does not properly escape for sh-like shells
 def escape_cmdline(args):
     return ' '.join([pipes.quote(a) for a in args])
 
 class TestOutput:
     """Output from a test run."""
     def __init__(self, test, cmd, out, err, rc, dt, timed_out):
@@ -101,19 +103,22 @@ class TestResult:
         return cls(test, result, results)
 
 class TestDuration:
     def __init__(self, test, duration):
         self.test = test
         self.duration = duration
 
 class ResultsSink:
-    def __init__(self, options, testcount):
+    def __init__(self, testsuite, options, testcount):
         self.options = options
         self.fp = options.output_fp
+        if self.options.format == 'automation':
+            self.slog = TestLogger(testsuite)
+            self.slog.suite_start()
 
         self.groups = {}
         self.output_dict = {}
         self.counts = {'PASS': 0, 'FAIL': 0, 'TIMEOUT': 0, 'SKIP': 0}
         self.slow_tests = []
         self.n = 0
 
         if options.hide_progress:
@@ -202,31 +207,32 @@ class ResultsSink:
                         tup = (sub_ok, result.test.expect, result.test.random)
                         label = self.LABELS[tup][0]
                         if label == 'TEST-UNEXPECTED-PASS':
                             label = 'TEST-PASS (EXPECTED RANDOM)'
                         self.print_automation_result(
                             label, result.test, time=output.dt,
                             message=msg)
                 tup = (result.result, result.test.expect, result.test.random)
-                self.print_automation_result(
-                    self.LABELS[tup][0], result.test, time=output.dt)
+                self.print_automation_result(self.LABELS[tup][0], result.test, time=output.dt)
                 return
 
             if dev_label:
                 def singular(label):
                     return "FIXED" if label == "FIXES" else label[:-1]
                 self.pb.message("{} - {}".format(singular(dev_label),
                                                  output.test.path))
 
         self.pb.update(self.n, self.counts)
 
     def finish(self, completed):
         self.pb.finish(completed)
-        if not self.options.format == 'automation':
+        if self.options.format == 'automation':
+            self.slog.suite_end()
+        else:
             self.list(completed)
 
     # Conceptually, this maps (test result x test expection) to text labels.
     #      key   is (result, expect, random)
     #      value is (automation label, dev test category)
     LABELS = {
         (TestResult.CRASH, False, False): ('TEST-UNEXPECTED-FAIL',               'REGRESSIONS'),
         (TestResult.CRASH, False, True):  ('TEST-UNEXPECTED-FAIL',               'REGRESSIONS'),
@@ -297,8 +303,18 @@ class ResultsSink:
         if message:
             result += " | " + message
         if skip:
             result += ' | (SKIP)'
         if time > self.options.timeout:
             result += ' | (TIMEOUT)'
         result += ' [{:.1f} s]'.format(time)
         print(result)
+
+        details = { 'extra': {} }
+        if self.options.shell_args:
+            details['extra']['shell_args'] = self.options.shell_args
+        details['extra']['jitflags'] = test.jitflags
+        if message:
+            details['message'] = message
+        status = 'FAIL' if 'TEST-UNEXPECTED' in label else 'PASS'
+
+        self.slog.test(test.path, status, time or 0, **details)
new file mode 100644
--- /dev/null
+++ b/js/src/tests/lib/structuredlog.py
@@ -0,0 +1,53 @@
+# produce mozlog-compatible log messages, following the spec at https://mozbase.readthedocs.io/en/latest/mozlog.html
+
+import json
+import os
+
+from time import time
+
+class TestLogger(object):
+    def __init__(self, source, threadname='main'):
+        self.template = {
+            'source': source,
+            'thread': threadname,
+            'pid': os.getpid(),
+        }
+
+    def _record(self, **kwargs):
+        record = self.template.copy()
+        record.update(**kwargs)
+        if 'time' not in record:
+            record['time'] = time()
+        return record
+
+    def _log_obj(self, obj):
+        print(json.dumps(obj))
+
+    def _log(self, **kwargs):
+        self._log_obj(self._record(**kwargs))
+
+    def suite_start(self):
+        self._log(action='suite_start', tests=[])
+
+    def suite_end(self):
+        self._log(action='suite_end')
+
+    def test_start(self, testname):
+        self._log(action='test_start', test=testname)
+
+    def test_end(self, testname, status):
+        self._log(action='test_end', test=testname, status=status)
+
+    def test(self, testname, status, duration, **details):
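+        # Synthesize a test_start/test_end pair for a test that has already
+        # run, back-dating the start record by the test's duration.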
+        record = self._record(action='test_start', test=testname)
+        end_time = record['time']
+        record['time'] -= duration
+        self._log_obj(record)
+
+        record['action'] = 'test_end'
+        record['time'] = end_time
+        record['status'] = status
+        record.update(**details)
+        self._log_obj(record)
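
For reference, a brief sketch (not part of the patch) of how the runners above drive TestLogger and roughly what it prints; the test name, duration, pid and timestamps below are illustrative:

from structuredlog import TestLogger  # as imported by jittests.py and results.py above

slog = TestLogger("jittests")
slog.suite_start()
# -> {"source": "jittests", "thread": "main", "pid": 12345,
#     "time": 1510702607.0, "action": "suite_start", "tests": []}

# Mirrors print_automation_format(): one call per completed test, passing the
# status, wall-clock duration, and any extra details gathered by the runner.
slog.test("basic/example.js", "PASS", 1.5,
          message="Success", extra={"jitflags": "--ion-eager"})
# -> a "test_start" record whose time is back-dated by 1.5 s, followed by a
#    "test_end" record carrying "status", "message" and "extra".

slog.suite_end()
# -> {"source": "jittests", "thread": "main", "pid": 12345,
#     "time": 1510702609.0, "action": "suite_end"}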