Bug 1162456 - Part 1: Add --format option in jstests.py and jit_test.py as a replacement for --tinderbox. r=sfink, a=test-only
author Tooru Fujisawa <arai_a@mac.com>
Fri, 08 May 2015 05:05:42 +0900
changeset 266173 9d6eae8f4c6e
parent 266172 b8a4ed23ed26
child 266174 ec01909d713e
push id 4774
push user ryanvm@gmail.com
push date 2015-06-03 18:00 +0000
reviewers sfink
approval test-only
bugs 1162456
milestone 39.0
js/src/jit-test/jit_test.py
js/src/tests/jstests.py
js/src/tests/lib/jittests.py
js/src/tests/lib/results.py
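
The whole patch follows one pattern: --tinderbox is kept as a deprecated alias that writes into the same optparse dest ('format') via action='store_const', so every call site below only needs to test options.format. A minimal standalone sketch of that aliasing, reusing the option definitions from the patch (illustrative only, not part of the changeset):

    from optparse import OptionParser

    op = OptionParser()
    # Legacy flag: store_const writes 'automation' into the shared dest.
    op.add_option('--tinderbox', dest='format', action='store_const',
                  const='automation',
                  help='Use automation-parseable output format')
    # New flag: an explicit choice between the two output formats.
    op.add_option('--format', dest='format', default='none', type='choice',
                  choices=['automation', 'none'],
                  help='Output format. Either automation or none'
                  ' (default %default).')

    options, _ = op.parse_args(['--tinderbox'])
    assert options.format == 'automation'  # old spelling maps to the new value
    options, _ = op.parse_args([])
    assert options.format == 'none'        # default when neither flag is given
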
--- a/js/src/jit-test/jit_test.py
+++ b/js/src/jit-test/jit_test.py
@@ -67,18 +67,23 @@ def main(argv):
     op.add_option('--slow', dest='run_slow', action='store_true',
                   help='also run tests marked as slow')
     op.add_option('--no-slow', dest='run_slow', action='store_false',
                   help='do not run tests marked as slow (the default)')
     op.add_option('-t', '--timeout', dest='timeout', type=float, default=150.0,
                   help='set test timeout in seconds')
     op.add_option('--no-progress', dest='hide_progress', action='store_true',
                   help='hide progress bar')
-    op.add_option('--tinderbox', dest='tinderbox', action='store_true',
-                  help='Tinderbox-parseable output format')
+    op.add_option('--tinderbox', dest='format', action='store_const',
+                  const='automation',
+                  help='Use automation-parseable output format')
+    op.add_option('--format', dest='format', default='none', type='choice',
+                  choices=['automation', 'none'],
+                  help='Output format. Either automation or none'
+                  ' (default %default).')
     op.add_option('--args', dest='shell_args', default='',
                   help='extra args to pass to the JS shell')
     op.add_option('-w', '--write-failures', dest='write_failures',
                   metavar='FILE',
                   help='Write a list of failed tests to [FILE]')
     op.add_option('-r', '--read-tests', dest='read_tests', metavar='FILE',
                   help='Run test files listed in [FILE]')
     op.add_option('-R', '--retest', dest='retest', metavar='FILE',
--- a/js/src/tests/jstests.py
+++ b/js/src/tests/jstests.py
@@ -154,18 +154,23 @@ def parse_args():
     output_og.add_option('-O', '--output-file',
                          help='Write all output to the given file'
                          ' (default: stdout).')
     output_og.add_option('--failure-file',
                          help='Write all not-passed tests to the given file.')
     output_og.add_option('--no-progress', dest='hide_progress',
                          action='store_true',
                          help='Do not show the progress bar.')
-    output_og.add_option('--tinderbox', action='store_true',
-                         help='Use tinderbox-parseable output format.')
+    output_og.add_option('--tinderbox', dest='format', action='store_const',
+                         const='automation',
+                         help='Use automation-parseable output format.')
+    output_og.add_option('--format', dest='format', default='none',
+                         type='choice', choices=['automation', 'none'],
+                         help='Output format. Either automation or none'
+                         ' (default %default).')
     op.add_option_group(output_og)
 
     special_og = OptionGroup(op, "Special",
                              "Special modes that do not run tests.")
     special_og.add_option('--make-manifests', metavar='BASE_TEST_PATH',
                           help='Generate reftest manifest files.')
     op.add_option_group(special_og)
     options, args = op.parse_args()
@@ -234,17 +239,17 @@ def parse_args():
         if not options.show_cmd:
             options.show_output = True
         try:
             options.output_fp = open(options.output_file, 'w')
         except IOError as ex:
             raise SystemExit("Failed to open output file: " + str(ex))
 
     # Hide the progress bar if it will get in the way of other output.
-    options.hide_progress = (options.tinderbox or
+    options.hide_progress = (options.format == 'automation' or
                              not ProgressBar.conservative_isatty() or
                              options.hide_progress)
 
     return (options, requested_paths, excluded_paths)
 
 def load_tests(options, requested_paths, excluded_paths):
     """
     Returns a tuple: (skipped_tests, test_list)
--- a/js/src/tests/lib/jittests.py
+++ b/js/src/tests/lib/jittests.py
@@ -467,18 +467,18 @@ def check_output(out, err, rc, timed_out
         if test.allow_overrecursed and 'too much recursion' in err \
            and 'Assertion failure' not in err:
             return True
 
         return False
 
     return True
 
-def print_tinderbox(ok, res):
-    # Output test failures in a TBPL parsable format, eg:
+def print_automation_format(ok, res):
+    # Output test failures in a parsable format suitable for automation, eg:
     # TEST-RESULT | filename.js | Failure description (code N, args "--foobar")
     #
     # Example:
     # TEST-PASS | foo/bar/baz.js | (code 0, args "--ion-eager")
     # TEST-UNEXPECTED-FAIL | foo/bar/baz.js | TypeError: or something (code -9, args "--no-ion")
     # INFO exit-status     : 3
     # INFO timed-out       : False
     # INFO stdout          > foo
@@ -659,17 +659,17 @@ def print_test_summary(num_tests, failur
         for res in failures:
             if res.timed_out:
                 show_test(res)
     else:
         print('PASSED ALL'
               + ('' if complete
                  else ' (partial run -- interrupted by user {})'.format(doing)))
 
-    if options.tinderbox:
+    if options.format == 'automation':
         num_failures = len(failures) if failures else 0
         print('Result summary:')
         print('Passed: {:d}'.format(num_tests - num_failures))
         print('Failed: {:d}'.format(num_failures))
 
     return not failures
 
 def process_test_results(results, num_tests, options):
@@ -718,18 +718,18 @@ def process_test_results(results, num_te
             if not ok:
                 failures.append(res)
                 if res.timed_out:
                     pb.message("TIMEOUT - {}".format(res.test.relpath_tests))
                     timeouts += 1
                 else:
                     pb.message("FAIL - {}".format(res.test.relpath_tests))
 
-            if options.tinderbox:
-                print_tinderbox(ok, res)
+            if options.format == 'automation':
+                print_automation_format(ok, res)
 
             n = i + 1
             pb.update(n, {
                 'PASS': n - len(failures),
                 'FAIL': len(failures),
                 'TIMEOUT': timeouts,
                 'SKIP': 0
             })
--- a/js/src/tests/lib/results.py
+++ b/js/src/tests/lib/results.py
@@ -113,18 +113,18 @@ class ResultsSink:
                 {'value': 'SKIP',    'color': 'brightgray'},
             ]
             self.pb = ProgressBar(testcount, fmt)
 
     def push(self, output):
         if output.timed_out:
             self.counts['TIMEOUT'] += 1
         if isinstance(output, NullTestOutput):
-            if self.options.tinderbox:
-                self.print_tinderbox_result(
+            if self.options.format == 'automation':
+                self.print_automation_result(
                     'TEST-KNOWN-FAIL', output.test, time=output.dt,
                     skip=True)
             self.counts['SKIP'] += 1
             self.n += 1
         else:
             result = TestResult.from_output(output)
             tup = (result.result, result.test.expect, result.test.random)
             dev_label = self.LABELS[tup][1]
@@ -165,47 +165,47 @@ class ResultsSink:
 
             if result.result == TestResult.PASS and not result.test.random:
                 self.counts['PASS'] += 1
             elif result.test.expect and not result.test.random:
                 self.counts['FAIL'] += 1
             else:
                 self.counts['SKIP'] += 1
 
-            if self.options.tinderbox:
+            if self.options.format == 'automation':
                 if result.result != TestResult.PASS and len(result.results) > 1:
                     for sub_ok, msg in result.results:
                         tup = (sub_ok, result.test.expect, result.test.random)
                         label = self.LABELS[tup][0]
                         if label == 'TEST-UNEXPECTED-PASS':
                             label = 'TEST-PASS (EXPECTED RANDOM)'
-                        self.print_tinderbox_result(
+                        self.print_automation_result(
                             label, result.test, time=output.dt,
                             message=msg)
                 tup = (result.result, result.test.expect, result.test.random)
-                self.print_tinderbox_result(
+                self.print_automation_result(
                     self.LABELS[tup][0], result.test, time=output.dt)
                 return
 
             if dev_label:
                 def singular(label):
                     return "FIXED" if label == "FIXES" else label[:-1]
                 self.pb.message("{} - {}".format(singular(dev_label),
                                                  output.test.path))
 
         self.pb.update(self.n, self.counts)
 
     def finish(self, completed):
         self.pb.finish(completed)
-        if not self.options.tinderbox:
+        if self.options.format != 'automation':
             self.list(completed)
 
     # Conceptually, this maps (test result x test expectation) to text labels.
     #      key   is (result, expect, random)
-    #      value is (tinderbox label, dev test category)
+    #      value is (automation label, dev test category)
     LABELS = {
         (TestResult.CRASH, False, False): ('TEST-UNEXPECTED-FAIL',               'REGRESSIONS'),
         (TestResult.CRASH, False, True):  ('TEST-UNEXPECTED-FAIL',               'REGRESSIONS'),
         (TestResult.CRASH, True,  False): ('TEST-UNEXPECTED-FAIL',               'REGRESSIONS'),
         (TestResult.CRASH, True,  True):  ('TEST-UNEXPECTED-FAIL',               'REGRESSIONS'),
 
         (TestResult.FAIL,  False, False): ('TEST-KNOWN-FAIL',                    ''),
         (TestResult.FAIL,  False, True):  ('TEST-KNOWN-FAIL (EXPECTED RANDOM)',  ''),
@@ -243,18 +243,18 @@ class ResultsSink:
         if self.all_passed():
             print('PASS' + suffix)
         else:
             print('FAIL' + suffix)
 
     def all_passed(self):
         return 'REGRESSIONS' not in self.groups and 'TIMEOUTS' not in self.groups
 
-    def print_tinderbox_result(self, label, test, message=None, skip=False,
-                               time=None):
+    def print_automation_result(self, label, test, message=None, skip=False,
+                                time=None):
         result = label
         result += " | " + test.path
         args = []
         if self.options.shell_args:
             args.append(self.options.shell_args)
         args += test.jitflags
         result += ' | (args: "{}")'.format(' '.join(args))
         if message: