Bug 753619 - Fix progress bar totals in jstests.py; r=dmandelin
authorTerrence Cole <terrence@mozilla.com>
Thu, 10 May 2012 16:06:41 -0700
changeset 93753 0ec49b73ea28e258668f52b13538deeb623ed6f7
parent 93752 0cd6ed757687814bdf4e8436c0de7ecc2b577f0f
child 93754 ae9aef30b549960a80ded494bdf7e9cac78753eb
push id: 9305
push user: tcole@mozilla.com
push date: Fri, 11 May 2012 01:08:47 +0000
treeherder: mozilla-inbound@0ec49b73ea28
reviewers: dmandelin
bugs: 753619
milestone: 15.0a1
Bug 753619 - Fix progress bar totals in jstests.py; r=dmandelin
Something in the previous refactorings messed up the display of the progress bar such that it occasionally jumps past 100% and overruns the 80-character line width.
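The root cause is visible in the hunks below: the old run_tests() sized the ProgressBar over len(tests), the runnable tests only, while main() had already pushed a NullTestOutput for every skipped test into the same ResultsSink counter. Once the counter passes the bar's limit, both the percentage and the '='*barlen segment overflow, which is exactly the "over 100% and 80 chars" symptom. The fix computes the total inside ResultsSink as len(skip_list) + len(test_list). A minimal sketch of the arithmetic, with invented counts and the percent formula taken from progressbar.py:

    skipped, runnable = 3, 4                  # invented counts for illustration
    pushes = skipped + runnable               # every test, skipped or not, hits push()

    old_limit = runnable                      # what the old run_tests() gave ProgressBar
    print(int(100.0 * pushes / old_limit))    # 175: percent (and the bar) overflow

    new_limit = skipped + runnable            # what ResultsSink now computes
    print(int(100.0 * pushes / new_limit))    # 100: capped where it should be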
js/src/tests/jstests.py
js/src/tests/lib/progressbar.py
js/src/tests/lib/results.py
--- a/js/src/tests/jstests.py
+++ b/js/src/tests/jstests.py
@@ -16,35 +16,22 @@ if (sys.platform.startswith('linux') or
     sys.platform.startswith('darwin')
    ):
     from lib.tasks_unix import run_all_tests
 else:
     from lib.tasks_win import run_all_tests
 
 def run_tests(options, tests, results):
     """Run the given tests, sending raw results to the given results accumulator."""
-    pb = None
-    if not options.hide_progress:
-        try:
-            from lib.progressbar import ProgressBar
-            pb = ProgressBar('', len(tests), 16)
-        except ImportError:
-            pass
-    results.pb = pb
+    try:
+        completed = run_all_tests(tests, results, options)
+    except KeyboardInterrupt:
+        completed = False
 
-    try:
-        results.finished = run_all_tests(tests, results, options)
-    except KeyboardInterrupt:
-        results.finished = False
-
-    if pb:
-        pb.finish()
-
-    if not options.tinderbox:
-        results.list()
+    results.finish(completed)
 
 def parse_args():
     """
     Parse command line arguments.
     Returns a tuple of: (options, requested_paths, excluded_paths)
         options :object: The raw OptionParser output; the shell path and
             output handle are attached as options.js_shell and options.output_fp.
         requested_paths :set<str>: Test paths specially requested on the CLI.
@@ -102,38 +89,38 @@ def parse_args():
 
     special_og = OptionGroup(op, "Special", "Special modes that do not run tests.")
     special_og.add_option('--make-manifests', metavar='BASE_TEST_PATH',
                           help='Generate reftest manifest files.')
     op.add_option_group(special_og)
     options, args = op.parse_args()
 
     # Acquire the JS shell given on the command line.
-    js_shell = None
+    options.js_shell = None
     requested_paths = set()
     if len(args) > 0:
-        js_shell = os.path.abspath(args[0])
+        options.js_shell = os.path.abspath(args[0])
         requested_paths |= set(args[1:])
 
     # If we do not have a shell, we must be in a special mode.
-    if js_shell is None and not options.make_manifests:
+    if options.js_shell is None and not options.make_manifests:
         op.error('missing JS_SHELL argument')
 
     # Valgrind and gdb are mutually exclusive.
     if options.valgrind and options.debug:
         op.error("--valgrind and --debug are mutually exclusive.")
 
     # Fill the debugger field, as needed.
     prefix = options.debugger.split() if options.debug else []
     if options.valgrind:
         prefix = ['valgrind'] + options.valgrind_args.split()
         if os.uname()[0] == 'Darwin':
             prefix.append('--dsymutil=yes')
         options.show_output = True
-    TestCase.set_js_cmd_prefix(js_shell, options.shell_args.split(), prefix)
+    TestCase.set_js_cmd_prefix(options.js_shell, options.shell_args.split(), prefix)
 
     # If files with lists of tests to run were specified, add them to the
     # requested tests set.
     if options.test_file:
         for test_file in options.test_file:
             requested_paths |= set([line.strip() for line in open(test_file).readlines()])
 
     # If files with lists of tests to exclude were specified, add them to the
@@ -147,46 +134,47 @@ def parse_args():
                     if line.startswith('#'): continue
                     line = line.strip()
                     if not line: continue
                     excluded_paths |= set((line,))
             finally:
                 fp.close()
 
     # Handle output redirection, if requested and relevant.
-    output_file = sys.stdout
+    options.output_fp = sys.stdout
     if options.output_file and (options.show_cmd or options.show_output):
-        output_file = open(options.output_file, 'w')
-    ResultsSink.output_file = output_file
+        try:
+            options.output_fp = open(options.output_file, 'w')
+        except IOError, ex:
+            raise SystemExit("Failed to open output file: " + str(ex))
 
     # Hide the progress bar if it will get in the way of other output.
-    if ((options.show_cmd or options.show_output) and
-        output_file == sys.stdout or options.tinderbox):
-        options.hide_progress = True
+    options.hide_progress = ((options.show_cmd or options.show_output) and
+                             options.output_fp == sys.stdout or options.tinderbox)
 
-    return (options, js_shell, requested_paths, excluded_paths)
+    return (options, requested_paths, excluded_paths)
 
-def load_tests(options, js_shell, requested_paths, excluded_paths):
+def load_tests(options, requested_paths, excluded_paths):
     """
     Returns a tuple: (skip_list, test_list)
         skip_list: [iterable<Test>] Tests found but skipped.
         test_list: [iterable<Test>] Tests found that should be run.
     """
     import lib.manifest as manifest
 
-    if js_shell is None:
+    if options.js_shell is None:
         xul_tester = manifest.NullXULInfoTester()
     else:
         if options.xul_info_src is None:
-            xul_info = manifest.XULInfo.create(js_shell)
+            xul_info = manifest.XULInfo.create(options.js_shell)
         else:
             xul_abi, xul_os, xul_debug = options.xul_info_src.split(r':')
             xul_debug = xul_debug.lower() == 'true'
             xul_info = manifest.XULInfo(xul_abi, xul_os, xul_debug)
-        xul_tester = manifest.XULInfoTester(xul_info, js_shell)
+        xul_tester = manifest.XULInfoTester(xul_info, options.js_shell)
 
     test_dir = os.path.dirname(os.path.abspath(__file__))
     test_list = manifest.load(test_dir, xul_tester)
     skip_list = []
 
     if options.make_manifests:
         manifest.make_manifests(options.make_manifests, test_list)
         sys.exit()
@@ -224,18 +212,18 @@ def load_tests(options, js_shell, reques
 
     if not options.run_skipped:
         skip_list = [ _ for _ in test_list if not _.enable ]
         test_list = [ _ for _ in test_list if _.enable ]
 
     return skip_list, test_list
 
 def main():
-    options, js_shell, requested_paths, excluded_paths = parse_args()
-    skip_list, test_list = load_tests(options, js_shell, requested_paths, excluded_paths)
+    options, requested_paths, excluded_paths = parse_args()
+    skip_list, test_list = load_tests(options, requested_paths, excluded_paths)
 
     if not test_list:
         print 'no tests selected'
         return 1
 
     test_dir = os.path.dirname(os.path.abspath(__file__))
 
     if options.debug:
@@ -254,25 +242,22 @@ def main():
         return 0
 
     curdir = os.getcwd()
     if test_dir not in ('', '.'):
         os.chdir(test_dir)
 
     results = None
     try:
-        results = ResultsSink(ResultsSink.output_file, options)
+        results = ResultsSink(options, len(skip_list) + len(test_list))
         for t in skip_list:
             results.push(NullTestOutput(t))
         run_tests(options, test_list, results)
     finally:
         os.chdir(curdir)
 
-    if ResultsSink.output_file != sys.stdout:
-        ResultsSink.output_file.close()
-
     if results is None or not results.all_passed():
         return 1
 
     return 0
 
 if __name__ == '__main__':
     sys.exit(main())
--- a/js/src/tests/lib/progressbar.py
+++ b/js/src/tests/lib/progressbar.py
@@ -18,18 +18,19 @@ class ProgressBar:
         pct = int(100.0 * self.cur / self.limit)
         barlen = int(1.0 * self.barlen * self.cur / self.limit) - 1
         bar = '='*barlen + '>'
         dt = datetime.datetime.now() - self.t0
         dt = dt.seconds + dt.microseconds * 1e-6
         sys.stdout.write(self.fmt % (self.label[:self.label_width], pct, bar, dt))
         sys.stdout.flush()
 
-    def finish(self):
-        self.update(self.limit)
+    def finish(self, complete=True):
+        final_count = self.limit if complete else self.cur
+        self.update(final_count)
         sys.stdout.write('\n')
 
 if __name__ == '__main__':
     pb = ProgressBar('test', 12)
     for i in range(12):
         pb.update(i)
         time.sleep(0.5)
     pb.finish()
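The new complete flag is what keeps an interrupted run honest: finish(False) redraws the bar at the last count seen rather than snapping it to the limit. A small sketch, assuming update() records its argument as self.cur (which the new finish() relies on) and run from the lib directory:

    from progressbar import ProgressBar

    pb = ProgressBar('demo', 10, 16)  # label, limit, label width, as in results.py
    pb.update(4)      # pretend a KeyboardInterrupt landed after 4 of 10 tests
    pb.finish(False)  # bar stays at 40%; finish() alone still jumps to 100%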
--- a/js/src/tests/lib/results.py
+++ b/js/src/tests/lib/results.py
@@ -1,10 +1,11 @@
 import re
 from subprocess import list2cmdline
+from progressbar import ProgressBar
 
 class TestOutput:
     """Output from a test run."""
     def __init__(self, test, cmd, out, err, rc, dt, timed_out):
         self.test = test   # Test
         self.cmd = cmd     # str:   command line of test
         self.out = out     # str:   stdout
         self.err = err     # str:   stderr
@@ -72,41 +73,42 @@ class TestResult:
             if (rc or passes > 0) and failures == 0:
                 result = cls.PASS
             else:
                 result = cls.FAIL
 
         return cls(test, result, results)
 
 class ResultsSink:
-    def __init__(self, output_file, options):
-        self.output_file = output_file
+    def __init__(self, options, testcount):
         self.options = options
+        self.fp = options.output_fp
 
         self.groups = {}
         self.counts = [ 0, 0, 0 ]
         self.n = 0
 
-        self.finished = False
         self.pb = None
+        if not options.hide_progress:
+            self.pb = ProgressBar('', testcount, 16)
 
     def push(self, output):
         if isinstance(output, NullTestOutput):
             if self.options.tinderbox:
                 self.print_tinderbox_result('TEST-KNOWN-FAIL', output.test.path, time=output.dt, skip=True)
             self.counts[2] += 1
             self.n += 1
         else:
             if self.options.show_cmd:
-                print >> self.output_file, list2cmdline(output.cmd)
+                print >> self.fp, list2cmdline(output.cmd)
 
             if self.options.show_output:
-                print >> self.output_file, '    rc = %d, run time = %f' % (output.rc, output.dt)
-                self.output_file.write(output.out)
-                self.output_file.write(output.err)
+                print >> self.fp, '    rc = %d, run time = %f' % (output.rc, output.dt)
+                self.fp.write(output.out)
+                self.fp.write(output.err)
 
             result = TestResult.from_output(output)
             tup = (result.result, result.test.expect, result.test.random)
             dev_label = self.LABELS[tup][1]
             if output.timed_out:
                 dev_label = 'TIMEOUTS'
             self.groups.setdefault(dev_label, []).append(result.test.path)
 
@@ -129,16 +131,22 @@ class ResultsSink:
                 self.print_tinderbox_result(self.LABELS[
                     (result.result, result.test.expect, result.test.random)][0],
                     result.test.path, time=output.dt)
 
         if self.pb:
             self.pb.label = '[%4d|%4d|%4d]'%tuple(self.counts)
             self.pb.update(self.n)
 
+    def finish(self, completed):
+        if self.pb:
+            self.pb.finish(completed)
+        if not self.options.tinderbox:
+            self.list(completed)
+
     # Conceptually, this maps (test result x test expectation) to text labels.
     #      key   is (result, expect, random)
     #      value is (tinderbox label, dev test category)
     LABELS = {
         (TestResult.CRASH, False, False): ('TEST-UNEXPECTED-FAIL',               'REGRESSIONS'),
         (TestResult.CRASH, False, True):  ('TEST-UNEXPECTED-FAIL',               'REGRESSIONS'),
         (TestResult.CRASH, True,  False): ('TEST-UNEXPECTED-FAIL',               'REGRESSIONS'),
         (TestResult.CRASH, True,  True):  ('TEST-UNEXPECTED-FAIL',               'REGRESSIONS'),
@@ -149,17 +157,17 @@ class ResultsSink:
         (TestResult.FAIL,  True,  True):  ('TEST-KNOWN-FAIL (EXPECTED RANDOM)',  ''),
 
         (TestResult.PASS,  False, False): ('TEST-UNEXPECTED-PASS',               'FIXES'),
         (TestResult.PASS,  False, True):  ('TEST-PASS (EXPECTED RANDOM)',        ''),
         (TestResult.PASS,  True,  False): ('TEST-PASS',                          ''),
         (TestResult.PASS,  True,  True):  ('TEST-PASS (EXPECTED RANDOM)',        ''),
         }
 
-    def list(self):
+    def list(self, completed):
         for label, paths in sorted(self.groups.items()):
             if label == '': continue
 
             print label
             for path in paths:
                 print '    %s'%path
 
         if self.options.failure_file:
@@ -168,17 +176,17 @@ class ResultsSink:
                   if 'REGRESSIONS' in self.groups:
                       for path in self.groups['REGRESSIONS']:
                           print >> failure_file, path
                   if 'TIMEOUTS' in self.groups:
                       for path in self.groups['TIMEOUTS']:
                           print >> failure_file, path
               failure_file.close()
 
-        suffix = '' if self.finished else ' (partial run -- interrupted by user)'
+        suffix = '' if completed else ' (partial run -- interrupted by user)'
         if self.all_passed():
             print 'PASS' + suffix
         else:
             print 'FAIL' + suffix
 
     def all_passed(self):
         return 'REGRESSIONS' not in self.groups and 'TIMEOUTS' not in self.groups
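For a quick sanity check of the new sink lifecycle, a minimal driver can be run from js/src/tests/lib. FakeOptions is invented here and carries only the attributes this diff shows the sink reading; nothing about it comes from the real OptionParser output:

    import sys
    from results import ResultsSink

    class FakeOptions(object):
        output_fp = sys.stdout   # where show_cmd/show_output text would go
        hide_progress = True     # no ProgressBar, so the demo output stays plain
        tinderbox = False
        show_cmd = False
        show_output = False
        failure_file = None

    sink = ResultsSink(FakeOptions(), 0)  # zero tests: nothing to push
    sink.finish(True)                     # prints 'PASS'; finish(False) would
                                          # append the '(partial run ...)' suffix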