Bug 725500 - Eliminate various jstest freezes on Unix; r=dmandelin
author: Terrence Cole <terrence@mozilla.com>
date: Tue, 14 Feb 2012 11:24:48 -0800
changeset 87861 0b557b56a6b430970fd33ac47f89868a34594526
parent 87860 5176e915dfd91d5d53ad10b4dc0cb7c9dde6d89a
child 87862 ffa9354e5ce78f4d560af5dd8adbe353b71d2fb9
push id: 22160
push user: mbrubeck@mozilla.com
push date: Tue, 28 Feb 2012 17:21:33 +0000
treeherder: mozilla-central@dde4e0089a18
reviewers: dmandelin
bugs: 725500
milestone: 13.0a1
This is a rewrite of the jstest loop for Unix to prevent a freeze that was possible with the old thread-based worker pool. See the writeup in the bug for details.
js/src/tests/jstests.py
js/src/tests/manifest.py
js/src/tests/results.py
js/src/tests/tasks_unix.py
js/src/tests/tasks_win.py
js/src/tests/tests.py
js/src/tests/workers.py
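
The freeze class this patch targets is the fork-plus-threads hazard called out in the new tasks_unix.py header: the old workers.py drove tests from Python threads, and a fork() performed while another thread holds a lock leaves the child with a permanently held lock and no thread left to release it. A minimal sketch of that hazard (illustrative only, not part of the patch):

    import os, threading, time

    lock = threading.Lock()

    def holder():
        lock.acquire()      # worker thread is mid-critical-section...
        time.sleep(10)      # ...when the main thread forks
        lock.release()

    threading.Thread(target=holder).start()
    time.sleep(0.1)         # ensure the lock is held at fork time

    pid = os.fork()
    if pid == 0:
        # Child: only the forking thread survives, so the inherited lock can
        # never be released; this acquire() freezes the child forever.
        lock.acquire()
        os._exit(0)
    os.waitpid(pid, 0)

The new dispatcher sidesteps this entirely by driving everything from a single thread with select() and waitpid().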
--- a/js/src/tests/jstests.py
+++ b/js/src/tests/jstests.py
@@ -1,213 +1,61 @@
 #!/usr/bin/env python
 
 # Test harness for JSTests, controlled by manifest files.
 
-import datetime, os, sys, subprocess
-from subprocess import *
+import datetime, os, sys
+from subprocess import call, list2cmdline
 
-from tests import TestResult, NullTestOutput
-from workers import Source
+from results import NullTestOutput, ResultsSink
+from tests import TestCase
 from progressbar import ProgressBar
+
+if sys.platform.startswith('linux') or sys.platform.startswith('darwin'):
+    from tasks_unix import run_all_tests
+else:
+    from tasks_win import run_all_tests
 
 def exclude_tests(test_list, exclude_files):
     exclude_paths = []
     for filename in exclude_files:
         for line in open(filename):
             if line.startswith('#'): continue
             line = line.strip('\n')
             if not line: continue
             exclude_paths.append(line)
     return [ _ for _ in test_list if _.path not in exclude_paths ]
 
-def check_manifest(test_list):
-    test_set = set([ _.path for _ in test_list ])
-
-    missing = []
-
-    for dirpath, dirnames, filenames in os.walk('.'):
-        for filename in filenames:
-            if dirpath == '.': continue
-            if not filename.endswith('.js'): continue
-            if filename in ('browser.js', 'shell.js', 'jsref.js', 'template.js'): continue
-
-            path = os.path.join(dirpath, filename)
-            if path.startswith('./'):
-                path = path[2:]
-            if path not in test_set:
-                missing.append(path)
-
-    if missing:
-        print "Test files not contained in any manifest:"
-        for path in missing:
-            print path
-    else:
-        print 'All test files are listed in manifests'
-
-def print_tinderbox_result(label, path, message=None, skip=False, time=None):
-    result = label
-    result += " | " + path
-    result += " |" + OPTIONS.shell_args
-    if message:
-        result += " | " + message
-    if skip:
-        result += ' | (SKIP)'
-    if time > OPTIONS.timeout:
-        result += ' | (TIMEOUT)'
-    print result
-
-class TestTask:
-    js_cmd_prefix = None
-
-    def __init__(self, test):
-        self.test = test
-
-    def __call__(self):
-        if self.test.enable or OPTIONS.run_skipped:
-            return self.test.run(self.js_cmd_prefix, OPTIONS.timeout)
-        else:
-            return NullTestOutput(self.test)
-
-    def __str__(self):
-        return str(self.test)
-
-    @classmethod
-    def set_js_cmd_prefix(self, js_path, js_args, debugger_prefix):
-        parts = []
-        if debugger_prefix:
-            parts += debugger_prefix
-        parts.append(js_path)
-        if js_args:
-            parts += js_args
-        self.js_cmd_prefix = parts
-
-class ResultsSink:
-    output_file = None
-
-    def __init__(self):
-        self.groups = {}
-        self.counts = [ 0, 0, 0 ]
-        self.n = 0
-
-        self.finished = False
-        self.pb = None
-
-    def push(self, output):
-        if isinstance(output, NullTestOutput):
-            if OPTIONS.tinderbox:
-                print_tinderbox_result('TEST-KNOWN-FAIL', output.test.path, time=output.dt, skip=True)
-            self.counts[2] += 1
-            self.n += 1
-        else:
-            if OPTIONS.show_cmd:
-                print >> self.output_file, subprocess.list2cmdline(output.cmd)
-
-            if OPTIONS.show_output:
-                print >> self.output_file, '    rc = %d, run time = %f' % (output.rc, output.dt)
-                self.output_file.write(output.out)
-                self.output_file.write(output.err)
-
-            result = TestResult.from_output(output)
-            tup = (result.result, result.test.expect, result.test.random)
-            dev_label = self.LABELS[tup][1]
-            if output.timed_out:
-                dev_label = 'TIMEOUTS'
-            self.groups.setdefault(dev_label, []).append(result.test.path)
-
-            self.n += 1
-
-            if result.result == TestResult.PASS and not result.test.random:
-                self.counts[0] += 1
-            elif result.test.expect and not result.test.random:
-                self.counts[1] += 1
-            else:
-                self.counts[2] += 1
-
-            if OPTIONS.tinderbox:
-                if len(result.results) > 1:
-                    for sub_ok, msg in result.results:
-                        label = self.LABELS[(sub_ok, result.test.expect, result.test.random)][0]
-                        if label == 'TEST-UNEXPECTED-PASS':
-                            label = 'TEST-PASS (EXPECTED RANDOM)'
-                        print_tinderbox_result(label, result.test.path, time=output.dt, message=msg)
-                print_tinderbox_result(self.LABELS[
-                    (result.result, result.test.expect, result.test.random)][0],
-                    result.test.path, time=output.dt)
-           
-        if self.pb:
-            self.pb.label = '[%4d|%4d|%4d]'%tuple(self.counts)
-            self.pb.update(self.n)
-
-    # Conceptually, this maps (test result x test expection) to text labels.
-    #      key   is (result, expect, random)
-    #      value is (tinderbox label, dev test category)
-    LABELS = {
-        (TestResult.CRASH, False, False): ('TEST-UNEXPECTED-FAIL',               'REGRESSIONS'),
-        (TestResult.CRASH, False, True):  ('TEST-UNEXPECTED-FAIL',               'REGRESSIONS'),
-        (TestResult.CRASH, True,  False): ('TEST-UNEXPECTED-FAIL',               'REGRESSIONS'),
-        (TestResult.CRASH, True,  True):  ('TEST-UNEXPECTED-FAIL',               'REGRESSIONS'),
-
-        (TestResult.FAIL,  False, False): ('TEST-KNOWN-FAIL',                    ''),
-        (TestResult.FAIL,  False, True):  ('TEST-KNOWN-FAIL (EXPECTED RANDOM)',  ''),
-        (TestResult.FAIL,  True,  False): ('TEST-UNEXPECTED-FAIL',               'REGRESSIONS'),
-        (TestResult.FAIL,  True,  True):  ('TEST-KNOWN-FAIL (EXPECTED RANDOM)',  ''),
-
-        (TestResult.PASS,  False, False): ('TEST-UNEXPECTED-PASS',               'FIXES'),
-        (TestResult.PASS,  False, True):  ('TEST-PASS (EXPECTED RANDOM)',        ''),
-        (TestResult.PASS,  True,  False): ('TEST-PASS',                          ''),
-        (TestResult.PASS,  True,  True):  ('TEST-PASS (EXPECTED RANDOM)',        ''),
-        }
-
-    def list(self):
-        for label, paths in sorted(self.groups.items()):
-            if label == '': continue
-
-            print label
-            for path in paths:
-                print '    %s'%path
-
-        if OPTIONS.failure_file:
-              failure_file = open(OPTIONS.failure_file, 'w')
-              if not self.all_passed():
-                  for path in self.groups['REGRESSIONS'] + self.groups['TIMEOUTS']:
-                      print >> failure_file, path
-              failure_file.close()
-
-        suffix = '' if self.finished else ' (partial run -- interrupted by user)'
-        if self.all_passed():
-            print 'PASS' + suffix
-        else:
-            print 'FAIL' + suffix
-
-    def all_passed(self):
-        return 'REGRESSIONS' not in self.groups and 'TIMEOUTS' not in self.groups
-
 def run_tests(tests, results):
     """Run the given tests, sending raw results to the given results accumulator."""
     pb = None
     if not OPTIONS.hide_progress:
         try:
             from progressbar import ProgressBar
             pb = ProgressBar('', len(tests), 16)
         except ImportError:
             pass
     results.pb = pb
 
-    test_list = [ TestTask(test) for test in tests ]
-    pipeline = Source(test_list, results, False)
-    results.finished = pipeline.start(OPTIONS.worker_count)
+    try:
+        results.finished = run_all_tests(tests, results, OPTIONS)
+    except KeyboardInterrupt:
+        results.finished = False
 
-    if pb: 
+    if pb:
         pb.finish()
 
     if not OPTIONS.tinderbox:
         results.list()
 
-if __name__ == '__main__':        
+if __name__ == '__main__':
     from optparse import OptionParser
     op = OptionParser(usage='%prog JS_SHELL [TEST-SPECS]')
     op.add_option('-s', '--show-cmd', dest='show_cmd', action='store_true',
                   help='show js shell command run')
     op.add_option('-o', '--show-output', dest='show_output', action='store_true',
                   help='show output from js shell')
     op.add_option('-O', '--output-file', dest='output_file',
                   help='write command output to the given file')
@@ -267,28 +115,28 @@ if __name__ == '__main__':
         debugger_prefix = ['gdb', '-q', '--args']
     elif OPTIONS.valgrind:
         debugger_prefix = ['valgrind']
         if os.uname()[0] == 'Darwin':
             debugger_prefix.append('--dsymutil=yes')
         if OPTIONS.valgrind_args:
             debugger_prefix.append(OPTIONS.valgrind_args)
         # Running under valgrind is not very useful if we don't show results.
-        OPTIONS.show_output = True 
+        OPTIONS.show_output = True
     else:
         debugger_prefix = []
 
-    TestTask.set_js_cmd_prefix(JS, OPTIONS.shell_args.split(), debugger_prefix)
+    TestCase.set_js_cmd_prefix(JS, OPTIONS.shell_args.split(), debugger_prefix)
 
     output_file = sys.stdout
     if OPTIONS.output_file and (OPTIONS.show_cmd or OPTIONS.show_output):
         output_file = open(OPTIONS.output_file, 'w')
     ResultsSink.output_file = output_file
 
-    if ((OPTIONS.show_cmd or OPTIONS.show_output) and 
+    if ((OPTIONS.show_cmd or OPTIONS.show_output) and
         output_file == sys.stdout or OPTIONS.tinderbox):
         OPTIONS.hide_progress = True
 
     if OPTIONS.manifest is None:
         filename = os.path.join(os.path.dirname(__file__), 'jstests.list')
         if os.path.isfile(filename):
             OPTIONS.manifest = filename
         else:
@@ -302,19 +150,20 @@ if __name__ == '__main__':
         if OPTIONS.xul_info_src is None:
             xul_info = manifest.XULInfo.create(JS)
         else:
             xul_abi, xul_os, xul_debug = OPTIONS.xul_info_src.split(r':')
            xul_debug = xul_debug.lower() == 'true'
             xul_info = manifest.XULInfo(xul_abi, xul_os, xul_debug)
         xul_tester = manifest.XULInfoTester(xul_info, JS)
     test_list = manifest.parse(OPTIONS.manifest, xul_tester)
+    skipped_list = []
 
     if OPTIONS.check_manifest:
-        check_manifest(test_list)
+        manifest.check_manifest(test_list)
         if JS is None:
             sys.exit()
 
     if OPTIONS.test_file:
         paths = set()
         for test_file in OPTIONS.test_file:
             paths |= set([ line.strip() for line in open(test_file).readlines()])
         test_list = [ _ for _ in test_list if _.path in paths ]
@@ -339,43 +188,52 @@ if __name__ == '__main__':
 
     if OPTIONS.run_only_skipped:
         OPTIONS.run_skipped = True
         test_list = [ _ for _ in test_list if not _.enable ]
 
     if not OPTIONS.run_slow_tests:
         test_list = [ _ for _ in test_list if not _.slow ]
 
-    if OPTIONS.debug and test_list:
+    if not OPTIONS.run_skipped:
+        skipped_list = [ _ for _ in test_list if not _.enable ]
+        test_list = [ _ for _ in test_list if _.enable ]
+
+    if not test_list:
+        print 'no tests selected'
+        sys.exit(1)
+
+    if OPTIONS.debug:
         if len(test_list) > 1:
             print('Multiple tests match command line arguments, debugger can only run one')
             for tc in test_list:
                 print('    %s'%tc.path)
             sys.exit(2)
 
-        cmd = test_list[0].get_command(TestTask.js_cmd_prefix)
+        cmd = test_list[0].get_command(TestCase.js_cmd_prefix)
         if OPTIONS.show_cmd:
-            print subprocess.list2cmdline(cmd)
+            print list2cmdline(cmd)
         manifest_dir = os.path.dirname(OPTIONS.manifest)
         if manifest_dir not in ('', '.'):
-            os.chdir(os.path.dirname(OPTIONS.manifest))
+            os.chdir(manifest_dir)
         call(cmd)
         sys.exit()
 
+    curdir = os.getcwd()
+    manifest_dir = os.path.dirname(OPTIONS.manifest)
+    if manifest_dir not in ('', '.'):
+        os.chdir(manifest_dir)
+
     results = None
-    if not test_list:
-        print 'no tests selected'
-    else:
-        curdir = os.getcwd()
-        manifest_dir = os.path.dirname(OPTIONS.manifest)
-        if manifest_dir not in ('', '.'):
-            os.chdir(os.path.dirname(OPTIONS.manifest))
-        try:
-            results = ResultsSink()
-            run_tests(test_list, results)
-        finally:
-            os.chdir(curdir)
+    try:
+        results = ResultsSink(output_file, OPTIONS)
+        run_tests(test_list, results)
+    finally:
+        os.chdir(curdir)
+
+    for t in skipped_list:
+        results.push(NullTestOutput(t))
 
     if output_file != sys.stdout:
         output_file.close()
 
     if results is None or not results.all_passed():
         sys.exit(1)
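
Both task runners now export the same run_all_tests(tests, results, options) entry point, which is what lets the import switch at the top of jstests.py stay platform-agnostic. For reference, a degenerate serial runner obeying the same contract (a sketch for illustration, not part of the patch):

    def run_all_tests(tests, results, options):
        # Run every test in order, pushing one TestOutput per test into the
        # ResultsSink; return True if the run completed (False would mean it
        # was interrupted).
        for test in tests:
            results.push(test.run(test.js_cmd_prefix, options.timeout))
        return True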
--- a/js/src/tests/manifest.py
+++ b/js/src/tests/manifest.py
@@ -50,17 +50,18 @@ class XULInfo:
         path = None
         for dir in dirs:
           _path = os.path.join(dir, 'config/autoconf.mk')
           if os.path.isfile(_path):
               path = _path
               break
 
         if path == None:
-            print "Can't find config/autoconf.mk on a directory containing the JS shell (searched from %s)"%jsdir
+            print ("Can't find config/autoconf.mk on a directory containing the JS shell"
+                   " (searched from %s)") % jsdir
             sys.exit(1)
 
         # Read the values.
         val_re = re.compile(r'(TARGET_XPCOM_ABI|OS_TARGET|MOZ_DEBUG)\s*=\s*(.*)')
         kw = {}
         for line in open(path):
             m = val_re.match(line)
             if m:
@@ -102,34 +103,34 @@ class XULInfoTester:
 class NullXULInfoTester:
     """Can be used to parse manifests without a JS shell."""
     def test(self, cond):
         return False
 
 def parse(filename, xul_tester, reldir = ''):
     ans = []
     comment_re = re.compile(r'#.*')
-    dir = os.path.dirname(filename)
+    dirname = os.path.dirname(filename)
 
     try:
         f = open(filename)
     except IOError:
         print "warning: include file not found: '%s'"%filename
         return ans
 
     for line in f:
         sline = comment_re.sub('', line)
         parts = sline.split()
         if len(parts) == 0:
             # line is empty or just a comment, skip
             pass
         elif parts[0] == 'include':
             include_file = parts[1]
             include_reldir = os.path.join(reldir, os.path.dirname(include_file))
-            ans += parse(os.path.join(dir, include_file), xul_tester, include_reldir)
+            ans += parse(os.path.join(dirname, include_file), xul_tester, include_reldir)
         elif parts[0] == 'url-prefix':
             # Doesn't apply to shell tests
             pass
         else:
             script = None
             enable = True
             expect = True
             random = False
@@ -177,17 +178,18 @@ def parse(filename, xul_tester, reldir =
                         else:
                             if fallback_action == "skip":
                                 expect = enable = False
                             elif fallback_action == "fail":
                                 expect = False
                             elif fallback_action == "random":
                                 random = True
                             else:
-                                raise Exception("Invalid precondition '%s' or fallback action '%s'" % (precondition, fallback_action))
+                                raise Exception(("Invalid precondition '%s' or fallback " +
+                                                 " action '%s'") % (precondition, fallback_action))
                             break
                     pos += 1
                 elif parts[pos] == 'script':
                     script = parts[pos+1]
                     pos += 2
                 elif parts[pos] == 'slow':
                     slow = True
                     pos += 1
@@ -199,8 +201,32 @@ def parse(filename, xul_tester, reldir =
                 else:
                     print 'warning: invalid manifest line element "%s"'%parts[pos]
                     pos += 1
 
             assert script is not None
             ans.append(TestCase(os.path.join(reldir, script),
                                 enable, expect, random, slow, debugMode))
     return ans
+
+def check_manifest(test_list):
+    test_set = set([ _.path for _ in test_list ])
+
+    missing = []
+
+    for dirpath, dirnames, filenames in os.walk('.'):
+        for filename in filenames:
+            if dirpath == '.': continue
+            if not filename.endswith('.js'): continue
+            if filename in ('browser.js', 'shell.js', 'jsref.js', 'template.js'): continue
+
+            path = os.path.join(dirpath, filename)
+            if path.startswith('./'):
+                path = path[2:]
+            if path not in test_set:
+                missing.append(path)
+
+    if missing:
+        print "Test files not contained in any manifest:"
+        for path in missing:
+            print path
+    else:
+        print 'All test files are listed in manifests'
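
For context, here are a few hypothetical jstests.list lines exercising the directives that parse() above understands (the paths are invented; '#' starts a comment):

    include ecma/jstests.list       # recurse into a sub-manifest
    url-prefix http://example.org/  # browser-only; ignored for shell tests
    script basic/sanity.js          # an ordinary test entry
    slow script gc/huge-alloc.js    # skipped unless slow tests are enabled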
new file mode 100644
--- /dev/null
+++ b/js/src/tests/results.py
@@ -0,0 +1,191 @@
+import re
+from subprocess import list2cmdline
+
+class TestOutput:
+    """Output from a test run."""
+    def __init__(self, test, cmd, out, err, rc, dt, timed_out):
+        self.test = test   # Test
+        self.cmd = cmd     # str:   command line of test
+        self.out = out     # str:   stdout
+        self.err = err     # str:   stderr
+        self.rc = rc       # int:   return code
+        self.dt = dt       # float: run time
+        self.timed_out = timed_out # bool: did the test time out
+
+class NullTestOutput:
+    """Variant of TestOutput that indicates a test was not run."""
+    def __init__(self, test):
+        self.test = test
+        self.cmd = ''
+        self.out = ''
+        self.err = ''
+        self.rc = 0
+        self.dt = 0.0
+        self.timed_out = False
+
+class TestResult:
+    """Classified result from a test run."""
+
+    PASS = 'PASS'
+    FAIL = 'FAIL'
+    CRASH = 'CRASH'
+
+    def __init__(self, test, result, results):
+        self.test = test
+        self.result = result
+        self.results = results
+
+    @classmethod
+    def from_output(cls, output):
+        test = output.test
+        result = None          # str:      overall result, see class-level variables
+        results = []           # (str,str) list: subtest results (pass/fail, message)
+
+        out, rc = output.out, output.rc
+
+        failures = 0
+        passes = 0
+
+        expected_rcs = []
+        if test.path.endswith('-n.js'):
+            expected_rcs.append(3)
+
+        for line in out.split('\n'):
+            if line.startswith(' FAILED!'):
+                failures += 1
+                msg = line[len(' FAILED! '):]
+                results.append((cls.FAIL, msg))
+            elif line.startswith(' PASSED!'):
+                passes += 1
+                msg = line[len(' PASSED! '):]
+                results.append((cls.PASS, msg))
+            else:
+                m = re.match('--- NOTE: IN THIS TESTCASE, WE EXPECT EXIT CODE ((?:-|\\d)+) ---', line)
+                if m:
+                    expected_rcs.append(int(m.group(1)))
+
+        if rc and rc not in expected_rcs:
+            if rc == 3:
+                result = cls.FAIL
+            else:
+                result = cls.CRASH
+        else:
+            if (rc or passes > 0) and failures == 0:
+                result = cls.PASS
+            else:
+                result = cls.FAIL
+
+        return cls(test, result, results)
+
+class ResultsSink:
+    def __init__(self, output_file, options):
+        self.output_file = output_file
+        self.options = options
+
+        self.groups = {}
+        self.counts = [ 0, 0, 0 ]
+        self.n = 0
+
+        self.finished = False
+        self.pb = None
+
+    def push(self, output):
+        if isinstance(output, NullTestOutput):
+            if self.options.tinderbox:
+                self.print_tinderbox_result('TEST-KNOWN-FAIL', output.test.path, time=output.dt, skip=True)
+            self.counts[2] += 1
+            self.n += 1
+        else:
+            if self.options.show_cmd:
+                print >> self.output_file, list2cmdline(output.cmd)
+
+            if self.options.show_output:
+                print >> self.output_file, '    rc = %d, run time = %f' % (output.rc, output.dt)
+                self.output_file.write(output.out)
+                self.output_file.write(output.err)
+
+            result = TestResult.from_output(output)
+            tup = (result.result, result.test.expect, result.test.random)
+            dev_label = self.LABELS[tup][1]
+            if output.timed_out:
+                dev_label = 'TIMEOUTS'
+            self.groups.setdefault(dev_label, []).append(result.test.path)
+
+            self.n += 1
+
+            if result.result == TestResult.PASS and not result.test.random:
+                self.counts[0] += 1
+            elif result.test.expect and not result.test.random:
+                self.counts[1] += 1
+            else:
+                self.counts[2] += 1
+
+            if self.options.tinderbox:
+                if len(result.results) > 1:
+                    for sub_ok, msg in result.results:
+                        label = self.LABELS[(sub_ok, result.test.expect, result.test.random)][0]
+                        if label == 'TEST-UNEXPECTED-PASS':
+                            label = 'TEST-PASS (EXPECTED RANDOM)'
+                        self.print_tinderbox_result(label, result.test.path, time=output.dt, message=msg)
+                self.print_tinderbox_result(self.LABELS[
+                    (result.result, result.test.expect, result.test.random)][0],
+                    result.test.path, time=output.dt)
+
+        if self.pb:
+            self.pb.label = '[%4d|%4d|%4d]'%tuple(self.counts)
+            self.pb.update(self.n)
+
+    # Conceptually, this maps (test result x test expectation) to text labels.
+    #      key   is (result, expect, random)
+    #      value is (tinderbox label, dev test category)
+    LABELS = {
+        (TestResult.CRASH, False, False): ('TEST-UNEXPECTED-FAIL',               'REGRESSIONS'),
+        (TestResult.CRASH, False, True):  ('TEST-UNEXPECTED-FAIL',               'REGRESSIONS'),
+        (TestResult.CRASH, True,  False): ('TEST-UNEXPECTED-FAIL',               'REGRESSIONS'),
+        (TestResult.CRASH, True,  True):  ('TEST-UNEXPECTED-FAIL',               'REGRESSIONS'),
+
+        (TestResult.FAIL,  False, False): ('TEST-KNOWN-FAIL',                    ''),
+        (TestResult.FAIL,  False, True):  ('TEST-KNOWN-FAIL (EXPECTED RANDOM)',  ''),
+        (TestResult.FAIL,  True,  False): ('TEST-UNEXPECTED-FAIL',               'REGRESSIONS'),
+        (TestResult.FAIL,  True,  True):  ('TEST-KNOWN-FAIL (EXPECTED RANDOM)',  ''),
+
+        (TestResult.PASS,  False, False): ('TEST-UNEXPECTED-PASS',               'FIXES'),
+        (TestResult.PASS,  False, True):  ('TEST-PASS (EXPECTED RANDOM)',        ''),
+        (TestResult.PASS,  True,  False): ('TEST-PASS',                          ''),
+        (TestResult.PASS,  True,  True):  ('TEST-PASS (EXPECTED RANDOM)',        ''),
+        }
+
+    def list(self):
+        for label, paths in sorted(self.groups.items()):
+            if label == '': continue
+
+            print label
+            for path in paths:
+                print '    %s'%path
+
+        if self.options.failure_file:
+            failure_file = open(self.options.failure_file, 'w')
+            if not self.all_passed():
+                # Either group may be absent; don't KeyError on a clean category.
+                for path in (self.groups.get('REGRESSIONS', []) +
+                             self.groups.get('TIMEOUTS', [])):
+                    print >> failure_file, path
+            failure_file.close()
+
+        suffix = '' if self.finished else ' (partial run -- interrupted by user)'
+        if self.all_passed():
+            print 'PASS' + suffix
+        else:
+            print 'FAIL' + suffix
+
+    def all_passed(self):
+        return 'REGRESSIONS' not in self.groups and 'TIMEOUTS' not in self.groups
+
+    def print_tinderbox_result(self, label, path, message=None, skip=False, time=None):
+        result = label
+        result += " | " + path
+        result += " |" + self.options.shell_args
+        if message:
+            result += " | " + message
+        if skip:
+            result += ' | (SKIP)'
+        if time > self.options.timeout:
+            result += ' | (TIMEOUT)'
+        print result
+
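A worked example of the classification rules in TestResult.from_output above: a test whose path ends in '-n.js' is expected to exit with code 3, so rc 3 together with no FAILED lines counts as a pass. FakeTest is a stand-in invented for the example:

    from results import TestOutput, TestResult

    class FakeTest:
        path = 'ecma/throws-error-n.js'   # '-n.js' => exit code 3 expected

    output = TestOutput(FakeTest(), ['js'], ' PASSED! threw as expected\n',
                        '', 3, 0.1, False)
    assert TestResult.from_output(output).result == TestResult.PASS
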
new file mode 100644
--- /dev/null
+++ b/js/src/tests/tasks_unix.py
@@ -0,0 +1,202 @@
+# A unix-oriented process dispatcher.  Uses a single thread with select and
+# waitpid to dispatch tasks.  This avoids several deadlocks that are possible
+# with fork/exec + threads + Python.
+
+import errno, os, sys, select
+from datetime import datetime, timedelta
+from results import TestOutput
+
+PROGRESS_BAR_GRANULARITY = 0.1 #sec
+
+class Task(object):
+    def __init__(self, test, pid, stdout, stderr):
+        self.test = test
+        self.cmd = test.get_command(test.js_cmd_prefix)
+        self.pid = pid
+        self.stdout = stdout
+        self.stderr = stderr
+        self.start = datetime.now()
+        self.out = []
+        self.err = []
+
+def spawn_test(test):
+    """Spawn one child, return a task struct."""
+    (rout, wout) = os.pipe()
+    (rerr, werr) = os.pipe()
+
+    rv = os.fork()
+
+    # Parent.
+    if rv:
+        os.close(wout)
+        os.close(werr)
+        return Task(test, rv, rout, rerr)
+
+    # Child.
+    os.close(rout)
+    os.close(rerr)
+
+    os.dup2(wout, 1)
+    os.dup2(werr, 2)
+
+    cmd = test.get_command(test.js_cmd_prefix)
+    try:
+        os.execvp(cmd[0], cmd)
+    except OSError:
+        os._exit(127)  # never fall back into the harness if exec fails
+
+def get_max_wait(tasks, results, timeout):
+    """
+    Return the longest we may sleep in select() before some running task
+    passes its deadline: the smallest remaining time among the tasks.
+    """
+    # The test harness uses a timeout of 0 to indicate we should wait forever,
+    # but for select(), a timeout of 0 indicates a zero-length wait.  Instead,
+    # translate that case into None to tell select to wait forever.
+    wait = None
+    if timeout and tasks:
+        now = datetime.now()
+        deadline = timedelta(seconds=timeout)
+        remaining = min(deadline - (now - task.start) for task in tasks)
+        wait = max(remaining.total_seconds(), 0)
+
+    # If we have a progress-meter, we need to wake up to update it frequently.
+    if results.pb is not None:
+        if wait is None or wait > PROGRESS_BAR_GRANULARITY:
+            wait = PROGRESS_BAR_GRANULARITY
+
+    return wait
+
+def flush_input(fd, frags):
+    """
+    Read any data pending on the file descriptor 'fd' into the list 'frags'.
+    """
+    rv = os.read(fd, 4096)
+    frags.append(rv)
+    while len(rv) == 4096:
+        # If read() returns a full buffer, there may or may not be more data
+        # waiting.  Poll the descriptor before reading again so that we never
+        # block indefinitely.
+        readable, _, _ = select.select([fd], [], [], 0)
+        if not readable:
+            return
+
+        rv = os.read(fd, 4096)
+        frags.append(rv)
+
+def read_input(tasks, timeout):
+    """
+    Select on input or errors from the given task list for a max of timeout
+    seconds.
+    """
+    rlist = []
+    exlist = []
+    outmap = {} # Fast access to fragment list given fd.
+    for t in tasks:
+        rlist.append(t.stdout)
+        rlist.append(t.stderr)
+        outmap[t.stdout] = t.out
+        outmap[t.stderr] = t.err
+        # This will trigger with a close event when the child dies, allowing
+        # us to respond immediately and not leave cores idle.
+        exlist.append(t.stdout)
+
+    readable, _, _ = select.select(rlist, [], exlist, timeout)
+    for fd in readable:
+        flush_input(fd, outmap[fd])
+
+def remove_task(tasks, pid):
+    """
+    Return a pair with the removed task and the new, modified tasks list.
+    """
+    index = None
+    for i, t in enumerate(tasks):
+        if t.pid == pid:
+            index = i
+            break
+    else:
+        raise KeyError("No such pid: %s" % pid)
+
+    out = tasks[index]
+    tasks.pop(index)
+    return out
+
+def timed_out(task, timeout):
+    """
+    Return True if the given task has been running for longer than |timeout|.
+    |timeout| may be falsy, indicating an infinite timeout (in which case
+    timed_out always returns False).
+    """
+    if timeout:
+        now = datetime.now()
+        return (now - task.start) > timedelta(seconds=timeout)
+    return False
+
+def reap_zombies(tasks, results, timeout):
+    """
+    Search for children of this process that have finished.  If they are tasks,
+    then this routine will clean up the child and send a TestOutput to the
+    results channel.  This method returns a new task list that has had the ended
+    tasks removed.
+    """
+    while True:
+        try:
+            pid, status = os.waitpid(0, os.WNOHANG)
+            if pid == 0:
+                break
+        except OSError, e:
+            if e.errno == errno.ECHILD:
+                break
+            raise
+
+        ended = remove_task(tasks, pid)
+        flush_input(ended.stdout, ended.out)
+        flush_input(ended.stderr, ended.err)
+        os.close(ended.stdout)
+        os.close(ended.stderr)
+
+        # A child killed by a signal (including our timeout SIGKILL) has no
+        # exit status; report the negated signal number, as subprocess does.
+        if os.WIFEXITED(status):
+            rc = os.WEXITSTATUS(status)
+        else:
+            rc = -os.WTERMSIG(status)
+
+        out = TestOutput(
+                   ended.test,
+                   ended.cmd,
+                   ''.join(ended.out),
+                   ''.join(ended.err),
+                   rc,
+                   (datetime.now() - ended.start).total_seconds(),
+                   timed_out(ended, timeout))
+        results.push(out)
+    return tasks
+
+def kill_undead(tasks, results, timeout):
+    """
+    Signal all children that are over the given timeout.  Return True if any
+    were signaled, so the caller knows to reap them with the timeout applied.
+    """
+    killed = False
+    for task in tasks:
+        if timed_out(task, timeout):
+            os.kill(task.pid, 9)
+            killed = True
+    return killed
+
+def run_all_tests(tests, results, options):
+    # Copy and reverse for fast pop off end.
+    tests = tests[:]
+    tests.reverse()
+
+    # The set of currently running tests.
+    tasks = []
+
+    while len(tests) or len(tasks):
+        while len(tests) and len(tasks) < options.worker_count:
+            tasks.append(spawn_test(tests.pop()))
+
+        timeout = get_max_wait(tasks, results, options.timeout)
+        read_input(tasks, timeout)
+
+        # We attempt to reap once before forcibly killing timed out tasks so
+        # that anything that died during our sleep is not marked as timed out
+        # in the test results.
+        tasks = reap_zombies(tasks, results, False)
+        if kill_undead(tasks, results, options.timeout):
+            tasks = reap_zombies(tasks, results, options.timeout)
+
+        if results.pb:
+            results.pb.update(results.n)
+
+    return True
+
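The select() timeout convention that get_max_wait() above has to translate between, shown in isolation: a timeout of None blocks until a descriptor is ready, while 0 is a pure poll. A quick sanity check, not part of the patch:

    import os, select

    r, w = os.pipe()
    ready, _, _ = select.select([r], [], [], 0)     # poll: nothing yet
    assert ready == []

    os.write(w, 'x')
    ready, _, _ = select.select([r], [], [], None)  # blocks until readable
    assert ready == [r]
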
new file mode 100644
--- /dev/null
+++ b/js/src/tests/tasks_win.py
@@ -0,0 +1,94 @@
+# Multiprocess activities with a push-driven divide-process-collect model.
+
+import os, sys, time
+from threading import Thread, Lock
+from Queue import Queue, Empty
+from datetime import datetime
+
+class Source:
+    def __init__(self, task_list, results, timeout, verbose = False):
+        self.tasks = Queue()
+        for task in task_list:
+            self.tasks.put_nowait(task)
+
+        self.results = results
+        self.timeout = timeout
+        self.verbose = verbose
+
+    def start(self, worker_count):
+        t0 = datetime.now()
+
+        sink = Sink(self.results)
+        self.workers = [ Worker(_+1, self.tasks, sink, self.timeout, self.verbose) for _ in range(worker_count) ]
+        if self.verbose: print '[P] Starting workers.'
+        for w in self.workers:
+            w.t0 = t0
+            w.start()
+        ans = self.join_workers()
+        if self.verbose: print '[P] Finished.'
+
+        return ans
+
+    def join_workers(self):
+        try:
+            for w in self.workers:
+                w.join(20000)
+            return True
+        except KeyboardInterrupt:
+            for w in self.workers:
+                w.stop = True
+            return False
+
+class Sink:
+    def __init__(self, results):
+        self.results = results
+        self.lock = Lock()
+
+    def push(self, result):
+        self.lock.acquire()
+        try:
+            self.results.push(result)
+        finally:
+            self.lock.release()
+
+class Worker(Thread):
+    def __init__(self, id, tasks, sink, timeout, verbose):
+        Thread.__init__(self)
+        self.setDaemon(True)
+        self.id = id
+        self.tasks = tasks
+        self.sink = sink
+        self.timeout = timeout
+        self.verbose = verbose
+
+        self.stop = False
+
+    def log(self, msg):
+        if self.verbose:
+            dd = datetime.now() - self.t0
+            dt = dd.seconds + 1e-6 * dd.microseconds
+            print '[W%d %.3f] %s' % (self.id, dt, msg)
+
+    def run(self):
+        try:
+            while True:
+                if self.stop:
+                    break
+                self.log('Get next task.')
+                task = self.tasks.get(False)
+                self.log('Start task %s.'%str(task))
+                result = task.run(task.js_cmd_prefix, self.timeout)
+                self.log('Finished task.')
+                self.sink.push(result)
+                self.log('Pushed result.')
+        except Empty:
+            pass
+
+def run_all_tests(tests, results, options):
+    pipeline = Source(tests, results, options.timeout, False)
+    return pipeline.start(options.worker_count)
+
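The worker-exit idiom tasks_win.py relies on, in isolation: the queue is fully loaded before the workers start, so a non-blocking get() raising Empty is a clean end-of-work signal. A sketch (Python 2, matching the harness):

    from Queue import Queue, Empty

    q = Queue()
    for item in ('a', 'b', 'c'):
        q.put_nowait(item)

    try:
        while True:
            item = q.get(False)  # non-blocking; raises Empty once drained
            print item
    except Empty:
        pass                     # queue exhausted: the worker returns
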
--- a/js/src/tests/tests.py
+++ b/js/src/tests/tests.py
@@ -2,16 +2,18 @@
 #
 # This contains classes that represent an individual test, including
 # metadata, and know how to run the tests and determine failures.
 
 import datetime, os, re, sys, time
 from subprocess import *
 from threading import *
 
+from results import TestOutput
+
 def do_run_cmd(cmd):
     l = [ None, None ]
     th_run_cmd(cmd, l)
     return l[1]
 
 def set_limits():
     # resource module not supported on all platforms
     try:
@@ -52,17 +54,17 @@ def run_cmd(cmd, timeout=60.0):
         if l[0] is not None:
             try:
                 # In Python 3, we could just do l[0].kill().
                 import signal
                 if sys.platform != 'win32':
                     os.kill(l[0].pid, signal.SIGKILL)
                 time.sleep(.1)
                 timed_out = True
-            except OSError:
+            except OSError, e:
                 # Expecting a "No such process" error
                 pass
     th.join()
     return l[1] + (timed_out,)
 
 class Test(object):
     """A runnable test."""
     def __init__(self, path):
@@ -72,31 +74,32 @@ class Test(object):
     def prefix_command(path):
         """Return the '-f shell.js' options needed to run a test with the given path."""
         if path == '':
             return [ '-f', 'shell.js' ]
         head, base = os.path.split(path)
         return Test.prefix_command(head) + [ '-f', os.path.join(path, 'shell.js') ]
 
     def get_command(self, js_cmd_prefix):
-        dir, filename = os.path.split(self.path)
-        cmd = js_cmd_prefix + Test.prefix_command(dir)
+        dirname, filename = os.path.split(self.path)
+        cmd = js_cmd_prefix + Test.prefix_command(dirname)
         if self.debugMode:
             cmd += [ '-d' ]
         # There is a test that requires the path to start with './'.
         cmd += [ '-f', './' + self.path ]
         return cmd
 
     def run(self, js_cmd_prefix, timeout=30.0):
         cmd = self.get_command(js_cmd_prefix)
         out, err, rc, dt, timed_out = run_cmd(cmd, timeout)
         return TestOutput(self, cmd, out, err, rc, dt, timed_out)
 
 class TestCase(Test):
     """A test case consisting of a test and an expected result."""
+    js_cmd_prefix = None
 
     def __init__(self, path, enable, expect, random, slow, debugMode):
         Test.__init__(self, path)
         self.enable = enable     # bool: True => run test, False => don't run
         self.expect = expect     # bool: expected result, True => pass
         self.random = random     # bool: True => ignore output as 'random'
         self.slow = slow         # bool: True => test may run slowly
         self.debugMode = debugMode # bool: True => must be run in debug mode
@@ -110,82 +113,17 @@ class TestCase(Test):
         if self.random:
             ans += ', random'
         if self.slow:
             ans += ', slow'
         if self.debugMode:
             ans += ', debugMode'
         return ans
 
-class TestOutput:
-    """Output from a test run."""
-    def __init__(self, test, cmd, out, err, rc, dt, timed_out):
-        self.test = test   # Test
-        self.cmd = cmd     # str:   command line of test
-        self.out = out     # str:   stdout
-        self.err = err     # str:   stderr
-        self.rc = rc       # int:   return code
-        self.dt = dt       # float: run time
-        self.timed_out = timed_out # bool: did the test time out
-
-class NullTestOutput:
-    """Variant of TestOutput that indicates a test was not run."""
-    def __init__(self, test):
-        self.test = test
-        self.cmd = ''
-        self.out = ''
-        self.err = ''
-        self.rc = 0
-        self.dt = 0.0
-        self.timed_out = False
-
-class TestResult:
-    PASS = 'PASS'
-    FAIL = 'FAIL'
-    CRASH = 'CRASH'
-
-    """Classified result from a test run."""
-    def __init__(self, test, result, results):
-        self.test = test
-        self.result = result
-        self.results = results
-
     @classmethod
-    def from_output(cls, output):
-        test = output.test
-        result = None          # str:      overall result, see class-level variables
-        results = []           # (str,str) list: subtest results (pass/fail, message)
-
-        out, rc = output.out, output.rc
-
-        failures = 0
-        passes = 0
-
-        expected_rcs = []
-        if test.path.endswith('-n.js'):
-            expected_rcs.append(3)
-
-        for line in out.split('\n'):
-            if line.startswith(' FAILED!'):
-                failures += 1
-                msg = line[len(' FAILED! '):]
-                results.append((cls.FAIL, msg))
-            elif line.startswith(' PASSED!'):
-                passes += 1
-                msg = line[len(' PASSED! '):]
-                results.append((cls.PASS, msg))
-            else:
-                m = re.match('--- NOTE: IN THIS TESTCASE, WE EXPECT EXIT CODE ((?:-|\\d)+) ---', line)
-                if m:
-                    expected_rcs.append(int(m.group(1)))
-
-        if rc and not rc in expected_rcs:
-            if rc == 3:
-                result = cls.FAIL
-            else:
-                result = cls.CRASH
-        else:
-            if (rc or passes > 0) and failures == 0:
-                result = cls.PASS
-            else:
-                result = cls.FAIL
-
-        return cls(test, result, results)
+    def set_js_cmd_prefix(cls, js_path, js_args, debugger_prefix):
+        parts = []
+        if debugger_prefix:
+            parts += debugger_prefix
+        parts.append(js_path)
+        if js_args:
+            parts += js_args
+        cls.js_cmd_prefix = parts
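
A worked example of the '-f shell.js' chain that Test.prefix_command() and get_command() above assemble, assuming POSIX path separators and a hypothetical test at ecma/Date/test.js:

    # get_command(['js']) for ecma/Date/test.js yields:
    #   ['js',
    #    '-f', 'shell.js',             # root harness
    #    '-f', 'ecma/shell.js',        # per-suite harness
    #    '-f', 'ecma/Date/shell.js',   # per-directory harness
    #    '-f', './ecma/Date/test.js']  # the test itself (note the leading ./)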
deleted file mode 100644
--- a/js/src/tests/workers.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# Multiprocess activities with a push-driven divide-process-collect model.
-
-import os, sys, time
-from threading import Thread, Lock
-from Queue import Queue, Empty
-from datetime import datetime
-
-class Source:
-    def __init__(self, task_list, results, verbose = False):
-        self.tasks = Queue()
-        for task in task_list:
-            self.tasks.put_nowait(task)
-
-        self.results = results
-        self.verbose = verbose
-    
-    def start(self, worker_count):
-        t0 = datetime.now()
-
-        sink = Sink(self.results)
-        self.workers = [ Worker(_+1, self.tasks, sink, self.verbose) for _ in range(worker_count) ]
-        if self.verbose: print '[P] Starting workers.'
-        for w in self.workers:
-            w.t0 = t0
-            w.start()
-        ans = self.join_workers()
-        if self.verbose: print '[P] Finished.'
-
-        t1 = datetime.now()
-        dt = t1-t0
-
-        return ans
-
-    def join_workers(self):
-        try:
-            for w in self.workers:
-                w.thread.join(20000)
-            return True
-        except KeyboardInterrupt:
-            for w in self.workers:
-                w.stop = True
-            return False
-
-class Sink:
-    def __init__(self, results):
-        self.results = results
-        self.lock = Lock()
-
-    def push(self, result):
-        self.lock.acquire()
-        try:
-            self.results.push(result)
-        finally:
-            self.lock.release()
-
-class Worker(object):
-    def __init__(self, id, tasks, sink, verbose):
-        self.id = id
-        self.tasks = tasks
-        self.sink = sink
-        self.verbose = verbose
-
-        self.thread = None
-        self.stop = False
-
-    def log(self, msg):
-        dd = datetime.now() - self.t0
-        dt = dd.seconds + 1e-6 * dd.microseconds
-        
-        if self.verbose:
-            print '[W%d %.3f] %s' % (self.id, dt, msg)
-
-    def start(self):
-        self.thread = Thread(target=self.run)
-        self.thread.setDaemon(True)
-        self.thread.start()
-
-    def run(self):
-        try:
-            while True:
-                if self.stop:
-                    break
-                self.log('Get next task.')
-                task = self.tasks.get(False)
-                self.log('Start task %s.'%str(task))
-                result = task()
-                self.log('Finished task.')
-                self.sink.push(result)
-                self.log('Pushed result.')
-        except Empty:
-            pass