Bug 638219 - Move jittests.main() function back into jit_test.py script. r=terrence
author Dirkjan Ochtman <dirkjan@ochtman.nl>
Wed, 23 Jan 2013 18:46:55 +0100
changeset 131278 08b008e9759c35068768bc231f4a132390b8d72b
parent 131277 86287769a004fc227610c330abd7ccfaecc52cbd
child 131279 d38f1897c28fbb63840cc1c7bf1a17bb8258554e
push id 2323
push user bbajaj@mozilla.com
push date Mon, 01 Apr 2013 19:47:02 +0000
treeherder mozilla-beta@7712be144d91
reviewers terrence
bugs 638219
milestone 21.0a1
js/src/jit-test/jit_test.py
js/src/tests/lib/jittests.py
--- a/js/src/jit-test/jit_test.py
+++ b/js/src/jit-test/jit_test.py
@@ -1,20 +1,212 @@
 #!/usr/bin/env python
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
-import sys
+import os, shlex, subprocess, sys, traceback
 
 def add_libdir_to_path():
     from os.path import dirname, exists, join, realpath
     js_src_dir = dirname(dirname(realpath(sys.argv[0])))
     assert exists(join(js_src_dir,'jsapi.h'))
     sys.path.append(join(js_src_dir, 'lib'))
     sys.path.append(join(js_src_dir, 'tests', 'lib'))
 
 add_libdir_to_path()
 
 import jittests
 
+def main(argv):
+
+    script_path = os.path.abspath(__file__)
+    script_dir = os.path.dirname(script_path)
+    test_dir = os.path.join(script_dir, 'tests')
+    lib_dir = os.path.join(script_dir, 'lib')
+
+    # If multiprocessing is not available, fall back to serial test execution
+    max_jobs_default = 1
+    if jittests.HAVE_MULTIPROCESSING:
+        try:
+            max_jobs_default = jittests.cpu_count()
+        except NotImplementedError:
+            pass
+
+    # The [TESTS] optional arguments are paths of test files relative
+    # to the jit-test/tests directory.
+
+    from optparse import OptionParser
+    op = OptionParser(usage='%prog [options] JS_SHELL [TESTS]')
+    op.add_option('-s', '--show-cmd', dest='show_cmd', action='store_true',
+                  help='show js shell command run')
+    op.add_option('-f', '--show-failed-cmd', dest='show_failed',
+                  action='store_true', help='show command lines of failed tests')
+    op.add_option('-o', '--show-output', dest='show_output', action='store_true',
+                  help='show output from js shell')
+    op.add_option('-x', '--exclude', dest='exclude', action='append',
+                  help='exclude given test dir or path')
+    op.add_option('--no-slow', dest='run_slow', action='store_false',
+                  help='do not run tests marked as slow')
+    op.add_option('-t', '--timeout', dest='timeout',  type=float, default=150.0,
+                  help='set test timeout in seconds')
+    op.add_option('--no-progress', dest='hide_progress', action='store_true',
+                  help='hide progress bar')
+    op.add_option('--tinderbox', dest='tinderbox', action='store_true',
+                  help='Tinderbox-parseable output format')
+    op.add_option('--args', dest='shell_args', default='',
+                  help='extra args to pass to the JS shell')
+    op.add_option('-w', '--write-failures', dest='write_failures', metavar='FILE',
+                  help='Write a list of failed tests to [FILE]')
+    op.add_option('-r', '--read-tests', dest='read_tests', metavar='FILE',
+                  help='Run test files listed in [FILE]')
+    op.add_option('-R', '--retest', dest='retest', metavar='FILE',
+                  help='Retest using test list file [FILE]')
+    op.add_option('-g', '--debug', dest='debug', action='store_true',
+                  help='Run test in gdb')
+    op.add_option('--valgrind', dest='valgrind', action='store_true',
+                  help='Enable the |valgrind| flag, if valgrind is in $PATH.')
+    op.add_option('--valgrind-all', dest='valgrind_all', action='store_true',
+                  help='Run all tests with valgrind, if valgrind is in $PATH.')
+    op.add_option('--jitflags', dest='jitflags', default='',
+                  help='Example: --jitflags=m,mn to run each test with "-m" and "-m -n" [default="%default"]. ' +
+                       'Long flags, such as "--no-jm", should be set using --args.')
+    op.add_option('--avoid-stdio', dest='avoid_stdio', action='store_true',
+                  help='Use js-shell file indirection instead of piping stdio.')
+    op.add_option('--write-failure-output', dest='write_failure_output', action='store_true',
+                  help='With --write-failures=FILE, additionally write the output of failed tests to [FILE]')
+    op.add_option('--ion', dest='ion', action='store_true',
+                  help='Run tests once with --ion-eager and once with --no-jm (ignores --jitflags)')
+    op.add_option('--tbpl', dest='tbpl', action='store_true',
+                  help='Run tests with all IonMonkey option combinations (ignores --jitflags)')
+    op.add_option('-j', '--worker-count', dest='max_jobs', type=int, default=max_jobs_default,
+                  help='Number of tests to run in parallel (default %default)')
+
+    options, args = op.parse_args(argv)
+    if len(args) < 1:
+        op.error('missing JS_SHELL argument')
+    # We need to make sure we are using backslashes on Windows.
+    options.js_shell, test_args = os.path.abspath(args[0]), args[1:]
+
+    if jittests.stdio_might_be_broken():
+        # Prefer erring on the side of caution and not using stdio if
+        # it might be broken on this platform.  The file-redirect
+        # fallback should work on any platform, so at worst by
+        # guessing wrong we might have slowed down the tests a bit.
+        #
+        # XXX technically we could check for broken stdio, but it
+        # really seems like overkill.
+        options.avoid_stdio = True
+
+    if options.retest:
+        options.read_tests = options.retest
+        options.write_failures = options.retest
+
+    test_list = []
+    read_all = True
+
+    if test_args:
+        read_all = False
+        for arg in test_args:
+            test_list += jittests.find_tests(test_dir, arg)
+
+    if options.read_tests:
+        read_all = False
+        try:
+            f = open(options.read_tests)
+            for line in f:
+                test_list.append(os.path.join(test_dir, line.strip('\n')))
+            f.close()
+        except IOError:
+            if options.retest:
+                read_all = True
+            else:
+                sys.stderr.write("Exception thrown trying to read test file '%s'\n"%
+                                 options.read_tests)
+                traceback.print_exc()
+                sys.stderr.write('---\n')
+
+    if read_all:
+        test_list = jittests.find_tests(test_dir)
+
+    if options.exclude:
+        exclude_list = []
+        for exclude in options.exclude:
+            exclude_list += jittests.find_tests(test_dir, exclude)
+        test_list = [ test for test in test_list if test not in set(exclude_list) ]
+
+    if not test_list:
+        print >> sys.stderr, "No tests found matching command line arguments."
+        sys.exit(0)
+
+    test_list = [jittests.Test.from_file(_, options) for _ in test_list]
+
+    if not options.run_slow:
+        test_list = [ _ for _ in test_list if not _.slow ]
+
+    # The full test list is ready. Now create copies for each JIT configuration.
+    job_list = []
+    if options.tbpl:
+        # Running all bits would take forever. Instead, we test a few interesting combinations.
+        flags = [
+                      ['--no-jm'],
+                      ['--ion-eager'],
+                      # Below are the equivalents of the old shell flags: ,m,am,amd,n,mn,amn,amdn,mdn
+                      ['--no-ion', '--no-jm', '--no-ti'],
+                      ['--no-ion', '--no-ti'],
+                      ['--no-ion', '--no-ti', '-a', '-d'],
+                      ['--no-ion', '--no-jm'],
+                      ['--no-ion'],
+                      ['--no-ion', '-a'],
+                      ['--no-ion', '-a', '-d'],
+                      ['--no-ion', '-d']
+                    ]
+        for test in test_list:
+            for variant in flags:
+                new_test = test.copy()
+                new_test.jitflags.extend(variant)
+                job_list.append(new_test)
+    elif options.ion:
+        flags = [['--no-jm'], ['--ion-eager']]
+        for test in test_list:
+            for variant in flags:
+                new_test = test.copy()
+                new_test.jitflags.extend(variant)
+                job_list.append(new_test)
+    else:
+        jitflags_list = jittests.parse_jitflags(options)
+        for test in test_list:
+            for jitflags in jitflags_list:
+                new_test = test.copy()
+                new_test.jitflags.extend(jitflags)
+                job_list.append(new_test)
+
+    shell_args = shlex.split(options.shell_args)
+
+    if options.debug:
+        if len(job_list) > 1:
+            print 'Multiple tests match command line arguments; the debugger can only run one'
+            for tc in job_list:
+                print '    %s' % tc.path
+            sys.exit(1)
+
+        tc = job_list[0]
+        cmd = [ 'gdb', '--args' ] + jittests.get_test_cmd(options.js_shell, tc.path, tc.jitflags, lib_dir, shell_args)
+        subprocess.call(cmd)
+        sys.exit()
+
+    try:
+        ok = None
+        if options.max_jobs > 1 and jittests.HAVE_MULTIPROCESSING:
+            ok = jittests.run_tests_parallel(job_list, test_dir, lib_dir, shell_args, options)
+        else:
+            ok = jittests.run_tests(job_list, test_dir, lib_dir, shell_args, options)
+        if not ok:
+            sys.exit(2)
+    except OSError:
+        if not os.path.exists(options.js_shell):
+            print >> sys.stderr, "JS shell argument: file does not exist: '%s'" % options.js_shell
+            sys.exit(1)
+        else:
+            raise
+
 if __name__ == '__main__':
-    jittests.main(sys.argv[1:])
+    main(sys.argv[1:])
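For reference, the --jitflags option above delegates to jittests.parse_jitflags, whose body is not part of this diff. A minimal sketch of the assumed expansion, where "m,mn" becomes the flag sets ["-m"] and ["-m", "-n"] (the function name here is illustrative, not the harness's):

    def parse_jitflags_sketch(jitflags_option):
        # Each comma-separated group is one shell configuration; each letter
        # in a group becomes one short flag, e.g. "mn" -> ['-m', '-n'].
        return [['-' + ch for ch in group]
                for group in jitflags_option.split(',')]

    assert parse_jitflags_sketch('m,mn') == [['-m'], ['-m', '-n']]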
--- a/js/src/tests/lib/jittests.py
+++ b/js/src/tests/lib/jittests.py
@@ -2,19 +2,19 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 
 # jit_test.py -- Python harness for JavaScript trace tests.
 
 from __future__ import print_function
-import os, sys, tempfile, traceback, time, shlex
+import os, sys, tempfile, traceback, time
 import subprocess
-from subprocess import Popen, PIPE, call
+from subprocess import Popen, PIPE
 from threading import Thread
 import signal
 
 try:
     from multiprocessing import Process, Manager, cpu_count
     HAVE_MULTIPROCESSING = True
 except ImportError:
     HAVE_MULTIPROCESSING = False
@@ -540,202 +540,10 @@ def platform_might_be_android():
         import android
         return True
     except ImportError:
         return False
 
 def stdio_might_be_broken():
     return platform_might_be_android()
 
-def main(argv):
-
-    script_path = os.path.abspath(sys.modules['__main__'].__file__)
-    script_dir = os.path.dirname(script_path)
-    test_dir = os.path.join(script_dir, 'tests')
-    lib_dir = os.path.join(script_dir, 'lib')
-
-    # If multiprocessing is not available, fall back to serial test execution
-    max_jobs_default = 1
-    if HAVE_MULTIPROCESSING:
-        try:
-            max_jobs_default = cpu_count()
-        except NotImplementedError:
-            pass
-
-    # The [TESTS] optional arguments are paths of test files relative
-    # to the jit-test/tests directory.
-
-    from optparse import OptionParser
-    op = OptionParser(usage='%prog [options] JS_SHELL [TESTS]')
-    op.add_option('-s', '--show-cmd', dest='show_cmd', action='store_true',
-                  help='show js shell command run')
-    op.add_option('-f', '--show-failed-cmd', dest='show_failed',
-                  action='store_true', help='show command lines of failed tests')
-    op.add_option('-o', '--show-output', dest='show_output', action='store_true',
-                  help='show output from js shell')
-    op.add_option('-x', '--exclude', dest='exclude', action='append',
-                  help='exclude given test dir or path')
-    op.add_option('--no-slow', dest='run_slow', action='store_false',
-                  help='do not run tests marked as slow')
-    op.add_option('-t', '--timeout', dest='timeout',  type=float, default=150.0,
-                  help='set test timeout in seconds')
-    op.add_option('--no-progress', dest='hide_progress', action='store_true',
-                  help='hide progress bar')
-    op.add_option('--tinderbox', dest='tinderbox', action='store_true',
-                  help='Tinderbox-parseable output format')
-    op.add_option('--args', dest='shell_args', default='',
-                  help='extra args to pass to the JS shell')
-    op.add_option('-w', '--write-failures', dest='write_failures', metavar='FILE',
-                  help='Write a list of failed tests to [FILE]')
-    op.add_option('-r', '--read-tests', dest='read_tests', metavar='FILE',
-                  help='Run test files listed in [FILE]')
-    op.add_option('-R', '--retest', dest='retest', metavar='FILE',
-                  help='Retest using test list file [FILE]')
-    op.add_option('-g', '--debug', dest='debug', action='store_true',
-                  help='Run test in gdb')
-    op.add_option('--valgrind', dest='valgrind', action='store_true',
-                  help='Enable the |valgrind| flag, if valgrind is in $PATH.')
-    op.add_option('--valgrind-all', dest='valgrind_all', action='store_true',
-                  help='Run all tests with valgrind, if valgrind is in $PATH.')
-    op.add_option('--jitflags', dest='jitflags', default='',
-                  help='Example: --jitflags=m,mn to run each test with "-m" and "-m -n" [default="%default"]. ' +
-                       'Long flags, such as "--no-jm", should be set using --args.')
-    op.add_option('--avoid-stdio', dest='avoid_stdio', action='store_true',
-                  help='Use js-shell file indirection instead of piping stdio.')
-    op.add_option('--write-failure-output', dest='write_failure_output', action='store_true',
-                  help='With --write-failures=FILE, additionally write the output of failed tests to [FILE]')
-    op.add_option('--ion', dest='ion', action='store_true',
-                  help='Run tests once with --ion-eager and once with --no-jm (ignores --jitflags)')
-    op.add_option('--tbpl', dest='tbpl', action='store_true',
-                  help='Run tests with all IonMonkey option combinations (ignores --jitflags)')
-    op.add_option('-j', '--worker-count', dest='max_jobs', type=int, default=max_jobs_default,
-                  help='Number of tests to run in parallel (default %default)')
-
-    options, args = op.parse_args(argv)
-    if len(args) < 1:
-        op.error('missing JS_SHELL argument')
-    # We need to make sure we are using backslashes on Windows.
-    options.js_shell, test_args = os.path.abspath(args[0]), args[1:]
-
-    if stdio_might_be_broken():
-        # Prefer erring on the side of caution and not using stdio if
-        # it might be broken on this platform.  The file-redirect
-        # fallback should work on any platform, so at worst by
-        # guessing wrong we might have slowed down the tests a bit.
-        #
-        # XXX technically we could check for broken stdio, but it
-        # really seems like overkill.
-        options.avoid_stdio = True
-
-    if options.retest:
-        options.read_tests = options.retest
-        options.write_failures = options.retest
-
-    test_list = []
-    read_all = True
-
-    if test_args:
-        read_all = False
-        for arg in test_args:
-            test_list += find_tests(test_dir, arg)
-
-    if options.read_tests:
-        read_all = False
-        try:
-            f = open(options.read_tests)
-            for line in f:
-                test_list.append(os.path.join(test_dir, line.strip('\n')))
-            f.close()
-        except IOError:
-            if options.retest:
-                read_all = True
-            else:
-                sys.stderr.write("Exception thrown trying to read test file '%s'\n"%
-                                 options.read_tests)
-                traceback.print_exc()
-                sys.stderr.write('---\n')
-
-    if read_all:
-        test_list = find_tests(test_dir)
-
-    if options.exclude:
-        exclude_list = []
-        for exclude in options.exclude:
-            exclude_list += find_tests(test_dir, exclude)
-        test_list = [ test for test in test_list if test not in set(exclude_list) ]
-
-    if not test_list:
-        print("No tests found matching command line arguments.", file=sys.stderr)
-        sys.exit(0)
-
-    test_list = [ Test.from_file(_, options) for _ in test_list ]
-
-    if not options.run_slow:
-        test_list = [ _ for _ in test_list if not _.slow ]
-
-    # The full test list is ready. Now create copies for each JIT configuration.
-    job_list = []
-    if options.tbpl:
-        # Running all bits would take forever. Instead, we test a few interesting combinations.
-        flags = [
-                      ['--no-jm'],
-                      ['--ion-eager'],
-                      # Below are the equivalents of the old shell flags: ,m,am,amd,n,mn,amn,amdn,mdn
-                      ['--no-ion', '--no-jm', '--no-ti'],
-                      ['--no-ion', '--no-ti'],
-                      ['--no-ion', '--no-ti', '-a', '-d'],
-                      ['--no-ion', '--no-jm'],
-                      ['--no-ion'],
-                      ['--no-ion', '-a'],
-                      ['--no-ion', '-a', '-d'],
-                      ['--no-ion', '-d']
-                    ]
-        for test in test_list:
-            for variant in flags:
-                new_test = test.copy()
-                new_test.jitflags.extend(variant)
-                job_list.append(new_test)
-    elif options.ion:
-        flags = [['--no-jm'], ['--ion-eager']]
-        for test in test_list:
-            for variant in flags:
-                new_test = test.copy()
-                new_test.jitflags.extend(variant)
-                job_list.append(new_test)
-    else:
-        jitflags_list = parse_jitflags(options)
-        for test in test_list:
-            for jitflags in jitflags_list:
-                new_test = test.copy()
-                new_test.jitflags.extend(jitflags)
-                job_list.append(new_test)
-
-    shell_args = shlex.split(options.shell_args)
-
-    if options.debug:
-        if len(job_list) > 1:
-            print('Multiple tests match command line arguments; the debugger can only run one')
-            for tc in job_list:
-                print('    %s' % tc.path)
-            sys.exit(1)
-
-        tc = job_list[0]
-        cmd = ['gdb', '--args'] + get_test_cmd(options.js_shell, tc.path, tc.jitflags, lib_dir, shell_args)
-        call(cmd)
-        sys.exit()
-
-    try:
-        ok = None
-        if options.max_jobs > 1 and HAVE_MULTIPROCESSING:
-            ok = run_tests_parallel(job_list, test_dir, lib_dir, shell_args, options)
-        else:
-            ok = run_tests(job_list, test_dir, lib_dir, shell_args, options)
-        if not ok:
-            sys.exit(2)
-    except OSError:
-        if not os.path.exists(options.js_shell):
-            print >> sys.stderr, "JS shell argument: file does not exist: '%s'"%options.js_shell
-            sys.exit(1)
-        else:
-            raise
-
 if __name__ == '__main__':
     print('Use ../jit-test/jit_test.py to run these tests.')
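As context for the --avoid-stdio option and the stdio_might_be_broken check above, the harness can substitute file redirection for pipes when talking to the js shell. A minimal sketch of that idea only; the real redirection logic lives in jittests.py and is not shown in this diff:

    import subprocess, tempfile

    def run_with_file_indirection(cmd):
        # Instead of piping stdout/stderr (which may be broken, e.g. on
        # Android), redirect them to temporary files and read those back.
        with tempfile.TemporaryFile() as out, tempfile.TemporaryFile() as err:
            rc = subprocess.call(cmd, stdout=out, stderr=err)
            out.seek(0)
            err.seek(0)
            return rc, out.read(), err.read()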