--- a/js/src/jit-test/jit_test.py
+++ b/js/src/jit-test/jit_test.py
@@ -5,17 +5,17 @@
from __future__ import print_function, unicode_literals
import math, os, posixpath, shlex, shutil, subprocess, sys, traceback
def add_libdir_to_path():
from os.path import dirname, exists, join, realpath
js_src_dir = dirname(dirname(realpath(sys.argv[0])))
- assert exists(join(js_src_dir,'jsapi.h'))
+ assert exists(join(js_src_dir, 'jsapi.h'))
sys.path.insert(0, join(js_src_dir, 'lib'))
sys.path.insert(0, join(js_src_dir, 'tests', 'lib'))
add_libdir_to_path()
import jittests
from tests import TBPL_FLAGS
@@ -44,82 +44,98 @@ def main(argv):
# The [TESTS] optional arguments are paths of test files relative
# to the jit-test/tests directory.
from optparse import OptionParser
op = OptionParser(usage='%prog [options] JS_SHELL [TESTS]')
op.add_option('-s', '--show-cmd', dest='show_cmd', action='store_true',
help='show js shell command run')
op.add_option('-f', '--show-failed-cmd', dest='show_failed',
- action='store_true', help='show command lines of failed tests')
- op.add_option('-o', '--show-output', dest='show_output', action='store_true',
+ action='store_true',
+ help='show command lines of failed tests')
+ op.add_option('-o', '--show-output', dest='show_output',
+ action='store_true',
help='show output from js shell')
- op.add_option('-F', '--failed-only', dest='failed_only', action='store_true',
- help="if --show-output is given, only print output for failed tests")
- op.add_option('--no-show-failed', dest='no_show_failed', action='store_true',
- help="don't print output for failed tests (no-op with --show-output)")
+ op.add_option('-F', '--failed-only', dest='failed_only',
+ action='store_true',
+ help="if --show-output is given, only print output for"
+ " failed tests")
+ op.add_option('--no-show-failed', dest='no_show_failed',
+ action='store_true',
+ help="don't print output for failed tests"
+ " (no-op with --show-output)")
op.add_option('-x', '--exclude', dest='exclude', action='append',
help='exclude given test dir or path')
op.add_option('--slow', dest='run_slow', action='store_true',
help='also run tests marked as slow')
op.add_option('--no-slow', dest='run_slow', action='store_false',
help='do not run tests marked as slow (the default)')
- op.add_option('-t', '--timeout', dest='timeout', type=float, default=150.0,
+ op.add_option('-t', '--timeout', dest='timeout', type=float, default=150.0,
help='set test timeout in seconds')
op.add_option('--no-progress', dest='hide_progress', action='store_true',
help='hide progress bar')
op.add_option('--tinderbox', dest='tinderbox', action='store_true',
help='Tinderbox-parseable output format')
op.add_option('--args', dest='shell_args', default='',
help='extra args to pass to the JS shell')
- op.add_option('-w', '--write-failures', dest='write_failures', metavar='FILE',
+ op.add_option('-w', '--write-failures', dest='write_failures',
+ metavar='FILE',
help='Write a list of failed tests to [FILE]')
op.add_option('-r', '--read-tests', dest='read_tests', metavar='FILE',
help='Run test files listed in [FILE]')
op.add_option('-R', '--retest', dest='retest', metavar='FILE',
help='Retest using test list file [FILE]')
op.add_option('-g', '--debug', dest='debug', action='store_true',
help='Run test in gdb')
op.add_option('--valgrind', dest='valgrind', action='store_true',
help='Enable the |valgrind| flag, if valgrind is in $PATH.')
op.add_option('--valgrind-all', dest='valgrind_all', action='store_true',
help='Run all tests with valgrind, if valgrind is in $PATH.')
op.add_option('--jitflags', dest='jitflags', default='',
- help='Example: --jitflags=m,mn to run each test with "-m" and "-m -n" [default="%default"]. ' +
- 'Long flags, such as "--ion-eager", should be set using --args.')
+ help='Example: --jitflags=m,mn to run each test with "-m"'
+ ' and "-m -n" [default="%default"]. Long flags, such as'
+ ' "--ion-eager", should be set using --args.')
op.add_option('--avoid-stdio', dest='avoid_stdio', action='store_true',
help='Use js-shell file indirection instead of piping stdio.')
- op.add_option('--write-failure-output', dest='write_failure_output', action='store_true',
- help='With --write-failures=FILE, additionally write the output of failed tests to [FILE]')
+ op.add_option('--write-failure-output', dest='write_failure_output',
+ action='store_true',
+ help='With --write-failures=FILE, additionally write the'
+ ' output of failed tests to [FILE]')
op.add_option('--ion', dest='ion', action='store_true',
- help='Run tests once with --ion-eager and once with --baseline-eager (ignores --jitflags)')
+ help='Run tests once with --ion-eager and once with'
+ ' --baseline-eager (ignores --jitflags)')
op.add_option('--tbpl', dest='tbpl', action='store_true',
- help='Run tests with all IonMonkey option combinations (ignores --jitflags)')
- op.add_option('-j', '--worker-count', dest='max_jobs', type=int, default=max_jobs_default,
+ help='Run tests with all IonMonkey option combinations'
+ ' (ignores --jitflags)')
+ op.add_option('-j', '--worker-count', dest='max_jobs', type=int,
+ default=max_jobs_default,
help='Number of tests to run in parallel (default %default)')
op.add_option('--remote', action='store_true',
help='Run tests on a remote device')
op.add_option('--deviceIP', action='store',
type='string', dest='device_ip',
help='IP address of remote device to test')
op.add_option('--devicePort', action='store',
type=int, dest='device_port', default=20701,
help='port of remote device to test')
op.add_option('--deviceSerial', action='store',
type='string', dest='device_serial', default=None,
help='ADB device serial number of remote device to test')
op.add_option('--deviceTransport', action='store',
type='string', dest='device_transport', default='sut',
- help='The transport to use to communicate with device: [adb|sut]; default=sut')
+ help='The transport to use to communicate with device:'
+ ' [adb|sut]; default=sut')
op.add_option('--remoteTestRoot', dest='remote_test_root', action='store',
type='string', default='/data/local/tests',
- help='The remote directory to use as test root (eg. /data/local/tests)')
+ help='The remote directory to use as test root'
+ ' (eg. /data/local/tests)')
op.add_option('--localLib', dest='local_lib', action='store',
type='string',
- help='The location of libraries to push -- preferably stripped')
+ help='The location of libraries to push -- preferably'
+ ' stripped')
op.add_option('--repeat', type=int, default=1,
help='Repeat tests the given number of times.')
op.add_option('--this-chunk', type=int, default=1,
help='The test chunk to run.')
op.add_option('--total-chunks', type=int, default=1,
help='The total number of test chunks.')
op.add_option('--ignore-timeouts', dest='ignore_timeouts', metavar='FILE',
help='Ignore timeouts of tests listed in [FILE]')
@@ -155,93 +171,102 @@ def main(argv):
for arg in test_args:
test_list += jittests.find_tests(arg)
if options.read_tests:
read_all = False
try:
f = open(options.read_tests)
for line in f:
- test_list.append(os.path.join(jittests.TEST_DIR, line.strip('\n')))
+ test_list.append(os.path.join(jittests.TEST_DIR,
+ line.strip('\n')))
f.close()
except IOError:
if options.retest:
read_all = True
else:
- sys.stderr.write("Exception thrown trying to read test file '%s'\n"%
- options.read_tests)
+ sys.stderr.write("Exception thrown trying to read test file"
+ " '{}'\n".format(options.read_tests))
traceback.print_exc()
sys.stderr.write('---\n')
if read_all:
test_list = jittests.find_tests()
if options.exclude:
exclude_list = []
for exclude in options.exclude:
exclude_list += jittests.find_tests(exclude)
- test_list = [ test for test in test_list if test not in set(exclude_list) ]
+ test_list = [test for test in test_list
+ if test not in set(exclude_list)]
if not test_list:
- print("No tests found matching command line arguments.", file=sys.stderr)
+ print("No tests found matching command line arguments.",
+ file=sys.stderr)
sys.exit(0)
test_list = [jittests.Test.from_file(_, options) for _ in test_list]
if not options.run_slow:
- test_list = [ _ for _ in test_list if not _.slow ]
+ test_list = [_ for _ in test_list if not _.slow]
# If chunking is enabled, determine which tests are part of this chunk.
# This code was adapted from testing/mochitest/runtestsremote.py.
if options.total_chunks > 1:
total_tests = len(test_list)
tests_per_chunk = math.ceil(total_tests / float(options.total_chunks))
start = int(round((options.this_chunk - 1) * tests_per_chunk))
end = int(round(options.this_chunk * tests_per_chunk))
test_list = test_list[start:end]
# The full test list is ready. Now create copies for each JIT configuration.
job_list = []
test_flags = []
if options.tbpl:
- # Running all bits would take forever. Instead, we test a few interesting combinations.
+ # Running all bits would take forever. Instead, we test a few
+ # interesting combinations.
test_flags = TBPL_FLAGS
elif options.ion:
- test_flags = [['--baseline-eager'], ['--ion-eager', '--ion-offthread-compile=off']]
+ test_flags = [['--baseline-eager'],
+ ['--ion-eager', '--ion-offthread-compile=off']]
else:
test_flags = jittests.parse_jitflags(options)
- job_list = [ _ for test in test_list for _ in test.copy_variants(test_flags) ]
+ job_list = [_ for test in test_list
+ for _ in test.copy_variants(test_flags)]
if options.ignore_timeouts:
read_all = False
try:
with open(options.ignore_timeouts) as f:
- options.ignore_timeouts = set([line.strip('\n') for line in f.readlines()])
+ options.ignore_timeouts = set(
+ [line.strip('\n') for line in f.readlines()])
except IOError:
sys.exit("Error reading file: " + options.ignore_timeouts)
else:
options.ignore_timeouts = set()
prefix = [which(args[0])] + shlex.split(options.shell_args)
prolog = os.path.join(jittests.LIB_DIR, 'prolog.js')
if options.remote:
- prolog = posixpath.join(options.remote_test_root, 'jit-tests', 'jit-tests', 'lib', 'prolog.js')
+ prolog = posixpath.join(options.remote_test_root,
+ 'jit-tests', 'jit-tests', 'lib', 'prolog.js')
prefix += ['-f', prolog]
# Clean up any remnants from previous crashes etc
shutil.rmtree(jittests.JS_CACHE_DIR, ignore_errors=True)
os.mkdir(jittests.JS_CACHE_DIR)
if options.debug:
if len(job_list) > 1:
- print('Multiple tests match command line arguments, debugger can only run one')
+ print('Multiple tests match command line'
+ ' arguments, debugger can only run one')
for tc in job_list:
- print(' %s' % tc.path)
+ print(' {}'.format(tc.path))
sys.exit(1)
tc = job_list[0]
cmd = ['gdb', '--args'] + tc.command(prefix, jittests.LIB_DIR)
subprocess.call(cmd)
sys.exit()
try:
@@ -251,15 +276,16 @@ def main(argv):
elif options.max_jobs > 1 and jittests.HAVE_MULTIPROCESSING:
ok = jittests.run_tests_parallel(job_list, prefix, options)
else:
ok = jittests.run_tests(job_list, prefix, options)
if not ok:
sys.exit(2)
except OSError:
if not os.path.exists(prefix[0]):
- print("JS shell argument: file does not exist: '%s'" % prefix[0], file=sys.stderr)
+ print("JS shell argument: file does not exist:"
+ " '{}'".format(prefix[0]), file=sys.stderr)
sys.exit(1)
else:
raise
if __name__ == '__main__':
main(sys.argv[1:])
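For reference, a minimal sketch of the chunk-splitting arithmetic used in the hunk above (--this-chunk/--total-chunks); the helper name is illustrative and not part of the patch:

    import math

    def select_chunk(test_list, this_chunk, total_chunks):
        # Same rounding as above: chunk size is ceil(total / chunks) and each
        # chunk's start/end indices are rounded independently.
        tests_per_chunk = math.ceil(len(test_list) / float(total_chunks))
        start = int(round((this_chunk - 1) * tests_per_chunk))
        end = int(round(this_chunk * tests_per_chunk))
        return test_list[start:end]

    # select_chunk(list(range(10)), 1, 3) -> [0, 1, 2, 3]; chunk 2 gets
    # [4, 5, 6, 7] and chunk 3 gets the remaining [8, 9].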
--- a/js/src/tests/lib/jittests.py
+++ b/js/src/tests/lib/jittests.py
@@ -103,22 +103,24 @@ class Test:
self.relpath_top = os.path.relpath(path, TOP_SRC_DIR)
# Path relative to mozilla/js/src/jit-test/tests/.
self.relpath_tests = os.path.relpath(path, TEST_DIR)
self.jitflags = [] # jit flags to enable
self.slow = False # True means the test is slow-running
self.allow_oom = False # True means that OOM is not considered a failure
- self.allow_unhandlable_oom = False # True means CrashAtUnhandlableOOM is not considered a failure
+ self.allow_unhandlable_oom = False # True means CrashAtUnhandlableOOM
+ # is not considered a failure
self.allow_overrecursed = False # True means that hitting the recursion
# limits is not considered a failure.
self.valgrind = False # True means run under valgrind
self.tz_pacific = False # True means force Pacific time for the test
- self.test_also_noasmjs = False # True means run with and without asm.js enabled.
+ self.test_also_noasmjs = False # True means run with and without asm.js
+ # enabled.
self.expect_error = '' # Errors to expect and consider passing
self.expect_status = 0 # Exit status to expect from shell
def copy(self):
t = Test(self.path)
t.jitflags = self.jitflags[:]
t.slow = self.slow
t.allow_oom = self.allow_oom
@@ -132,25 +134,25 @@ class Test:
return t
def copy_and_extend_jitflags(self, variant):
t = self.copy()
t.jitflags.extend(variant)
return t
def copy_variants(self, variants):
- # If the tests are flagged with the |jit-test| test-also-noasmjs flags, then
- # we duplicate the variants such that the test can be used both with the
- # interpreter and asmjs. This is a simple way to check for differential
- # behaviour.
+ # If the tests are flagged with the |jit-test| test-also-noasmjs flags,
+ # then we duplicate the variants such that the test can be used both
+ # with the interpreter and asmjs. This is a simple way to check for
+ # differential behaviour.
if self.test_also_noasmjs:
variants = variants + [['--no-asmjs']]
# For each list of jit flags, make a copy of the test.
- return [ self.copy_and_extend_jitflags(v) for v in variants ]
+ return [self.copy_and_extend_jitflags(v) for v in variants]
COOKIE = '|jit-test|'
CacheDir = JS_CACHE_DIR
@classmethod
def from_file(cls, path, options):
test = cls(path)
@@ -166,26 +168,30 @@ class Test:
continue
name, _, value = part.partition(':')
if value:
value = value.strip()
if name == 'error':
test.expect_error = value
elif name == 'exitstatus':
try:
- test.expect_status = int(value, 0);
+ test.expect_status = int(value, 0)
except ValueError:
- print("warning: couldn't parse exit status %s" % value)
+ print("warning: couldn't parse exit status"
+ " {}".format(value))
elif name == 'thread-count':
try:
- test.jitflags.append('--thread-count=' + int(value, 0));
+ test.jitflags.append('--thread-count={}'.format(
+ int(value, 0)))
except ValueError:
- print("warning: couldn't parse thread-count %s" % value)
+ print("warning: couldn't parse thread-count"
+ " {}".format(value))
else:
- print('%s: warning: unrecognized |jit-test| attribute %s' % (path, part))
+ print('{}: warning: unrecognized |jit-test| attribute'
+ ' {}'.format(path, part))
else:
if name == 'slow':
test.slow = True
elif name == 'allow-oom':
test.allow_oom = True
elif name == 'allow-unhandlable-oom':
test.allow_unhandlable_oom = True
elif name == 'allow-overrecursed':
@@ -195,47 +201,49 @@ class Test:
elif name == 'tz-pacific':
test.tz_pacific = True
elif name == 'test-also-noasmjs':
test.test_also_noasmjs = options.can_test_also_noasmjs
elif name == 'ion-eager':
test.jitflags.append('--ion-eager')
elif name == 'dump-bytecode':
test.jitflags.append('--dump-bytecode')
- elif name.startswith('--'): # // |jit-test| --ion-gvn=off; --no-sse4
+ elif name.startswith('--'):
+ # // |jit-test| --ion-gvn=off; --no-sse4
test.jitflags.append(name)
else:
- print('%s: warning: unrecognized |jit-test| attribute %s' % (path, part))
+ print('{}: warning: unrecognized |jit-test| attribute'
+ ' {}'.format(path, part))
if options.valgrind_all:
test.valgrind = True
return test
def command(self, prefix, libdir, remote_prefix=None):
path = self.path
if remote_prefix:
path = self.path.replace(TEST_DIR, remote_prefix)
- scriptdir_var = os.path.dirname(path);
+ scriptdir_var = os.path.dirname(path)
if not scriptdir_var.endswith('/'):
scriptdir_var += '/'
# Platforms where subprocess immediately invokes exec do not care
# whether we use double or single quotes. On windows and when using
# a remote device, however, we have to be careful to use the quote
# style that is the opposite of what the exec wrapper uses.
if remote_prefix:
quotechar = '"'
else:
quotechar = "'"
- expr = ("const platform=%s; const libdir=%s; const scriptdir=%s"
- % (js_quote(quotechar, sys.platform),
- js_quote(quotechar, libdir),
- js_quote(quotechar, scriptdir_var)))
+ expr = "const platform={}; const libdir={}; const scriptdir={}".format(
+ js_quote(quotechar, sys.platform),
+ js_quote(quotechar, libdir),
+ js_quote(quotechar, scriptdir_var))
# We may have specified '-a' or '-d' twice: once via --jitflags, once
# via the "|jit-test|" line. Remove dups because they are toggles.
cmd = prefix + ['--js-cache', Test.CacheDir]
cmd += list(set(self.jitflags)) + ['-e', expr, '-f', path]
if self.valgrind:
cmd = self.VALGRIND_CMD + cmd
return cmd
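For reference, a minimal sketch (not part of the patch; simplified, assumed helper name) of how a |jit-test| cookie line decomposes into the name/value attributes handled above:

    COOKIE = '|jit-test|'

    def parse_jit_test_cookie(first_line):
        # e.g. first_line == '// |jit-test| allow-oom; exitstatus: 3'
        if COOKIE not in first_line:
            return {}
        meta = first_line.split(COOKIE, 1)[1]
        attrs = {}
        for part in meta.split(';'):
            part = part.strip()
            if not part:
                continue
            name, _, value = part.partition(':')
            attrs[name.strip()] = value.strip() or True
        return attrs

    # parse_jit_test_cookie('// |jit-test| allow-oom; exitstatus: 3')
    #     -> {'allow-oom': True, 'exitstatus': '3'}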
@@ -248,17 +256,18 @@ def find_tests(substring=None):
if dirpath == '.':
continue
for filename in filenames:
if not filename.endswith('.js'):
continue
if filename in ('shell.js', 'browser.js', 'jsref.js'):
continue
test = os.path.join(dirpath, filename)
- if substring is None or substring in os.path.relpath(test, TEST_DIR):
+ if substring is None \
+ or substring in os.path.relpath(test, TEST_DIR):
ans.append(test)
return ans
def tmppath(token):
fd, path = tempfile.mkstemp(prefix=token)
os.close(fd)
return path
@@ -275,76 +284,78 @@ def th_run_cmd(cmdline, options, l):
options["close_fds"] = True
p = Popen(cmdline, stdin=PIPE, stdout=PIPE, stderr=PIPE, **options)
l[0] = p
out, err = p.communicate()
l[1] = (out, err, p.returncode)
def run_timeout_cmd(cmdline, options, timeout=60.0):
- l = [ None, None ]
+ l = [None, None]
timed_out = False
th = Thread(target=th_run_cmd, args=(cmdline, options, l))
# If our SIGINT handler is set to SIG_IGN (ignore)
# then we are running as a child process for parallel
# execution and we must ensure to kill our child
# when we are signaled to exit.
sigint_handler = signal.getsignal(signal.SIGINT)
sigterm_handler = signal.getsignal(signal.SIGTERM)
- if (sigint_handler == signal.SIG_IGN):
+ if sigint_handler == signal.SIG_IGN:
def handleChildSignal(sig, frame):
try:
if sys.platform != 'win32':
os.kill(l[0].pid, signal.SIGKILL)
else:
import ctypes
- ctypes.windll.kernel32.TerminateProcess(int(l[0]._handle), -1)
+ ctypes.windll.kernel32.TerminateProcess(int(l[0]._handle),
+ -1)
except OSError:
pass
- if (sig == signal.SIGTERM):
+ if sig == signal.SIGTERM:
sys.exit(0)
signal.signal(signal.SIGINT, handleChildSignal)
signal.signal(signal.SIGTERM, handleChildSignal)
th.start()
th.join(timeout)
while th.isAlive():
if l[0] is not None:
try:
# In Python 3, we could just do l[0].kill().
if sys.platform != 'win32':
os.kill(l[0].pid, signal.SIGKILL)
else:
import ctypes
- ctypes.windll.kernel32.TerminateProcess(int(l[0]._handle), -1)
+ ctypes.windll.kernel32.TerminateProcess(int(l[0]._handle),
+ -1)
time.sleep(.1)
timed_out = True
except OSError:
# Expecting a "No such process" error
pass
th.join()
# Restore old signal handlers
- if (sigint_handler == signal.SIG_IGN):
+ if sigint_handler == signal.SIG_IGN:
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, sigterm_handler)
(out, err, code) = l[1]
return (out, err, code, timed_out)
def run_cmd(cmdline, env, timeout):
- return run_timeout_cmd(cmdline, { 'env': env }, timeout)
+ return run_timeout_cmd(cmdline, {'env': env}, timeout)
def run_cmd_avoid_stdio(cmdline, env, timeout):
stdoutPath, stderrPath = tmppath('jsstdout'), tmppath('jsstderr')
env['JS_STDOUT'] = stdoutPath
env['JS_STDERR'] = stderrPath
- _, __, code = run_timeout_cmd(cmdline, { 'env': env }, timeout)
+ _, __, code = run_timeout_cmd(cmdline, {'env': env}, timeout)
return read_and_unlink(stdoutPath), read_and_unlink(stderrPath), code
def run_test(test, prefix, options):
cmd = test.command(prefix, LIB_DIR)
if options.show_cmd:
print(subprocess.list2cmdline(cmd))
if options.avoid_stdio:
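For reference, a minimal POSIX-only sketch (not part of the patch; assumed helper name) of the thread-plus-join timeout pattern that run_timeout_cmd implements; Windows additionally needs the TerminateProcess branch shown above:

    import os
    import signal
    from subprocess import PIPE, Popen
    from threading import Thread

    def run_with_timeout(cmdline, timeout):
        result = [None, None]  # [Popen object, (stdout, stderr, returncode)]

        def worker():
            p = Popen(cmdline, stdout=PIPE, stderr=PIPE)
            result[0] = p
            out, err = p.communicate()
            result[1] = (out, err, p.returncode)

        th = Thread(target=worker)
        th.start()
        th.join(timeout)  # wait up to `timeout` seconds for completion
        timed_out = th.is_alive()
        if timed_out and result[0] is not None:
            os.kill(result[0].pid, signal.SIGKILL)  # POSIX-only kill
            th.join()  # communicate() returns once the child has died
        out, err, code = result[1]
        return out, err, code, timed_out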
@@ -362,25 +373,27 @@ def run_test(test, prefix, options):
pathvar = 'LD_LIBRARY_PATH'
elif sys.platform.startswith('darwin'):
pathvar = 'DYLD_LIBRARY_PATH'
elif sys.platform.startswith('win'):
pathvar = 'PATH'
if pathvar:
bin_dir = os.path.dirname(cmd[0])
if pathvar in env:
- env[pathvar] = '%s%s%s' % (bin_dir, os.pathsep, env[pathvar])
+ env[pathvar] = '{}{}{}'.format(bin_dir, os.pathsep, env[pathvar])
else:
env[pathvar] = bin_dir
out, err, code, timed_out = run(cmd, env, options.timeout)
return TestOutput(test, cmd, out, err, code, None, timed_out)
def run_test_remote(test, device, prefix, options):
- cmd = test.command(prefix, posixpath.join(options.remote_test_root, 'lib/'), posixpath.join(options.remote_test_root, 'tests'))
+ cmd = test.command(prefix,
+ posixpath.join(options.remote_test_root, 'lib/'),
+ posixpath.join(options.remote_test_root, 'tests'))
if options.show_cmd:
print(subprocess.list2cmdline(cmd))
env = {}
if test.tz_pacific:
env['TZ'] = 'PST8PDT'
env['LD_LIBRARY_PATH'] = options.remote_test_root
@@ -440,22 +453,24 @@ def check_output(out, err, rc, timed_out
# Allow a non-zero exit code if we want to allow OOM, but only if we
# actually got OOM.
if test.allow_oom and 'out of memory' in err \
and 'Assertion failure' not in err and 'MOZ_CRASH' not in err:
return True
# Allow a non-zero exit code if we want to allow unhandlable OOM, but
# only if we actually got unhandlable OOM.
- if test.allow_unhandlable_oom and 'Assertion failure: [unhandlable oom]' in err:
+ if test.allow_unhandlable_oom \
+ and 'Assertion failure: [unhandlable oom]' in err:
return True
# Allow a non-zero exit code if we want to allow too-much-recursion and
# the test actually over-recursed.
- if test.allow_overrecursed and 'too much recursion' in err and 'Assertion failure' not in err:
+ if test.allow_overrecursed and 'too much recursion' in err \
+ and 'Assertion failure' not in err:
return True
return False
return True
def print_tinderbox(ok, res):
# Output test failures in a TBPL parsable format, eg:
@@ -470,17 +485,17 @@ def print_tinderbox(ok, res):
# INFO stdout > bar
# INFO stdout > baz
# INFO stderr 2> TypeError: or something
# TEST-UNEXPECTED-FAIL | jit_test.py: Test execution interrupted by user
result = "TEST-PASS" if ok else "TEST-UNEXPECTED-FAIL"
message = "Success" if ok else res.describe_failure()
jitflags = " ".join(res.test.jitflags)
print("{} | {} | {} (code {}, args \"{}\")".format(
- result, res.test.relpath_top, message, res.rc, jitflags))
+ result, res.test.relpath_top, message, res.rc, jitflags))
# For failed tests, print as much information as we have, to aid debugging.
if ok:
return
print("INFO exit-status : {}".format(res.rc))
print("INFO timed-out : {}".format(res.timed_out))
for line in res.out.splitlines():
print("INFO stdout > " + line.strip())
@@ -507,17 +522,18 @@ def run_tests_parallel(tests, prefix, op
# check for worker completion ourselves regularly.
notify_queue = queue_manager.Queue()
# This queue will contain the return value of the function
# processing the test results.
total_tests = len(tests) * options.repeat
result_process_return_queue = queue_manager.Queue()
result_process = Process(target=process_test_results_parallel,
- args=(async_test_result_queue, result_process_return_queue,
+ args=(async_test_result_queue,
+ result_process_return_queue,
notify_queue, total_tests, options))
result_process.start()
# Ensure that a SIGTERM is handled the same way as SIGINT
# to terminate all child processes.
sigint_handler = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGTERM, sigint_handler)
@@ -537,43 +553,47 @@ def run_tests_parallel(tests, prefix, op
# Initially start as many jobs as allowed to run parallel
# Always enqueue at least one to avoid a curious deadlock
for i in range(max(1, min(options.max_jobs, total_tests))):
notify_queue.put(True)
# For every item in the notify queue, start one new worker.
# Every completed worker adds a new item to this queue.
while notify_queue.get():
- if (testcnt < total_tests):
+ if testcnt < total_tests:
# Start one new worker
test = tests[testcnt % len(tests)]
- worker_process = Process(target=wrap_parallel_run_test, args=(test, prefix, async_test_result_queue, options))
+ worker_process = Process(target=wrap_parallel_run_test,
+ args=(test, prefix,
+ async_test_result_queue,
+ options))
worker_processes.append(worker_process)
worker_process.start()
testcnt += 1
# Collect completed workers
worker_processes = remove_completed_workers(worker_processes)
else:
break
# Wait for all processes to terminate
while len(worker_processes) > 0:
worker_processes = remove_completed_workers(worker_processes)
- # Signal completion to result processor, then wait for it to complete on its own
+ # Signal completion to result processor, then wait for it to complete
+ # on its own
async_test_result_queue.put(None)
result_process.join()
# Return what the result process has returned to us
return result_process_return_queue.get()
except (Exception, KeyboardInterrupt) as e:
# Print the exception if it's not an interrupt,
# might point to a bug or other faulty condition
- if not isinstance(e,KeyboardInterrupt):
+ if not isinstance(e, KeyboardInterrupt):
traceback.print_exc()
for worker in worker_processes:
try:
worker.terminate()
except:
pass
@@ -581,48 +601,51 @@ def run_tests_parallel(tests, prefix, op
return False
def get_parallel_results(async_test_result_queue, notify_queue):
while True:
async_test_result = async_test_result_queue.get()
# Check if we are supposed to terminate
- if (async_test_result == None):
+ if async_test_result is None:
return
# Notify parent that we got a result
notify_queue.put(True)
yield async_test_result
-def process_test_results_parallel(async_test_result_queue, return_queue, notify_queue, num_tests, options):
+def process_test_results_parallel(async_test_result_queue, return_queue,
+ notify_queue, num_tests, options):
gen = get_parallel_results(async_test_result_queue, notify_queue)
ok = process_test_results(gen, num_tests, options)
return_queue.put(ok)
def print_test_summary(num_tests, failures, complete, doing, options):
if failures:
if options.write_failures:
try:
out = open(options.write_failures, 'w')
- # Don't write duplicate entries when we are doing multiple failures per job.
+ # Don't write duplicate entries when we are doing multiple
+ # failures per job.
written = set()
for res in failures:
if res.test.path not in written:
- out.write(os.path.relpath(res.test.path, TEST_DIR) + '\n')
+ out.write(os.path.relpath(res.test.path, TEST_DIR)
+ + '\n')
if options.write_failure_output:
out.write(res.out)
out.write(res.err)
out.write('Exit code: ' + str(res.rc) + "\n")
written.add(res.test.path)
out.close()
except IOError:
- sys.stderr.write("Exception thrown trying to write failure file '%s'\n"%
- options.write_failures)
+ sys.stderr.write("Exception thrown trying to write failure"
+ " file '{}'\n".format(options.write_failures))
traceback.print_exc()
sys.stderr.write('---\n')
def show_test(res):
if options.show_failed:
print(' ' + subprocess.list2cmdline(res.cmd))
else:
print(' ' + ' '.join(res.test.jitflags + [res.test.path]))
@@ -632,85 +655,89 @@ def print_test_summary(num_tests, failur
if not res.timed_out:
show_test(res)
print('TIMEOUTS:')
for res in failures:
if res.timed_out:
show_test(res)
else:
- print('PASSED ALL' + ('' if complete else ' (partial run -- interrupted by user %s)' % doing))
+ print('PASSED ALL'
+ + ('' if complete
+ else ' (partial run -- interrupted by user {})'.format(doing)))
if options.tinderbox:
num_failures = len(failures) if failures else 0
print('Result summary:')
- print('Passed: %d' % (num_tests - num_failures))
- print('Failed: %d' % num_failures)
+ print('Passed: {:d}'.format(num_tests - num_failures))
+ print('Failed: {:d}'.format(num_failures))
return not failures
def process_test_results(results, num_tests, options):
pb = NullProgressBar()
failures = []
timeouts = 0
complete = False
doing = 'before starting'
if num_tests == 0:
pb.finish(True)
complete = True
return print_test_summary(num_tests, failures, complete, doing, options)
- if not options.hide_progress and not options.show_cmd and ProgressBar.conservative_isatty():
+ if not options.hide_progress and not options.show_cmd \
+ and ProgressBar.conservative_isatty():
fmt = [
{'value': 'PASS', 'color': 'green'},
{'value': 'FAIL', 'color': 'red'},
{'value': 'TIMEOUT', 'color': 'blue'},
{'value': 'SKIP', 'color': 'brightgray'},
]
pb = ProgressBar(num_tests, fmt)
try:
for i, res in enumerate(results):
- ok = check_output(res.out, res.err, res.rc, res.timed_out, res.test, options)
+ ok = check_output(res.out, res.err, res.rc, res.timed_out,
+ res.test, options)
if ok:
show_output = options.show_output and not options.failed_only
else:
show_output = options.show_output or not options.no_show_failed
if show_output:
pb.beginline()
sys.stdout.write(res.out)
sys.stdout.write(res.err)
- sys.stdout.write('Exit code: %s\n' % res.rc)
+ sys.stdout.write('Exit code: {}\n'.format(res.rc))
if res.test.valgrind and not show_output:
pb.beginline()
sys.stdout.write(res.err)
- doing = 'after %s' % res.test.relpath_tests
+ doing = 'after {}'.format(res.test.relpath_tests)
if not ok:
failures.append(res)
if res.timed_out:
- pb.message("TIMEOUT - %s" % res.test.relpath_tests)
+ pb.message("TIMEOUT - {}".format(res.test.relpath_tests))
timeouts += 1
else:
- pb.message("FAIL - %s" % res.test.relpath_tests)
+ pb.message("FAIL - {}".format(res.test.relpath_tests))
if options.tinderbox:
print_tinderbox(ok, res)
n = i + 1
pb.update(n, {
'PASS': n - len(failures),
'FAIL': len(failures),
'TIMEOUT': timeouts,
- 'SKIP': 0}
- )
+ 'SKIP': 0
+ })
complete = True
except KeyboardInterrupt:
print("TEST-UNEXPECTED-FAIL | jit_test.py" +
" : Test execution interrupted by user")
pb.finish(True)
return print_test_summary(num_tests, failures, complete, doing, options)
@@ -730,46 +757,58 @@ def get_remote_results(tests, device, pr
try:
for i in xrange(0, options.repeat):
for test in tests:
yield run_test_remote(test, device, prefix, options)
except devicemanager.DMError as e:
# After a devicemanager error, the device is typically in a
# state where all further tests will fail so there is no point in
# continuing here.
- sys.stderr.write("Error running remote tests: %s" % e.message)
+ sys.stderr.write("Error running remote tests: {}".format(e.message))
def push_libs(options, device):
# This saves considerable time in pushing unnecessary libraries
# to the device but needs to be updated if the dependencies change.
- required_libs = ['libnss3.so', 'libmozglue.so', 'libnspr4.so', 'libplc4.so', 'libplds4.so']
+ required_libs = ['libnss3.so', 'libmozglue.so', 'libnspr4.so',
+ 'libplc4.so', 'libplds4.so']
for file in os.listdir(options.local_lib):
if file in required_libs:
remote_file = posixpath.join(options.remote_test_root, file)
device.pushFile(os.path.join(options.local_lib, file), remote_file)
def push_progs(options, device, progs):
for local_file in progs:
- remote_file = posixpath.join(options.remote_test_root, os.path.basename(local_file))
+ remote_file = posixpath.join(options.remote_test_root,
+ os.path.basename(local_file))
device.pushFile(local_file, remote_file)
def run_tests_remote(tests, prefix, options):
# Setup device with everything needed to run our tests.
- from mozdevice import devicemanager, devicemanagerADB, devicemanagerSUT
+ from mozdevice import devicemanagerADB, devicemanagerSUT
if options.device_transport == 'adb':
if options.device_ip:
- dm = devicemanagerADB.DeviceManagerADB(options.device_ip, options.device_port, deviceSerial=options.device_serial, packageName=None, deviceRoot=options.remote_test_root)
+ dm = devicemanagerADB.DeviceManagerADB(
+ options.device_ip, options.device_port,
+ deviceSerial=options.device_serial,
+ packageName=None,
+ deviceRoot=options.remote_test_root)
else:
- dm = devicemanagerADB.DeviceManagerADB(deviceSerial=options.device_serial, packageName=None, deviceRoot=options.remote_test_root)
+ dm = devicemanagerADB.DeviceManagerADB(
+ deviceSerial=options.device_serial,
+ packageName=None,
+ deviceRoot=options.remote_test_root)
else:
- dm = devicemanagerSUT.DeviceManagerSUT(options.device_ip, options.device_port, deviceRoot=options.remote_test_root)
+ dm = devicemanagerSUT.DeviceManagerSUT(
+ options.device_ip, options.device_port,
+ deviceRoot=options.remote_test_root)
if options.device_ip == None:
- print('Error: you must provide a device IP to connect to via the --device option')
+ print('Error: you must provide a device IP to connect to via the'
+ ' --device option')
sys.exit(1)
# Update the test root to point to our test directory.
jit_tests_dir = posixpath.join(options.remote_test_root, 'jit-tests')
options.remote_test_root = posixpath.join(jit_tests_dir, 'jit-tests')
# Push js shell and libraries.
if dm.dirExists(jit_tests_dir):
@@ -777,33 +816,35 @@ def run_tests_remote(tests, prefix, opti
dm.mkDirs(options.remote_test_root)
push_libs(options, dm)
push_progs(options, dm, [prefix[0]])
dm.chmodDir(options.remote_test_root)
Test.CacheDir = posixpath.join(options.remote_test_root, '.js-cache')
dm.mkDir(Test.CacheDir)
- dm.pushDir(JS_TESTS_DIR, posixpath.join(jit_tests_dir, 'tests'), timeout=600)
+ dm.pushDir(JS_TESTS_DIR, posixpath.join(jit_tests_dir, 'tests'),
+ timeout=600)
- dm.pushDir(os.path.dirname(TEST_DIR), options.remote_test_root, timeout=600)
+ dm.pushDir(os.path.dirname(TEST_DIR), options.remote_test_root,
+ timeout=600)
prefix[0] = os.path.join(options.remote_test_root, 'js')
# Run all tests.
gen = get_remote_results(tests, dm, prefix, options)
ok = process_test_results(gen, len(tests) * options.repeat, options)
return ok
def parse_jitflags(options):
- jitflags = [ [ '-' + flag for flag in flags ]
- for flags in options.jitflags.split(',') ]
+ jitflags = [['-' + flag for flag in flags]
+ for flags in options.jitflags.split(',')]
for flags in jitflags:
for flag in flags:
if flag not in ('-m', '-a', '-p', '-d', '-n'):
- print('Invalid jit flag: "%s"' % flag)
+ print('Invalid jit flag: "{}"'.format(flag))
sys.exit(1)
return jitflags
def platform_might_be_android():
try:
# The python package for SL4A provides an |android| module.
# If that module is present, we're likely in SL4A-python on
# device. False positives and negatives are possible,