Bug 1256699 - Fix jittests --repeat option r=sfink
author Jon Coppeard <jcoppeard@mozilla.com>
Wed, 16 Mar 2016 09:41:57 +0000
changeset 326820 eb8c477341046617a9a55b0b5f768f9f4abd600a
parent 326819 5e4e30fff99bd6fb2c61be93a5a01eb169921bd2
child 326821 1a5cb1ac5a7c4f7f38bf32013dc0da4712eea186
push id 6048
push user kmoir@mozilla.com
push date Mon, 06 Jun 2016 19:02:08 +0000
treeherder mozilla-beta@46d72a56c57d
reviewers sfink
bugs 1256699
milestone 48.0a1
Bug 1256699 - Fix jittests --repeat option r=sfink
js/src/jit-test/jit_test.py
js/src/tests/jstests.py
js/src/tests/lib/jittests.py
--- a/js/src/jit-test/jit_test.py
+++ b/js/src/jit-test/jit_test.py
@@ -227,29 +227,32 @@ def main(argv):
     if options.total_chunks > 1:
         total_tests = len(test_list)
         tests_per_chunk = math.ceil(total_tests / float(options.total_chunks))
         start = int(round((options.this_chunk - 1) * tests_per_chunk))
         end = int(round(options.this_chunk * tests_per_chunk))
         test_list = test_list[start:end]
 
     # The full test list is ready. Now create copies for each JIT configuration.
-    job_list = []
-    test_flags = []
     if options.tbpl:
         # Running all bits would take forever. Instead, we test a few
         # interesting combinations.
         test_flags = get_jitflags('all')
     elif options.ion:
         test_flags = get_jitflags('ion')
     else:
         test_flags = get_jitflags(options.jitflags)
 
-    job_list = [_ for test in test_list
-                for _ in test.copy_variants(test_flags)]
+    job_list = (_ for test in test_list
+                for _ in test.copy_variants(test_flags))
+    job_count = len(test_list) * len(test_flags)
+
+    if options.repeat:
+        job_list = (test for test in job_list for i in range(options.repeat))
+        job_count *= options.repeat
 
     if options.ignore_timeouts:
         read_all = False
         try:
             with open(options.ignore_timeouts) as f:
                 options.ignore_timeouts = set(
                     [line.strip('\n') for line in f.readlines()])
         except IOError:
@@ -265,24 +268,24 @@ def main(argv):
 
     prefix += ['-f', prologue]
 
     # Clean up any remnants from previous crashes etc
     shutil.rmtree(jittests.JS_CACHE_DIR, ignore_errors=True)
     os.mkdir(jittests.JS_CACHE_DIR)
 
     if options.debugger:
-        if len(job_list) > 1:
+        if job_count > 1:
             print('Multiple tests match command line'
                   ' arguments, debugger can only run one')
             for tc in job_list:
                 print('    {}'.format(tc.path))
             sys.exit(1)
 
-        tc = job_list[0]
+        tc = job_list.next()
         if options.debugger == 'gdb':
             debug_cmd = ['gdb', '--args']
         elif options.debugger == 'lldb':
             debug_cmd = ['lldb', '--']
         elif options.debugger == 'rr':
             debug_cmd = ['rr', 'record']
         else:
             debug_cmd = options.debugger.split()
@@ -291,20 +294,20 @@ def main(argv):
             subprocess.call(debug_cmd + tc.command(prefix, jittests.LIB_DIR, jittests.MODULE_DIR))
             if options.debugger == 'rr':
                 subprocess.call(['rr', 'replay'])
         sys.exit()
 
     try:
         ok = None
         if options.remote:
-            ok = jittests.run_tests_remote(job_list, prefix, options)
+            ok = jittests.run_tests_remote(job_list, job_count, prefix, options)
         else:
             with change_env(test_environment):
-                ok = jittests.run_tests(job_list, prefix, options)
+                ok = jittests.run_tests(job_list, job_count, prefix, options)
         if not ok:
             sys.exit(2)
     except OSError:
         if not os.path.exists(prefix[0]):
             print("JS shell argument: file does not exist:"
                   " '{}'".format(prefix[0]), file=sys.stderr)
             sys.exit(1)
         else:
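
The key change in jit_test.py is visible above: job_list becomes a generator expression instead of a list, so its size can no longer be taken with len(); a separate job_count is computed from the inputs and scaled when --repeat is given, and the debugger branch pulls its single test with next() rather than indexing. A minimal, standalone sketch of that pattern (the names tests, flags, jobs and count are illustrative stand-ins, not taken from the patch):

    tests = ['t1', 't2']                  # stand-ins for JitTest instances
    flags = [['--ion'], ['--no-ion']]     # stand-ins for jit flag variants

    # Lazy stream of (test, flags) jobs; len() would fail on this.
    jobs = ((test, flag) for test in tests for flag in flags)
    count = len(tests) * len(flags)

    repeat = 3                            # corresponds to --repeat
    jobs = (job for job in jobs for _ in range(repeat))
    count *= repeat

    assert count == 12
    assert sum(1 for _ in jobs) == count  # the generator yields exactly `count` jobs
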
--- a/js/src/tests/jstests.py
+++ b/js/src/tests/jstests.py
@@ -313,21 +313,17 @@ def load_tests(options, requested_paths,
     if options.run_only_skipped:
         options.run_skipped = True
         test_gen = (_ for _ in test_gen if not _.enable)
 
     if not options.run_slow_tests:
         test_gen = (_ for _ in test_gen if not _.slow)
 
     if options.repeat:
-        def repeat_gen(tests):
-            for test in tests:
-                for i in range(options.repeat):
-                    yield test
-        test_gen = repeat_gen(test_gen)
+        test_gen = (test for test in test_gen for i in range(options.repeat))
         test_count *= options.repeat
 
     return test_count, test_gen
 
 
 def main():
     options, prefix, requested_paths, excluded_paths = parse_args()
     if options.js_shell is not None and not (isfile(options.js_shell) and
@@ -344,25 +340,24 @@ def main():
 
     if test_count == 0:
         print('no tests selected')
         return 1
 
     test_dir = dirname(abspath(__file__))
 
     if options.debug:
-        tests = list(test_gen)
-        if len(tests) > 1:
+        if test_count > 1:
             print('Multiple tests match command line arguments,'
                   ' debugger can only run one')
-            for tc in tests:
+            for tc in test_gen:
                 print('    {}'.format(tc.path))
             return 2
 
-        cmd = tests[0].get_command(prefix)
+        cmd = test_gen.next().get_command(prefix)
         if options.show_cmd:
             print(list2cmdline(cmd))
         with changedir(test_dir), change_env(test_environment):
             call(cmd)
         return 0
 
     with changedir(test_dir), change_env(test_environment):
         results = ResultsSink(options, test_count)
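
jstests.py already fed the harness a generator; the patch simply inlines the repeat_gen helper as a generator expression and makes the debugger branch consume one element from the stream instead of materializing the whole list. A rough sketch of both pieces (repeat_each is a hypothetical name; the patch, which targets Python 2, spells the single-element pull as test_gen.next(), which the built-in next() used here mirrors):

    def repeat_each(tests, times):
        # Each test is yielded `times` times in a row, matching the inline
        # generator expression the patch adds to load_tests().
        return (test for test in tests for _ in range(times))

    test_gen = repeat_each(iter(['a.js', 'b.js']), 2)
    assert next(test_gen) == 'a.js'
    assert next(test_gen) == 'a.js'
    assert next(test_gen) == 'b.js'
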
--- a/js/src/tests/lib/jittests.py
+++ b/js/src/tests/lib/jittests.py
@@ -536,31 +536,30 @@ def process_test_results(results, num_te
         complete = True
     except KeyboardInterrupt:
         print("TEST-UNEXPECTED-FAIL | jit_test.py" +
               " : Test execution interrupted by user")
 
     pb.finish(True)
     return print_test_summary(num_tests, failures, complete, doing, options)
 
-def run_tests(tests, prefix, options):
+def run_tests(tests, num_tests, prefix, options):
     # The jstests tasks runner requires the following options. The names are
     # taken from the jstests options processing code, which are frequently
     # subtly different from the options jit-tests expects. As such, we wrap
     # them here, as needed.
     AdaptorOptions = namedtuple("AdaptorOptions", [
         "worker_count", "passthrough", "timeout", "output_fp",
         "hide_progress", "run_skipped", "show_cmd"])
     shim_options = AdaptorOptions(options.max_jobs, False, options.timeout,
                                   sys.stdout, False, True, options.show_cmd)
 
     # The test runner wants the prefix as a static on the Test class.
     JitTest.js_cmd_prefix = prefix
 
-    num_tests = len(tests) * options.repeat
     pb = create_progressbar(num_tests, options)
     gen = run_all_tests(tests, prefix, pb, shim_options)
     ok = process_test_results(gen, num_tests, pb, options)
     return ok
 
 def get_remote_results(tests, device, prefix, options):
     from mozdevice import devicemanager
 
@@ -586,17 +585,17 @@ def push_libs(options, device):
             device.pushFile(os.path.join(options.local_lib, file), remote_file)
 
 def push_progs(options, device, progs):
     for local_file in progs:
         remote_file = posixpath.join(options.remote_test_root,
                                      os.path.basename(local_file))
         device.pushFile(local_file, remote_file)
 
-def run_tests_remote(tests, prefix, options):
+def run_tests_remote(tests, num_tests, prefix, options):
     # Setup device with everything needed to run our tests.
     from mozdevice import devicemanagerADB, devicemanagerSUT
 
     if options.device_transport == 'adb':
         if options.device_ip:
             dm = devicemanagerADB.DeviceManagerADB(
                 options.device_ip, options.device_port,
                 deviceSerial=options.device_serial,
@@ -634,17 +633,16 @@ def run_tests_remote(tests, prefix, opti
     dm.pushDir(JS_TESTS_DIR, posixpath.join(jit_tests_dir, 'tests'),
                timeout=600)
 
     dm.pushDir(os.path.dirname(TEST_DIR), options.remote_test_root,
                timeout=600)
     prefix[0] = os.path.join(options.remote_test_root, 'js')
 
     # Run all tests.
-    num_tests = len(tests) * options.repeat
     pb = create_progressbar(num_tests, options)
     gen = get_remote_results(tests, dm, prefix, options)
     ok = process_test_results(gen, num_tests, pb, options)
     return ok
 
 def platform_might_be_android():
     try:
         # The python package for SL4A provides an |android| module.
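
Because the job list is now a one-shot generator, run_tests and run_tests_remote can no longer derive the total as len(tests) * options.repeat themselves; both grow an explicit num_tests parameter that jit_test.py supplies. A hedged sketch of the resulting calling convention, using a stand-in runner rather than the real jittests functions:

    def run_tests_sketch(jobs, num_jobs):
        # Stand-in for jittests.run_tests: the caller passes the precomputed
        # count, the runner only iterates the generator.
        completed = 0
        for job in jobs:                  # a real runner would execute each job
            completed += 1
        return completed == num_jobs

    job_gen = (n for n in range(4))
    assert run_tests_sketch(job_gen, 4)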