Bug 1176890 - Start running jstests faster on windows; r=sfink
author Terrence Cole <terrence@mozilla.com>
Thu, 25 Jun 2015 15:10:00 -0700
changeset 268539 198da127cf26f2ae90c3ce33d6d47b034690d9ff
parent 268538 22bc3c64183865a51c1a0ae2814eca1c41ec3578
child 268540 18039b7d024d89f7d136940dcb14682e777c8c9c
push id 4932
push user jlund@mozilla.com
push date Mon, 10 Aug 2015 18:23:06 +0000
treeherder mozilla-esr52@6dd5a4f5f745
reviewers sfink
bugs 1176890
milestone 41.0a1
Bug 1176890 - Start running jstests faster on windows; r=sfink
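The gist of the change: jstests.py used to build the complete test list, including parsing every test header, before any worker started, which on Windows left the worker threads idle for the duration of a slow filesystem walk. The patch turns manifest.load() into a generator and feeds it to the Windows workers through a queue from a producer thread, so execution starts as soon as the first test is discovered. A minimal, self-contained sketch of that shape (Python 3 spelling; discover_tests, run_all, and END are illustrative names, not the patch's identifiers):

    import queue
    import threading
    import time

    END = object()  # end-of-stream sentinel, in the spirit of the patch's EndMarker

    def discover_tests():
        # Stand-in for manifest.load(): yields tests as they are found.
        for i in range(10):
            time.sleep(0.1)          # pretend each test header takes time to parse
            yield 'test-{}.js'.format(i)

    def run_all(worker_count=4):
        tasks = queue.Queue()

        def worker():
            while True:
                test = tasks.get()
                if test is END:
                    return
                print('running', test)  # consumers start before discovery ends

        workers = [threading.Thread(target=worker) for _ in range(worker_count)]
        for w in workers:
            w.start()

        # The patch runs this loop on a daemon thread so the main thread can
        # collect results concurrently; it is inlined here for brevity.
        for test in discover_tests():
            tasks.put(test)
        for _ in workers:
            tasks.put(END)           # one end marker per worker
        for w in workers:
            w.join()

    if __name__ == '__main__':
        run_all()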
js/src/tests/js1_2/regexp/RegExp_input.js
js/src/tests/js1_2/regexp/RegExp_input_as_array.js
js/src/tests/jstests.py
js/src/tests/lib/manifest.py
js/src/tests/lib/tasks_unix.py
js/src/tests/lib/tasks_win.py
--- a/js/src/tests/js1_2/regexp/RegExp_input.js
deleted file mode 100644
--- a/js/src/tests/js1_2/regexp/RegExp_input_as_array.js
deleted file mode 100644
--- a/js/src/tests/jstests.py
+++ b/js/src/tests/jstests.py
@@ -252,121 +252,121 @@ def parse_args():
                              options.hide_progress)
 
     return (options, prefix, requested_paths, excluded_paths)
 
 
 def load_tests(options, requested_paths, excluded_paths):
     """
-    Returns a tuple: (skipped_tests, test_list)
+    Returns a tuple: (test_count, test_gen)
-        skip_list: [iterable<Test>] Tests found but skipped.
-        test_list: [iterable<Test>] Tests found that should be run.
+        test_count: [int] Number of tests that will be in test_gen.
+        test_gen: [iterable<Test>] Tests found that should be run.
     """
     import lib.manifest as manifest
 
     if options.js_shell is None:
         xul_tester = manifest.NullXULInfoTester()
     else:
         if options.xul_info_src is None:
             xul_info = manifest.XULInfo.create(options.js_shell)
         else:
             xul_abi, xul_os, xul_debug = options.xul_info_src.split(r':')
-            xul_debug = xul_debug.lower() is 'true'
+            xul_debug = xul_debug.lower() == 'true'
             xul_info = manifest.XULInfo(xul_abi, xul_os, xul_debug)
         xul_tester = manifest.XULInfoTester(xul_info, options.js_shell)
 
     test_dir = dirname(abspath(__file__))
-    test_list = manifest.load(test_dir, requested_paths, excluded_paths,
-                              xul_tester)
+    test_count = manifest.count_tests(test_dir, requested_paths, excluded_paths)
+    test_gen = manifest.load(test_dir, requested_paths, excluded_paths,
+                             xul_tester)
-    skip_list = []
 
     if options.make_manifests:
-        manifest.make_manifests(options.make_manifests, test_list)
+        manifest.make_manifests(options.make_manifests, test_gen)
         sys.exit()
 
-    # Create a new test list. Apply each TBPL configuration to every test.
+    # Wrap the test generator so that each TBPL configuration is applied
+    # to every test.
     flags_list = None
     if options.tbpl:
         flags_list = get_jitflags('all')
     elif options.tbpl_debug:
         flags_list = get_jitflags('debug')
     else:
         flags_list = get_jitflags(options.jitflags, none=None)
 
     if flags_list:
-        new_test_list = []
-        for test in test_list:
-            for jitflags in flags_list:
-                tmp_test = copy(test)
-                tmp_test.jitflags = copy(test.jitflags)
-                tmp_test.jitflags.extend(jitflags)
-                new_test_list.append(tmp_test)
-        test_list = new_test_list
+        def flag_gen(tests):
+            for test in tests:
+                for jitflags in flags_list:
+                    tmp_test = copy(test)
+                    tmp_test.jitflags = copy(test.jitflags)
+                    tmp_test.jitflags.extend(jitflags)
+                    yield tmp_test
+        test_gen = flag_gen(test_gen)
 
     if options.test_file:
         paths = set()
         for test_file in options.test_file:
             paths |= set(
                 [line.strip() for line in open(test_file).readlines()])
-        test_list = [_ for _ in test_list if _.path in paths]
+        test_gen = (_ for _ in test_gen if _.path in paths)
 
     if options.no_extensions:
         pattern = os.sep + 'extensions' + os.sep
-        test_list = [_ for _ in test_list if pattern not in _.path]
+        test_gen = (_ for _ in test_gen if pattern not in _.path)
 
     if not options.random:
-        test_list = [_ for _ in test_list if not _.random]
+        test_gen = (_ for _ in test_gen if not _.random)
 
     if options.run_only_skipped:
         options.run_skipped = True
-        test_list = [_ for _ in test_list if not _.enable]
+        test_gen = (_ for _ in test_gen if not _.enable)
 
     if not options.run_slow_tests:
-        test_list = [_ for _ in test_list if not _.slow]
+        test_gen = (_ for _ in test_gen if not _.slow)
 
-    return test_list
+    return test_count, test_gen
 
 
 def main():
     options, prefix, requested_paths, excluded_paths = parse_args()
     if options.js_shell is not None and not isfile(options.js_shell):
         print('Could not find shell at given path.')
         return 1
-    test_list = load_tests(options, requested_paths, excluded_paths)
+    test_count, test_gen = load_tests(options, requested_paths, excluded_paths)
 
-    if not test_list:
+    if test_count == 0:
         print('no tests selected')
         return 1
 
     test_dir = dirname(abspath(__file__))
 
     if options.debug:
-        if len(test_list) > 1:
+        # Materialize the stream once: a generator cannot be len()ed,
+        # indexed, or iterated a second time.
+        debug_tests = list(test_gen)
+        if len(debug_tests) > 1:
             print('Multiple tests match command line arguments,'
                   ' debugger can only run one')
-            for tc in test_list:
+            for tc in debug_tests:
                 print('    {}'.format(tc.path))
             return 2
 
-        cmd = test_list[0].get_command(TestCase.js_cmd_prefix)
+        cmd = debug_tests[0].get_command(TestCase.js_cmd_prefix)
         if options.show_cmd:
             print(list2cmdline(cmd))
         with changedir(test_dir):
             call(cmd)
         return 0
 
     with changedir(test_dir):
         # Force Pacific time zone to avoid failures in Date tests.
         os.environ['TZ'] = 'PST8PDT'
         # Force date strings to English.
         os.environ['LC_TIME'] = 'en_US.UTF-8'
 
-        results = ResultsSink(options, len(test_list))
+        results = ResultsSink(options, test_count)
         try:
-            for out in run_all_tests(test_list, prefix, results, options):
+            for out in run_all_tests(test_gen, prefix, results, options):
                 results.push(out)
             results.finish(True)
         except KeyboardInterrupt:
             results.finish(False)
 
         return 0 if results.all_passed() else 1
 
     return 0
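Each of the filters above now wraps the generator instead of rebuilding a list, so no test is parsed until run_all_tests() actually pulls on the chain. A toy demonstration of that lazy chaining (hypothetical values, not harness code):

    def produce():
        for n in range(6):
            print('produced', n)           # printed only when a value is demanded
            yield n

    gen = produce()
    gen = (n for n in gen if n % 2 == 0)   # analogous to the no_extensions filter
    gen = (n for n in gen if n < 4)        # analogous to the run_slow_tests filter

    # Nothing has run yet; items flow through all filters one at a time:
    print(next(gen))   # produced 0, then prints 0
    print(next(gen))   # produced 1, produced 2, then prints 2

The trade-off is that the stream is single-use, which is why main() needs the separate test_count and why the debug path materializes the generator before touching it.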
--- a/js/src/tests/lib/manifest.py
+++ b/js/src/tests/lib/manifest.py
@@ -318,64 +318,74 @@ def _apply_external_manifests(filename, 
             # conditions.
             # At this point, we use external manifests only for test cases
             # that can't have their own failure type comments, so we simply
             # use the terms for the most specific path.
             testcase.terms = entry["terms"]
             testcase.comment = entry["comment"]
             _parse_one(testcase, xul_tester)
 
+def _is_test_file(path_from_root, basename, filename, requested_paths,
+                  excluded_paths):
+    # Any file whose basename matches something in this set is ignored.
+    EXCLUDED = set(('browser.js', 'shell.js', 'jsref.js', 'template.js',
+                    'user.js', 'sta.js',
+                    'test262-browser.js', 'test262-shell.js',
+                    'test402-browser.js', 'test402-shell.js',
+                    'testBuiltInObject.js', 'testIntl.js',
+                    'js-test-driver-begin.js', 'js-test-driver-end.js'))
+
+    # Skip js files in the root test directory.
+    if not path_from_root:
+        return False
+
+    # Skip files that we know are not tests.
+    if basename in EXCLUDED:
+        return False
+
+    # If any tests are requested by name, skip tests that do not match.
+    if requested_paths \
+        and not any(req in filename for req in requested_paths):
+        return False
+
+    # Skip excluded tests.
+    if filename in excluded_paths:
+        return False
+
+    return True
+
+
+def count_tests(location, requested_paths, excluded_paths):
+    count = 0
+    for root, basename in _find_all_js_files(location, location):
+        filename = os.path.join(root, basename)
+        if _is_test_file(root, basename, filename, requested_paths, excluded_paths):
+            count += 1
+    return count
+
+
 def load(location, requested_paths, excluded_paths, xul_tester, reldir=''):
     """
     Locates all tests by walking the filesystem starting at |location|.
     Uses xul_tester to evaluate any test conditions in the test header.
     Failure type and comment for a test case can come from
     - an external manifest entry for the test case,
     - an external manifest entry for a containing directory,
     - most commonly: the header of the test case itself.
     """
-    # The list of tests that we are collecting.
-    tests = []
-
-    # Any file whose basename matches something in this set is ignored.
-    EXCLUDED = set(('browser.js', 'shell.js', 'jsref.js', 'template.js',
-                    'user.js', 'sta.js',
-                    'test262-browser.js', 'test262-shell.js',
-                    'test402-browser.js', 'test402-shell.js',
-                    'testBuiltInObject.js', 'testIntl.js',
-                    'js-test-driver-begin.js', 'js-test-driver-end.js'))
-
     manifestFile = os.path.join(location, 'jstests.list')
     externalManifestEntries = _parse_external_manifest(manifestFile, '')
 
     for root, basename in _find_all_js_files(location, location):
-        # Skip js files in the root test directory.
-        if not root:
-            continue
-
-        # Skip files that we know are not tests.
-        if basename in EXCLUDED:
-            continue
-
         # Get the full path and relative location of the file.
         filename = os.path.join(root, basename)
-        fullpath = os.path.join(location, filename)
-
-        # If any tests are requested by name, skip tests that do not match.
-        if requested_paths \
-           and not any(req in filename for req in requested_paths):
+        if not _is_test_file(root, basename, filename, requested_paths, excluded_paths):
             continue
-
-        # Skip excluded tests.
-        if filename in excluded_paths:
-            continue
-
+
         # Skip empty files.
+        fullpath = os.path.join(location, filename)
         statbuf = os.stat(fullpath)
         if statbuf.st_size == 0:
             continue
 
         testcase = TestCase(os.path.join(reldir, filename))
         _apply_external_manifests(filename, testcase, externalManifestEntries,
                                   xul_tester)
         _parse_test_header(fullpath, testcase, xul_tester)
-        tests.append(testcase)
-    return tests
+        yield testcase
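Since a generator has no length, the progress bar's total comes from count_tests(): a second, cheaper walk that applies only the path filters and never opens a file, while the expensive header parsing stays in the lazy pass. A hedged sketch of that two-pass shape (find_js_files and is_test_file here are stand-ins, not the module's helpers):

    import os

    def find_js_files(location):
        # Yield (directory-relative-to-root, basename) pairs, in the
        # spirit of _find_all_js_files().
        for dirpath, _dirs, files in os.walk(location):
            for name in files:
                if name.endswith('.js'):
                    yield os.path.relpath(dirpath, location), name

    def count_tests(location, is_test_file):
        # Cheap pass: decide on names alone, never stat or open a file.
        return sum(1 for root, name in find_js_files(location)
                   if is_test_file(root, name))

    def load(location, is_test_file):
        # Expensive pass: only surviving files are opened and parsed, lazily.
        for root, name in find_js_files(location):
            if is_test_file(root, name):
                yield os.path.normpath(os.path.join(location, root, name))

Factoring the shared filter into _is_test_file() keeps the two passes in agreement; if they drifted apart, the progress total would be wrong.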
--- a/js/src/tests/lib/tasks_unix.py
+++ b/js/src/tests/lib/tasks_unix.py
@@ -179,16 +179,17 @@ def kill_undead(tasks, timeout):
     Signal all children that are over the given timeout.
     """
     for task in tasks:
         if timed_out(task, timeout):
             os.kill(task.pid, 9)
 
 def run_all_tests(tests, prefix, results, options):
     # Copy and reverse for fast pop off end.
-    tests = tests[:]
+    tests = list(tests)
     tests.reverse()
 
     # The set of currently running tests.
     tasks = []
 
     while len(tests) or len(tasks):
         while len(tests) and len(tasks) < options.worker_count:
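The Unix scheduler still wants random access: it pops tests off the end of a list after reversing it, and a generator supports neither operation, so this path simply materializes the stream. An illustration of why the conversion is required (not harness code):

    gen = (n for n in range(3))
    # gen.reverse()  would raise AttributeError: generators have no reverse()
    # gen[:]         would raise TypeError: generators are not subscriptable
    tests = list(gen)   # materialize once, as the patch does
    tests.reverse()

So only the Windows backend gains the streaming start; Unix keeps its old behavior behind the same interface.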
--- a/js/src/tests/lib/tasks_win.py
+++ b/js/src/tests/lib/tasks_win.py
@@ -89,33 +89,38 @@ def run_all_tests(tests, prefix, results
         worker.start()
         workers.append(worker)
 
     # Insert all jobs into the queue, followed by the queue-end
     # marker, one per worker. This will not block on growing the
     # queue, only on waiting for more items in the generator. The
     # workers are already started, however, so this will process as
     # fast as we can produce tests from the filesystem.
-    for test in tests:
-        qTasks.put(test)
-    for _ in workers:
-        qTasks.put(EndMarker)
+    def _do_push(num_workers, qTasks):
+        for test in tests:
+            qTasks.put(test)
+        for _ in range(num_workers):
+            qTasks.put(EndMarker)
+    pusher = Thread(target=_do_push, args=(len(workers), qTasks))
+    pusher.setDaemon(True)
+    pusher.start()
 
     # Read from the results.
     ended = 0
     delay = ProgressBar.update_granularity().total_seconds()
     while ended < len(workers):
         try:
             result = qResults.get(block=True, timeout=delay)
             if result is EndMarker:
                 ended += 1
             else:
                 yield result
         except Empty:
             results.pb.poke()
 
     # Cleanup and exit.
+    pusher.join()
     for worker in workers:
         worker.join()
     for watcher in watchdogs:
         watcher.join()
     assert qTasks.empty(), "Send queue not drained"
     assert qResults.empty(), "Result queue not drained"
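The result-drain loop above polls with a timeout so the progress bar keeps updating even while no results arrive, and one EndMarker per worker tells it when to stop. A reduced sketch of that loop in isolation (Python 3 spelling; qResults, poke, and EndMarker are parameters standing in for the module's objects):

    import queue

    def drain_results(qResults, worker_count, poke, EndMarker, delay=0.1):
        # Yield results until every worker has reported its EndMarker,
        # poking the progress display whenever the queue stays quiet.
        ended = 0
        while ended < worker_count:
            try:
                result = qResults.get(block=True, timeout=delay)
            except queue.Empty:
                poke()
                continue
            if result is EndMarker:
                ended += 1
            else:
                yield result

Making the pusher a daemon thread means a wedged test generator cannot keep the process alive after the main thread gives up; the explicit join() runs only once all end markers have been seen, at which point the pusher is guaranteed to have finished its queue inserts.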