Bug 1184393 - Rename some js shell test variables for better readability; r=sfink
authorTerrence Cole <terrence@mozilla.com>
Fri, 17 Jul 2015 08:05:59 -0700
changeset 253519 9a5dd877fb8ee9acbd9635d592b446d9b67b9bd7
parent 253518 d7d10f4a01d67fce489b8ea065066800c39ae6df
child 253520 37a7604e5e1e3b3ebb0de6c98f5b7e9ec6c0a75c
push id29067
push userkwierso@gmail.com
push dateSat, 18 Jul 2015 00:57:04 +0000
treeherdermozilla-central@e2f2eb9ecca0 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewerssfink
bugs1184393
milestone42.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1184393 - Rename some js shell test variables for better readability; r=sfink
js/src/jit-test/jit_test.py
js/src/tests/jstests.py
js/src/tests/lib/jittests.py
js/src/tests/lib/manifest.py
js/src/tests/lib/tests.py
--- a/js/src/jit-test/jit_test.py
+++ b/js/src/jit-test/jit_test.py
@@ -204,17 +204,17 @@ def main(argv):
         test_list = [test for test in test_list
                      if test not in set(exclude_list)]
 
     if not test_list:
         print("No tests found matching command line arguments.",
               file=sys.stderr)
         sys.exit(0)
 
-    test_list = [jittests.Test.from_file(_, options) for _ in test_list]
+    test_list = [jittests.JitTest.from_file(_, options) for _ in test_list]
 
     if not options.run_slow:
         test_list = [_ for _ in test_list if not _.slow]
 
     # If chunking is enabled, determine which tests are part of this chunk.
     # This code was adapted from testing/mochitest/runtestsremote.py.
     if options.total_chunks > 1:
         total_tests = len(test_list)
--- a/js/src/tests/jstests.py
+++ b/js/src/tests/jstests.py
@@ -8,17 +8,17 @@ See the adjacent README.txt for more det
 from __future__ import print_function
 
 import os, sys, textwrap
 from os.path import abspath, dirname, isfile, realpath
 from contextlib import contextmanager
 from copy import copy
 from subprocess import list2cmdline, call
 
-from lib.tests import TestCase, get_jitflags
+from lib.tests import RefTestCase, get_jitflags
 from lib.results import ResultsSink
 from lib.progressbar import ProgressBar
 
 if sys.platform.startswith('linux') or sys.platform.startswith('darwin'):
     from lib.tasks_unix import run_all_tests
 else:
     from lib.tasks_win import run_all_tests
 
@@ -208,18 +208,18 @@ def parse_args():
     if options.jorendb:
         options.passthrough = True
         options.hide_progress = True
         options.worker_count = 1
         debugger_path = realpath(os.path.join(
             abspath(dirname(abspath(__file__))),
             '..', '..', 'examples', 'jorendb.js'))
         js_cmd_args.extend(['-d', '-f', debugger_path, '--'])
-    prefix = TestCase.build_js_cmd_prefix(options.js_shell, js_cmd_args,
-                                          debugger_prefix)
+    prefix = RefTestCase.build_js_cmd_prefix(options.js_shell, js_cmd_args,
+                                             debugger_prefix)
 
     # If files with lists of tests to run were specified, add them to the
     # requested tests set.
     if options.test_file:
         for test_file in options.test_file:
             requested_paths |= set(
                 [line.strip() for line in open(test_file).readlines()])
 
@@ -272,18 +272,18 @@ def load_tests(options, requested_paths,
         else:
             xul_abi, xul_os, xul_debug = options.xul_info_src.split(r':')
             xul_debug = xul_debug.lower() is 'true'
             xul_info = manifest.XULInfo(xul_abi, xul_os, xul_debug)
         xul_tester = manifest.XULInfoTester(xul_info, options.js_shell)
 
     test_dir = dirname(abspath(__file__))
     test_count = manifest.count_tests(test_dir, requested_paths, excluded_paths)
-    test_gen = manifest.load(test_dir, requested_paths, excluded_paths,
-                              xul_tester)
+    test_gen = manifest.load_reftests(test_dir, requested_paths, excluded_paths,
+                                      xul_tester)
 
     if options.make_manifests:
         manifest.make_manifests(options.make_manifests, test_gen)
         sys.exit()
 
     # Create a new test list. Apply each TBPL configuration to every test.
     flags_list = None
     if options.tbpl:
@@ -352,17 +352,17 @@ def main():
     if options.debug:
         if len(list(test_gen)) > 1:
             print('Multiple tests match command line arguments,'
                   ' debugger can only run one')
             for tc in test_gen:
                 print('    {}'.format(tc.path))
             return 2
 
-        cmd = test_gen[0].get_command(TestCase.js_cmd_prefix)
+        cmd = test_gen[0].get_command(RefTestCase.js_cmd_prefix)
         if options.show_cmd:
             print(list2cmdline(cmd))
         with changedir(test_dir):
             call(cmd)
         return 0
 
     with changedir(test_dir):
         # Force Pacific time zone to avoid failures in Date tests.
--- a/js/src/tests/lib/jittests.py
+++ b/js/src/tests/lib/jittests.py
@@ -73,17 +73,17 @@ def js_quote(quote, s):
             result += QUOTE_MAP[c]
         else:
             result += c
     result += quote
     return result
 
 os.path.relpath = _relpath
 
-class Test:
+class JitTest:
 
     VALGRIND_CMD = []
     paths = (d for d in os.environ['PATH'].split(os.pathsep))
     valgrinds = (os.path.join(d, 'valgrind') for d in paths)
     if any(os.path.exists(p) for p in valgrinds):
         VALGRIND_CMD = [
             'valgrind', '-q', '--smc-check=all-non-file',
             '--error-exitcode=1', '--gen-suppressions=all',
@@ -117,17 +117,17 @@ class Test:
         self.test_also_noasmjs = False # True means run with and without asm.js
                                        # enabled.
         self.test_also = [] # List of other configurations to test with.
         self.test_join = [] # List of other configurations to test with all existing variants.
         self.expect_error = '' # Errors to expect and consider passing
         self.expect_status = 0 # Exit status to expect from shell
 
     def copy(self):
-        t = Test(self.path)
+        t = JitTest(self.path)
         t.jitflags = self.jitflags[:]
         t.slow = self.slow
         t.allow_oom = self.allow_oom
         t.allow_unhandlable_oom = self.allow_unhandlable_oom
         t.allow_overrecursed = self.allow_overrecursed
         t.valgrind = self.valgrind
         t.tz_pacific = self.tz_pacific
         t.test_also_noasmjs = self.test_also_noasmjs
@@ -250,17 +250,17 @@ class Test:
             quotechar = "'"
         expr = "const platform={}; const libdir={}; const scriptdir={}".format(
             js_quote(quotechar, sys.platform),
             js_quote(quotechar, libdir),
             js_quote(quotechar, scriptdir_var))
 
         # We may have specified '-a' or '-d' twice: once via --jitflags, once
         # via the "|jit-test|" line.  Remove dups because they are toggles.
-        cmd = prefix + ['--js-cache', Test.CacheDir]
+        cmd = prefix + ['--js-cache', JitTest.CacheDir]
         cmd += list(set(self.jitflags)) + ['-e', expr, '-f', path]
         if self.valgrind:
             cmd = self.VALGRIND_CMD + cmd
         return cmd
 
 def find_tests(substring=None):
     ans = []
     for dirpath, dirnames, filenames in os.walk(TEST_DIR):
@@ -826,18 +826,18 @@ def run_tests_remote(tests, prefix, opti
     # Push js shell and libraries.
     if dm.dirExists(jit_tests_dir):
         dm.removeDir(jit_tests_dir)
     dm.mkDirs(options.remote_test_root)
     push_libs(options, dm)
     push_progs(options, dm, [prefix[0]])
     dm.chmodDir(options.remote_test_root)
 
-    Test.CacheDir = posixpath.join(options.remote_test_root, '.js-cache')
-    dm.mkDir(Test.CacheDir)
+    JitTest.CacheDir = posixpath.join(options.remote_test_root, '.js-cache')
+    dm.mkDir(JitTest.CacheDir)
 
     dm.pushDir(JS_TESTS_DIR, posixpath.join(jit_tests_dir, 'tests'),
                timeout=600)
 
     dm.pushDir(os.path.dirname(TEST_DIR), options.remote_test_root,
                timeout=600)
     prefix[0] = os.path.join(options.remote_test_root, 'js')
 
--- a/js/src/tests/lib/manifest.py
+++ b/js/src/tests/lib/manifest.py
@@ -2,17 +2,17 @@
 #
 # This includes classes for representing and parsing JS manifests.
 
 from __future__ import print_function
 
 import os, re, sys
 from subprocess import Popen, PIPE
 
-from tests import TestCase
+from tests import RefTestCase
 
 
 def split_path_into_dirs(path):
     dirs = [path]
 
     while True:
         path, tail = os.path.split(path)
         if not tail:
@@ -163,39 +163,39 @@ def _build_manifest_script_entry(script_
         line.append(test.terms)
     line.append("script")
     line.append(script_name)
     if test.comment:
         line.append("#")
         line.append(test.comment)
     return ' '.join(line)
 
-def _map_prefixes_left(test_list):
+def _map_prefixes_left(test_gen):
     """
     Splits tests into a dictionary keyed on the first component of the test
     path, aggregating tests with a common base path into a list.
     """
     byprefix = {}
-    for t in test_list:
+    for t in test_gen:
         left, sep, remainder = t.path.partition(os.sep)
         if left not in byprefix:
             byprefix[left] = []
         if remainder:
             t.path = remainder
         byprefix[left].append(t)
     return byprefix
 
-def _emit_manifest_at(location, relative, test_list, depth):
+def _emit_manifest_at(location, relative, test_gen, depth):
     """
     location  - str: absolute path where we want to write the manifest
     relative  - str: relative path from topmost manifest directory to current
-    test_list - [str]: list of all test paths and directorys
+    test_gen  - (str): generator of all test paths and directories
     depth     - int: number of dirs we are below the topmost manifest dir
     """
-    manifests = _map_prefixes_left(test_list)
+    manifests = _map_prefixes_left(test_gen)
 
     filename = os.path.join(location, 'jstests.list')
     manifest = []
     numTestFiles = 0
     for k, test_list in manifests.iteritems():
         fullpath = os.path.join(location, k)
         if os.path.isdir(fullpath):
             manifest.append("include " + k + "/jstests.list")
@@ -218,18 +218,18 @@ def _emit_manifest_at(location, relative
             '../' * depth, relative)] + manifest
 
     fp = open(filename, 'w')
     try:
         fp.write('\n'.join(manifest) + '\n')
     finally:
         fp.close()
 
-def make_manifests(location, test_list):
-    _emit_manifest_at(location, '', test_list, 0)
+def make_manifests(location, test_gen):
+    _emit_manifest_at(location, '', test_gen, 0)
 
 def _find_all_js_files(base, location):
     for root, dirs, files in os.walk(location):
         root = root[len(base) + 1:]
         for fn in files:
             if fn.endswith('.js'):
                 yield root, fn
 
@@ -357,35 +357,35 @@ def count_tests(location, requested_path
     count = 0
     for root, basename in _find_all_js_files(location, location):
         filename = os.path.join(root, basename)
         if _is_test_file(root, basename, filename, requested_paths, excluded_paths):
             count += 1
     return count
 
 
-def load(location, requested_paths, excluded_paths, xul_tester, reldir=''):
+def load_reftests(location, requested_paths, excluded_paths, xul_tester, reldir=''):
     """
     Locates all tests by walking the filesystem starting at |location|.
     Uses xul_tester to evaluate any test conditions in the test header.
     Failure type and comment for a test case can come from
     - an external manifest entry for the test case,
     - an external manifest entry for a containing directory,
     - most commonly: the header of the test case itself.
     """
     manifestFile = os.path.join(location, 'jstests.list')
     externalManifestEntries = _parse_external_manifest(manifestFile, '')
 
     for root, basename in _find_all_js_files(location, location):
         # Get the full path and relative location of the file.
         filename = os.path.join(root, basename)
         if not _is_test_file(root, basename, filename, requested_paths, excluded_paths):
             continue
-        
+
         # Skip empty files.
         fullpath = os.path.join(location, filename)
         statbuf = os.stat(fullpath)
 
-        testcase = TestCase(os.path.join(reldir, filename))
+        testcase = RefTestCase(os.path.join(reldir, filename))
         _apply_external_manifests(filename, testcase, externalManifestEntries,
                                   xul_tester)
         _parse_test_header(fullpath, testcase, xul_tester)
         yield testcase
--- a/js/src/tests/lib/tests.py
+++ b/js/src/tests/lib/tests.py
@@ -40,43 +40,43 @@ JITFLAGS = {
 def get_jitflags(variant, **kwargs):
     if variant not in JITFLAGS:
         print('Invalid jitflag: "{}"'.format(variant))
         sys.exit(1)
     if variant == 'none' and 'none' in kwargs:
         return kwargs['none']
     return JITFLAGS[variant]
 
-class Test(object):
+class RefTest(object):
     """A runnable test."""
     def __init__(self, path):
         self.path = path     # str:  path of JS file relative to tests root dir
         self.options = []    # [str]: Extra options to pass to the shell
         self.jitflags = []   # [str]: JIT flags to pass to the shell
 
     @staticmethod
     def prefix_command(path):
         """Return the '-f shell.js' options needed to run a test with the given
         path."""
         if path == '':
             return ['-f', 'shell.js']
         head, base = os.path.split(path)
-        return Test.prefix_command(head) \
+        return RefTest.prefix_command(head) \
             + ['-f', os.path.join(path, 'shell.js')]
 
     def get_command(self, prefix):
         dirname, filename = os.path.split(self.path)
         cmd = prefix + self.jitflags + self.options \
-              + Test.prefix_command(dirname) + ['-f', self.path]
+              + RefTest.prefix_command(dirname) + ['-f', self.path]
         return cmd
 
-class TestCase(Test):
+class RefTestCase(RefTest):
     """A test case consisting of a test and an expected result."""
     def __init__(self, path):
-        Test.__init__(self, path)
+        RefTest.__init__(self, path)
         self.enable = True   # bool: True => run test, False => don't run
         self.expect = True   # bool: expected result, True => pass
         self.random = False  # bool: True => ignore output as 'random'
         self.slow = False    # bool: True => test may run slowly
 
         # The terms parsed to produce the above properties.
         self.terms = None