Bug 1619475 - partial Python 3 compatibility for jit-tests and jstests r=arai
author Steve Fink <sfink@mozilla.com>
Fri, 06 Mar 2020 18:07:38 +0000
changeset 517312 c753faaef709238ccb8ce33e9f4830a4acb8f6b7
parent 517311 4459881f26b0f9659e7ce9bb323e08c86e49388e
child 517313 eb037e72681e29f6bcfcd9ff59973cb050c7a323
push id 109358
push user sfink@mozilla.com
push date Fri, 06 Mar 2020 18:19:22 +0000
treeherder autoland@eb037e72681e
reviewers arai
bugs 1619475
milestone 75.0a1
Bug 1619475 - partial Python 3 compatibility for jit-tests and jstests r=arai

This fixes all of the jit-test problems I encountered, and all but the wpt portion of jstests.

It also ports jstests from optparse to argparse. That is not strictly necessary, but optparse is deprecated, argparse is nicer to work with, and the port happened to fix the minor compatibility issue I ran into (even though that issue has a trivial fix on its own).

Differential Revision: https://phabricator.services.mozilla.com/D65047
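The optparse-to-argparse port is mostly mechanical: OptionParser/add_option become ArgumentParser/add_argument, '%default' in help strings becomes '%(default)s', type='string' becomes the callable str, and the old manual JS_SHELL/[TESTS] handling becomes a declared positional plus parse_known_args(). A minimal runnable sketch of the pattern (option values made up, not taken from the patch):

    import argparse

    op = argparse.ArgumentParser(description='Run jit-test JS shell tests')
    op.add_argument('-t', '--timeout', type=float, default=150.0,
                    help='set test timeout in seconds (default %(default)s)')
    op.add_argument('js_shell', metavar='JS_SHELL',
                    help='JS shell to run tests with')
    # parse_known_args() tolerates extra positionals: they come back in the
    # second return value, playing the role of the optional [TESTS] paths.
    options, test_args = op.parse_known_args(['./js', '-t', '30', 'basic/foo.js'])
    assert options.timeout == 30.0
    assert test_args == ['basic/foo.js']
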
js/src/jit-test/jit_test.py
js/src/jit-test/tests/basic/bug1033946.js
js/src/tests/lib/jittests.py
js/src/tests/lib/manifest.py
js/src/tests/lib/progressbar.py
js/src/tests/lib/results.py
js/src/tests/lib/tasks_unix.py
js/src/tests/lib/tasks_win.py
mach
testing/web-platform/manifestdownload.py
testing/web-platform/manifestupdate.py
--- a/js/src/jit-test/jit_test.py
+++ b/js/src/jit-test/jit_test.py
@@ -10,29 +10,33 @@ import os
 import platform
 import posixpath
 import shlex
 import subprocess
 import sys
 import traceback
 
 
-def add_libdir_to_path():
+def add_tests_dir_to_path():
     from os.path import dirname, exists, join, realpath
     js_src_dir = dirname(dirname(realpath(sys.argv[0])))
     assert exists(join(js_src_dir, 'jsapi.h'))
-    sys.path.insert(0, join(js_src_dir, 'lib'))
-    sys.path.insert(0, join(js_src_dir, 'tests', 'lib'))
+    sys.path.insert(0, join(js_src_dir, 'tests'))
 
 
-add_libdir_to_path()
+add_tests_dir_to_path()
 
-import jittests
-from tests import get_jitflags, valid_jitflags, get_cpu_count, get_environment_overlay, \
-    change_env
+from lib import jittests
+from lib.tests import (
+    get_jitflags,
+    valid_jitflags,
+    get_cpu_count,
+    get_environment_overlay,
+    change_env,
+)
 
 
 def which(name):
     if name.find(os.path.sep) != -1:
         return os.path.abspath(name)
 
     for path in os.environ["PATH"].split(os.pathsep):
         full = os.path.join(path, name)
@@ -61,145 +65,142 @@ def choose_item(jobs, max_items, display
         raise Exception('Unrecognized input')
 
     return jobs[item - 1]
 
 
 def main(argv):
     # The [TESTS] optional arguments are paths of test files relative
     # to the jit-test/tests directory.
-    from optparse import OptionParser, SUPPRESS_HELP
-    op = OptionParser(usage='%prog [options] JS_SHELL [TESTS]')
-    op.add_option('-s', '--show-cmd', dest='show_cmd', action='store_true',
-                  help='show js shell command run')
-    op.add_option('-f', '--show-failed-cmd', dest='show_failed',
-                  action='store_true',
-                  help='show command lines of failed tests')
-    op.add_option('-o', '--show-output', dest='show_output',
-                  action='store_true',
-                  help='show output from js shell')
-    op.add_option('-F', '--failed-only', dest='failed_only',
-                  action='store_true',
-                  help="if --show-output is given, only print output for"
-                  " failed tests")
-    op.add_option('--no-show-failed', dest='no_show_failed',
-                  action='store_true',
-                  help="don't print output for failed tests"
-                  " (no-op with --show-output)")
-    op.add_option('-x', '--exclude', dest='exclude',
-                  default=[], action='append',
-                  help='exclude given test dir or path')
-    op.add_option('--exclude-from', dest='exclude_from', type=str,
-                  help='exclude each test dir or path in FILE')
-    op.add_option('--slow', dest='run_slow', action='store_true',
-                  help='also run tests marked as slow')
-    op.add_option('--no-slow', dest='run_slow', action='store_false',
-                  help='do not run tests marked as slow (the default)')
-    op.add_option('-t', '--timeout', dest='timeout', type=float, default=150.0,
-                  help='set test timeout in seconds')
-    op.add_option('--no-progress', dest='hide_progress', action='store_true',
-                  help='hide progress bar')
-    op.add_option('--tinderbox', dest='format', action='store_const',
-                  const='automation',
-                  help='Use automation-parseable output format')
-    op.add_option('--format', dest='format', default='none', type='choice',
-                  choices=['automation', 'none'],
-                  help='Output format. Either automation or none'
-                  ' (default %default).')
-    op.add_option('--args', dest='shell_args', metavar='ARGS', default='',
-                  help='extra args to pass to the JS shell')
-    op.add_option('--feature-args', dest='feature_args', metavar='ARGS',
-                  default='',
-                  help='even more args to pass to the JS shell '
-                       '(for compatibility with jstests.py)')
-    op.add_option('-w', '--write-failures', dest='write_failures',
-                  metavar='FILE',
-                  help='Write a list of failed tests to [FILE]')
-    op.add_option('-C', '--check-output', action='store_true', dest='check_output',
-                  help='Run tests to check output for different jit-flags')
-    op.add_option('-r', '--read-tests', dest='read_tests', metavar='FILE',
-                  help='Run test files listed in [FILE]')
-    op.add_option('-R', '--retest', dest='retest', metavar='FILE',
-                  help='Retest using test list file [FILE]')
-    op.add_option('-g', '--debug', action='store_const', const='gdb', dest='debugger',
-                  help='Run a single test under the gdb debugger')
-    op.add_option('-G', '--debug-rr', action='store_const', const='rr', dest='debugger',
-                  help='Run a single test under the rr debugger')
-    op.add_option('--debugger', type='string',
-                  help='Run a single test under the specified debugger')
-    op.add_option('--valgrind', dest='valgrind', action='store_true',
-                  help='Enable the |valgrind| flag, if valgrind is in $PATH.')
-    op.add_option('--unusable-error-status', action='store_true',
-                  help='Ignore incorrect exit status on tests that should return nonzero.')
-    op.add_option('--valgrind-all', dest='valgrind_all', action='store_true',
-                  help='Run all tests with valgrind, if valgrind is in $PATH.')
-    op.add_option('--avoid-stdio', dest='avoid_stdio', action='store_true',
-                  help='Use js-shell file indirection instead of piping stdio.')
-    op.add_option('--write-failure-output', dest='write_failure_output',
-                  action='store_true',
-                  help='With --write-failures=FILE, additionally write the'
-                  ' output of failed tests to [FILE]')
-    op.add_option('--jitflags', dest='jitflags', default='none',
-                  choices=valid_jitflags(),
-                  help='IonMonkey option combinations. One of %s.' % ', '.join(valid_jitflags()))
-    op.add_option('--ion', dest='jitflags', action='store_const', const='ion',
-                  help='Run tests once with --ion-eager and once with'
-                  ' --baseline-eager (equivalent to --jitflags=ion)')
-    op.add_option('--tbpl', dest='jitflags', action='store_const', const='all',
-                  help='Run tests with all IonMonkey option combinations'
-                  ' (equivalent to --jitflags=all)')
-    op.add_option('-j', '--worker-count', dest='max_jobs', type=int,
-                  default=max(1, get_cpu_count()),
-                  help='Number of tests to run in parallel (default %default)')
-    op.add_option('--remote', action='store_true',
-                  help='Run tests on a remote device')
-    op.add_option('--deviceIP', action='store',
-                  type='string', dest='device_ip',
-                  help='IP address of remote device to test')
-    op.add_option('--devicePort', action='store',
-                  type=int, dest='device_port', default=20701,
-                  help='port of remote device to test')
-    op.add_option('--deviceSerial', action='store',
-                  type='string', dest='device_serial', default=None,
-                  help='ADB device serial number of remote device to test')
-    op.add_option('--remoteTestRoot', dest='remote_test_root', action='store',
-                  type='string', default='/data/local/tests',
-                  help='The remote directory to use as test root'
-                  ' (eg. /data/local/tests)')
-    op.add_option('--localLib', dest='local_lib', action='store',
-                  type='string',
-                  help='The location of libraries to push -- preferably'
-                  ' stripped')
-    op.add_option('--repeat', type=int, default=1,
-                  help='Repeat tests the given number of times.')
-    op.add_option('--this-chunk', type=int, default=1,
-                  help='The test chunk to run.')
-    op.add_option('--total-chunks', type=int, default=1,
-                  help='The total number of test chunks.')
-    op.add_option('--ignore-timeouts', dest='ignore_timeouts', metavar='FILE',
-                  help='Ignore timeouts of tests listed in [FILE]')
-    op.add_option('--test-reflect-stringify', dest="test_reflect_stringify",
-                  help="instead of running tests, use them to test the "
-                  "Reflect.stringify code in specified file")
-    op.add_option('--run-binast', action='store_true',
-                  dest="run_binast",
-                  help="By default BinAST testcases encoded from JS "
-                  "testcases are skipped. If specified, BinAST testcases "
-                  "are also executed.")
+    import argparse
+    op = argparse.ArgumentParser(description='Run jit-test JS shell tests')
+    op.add_argument('-s', '--show-cmd', dest='show_cmd', action='store_true',
+                    help='show js shell command run')
+    op.add_argument('-f', '--show-failed-cmd', dest='show_failed',
+                    action='store_true',
+                    help='show command lines of failed tests')
+    op.add_argument('-o', '--show-output', dest='show_output',
+                    action='store_true',
+                    help='show output from js shell')
+    op.add_argument('-F', '--failed-only', dest='failed_only',
+                    action='store_true',
+                    help="if --show-output is given, only print output for"
+                    " failed tests")
+    op.add_argument('--no-show-failed', dest='no_show_failed',
+                    action='store_true',
+                    help="don't print output for failed tests"
+                    " (no-op with --show-output)")
+    op.add_argument('-x', '--exclude', dest='exclude',
+                    default=[], action='append',
+                    help='exclude given test dir or path')
+    op.add_argument('--exclude-from', dest='exclude_from', type=str,
+                    help='exclude each test dir or path in FILE')
+    op.add_argument('--slow', dest='run_slow', action='store_true',
+                    help='also run tests marked as slow')
+    op.add_argument('--no-slow', dest='run_slow', action='store_false',
+                    help='do not run tests marked as slow (the default)')
+    op.add_argument('-t', '--timeout', dest='timeout', type=float, default=150.0,
+                    help='set test timeout in seconds')
+    op.add_argument('--no-progress', dest='hide_progress', action='store_true',
+                    help='hide progress bar')
+    op.add_argument('--tinderbox', dest='format', action='store_const',
+                    const='automation',
+                    help='Use automation-parseable output format')
+    op.add_argument('--format', dest='format', default='none',
+                    choices=('automation', 'none'),
+                    help='Output format (default %(default)s).')
+    op.add_argument('--args', dest='shell_args', metavar='ARGS', default='',
+                    help='extra args to pass to the JS shell')
+    op.add_argument('--feature-args', dest='feature_args', metavar='ARGS',
+                    default='',
+                    help='even more args to pass to the JS shell '
+                    '(for compatibility with jstests.py)')
+    op.add_argument('-w', '--write-failures', dest='write_failures',
+                    metavar='FILE',
+                    help='Write a list of failed tests to [FILE]')
+    op.add_argument('-C', '--check-output', action='store_true', dest='check_output',
+                    help='Run tests to check output for different jit-flags')
+    op.add_argument('-r', '--read-tests', dest='read_tests', metavar='FILE',
+                    help='Run test files listed in [FILE]')
+    op.add_argument('-R', '--retest', dest='retest', metavar='FILE',
+                    help='Retest using test list file [FILE]')
+    op.add_argument('-g', '--debug', action='store_const', const='gdb', dest='debugger',
+                    help='Run a single test under the gdb debugger')
+    op.add_argument('-G', '--debug-rr', action='store_const', const='rr', dest='debugger',
+                    help='Run a single test under the rr debugger')
+    op.add_argument('--debugger', type=str,
+                    help='Run a single test under the specified debugger')
+    op.add_argument('--valgrind', dest='valgrind', action='store_true',
+                    help='Enable the |valgrind| flag, if valgrind is in $PATH.')
+    op.add_argument('--unusable-error-status', action='store_true',
+                    help='Ignore incorrect exit status on tests that should return nonzero.')
+    op.add_argument('--valgrind-all', dest='valgrind_all', action='store_true',
+                    help='Run all tests with valgrind, if valgrind is in $PATH.')
+    op.add_argument('--avoid-stdio', dest='avoid_stdio', action='store_true',
+                    help='Use js-shell file indirection instead of piping stdio.')
+    op.add_argument('--write-failure-output', dest='write_failure_output',
+                    action='store_true',
+                    help='With --write-failures=FILE, additionally write the'
+                    ' output of failed tests to [FILE]')
+    op.add_argument('--jitflags', dest='jitflags', default='none',
+                    choices=valid_jitflags(),
+                    help='IonMonkey option combinations (default %(default)s).')
+    op.add_argument('--ion', dest='jitflags', action='store_const', const='ion',
+                    help='Run tests once with --ion-eager and once with'
+                    ' --baseline-eager (equivalent to --jitflags=ion)')
+    op.add_argument('--tbpl', dest='jitflags', action='store_const', const='all',
+                    help='Run tests with all IonMonkey option combinations'
+                    ' (equivalent to --jitflags=all)')
+    op.add_argument('-j', '--worker-count', dest='max_jobs', type=int,
+                    default=max(1, get_cpu_count()),
+                    help='Number of tests to run in parallel (default %(default)s).')
+    op.add_argument('--remote', action='store_true',
+                    help='Run tests on a remote device')
+    op.add_argument('--deviceIP', action='store',
+                    type=str, dest='device_ip',
+                    help='IP address of remote device to test')
+    op.add_argument('--devicePort', action='store',
+                    type=int, dest='device_port', default=20701,
+                    help='port of remote device to test')
+    op.add_argument('--deviceSerial', action='store',
+                    type=str, dest='device_serial', default=None,
+                    help='ADB device serial number of remote device to test')
+    op.add_argument('--remoteTestRoot', dest='remote_test_root', action='store',
+                    type=str, default='/data/local/tests',
+                    help='The remote directory to use as test root'
+                    ' (eg. /data/local/tests)')
+    op.add_argument('--localLib', dest='local_lib', action='store',
+                    type=str,
+                    help='The location of libraries to push -- preferably'
+                    ' stripped')
+    op.add_argument('--repeat', type=int, default=1,
+                    help='Repeat tests the given number of times.')
+    op.add_argument('--this-chunk', type=int, default=1,
+                    help='The test chunk to run.')
+    op.add_argument('--total-chunks', type=int, default=1,
+                    help='The total number of test chunks.')
+    op.add_argument('--ignore-timeouts', dest='ignore_timeouts', metavar='FILE',
+                    help='Ignore timeouts of tests listed in [FILE]')
+    op.add_argument('--test-reflect-stringify', dest="test_reflect_stringify",
+                    help="instead of running tests, use them to test the "
+                    "Reflect.stringify code in specified file")
+    op.add_argument('--run-binast', action='store_true',
+                    dest="run_binast",
+                    help="By default BinAST testcases encoded from JS "
+                    "testcases are skipped. If specified, BinAST testcases "
+                    "are also executed.")
     # --enable-webrender is ignored as it is not relevant for JIT
     # tests, but is required for harness compatibility.
-    op.add_option('--enable-webrender', action='store_true',
-                  dest="enable_webrender", default=False,
-                  help=SUPPRESS_HELP)
+    op.add_argument('--enable-webrender', action='store_true',
+                    dest="enable_webrender", default=False,
+                    help=argparse.SUPPRESS)
+    op.add_argument('js_shell', metavar='JS_SHELL', help='JS shell to run tests with')
 
-    options, args = op.parse_args(argv)
-    if len(args) < 1:
-        op.error('missing JS_SHELL argument')
-    js_shell = which(args[0])
-    test_args = args[1:]
+    options, test_args = op.parse_known_args(argv)
+    js_shell = which(options.js_shell)
     test_environment = get_environment_overlay(js_shell)
 
     if not (os.path.isfile(js_shell) and os.access(js_shell, os.X_OK)):
         if (platform.system() != 'Windows' or
             os.path.isfile(js_shell) or not
             os.path.isfile(js_shell + ".exe") or not
                 os.access(js_shell + ".exe", os.X_OK)):
             op.error('shell is not executable: ' + js_shell)
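With add_tests_dir_to_path(), js/src/tests itself goes on sys.path, so lib is imported as a package and its modules can use the relative imports introduced throughout this patch. A runnable toy of that layout (directory and module names made up; the __init__.py makes the package explicit, though on Python 3 a namespace package would also work):

    import os
    import sys
    import tempfile

    root = tempfile.mkdtemp()
    pkg = os.path.join(root, 'lib')
    os.makedirs(pkg)
    open(os.path.join(pkg, '__init__.py'), 'w').close()
    with open(os.path.join(pkg, 'helper.py'), 'w') as f:
        f.write('VALUE = 42\n')
    with open(os.path.join(pkg, 'consumer.py'), 'w') as f:
        f.write('from .helper import VALUE\n')   # relative import, as in lib/jittests.py

    sys.path.insert(0, root)   # what add_tests_dir_to_path() does for js/src/tests
    from lib import consumer
    assert consumer.VALUE == 42
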
--- a/js/src/jit-test/tests/basic/bug1033946.js
+++ b/js/src/jit-test/tests/basic/bug1033946.js
@@ -1,3 +1,2 @@
-
 assertEq((/(?!(?!(?!6)[\Wc]))/i).test(), false);
 assertEq("foobar\xff5baz\u1200".search(/bar\u0178\d/i), 3);
--- a/js/src/tests/lib/jittests.py
+++ b/js/src/tests/lib/jittests.py
@@ -11,23 +11,23 @@ import os
 import posixpath
 import re
 import sys
 import traceback
 from collections import namedtuple
 from datetime import datetime
 
 if sys.platform.startswith('linux') or sys.platform.startswith('darwin'):
-    from tasks_unix import run_all_tests
+    from .tasks_unix import run_all_tests
 else:
-    from tasks_win import run_all_tests
+    from .tasks_win import run_all_tests
 
-from progressbar import ProgressBar, NullProgressBar
-from results import TestOutput, escape_cmdline
-from structuredlog import TestLogger
+from .progressbar import ProgressBar, NullProgressBar
+from .results import TestOutput, escape_cmdline
+from .structuredlog import TestLogger
 
 TESTS_LIB_DIR = os.path.dirname(os.path.abspath(__file__))
 JS_DIR = os.path.dirname(os.path.dirname(TESTS_LIB_DIR))
 TOP_SRC_DIR = os.path.dirname(os.path.dirname(JS_DIR))
 TEST_DIR = os.path.join(JS_DIR, 'jit-test', 'tests')
 LIB_DIR = os.path.join(JS_DIR, 'jit-test', 'lib') + os.path.sep
 MODULE_DIR = os.path.join(JS_DIR, 'jit-test', 'modules') + os.path.sep
 JS_TESTS_DIR = posixpath.join(JS_DIR, 'tests')
@@ -198,29 +198,29 @@ class JitTest:
         # test_join.  This will multiply the number of variants by 2 for set of
         # options.
         for join_opts in self.test_join:
             variants = variants + [opts + join_opts for opts in variants]
 
         # For each list of jit flags, make a copy of the test.
         return [self.copy_and_extend_jitflags(v) for v in variants]
 
-    COOKIE = '|jit-test|'
+    COOKIE = b'|jit-test|'
 
     # We would use 500019 (5k19), but quit() only accepts values up to 127, due to fuzzers
     SKIPPED_EXIT_STATUS = 59
     Directives = {}
 
     @classmethod
     def find_directives(cls, file_name):
         meta = ''
-        line = open(file_name).readline()
+        line = open(file_name, "rb").readline()
         i = line.find(cls.COOKIE)
         if i != -1:
-            meta = ';' + line[i + len(cls.COOKIE):].strip('\n')
+            meta = ';' + line[i + len(cls.COOKIE):].decode(errors='strict').strip('\n')
         return meta
 
     @classmethod
     def from_file(cls, path, options):
         test = cls(path)
 
         # If directives.txt exists in the test's directory then it may
         # contain metainformation that will be catenated with
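The COOKIE switch to bytes is the substantive Python 3 fix in this hunk: the first line of each test file is read in binary, so a file whose first line is not valid UTF-8 no longer crashes the directive scan outright, and only the text after the cookie is decoded (strictly). A self-contained re-creation of the pattern:

    import tempfile

    COOKIE = b'|jit-test|'
    with tempfile.NamedTemporaryFile(suffix='.js', delete=False) as f:
        f.write(b'// |jit-test| slow; --no-ion\nprint(1);\n')

    line = open(f.name, 'rb').readline()
    i = line.find(COOKIE)
    meta = ';' + line[i + len(COOKIE):].decode(errors='strict').strip('\n')
    assert meta == '; slow; --no-ion'
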
--- a/js/src/tests/lib/manifest.py
+++ b/js/src/tests/lib/manifest.py
@@ -4,17 +4,17 @@
 
 from __future__ import print_function
 
 import os
 import re
 import sys
 from subprocess import Popen, PIPE
 
-from tests import RefTestCase
+from .tests import RefTestCase
 
 
 def split_path_into_dirs(path):
     dirs = [path]
 
     while True:
         path, tail = os.path.split(path)
         if not tail:
@@ -101,17 +101,17 @@ class XULInfoTester:
                 # run in safe configuration, since it is hard to debug
                 # crashes when running code here. In particular, msan will
                 # error out if the jit is active.
                 '--no-baseline',
                 '--no-blinterp',
                 '-e', self.js_prologue,
                 '-e', 'print(!!({}))'.format(cond)
             ]
-            p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
+            p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)
             out, err = p.communicate()
             if out in ('true\n', 'true\r\n'):
                 ans = True
             elif out in ('false\n', 'false\r\n'):
                 ans = False
             else:
                 raise Exception("Failed to test XUL condition {!r};"
                                 " output was {!r}, stderr was {!r}".format(
--- a/js/src/tests/lib/progressbar.py
+++ b/js/src/tests/lib/progressbar.py
@@ -1,18 +1,19 @@
 # Text progress bar library, like curl or scp.
 
-from datetime import datetime, timedelta
 import math
 import sys
 
+from datetime import datetime, timedelta
+
 if sys.platform.startswith('win'):
-    from terminal_win import Terminal
+    from .terminal_win import Terminal
 else:
-    from terminal_unix import Terminal
+    from .terminal_unix import Terminal
 
 
 class NullProgressBar(object):
     def update(self, current, data): pass
 
     def poke(self): pass
 
     def finish(self, complete=True): pass
--- a/js/src/tests/lib/results.py
+++ b/js/src/tests/lib/results.py
@@ -1,16 +1,16 @@
 from __future__ import print_function
 
 import json
 import pipes
 import re
 
-from progressbar import NullProgressBar, ProgressBar
-from structuredlog import TestLogger
+from .progressbar import NullProgressBar, ProgressBar
+from .structuredlog import TestLogger
 
 # subprocess.list2cmdline does not properly escape for sh-like shells
 
 
 def escape_cmdline(args):
     return ' '.join([pipes.quote(a) for a in args])
 
 
--- a/js/src/tests/lib/tasks_unix.py
+++ b/js/src/tests/lib/tasks_unix.py
@@ -2,19 +2,25 @@
 # waitpid to dispatch tasks.  This avoids several deadlocks that are possible
 # with fork/exec + threads + Python.
 
 import errno
 import os
 import select
 import signal
 import sys
+
 from datetime import datetime, timedelta
-from progressbar import ProgressBar
-from results import NullTestOutput, TestOutput, escape_cmdline
+
+from .progressbar import ProgressBar
+from .results import (
+    NullTestOutput,
+    TestOutput,
+    escape_cmdline,
+)
 
 
 class Task(object):
     def __init__(self, test, prefix, pid, stdout, stderr):
         self.test = test
         self.cmd = test.get_command(prefix)
         self.pid = pid
         self.stdout = stdout
@@ -181,18 +187,18 @@ def reap_zombies(tasks, timeout):
         returncode = os.WEXITSTATUS(status)
         if os.WIFSIGNALED(status):
             returncode = -os.WTERMSIG(status)
 
         finished.append(
             TestOutput(
                 ended.test,
                 ended.cmd,
-                ''.join(ended.out),
-                ''.join(ended.err),
+                b''.join(ended.out).decode(errors='replace'),
+                b''.join(ended.err).decode(errors='replace'),
                 returncode,
                 (datetime.now() - ended.start).total_seconds(),
                 timed_out(ended, timeout),
                 {'pid': ended.pid}))
     return tasks, finished
 
 
 def kill_undead(tasks, timeout):
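Child stdout/stderr are accumulated as bytes chunks and only decoded when the process is reaped, with errors='replace' so a test emitting invalid UTF-8 yields U+FFFD replacement characters instead of an unhandled UnicodeDecodeError in the harness:

    chunks = [b'partial ', b'output \xff\xfe ', b'tail\n']
    text = b''.join(chunks).decode(errors='replace')
    assert '\ufffd' in text
    assert text.endswith('tail\n')
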
--- a/js/src/tests/lib/tasks_win.py
+++ b/js/src/tests/lib/tasks_win.py
@@ -1,21 +1,23 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 from __future__ import print_function, unicode_literals, division
 
 import subprocess
 import sys
+
 from datetime import datetime, timedelta
-from progressbar import ProgressBar
-from results import NullTestOutput, TestOutput, escape_cmdline
 from threading import Thread
 from Queue import Queue, Empty
 
+from .progressbar import ProgressBar
+from .results import NullTestOutput, TestOutput, escape_cmdline
+
 
 class EndMarker:
     pass
 
 
 class TaskFinishedMarker:
     pass
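Note that "from Queue import Queue, Empty" above is still the Python 2 module name, consistent with the "partial" in the commit message. The rename-tolerant spelling (not part of this patch) would mirror the try/except fallback the patch itself applies to cStringIO in manifestdownload.py:

    try:
        from Queue import Queue, Empty    # Python 2 module name
    except ImportError:
        from queue import Queue, Empty    # renamed to lowercase in Python 3
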
 
--- a/mach
+++ b/mach
@@ -37,17 +37,16 @@ py2commands="
     geckodriver-test
     geckoview-junit
     gradle
     gtest
     hazards
     ide
     import-pr
     install
-    jit-test
     jsapi-tests
     jsshell-bench
     jstestbrowser
     jstests
     marionette-test
     mochitest
     mozbuild-reference
     mozharness
--- a/testing/web-platform/manifestdownload.py
+++ b/testing/web-platform/manifestdownload.py
@@ -1,19 +1,24 @@
 from __future__ import absolute_import
 
 import argparse
 import os
 from datetime import datetime, timedelta
 import tarfile
 import requests
+import six
 import vcs
-from cStringIO import StringIO
 import logging
 
+try:
+    from cStringIO import StringIO as BytesIO
+except ImportError:
+    from io import BytesIO
+
 HEADERS = {'User-Agent': "wpt manifest download"}
 
 
 def get(logger, url, **kwargs):
     logger.debug(url)
     if "headers" not in kwargs:
         kwargs["headers"] = HEADERS
     return requests.get(url, **kwargs)
@@ -112,17 +117,17 @@ def taskcluster_url(logger, commits):
 
     logger.info("Can't find a commit-specific manifest so just using the most "
                 "recent one")
 
     return default
 
 
 def download_manifest(logger, test_paths, commits_func, url_func, force=False):
-    manifest_paths = [item["manifest_path"] for item in test_paths.itervalues()]
+    manifest_paths = [item["manifest_path"] for item in six.itervalues(test_paths)]
 
     if not force and not should_download(logger, manifest_paths):
         return True
 
     commits = commits_func()
 
     url = url_func(logger, commits)
     if not url:
@@ -136,27 +141,27 @@ def download_manifest(logger, test_paths
         logger.warning("Downloading pregenerated manifest failed")
         return False
 
     if req.status_code != 200:
         logger.warning("Downloading pregenerated manifest failed; got"
                         "HTTP status %d" % req.status_code)
         return False
 
-    tar = tarfile.open(mode="r:gz", fileobj=StringIO(req.content))
-    for paths in test_paths.itervalues():
+    tar = tarfile.open(mode="r:gz", fileobj=BytesIO(req.content))
+    for paths in six.itervalues(test_paths):
         try:
             member = tar.getmember(paths["manifest_rel_path"].replace(os.path.sep, "/"))
         except KeyError:
             logger.warning("Failed to find downloaded manifest %s" % paths["manifest_rel_path"])
         else:
             try:
                 logger.debug("Unpacking %s to %s" % (member.name, paths["manifest_path"]))
                 src = tar.extractfile(member)
-                with open(paths["manifest_path"], "w") as dest:
+                with open(paths["manifest_path"], "wb") as dest:
                     dest.write(src.read())
                 src.close()
             except IOError:
                 import traceback
                 logger.warning("Failed to decompress %s:\n%s" % (paths["manifest_rel_path"], traceback.format_exc()))
                 return False
 
         os.utime(paths["manifest_path"], None)
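six.itervalues() papers over dict.itervalues() being gone in Python 3, and BytesIO replaces cStringIO because tarfile needs a binary file object for the gzipped payload. Both in one runnable sketch (the dict contents are made up):

    import io
    import tarfile

    import six

    test_paths = {'/': {'manifest_path': 'a'}, '/mozilla/': {'manifest_path': 'b'}}
    assert sorted(p['manifest_path'] for p in six.itervalues(test_paths)) == ['a', 'b']

    buf = io.BytesIO()                        # binary in-memory file, as tarfile requires
    with tarfile.open(mode='w:gz', fileobj=buf) as tar:
        pass                                  # an empty archive suffices for the demo
    buf.seek(0)
    tarfile.open(mode='r:gz', fileobj=buf).close()
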
--- a/testing/web-platform/manifestupdate.py
+++ b/testing/web-platform/manifestupdate.py
@@ -1,16 +1,17 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 import argparse
 import hashlib
 import imp
 import os
+import six
 import sys
 
 from six.moves import configparser
 
 from mozboot.util import get_state_dir
 
 from mozlog.structured import commandline
 from wptrunner.wptcommandline import set_from_config
@@ -87,17 +88,17 @@ def run(src_root, obj_root, logger=None,
         logger.critical("Config file %s does not exist" % config_path)
         return None
 
     logger.debug("Using config path %s" % config_path)
 
     test_paths = wptcommandline.get_test_paths(
         wptcommandline.config.read(config_path))
 
-    for paths in test_paths.itervalues():
+    for paths in six.itervalues(test_paths):
         if "manifest_path" not in paths:
             paths["manifest_path"] = os.path.join(paths["metadata_path"],
                                                   "MANIFEST.json")
 
     ensure_manifest_directories(logger, test_paths)
 
     local_config = read_local_config(src_wpt_dir)
     for section in ["manifest:upstream", "manifest:mozilla"]:
@@ -120,17 +121,17 @@ def run(src_root, obj_root, logger=None,
                                 update=update,
                                 rebuild=kwargs["rebuild"],
                                 cache_root=kwargs["cache_root"])
 
     return manifests
 
 
 def ensure_manifest_directories(logger, test_paths):
-    for paths in test_paths.itervalues():
+    for paths in six.itervalues(test_paths):
         manifest_dir = os.path.dirname(paths["manifest_path"])
         if not os.path.exists(manifest_dir):
             logger.info("Creating directory %s" % manifest_dir)
             os.makedirs(manifest_dir)
         elif not os.path.isdir(manifest_dir):
             raise IOError("Manifest directory is a file")
 
 
@@ -164,27 +165,27 @@ def generate_config(logger, repo_root, w
 
         parser.set(section, "manifest",
                    os.path.join(dest_path, meta_rel_path, 'MANIFEST.json'))
         parser.set(section, "metadata", os.path.join(wpt_dir, meta_rel_path))
         parser.set(section, "tests", os.path.join(wpt_dir, tests_rel_path))
 
     parser.set('paths', 'prefs', os.path.abspath(os.path.join(wpt_dir, parser.get("paths", "prefs"))))
 
-    with open(dest_config_path, 'wb') as config_file:
+    with open(dest_config_path, 'wt') as config_file:
         parser.write(config_file)
 
     return dest_config_path
 
 
 def load_and_update(logger, wpt_dir, test_paths, rebuild=False, config_dir=None, cache_root=None,
                     update=True):
     rv = {}
     wptdir_hash = hashlib.sha256(os.path.abspath(wpt_dir)).hexdigest()
-    for url_base, paths in test_paths.iteritems():
+    for url_base, paths in six.iteritems(test_paths):
         manifest_path = paths["manifest_path"]
         this_cache_root = os.path.join(cache_root, wptdir_hash, os.path.dirname(paths["manifest_rel_path"]))
         m = manifest.manifest.load_and_update(paths["tests_path"],
                                               manifest_path,
                                               url_base,
                                               update=update,
                                               rebuild=rebuild,
                                               working_copy=True,
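
The 'wb' to 'wt' change above is needed because ConfigParser.write() emits str on Python 3 and raises TypeError on a binary file. A round-trip sketch (the parser class and option names here are assumptions, not taken from the patch):

    import tempfile

    from six.moves import configparser

    parser = configparser.RawConfigParser()
    parser.add_section('paths')
    parser.set('paths', 'prefs', '/tmp/prefs')
    with tempfile.NamedTemporaryFile('wt', suffix='.ini', delete=False) as config_file:
        parser.write(config_file)             # writes str, so text mode is required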