Bug 1125512 - Part 2: Use .format() instead of modulo operator in js test libs. r=terrence
author Tooru Fujisawa <arai_a@mac.com>
Wed, 28 Jan 2015 05:19:30 +0900
changeset 243619 3de67cecd18d3765b1819dedd9f7db2b67d09849
parent 243618 63e93ddb740f2abaf38f63f5ff40f15e59248816
child 243620 0c982514eb25f7d05736a7eb0202d6a35168c00f
push id 7677
push user raliiev@mozilla.com
push date Mon, 23 Feb 2015 18:11:24 +0000
treeherder mozilla-aurora@f531d838c055
reviewers terrence
bugs 1125512
milestone 38.0a1
Bug 1125512 - Part 2: Use .format() instead of modulo operator in js test libs. r=terrence
js/src/tests/compare_bench.py
js/src/tests/lib/manifest.py
js/src/tests/lib/progressbar.py
js/src/tests/lib/results.py
js/src/tests/lib/tasks_unix.py
js/src/tests/lib/tasks_win.py
js/src/tests/lib/terminal_unix.py
js/src/tests/lib/terminal_win.py
js/src/tests/lib/tests.py
js/src/tests/parsemark.py
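
By way of illustration (not part of the changeset), the conversion pattern applied across these files is roughly the following; the variable names and values below are made up:

    # Before: printf-style formatting with the modulo operator.
    t_worst, base_t_best, speedup = 12.34, 15.67, -21.25
    old = 'faster: %6.2fms < baseline %6.2fms (%+6.2f%%)' % (
        t_worst, base_t_best, speedup)

    # After: str.format(); '%6.2f' maps to '{:6.2f}', '%+6.2f' to '{:+6.2f}',
    # and a literal '%' no longer needs doubling (literal braces do, as in
    # parsemark.py's '{{"average_ms": ...}}' template).  Note that '%30s'
    # right-aligns by default while '{:30s}' left-aligns.
    new = 'faster: {:6.2f}ms < baseline {:6.2f}ms ({:+6.2f}%)'.format(
        t_worst, base_t_best, speedup)

    assert old == new  # the numeric conversions behave identically
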
--- a/js/src/tests/compare_bench.py
+++ b/js/src/tests/compare_bench.py
@@ -1,51 +1,55 @@
 #!/usr/bin/env python2.4
 """usage: %progname candidate_path baseline_path
 """
 
+from __future__ import print_function
+
 import json
 import optparse
-from contextlib import nested
 from operator import itemgetter
 
 
 def avg(seq):
     return sum(seq) / len(seq)
 
 
 def compare(current, baseline):
     percent_speedups = []
     for key, current_result in current.iteritems():
         try:
             baseline_result = baseline[key]
         except KeyError:
-            print key, 'missing from baseline'
+            print(key, 'missing from baseline')
             continue
         val_getter = itemgetter('average_ms', 'stddev_ms')
         base_avg, base_stddev = val_getter(baseline_result)
         current_avg, current_stddev = val_getter(current_result)
-        t_best, t_worst = current_avg - current_stddev, current_avg + current_stddev
-        base_t_best, base_t_worst = base_avg - base_stddev, base_avg + base_stddev
-        fmt = '%30s: %s'
-        if t_worst < base_t_best: # Worst takes less time (better) than baseline's best.
+        t_best = current_avg - current_stddev
+        t_worst = current_avg + current_stddev
+        base_t_best = base_avg - base_stddev
+        base_t_worst = base_avg + base_stddev
+        if t_worst < base_t_best:
+            # Worst takes less time (better) than baseline's best.
             speedup = -((t_worst - base_t_best) / base_t_best) * 100
-            result = 'faster: %6.2fms < baseline %6.2fms (%+6.2f%%)' % \
-                    (t_worst, base_t_best, speedup)
+            result = 'faster: {:6.2f}ms < baseline {:6.2f}ms ({:+6.2f}%)'.format(
+                t_worst, base_t_best, speedup)
             percent_speedups.append(speedup)
-        elif t_best > base_t_worst: # Best takes more time (worse) than baseline's worst.
+        elif t_best > base_t_worst:
+            # Best takes more time (worse) than baseline's worst.
             slowdown = -((t_best - base_t_worst) / base_t_worst) * 100
-            result = 'SLOWER: %6.2fms > baseline %6.2fms (%+6.2f%%) ' % \
-                    (t_best, base_t_worst, slowdown)
+            result = 'SLOWER: {:6.2f}ms > baseline {:6.2f}ms ({:+6.2f}%) '.format(
+                t_best, base_t_worst, slowdown)
             percent_speedups.append(slowdown)
         else:
             result = 'Meh.'
-        print '%30s: %s' % (key, result)
+        print('{:30s}: {}'.format(key, result))
     if percent_speedups:
-        print 'Average speedup: %.2f%%' % avg(percent_speedups)
+        print('Average speedup: {:.2f}%'.format(avg(percent_speedups)))
 
 
 def compare_immediate(current_map, baseline_path):
     baseline_file = open(baseline_path)
     baseline_map = json.load(baseline_file)
     baseline_file.close()
     compare(current_map, baseline_map)
 
--- a/js/src/tests/lib/manifest.py
+++ b/js/src/tests/lib/manifest.py
@@ -355,17 +355,18 @@ def load(location, requested_paths, excl
         if basename in EXCLUDED:
             continue
 
         # Get the full path and relative location of the file.
         filename = os.path.join(root, basename)
         fullpath = os.path.join(location, filename)
 
         # If any tests are requested by name, skip tests that do not match.
-        if requested_paths and not any(req in filename for req in requested_paths):
+        if requested_paths \
+           and not any(req in filename for req in requested_paths):
             continue
 
         # Skip excluded tests.
         if filename in excluded_paths:
             continue
 
         # Skip empty files.
         statbuf = os.stat(fullpath)
--- a/js/src/tests/lib/progressbar.py
+++ b/js/src/tests/lib/progressbar.py
@@ -44,35 +44,36 @@ class ProgressBar(object):
         # Record prior for poke.
         self.prior = (current, data)
         self.atLineStart = False
 
         # Build counters string.
         sys.stdout.write('\r[')
         for layout in self.counters_fmt:
             Terminal.set_color(layout['color'])
-            sys.stdout.write(('%' + str(self.limit_digits) + 'd') % data[layout['value']])
+            sys.stdout.write(('{:' + str(self.limit_digits) + 'd}').format(
+                data[layout['value']]))
             Terminal.reset_color()
             if layout != self.counters_fmt[-1]:
                 sys.stdout.write('|')
             else:
                 sys.stdout.write('] ')
 
         # Build the bar.
         pct = int(100.0 * current / self.limit)
-        sys.stdout.write('%3d%% ' % pct)
+        sys.stdout.write('{:3d}% '.format(pct))
 
         barlen = int(1.0 * self.barlen * current / self.limit) - 1
         bar = '=' * barlen + '>' + ' ' * (self.barlen - barlen - 1)
         sys.stdout.write(bar + '|')
 
         # Update the bar.
         dt = datetime.now() - self.t0
         dt = dt.seconds + dt.microseconds * 1e-6
-        sys.stdout.write('%6.1fs' % dt)
+        sys.stdout.write('{:6.1f}s'.format(dt))
         Terminal.clear_right()
 
         # Force redisplay, since we didn't write a \n.
         sys.stdout.flush()
 
     def poke(self):
         if not self.prior:
             return
--- a/js/src/tests/lib/results.py
+++ b/js/src/tests/lib/results.py
@@ -1,17 +1,17 @@
 from __future__ import print_function
 
 import re
 from progressbar import NullProgressBar, ProgressBar
 import pipes
 
 # subprocess.list2cmdline does not properly escape for sh-like shells
 def escape_cmdline(args):
-    return ' '.join([ pipes.quote(a) for a in args ])
+    return ' '.join([pipes.quote(a) for a in args])
 
 class TestOutput:
     """Output from a test run."""
     def __init__(self, test, cmd, out, err, rc, dt, timed_out):
         self.test = test   # Test
         self.cmd = cmd     # str:   command line of test
         self.out = out     # str:   stdout
         self.err = err     # str:   stderr
@@ -71,17 +71,18 @@ class TestResult:
                 failures += 1
                 msg = line[len(' FAILED! '):]
                 results.append((cls.FAIL, msg))
             elif line.startswith(' PASSED!'):
                 passes += 1
                 msg = line[len(' PASSED! '):]
                 results.append((cls.PASS, msg))
             else:
-                m = re.match('--- NOTE: IN THIS TESTCASE, WE EXPECT EXIT CODE ((?:-|\\d)+) ---', line)
+                m = re.match('--- NOTE: IN THIS TESTCASE, WE EXPECT EXIT CODE'
+                             ' ((?:-|\\d)+) ---', line)
                 if m:
                     expected_rcs.append(int(m.group(1)))
 
         if rc and not rc in expected_rcs:
             if rc == 3:
                 result = cls.FAIL
             else:
                 result = cls.CRASH
@@ -113,44 +114,50 @@ class ResultsSink:
             ]
             self.pb = ProgressBar(testcount, fmt)
 
     def push(self, output):
         if output.timed_out:
             self.counts['TIMEOUT'] += 1
         if isinstance(output, NullTestOutput):
             if self.options.tinderbox:
-                self.print_tinderbox_result('TEST-KNOWN-FAIL', output.test.path, time=output.dt, skip=True)
+                self.print_tinderbox_result(
+                    'TEST-KNOWN-FAIL', output.test.path, time=output.dt,
+                    skip=True)
             self.counts['SKIP'] += 1
             self.n += 1
         else:
             result = TestResult.from_output(output)
             tup = (result.result, result.test.expect, result.test.random)
             dev_label = self.LABELS[tup][1]
             if output.timed_out:
                 dev_label = 'TIMEOUTS'
             self.groups.setdefault(dev_label, []).append(result.test.path)
 
             if dev_label == 'REGRESSIONS':
-                show_output = self.options.show_output or not self.options.no_show_failed
+                show_output = self.options.show_output \
+                              or not self.options.no_show_failed
             elif dev_label == 'TIMEOUTS':
                 show_output = self.options.show_output
             else:
-                show_output = self.options.show_output and not self.options.failed_only
+                show_output = self.options.show_output \
+                              and not self.options.failed_only
 
             if dev_label in ('REGRESSIONS', 'TIMEOUTS'):
                 show_cmd = self.options.show_cmd
             else:
-                show_cmd = self.options.show_cmd and not self.options.failed_only
+                show_cmd = self.options.show_cmd \
+                           and not self.options.failed_only
 
             if show_output or show_cmd:
                 self.pb.beginline()
 
                 if show_output:
-                    print('## %s: rc = %d, run time = %f' % (output.test.path, output.rc, output.dt), file=self.fp)
+                    print('## {}: rc = {:d}, run time = {:f}'.format(
+                        output.test.path, output.rc, output.dt), file=self.fp)
 
                 if show_cmd:
                     print(escape_cmdline(output.cmd), file=self.fp)
 
                 if show_output:
                     self.fp.write(output.out)
                     self.fp.write(output.err)
 
@@ -161,29 +168,33 @@ class ResultsSink:
             elif result.test.expect and not result.test.random:
                 self.counts['FAIL'] += 1
             else:
                 self.counts['SKIP'] += 1
 
             if self.options.tinderbox:
                 if len(result.results) > 1:
                     for sub_ok, msg in result.results:
-                        label = self.LABELS[(sub_ok, result.test.expect, result.test.random)][0]
+                        tup = (sub_ok, result.test.expect, result.test.random)
+                        label = self.LABELS[tup][0]
                         if label == 'TEST-UNEXPECTED-PASS':
                             label = 'TEST-PASS (EXPECTED RANDOM)'
-                        self.print_tinderbox_result(label, result.test.path, time=output.dt, message=msg)
-                self.print_tinderbox_result(self.LABELS[
-                    (result.result, result.test.expect, result.test.random)][0],
-                    result.test.path, time=output.dt)
+                        self.print_tinderbox_result(
+                            label, result.test.path, time=output.dt,
+                            message=msg)
+                tup = (result.result, result.test.expect, result.test.random)
+                self.print_tinderbox_result(
+                    self.LABELS[tup][0], result.test.path, time=output.dt)
                 return
 
             if dev_label:
                 def singular(label):
                     return "FIXED" if label == "FIXES" else label[:-1]
-                self.pb.message("%s - %s" % (singular(dev_label), output.test.path))
+                self.pb.message("{} - {}".format(singular(dev_label),
+                                                 output.test.path))
 
         self.pb.update(self.n, self.counts)
 
     def finish(self, completed):
         self.pb.finish(completed)
         if not self.options.tinderbox:
             self.list(completed)
 
@@ -209,39 +220,40 @@ class ResultsSink:
 
     def list(self, completed):
         for label, paths in sorted(self.groups.items()):
             if label == '':
                 continue
 
             print(label)
             for path in paths:
-                print('    %s' % path)
+                print('    {}'.format(path))
 
         if self.options.failure_file:
-              failure_file = open(self.options.failure_file, 'w')
-              if not self.all_passed():
-                  if 'REGRESSIONS' in self.groups:
-                      for path in self.groups['REGRESSIONS']:
-                          print(path, file=failure_file)
-                  if 'TIMEOUTS' in self.groups:
-                      for path in self.groups['TIMEOUTS']:
-                          print(path, file=failure_file)
-              failure_file.close()
+            failure_file = open(self.options.failure_file, 'w')
+            if not self.all_passed():
+                if 'REGRESSIONS' in self.groups:
+                    for path in self.groups['REGRESSIONS']:
+                        print(path, file=failure_file)
+                if 'TIMEOUTS' in self.groups:
+                    for path in self.groups['TIMEOUTS']:
+                        print(path, file=failure_file)
+            failure_file.close()
 
         suffix = '' if completed else ' (partial run -- interrupted by user)'
         if self.all_passed():
             print('PASS' + suffix)
         else:
             print('FAIL' + suffix)
 
     def all_passed(self):
         return 'REGRESSIONS' not in self.groups and 'TIMEOUTS' not in self.groups
 
-    def print_tinderbox_result(self, label, path, message=None, skip=False, time=None):
+    def print_tinderbox_result(self, label, path, message=None, skip=False,
+                               time=None):
         result = label
         result += " | " + path
         result += " |" + self.options.shell_args
         if message:
             result += " | " + message
         if skip:
             result += ' | (SKIP)'
         if time > self.options.timeout:
--- a/js/src/tests/lib/tasks_unix.py
+++ b/js/src/tests/lib/tasks_unix.py
@@ -12,17 +12,17 @@ class Task(object):
         self.cmd = test.get_command(test.js_cmd_prefix)
         self.pid = pid
         self.stdout = stdout
         self.stderr = stderr
         self.start = datetime.now()
         self.out = []
         self.err = []
 
-def spawn_test(test, passthrough = False):
+def spawn_test(test, passthrough=False):
     """Spawn one child, return a task struct."""
     if not passthrough:
         (rout, wout) = os.pipe()
         (rerr, werr) = os.pipe()
 
         rv = os.fork()
 
         # Parent.
@@ -40,17 +40,18 @@ def spawn_test(test, passthrough = False
 
     cmd = test.get_command(test.js_cmd_prefix)
     os.execvp(cmd[0], cmd)
 
 def total_seconds(td):
     """
     Return the total number of seconds contained in the duration as a float
     """
-    return (float(td.microseconds) + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
+    return (float(td.microseconds) \
+            + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
 
 def get_max_wait(tasks, results, timeout):
     """
     Return the maximum time we can wait before any task should time out.
     """
 
     # If we have a progress-meter, we need to wake up to update it frequently.
     wait = results.pb.update_granularity()
@@ -111,17 +112,17 @@ def remove_task(tasks, pid):
     Return a pair with the removed task and the new, modified tasks list.
     """
     index = None
     for i, t in enumerate(tasks):
         if t.pid == pid:
             index = i
             break
     else:
-        raise KeyError("No such pid: %s" % pid)
+        raise KeyError("No such pid: {}".format(pid))
 
     out = tasks[index]
     tasks.pop(index)
     return out
 
 def timed_out(task, timeout):
     """
     Return True if the given task has been running for longer than |timeout|.
@@ -156,23 +157,23 @@ def reap_zombies(tasks, results, timeout
         os.close(ended.stdout)
         os.close(ended.stderr)
 
         returncode = os.WEXITSTATUS(status)
         if os.WIFSIGNALED(status):
             returncode = -os.WTERMSIG(status)
 
         out = TestOutput(
-                   ended.test,
-                   ended.cmd,
-                   ''.join(ended.out),
-                   ''.join(ended.err),
-                   returncode,
-                   total_seconds(datetime.now() - ended.start),
-                   timed_out(ended, timeout))
+            ended.test,
+            ended.cmd,
+            ''.join(ended.out),
+            ''.join(ended.err),
+            returncode,
+            total_seconds(datetime.now() - ended.start),
+            timed_out(ended, timeout))
         results.push(out)
     return tasks
 
 def kill_undead(tasks, results, timeout):
     """
     Signal all children that are over the given timeout.
     """
     for task in tasks:
--- a/js/src/tests/lib/tasks_win.py
+++ b/js/src/tests/lib/tasks_win.py
@@ -2,30 +2,32 @@
 
 from __future__ import print_function
 
 from threading import Thread, Lock
 from Queue import Queue, Empty
 from datetime import datetime
 
 class Source:
-    def __init__(self, task_list, results, timeout, verbose = False):
+    def __init__(self, task_list, results, timeout, verbose=False):
         self.tasks = Queue()
         for task in task_list:
             self.tasks.put_nowait(task)
 
         self.results = results
         self.timeout = timeout
         self.verbose = verbose
 
     def start(self, worker_count):
         t0 = datetime.now()
 
         sink = Sink(self.results)
-        self.workers = [ Worker(_+1, self.tasks, sink, self.timeout, self.verbose) for _ in range(worker_count) ]
+        self.workers = [Worker(_ + 1, self.tasks, sink, self.timeout,
+                               self.verbose)
+                        for _ in range(worker_count)]
         if self.verbose:
             print('[P] Starting workers.')
         for w in self.workers:
             w.t0 = t0
             w.start()
         ans = self.join_workers()
         if self.verbose:
             print('[P] Finished.')
@@ -60,31 +62,32 @@ class Worker(Thread):
         self.id = id
         self.tasks = tasks
         self.sink = sink
         self.timeout = timeout
         self.verbose = verbose
 
         self.thread = None
         self.stop = False
+        self.t0 = 0
 
     def log(self, msg):
         if self.verbose:
             dd = datetime.now() - self.t0
             dt = dd.seconds + 1e-6 * dd.microseconds
-            print('[W%d %.3f] %s' % (self.id, dt, msg))
+            print('[W{:d} {:.3f}] {}'.format(self.id, dt, msg))
 
     def run(self):
         try:
             while True:
                 if self.stop:
                     break
                 self.log('Get next task.')
                 task = self.tasks.get(False)
-                self.log('Start task %s.'%str(task))
+                self.log('Start task {}.'.format(str(task)))
                 result = task.run(task.js_cmd_prefix, self.timeout)
                 self.log('Finished task.')
                 self.sink.push(result)
                 self.log('Pushed result.')
         except Empty:
             pass
 
 def run_all_tests(tests, results, options):
--- a/js/src/tests/lib/terminal_unix.py
+++ b/js/src/tests/lib/terminal_unix.py
@@ -21,17 +21,18 @@ class Terminal(object):
         color: str - color definition string
         """
         mod = Terminal.NORMAL_INTENSITY
         if color.startswith('bright'):
             mod = Terminal.BRIGHT_INTENSITY
             color = color[len('bright'):]
         color_code = Terminal.COLOR[color]
 
-        sys.stdout.write(cls.ESCAPE + color_code + cls.SEPARATOR + mod + cls.COLOR_CODE)
+        sys.stdout.write(cls.ESCAPE + color_code + cls.SEPARATOR + mod
+                         + cls.COLOR_CODE)
 
     @classmethod
     def reset_color(cls):
         sys.stdout.write(cls.ESCAPE + cls.RESET + cls.COLOR_CODE)
 
     @classmethod
     def clear_right(cls):
         sys.stdout.write(cls.ESCAPE + cls.CLEAR_RIGHT_CODE)
--- a/js/src/tests/lib/terminal_win.py
+++ b/js/src/tests/lib/terminal_win.py
@@ -10,37 +10,37 @@ GetConsoleScreenBufferInfo.
 """
 
 from ctypes import windll, Structure, c_short, c_ushort, byref
 
 SHORT = c_short
 WORD = c_ushort
 
 class COORD(Structure):
-  """struct in wincon.h."""
-  _fields_ = [
-    ("X", SHORT),
-    ("Y", SHORT)]
+    """struct in wincon.h."""
+    _fields_ = [
+        ("X", SHORT),
+        ("Y", SHORT)]
 
 class SMALL_RECT(Structure):
-  """struct in wincon.h."""
-  _fields_ = [
-    ("Left", SHORT),
-    ("Top", SHORT),
-    ("Right", SHORT),
-    ("Bottom", SHORT)]
+    """struct in wincon.h."""
+    _fields_ = [
+        ("Left", SHORT),
+        ("Top", SHORT),
+        ("Right", SHORT),
+        ("Bottom", SHORT)]
 
 class CONSOLE_SCREEN_BUFFER_INFO(Structure):
-  """struct in wincon.h."""
-  _fields_ = [
-    ("dwSize", COORD),
-    ("dwCursorPosition", COORD),
-    ("wAttributes", WORD),
-    ("srWindow", SMALL_RECT),
-    ("dwMaximumWindowSize", COORD)]
+    """struct in wincon.h."""
+    _fields_ = [
+        ("dwSize", COORD),
+        ("dwCursorPosition", COORD),
+        ("wAttributes", WORD),
+        ("srWindow", SMALL_RECT),
+        ("dwMaximumWindowSize", COORD)]
 
 # winbase.h
 STD_INPUT_HANDLE = -10
 STD_OUTPUT_HANDLE = -11
 STD_ERROR_HANDLE = -12
 
 # wincon.h
 FOREGROUND_BLACK     = 0x0000
@@ -63,19 +63,19 @@ BACKGROUND_YELLOW    = 0x0060
 BACKGROUND_GREY      = 0x0070
 BACKGROUND_INTENSITY = 0x0080 # background color is intensified.
 
 stdout_handle = windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
 SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
 GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
 
 def get_text_attr():
-  csbi = CONSOLE_SCREEN_BUFFER_INFO()
-  GetConsoleScreenBufferInfo(stdout_handle, byref(csbi))
-  return csbi.wAttributes
+    csbi = CONSOLE_SCREEN_BUFFER_INFO()
+    GetConsoleScreenBufferInfo(stdout_handle, byref(csbi))
+    return csbi.wAttributes
 
 DEFAULT_COLORS = get_text_attr()
 
 class Terminal(object):
     COLOR = {
         'black': 0x0000,
         'blue': 0x0001,
         'green': 0x0002,
--- a/js/src/tests/lib/tests.py
+++ b/js/src/tests/lib/tests.py
@@ -4,28 +4,30 @@
 # metadata, and know how to run the tests and determine failures.
 
 import datetime, os, sys, time
 from subprocess import Popen, PIPE
 from threading import Thread
 
 from results import TestOutput
 
-# When run on tbpl, we run each test multiple times with the following arguments.
+# When run on tbpl, we run each test multiple times with the following
+# arguments.
 TBPL_FLAGS = [
     [], # no flags, normal baseline and ion
     ['--ion-eager', '--ion-offthread-compile=off'], # implies --baseline-eager
-    ['--ion-eager', '--ion-offthread-compile=off', '--ion-check-range-analysis', '--no-sse3', '--no-threads'],
+    ['--ion-eager', '--ion-offthread-compile=off',
+     '--ion-check-range-analysis', '--no-sse3', '--no-threads'],
     ['--baseline-eager'],
     ['--baseline-eager', '--no-fpu'],
     ['--no-baseline', '--no-ion'],
 ]
 
 def do_run_cmd(cmd):
-    l = [ None, None ]
+    l = [None, None]
     th_run_cmd(cmd, l)
     return l[1]
 
 def set_limits():
     # resource module not supported on all platforms
     try:
         import resource
         GB = 2**30
@@ -50,17 +52,17 @@ def th_run_cmd(cmd, l):
     dd = t1-t0
     dt = dd.seconds + 1e-6 * dd.microseconds
     l[1] = (out, err, p.returncode, dt)
 
 def run_cmd(cmd, timeout=60.0):
     if timeout is None:
         return do_run_cmd(cmd)
 
-    l = [ None, None ]
+    l = [None, None]
     timed_out = False
     th = Thread(target=th_run_cmd, args=(cmd, l))
     th.start()
     th.join(timeout)
     while th.isAlive():
         if l[0] is not None:
             try:
                 # In Python 3, we could just do l[0].kill().
@@ -73,29 +75,33 @@ def run_cmd(cmd, timeout=60.0):
                 # Expecting a "No such process" error
                 pass
     th.join()
     return l[1] + (timed_out,)
 
 class Test(object):
     """A runnable test."""
     def __init__(self, path):
-        self.path = path         # str:  path of JS file relative to tests root dir
+        self.path = path     # str:  path of JS file relative to tests root dir
+        self.options = []    # [str]: Extra options to pass to the shell
 
     @staticmethod
     def prefix_command(path):
-        """Return the '-f shell.js' options needed to run a test with the given path."""
+        """Return the '-f shell.js' options needed to run a test with the given
+        path."""
         if path == '':
-            return [ '-f', 'shell.js' ]
+            return ['-f', 'shell.js']
         head, base = os.path.split(path)
-        return Test.prefix_command(head) + [ '-f', os.path.join(path, 'shell.js') ]
+        return Test.prefix_command(head) \
+            + ['-f', os.path.join(path, 'shell.js')]
 
     def get_command(self, js_cmd_prefix):
         dirname, filename = os.path.split(self.path)
-        cmd = js_cmd_prefix + self.options + Test.prefix_command(dirname) + [ '-f', self.path ]
+        cmd = js_cmd_prefix + self.options + Test.prefix_command(dirname) \
+              + ['-f', self.path]
         return cmd
 
     def run(self, js_cmd_prefix, timeout=30.0):
         cmd = self.get_command(js_cmd_prefix)
         out, err, rc, dt, timed_out = run_cmd(cmd, timeout)
         return TestOutput(self, cmd, out, err, rc, dt, timed_out)
 
 class TestCase(Test):
@@ -103,17 +109,16 @@ class TestCase(Test):
     js_cmd_prefix = None
 
     def __init__(self, path):
         Test.__init__(self, path)
         self.enable = True   # bool: True => run test, False => don't run
         self.expect = True   # bool: expected result, True => pass
         self.random = False  # bool: True => ignore output as 'random'
         self.slow = False    # bool: True => test may run slowly
-        self.options = []    # [str]: Extra options to pass to the shell
 
         # The terms parsed to produce the above properties.
         self.terms = None
 
         # The tag between |...| in the test header.
         self.tag = None
 
         # Anything occuring after -- in the test header.
--- a/js/src/tests/parsemark.py
+++ b/js/src/tests/parsemark.py
@@ -4,17 +4,17 @@
 
 Pulls performance data on parsing via the js shell.
 Displays the average number of milliseconds it took to parse each file.
 
 For comparison, something apparently approximating a t-test is performed:
 "Faster" means that:
 
     t_baseline_goodrun = (t_baseline_avg - t_baseline_stddev)
-    t_current_badrun = (t_current_avg + t_current_stddev) 
+    t_current_badrun = (t_current_avg + t_current_stddev)
     t_current_badrun < t_baseline_goodrun
 
 Effectively, a bad run from the current data is better than a good run from the
 baseline data, we're probably faster. A similar computation is used for
 determining the "slower" designation.
 
 Arguments:
   shellpath             executable JavaScript shell
@@ -67,63 +67,71 @@ def stddev(seq, mean):
     diffs = ((float(item) - mean) ** 2 for item in seq)
     return math.sqrt(sum(diffs) / len(seq))
 
 
 def bench(shellpath, filepath, warmup_runs, counted_runs, stfu=False):
     """Return a list of milliseconds for the counted runs."""
     assert '"' not in filepath
     code = JS_CODE_TEMPLATE.substitute(filepath=filepath,
-            warmup_run_count=warmup_runs, real_run_count=counted_runs)
+                                       warmup_run_count=warmup_runs,
+                                       real_run_count=counted_runs)
     proc = subp.Popen([shellpath, '-e', code], stdout=subp.PIPE)
     stdout, _ = proc.communicate()
     milliseconds = [float(val) for val in stdout.split(',')]
     mean = avg(milliseconds)
     sigma = stddev(milliseconds, mean)
     if not stfu:
         print('Runs:', [int(ms) for ms in milliseconds])
         print('Mean:', mean)
-        print('Stddev: %.2f (%.2f%% of mean)' % (sigma, sigma / mean * 100))
+        print('Stddev: {:.2f} ({:.2f}% of mean)'.format(
+            sigma, sigma / mean * 100))
     return mean, sigma
 
 
 def parsemark(filepaths, fbench, stfu=False):
     """:param fbench: fbench(filename) -> float"""
     bench_map = {} # {filename: (avg, stddev)}
     for filepath in filepaths:
         filename = os.path.split(filepath)[-1]
         if not stfu:
-            print('Parsemarking %s...' % filename)
+            print('Parsemarking {}...'.format(filename))
         bench_map[filename] = fbench(filepath)
     print('{')
     for i, (filename, (avg, stddev)) in enumerate(bench_map.iteritems()):
         assert '"' not in filename
-        fmt = '    %30s: {"average_ms": %6.2f, "stddev_ms": %6.2f}'
+        fmt = '    {:30s}: {{"average_ms": {:6.2f}, "stddev_ms": {:6.2f}}}'
         if i != len(bench_map) - 1:
             fmt += ','
-        filename_str = '"%s"' % filename
-        print(fmt % (filename_str, avg, stddev))
+        filename_str = '"{}"'.format(filename)
+        print(fmt.format(filename_str, avg, stddev))
     print('}')
     return dict((filename, dict(average_ms=avg, stddev_ms=stddev))
-            for filename, (avg, stddev) in bench_map.iteritems())
+                for filename, (avg, stddev) in bench_map.iteritems())
 
 
 def main():
     parser = optparse.OptionParser(usage=__doc__.strip())
     parser.add_option('-w', '--warmup-runs', metavar='COUNT', type=int,
-            default=5, help='used to minimize test instability [%default]')
+                      default=5,
+                      help='used to minimize test instability [%default]')
     parser.add_option('-c', '--counted-runs', metavar='COUNT', type=int,
-            default=50, help='timed data runs that count towards the average [%default]')
-    parser.add_option('-s', '--shell', metavar='PATH', help='explicit shell '
-            'location; when omitted, will look in likely places')
+                      default=50,
+                      help='timed data runs that count towards the average'
+                      ' [%default]')
+    parser.add_option('-s', '--shell', metavar='PATH',
+                      help='explicit shell location; when omitted, will look'
+                      ' in likely places')
     parser.add_option('-b', '--baseline', metavar='JSON_PATH',
-            dest='baseline_path', help='json file with baseline values to '
-            'compare against')
+                      dest='baseline_path',
+                      help='json file with baseline values to '
+                      'compare against')
     parser.add_option('-q', '--quiet', dest='stfu', action='store_true',
-            default=False, help='only print JSON to stdout [%default]')
+                      default=False,
+                      help='only print JSON to stdout [%default]')
     options, args = parser.parse_args()
     try:
         shellpath = args.pop(0)
     except IndexError:
         parser.print_help()
         print()
         print('error: shellpath required', file=sys.stderr)
         return -1
@@ -137,20 +145,23 @@ def main():
     if not shellpath or not os.path.exists(shellpath):
         print('error: could not find shell:', shellpath, file=sys.stderr)
         return -1
     if options.baseline_path:
         if not os.path.isfile(options.baseline_path):
             print('error: baseline file does not exist', file=sys.stderr)
             return -1
         if not compare_bench:
-            print('error: JSON support is missing, cannot compare benchmarks', file=sys.stderr)
+            print('error: JSON support is missing, cannot compare benchmarks',
+                  file=sys.stderr)
             return -1
     benchfile = lambda filepath: bench(shellpath, filepath,
-            options.warmup_runs, options.counted_runs, stfu=options.stfu)
+                                       options.warmup_runs,
+                                       options.counted_runs,
+                                       stfu=options.stfu)
     bench_map = parsemark(gen_filepaths(dirpath), benchfile, options.stfu)
     if options.baseline_path:
         compare_bench.compare_immediate(bench_map, options.baseline_path)
     return 0
 
 
 if __name__ == '__main__':
     sys.exit(main())
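
As a footnote, a small worked example (illustrative numbers only) of the "faster"/"slower" criterion described in parsemark.py's docstring and implemented in compare_bench.py's compare():

    # "Faster" means the current run's worst plausible time (mean + stddev)
    # still beats the baseline's best plausible time (mean - stddev).
    current_avg, current_stddev = 40.0, 2.0   # current: worst case 42.0 ms
    base_avg, base_stddev = 50.0, 3.0         # baseline: best case 47.0 ms

    t_worst = current_avg + current_stddev    # 42.0
    base_t_best = base_avg - base_stddev      # 47.0
    assert t_worst < base_t_best              # 42.0 < 47.0 -> "faster"

    speedup = -((t_worst - base_t_best) / base_t_best) * 100
    print('faster: {:6.2f}ms < baseline {:6.2f}ms ({:+6.2f}%)'.format(
        t_worst, base_t_best, speedup))       # ->  42.00ms < 47.00ms (+10.64%)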