Bug 1464869 - Run autopep8 on js/ r=jorendorff
author Sylvestre Ledru <sledru@mozilla.com>
Sat, 26 May 2018 05:29:06 -0700
changeset 475765 4924d6ee1b83a0a00acff3c40b0ba95e4914231c
parent 475764 f2c1f77e2a723d7315acdc689cdb504ba1c722cc
child 475766 fd9de8dd00a40f86c3e82ce2c1ec4b182c38be08
push id 9374
push user jlund@mozilla.com
push date Mon, 18 Jun 2018 21:43:20 +0000
treeherder mozilla-beta@160e085dfb0b
reviewers jorendorff
bugs 1464869
milestone 62.0a1
Bug 1464869 - Run autopep8 on js/ r=jorendorff MozReview-Commit-ID: GtOMQZ0K2bv
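
The change is mechanical: autopep8 reformats the Python files under js/ to PEP 8 style without altering behavior. The exact command-line options are not recorded in the commit message, so the rule names used below are inferred from the diff itself. The bulk of the churn is three fixes: comma-separated imports split one per line (E401), two-space indentation widened to four, and blank lines normalized around top-level definitions (E302). A minimal sketch of the resulting shape; the function is illustrative, not taken from the tree:

    # E401: one module per import line (was: import re, sys, os, subprocess)
    import re
    import sys


    # E302: two blank lines before a top-level def (was: one)
    def to_c_array(text):
        # indentation is now a multiple of four spaces (was: two)
        return ", ".join(str(ord(ch)) for ch in text)

The files touched by the run: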
js/src/builtin/embedjs.py
js/src/builtin/intl/make_intl_data.py
js/src/ctypes/libffi/generate-darwin-source-and-headers.py
js/src/devtools/automation/autospider.py
js/src/devtools/gc/gc-test.py
js/src/devtools/rootAnalysis/analyze.py
js/src/devtools/rootAnalysis/explain.py
js/src/devtools/rootAnalysis/run-test.py
js/src/devtools/rootAnalysis/t/hazards/test.py
js/src/devtools/rootAnalysis/t/sixgill-tree/test.py
js/src/devtools/rootAnalysis/t/sixgill.py
js/src/devtools/rootAnalysis/t/testlib.py
js/src/frontend/GenerateReservedWords.py
js/src/gc/GenerateStatsPhases.py
js/src/gdb/lib-for-tests/catcher.py
js/src/gdb/lib-for-tests/prologue.py
js/src/gdb/mozilla/ExecutableAllocator.py
js/src/gdb/mozilla/GCCellPtr.py
js/src/gdb/mozilla/Interpreter.py
js/src/gdb/mozilla/IonGraph.py
js/src/gdb/mozilla/JSObject.py
js/src/gdb/mozilla/JSString.py
js/src/gdb/mozilla/JSSymbol.py
js/src/gdb/mozilla/Root.py
js/src/gdb/mozilla/asmjs.py
js/src/gdb/mozilla/autoload.py
js/src/gdb/mozilla/jsid.py
js/src/gdb/mozilla/jsval.py
js/src/gdb/mozilla/prettyprinters.py
js/src/gdb/mozilla/unwind.py
js/src/gdb/progressbar.py
js/src/gdb/run-tests.py
js/src/gdb/taskpool.py
js/src/gdb/tests/test-ExecutableAllocator.py
js/src/gdb/tests/test-Interpreter.py
js/src/gdb/tests/test-Root.py
js/src/gdb/tests/test-prettyprinters.py
js/src/gdb/tests/test-unwind.py
js/src/gdb/tests/typedef-printers.py
js/src/jit-test/jit_test.py
js/src/jit/GenerateOpcodeFiles.py
js/src/jit/arm/gen-double-encoder-table.py
js/src/tests/jstests.py
js/src/tests/lib/jittests.py
js/src/tests/lib/manifest.py
js/src/tests/lib/progressbar.py
js/src/tests/lib/results.py
js/src/tests/lib/structuredlog.py
js/src/tests/lib/tasks_unix.py
js/src/tests/lib/terminal_unix.py
js/src/tests/lib/terminal_win.py
js/src/tests/lib/tests.py
js/src/tests/non262/String/make-normalize-generateddata-input.py
js/src/tests/parsemark.py
js/src/tests/test/run.py
js/src/tests/test262-export.py
js/src/tests/test262-update.py
js/src/util/make_unicode.py
js/src/vm/make_opcode_doc.py
js/src/vm/opcode.py
--- a/js/src/builtin/embedjs.py
+++ b/js/src/builtin/embedjs.py
@@ -32,34 +32,40 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 # This utility converts JS files containing self-hosted builtins into a C
 # header file that can be embedded into SpiderMonkey.
 #
 # It uses the C preprocessor to process its inputs.
 
 from __future__ import with_statement
-import re, sys, os, subprocess
+import re
+import sys
+import os
+import subprocess
 import shlex
 import which
 import buildconfig
 
+
 def ToCAsciiArray(lines):
-  result = []
-  for chr in lines:
-    value = ord(chr)
-    assert value < 128
-    result.append(str(value))
-  return ", ".join(result)
+    result = []
+    for chr in lines:
+        value = ord(chr)
+        assert value < 128
+        result.append(str(value))
+    return ", ".join(result)
+
 
 def ToCArray(lines):
-  result = []
-  for chr in lines:
-    result.append(str(ord(chr)))
-  return ", ".join(result)
+    result = []
+    for chr in lines:
+        result.append(str(ord(chr)))
+    return ", ".join(result)
+
 
 HEADER_TEMPLATE = """\
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 namespace js {
 namespace %(namespace)s {
@@ -73,88 +79,96 @@ namespace %(namespace)s {
 
     uint32_t GetRawScriptsSize() {
         return %(raw_total_length)i;
     }
 } // selfhosted
 } // js
 """
 
+
 def embed(cxx, preprocessorOption, cppflags, msgs, sources, c_out, js_out, namespace, env):
-  combinedSources = '\n'.join([msgs] + ['#include "%(s)s"' % { 's': source } for source in sources])
-  args = cppflags + ['-D%(k)s=%(v)s' % { 'k': k, 'v': env[k] } for k in env]
-  preprocessed = preprocess(cxx, preprocessorOption, combinedSources, args)
-  processed = '\n'.join([line for line in preprocessed.splitlines() if \
-                         (line.strip() and not line.startswith('#'))])
+    combinedSources = '\n'.join([msgs] + ['#include "%(s)s"' %
+                                          {'s': source} for source in sources])
+    args = cppflags + ['-D%(k)s=%(v)s' % {'k': k, 'v': env[k]} for k in env]
+    preprocessed = preprocess(cxx, preprocessorOption, combinedSources, args)
+    processed = '\n'.join([line for line in preprocessed.splitlines() if
+                           (line.strip() and not line.startswith('#'))])
 
-  js_out.write(processed)
-  import zlib
-  compressed = zlib.compress(processed)
-  data = ToCArray(compressed)
-  c_out.write(HEADER_TEMPLATE % {
-    'sources_type': 'unsigned char',
-    'sources_data': data,
-    'sources_name': 'compressedSources',
-    'compressed_total_length': len(compressed),
-    'raw_total_length': len(processed),
-    'namespace': namespace
-  })
+    js_out.write(processed)
+    import zlib
+    compressed = zlib.compress(processed)
+    data = ToCArray(compressed)
+    c_out.write(HEADER_TEMPLATE % {
+        'sources_type': 'unsigned char',
+        'sources_data': data,
+        'sources_name': 'compressedSources',
+        'compressed_total_length': len(compressed),
+        'raw_total_length': len(processed),
+        'namespace': namespace
+    })
 
-def preprocess(cxx, preprocessorOption, source, args = []):
-  if (not os.path.exists(cxx[0])):
-    cxx[0] = which.which(cxx[0])
-  # Clang seems to complain and not output anything if the extension of the
-  # input is not something it recognizes, so just fake a .cpp here.
-  tmpIn = 'self-hosting-cpp-input.cpp';
-  tmpOut = 'self-hosting-preprocessed.pp';
-  outputArg = shlex.split(preprocessorOption + tmpOut)
+
+def preprocess(cxx, preprocessorOption, source, args=[]):
+    if (not os.path.exists(cxx[0])):
+        cxx[0] = which.which(cxx[0])
+    # Clang seems to complain and not output anything if the extension of the
+    # input is not something it recognizes, so just fake a .cpp here.
+    tmpIn = 'self-hosting-cpp-input.cpp'
+    tmpOut = 'self-hosting-preprocessed.pp'
+    outputArg = shlex.split(preprocessorOption + tmpOut)
 
-  with open(tmpIn, 'wb') as input:
-    input.write(source)
-  print(' '.join(cxx + outputArg + args + [tmpIn]))
-  result = subprocess.Popen(cxx + outputArg + args + [tmpIn]).wait()
-  if (result != 0):
-    sys.exit(result);
-  with open(tmpOut, 'r') as output:
-    processed = output.read();
-  os.remove(tmpIn)
-  os.remove(tmpOut)
-  return processed
+    with open(tmpIn, 'wb') as input:
+        input.write(source)
+    print(' '.join(cxx + outputArg + args + [tmpIn]))
+    result = subprocess.Popen(cxx + outputArg + args + [tmpIn]).wait()
+    if (result != 0):
+        sys.exit(result)
+    with open(tmpOut, 'r') as output:
+        processed = output.read()
+    os.remove(tmpIn)
+    os.remove(tmpOut)
+    return processed
+
 
 def messages(jsmsg):
-  defines = []
-  for line in open(jsmsg):
-    match = re.match("MSG_DEF\((JSMSG_(\w+))", line)
-    if match:
-      defines.append("#define %s %i" % (match.group(1), len(defines)))
-    else:
-      # Make sure that MSG_DEF isn't preceded by whitespace
-      assert not line.strip().startswith("MSG_DEF")
-  return '\n'.join(defines)
+    defines = []
+    for line in open(jsmsg):
+        match = re.match("MSG_DEF\((JSMSG_(\w+))", line)
+        if match:
+            defines.append("#define %s %i" % (match.group(1), len(defines)))
+        else:
+            # Make sure that MSG_DEF isn't preceded by whitespace
+            assert not line.strip().startswith("MSG_DEF")
+    return '\n'.join(defines)
+
 
 def get_config_defines(buildconfig):
-  # Collect defines equivalent to ACDEFINES and add MOZ_DEBUG_DEFINES.
-  env = buildconfig.defines['ALLDEFINES']
-  for define in buildconfig.substs['MOZ_DEBUG_DEFINES']:
-    env[define] = 1
-  return env
+    # Collect defines equivalent to ACDEFINES and add MOZ_DEBUG_DEFINES.
+    env = buildconfig.defines['ALLDEFINES']
+    for define in buildconfig.substs['MOZ_DEBUG_DEFINES']:
+        env[define] = 1
+    return env
+
 
 def process_inputs(namespace, c_out, msg_file, inputs):
-  deps = [path for path in inputs if path.endswith(".h") or path.endswith(".h.js")]
-  sources = [path for path in inputs if path.endswith(".js") and not path.endswith(".h.js")]
-  assert len(deps) + len(sources) == len(inputs)
-  cxx = shlex.split(buildconfig.substs['CXX'])
-  pp_option = buildconfig.substs['PREPROCESS_OPTION']
-  cppflags = buildconfig.substs['OS_CPPFLAGS']
-  cppflags += shlex.split(buildconfig.substs['WARNINGS_AS_ERRORS'])
-  env = get_config_defines(buildconfig)
-  js_path = re.sub(r"\.out\.h$", "", c_out.name) + ".js"
-  msgs = messages(msg_file)
-  with open(js_path, 'w') as js_out:
-    embed(cxx, pp_option, cppflags, msgs, sources, c_out, js_out, namespace, env)
+    deps = [path for path in inputs if path.endswith(".h") or path.endswith(".h.js")]
+    sources = [path for path in inputs if path.endswith(".js") and not path.endswith(".h.js")]
+    assert len(deps) + len(sources) == len(inputs)
+    cxx = shlex.split(buildconfig.substs['CXX'])
+    pp_option = buildconfig.substs['PREPROCESS_OPTION']
+    cppflags = buildconfig.substs['OS_CPPFLAGS']
+    cppflags += shlex.split(buildconfig.substs['WARNINGS_AS_ERRORS'])
+    env = get_config_defines(buildconfig)
+    js_path = re.sub(r"\.out\.h$", "", c_out.name) + ".js"
+    msgs = messages(msg_file)
+    with open(js_path, 'w') as js_out:
+        embed(cxx, pp_option, cppflags, msgs, sources, c_out, js_out, namespace, env)
+
 
 def generate_selfhosted(c_out, msg_file, *inputs):
-  # Called from moz.build to embed selfhosted JS.
-  process_inputs('selfhosted', c_out, msg_file, inputs)
+    # Called from moz.build to embed selfhosted JS.
+    process_inputs('selfhosted', c_out, msg_file, inputs)
+
 
 def generate_shellmoduleloader(c_out, msg_file, *inputs):
-  # Called from moz.build to embed shell module loader JS.
-  process_inputs('moduleloader', c_out, msg_file, inputs)
+    # Called from moz.build to embed shell module loader JS.
+    process_inputs('moduleloader', c_out, msg_file, inputs)
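
Besides the indentation change, embedjs.py picks up two smaller fixes: E251 (no spaces around '=' in a default-argument assignment) and E703 (no trailing semicolons). A condensed sketch, reusing names from the diff for illustration only:

    # E251: 'args=[]', not 'args = []', in a parameter default
    def preprocess(source, args=[]):
        # E703: trailing semicolon removed (was: ...input.cpp';)
        tmpIn = 'self-hosting-cpp-input.cpp'
        return tmpIn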
--- a/js/src/builtin/intl/make_intl_data.py
+++ b/js/src/builtin/intl/make_intl_data.py
@@ -48,16 +48,17 @@ if sys.version_info.major == 2:
     from itertools import ifilter as filter, ifilterfalse as filterfalse, imap as map
     from urllib2 import urlopen, Request as UrlRequest
     from urlparse import urlsplit
 else:
     from itertools import filterfalse
     from urllib.request import urlopen, Request as UrlRequest
     from urllib.parse import urlsplit
 
+
 def readRegistryRecord(registry):
     """ Yields the records of the IANA Language Subtag Registry as dictionaries. """
     record = {}
     for line in registry:
         line = line.strip()
         if line == "":
             continue
         if line == "%%":
@@ -70,16 +71,17 @@ def readRegistryRecord(registry):
                 record[key] = value
             else:
                 # continuation line
                 record[key] += " " + line
     if record:
         yield record
     return
 
+
 def readRegistry(registry):
     """ Reads IANA Language Subtag Registry and extracts information for Intl.js.
 
         Information extracted:
         - grandfatheredMappings: mappings from grandfathered tags to preferred
           complete language tags
         - redundantMappings: mappings from redundant tags to preferred complete
           language tags
@@ -190,24 +192,26 @@ def readRegistry(registry):
     return {"fileDate": fileDate,
             "grandfatheredMappings": grandfatheredMappings,
             "redundantMappings": redundantMappings,
             "languageMappings": languageMappings,
             "regionMappings": regionMappings,
             "variantMappings": variantMappings,
             "extlangMappings": extlangMappings}
 
+
 def writeMappingHeader(println, description, fileDate, url):
     if type(description) is not list:
         description = [description]
     for desc in description:
         println(u"// {0}".format(desc))
     println(u"// Derived from IANA Language Subtag Registry, file date {0}.".format(fileDate))
     println(u"// {0}".format(url))
 
+
 def writeMappingsVar(println, mapping, name, description, fileDate, url):
     """ Writes a variable definition with a mapping table.
 
         Writes the contents of dictionary |mapping| through the |println|
         function with the given variable name and a comment with description,
         fileDate, and URL.
     """
     println(u"")
@@ -215,21 +219,23 @@ def writeMappingsVar(println, mapping, n
     println(u"var {0} = {{".format(name))
     for key in sorted(mapping):
         if not isinstance(mapping[key], dict):
             value = '"{0}"'.format(mapping[key])
         else:
             preferred = mapping[key]["preferred"]
             prefix = mapping[key]["prefix"]
             if key != preferred:
-                raise Exception("Expected '{0}' matches preferred locale '{1}'".format(key, preferred))
+                raise Exception(
+                    "Expected '{0}' matches preferred locale '{1}'".format(key, preferred))
             value = '"{0}"'.format(prefix)
         println(u'    "{0}": {1},'.format(key, value))
     println(u"};")
 
+
 def writeMappingsFunction(println, variantMappings, redundantMappings, extlangMappings, description, fileDate, url):
     """ Writes a function definition which performs language tag mapping.
 
         Processes the contents of dictionaries |variantMappings| and
         |redundantMappings| through the |println| function with the given
         function name and a comment with description, fileDate, and URL.
     """
 
@@ -294,30 +300,31 @@ def writeMappingsFunction(println, varia
         extlangIndex = 1
         lastVariant = None
         for (kind, subtag) in splitSubtags(tag):
             if kind == Subtag.Language:
                 continue
 
             if kind == Subtag.ExtLang:
                 assert extlangIndex in [1, 2, 3],\
-                       "Language-Tag permits no more than three extlang subtags"
+                    "Language-Tag permits no more than three extlang subtags"
                 cond.append('tag.extlang{} === "{}"'.format(extlangIndex, subtag))
                 extlangIndex += 1
             elif kind == Subtag.Script:
                 cond.append('tag.script === "{}"'.format(subtag))
             elif kind == Subtag.Region:
                 cond.append('tag.region === "{}"'.format(subtag))
             else:
                 assert kind == Subtag.Variant
                 if lastVariant is None:
                     cond.append("tag.variants.length >= {}".format(len(variants(tag))))
                     cond.append('callFunction(ArrayIndexOf, tag.variants, "{}") > -1'.format(subtag))
                 else:
-                    cond.append('callFunction(ArrayIndexOf, tag.variants, "{}", callFunction(ArrayIndexOf, tag.variants, "{}") + 1) > -1'.format(subtag, lastVariant))
+                    cond.append(
+                        'callFunction(ArrayIndexOf, tag.variants, "{}", callFunction(ArrayIndexOf, tag.variants, "{}") + 1) > -1'.format(subtag, lastVariant))
                 lastVariant = subtag
 
         # Require exact matches for redundant language tags.
         if tag in redundantMappings:
             tag_it = splitSubtags(tag)
             tag_next = partial(maybeNext, tag_it)
             (tag_kind, _) = tag_next()
 
@@ -384,17 +391,17 @@ def writeMappingsFunction(println, varia
 
         # Remove any extlang subtags per RFC 5646, 4.5:
         # 'The canonical form contains no 'extlang' subtags.'
         # https://tools.ietf.org/html/rfc5646#section-4.5
         assert preferred_kind != Subtag.ExtLang
         extlangIndex = 1
         while tag_kind == Subtag.ExtLang:
             assert extlangIndex in [1, 2, 3],\
-                   "Language-Tag permits no more than three extlang subtags"
+                "Language-Tag permits no more than three extlang subtags"
             println3(u"tag.extlang{} = undefined;".format(extlangIndex))
             extlangIndex += 1
             (tag_kind, tag_subtag) = tag_next()
 
         # Update the script and region subtags.
         for kind, prop_name in [(Subtag.Script, "script"), (Subtag.Region, "region")]:
             if tag_kind == kind and preferred_kind == kind:
                 if tag_subtag != preferred_subtag:
@@ -423,17 +430,18 @@ def writeMappingsFunction(println, varia
                 println3(u"        continue;")
                 (tag_kind, tag_subtag) = tag_next()
             println3(u"    _DefineDataProperty(newVariants, newVariants.length, variant);")
             println3(u"}")
 
             # Add the new variants, unless already present.
             while preferred_kind == Subtag.Variant:
                 println3(u'if (callFunction(ArrayIndexOf, newVariants, "{}") < 0)'.format(preferred_subtag))
-                println3(u'    _DefineDataProperty(newVariants, newVariants.length, "{}");'.format(preferred_subtag))
+                println3(u'    _DefineDataProperty(newVariants, newVariants.length, "{}");'.format(
+                    preferred_subtag))
                 (preferred_kind, preferred_subtag) = preferred_next()
 
             # Update the property.
             println3(u"tag.variants = newVariants;")
 
         # Ensure both language tags were completely processed.
         assert list(tag_it) == [], "unhandled tag subtags"
         assert list(preferred_it) == [], "unhandled preferred subtags"
@@ -489,25 +497,26 @@ def writeMappingsFunction(println, varia
 
     # Switch on the language subtag.
     println(u"    switch (tag.language) {")
     for lang in sorted({language(tag) for tag in langTagMappings}):
         println(u'      case "{}":'.format(lang))
         isFirstLanguageTag = True
         for tag in sorted(tag for tag in langTagMappings if language(tag) == lang):
             assert not isinstance(langTagMappings[tag], dict),\
-                   "only supports complete language tags"
+                "only supports complete language tags"
             emitCompare(tag, langTagMappings[tag], isFirstLanguageTag)
             isFirstLanguageTag = False
         println(u"        break;")
     println(u"    }")
 
     println(u"}")
     println(u"/* eslint-enable complexity */")
 
+
 def writeLanguageTagData(println, data, url):
     """ Writes the language tag data to the Intl data file. """
 
     fileDate = data["fileDate"]
     grandfatheredMappings = data["grandfatheredMappings"]
     redundantMappings = data["redundantMappings"]
     languageMappings = data["languageMappings"]
     regionMappings = data["regionMappings"]
@@ -524,16 +533,17 @@ def writeLanguageTagData(println, data, 
                      "Mappings from region subtags to preferred values.", fileDate, url)
     writeMappingsVar(println, extlangMappings, "extlangMappings",
                      ["Mappings from extlang subtags to preferred values.",
                       "All current deprecated extlang subtags have the form `<prefix>-<extlang>`",
                       "and their preferred value is exactly equal to `<extlang>`. So each key in",
                       "extlangMappings acts both as the extlang subtag and its preferred value."],
                      fileDate, url)
 
+
 def updateLangTags(args):
     """ Update the LangTagMappingsGenerated.js file. """
     url = args.url
     out = args.out
     filename = args.file
 
     print("Arguments:")
     print("\tDownload url: %s" % url)
@@ -558,100 +568,114 @@ def updateLangTags(args):
 
     print("Writing Intl data...")
     with io.open(out, mode="w", encoding="utf-8", newline="") as f:
         println = partial(print, file=f)
 
         println(u"// Generated by make_intl_data.py. DO NOT EDIT.")
         writeLanguageTagData(println, data, url)
 
+
 def flines(filepath, encoding="utf-8"):
     """ Open filepath and iterate over its content. """
     with io.open(filepath, mode="r", encoding=encoding) as f:
         for line in f:
             yield line
 
+
 @total_ordering
 class Zone(object):
     """ Time zone with optional file name. """
 
     def __init__(self, name, filename=""):
         self.name = name
         self.filename = filename
+
     def __eq__(self, other):
         return hasattr(other, "name") and self.name == other.name
+
     def __lt__(self, other):
         return self.name < other.name
+
     def __hash__(self):
         return hash(self.name)
+
     def __str__(self):
         return self.name
+
     def __repr__(self):
         return self.name
 
+
 class TzDataDir(object):
     """ tzdata source from a directory. """
 
     def __init__(self, obj):
         self.name = partial(os.path.basename, obj)
         self.resolve = partial(os.path.join, obj)
         self.basename = os.path.basename
         self.isfile = os.path.isfile
         self.listdir = partial(os.listdir, obj)
         self.readlines = flines
 
+
 class TzDataFile(object):
     """ tzdata source from a file (tar or gzipped). """
 
     def __init__(self, obj):
         self.name = lambda: os.path.splitext(os.path.splitext(os.path.basename(obj))[0])[0]
         self.resolve = obj.getmember
         self.basename = attrgetter("name")
         self.isfile = tarfile.TarInfo.isfile
         self.listdir = obj.getnames
         self.readlines = partial(self._tarlines, obj)
 
     def _tarlines(self, tar, m):
         with closing(tar.extractfile(m)) as f:
             for line in f:
                 yield line.decode("utf-8")
 
+
 def validateTimeZones(zones, links):
     """ Validate the zone and link entries. """
     linkZones = set(links.keys())
     intersect = linkZones.intersection(zones)
     if intersect:
         raise RuntimeError("Links also present in zones: %s" % intersect)
 
     zoneNames = {z.name for z in zones}
     linkTargets = set(links.values())
     if not linkTargets.issubset(zoneNames):
         raise RuntimeError("Link targets not found: %s" % linkTargets.difference(zoneNames))
 
+
 def partition(iterable, *predicates):
     def innerPartition(pred, it):
         it1, it2 = tee(it)
         return (filter(pred, it1), filterfalse(pred, it2))
     if len(predicates) == 0:
         return iterable
     (left, right) = innerPartition(predicates[0], iterable)
     if len(predicates) == 1:
         return (left, right)
     return tuple([left] + list(partition(right, *predicates[1:])))
 
+
 def listIANAFiles(tzdataDir):
     def isTzFile(d, m, f):
         return m(f) and d.isfile(d.resolve(f))
     return filter(partial(isTzFile, tzdataDir, re.compile("^[a-z0-9]+$").match), tzdataDir.listdir())
 
+
 def readIANAFiles(tzdataDir, files):
     """ Read all IANA time zone files from the given iterable. """
     nameSyntax = "[\w/+\-]+"
     pZone = re.compile(r"Zone\s+(?P<name>%s)\s+.*" % nameSyntax)
-    pLink = re.compile(r"Link\s+(?P<target>%s)\s+(?P<name>%s)(?:\s+#.*)?" % (nameSyntax, nameSyntax))
+    pLink = re.compile(r"Link\s+(?P<target>%s)\s+(?P<name>%s)(?:\s+#.*)?" %
+                       (nameSyntax, nameSyntax))
 
     def createZone(line, fname):
         match = pZone.match(line)
         name = match.group("name")
         return Zone(name, fname)
 
     def createLink(line, fname):
         match = pLink.match(line)
@@ -666,16 +690,17 @@ def readIANAFiles(tzdataDir, files):
             if line.startswith("Zone"):
                 zones.add(createZone(line, filename))
             if line.startswith("Link"):
                 (link, target) = createLink(line, filename)
                 links[link] = target
 
     return (zones, links)
 
+
 def readIANATimeZones(tzdataDir, ignoreBackzone, ignoreFactory):
     """ Read the IANA time zone information from `tzdataDir`. """
 
     backzoneFiles = {"backzone"}
     (bkfiles, tzfiles) = partition(listIANAFiles(tzdataDir), backzoneFiles.__contains__)
 
     # Read zone and link infos.
     (zones, links) = readIANAFiles(tzdataDir, tzfiles)
@@ -690,32 +715,35 @@ def readIANATimeZones(tzdataDir, ignoreB
         zones |= backzones
         links = {name: target for name, target in links.items() if name not in backzones}
         links.update(backlinks)
 
     validateTimeZones(zones, links)
 
     return (zones, links)
 
+
 def readICUResourceFile(filename):
     """ Read an ICU resource file.
 
         Yields (<table-name>, <startOrEnd>, <value>) for each table.
     """
 
     numberValue = r"-?\d+"
     stringValue = r'".+?"'
-    asVector = lambda val: r"%s(?:\s*,\s*%s)*" % (val, val)
+
+    def asVector(val): return r"%s(?:\s*,\s*%s)*" % (val, val)
     numberVector = asVector(numberValue)
     stringVector = asVector(stringValue)
 
     reNumberVector = re.compile(numberVector)
     reStringVector = re.compile(stringVector)
     reNumberValue = re.compile(numberValue)
     reStringValue = re.compile(stringValue)
+
     def parseValue(value):
         m = reNumberVector.match(value)
         if m:
             return [int(v) for v in reNumberValue.findall(value)]
         m = reStringVector.match(value)
         if m:
             return [v[1:-1] for v in reStringValue.findall(value)]
         raise RuntimeError("unknown value type: %s" % value)
@@ -739,17 +767,18 @@ def readICUResourceFile(filename):
 
     reStartTable = line(tableName, r"\{")
     reEndTable = line(r"\}")
     reSingleValue = line(r",?", tableValue, r",?")
     reCompactTable = line(tableName, r"\{", tableValue, r"\}")
     reEmptyLine = line()
 
     tables = []
-    currentTable = lambda: "|".join(tables)
+
+    def currentTable(): return "|".join(tables)
     values = []
     for line in flines(filename, "utf-8-sig"):
         line = line.strip()
         if line == "":
             continue
 
         m = reEmptyLine.match(line)
         if m:
@@ -778,23 +807,25 @@ def readICUResourceFile(filename):
 
         m = reSingleValue.match(line)
         if m and tables:
             values.extend(parseValue(m.group("value")))
             continue
 
         raise RuntimeError("unknown entry: %s" % line)
 
+
 def readICUTimeZonesFromTimezoneTypes(icuTzDir):
     """ Read the ICU time zone information from `icuTzDir`/timezoneTypes.txt
         and returns the tuple (zones, links).
     """
     typeMapTimeZoneKey = "timezoneTypes:table(nofallback)|typeMap|timezone|"
     typeAliasTimeZoneKey = "timezoneTypes:table(nofallback)|typeAlias|timezone|"
-    toTimeZone = lambda name: Zone(name.replace(":", "/"))
+
+    def toTimeZone(name): return Zone(name.replace(":", "/"))
 
     zones = set()
     links = dict()
 
     for name, value in readICUResourceFile(os.path.join(icuTzDir, "timezoneTypes.txt")):
         if name.startswith(typeMapTimeZoneKey):
             zones.add(toTimeZone(name[len(typeMapTimeZoneKey):]))
         if name.startswith(typeAliasTimeZoneKey):
@@ -807,16 +838,17 @@ def readICUTimeZonesFromTimezoneTypes(ic
     # but it is still present in ICU sources. Manually remove it to keep our
     # tables consistent with IANA.
     del links[Zone("Canada/East-Saskatchewan")]
 
     validateTimeZones(zones, links)
 
     return (zones, links)
 
+
 def readICUTimeZonesFromZoneInfo(icuTzDir, ignoreFactory):
     """ Read the ICU time zone information from `icuTzDir`/zoneinfo64.txt
         and returns the tuple (zones, links).
     """
     zoneKey = "zoneinfo64:table(nofallback)|Zones:array|:table"
     linkKey = "zoneinfo64:table(nofallback)|Zones:array|:int"
     namesKey = "zoneinfo64:table(nofallback)|Names"
 
@@ -847,53 +879,55 @@ def readICUTimeZonesFromZoneInfo(icuTzDi
     # Remove the placeholder time zone "Factory".
     if ignoreFactory:
         zones.remove(Zone("Factory"))
 
     validateTimeZones(zones, links)
 
     return (zones, links)
 
+
 def readICUTimeZones(icuDir, icuTzDir, ignoreFactory):
     # zoneinfo64.txt contains the supported time zones by ICU. This data is
     # generated from tzdata files, it doesn't include "backzone" in stock ICU.
     (zoneinfoZones, zoneinfoLinks) = readICUTimeZonesFromZoneInfo(icuTzDir, ignoreFactory)
 
     # timezoneTypes.txt contains the canonicalization information for ICU. This
     # data is generated from CLDR files. It includes data about time zones from
     # tzdata's "backzone" file.
     (typesZones, typesLinks) = readICUTimeZonesFromTimezoneTypes(icuTzDir)
 
     # Information in zoneinfo64 should be a superset of timezoneTypes.
-    inZoneInfo64 = lambda zone: zone in zoneinfoZones or zone in zoneinfoLinks
+    def inZoneInfo64(zone): return zone in zoneinfoZones or zone in zoneinfoLinks
 
     # Remove legacy ICU time zones from zoneinfo64 data.
     (legacyZones, legacyLinks) = readICULegacyZones(icuDir)
     zoneinfoZones = {zone for zone in zoneinfoZones if zone not in legacyZones}
-    zoneinfoLinks = {zone: target for (zone, target) in zoneinfoLinks.items() if zone not in legacyLinks}
+    zoneinfoLinks = {zone: target for (
+        zone, target) in zoneinfoLinks.items() if zone not in legacyLinks}
 
     notFoundInZoneInfo64 = [zone for zone in typesZones if not inZoneInfo64(zone)]
     if notFoundInZoneInfo64:
         raise RuntimeError("Missing time zones in zoneinfo64.txt: %s" % notFoundInZoneInfo64)
 
     notFoundInZoneInfo64 = [zone for zone in typesLinks.keys() if not inZoneInfo64(zone)]
     if notFoundInZoneInfo64:
         raise RuntimeError("Missing time zones in zoneinfo64.txt: %s" % notFoundInZoneInfo64)
 
     # zoneinfo64.txt only defines the supported time zones by ICU, the canonicalization
     # rules are defined through timezoneTypes.txt. Merge both to get the actual zones
     # and links used by ICU.
     icuZones = set(chain(
-                 (zone for zone in zoneinfoZones if zone not in typesLinks),
-                 (zone for zone in typesZones)
-               ))
+        (zone for zone in zoneinfoZones if zone not in typesLinks),
+        (zone for zone in typesZones)
+    ))
     icuLinks = dict(chain(
-                 ((zone, target) for (zone, target) in zoneinfoLinks.items() if zone not in typesZones),
-                 ((zone, target) for (zone, target) in typesLinks.items())
-               ))
+        ((zone, target) for (zone, target) in zoneinfoLinks.items() if zone not in typesZones),
+        ((zone, target) for (zone, target) in typesLinks.items())
+    ))
 
     return (icuZones, icuLinks)
 
 
 def readICULegacyZones(icuDir):
     """ Read the ICU legacy time zones from `icuTzDir`/tools/tzcode/icuzones
         and returns the tuple (zones, links).
     """
@@ -905,16 +939,17 @@ def readICULegacyZones(icuDir):
 
     # tzdata2017c removed the link Canada/East-Saskatchewan -> America/Regina,
     # but it is still present in ICU sources. Manually tag it as a legacy time
     # zone so our tables are kept consistent with IANA.
     links[Zone("Canada/East-Saskatchewan")] = "America/Regina"
 
     return (zones, links)
 
+
 def icuTzDataVersion(icuTzDir):
     """ Read the ICU time zone version from `icuTzDir`/zoneinfo64.txt. """
     def searchInFile(pattern, f):
         p = re.compile(pattern)
         for line in flines(f, "utf-8-sig"):
             m = p.search(line)
             if m:
                 return m.group(1)
@@ -923,21 +958,24 @@ def icuTzDataVersion(icuTzDir):
     zoneinfo = os.path.join(icuTzDir, "zoneinfo64.txt")
     if not os.path.isfile(zoneinfo):
         raise RuntimeError("file not found: %s" % zoneinfo)
     version = searchInFile("^//\s+tz version:\s+([0-9]{4}[a-z])$", zoneinfo)
     if version is None:
         raise RuntimeError("%s does not contain a valid tzdata version string" % zoneinfo)
     return version
 
+
 def findIncorrectICUZones(ianaZones, ianaLinks, icuZones, icuLinks, ignoreBackzone):
     """ Find incorrect ICU zone entries. """
-    isIANATimeZone = lambda zone: zone in ianaZones or zone in ianaLinks
-    isICUTimeZone = lambda zone: zone in icuZones or zone in icuLinks
-    isICULink = lambda zone: zone in icuLinks
+    def isIANATimeZone(zone): return zone in ianaZones or zone in ianaLinks
+
+    def isICUTimeZone(zone): return zone in icuZones or zone in icuLinks
+
+    def isICULink(zone): return zone in icuLinks
 
     # All IANA zones should be present in ICU.
     missingTimeZones = [zone for zone in ianaZones if not isICUTimeZone(zone)]
     # Normally zones in backzone are also present as links in one of the other
     # time zone files. The only exception to this rule is the Asia/Hanoi time
     # zone, this zone is only present in the backzone file.
     expectedMissing = [] if ignoreBackzone else [Zone("Asia/Hanoi")]
     if missingTimeZones != expectedMissing:
@@ -954,62 +992,70 @@ def findIncorrectICUZones(ianaZones, ian
     result = ((zone, icuLinks[zone]) for zone in ianaZones if isICULink(zone))
 
     # Remove unnecessary UTC mappings.
     utcnames = ["Etc/UTC", "Etc/UCT", "Etc/GMT"]
     result = ((zone, target) for (zone, target) in result if zone.name not in utcnames)
 
     return sorted(result, key=itemgetter(0))
 
+
 def findIncorrectICULinks(ianaZones, ianaLinks, icuZones, icuLinks):
     """ Find incorrect ICU link entries. """
-    isIANATimeZone = lambda zone: zone in ianaZones or zone in ianaLinks
-    isICUTimeZone = lambda zone: zone in icuZones or zone in icuLinks
-    isICULink = lambda zone: zone in icuLinks
-    isICUZone = lambda zone: zone in icuZones
+    def isIANATimeZone(zone): return zone in ianaZones or zone in ianaLinks
+
+    def isICUTimeZone(zone): return zone in icuZones or zone in icuLinks
+
+    def isICULink(zone): return zone in icuLinks
+
+    def isICUZone(zone): return zone in icuZones
 
     # All links should be present in ICU.
     missingTimeZones = [zone for zone in ianaLinks.keys() if not isICUTimeZone(zone)]
     if missingTimeZones:
         raise RuntimeError("Not all zones are present in ICU, did you forget "
                            "to run intl/update-tzdata.sh? %s" % missingTimeZones)
 
     # Links which are only present in ICU?
     additionalTimeZones = [zone for zone in icuLinks.keys() if not isIANATimeZone(zone)]
     if additionalTimeZones:
         raise RuntimeError("Additional links present in ICU, did you forget "
                            "to run intl/update-tzdata.sh? %s" % additionalTimeZones)
 
     result = chain(
         # IANA links which have a different target in ICU.
-        ((zone, target, icuLinks[zone]) for (zone, target) in ianaLinks.items() if isICULink(zone) and target != icuLinks[zone]),
+        ((zone, target, icuLinks[zone]) for (zone, target)
+         in ianaLinks.items() if isICULink(zone) and target != icuLinks[zone]),
 
         # IANA links which are zones in ICU.
         ((zone, target, zone.name) for (zone, target) in ianaLinks.items() if isICUZone(zone))
     )
 
     # Remove unnecessary UTC mappings.
     utcnames = ["Etc/UTC", "Etc/UCT", "Etc/GMT"]
     result = ((zone, target, icuTarget)
               for (zone, target, icuTarget) in result
               if target not in utcnames or icuTarget not in utcnames)
 
     return sorted(result, key=itemgetter(0))
 
+
 generatedFileWarning = u"// Generated by make_intl_data.py. DO NOT EDIT."
 tzdataVersionComment = u"// tzdata version = {0}"
 
+
 def processTimeZones(tzdataDir, icuDir, icuTzDir, version, ignoreBackzone, ignoreFactory, out):
     """ Read the time zone info and create a new time zone cpp file. """
     print("Processing tzdata mapping...")
     (ianaZones, ianaLinks) = readIANATimeZones(tzdataDir, ignoreBackzone, ignoreFactory)
     (icuZones, icuLinks) = readICUTimeZones(icuDir, icuTzDir, ignoreFactory)
     (legacyZones, legacyLinks) = readICULegacyZones(icuDir)
 
-    incorrectZones = findIncorrectICUZones(ianaZones, ianaLinks, icuZones, icuLinks, ignoreBackzone)
+    incorrectZones = findIncorrectICUZones(
+        ianaZones, ianaLinks, icuZones, icuLinks, ignoreBackzone)
     if not incorrectZones:
         print("<<< No incorrect ICU time zones found, please update Intl.js! >>>")
         print("<<< Maybe https://ssl.icu-project.org/trac/ticket/12044 was fixed? >>>")
 
     incorrectLinks = findIncorrectICULinks(ianaZones, ianaLinks, icuZones, icuLinks)
     if not incorrectLinks:
         print("<<< No incorrect ICU time zone links found, please update Intl.js! >>>")
         print("<<< Maybe https://ssl.icu-project.org/trac/ticket/12044 was fixed? >>>")
@@ -1061,32 +1107,34 @@ def processTimeZones(tzdataDir, icuDir, 
         println(u"};")
         println(u"")
 
         println(u"} // namespace timezone")
         println(u"} // namespace js")
         println(u"")
         println(u"#endif /* builtin_intl_TimeZoneDataGenerated_h */")
 
+
 def updateBackzoneLinks(tzdataDir, links):
-    withZone = lambda fn: lambda zone_target: fn(zone_target[0])
+    def withZone(fn): return lambda zone_target: fn(zone_target[0])
 
     (backzoneZones, backzoneLinks) = readIANAFiles(tzdataDir, ["backzone"])
     (stableZones, updatedLinks, updatedZones) = partition(
         links.items(),
         # Link not changed in backzone.
         withZone(lambda zone: zone not in backzoneLinks and zone not in backzoneZones),
         # Link has a new target.
         withZone(lambda zone: zone in backzoneLinks),
     )
     # Keep stable zones and links with updated target.
     return dict(chain(
                 stableZones,
                 map(withZone(lambda zone: (zone, backzoneLinks[zone])), updatedLinks)
-           ))
+                ))
+
 
 def generateTzDataLinkTestContent(testDir, version, fileName, description, links):
     with io.open(os.path.join(testDir, fileName), mode="w", encoding="utf-8", newline="") as f:
         println = partial(print, file=f)
 
         println(u'// |reftest| skip-if(!this.hasOwnProperty("Intl"))')
         println(u"")
         println(generatedFileWarning)
@@ -1117,111 +1165,119 @@ for (let [linkName, target] of Object.en
     }
 }
 """)
         println(u"""
 if (typeof reportCompare === "function")
     reportCompare(0, 0, "ok");
 """)
 
+
 def generateTzDataTestBackwardLinks(tzdataDir, version, ignoreBackzone, testDir):
     (zones, links) = readIANAFiles(tzdataDir, ["backward"])
     assert len(zones) == 0
 
     if not ignoreBackzone:
         links = updateBackzoneLinks(tzdataDir, links)
 
     generateTzDataLinkTestContent(
         testDir, version,
         "timeZone_backward_links.js",
         u"// Link names derived from IANA Time Zone Database, backward file.",
         links.items()
     )
 
+
 def generateTzDataTestNotBackwardLinks(tzdataDir, version, ignoreBackzone, testDir):
     tzfiles = filterfalse({"backward", "backzone"}.__contains__, listIANAFiles(tzdataDir))
     (zones, links) = readIANAFiles(tzdataDir, tzfiles)
 
     if not ignoreBackzone:
         links = updateBackzoneLinks(tzdataDir, links)
 
     generateTzDataLinkTestContent(
         testDir, version,
         "timeZone_notbackward_links.js",
         u"// Link names derived from IANA Time Zone Database, excluding backward file.",
         links.items()
     )
 
+
 def generateTzDataTestBackzone(tzdataDir, version, ignoreBackzone, testDir):
     backzoneFiles = {"backzone"}
     (bkfiles, tzfiles) = partition(listIANAFiles(tzdataDir), backzoneFiles.__contains__)
 
     # Read zone and link infos.
     (zones, links) = readIANAFiles(tzdataDir, tzfiles)
     (backzones, backlinks) = readIANAFiles(tzdataDir, bkfiles)
 
     if not ignoreBackzone:
-        comment=u"""\
+        comment = u"""\
 // This file was generated with historical, pre-1970 backzone information
 // respected. Therefore, every zone key listed below is its own Zone, not
 // a Link to a modern-day target as IANA ignoring backzones would say.
 
 """
     else:
-        comment=u"""\
+        comment = u"""\
 // This file was generated while ignoring historical, pre-1970 backzone
 // information. Therefore, every zone key listed below is part of a Link
 // whose target is the corresponding value.
 
 """
 
     generateTzDataLinkTestContent(
         testDir, version,
         "timeZone_backzone.js",
         comment + u"// Backzone zones derived from IANA Time Zone Database.",
-        ((zone, zone if not ignoreBackzone else links[zone]) for zone in backzones if zone in links)
+        ((zone, zone if not ignoreBackzone else links[zone])
+         for zone in backzones if zone in links)
     )
 
+
 def generateTzDataTestBackzoneLinks(tzdataDir, version, ignoreBackzone, testDir):
     backzoneFiles = {"backzone"}
     (bkfiles, tzfiles) = partition(listIANAFiles(tzdataDir), backzoneFiles.__contains__)
 
     # Read zone and link infos.
     (zones, links) = readIANAFiles(tzdataDir, tzfiles)
     (backzones, backlinks) = readIANAFiles(tzdataDir, bkfiles)
 
     if not ignoreBackzone:
-        comment=u"""\
+        comment = u"""\
 // This file was generated with historical, pre-1970 backzone information
 // respected. Therefore, every zone key listed below points to a target
 // in the backzone file and not to its modern-day target as IANA ignoring
 // backzones would say.
 
 """
     else:
-        comment=u"""\
+        comment = u"""\
 // This file was generated while ignoring historical, pre-1970 backzone
 // information. Therefore, every zone key listed below is part of a Link
 // whose target is the corresponding value ignoring any backzone entries.
 
 """
 
     generateTzDataLinkTestContent(
         testDir, version,
         "timeZone_backzone_links.js",
-        comment +  u"// Backzone links derived from IANA Time Zone Database.",
-        ((zone, target if not ignoreBackzone else links[zone]) for (zone, target) in backlinks.items())
+        comment + u"// Backzone links derived from IANA Time Zone Database.",
+        ((zone, target if not ignoreBackzone else links[zone])
+         for (zone, target) in backlinks.items())
     )
 
+
 def generateTzDataTests(tzdataDir, version, ignoreBackzone, testDir):
     generateTzDataTestBackwardLinks(tzdataDir, version, ignoreBackzone, testDir)
     generateTzDataTestNotBackwardLinks(tzdataDir, version, ignoreBackzone, testDir)
     generateTzDataTestBackzone(tzdataDir, version, ignoreBackzone, testDir)
     generateTzDataTestBackzoneLinks(tzdataDir, version, ignoreBackzone, testDir)
 
+
 def updateTzdata(topsrcdir, args):
     """ Update the time zone cpp file. """
 
     icuDir = os.path.join(topsrcdir, "intl/icu/source")
     if not os.path.isdir(icuDir):
         raise RuntimeError("not a directory: %s" % icuDir)
 
     icuTzDir = os.path.join(topsrcdir, "intl/tzdata/source")
@@ -1251,36 +1307,40 @@ def updateTzdata(topsrcdir, args):
     print("\tICU timezone directory: %s" % icuTzDir)
     print("\tIgnore backzone file: %s" % ignoreBackzone)
     print("\tOutput file: %s" % out)
     print("")
 
     def updateFrom(f):
         if os.path.isfile(f) and tarfile.is_tarfile(f):
             with tarfile.open(f, "r:*") as tar:
-                processTimeZones(TzDataFile(tar), icuDir, icuTzDir, version, ignoreBackzone, ignoreFactory, out)
-                generateTzDataTests(TzDataFile(tar), version, ignoreBackzone, dateTimeFormatTestDir)
+                processTimeZones(TzDataFile(tar), icuDir, icuTzDir, version,
+                                 ignoreBackzone, ignoreFactory, out)
+                generateTzDataTests(TzDataFile(tar), version,
+                                    ignoreBackzone, dateTimeFormatTestDir)
         elif os.path.isdir(f):
-            processTimeZones(TzDataDir(f), icuDir, icuTzDir, version, ignoreBackzone, ignoreFactory, out)
+            processTimeZones(TzDataDir(f), icuDir, icuTzDir, version,
+                             ignoreBackzone, ignoreFactory, out)
             generateTzDataTests(TzDataDir(f), version, ignoreBackzone, dateTimeFormatTestDir)
         else:
             raise RuntimeError("unknown format")
 
     if tzDir is None:
         print("Downloading tzdata file...")
         with closing(urlopen(url)) as tzfile:
             fname = urlsplit(tzfile.geturl()).path.split("/")[-1]
             with tempfile.NamedTemporaryFile(suffix=fname) as tztmpfile:
                 print("File stored in %s" % tztmpfile.name)
                 tztmpfile.write(tzfile.read())
                 tztmpfile.flush()
                 updateFrom(tztmpfile.name)
     else:
         updateFrom(tzDir)
 
+
 def readCurrencyFile(tree):
     reCurrency = re.compile(r"^[A-Z]{3}$")
     reIntMinorUnits = re.compile(r"^\d+$")
 
     for country in tree.iterfind(".//CcyNtry"):
         # Skip entry if no currency information is available.
         currency = country.findtext("Ccy")
         if currency is None:
@@ -1291,16 +1351,17 @@ def readCurrencyFile(tree):
         assert minorUnits is not None
 
         # Skip all entries without minorUnits or which use the default minorUnits.
         if reIntMinorUnits.match(minorUnits) and int(minorUnits) != 2:
             currencyName = country.findtext("CcyNm")
             countryName = country.findtext("CtryNm")
             yield (currency, int(minorUnits), currencyName, countryName)
 
+
 def writeCurrencyFile(published, currencies, out):
     with io.open(out, mode="w", encoding="utf-8", newline="") as f:
         println = partial(print, file=f)
 
         println(generatedFileWarning)
         println(u"// Version: {}".format(published))
 
         println(u"""
@@ -1313,16 +1374,17 @@ def writeCurrencyFile(published, currenc
  */""")
         println(u"var currencyDigits = {")
         for (currency, entries) in groupby(sorted(currencies, key=itemgetter(0)), itemgetter(0)):
             for (_, minorUnits, currencyName, countryName) in entries:
                 println(u"    // {} ({})".format(currencyName, countryName))
             println(u"    {}: {},".format(currency, minorUnits))
         println(u"};")
 
+
 def updateCurrency(topsrcdir, args):
     """ Update the CurrencyDataGenerated.js file. """
     import xml.etree.ElementTree as ET
     from random import randint
 
     url = args.url
     out = args.out
     filename = args.file
@@ -1343,25 +1405,27 @@ def updateCurrency(topsrcdir, args):
         writeCurrencyFile(published, currencies, out)
 
     if filename is not None:
         print("Always make sure you have the newest currency code list file!")
         updateFrom(filename)
     else:
         print("Downloading currency & funds code list...")
         request = UrlRequest(url)
-        request.add_header("User-agent", "Mozilla/5.0 (Mobile; rv:{0}.0) Gecko/{0}.0 Firefox/{0}.0".format(randint(1, 999)))
+        request.add_header(
+            "User-agent", "Mozilla/5.0 (Mobile; rv:{0}.0) Gecko/{0}.0 Firefox/{0}.0".format(randint(1, 999)))
         with closing(urlopen(request)) as currencyFile:
             fname = urlsplit(currencyFile.geturl()).path.split("/")[-1]
             with tempfile.NamedTemporaryFile(suffix=fname) as currencyTmpFile:
                 print("File stored in %s" % currencyTmpFile.name)
                 currencyTmpFile.write(currencyFile.read())
                 currencyTmpFile.flush()
                 updateFrom(currencyTmpFile.name)
 
+
 if __name__ == "__main__":
     import argparse
 
     # This script must reside in js/src/builtin/intl to work correctly.
     (thisDir, thisFile) = os.path.split(os.path.abspath(sys.argv[0]))
     dirPaths = os.path.normpath(thisDir).split(os.sep)
     if "/".join(dirPaths[-4:]) != "js/src/builtin/intl":
         raise RuntimeError("%s must reside in js/src/builtin/intl" % sys.argv[0])
@@ -1403,17 +1467,16 @@ if __name__ == "__main__":
                            help="Ignore tzdata's 'backzone' file. Can be enabled to generate more "
                                 "accurate time zone canonicalization reflecting the actual time "
                                 "zones as used by ICU.")
     parser_tz.add_argument("--out",
                            default="TimeZoneDataGenerated.h",
                            help="Output file (default: %(default)s)")
     parser_tz.set_defaults(func=partial(updateTzdata, topsrcdir))
 
-
     parser_currency = subparsers.add_parser("currency", help="Update currency digits mapping")
     parser_currency.add_argument("--url",
                                  metavar="URL",
                                  default="https://www.currency-iso.org/dam/downloads/lists/list_one.xml",
                                  type=EnsureHttps,
                                  help="Download url for the currency & funds code list (default: "
                                       "%(default)s)")
     parser_currency.add_argument("--out",
--- a/js/src/ctypes/libffi/generate-darwin-source-and-headers.py
+++ b/js/src/ctypes/libffi/generate-darwin-source-and-headers.py
@@ -1,19 +1,21 @@
 #!/usr/bin/env python
 import subprocess
 import os
 import errno
 import collections
 import glob
 import argparse
 
+
 class Platform(object):
     pass
 
+
 class simulator_platform(Platform):
     directory = 'darwin_ios'
     sdk = 'iphonesimulator'
     arch = 'i386'
     triple = 'i386-apple-darwin11'
     version_min = '-miphoneos-version-min=5.1.1'
 
     prefix = "#ifdef __i386__\n\n"
@@ -112,37 +114,40 @@ def move_file(src_dir, dst_dir, filename
 
             out_file.write(in_file.read())
 
             if suffix:
                 out_file.write(suffix)
 
 
 def list_files(src_dir, pattern=None, filelist=None):
-    if pattern: filelist = glob.iglob(os.path.join(src_dir, pattern))
+    if pattern:
+        filelist = glob.iglob(os.path.join(src_dir, pattern))
     for file in filelist:
         yield os.path.basename(file)
 
 
 def copy_files(src_dir, dst_dir, pattern=None, filelist=None, file_suffix=None, prefix=None, suffix=None):
     for filename in list_files(src_dir, pattern=pattern, filelist=filelist):
-        move_file(src_dir, dst_dir, filename, file_suffix=file_suffix, prefix=prefix, suffix=suffix)
+        move_file(src_dir, dst_dir, filename, file_suffix=file_suffix,
+                  prefix=prefix, suffix=suffix)
 
 
 def copy_src_platform_files(platform):
     src_dir = os.path.join('src', platform.src_dir)
     dst_dir = os.path.join(platform.directory, 'src', platform.src_dir)
-    copy_files(src_dir, dst_dir, filelist=platform.src_files, file_suffix=platform.arch, prefix=platform.prefix, suffix=platform.suffix)
+    copy_files(src_dir, dst_dir, filelist=platform.src_files, file_suffix=platform.arch,
+               prefix=platform.prefix, suffix=platform.suffix)
 
 
 def build_target(platform, platform_headers):
     def xcrun_cmd(cmd):
         return 'xcrun -sdk %s %s -arch %s' % (platform.sdk, cmd, platform.arch)
 
-    tag='%s-%s' % (platform.sdk, platform.arch)
+    tag = '%s-%s' % (platform.sdk, platform.arch)
     build_dir = 'build_%s' % tag
     mkdir_p(build_dir)
     env = dict(CC=xcrun_cmd('clang'),
                LD=xcrun_cmd('ld'),
                CFLAGS='%s' % (platform.version_min))
     working_dir = os.getcwd()
     try:
         os.chdir(build_dir)
@@ -193,17 +198,19 @@ def generate_source_and_headers(generate
         build_target(desktop32_platform, platform_headers)
         build_target(desktop64_platform, platform_headers)
 
     mkdir_p('darwin_common/include')
     for header_name, tag_tuples in platform_headers.iteritems():
         basename, suffix = os.path.splitext(header_name)
         with open(os.path.join('darwin_common/include', header_name), 'w') as header:
             for tag_tuple in tag_tuples:
-                header.write('%s#include <%s_%s%s>\n%s\n' % (tag_tuple[0], basename, tag_tuple[1], suffix, tag_tuple[2]))
+                header.write('%s#include <%s_%s%s>\n%s\n' %
+                             (tag_tuple[0], basename, tag_tuple[1], suffix, tag_tuple[2]))
+
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
     parser.add_argument('--only-ios', action='store_true', default=False)
     parser.add_argument('--only-osx', action='store_true', default=False)
     args = parser.parse_args()
 
     generate_source_and_headers(generate_osx=not args.only_ios, generate_ios=not args.only_osx)
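
generate-darwin-source-and-headers.py surfaces two further fixes: E701 moves the body of a one-line 'if' onto its own line, and E225 adds the missing spaces around '=' in tag='%s-%s'. A self-contained sketch; src_dir and pattern carry placeholder values, not values from the tree:

    import glob
    import os

    src_dir, pattern = 'src', '*.c'  # placeholders for illustration

    # E701: was 'if pattern: filelist = glob.iglob(...)' on one line
    if pattern:
        filelist = glob.iglob(os.path.join(src_dir, pattern))

    # E225: spaces around '=' (was: tag='%s-%s' % (sdk, arch))
    tag = '%s-%s' % ('iphonesimulator', 'i386')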
--- a/js/src/devtools/automation/autospider.py
+++ b/js/src/devtools/automation/autospider.py
@@ -23,16 +23,17 @@ Dirs = namedtuple('Dirs', ['scripts', 'j
 def directories(pathmodule, cwd, fixup=lambda s: s):
     scripts = pathmodule.join(fixup(cwd), fixup(pathmodule.dirname(__file__)))
     js_src = pathmodule.abspath(pathmodule.join(scripts, "..", ".."))
     source = pathmodule.abspath(pathmodule.join(js_src, "..", ".."))
     tooltool = pathmodule.abspath(env.get('TOOLTOOL_CHECKOUT',
                                           pathmodule.join(source, "..", "..")))
     return Dirs(scripts, js_src, source, tooltool)
 
+
 # Some scripts will be called with sh, which cannot use backslashed
 # paths. So for direct subprocess.* invocation, use normal paths from
 # DIR, but when running under the shell, use POSIX style paths.
 DIR = directories(os.path, os.getcwd())
 PDIR = directories(posixpath, os.environ["PWD"],
                    fixup=lambda s: re.sub(r'^(\w):', r'/\1', s))
 env['CPP_UNIT_TESTS_DIR_JS_SRC'] = DIR.js_src
 
@@ -99,16 +100,17 @@ OBJDIR = args.objdir
 OUTDIR = os.path.join(OBJDIR, "out")
 POBJDIR = posixpath.join(PDIR.source, args.objdir)
 MAKE = env.get('MAKE', 'make')
 MAKEFLAGS = env.get('MAKEFLAGS', '-j6' + ('' if AUTOMATION else ' -s'))
 
 for d in ('scripts', 'js_src', 'source', 'tooltool'):
     info("DIR.{name} = {dir}".format(name=d, dir=getattr(DIR, d)))
 
+
 def set_vars_from_script(script, vars):
     '''Run a shell script, then dump out chosen environment variables. The build
        system uses shell scripts to do some configuration that we need to
        borrow. On Windows, the script itself must output the variable settings
        (in the form "export FOO=<value>"), since otherwise there will be
        problems with mismatched Windows/POSIX formats.
     '''
     script_text = 'source %s' % script
@@ -151,26 +153,28 @@ def set_vars_from_script(script, vars):
 
 def ensure_dir_exists(name, clobber=True, creation_marker_filename="CREATED-BY-AUTOSPIDER"):
     if creation_marker_filename is None:
         marker = None
     else:
         marker = os.path.join(name, creation_marker_filename)
     if clobber:
         if not AUTOMATION and marker and os.path.exists(name) and not os.path.exists(marker):
-            raise Exception("Refusing to delete objdir %s because it was not created by autospider" % name)
+            raise Exception(
+                "Refusing to delete objdir %s because it was not created by autospider" % name)
         shutil.rmtree(name, ignore_errors=True)
     try:
         os.mkdir(name)
         if marker:
             open(marker, 'a').close()
     except OSError:
         if clobber:
             raise
 
+
 with open(os.path.join(DIR.scripts, "variants", args.variant)) as fh:
     variant = json.load(fh)
 
 if args.variant == 'nonunified':
     # Rewrite js/src/**/moz.build to replace UNIFIED_SOURCES to SOURCES.
     # Note that this modifies the current checkout.
     for dirpath, dirnames, filenames in os.walk(DIR.js_src):
         if 'moz.build' in filenames:
@@ -296,16 +300,17 @@ if platform.system() == 'Linux' and AUTO
 ACTIVE_PROCESSES = set()
 
 
 def killall():
     for proc in ACTIVE_PROCESSES:
         proc.kill()
     ACTIVE_PROCESSES.clear()
 
+
 timer = Timer(args.timeout, killall)
 timer.daemon = True
 timer.start()
 
 ensure_dir_exists(OBJDIR, clobber=not args.dep and not args.nobuild)
 ensure_dir_exists(OUTDIR, clobber=not args.keep)
 
 # Any jobs that wish to produce additional output can save them into the upload
@@ -325,16 +330,17 @@ def run_command(command, check=False, **
         stdout, stderr = proc.communicate()
     finally:
         ACTIVE_PROCESSES.discard(proc)
     status = proc.wait()
     if check and status != 0:
         raise subprocess.CalledProcessError(status, command, output=stderr)
     return stdout, stderr, status
 
+
 # Add in environment variable settings for this variant. Normally used to
 # modify the flags passed to the shell or to set the GC zeal mode.
 for k, v in variant.get('env', {}).items():
     env[k.encode('ascii')] = v.encode('ascii').format(
         DIR=DIR.scripts,
         TOOLTOOL_CHECKOUT=DIR.tooltool,
         MOZ_UPLOAD_DIR=env['MOZ_UPLOAD_DIR'],
         OUTDIR=OUTDIR,
@@ -376,29 +382,31 @@ def need_updating_configure(configure):
         os.path.join(DIR.js_src, 'old-configure.in'),
     ]
     for file in dep_files:
         if os.path.getmtime(file) > os.path.getmtime(configure):
             return True
 
     return False
 
+
 if not args.nobuild:
     CONFIGURE_ARGS += ' --enable-nspr-build'
     CONFIGURE_ARGS += ' --prefix={OBJDIR}/dist'.format(OBJDIR=POBJDIR)
 
     # Generate a configure script from configure.in.
     configure = os.path.join(DIR.js_src, 'configure')
     if need_updating_configure(configure):
         shutil.copyfile(configure + ".in", configure)
         os.chmod(configure, 0755)
 
     # Run configure
     if not args.noconf:
-        run_command(['sh', '-c', posixpath.join(PDIR.js_src, 'configure') + ' ' + CONFIGURE_ARGS], check=True)
+        run_command(['sh', '-c', posixpath.join(PDIR.js_src, 'configure') +
+                     ' ' + CONFIGURE_ARGS], check=True)
 
     # Run make
     run_command('%s -w %s' % (MAKE, MAKEFLAGS), shell=True, check=True)
 
     if use_minidump:
         # Convert symbols to breakpad format.
         hostdir = os.path.join(OBJDIR, "dist", "host", "bin")
         if not os.path.isdir(hostdir):
@@ -419,24 +427,26 @@ COMMAND_PREFIX = []
 if subprocess.call("type setarch >/dev/null 2>&1", shell=True) == 0:
     COMMAND_PREFIX.extend(['setarch', platform.machine(), '-R'])
 
 
 def run_test_command(command, **kwargs):
     _, _, status = run_command(COMMAND_PREFIX + command, check=False, **kwargs)
     return status
 
+
 test_suites = set(['jstests', 'jittest', 'jsapitests', 'checks'])
 
 
 def normalize_tests(tests):
     if 'all' in tests:
         return test_suites
     return tests
 
+
 # Need a platform name to use as a key in variant files.
 if args.platform:
     variant_platform = args.platform.split("-")[0]
 elif platform.system() == 'Windows':
     variant_platform = 'win64' if word_bits == 64 else 'win32'
 elif platform.system() == 'Linux':
     variant_platform = 'linux64' if word_bits == 64 else 'linux'
 elif platform.system() == 'Darwin':
--- a/js/src/devtools/gc/gc-test.py
+++ b/js/src/devtools/gc/gc-test.py
@@ -1,98 +1,113 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 # Works with python2.6
 
-import datetime, os, re, sys, traceback
-import math, string, copy, json
+import datetime
+import os
+import re
+import sys
+import traceback
+import math
+import string
+import copy
+import json
 import subprocess
 from subprocess import *
 from operator import itemgetter
 
+
 class Test:
     def __init__(self, path, name):
         self.path = path
         self.name = name
 
     @classmethod
     def from_file(cls, path, name, options):
         return cls(path, name)
 
-def find_tests(dir, substring = None):
+
+def find_tests(dir, substring=None):
     ans = []
     for dirpath, dirnames, filenames in os.walk(dir):
         if dirpath == '.':
             continue
         for filename in filenames:
             if not filename.endswith('.js'):
                 continue
             test = os.path.join(dirpath, filename)
             if substring is None or substring in os.path.relpath(test, dir):
                 ans.append([test, filename])
     return ans
 
+
 def get_test_cmd(path):
-    return [ JS, '-f', path ]
+    return [JS, '-f', path]
+
 
 def avg(seq):
     return sum(seq) / len(seq)
 
+
 def stddev(seq, mean):
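     # Population standard deviation: divide by N, not N-1.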
     diffs = ((float(item) - mean) ** 2 for item in seq)
     return math.sqrt(sum(diffs) / len(seq))
 
+
 def run_test(test):
     env = os.environ.copy()
     env['MOZ_GCTIMER'] = 'stderr'
     cmd = get_test_cmd(test.path)
     total = []
     mark = []
     sweep = []
     close_fds = sys.platform != 'win32'
     p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=close_fds, env=env)
     out, err = p.communicate()
     out, err = out.decode(), err.decode()
 
     float_array = [float(_) for _ in err.split()]
-    
+
     if len(float_array) == 0:
         print('Error: No data from application. Configured with --enable-gctimer?')
         sys.exit(1)
 
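     # The timer output is parsed below as repeating (total, mark, sweep) triples.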
     for i, currItem in enumerate(float_array):
         if (i % 3 == 0):
             total.append(currItem)
         else:
             if (i % 3 == 1):
                 mark.append(currItem)
             else:
                 sweep.append(currItem)
 
     return max(total), avg(total), max(mark), avg(mark), max(sweep), avg(sweep)
 
+
 def run_tests(tests, test_dir):
     bench_map = {}
 
     try:
         for i, test in enumerate(tests):
             filename_str = '"%s"' % test.name
             TMax, TAvg, MMax, MAvg, SMax, SAvg = run_test(test)
             bench_map[test.name] = [TMax, TAvg, MMax, MAvg, SMax, SAvg]
             fmt = '%20s: {"TMax": %4.1f, "TAvg": %4.1f, "MMax": %4.1f, "MAvg": %4.1f, "SMax": %4.1f, "SAvg": %4.1f}'
             if (i != len(tests) - 1):
                 fmt += ','
-            print(fmt %(filename_str ,TMax, TAvg, MMax, MAvg, SMax, MAvg))
+            print(fmt % (filename_str, TMax, TAvg, MMax, MAvg, SMax, SAvg))
     except KeyboardInterrupt:
         print('fail')
 
     return dict((filename, dict(TMax=TMax, TAvg=TAvg, MMax=MMax, MAvg=MAvg, SMax=SMax, SAvg=SAvg))
-            for filename, (TMax, TAvg, MMax, MAvg, SMax, SAvg) in bench_map.iteritems())
+                for filename, (TMax, TAvg, MMax, MAvg, SMax, SAvg) in bench_map.iteritems())
+
 
 def compare(current, baseline):
     percent_speedups = []
     for key, current_result in current.iteritems():
         try:
             baseline_result = baseline[key]
         except KeyError:
             print key, 'missing from baseline'
@@ -101,64 +116,65 @@ def compare(current, baseline):
         val_getter = itemgetter('TMax', 'TAvg', 'MMax', 'MAvg', 'SMax', 'SAvg')
         BTMax, BTAvg, BMMax, BMAvg, BSMax, BSAvg = val_getter(baseline_result)
         CTMax, CTAvg, CMMax, CMAvg, CSMax, CSAvg = val_getter(current_result)
 
         fmt = '%30s: %s'
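         # (CTAvg / BTAvg - 1) * 100 is negative when the current run is faster.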
         if CTAvg <= BTAvg:
             speedup = (CTAvg / BTAvg - 1) * 100
             result = 'faster: %6.2f < baseline %6.2f (%+6.2f%%)' % \
-                    (CTAvg, BTAvg, speedup)
+                (CTAvg, BTAvg, speedup)
             percent_speedups.append(speedup)
         else:
             slowdown = (CTAvg / BTAvg - 1) * 100
             result = 'SLOWER: %6.2f > baseline %6.2f (%+6.2f%%) ' % \
-                    (CTAvg, BTAvg, slowdown)
+                (CTAvg, BTAvg, slowdown)
             percent_speedups.append(slowdown)
         print '%30s: %s' % (key, result)
     if percent_speedups:
         print 'Average speedup: %.2f%%' % avg(percent_speedups)
 
+
 if __name__ == '__main__':
     script_path = os.path.abspath(__file__)
     script_dir = os.path.dirname(script_path)
     test_dir = os.path.join(script_dir, 'tests')
 
     from optparse import OptionParser
     op = OptionParser(usage='%prog [options] JS_SHELL [TESTS]')
 
     op.add_option('-b', '--baseline', metavar='JSON_PATH',
-            dest='baseline_path', help='json file with baseline values to '
-            'compare against')
+                  dest='baseline_path', help='json file with baseline values to '
+                  'compare against')
 
     (OPTIONS, args) = op.parse_args()
     if len(args) < 1:
         op.error('missing JS_SHELL argument')
     # We need to make sure we are using backslashes on Windows.
     JS, test_args = os.path.normpath(args[0]), args[1:]
 
     test_list = []
     bench_map = {}
 
     test_list = find_tests(test_dir)
 
     if not test_list:
         print >> sys.stderr, "No tests found matching command line arguments."
         sys.exit(0)
 
-    test_list = [ Test.from_file(tst, name, OPTIONS) for tst, name in test_list ]
+    test_list = [Test.from_file(tst, name, OPTIONS) for tst, name in test_list]
 
     try:
         print("{")
         bench_map = run_tests(test_list, test_dir)
         print("}")
 
     except OSError:
         if not os.path.exists(JS):
-            print >> sys.stderr, "JS shell argument: file does not exist: '%s'"%JS
+            print >> sys.stderr, "JS shell argument: file does not exist: '%s'" % JS
             sys.exit(1)
         else:
             raise
 
     if OPTIONS.baseline_path:
         baseline_map = []
         fh = open(OPTIONS.baseline_path, 'r')
         baseline_map = json.load(fh)
--- a/js/src/devtools/rootAnalysis/analyze.py
+++ b/js/src/devtools/rootAnalysis/analyze.py
@@ -21,47 +21,51 @@ import re
 anystring_t = str if sys.version_info[0] > 2 else basestring
 
 try:
     execfile
 except:
     def execfile(thefile, globals):
         exec(compile(open(thefile).read(), filename=thefile, mode="exec"), globals)
 
+
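 # Build the environment for analysis subprocesses: put the gcc and sixgill
 # binaries on PATH and point XDB/SOURCE/ANALYZED_OBJDIR at config locations.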
 def env(config):
     e = dict(os.environ)
-    e['PATH'] = ':'.join(p for p in (config.get('gcc_bin'), config.get('sixgill_bin'), e['PATH']) if p)
+    e['PATH'] = ':'.join(p for p in (config.get('gcc_bin'),
+                                     config.get('sixgill_bin'), e['PATH']) if p)
     e['XDB'] = '%(sixgill_bin)s/xdb.so' % config
     e['SOURCE'] = config['source']
     e['ANALYZED_OBJDIR'] = config['objdir']
     bindir = os.path.dirname(config['js'])
     e['LD_LIBRARY_PATH'] = ':'.join(p for p in (e.get('LD_LIBRARY_PATH'), bindir) if p)
     return e
 
+
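 # Substitute %(key)s config values into each element of a command template.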
 def fill(command, config):
     try:
         return tuple(s % config for s in command)
     except:
         print("Substitution failed:")
         problems = []
         for fragment in command:
             try:
                 fragment % config
             except:
                 problems.append(fragment)
-        raise Exception("\n".join(["Substitution failed:"] + [ "  %s" % s for s in problems ]))
+        raise Exception("\n".join(["Substitution failed:"] + ["  %s" % s for s in problems]))
+
 
 def print_command(command, outfile=None, env=None):
     output = ' '.join(command)
     if outfile:
         output += ' > ' + outfile
     if env:
         changed = {}
         e = os.environ
-        for key,value in env.items():
+        for key, value in env.items():
             if (key not in e) or (e[key] != value):
                 changed[key] = value
         if changed:
             outputs = []
             for key, value in changed.items():
                 if key in e and e[key] in value:
                     start = value.index(e[key])
                     end = start + len(e[key])
@@ -70,16 +74,17 @@ def print_command(command, outfile=None,
                                                        key,
                                                        value[end:]))
                 else:
                     outputs.append("%s='%s'" % (key, value))
             output = ' '.join(outputs) + " " + output
 
     print(output)
 
+
 def generate_hazards(config, outfilename):
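     # Shard the rooting analysis across config['jobs'] parallel JS processes;
     # each writes its own rootingHazards.<n>, concatenated into outfilename.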
     jobs = []
     for i in range(int(config['jobs'])):
         command = fill(('%(js)s',
                         '%(analysis_scriptdir)s/analyzeRoots.js',
                         '%(gcFunctions_list)s',
                         '%(gcEdges)s',
                         '%(suppressedFunctions_list)s',
@@ -92,83 +97,86 @@ def generate_hazards(config, outfilename
         output = open(outfile, 'w')
         if config['verbose']:
             print_command(command, outfile=outfile, env=env(config))
         jobs.append((command, Popen(command, stdout=output, env=env(config))))
 
     final_status = 0
     while jobs:
         pid, status = os.wait()
-        jobs = [ job for job in jobs if job[1].pid != pid ]
+        jobs = [job for job in jobs if job[1].pid != pid]
         final_status = final_status or status
 
     if final_status:
         raise subprocess.CalledProcessError(final_status, 'analyzeRoots.js')
 
     with open(outfilename, 'w') as output:
-        command = ['cat'] + [ 'rootingHazards.%s' % (i+1,) for i in range(int(config['jobs'])) ]
+        command = ['cat'] + ['rootingHazards.%s' % (i+1,) for i in range(int(config['jobs']))]
         if config['verbose']:
             print_command(command, outfile=outfilename)
         subprocess.call(command, stdout=output)
 
-JOBS = { 'dbs':
-             (('%(analysis_scriptdir)s/run_complete',
-               '--foreground',
-               '--no-logs',
-               '--build-root=%(objdir)s',
-               '--wrap-dir=%(sixgill)s/scripts/wrap_gcc',
-               '--work-dir=work',
-               '-b', '%(sixgill_bin)s',
-               '--buildcommand=%(buildcommand)s',
-               '.'),
-              ()),
 
-         'list-dbs':
-             (('ls', '-l'),
-              ()),
+JOBS = {'dbs':
+        (('%(analysis_scriptdir)s/run_complete',
+          '--foreground',
+          '--no-logs',
+          '--build-root=%(objdir)s',
+          '--wrap-dir=%(sixgill)s/scripts/wrap_gcc',
+          '--work-dir=work',
+          '-b', '%(sixgill_bin)s',
+          '--buildcommand=%(buildcommand)s',
+          '.'),
+         ()),
 
-         'callgraph':
-             (('%(js)s', '%(analysis_scriptdir)s/computeCallgraph.js', '%(typeInfo)s'),
-              'callgraph.txt'),
+        'list-dbs':
+        (('ls', '-l'),
+         ()),
+
+        'callgraph':
+        (('%(js)s', '%(analysis_scriptdir)s/computeCallgraph.js', '%(typeInfo)s'),
+         'callgraph.txt'),
 
-         'gcFunctions':
-             (('%(js)s', '%(analysis_scriptdir)s/computeGCFunctions.js', '%(callgraph)s',
-               '[gcFunctions]', '[gcFunctions_list]', '[gcEdges]', '[suppressedFunctions_list]'),
-              ('gcFunctions.txt', 'gcFunctions.lst', 'gcEdges.txt', 'suppressedFunctions.lst')),
+        'gcFunctions':
+        (('%(js)s', '%(analysis_scriptdir)s/computeGCFunctions.js', '%(callgraph)s',
+          '[gcFunctions]', '[gcFunctions_list]', '[gcEdges]', '[suppressedFunctions_list]'),
+         ('gcFunctions.txt', 'gcFunctions.lst', 'gcEdges.txt', 'suppressedFunctions.lst')),
 
-         'gcTypes':
-             (('%(js)s', '%(analysis_scriptdir)s/computeGCTypes.js',
-               '[gcTypes]', '[typeInfo]'),
-              ('gcTypes.txt', 'typeInfo.txt')),
+        'gcTypes':
+        (('%(js)s', '%(analysis_scriptdir)s/computeGCTypes.js',
+          '[gcTypes]', '[typeInfo]'),
+         ('gcTypes.txt', 'typeInfo.txt')),
 
-         'allFunctions':
-             (('%(sixgill_bin)s/xdbkeys', 'src_body.xdb',),
-              'allFunctions.txt'),
+        'allFunctions':
+        (('%(sixgill_bin)s/xdbkeys', 'src_body.xdb',),
+         'allFunctions.txt'),
 
-         'hazards':
-             (generate_hazards, 'rootingHazards.txt'),
+        'hazards':
+        (generate_hazards, 'rootingHazards.txt'),
 
-         'explain':
-             ((os.environ.get('PYTHON', 'python2.7'),
-               '%(analysis_scriptdir)s/explain.py',
-               '%(hazards)s', '%(gcFunctions)s',
-               '[explained_hazards]', '[unnecessary]', '[refs]'),
-              ('hazards.txt', 'unnecessary.txt', 'refs.txt')),
+        'explain':
+        ((os.environ.get('PYTHON', 'python2.7'),
+          '%(analysis_scriptdir)s/explain.py',
+          '%(hazards)s', '%(gcFunctions)s',
+          '[explained_hazards]', '[unnecessary]', '[refs]'),
+         ('hazards.txt', 'unnecessary.txt', 'refs.txt')),
 
-         'heapwrites':
-             (('%(js)s', '%(analysis_scriptdir)s/analyzeHeapWrites.js'),
-              'heapWriteHazards.txt'),
-         }
+        'heapwrites':
+        (('%(js)s', '%(analysis_scriptdir)s/analyzeHeapWrites.js'),
+         'heapWriteHazards.txt'),
+        }
+
 
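 # Yield (index, name) for command arguments written as '[name]'; these slots
 # are later replaced with the step's output file paths.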
 def out_indexes(command):
     for i in range(len(command)):
         m = re.match(r'^\[(.*)\]$', command[i])
         if m:
             yield (i, m.group(1))
 
+
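 # Run one analysis step. Outputs are written to temp files first and renamed
 # into place only on success, so a failed step leaves no partial results.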
 def run_job(name, config):
     cmdspec, outfiles = JOBS[name]
     print("Running " + name + " to generate " + str(outfiles))
     if hasattr(cmdspec, '__call__'):
         cmdspec(config, outfiles)
     else:
         temp_map = {}
         cmdspec = fill(cmdspec, config)
@@ -202,20 +210,21 @@ def run_job(name, config):
                 subprocess.check_call(command, stdout=output, env=env(config))
         for (temp, final) in temp_map.items():
             try:
                 os.rename(temp, final)
             except OSError:
                 print("Error renaming %s -> %s" % (temp, final))
                 raise
 
-config = { 'analysis_scriptdir': os.path.dirname(__file__) }
+
+config = {'analysis_scriptdir': os.path.dirname(__file__)}
 
-defaults = [ '%s/defaults.py' % config['analysis_scriptdir'],
-             '%s/defaults.py' % os.getcwd() ]
+defaults = ['%s/defaults.py' % config['analysis_scriptdir'],
+            '%s/defaults.py' % os.getcwd()]
 
 parser = argparse.ArgumentParser(description='Statically analyze build tree for rooting hazards.')
 parser.add_argument('step', metavar='STEP', type=str, nargs='?',
                     help='run starting from this step')
 parser.add_argument('--source', metavar='SOURCE', type=str, nargs='?',
                     help='source code to analyze')
 parser.add_argument('--objdir', metavar='DIR', type=str, nargs='?',
                     help='object directory of compiled files')
@@ -246,22 +255,22 @@ for default in defaults:
         execfile(default, config)
         if args.verbose:
             print("Loaded %s" % default)
     except:
         pass
 
 data = config.copy()
 
-for k,v in vars(args).items():
+for k, v in vars(args).items():
     if v is not None:
         data[k] = v
 
 if args.tag and not args.buildcommand:
-    args.buildcommand="build.%s" % args.tag
+    args.buildcommand = "build.%s" % args.tag
 
 if args.jobs is not None:
     data['jobs'] = args.jobs
 if not data.get('jobs'):
     data['jobs'] = int(subprocess.check_output(['nproc', '--ignore=1']).strip())
 
 if args.buildcommand:
     data['buildcommand'] = args.buildcommand
@@ -273,30 +282,32 @@ else:
 if 'ANALYZED_OBJDIR' in os.environ:
     data['objdir'] = os.environ['ANALYZED_OBJDIR']
 
 if 'SOURCE' in os.environ:
     data['source'] = os.environ['SOURCE']
 
 if data.get('sixgill_bin'):
     if not data.get('source'):
-        path = subprocess.check_output(['sh', '-c', data['sixgill_bin'] + '/xdbkeys file_source.xdb | grep jsapi.cpp']).decode()
+        path = subprocess.check_output(
+            ['sh', '-c', data['sixgill_bin'] + '/xdbkeys file_source.xdb | grep jsapi.cpp']).decode()
         data['source'] = path.replace("\n", "").replace("/js/src/jsapi.cpp", "")
     if not data.get('objdir'):
-        path = subprocess.check_output(['sh', '-c', data['sixgill_bin'] + '/xdbkeys file_source.xdb | grep jsapi.h']).decode()
+        path = subprocess.check_output(
+            ['sh', '-c', data['sixgill_bin'] + '/xdbkeys file_source.xdb | grep jsapi.h']).decode()
         data['objdir'] = path.replace("\n", "").replace("/jsapi.h", "")
 
-steps = [ 'dbs',
-          'gcTypes',
-          'callgraph',
-          'gcFunctions',
-          'allFunctions',
-          'hazards',
-          'explain',
-          'heapwrites' ]
+steps = ['dbs',
+         'gcTypes',
+         'callgraph',
+         'gcFunctions',
+         'allFunctions',
+         'hazards',
+         'explain',
+         'heapwrites']
 
 if args.list:
     for step in steps:
         command, outfilename = JOBS[step]
         if outfilename:
             print("%s -> %s" % (step, outfilename))
         else:
             print(step)
@@ -306,17 +317,18 @@ for step in steps:
     command, outfiles = JOBS[step]
     if isinstance(outfiles, anystring_t):
         data[step] = outfiles
     else:
         outfile = 0
         for (i, name) in out_indexes(command):
             data[name] = outfiles[outfile]
             outfile += 1
-        assert len(outfiles) == outfile, 'step \'%s\': mismatched number of output files (%d) and params (%d)' % (step, outfile, len(outfiles))
+        assert len(outfiles) == outfile, 'step \'%s\': mismatched number of output files (%d) and params (%d)' % (
+            step, outfile, len(outfiles))
 
 if args.step:
     steps = steps[steps.index(args.step):]
 
 if args.upto:
     steps = steps[:steps.index(args.upto)+1]
 
 for step in steps:
--- a/js/src/devtools/rootAnalysis/explain.py
+++ b/js/src/devtools/rootAnalysis/explain.py
@@ -12,19 +12,19 @@ parser.add_argument('hazards', nargs='?'
 parser.add_argument('extra', nargs='?', default='unnecessary.txt')
 parser.add_argument('refs', nargs='?', default='refs.txt')
 args = parser.parse_args()
 
 num_hazards = 0
 num_refs = 0
 try:
     with open(args.rootingHazards) as rootingHazards, \
-        open(args.hazards, 'w') as hazards, \
-        open(args.extra, 'w') as extra, \
-        open(args.refs, 'w') as refs:
+            open(args.hazards, 'w') as hazards, \
+            open(args.extra, 'w') as extra, \
+            open(args.refs, 'w') as refs:
         current_gcFunction = None
 
         # Map from a GC function name to the list of hazards resulting from
         # that GC function
         hazardousGCFunctions = defaultdict(list)
 
         # List of tuples (gcFunction, index of hazard) used to maintain the
         # ordering of the hazards
@@ -45,23 +45,25 @@ try:
                 continue
 
             m = re.match(r'^Function.*takes unsafe address of unrooted', line)
             if m:
                 num_refs += 1
                 print >>refs, line
                 continue
 
-            m = re.match(r"^Function.*has unrooted.*of type.*live across GC call ('?)(.*?)('?) at \S+:\d+$", line)
+            m = re.match(
+                r"^Function.*has unrooted.*of type.*live across GC call ('?)(.*?)('?) at \S+:\d+$", line)
             if m:
                 # Function names are surrounded by single quotes. Field calls
                 # are unquoted.
                 current_gcFunction = m.group(2)
                 hazardousGCFunctions[current_gcFunction].append(line)
-                hazardOrder.append((current_gcFunction, len(hazardousGCFunctions[current_gcFunction]) - 1))
+                hazardOrder.append((current_gcFunction, len(
+                    hazardousGCFunctions[current_gcFunction]) - 1))
                 num_hazards += 1
                 continue
 
             if current_gcFunction:
                 if not line.strip():
                     # Blank line => end of this hazard
                     current_gcFunction = None
                 else:
--- a/js/src/devtools/rootAnalysis/run-test.py
+++ b/js/src/devtools/rootAnalysis/run-test.py
@@ -59,19 +59,21 @@ if not cfg.cxx:
         cfg.cxx = "g++"
 if not cfg.sixgill_bin:
     cfg.sixgill_bin = os.path.join(cfg.sixgill, "usr", "bin")
 if not cfg.sixgill_plugin:
     cfg.sixgill_plugin = os.path.join(cfg.sixgill, "usr", "libexec", "sixgill", "gcc", "xgill.so")
 
 subprocess.check_call([cfg.js, '-e', 'if (!getBuildConfiguration()["has-ctypes"]) quit(1)'])
 
+
 def binpath(prog):
     return os.path.join(cfg.sixgill_bin, prog)
 
+
 try:
     os.mkdir(os.path.join('t', 'out'))
 except OSError:
     pass
 
 for name in cfg.tests:
     name = os.path.basename(name)
     indir = os.path.join(testdir, name)
--- a/js/src/devtools/rootAnalysis/t/hazards/test.py
+++ b/js/src/devtools/rootAnalysis/t/hazards/test.py
@@ -24,24 +24,25 @@ assert('<returnvalue>' in hazmap)
 assert(hazmap['cell2'].function == 'Cell* f()')
 print(len(set(haz.function for haz in hazards)))
 assert(len(set(haz.function for haz in hazards)) == 2)
 
 # Check that the correct GC call is reported for each hazard. (cell3 has a
 # hazard from two different GC calls; it doesn't really matter which is
 # reported.)
 assert(hazmap['cell2'].GCFunction == 'void halfSuppressedFunction()')
-assert(hazmap['cell3'].GCFunction in ('void halfSuppressedFunction()', 'void unsuppressedFunction()'))
+assert(hazmap['cell3'].GCFunction in (
+    'void halfSuppressedFunction()', 'void unsuppressedFunction()'))
 assert(hazmap['<returnvalue>'].GCFunction == 'void GCInDestructor::~GCInDestructor()')
 
 # Type names are handy to have in the report.
 assert(hazmap['cell2'].type == 'Cell*')
 assert(hazmap['<returnvalue>'].type == 'Cell*')
 
 # loopy hazards. See comments in source.
-assert('haz1' not in hazmap);
-assert('haz2' not in hazmap);
-assert('haz3' in hazmap);
-assert('haz4' in hazmap);
-assert('haz5' in hazmap);
-assert('haz6' not in hazmap);
-assert('haz7' not in hazmap);
-assert('haz8' in hazmap);
+assert('haz1' not in hazmap)
+assert('haz2' not in hazmap)
+assert('haz3' in hazmap)
+assert('haz4' in hazmap)
+assert('haz5' in hazmap)
+assert('haz6' not in hazmap)
+assert('haz7' not in hazmap)
+assert('haz8' in hazmap)
--- a/js/src/devtools/rootAnalysis/t/sixgill-tree/test.py
+++ b/js/src/devtools/rootAnalysis/t/sixgill-tree/test.py
@@ -36,17 +36,17 @@ assert(cell['Kind'] == 'Struct')
 annotations = cell['Annotation']
 assert(len(annotations) == 1)
 (tag, value) = annotations[0]['Name']
 assert(tag == 'Tag')
 assert(value == 'GC Thing')
 
 # Check JSObject inheritance.
 JSObject = test.load_db_entry("src_comp", 'JSObject')[0]
-bases = [ b['Base'] for b in JSObject['CSUBaseClass'] ]
+bases = [b['Base'] for b in JSObject['CSUBaseClass']]
 assert('js::gc::Cell' in bases)
 assert('Bogon' in bases)
 assert(len(bases) == 2)
 
 # Check type analysis
 gctypes = test.load_gcTypes()
 assert('js::gc::Cell' in gctypes['GCThings'])
 assert('JustACell' in gctypes['GCThings'])
--- a/js/src/devtools/rootAnalysis/t/sixgill.py
+++ b/js/src/devtools/rootAnalysis/t/sixgill.py
@@ -1,28 +1,31 @@
 #!/usr/bin/env python
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from collections import defaultdict
 
 # Simplified version of the body info.
+
+
 class Body(dict):
     def __init__(self, body):
         self['BlockIdKind'] = body['BlockId']['Kind']
         if 'Variable' in body['BlockId']:
             self['BlockName'] = body['BlockId']['Variable']['Name'][0].split("$")[-1]
         loc = body['Location']
         self['LineRange'] = (loc[0]['Line'], loc[1]['Line'])
         self['Filename'] = loc[0]['CacheString']
         self['Edges'] = body.get('PEdge', [])
-        self['Points'] = { i: p['Location']['Line'] for i, p in enumerate(body['PPoint'], 1) }
+        self['Points'] = {i: p['Location']['Line'] for i, p in enumerate(body['PPoint'], 1)}
         self['Index'] = body['Index']
-        self['Variables'] = { x['Variable']['Name'][0].split("$")[-1]: x['Type'] for x in body['DefineVariable'] }
+        self['Variables'] = {x['Variable']['Name'][0].split(
+            "$")[-1]: x['Type'] for x in body['DefineVariable']}
 
         # Indexes
         self['Line2Points'] = defaultdict(list)
         for point, line in self['Points'].items():
             self['Line2Points'][line].append(point)
         self['SrcPoint2Edges'] = defaultdict(list)
         for edge in self['Edges']:
             src, dst = edge['Index']
--- a/js/src/devtools/rootAnalysis/t/testlib.py
+++ b/js/src/devtools/rootAnalysis/t/testlib.py
@@ -3,22 +3,25 @@ import os
 import re
 import subprocess
 
 from sixgill import Body
 from collections import defaultdict, namedtuple
 
 scriptdir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
 
-HazardSummary = namedtuple('HazardSummary', ['function', 'variable', 'type', 'GCFunction', 'location'])
+HazardSummary = namedtuple(
+    'HazardSummary', ['function', 'variable', 'type', 'GCFunction', 'location'])
+
 
 def equal(got, expected):
     if got != expected:
         print("Got '%s', expected '%s'" % (got, expected))
 
+
 def extract_unmangled(func):
     return func.split('$')[-1]
 
 
 class Test(object):
     def __init__(self, indir, outdir, cfg, verbose=0):
         self.indir = indir
         self.outdir = outdir
@@ -26,17 +29,17 @@ class Test(object):
         self.verbose = verbose
 
     def infile(self, path):
         return os.path.join(self.indir, path)
 
     def binpath(self, prog):
         return os.path.join(self.cfg.sixgill_bin, prog)
 
-    def compile(self, source, options = ''):
+    def compile(self, source, options=''):
         cmd = "{CXX} -c {source} -O3 -std=c++11 -fplugin={sixgill} -fplugin-arg-xgill-mangle=1 {options}".format(
             source=self.infile(source),
             CXX=self.cfg.cxx, sixgill=self.cfg.sixgill_plugin,
             options=options)
         if self.cfg.verbose:
             print("Running %s" % cmd)
         subprocess.check_call(["sh", "-c", cmd])
 
@@ -100,17 +103,18 @@ sixgill_bin = '{bindir}'
             gctypes[collection].append(typename)
         return gctypes
 
     def load_gcFunctions(self):
         return self.load_text_file('gcFunctions.lst', extract=extract_unmangled)
 
     def load_hazards(self):
         def grab_hazard(line):
-            m = re.match(r"Function '(.*?)' has unrooted '(.*?)' of type '(.*?)' live across GC call '(.*?)' at (.*)", line)
+            m = re.match(
+                r"Function '(.*?)' has unrooted '(.*?)' of type '(.*?)' live across GC call '(.*?)' at (.*)", line)
             if m:
                 info = list(m.groups())
                 info[0] = info[0].split("$")[-1]
                 info[3] = info[3].split("$")[-1]
                 return HazardSummary(*info)
             return None
 
         return self.load_text_file('rootingHazards.txt', extract=grab_hazard)
--- a/js/src/frontend/GenerateReservedWords.py
+++ b/js/src/frontend/GenerateReservedWords.py
@@ -1,53 +1,59 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 import re
 import sys
 
+
 def read_reserved_word_list(filename):
     macro_pat = re.compile(r"^\s*macro\(([^,]+), *[^,]+, *[^\)]+\)\s*\\?$")
 
     reserved_word_list = []
     index = 0
     with open(filename, 'r') as f:
         for line in f:
             m = macro_pat.search(line)
             if m:
                 reserved_word_list.append((index, m.group(1)))
                 index += 1
 
     assert(len(reserved_word_list) != 0)
 
     return reserved_word_list
 
+
 def line(opt, s):
     opt['output'].write('{}{}\n'.format('    ' * opt['indent_level'], s))
 
+
 def indent(opt):
     opt['indent_level'] += 1
 
+
 def dedent(opt):
     opt['indent_level'] -= 1
 
+
 def span_and_count_at(reserved_word_list, column):
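     # Return (span, count) for the characters at |column|: span is the distance
     # between the smallest and largest code points, count the distinct chars.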
-    assert(len(reserved_word_list) != 0);
+    assert(len(reserved_word_list) != 0)
 
     chars_dict = {}
     for index, word in reserved_word_list:
         chars_dict[ord(word[column])] = True
 
     chars = sorted(chars_dict.keys())
     return chars[-1] - chars[0] + 1, len(chars)
 
+
 def optimal_switch_column(opt, reserved_word_list, columns, unprocessed_columns):
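     # Pick the column to switch on: prefer one with few distinct characters
     # (emitted as 'if' tests), otherwise the smallest span of code points.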
-    assert(len(reserved_word_list) != 0);
-    assert(unprocessed_columns != 0);
+    assert(len(reserved_word_list) != 0)
+    assert(unprocessed_columns != 0)
 
     min_count = 0
     min_span = 0
     min_count_index = 0
     min_span_index = 0
 
     for index in range(0, unprocessed_columns):
         span, count = span_and_count_at(reserved_word_list, columns[index])
@@ -66,30 +72,32 @@ def optimal_switch_column(opt, reserved_
             min_count = count
             min_count_index = index
 
     if min_count <= opt['use_if_threshold']:
         return min_count_index, True
 
     return min_span_index, False
 
+
 def split_list_per_column(reserved_word_list, column):
-    assert(len(reserved_word_list) != 0);
+    assert(len(reserved_word_list) != 0)
 
     column_dict = {}
     for item in reserved_word_list:
         index, word = item
         per_column = column_dict.setdefault(word[column], [])
         per_column.append(item)
 
     return sorted(column_dict.items(), key=lambda (char, word): ord(char))
 
+
 def generate_letter_switch(opt, unprocessed_columns, reserved_word_list,
                            columns=None):
-    assert(len(reserved_word_list) != 0);
+    assert(len(reserved_word_list) != 0)
 
     if not columns:
         columns = range(0, unprocessed_columns)
 
     if len(reserved_word_list) == 1:
         index, word = reserved_word_list[0]
 
         if unprocessed_columns == 0:
@@ -110,17 +118,17 @@ def generate_letter_switch(opt, unproces
         indent(opt)
         line(opt, 'JSRW_GOT_MATCH({}) /* {} */'.format(index, word))
         dedent(opt)
 
         line(opt, '}')
         line(opt, 'JSRW_NO_MATCH()')
         return
 
-    assert(unprocessed_columns != 0);
+    assert(unprocessed_columns != 0)
 
     optimal_column_index, use_if = optimal_switch_column(opt, reserved_word_list,
                                                          columns,
                                                          unprocessed_columns)
     optimal_column = columns[optimal_column_index]
 
     # Make a copy to avoid breaking passed list.
     columns = columns[:]
@@ -147,29 +155,31 @@ def generate_letter_switch(opt, unproces
         if use_if:
             line(opt, '}')
 
     if not use_if:
         line(opt, '}')
 
     line(opt, 'JSRW_NO_MATCH()')
 
+
 def split_list_per_length(reserved_word_list):
-    assert(len(reserved_word_list) != 0);
+    assert(len(reserved_word_list) != 0)
 
     length_dict = {}
     for item in reserved_word_list:
         index, word = item
         per_length = length_dict.setdefault(len(word), [])
         per_length.append(item)
 
     return sorted(length_dict.items(), key=lambda (length, word): length)
 
+
 def generate_switch(opt, reserved_word_list):
-    assert(len(reserved_word_list) != 0);
+    assert(len(reserved_word_list) != 0)
 
     line(opt, '/*')
     line(opt, ' * Generating switch for the list of {} entries:'.format(len(reserved_word_list)))
     for index, word in reserved_word_list:
         line(opt, ' * {}'.format(word))
     line(opt, ' */')
 
     list_per_length = split_list_per_length(reserved_word_list)
@@ -193,21 +203,23 @@ def generate_switch(opt, reserved_word_l
 
         if use_if:
             line(opt, '}')
 
     if not use_if:
         line(opt, '}')
     line(opt, 'JSRW_NO_MATCH()')
 
+
 def main(output, reserved_words_h):
     reserved_word_list = read_reserved_word_list(reserved_words_h)
 
     opt = {
         'indent_level': 1,
         'use_if_threshold': 3,
         'char_tail_test_threshold': 4,
         'output': output
     }
     generate_switch(opt, reserved_word_list)
 
+
 if __name__ == '__main__':
     main(sys.stdout, *sys.argv[1:])
--- a/js/src/gc/GenerateStatsPhases.py
+++ b/js/src/gc/GenerateStatsPhases.py
@@ -48,23 +48,25 @@
 #            +---+   +---+
 #            | E |   | E'|
 #            +---+   +---+
 
 import re
 import sys
 import collections
 
+
 class PhaseKind():
-    def __init__(self, name, descr, bucket, children = []):
+    def __init__(self, name, descr, bucket, children=[]):
         self.name = name
         self.descr = descr
         self.bucket = bucket
         self.children = children
 
+
 # The root marking phase appears in several places in the graph.
 MarkRootsPhaseKind = PhaseKind("MARK_ROOTS", "Mark Roots", 48, [
     PhaseKind("MARK_CCWS", "Mark Cross Compartment Wrappers", 50),
     PhaseKind("MARK_STACK", "Mark C and JS stacks", 51),
     PhaseKind("MARK_RUNTIME_DATA", "Mark Runtime-wide Data", 52),
     PhaseKind("MARK_EMBEDDING", "Mark Embedding", 53),
     PhaseKind("MARK_COMPARTMENTS", "Mark Compartments", 54)
 ])
@@ -140,17 +142,17 @@ PhaseKindGraphRoots = [
         PhaseKind("SWEEP_STRING", "Sweep String", 34),
         PhaseKind("SWEEP_SCRIPT", "Sweep Script", 35),
         PhaseKind("SWEEP_SCOPE", "Sweep Scope", 59),
         PhaseKind("SWEEP_REGEXP_SHARED", "Sweep RegExpShared", 61),
         PhaseKind("SWEEP_SHAPE", "Sweep Shape", 36),
         PhaseKind("FINALIZE_END", "Finalize End Callback", 38),
         PhaseKind("DESTROY", "Deallocate", 39),
         JoinParallelTasksPhaseKind
-        ]),
+    ]),
     PhaseKind("COMPACT", "Compact", 40, [
         PhaseKind("COMPACT_MOVE", "Compact Move", 41),
         PhaseKind("COMPACT_UPDATE", "Compact Update", 42, [
             MarkRootsPhaseKind,
             PhaseKind("COMPACT_UPDATE_CELLS", "Compact Update Cells", 43),
             JoinParallelTasksPhaseKind
         ]),
     ]),
@@ -168,50 +170,54 @@ PhaseKindGraphRoots = [
         UnmarkGrayPhaseKind
     ])
 ]
 
 # Make a linear list of all unique phases by performing a depth first
 # search on the phase graph starting at the roots.  This will be used to
 # generate the PhaseKind enum.
 
+
 def findAllPhaseKinds():
     phases = []
     seen = set()
 
     def dfs(phase):
         if phase in seen:
             return
         phases.append(phase)
         seen.add(phase)
         for child in phase.children:
             dfs(child)
 
     for phase in PhaseKindGraphRoots:
         dfs(phase)
     return phases
 
+
 AllPhaseKinds = findAllPhaseKinds()
 
 # Expand the DAG into a tree, duplicating phases which have more than
 # one parent.
 
+
 class Phase:
     def __init__(self, phaseKind, parent):
         self.phaseKind = phaseKind
         self.parent = parent
         self.depth = parent.depth + 1 if parent else 0
         self.children = []
         self.nextSibling = None
         self.nextInPhaseKind = None
 
         self.path = re.sub(r'\W+', '_', phaseKind.name.lower())
         if parent is not None:
             self.path = parent.path + '.' + self.path
 
+
 def expandPhases():
     phases = []
     phasesForKind = collections.defaultdict(list)
 
     def traverse(phaseKind, parent):
         ep = Phase(phaseKind, parent)
         phases.append(ep)
 
@@ -228,16 +234,17 @@ def expandPhases():
             ep.children.append(child_ep)
         return ep
 
     for phaseKind in PhaseKindGraphRoots:
         traverse(phaseKind, None)
 
     return phases, phasesForKind
 
+
 AllPhases, PhasesForPhaseKind = expandPhases()
 
 # Name phases based on phase kind name and index if there are multiple phases
 # corresponding to a single phase kind.
 
 for phaseKind in AllPhaseKinds:
     phases = PhasesForPhaseKind[phaseKind]
     if len(phases) == 1:
@@ -247,27 +254,30 @@ for phaseKind in AllPhaseKinds:
             phase.name = "%s_%d" % (phaseKind.name, index + 1)
 
 # Find the maximum phase nesting.
 
 MaxPhaseNesting = max(phase.depth for phase in AllPhases) + 1
 
 # Generate code.
 
+
 def writeList(out, items):
     if items:
         out.write(",\n".join("  " + item for item in items) + "\n")
 
+
 def writeEnumClass(out, name, type, items, extraItems):
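     # FIRST aliases the first real enum value; LIMIT is an end sentinel.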
-    items = [ "FIRST" ] + items + [ "LIMIT" ] + extraItems
+    items = ["FIRST"] + items + ["LIMIT"] + extraItems
     items[1] += " = " + items[0]
-    out.write("enum class %s : %s {\n" % (name, type));
+    out.write("enum class %s : %s {\n" % (name, type))
     writeList(out, items)
     out.write("};\n")
 
+
 def generateHeader(out):
     #
     # Generate PhaseKind enum.
     #
     phaseKindNames = map(lambda phaseKind: phaseKind.name, AllPhaseKinds)
     extraPhaseKinds = [
         "NONE = LIMIT",
         "EXPLICIT_SUSPENSION = LIMIT",
@@ -288,16 +298,17 @@ def generateHeader(out):
     writeEnumClass(out, "Phase", "uint8_t", phaseNames, extraPhases)
     out.write("\n")
 
     #
     # Generate MAX_PHASE_NESTING constant.
     #
     out.write("static const size_t MAX_PHASE_NESTING = %d;\n" % MaxPhaseNesting)
 
+
 def generateCpp(out):
     #
     # Generate the PhaseKindInfo table.
     #
     out.write("static const PhaseKindTable phaseKinds = {\n")
     for phaseKind in AllPhaseKinds:
         phase = PhasesForPhaseKind[phaseKind][0]
         out.write("    /* PhaseKind::%s */ PhaseKindInfo { Phase::%s, %d },\n" %
--- a/js/src/gdb/lib-for-tests/catcher.py
+++ b/js/src/gdb/lib-for-tests/catcher.py
@@ -8,21 +8,23 @@
 #
 # Remember, errors in this file will cause spurious passes, so keep this as
 # simple as possible!
 
 import os
 import sys
 import traceback
 
+
 def execfile(filename, globs, locs):
     with open(filename) as f:
         code = compile(f.read(), filename, 'exec')
         exec(code, globs, locs)
 
+
 try:
     # testlibdir is set on the GDB command line, via:
     # --eval-command python testlibdir=...
     execfile(os.path.join(testlibdir, 'prologue.py'), globals(), locals())
 except Exception as err:
     sys.stderr.write('Error running GDB prologue:\n')
     traceback.print_exc()
     sys.exit(1)
--- a/js/src/gdb/lib-for-tests/prologue.py
+++ b/js/src/gdb/lib-for-tests/prologue.py
@@ -6,74 +6,87 @@ import traceback
 
 # testlibdir is set on the GDB command line, via --eval-command python testlibdir=...
 sys.path[0:0] = [testlibdir]
 
 active_fragment = None
 
 # Run the C++ fragment named |fragment|, stopping on entry to |function|
 # ('breakpoint', by default) and then select the calling frame.
+
+
 def run_fragment(fragment, function='breakpoint'):
     # Arrange to stop at a reasonable place in the test program.
     bp = gdb.Breakpoint(function)
     try:
         gdb.execute("run %s" % (fragment,))
         # Check that we did indeed stop by hitting the breakpoint we set.
         assert bp.hit_count == 1
     finally:
         bp.delete()
     gdb.execute('frame 1')
 
     global active_fragment
     active_fragment = fragment
 
 # Assert that |actual| is equal to |expected|; if not, complain in a helpful way.
+
+
 def assert_eq(actual, expected):
     if actual != expected:
         raise AssertionError("""Unexpected result:
 expected: %r
 actual:   %r""" % (expected, actual))
 
 # Assert that |expected| regex matches |actual| result; if not, complain in a helpful way.
+
+
 def assert_match(actual, expected):
     if re.match(expected, actual, re.MULTILINE) == None:
         raise AssertionError("""Unexpected result:
 expected pattern: %r
 actual:           %r""" % (expected, actual))
 
 # Assert that |value|'s pretty-printed form is |form|. If |value| is a
 # string, then evaluate it with gdb.parse_and_eval to produce a value.
+
+
 def assert_pretty(value, form):
     if isinstance(value, str):
         value = gdb.parse_and_eval(value)
     assert_eq(str(value), form)
 
 # Assert that |value|'s pretty-printed form matches the pattern |pattern|. If
 # |value| is a string, then evaluate it with gdb.parse_and_eval to produce a
 # value.
+
+
 def assert_regexp_pretty(value, form):
     if isinstance(value, str):
         value = gdb.parse_and_eval(value)
     assert_match(str(value), form)
 
 # Check that the list of registered pretty-printers includes one named
 # |printer|, with a subprinter named |subprinter|.
+
+
 def assert_subprinter_registered(printer, subprinter):
     # Match a line containing |printer| followed by a colon, and then a
     # series of more-indented lines containing |subprinter|.
 
-    names = { 'printer': re.escape(printer), 'subprinter': re.escape(subprinter) }
+    names = {'printer': re.escape(printer), 'subprinter': re.escape(subprinter)}
     pat = r'^( +)%(printer)s *\n(\1 +.*\n)*\1 +%(subprinter)s *\n' % names
     output = gdb.execute('info pretty-printer', to_string=True)
     if not re.search(pat, output, re.MULTILINE):
         raise AssertionError("assert_subprinter_registered failed to find pretty-printer:\n"
                              "  %s:%s\n"
                              "'info pretty-printer' says:\n"
                              "%s" % (printer, subprinter, output))
 
+
 enable_bigint = False
 try:
     if gdb.lookup_type('JS::BigInt'):
         enable_bigint = True
 except:
     pass
 
 # Request full stack traces for Python errors.
--- a/js/src/gdb/mozilla/ExecutableAllocator.py
+++ b/js/src/gdb/mozilla/ExecutableAllocator.py
@@ -7,30 +7,33 @@ allocated by the Jits.
 import gdb
 import mozilla.prettyprinters
 from mozilla.prettyprinters import pretty_printer, ptr_pretty_printer
 
 # Forget any printers from previous loads of this module.
 mozilla.prettyprinters.clear_module_printers(__name__)
 
 # Cache information about the ExecutableAllocator types for this objfile.
+
+
 class jsjitExecutableAllocatorCache(object):
     def __init__(self):
         self.d = None
 
     def __getattr__(self, name):
         if self.d is None:
             self.initialize()
         return self.d[name]
 
     def initialize(self):
         self.d = {}
         self.d['ExecutableAllocator'] = gdb.lookup_type('js::jit::ExecutableAllocator')
         self.d['ExecutablePool'] = gdb.lookup_type('js::jit::ExecutablePool')
 
+
 @pretty_printer("js::jit::ExecutableAllocator")
 class jsjitExecutableAllocator(object):
     def __init__(self, value, cache):
         if not cache.mod_ExecutableAllocator:
             cache.mod_ExecutableAllocator = jsjitExecutableAllocatorCache()
         self.value = value
         self.cache = cache.mod_ExecutableAllocator
 
@@ -41,34 +44,35 @@ class jsjitExecutableAllocator(object):
         return self.PoolIterator(self)
 
     class PoolIterator(object):
         def __init__(self, allocator):
             self.allocator = allocator
             self.entryType = allocator.cache.ExecutablePool.pointer()
             # Emulate the HashSet::Range
             self.table = allocator.value['m_pools']['impl']['table']
-            self.index = 0;
-            HASHNUMBER_BIT_SIZE =  32
+            self.index = 0
+            HASHNUMBER_BIT_SIZE = 32
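             # The iteration range covers 2**(32 - hashShift) table slots.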
             self.max = 1 << (HASHNUMBER_BIT_SIZE - allocator.value['m_pools']['impl']['hashShift'])
             if self.table == 0:
                 self.max = 0
 
         def __iter__(self):
-            return self;
+            return self
 
         def __next__(self):
             cur = self.index
             if cur >= self.max:
                 raise StopIteration()
             self.index = self.index + 1
-            if self.table[cur]['keyHash'] > 1: # table[i]->isLive()
+            if self.table[cur]['keyHash'] > 1:  # table[i]->isLive()
                 return self.table[cur]['mem']['u']['mDummy'].cast(self.entryType)
             return self.__next__()
 
+
 @ptr_pretty_printer("js::jit::ExecutablePool")
 class jsjitExecutablePool(mozilla.prettyprinters.Pointer):
     def __init__(self, value, cache):
         if not cache.mod_ExecutableAllocator:
             cache.mod_ExecutableAllocator = jsjitExecutableAllocatorCache()
         self.value = value
         self.cache = cache.mod_ExecutableAllocator
 
--- a/js/src/gdb/mozilla/GCCellPtr.py
+++ b/js/src/gdb/mozilla/GCCellPtr.py
@@ -4,39 +4,43 @@ import gdb
 import mozilla.prettyprinters
 
 from mozilla.prettyprinters import pretty_printer
 
 # Forget any printers from previous loads of this module.
 mozilla.prettyprinters.clear_module_printers(__name__)
 
 # Cache information about the JS::TraceKind type for this objfile.
+
+
 class GCCellPtrTypeCache(object):
     def __init__(self, cache):
         self.TraceKind_t = gdb.lookup_type('JS::TraceKind')
 
         # Build a mapping from TraceKind enum values to the types they denote.
         e = gdb.types.make_enum_dict(self.TraceKind_t)
         kind_to_type = {}
+
         def kind(k, t):
             kind_to_type[e['JS::TraceKind::' + k]] = gdb.lookup_type(t)
         kind('Object',      'JSObject')
         kind('String',      'JSString')
         kind('Symbol',      'JS::Symbol')
         kind('Script',      'JSScript')
         kind('Shape',       'js::Shape')
         kind('ObjectGroup', 'js::ObjectGroup')
         kind('BaseShape',   'js::BaseShape')
         kind('JitCode',     'js::jit::JitCode')
         kind('LazyScript',  'js::LazyScript')
         self.kind_to_type = kind_to_type
 
         self.Null = e['JS::TraceKind::Null']
         self.mask = gdb.parse_and_eval('JS::OutOfLineTraceKindMask')
 
+
 @pretty_printer('JS::GCCellPtr')
 class GCCellPtr(object):
     def __init__(self, value, cache):
         self.value = value
         if not cache.mod_GCCellPtr:
             cache.mod_GCCellPtr = GCCellPtrTypeCache(cache)
         self.cache = cache
 
--- a/js/src/gdb/mozilla/Interpreter.py
+++ b/js/src/gdb/mozilla/Interpreter.py
@@ -3,26 +3,29 @@
 import gdb
 import mozilla.prettyprinters as prettyprinters
 
 prettyprinters.clear_module_printers(__name__)
 
 from mozilla.prettyprinters import pretty_printer
 
 # Cache information about the Interpreter types for this objfile.
+
+
 class InterpreterTypeCache(object):
     def __init__(self):
         self.tValue = gdb.lookup_type('JS::Value')
         self.tJSOp = gdb.lookup_type('JSOp')
         self.tScriptFrameIterData = gdb.lookup_type('js::ScriptFrameIter::Data')
         self.tInterpreterFrame = gdb.lookup_type('js::InterpreterFrame')
         self.tBaselineFrame = gdb.lookup_type('js::jit::BaselineFrame')
         self.tRematerializedFrame = gdb.lookup_type('js::jit::RematerializedFrame')
         self.tDebugFrame = gdb.lookup_type('js::wasm::DebugFrame')
 
+
 @pretty_printer('js::InterpreterRegs')
 class InterpreterRegs(object):
     def __init__(self, value, cache):
         self.value = value
         self.cache = cache
         if not cache.mod_Interpreter:
             cache.mod_Interpreter = InterpreterTypeCache()
         self.itc = cache.mod_Interpreter
@@ -37,16 +40,17 @@ class InterpreterRegs(object):
         pc = self.value['pc']
         try:
             opcode = pc.dereference().cast(self.itc.tJSOp)
         except:
             opcode = 'bad pc'
         pc = 'pc = {} ({})'.format(pc.cast(self.cache.void_ptr_t), opcode)
         return '{{ {}, {}, {} }}'.format(fp_, sp, pc)
 
+
 @pretty_printer('js::AbstractFramePtr')
 class AbstractFramePtr(object):
     Tag_ScriptFrameIterData = 0x0
     Tag_InterpreterFrame = 0x1
     Tag_BaselineFrame = 0x2
     Tag_RematerializedFrame = 0x3
     Tag_WasmDebugFrame = 0x4
     TagMask = 0x7
--- a/js/src/gdb/mozilla/IonGraph.py
+++ b/js/src/gdb/mozilla/IonGraph.py
@@ -15,35 +15,40 @@ import tempfile
 import time
 import mozilla.prettyprinters
 from mozilla.prettyprinters import pretty_printer
 
 # Forget any printers from previous loads of this module.
 mozilla.prettyprinters.clear_module_printers(__name__)
 
 # Cache the type information used by the LSprinter printer for this objfile.
+
+
 class jsvmPrinterCache(object):
     def __init__(self):
         self.d = None
 
     def __getattr__(self, name):
         if self.d is None:
             self.initialize()
         return self.d[name]
 
     def initialize(self):
         self.d = {}
         self.d['char'] = gdb.lookup_type('char')
 
 # Dummy class used to store the content of the type cache in the context of the
 # iongraph command, which uses the jsvmLSprinter.
+
+
 class ModuleCache(object):
     def __init__(self):
         self.mod_IonGraph = None
 
+
 @pretty_printer("js::vm::LSprinter")
 class jsvmLSprinter(object):
     def __init__(self, value, cache):
         self.value = value
         if not cache.mod_IonGraph:
             cache.mod_IonGraph = jsvmPrinterCache()
         self.cache = cache.mod_IonGraph
 
@@ -57,77 +62,94 @@ class jsvmLSprinter(object):
             chars = (next + 1).cast(self.cache.char.pointer())
             res = res + chars.string('ascii', 'ignore', next['length'])
             next = next['next']
         length = next['length'] - self.value['unused_']
         chars = (next + 1).cast(self.cache.char.pointer())
         res = res + chars.string('ascii', 'ignore', length)
         return res
 
+
 def search_in_path(bin):
     paths = os.getenv("PATH", "")
     if paths == "":
         return ""
     for d in paths.split(":"):
         f = os.path.join(d, bin)
         if os.access(f, os.X_OK):
             return f
     return ""
 
+
 class IonGraphBinParameter(gdb.Parameter):
     set_doc = "Set the path to iongraph binary, used by iongraph command."
     show_doc = "Show the path to iongraph binary, used by iongraph command."
+
     def get_set_string(self):
         return "Path to iongraph binary changed to: %s" % self.value
+
     def get_show_string(self, value):
         return "Path to iongraph binary set to: %s" % value
+
     def __init__(self):
-        super (IonGraphBinParameter, self).__init__ ("iongraph-bin", gdb.COMMAND_SUPPORT, gdb.PARAM_FILENAME)
+        super(IonGraphBinParameter, self).__init__(
+            "iongraph-bin", gdb.COMMAND_SUPPORT, gdb.PARAM_FILENAME)
         self.value = os.getenv("GDB_IONGRAPH", "")
         if self.value == "":
             self.value = search_in_path("iongraph")
 
 
 class DotBinParameter(gdb.Parameter):
     set_doc = "Set the path to dot binary, used by iongraph command."
     show_doc = "Show the path to dot binary, used by iongraph command."
+
     def get_set_string(self):
         return "Path to dot binary changed to: %s" % self.value
+
     def get_show_string(self, value):
         return "Path to dot binary set to: %s" % value
+
     def __init__(self):
-        super (DotBinParameter, self).__init__ ("dot-bin", gdb.COMMAND_SUPPORT, gdb.PARAM_FILENAME)
+        super(DotBinParameter, self).__init__("dot-bin", gdb.COMMAND_SUPPORT, gdb.PARAM_FILENAME)
         self.value = os.getenv("GDB_DOT", "")
         if self.value == "":
             self.value = search_in_path("dot")
 
+
 class PngViewerBinParameter(gdb.Parameter):
     set_doc = "Set the path to a png viewer binary, used by iongraph command."
     show_doc = "Show the path to a png viewer binary, used by iongraph command."
+
     def get_set_string(self):
         return "Path to a png viewer binary changed to: %s" % self.value
+
     def get_show_string(self, value):
         return "Path to a png viewer binary set to: %s" % value
+
     def __init__(self):
-        super (PngViewerBinParameter, self).__init__ ("pngviewer-bin", gdb.COMMAND_SUPPORT, gdb.PARAM_FILENAME)
+        super(PngViewerBinParameter, self).__init__(
+            "pngviewer-bin", gdb.COMMAND_SUPPORT, gdb.PARAM_FILENAME)
         self.value = os.getenv("GDB_PNGVIEWER", "")
         if self.value == "":
             self.value = search_in_path("xdg-open")
 
+
 iongraph = IonGraphBinParameter()
 dot = DotBinParameter()
 pngviewer = PngViewerBinParameter()
 
+
 class IonGraphCommand(gdb.Command):
     """Command used to display the current state of the MIR graph in a png
     viewer by providing an expression to access the MIRGenerator.
     """
 
     def __init__(self):
-        super (IonGraphCommand, self).__init__ ("iongraph", gdb.COMMAND_DATA, gdb.COMPLETE_EXPRESSION)
+        super(IonGraphCommand, self).__init__(
+            "iongraph", gdb.COMMAND_DATA, gdb.COMPLETE_EXPRESSION)
         self.typeCache = ModuleCache()
 
     def invoke(self, mirGenExpr, from_tty):
         """Call function from the graph spewer to populate the json printer with
         the content generated by the jsonSpewer. Then we read the json content
         from the jsonPrinter internal data, and gives that as input of iongraph
         command."""
 
@@ -140,20 +162,24 @@ class IonGraphCommand(gdb.Command):
         graph = mirGen['graph_']
 
         # These commands have side-effects: they save the state of the
         # compiled code into the LSprinter dedicated to logging. Fortunately,
         # if you are using these gdb commands, other ways of getting this
         # content have probably already failed you, so making a mess of
         # these logging strings should not cause many issues.
         gdb.parse_and_eval('(*(%s*)(%s)).clear()' % (jsonPrinter.type, jsonPrinter.address,))
-        gdb.parse_and_eval('(*(%s*)(%s)).beginFunction((JSScript*)0)' % (jsonSpewer.type, jsonSpewer.address,))
-        gdb.parse_and_eval('(*(%s*)(%s)).beginPass("gdb")' % (jsonSpewer.type, jsonSpewer.address,))
-        gdb.parse_and_eval('(*(%s*)(%s)).spewMIR((%s)%s)' % (jsonSpewer.type, jsonSpewer.address, graph.type, graph,))
-        gdb.parse_and_eval('(*(%s*)(%s)).spewLIR((%s)%s)' % (jsonSpewer.type, jsonSpewer.address, graph.type, graph,))
+        gdb.parse_and_eval('(*(%s*)(%s)).beginFunction((JSScript*)0)' %
+                           (jsonSpewer.type, jsonSpewer.address,))
+        gdb.parse_and_eval('(*(%s*)(%s)).beginPass("gdb")' %
+                           (jsonSpewer.type, jsonSpewer.address,))
+        gdb.parse_and_eval('(*(%s*)(%s)).spewMIR((%s)%s)' %
+                           (jsonSpewer.type, jsonSpewer.address, graph.type, graph,))
+        gdb.parse_and_eval('(*(%s*)(%s)).spewLIR((%s)%s)' %
+                           (jsonSpewer.type, jsonSpewer.address, graph.type, graph,))
         gdb.parse_and_eval('(*(%s*)(%s)).endPass()' % (jsonSpewer.type, jsonSpewer.address,))
         gdb.parse_and_eval('(*(%s*)(%s)).endFunction()' % (jsonSpewer.type, jsonSpewer.address,))
 
         # Dump the content of the LSprinter containing the JSON view of the
         # graph into a python string.
         json = jsvmLSprinter(jsonPrinter, self.typeCache).to_string()
 
         # We are in the middle of the program execution and are messing up with
@@ -169,18 +195,19 @@ class IonGraphCommand(gdb.Command):
         # Display the content of the json with iongraph and other tools.
         self.displayMIRGraph(json)
 
     def displayMIRGraph(self, jsonStr):
         png = tempfile.NamedTemporaryFile()
 
         # start all processes in a shell-like equivalent of:
         #   iongraph < json | dot > tmp.png; xdg-open tmp.png
-        i = subprocess.Popen([iongraph.value, '--funcnum', '0', '--passnum', '0', '--out-mir', '-', '-'], stdin = subprocess.PIPE, stdout = subprocess.PIPE)
-        d = subprocess.Popen([dot.value, '-Tpng'], stdin = i.stdout, stdout = png)
+        i = subprocess.Popen([iongraph.value, '--funcnum', '0', '--passnum', '0',
+                              '--out-mir', '-', '-'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+        d = subprocess.Popen([dot.value, '-Tpng'], stdin=i.stdout, stdout=png)
 
         # Write the json file as the input of the iongraph command.
         i.stdin.write(jsonStr.encode('utf8'))
         i.stdin.close()
         i.stdout.close()
 
         # Wait for iongraph and dot, so that the png file contains all the
         # bits needed by the png viewer.
@@ -188,12 +215,13 @@ class IonGraphCommand(gdb.Command):
         output = d.communicate()[0]
 
         # Spawn & detach the png viewer, to which we give the name of the
         # temporary file.  Note, as we do not want to wait on the image viewer,
         # there is a minor race between the removal of the temporary file, which
         # would happen at the next garbage collection cycle, and the start of
         # the png viewer.  We could use a pipe, but unfortunately, this does
         # not seem to be supported by xdg-open.
-        v = subprocess.Popen([pngviewer.value, png.name], stdin = None, stdout = None)
+        v = subprocess.Popen([pngviewer.value, png.name], stdin=None, stdout=None)
         time.sleep(1)
 
+
 iongraph_cmd = IonGraphCommand()
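# Editorial usage sketch (not part of the patch): once this module is loaded,
# the parameters and the iongraph command registered above can be driven from
# the gdb prompt. The expression "mirGen_" is a hypothetical inferior local:
#
#   (gdb) set dot-bin /usr/bin/dot
#   (gdb) set pngviewer-bin /usr/bin/xdg-open
#   (gdb) iongraph mirGen_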
--- a/js/src/gdb/mozilla/JSObject.py
+++ b/js/src/gdb/mozilla/JSObject.py
@@ -4,31 +4,34 @@ import re
 import gdb
 import mozilla.JSString
 import mozilla.prettyprinters as prettyprinters
 from mozilla.prettyprinters import ptr_pretty_printer, ref_pretty_printer
 from mozilla.Root import deref
 
 prettyprinters.clear_module_printers(__name__)
 
+
 class JSObjectTypeCache(object):
     def __init__(self, value, cache):
         baseshape_flags = gdb.lookup_type('js::BaseShape::Flag')
         self.flag_DELEGATE = prettyprinters.enum_value(baseshape_flags, 'js::BaseShape::DELEGATE')
         self.func_ptr_type = gdb.lookup_type('JSFunction').pointer()
         self.class_NON_NATIVE = gdb.parse_and_eval('js::Class::NON_NATIVE')
         self.NativeObject_ptr_t = gdb.lookup_type('js::NativeObject').pointer()
         self.Shape_ptr_t = gdb.lookup_type('js::Shape').pointer()
 
 # There should be no need to register this for JSFunction as well, since we
 # search for pretty-printers under the names of base classes, and
 # JSFunction has JSObject as a base class.
 
+
 gdb_string_regexp = re.compile(r'(?:0x[0-9a-z]+ )?(?:<.*> )?"(.*)"', re.I)
 
+
 @ptr_pretty_printer('JSObject')
 class JSObjectPtrOrRef(prettyprinters.Pointer):
     def __init__(self, value, cache):
         super(JSObjectPtrOrRef, self).__init__(value, cache)
         if not cache.mod_JSObject:
             cache.mod_JSObject = JSObjectTypeCache(value, cache)
         self.otc = cache.mod_JSObject
 
@@ -60,10 +63,11 @@ class JSObjectPtrOrRef(prettyprinters.Po
                     function = function.address
                 function = function.cast(self.otc.func_ptr_type)
                 atom = deref(function['atom_'])
                 name = str(atom) if atom else '<unnamed>'
             return '[object {}{}]{}'.format(class_name,
                                             ' ' + name if name else '',
                                             ' delegate' if is_delegate else '')
 
+
 @ref_pretty_printer('JSObject')
 def JSObjectRef(value, cache): return JSObjectPtrOrRef(value, cache)
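# Editorial note: given the format string in to_string() above, a named
# function object that is also a delegate would render as, for example:
#
#   [object Function foo] delegate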
--- a/js/src/gdb/mozilla/JSString.py
+++ b/js/src/gdb/mozilla/JSString.py
@@ -1,39 +1,43 @@
 # Pretty-printers for SpiderMonkey strings.
 
 import gdb
 import mozilla.prettyprinters
 from mozilla.prettyprinters import pretty_printer, ptr_pretty_printer
 
 try:
-    chr(10000) # UPPER RIGHT PENCIL
-except ValueError as exc: # yuck, we are in Python 2.x, so chr() is 8-bit
-    chr = unichr # replace with teh unicodes
+    chr(10000)  # UPPER RIGHT PENCIL
+except ValueError as exc:  # yuck, we are in Python 2.x, so chr() is 8-bit
+    chr = unichr  # replace with the Unicode-aware chr
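# Editorial note: 10000 is U+2710 (UPPER RIGHT PENCIL), which is outside the
# 8-bit range, so the probe above raises ValueError on Python 2 and succeeds
# on Python 3; the fallback rebinds chr to unichr so the rest of the module
# can call chr() on any code point.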
 
 # Forget any printers from previous loads of this module.
 mozilla.prettyprinters.clear_module_printers(__name__)
 
 # Cache information about the JSString type for this objfile.
+
+
 class JSStringTypeCache(object):
     def __init__(self, cache):
         dummy = gdb.Value(0).cast(cache.JSString_ptr_t)
         self.NON_ATOM_BIT = dummy['NON_ATOM_BIT']
         self.LINEAR_BIT = dummy['LINEAR_BIT']
         self.INLINE_CHARS_BIT = dummy['INLINE_CHARS_BIT']
         self.TYPE_FLAGS_MASK = dummy['TYPE_FLAGS_MASK']
         self.LATIN1_CHARS_BIT = dummy['LATIN1_CHARS_BIT']
 
+
 class Common(mozilla.prettyprinters.Pointer):
     def __init__(self, value, cache):
         super(Common, self).__init__(value, cache)
         if not cache.mod_JSString:
             cache.mod_JSString = JSStringTypeCache(cache)
         self.stc = cache.mod_JSString
 
+
 @ptr_pretty_printer("JSString")
 class JSStringPtr(Common):
     def display_hint(self):
         return "string"
 
     def chars(self):
         d = self.value['d']
         length = d['u1']['length']
@@ -84,12 +88,13 @@ class JSStringPtr(Common):
                 if invalid_chars_allowed == 0:
                     s += "<TOO_MANY_INVALID_CHARS>"
                     break
                 else:
                     invalid_chars_allowed -= 1
                     s += "\\x%04x" % (c & 0xffff)
         return s
 
+
 @ptr_pretty_printer("JSAtom")
 class JSAtomPtr(Common):
     def to_string(self):
         return self.value.cast(self.cache.JSString_ptr_t)
--- a/js/src/gdb/mozilla/JSSymbol.py
+++ b/js/src/gdb/mozilla/JSSymbol.py
@@ -6,16 +6,17 @@ from mozilla.prettyprinters import ptr_p
 
 # Forget any printers from previous loads of this module.
 mozilla.prettyprinters.clear_module_printers(__name__)
 
 # JS::SymbolCode enumerators
 InSymbolRegistry = 0xfffffffe
 UniqueSymbol = 0xffffffff
 
+
 @ptr_pretty_printer("JS::Symbol")
 class JSSymbolPtr(mozilla.prettyprinters.Pointer):
     def __init__(self, value, cache):
         super(JSSymbolPtr, self).__init__(value, cache)
         self.value = value
 
     def to_string(self):
         code = int(self.value['code_']) & 0xffffffff
@@ -25,9 +26,8 @@ class JSSymbolPtr(mozilla.prettyprinters
         elif code == UniqueSymbol:
             return "Symbol({})".format(desc)
         else:
             # Well-known symbol. Strip off the quotes added by the JSString *
             # pretty-printer.
             assert desc[0] == '"'
             assert desc[-1] == '"'
             return desc[1:-1]
-
--- a/js/src/gdb/mozilla/Root.py
+++ b/js/src/gdb/mozilla/Root.py
@@ -5,16 +5,18 @@ import mozilla.prettyprinters
 from mozilla.prettyprinters import pretty_printer, template_pretty_printer
 
 # Forget any printers from previous loads of this module.
 mozilla.prettyprinters.clear_module_printers(__name__)
 
 # Common base class for all the rooting template pretty-printers. All these
 # templates have one member holding the referent (or a pointer to it), so
 # there's not much to it.
+
+
 class Common(object):
     # The name of the template member holding the referent.
     member = 'ptr'
 
     # If True, this is a handle type, and should be dereferenced. If False,
     # the template member holds the referent directly.
     handle = False
 
@@ -37,48 +39,55 @@ class Common(object):
     # |jsid|, we *can* register a pretty-printer constructor for the full
     # instantiation |JS::Rooted<long>|. That constructor creates a |JS::Rooted|
     # pretty-printer, and explicitly specifies the constructor for the referent,
     # using this initializer's |content_printer| argument.
     def __init__(self, value, cache, content_printer=None):
         self.value = value
         self.cache = cache
         self.content_printer = content_printer
+
     def to_string(self):
         ptr = self.value[self.member]
         if self.handle:
             ptr = ptr.dereference()
         if self.strip_typedefs:
             ptr = ptr.cast(ptr.type.strip_typedefs())
         if self.content_printer:
             return self.content_printer(ptr, self.cache).to_string()
         else:
             # As of 2012-11, GDB suppresses printing pointers in replacement
             # values; see http://sourceware.org/ml/gdb/2012-11/msg00055.html
             # That means that simply returning the 'ptr' member won't work.
             # Instead, just invoke GDB's formatter ourselves.
             return str(ptr)
 
+
 @template_pretty_printer("JS::Rooted")
 class Rooted(Common):
     strip_typedefs = True
 
+
 @template_pretty_printer("JS::Handle")
 class Handle(Common):
     handle = True
 
+
 @template_pretty_printer("JS::MutableHandle")
 class MutableHandle(Common):
     handle = True
 
+
 @template_pretty_printer("js::BarrieredBase")
 class BarrieredBase(Common):
     member = 'value'
 
 # Return the referent of a HeapPtr, Rooted, or Handle.
+
+
 def deref(root):
     tag = root.type.strip_typedefs().tag
     if not tag:
         raise TypeError("Can't dereference type with no structure tag: %s" % (root.type,))
     elif tag.startswith('js::HeapPtr<'):
         return root['value']
     elif tag.startswith('JS::Rooted<'):
         return root['ptr']
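# Editorial usage note: deref() lets the other printers look through rooting
# wrappers without knowing which one they were handed; for example,
# JSObject.py earlier in this patch unwraps a barriered field with
#
#   atom = deref(function['atom_'])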
--- a/js/src/gdb/mozilla/asmjs.py
+++ b/js/src/gdb/mozilla/asmjs.py
@@ -5,16 +5,17 @@ handles internally. Make GDB ignore them
 
 import gdb
 
 SIGSEGV = 11
 
 # A sigaction buffer for each inferior process.
 sigaction_buffers = {}
 
+
 def on_stop(event):
     if isinstance(event, gdb.SignalEvent) and event.stop_signal == 'SIGSEGV':
         # Allocate memory for sigaction, once per js shell process.
         process = gdb.selected_inferior()
         buf = sigaction_buffers.get(process)
         if buf is None:
             buf = gdb.parse_and_eval("(struct sigaction *) malloc(sizeof(struct sigaction))")
             sigaction_buffers[process] = buf
@@ -26,15 +27,17 @@ def on_stop(event):
         if buf['__sigaction_handler']['sa_handler'] == WasmFaultHandler:
             # Advise the user that magic is happening.
             print("js/src/gdb/mozilla/asmjs.py: Allowing WasmFaultHandler to run.")
 
             # If WasmFaultHandler doesn't handle this segfault, it will unhook
             # itself and re-raise.
             gdb.execute("continue")
 
+
 def on_exited(event):
     if event.inferior in sigaction_buffers:
         del sigaction_buffers[event.inferior]
 
+
 def install():
     gdb.events.stop.connect(on_stop)
     gdb.events.exited.connect(on_exited)
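# Editorial usage sketch (assumption: the harness, not this module, decides
# when to hook the event stream):
#
#   import mozilla.asmjs
#   mozilla.asmjs.install()
#
# After install(), SIGSEGV stops are inspected and faults claimed by
# WasmFaultHandler are continued automatically.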
--- a/js/src/gdb/mozilla/autoload.py
+++ b/js/src/gdb/mozilla/autoload.py
@@ -21,13 +21,15 @@ import mozilla.unwind
 
 # The user may have personal pretty-printers. Get those, too, if they exist.
 try:
     import my_mozilla_printers
 except ImportError:
     pass
 
 # Register our pretty-printers with |objfile|.
+
+
 def register(objfile):
     lookup = mozilla.prettyprinters.lookup_for_objfile(objfile)
     if lookup:
         gdb.printing.register_pretty_printer(objfile, lookup, replace=True)
     mozilla.unwind.register_unwinder(objfile)
--- a/js/src/gdb/mozilla/jsid.py
+++ b/js/src/gdb/mozilla/jsid.py
@@ -4,27 +4,28 @@ import gdb
 import mozilla.prettyprinters
 import mozilla.Root
 
 from mozilla.prettyprinters import pretty_printer
 
 # Forget any printers from previous loads of this module.
 mozilla.prettyprinters.clear_module_printers(__name__)
 
+
 @pretty_printer('jsid')
 class jsid(object):
     # Since people don't always build with macro debugging info, I can't
     # think of any way to avoid copying these values here, short of using
     # inferior calls for every operation (which, I hear, is broken from
     # pretty-printers in some recent GDBs).
-    TYPE_STRING                 = 0x0
-    TYPE_INT                    = 0x1
-    TYPE_VOID                   = 0x2
-    TYPE_SYMBOL                 = 0x4
-    TYPE_MASK                   = 0x7
+    TYPE_STRING = 0x0
+    TYPE_INT = 0x1
+    TYPE_VOID = 0x2
+    TYPE_SYMBOL = 0x4
+    TYPE_MASK = 0x7
 
     def __init__(self, value, cache):
         self.value = value
         self.cache = cache
         self.concrete_type = self.value.type.strip_typedefs()
 
     # SpiderMonkey has two alternative definitions of jsid: a typedef for
     # ptrdiff_t, and a struct with == and != operators defined on it.
@@ -53,17 +54,23 @@ class jsid(object):
             body = ((bits & ~jsid.TYPE_MASK)
                     .cast(self.cache.JSSymbol_ptr_t))
         else:
             body = "<unrecognized>"
         return '$jsid(%s)' % (body,)
 
 # Hard-code the referent type pretty-printer for jsid roots and handles.
 # See the comment for mozilla.Root.Common.__init__.
+
+
 @pretty_printer('JS::Rooted<long>')
 def RootedJSID(value, cache):
     return mozilla.Root.Rooted(value, cache, jsid)
+
+
 @pretty_printer('JS::Handle<long>')
 def HandleJSID(value, cache):
     return mozilla.Root.Handle(value, cache, jsid)
+
+
 @pretty_printer('JS::MutableHandle<long>')
 def MutableHandleJSID(value, cache):
     return mozilla.Root.MutableHandle(value, cache, jsid)
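# Editorial worked example: TYPE_MASK is 0x7, so a jsid whose low bits are
# 0x1 satisfies bits & TYPE_MASK == TYPE_INT and takes the integer branch of
# to_string() (not shown in this hunk), while low bits of 0x4 (TYPE_SYMBOL)
# cast the payload (bits & ~TYPE_MASK) to a JS::Symbol pointer, as in the
# branch shown above.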
--- a/js/src/gdb/mozilla/jsval.py
+++ b/js/src/gdb/mozilla/jsval.py
@@ -63,71 +63,81 @@ mozilla.prettyprinters.clear_module_prin
 #
 # On x86_64 only the lower 48 bits of an address are significant, and only
 # those values whose top bit is zero are used for user-space addresses. Thus
 # x86_64 addresses are effectively 47 bits long and fit nicely in the available
 # portion of the fraction field.
 #
 # See Value.h for full details.
 
+
 class Box(object):
     def __init__(self, asBits, jtc):
         self.asBits = asBits
         self.jtc = jtc
         # Value::asBits is uint64_t, but somehow the sign bit can be botched
         # here, even though Python integers are arbitrary precision.
         if self.asBits < 0:
             self.asBits = self.asBits + (1 << 64)
 
     # Return this value's type tag.
     def tag(self): raise NotImplementedError
 
     # Return this value as a 32-bit integer, double, or address.
     def as_uint32(self): raise NotImplementedError
+
     def as_double(self): raise NotImplementedError
+
     def as_address(self): raise NotImplementedError
 
 # Packed non-number boxing --- the format used on x86_64. It would be nice to
 # simply call Value::toInt32, etc. here, but the debugger is likely to see many
 # Values, and doing several inferior calls for each one seems like a bad idea.
+
+
 class Punbox(Box):
 
-    FULL_WIDTH     = 64
-    TAG_SHIFT      = 47
-    PAYLOAD_MASK   = (1 << TAG_SHIFT) - 1
-    TAG_MASK       = (1 << (FULL_WIDTH - TAG_SHIFT)) - 1
+    FULL_WIDTH = 64
+    TAG_SHIFT = 47
+    PAYLOAD_MASK = (1 << TAG_SHIFT) - 1
+    TAG_MASK = (1 << (FULL_WIDTH - TAG_SHIFT)) - 1
     TAG_MAX_DOUBLE = 0x1fff0
-    TAG_TYPE_MASK  = 0x0000f
+    TAG_TYPE_MASK = 0x0000f
 
     def tag(self):
         tag = self.asBits >> Punbox.TAG_SHIFT
         if tag <= Punbox.TAG_MAX_DOUBLE:
             return self.jtc.DOUBLE
         else:
             return tag & Punbox.TAG_TYPE_MASK
 
     def as_uint32(self): return int(self.asBits & ((1 << 32) - 1))
+
     def as_address(self): return gdb.Value(self.asBits & Punbox.PAYLOAD_MASK)
 
+
 class Nunbox(Box):
-    TAG_SHIFT      = 32
-    TAG_CLEAR      = 0xffff0000
-    PAYLOAD_MASK   = 0xffffffff
-    TAG_TYPE_MASK  = 0x0000000f
+    TAG_SHIFT = 32
+    TAG_CLEAR = 0xffff0000
+    PAYLOAD_MASK = 0xffffffff
+    TAG_TYPE_MASK = 0x0000000f
 
     def tag(self):
         tag = self.asBits >> Nunbox.TAG_SHIFT
         if tag < Nunbox.TAG_CLEAR:
             return self.jtc.DOUBLE
         return tag & Nunbox.TAG_TYPE_MASK
 
     def as_uint32(self): return int(self.asBits & Nunbox.PAYLOAD_MASK)
+
     def as_address(self): return gdb.Value(self.asBits & Nunbox.PAYLOAD_MASK)
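# Editorial worked example (assumes the usual JSValueType numbering, where
# INT32 == 1): under Punbox, asBits = 0xfff8800000000005 gives
# asBits >> TAG_SHIFT == 0x1fff1, which exceeds TAG_MAX_DOUBLE, so tag()
# returns 0x1fff1 & TAG_TYPE_MASK == 1; as_uint32() then recovers the
# int32 payload, 5.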
 
 # Cache information about the Value type for this objfile.
+
+
 class JSValueTypeCache(object):
     def __init__(self, cache):
         # Capture the tag values.
         d = gdb.types.make_enum_dict(gdb.lookup_type('JSValueType'))
 
         # The enum keys are prefixed when building with some compilers (clang at
         # a minimum), so use a helper function to handle either key format.
         def get(key):
@@ -154,21 +164,23 @@ class JSValueTypeCache(object):
             self.enable_bigint = True
         except:
             pass
 
         # Let self.magic_names be an array whose i'th element is the name of
         # the i'th magic value.
         d = gdb.types.make_enum_dict(gdb.lookup_type('JSWhyMagic'))
         self.magic_names = list(range(max(d.values()) + 1))
-        for (k,v) in d.items(): self.magic_names[v] = k
+        for (k, v) in d.items():
+            self.magic_names[v] = k
 
         # Choose an unboxing scheme for this architecture.
         self.boxer = Punbox if cache.void_ptr_t.sizeof == 8 else Nunbox
 
+
 @pretty_printer('JS::Value')
 class JSValue(object):
     def __init__(self, value, cache):
         # Save the generic typecache, and create our own, if we haven't already.
         self.cache = cache
         if not cache.mod_JS_Value:
             cache.mod_JS_Value = JSValueTypeCache(cache)
         self.jtc = cache.mod_JS_Value
--- a/js/src/gdb/mozilla/prettyprinters.py
+++ b/js/src/gdb/mozilla/prettyprinters.py
@@ -9,98 +9,118 @@ import re
 # factory, taking both a gdb.Value instance and a TypeCache instance as
 # arguments; see TypeCache, below.
 
 # Check that |fn| hasn't been registered as a pretty-printer under some
 # other name already. (The 'enabled' flags used by GDB's
 # 'enable/disable/info pretty-printer' commands are simply stored as
 # properties of the function objects themselves, so a single function
 # object can't carry the 'enabled' flags for two different printers.)
+
+
 def check_for_reused_pretty_printer(fn):
     if hasattr(fn, 'enabled'):
         raise RuntimeError("pretty-printer function %r registered more than once" % fn)
 
+
 # a dictionary mapping gdb.Type tags to pretty-printer functions.
 printers_by_tag = {}
 
 # A decorator: add the decoratee as a pretty-printer lookup function for types
 # named |type_name|.
+
+
 def pretty_printer(type_name):
     def add(fn):
         check_for_reused_pretty_printer(fn)
         add_to_subprinter_list(fn, type_name)
         printers_by_tag[type_name] = fn
         return fn
     return add
 
+
 # a dictionary mapping gdb.Type tags to pretty-printer functions for pointers to
 # that type.
 ptr_printers_by_tag = {}
 
 # A decorator: add the decoratee as a pretty-printer lookup function for
 # pointers to types named |type_name|.
+
+
 def ptr_pretty_printer(type_name):
     def add(fn):
         check_for_reused_pretty_printer(fn)
         add_to_subprinter_list(fn, "ptr-to-" + type_name)
         ptr_printers_by_tag[type_name] = fn
         return fn
     return add
 
+
 # a dictionary mapping gdb.Type tags to pretty-printer functions for
 # references to that type.
 ref_printers_by_tag = {}
 
 # A decorator: add the decoratee as a pretty-printer lookup function for
 # references to instances of types named |type_name|.
+
+
 def ref_pretty_printer(type_name):
     def add(fn):
         check_for_reused_pretty_printer(fn)
         add_to_subprinter_list(fn, "ref-to-" + type_name)
         ref_printers_by_tag[type_name] = fn
         return fn
     return add
 
+
 # a dictionary mapping the template name portion of gdb.Type tags to
 # pretty-printer functions for instantiations of that template.
 template_printers_by_tag = {}
 
 # A decorator: add the decoratee as a pretty-printer lookup function for
 # instantiations of templates named |template_name|.
+
+
 def template_pretty_printer(template_name):
     def add(fn):
         check_for_reused_pretty_printer(fn)
         add_to_subprinter_list(fn, 'instantiations-of-' + template_name)
         template_printers_by_tag[template_name] = fn
         return fn
     return add
 
+
 # A list of (REGEXP, PRINTER) pairs, such that if REGEXP (a RegexObject)
 # matches the result of converting a gdb.Value's type to a string, then
 # PRINTER is a pretty-printer lookup function that will probably like that
 # value.
 printers_by_regexp = []
 
 # A decorator: add the decoratee as a pretty-printer factory for types
 # that, when converted to a string, match |pattern|. Use |name| as the
 # pretty-printer's name, when listing, enabling and disabling.
+
+
 def pretty_printer_for_regexp(pattern, name):
     compiled = re.compile(pattern)
+
     def add(fn):
         check_for_reused_pretty_printer(fn)
         add_to_subprinter_list(fn, name)
         printers_by_regexp.append((compiled, fn))
         return fn
     return add
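# Editorial sketch of using the decorator above (the type pattern and the
# printer class are hypothetical):
#
#   @pretty_printer_for_regexp(r'^js::Vector<.*>$', 'js::Vector')
#   def vector_printer(value, cache):
#       return SomeVectorPrinter(value, cache)
#
# The compiled pattern is matched against str(value.type) during lookup, so
# regexp printers cost a scan of the whole list and should be rare.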
 
 # Forget all pretty-printer lookup functions defined in the module name
 # |module_name|, if any exist. Use this at the top of each pretty-printer
 # module like this:
 #
 #   clear_module_printers(__name__)
+
+
 def clear_module_printers(module_name):
     global printers_by_tag, ptr_printers_by_tag, ref_printers_by_tag
     global template_printers_by_tag, printers_by_regexp
 
     # Remove all pretty-printers defined in the module named |module_name|
     # from d.
     def clear_dictionary(d):
         # Walk the dictionary, building a list of keys whose entries we
@@ -123,33 +143,40 @@ def clear_module_printers(module_name):
     new_list = []
     for p in printers_by_regexp:
         if p.__module__ == module_name:
             remove_from_subprinter_list(p)
         else:
             new_list.append(p)
     printers_by_regexp = new_list
 
+
 # Our subprinters array. The 'subprinters' attributes of all lookup
 # functions returned by lookup_for_objfile point to this array instance,
 # which we mutate as subprinters are added and removed.
 subprinters = []
 
 # Set up the 'name' and 'enabled' attributes on |subprinter|, and add it to our
 # list of all SpiderMonkey subprinters.
+
+
 def add_to_subprinter_list(subprinter, name):
     subprinter.name = name
     subprinter.enabled = True
     subprinters.append(subprinter)
 
 # Remove |subprinter| from our list of all SpiderMonkey subprinters.
+
+
 def remove_from_subprinter_list(subprinter):
     subprinters.remove(subprinter)
 
 # An exception class meaning, "This objfile has no SpiderMonkey in it."
+
+
 class NotSpiderMonkeyObjfileError(TypeError):
     pass
 
 # TypeCache: a cache for frequently used information about an objfile.
 #
 # When a new SpiderMonkey objfile is loaded, we construct an instance of
 # this class for it. Then, whenever we construct a pretty-printer for some
 # gdb.Value, we also pass, as a second argument, the TypeCache for the
@@ -157,16 +184,18 @@ class NotSpiderMonkeyObjfileError(TypeEr
 #
 # if objfile doesn't seem to have SpiderMonkey code in it, the constructor
 # raises NotSpiderMonkeyObjfileError.
 #
 # Pretty-printer modules may add attributes to this to hold their own
 # cached values. Such attributes should be named mod_NAME, where the module
 # is named mozilla.NAME; for example, mozilla.JSString should store its
 # metadata in the TypeCache's mod_JSString attribute.
+
+
 class TypeCache(object):
     def __init__(self, objfile):
         self.objfile = objfile
 
         # Unfortunately, the Python interface doesn't allow us to specify
         # the objfile in whose scope lookups should occur. But simply
         # knowing that we need to lookup the types afresh is probably
         # enough.
@@ -199,43 +228,52 @@ class TypeCache(object):
 #
 # This is a hokey attempt to order the implemented types by meaningfulness when
 # pretty-printed. Perhaps it is entirely misguided, and we should actually
 # collect all applicable pretty-printers, and then use some ordering on the
 # pretty-printers themselves.
 #
 # We may yield a type more than once (say, if it appears more than once in the
 # class hierarchy).
+
+
 def implemented_types(t):
 
     # Yield all types that follow |t|.
     def followers(t):
         if t.code == gdb.TYPE_CODE_TYPEDEF:
             yield t.target()
-            for t2 in followers(t.target()): yield t2
+            for t2 in followers(t.target()):
+                yield t2
         elif is_struct_or_union(t):
             base_classes = []
             for f in t.fields():
                 if f.is_base_class:
                     yield f.type
                     base_classes.append(f.type)
             for b in base_classes:
-                for t2 in followers(b): yield t2
+                for t2 in followers(b):
+                    yield t2
 
     yield t
-    for t2 in followers(t): yield t2
+    for t2 in followers(t):
+        yield t2
+
 
 template_regexp = re.compile(r"([\w_:]+)<")
 
+
 def is_struct_or_union(t):
     return t.code in (gdb.TYPE_CODE_STRUCT, gdb.TYPE_CODE_UNION)
 
 # Construct and return a pretty-printer lookup function for objfile, or
 # return None if the objfile doesn't contain SpiderMonkey code
 # (specifically, definitions for SpiderMonkey types).
+
+
 def lookup_for_objfile(objfile):
     # Create a type cache for this objfile.
     try:
         cache = TypeCache(objfile)
     except NotSpiderMonkeyObjfileError:
         if gdb.parameter("verbose"):
             gdb.write("objfile '%s' has no SpiderMonkey code; not registering pretty-printers\n"
                       % (objfile.filename,))
@@ -260,40 +298,45 @@ def lookup_for_objfile(objfile):
                 return check_table(table, t.tag)
             else:
                 return None
 
         for t in implemented_types(value.type):
             if t.code == gdb.TYPE_CODE_PTR:
                 for t2 in implemented_types(t.target()):
                     p = check_table_by_type_name(ptr_printers_by_tag, t2)
-                    if p: return p
+                    if p:
+                        return p
             elif t.code == gdb.TYPE_CODE_REF:
                 for t2 in implemented_types(t.target()):
                     p = check_table_by_type_name(ref_printers_by_tag, t2)
-                    if p: return p
+                    if p:
+                        return p
             else:
                 p = check_table_by_type_name(printers_by_tag, t)
-                if p: return p
+                if p:
+                    return p
                 if is_struct_or_union(t) and t.tag:
                     m = template_regexp.match(t.tag)
                     if m:
                         p = check_table(template_printers_by_tag, m.group(1))
-                        if p: return p
+                        if p:
+                            return p
 
         # Failing that, look for a printer in printers_by_regexp. We have
         # to scan the whole list, so regexp printers should be used
         # sparingly.
         s = str(value.type)
         for (r, f) in printers_by_regexp:
             if f.enabled:
                 m = r.match(s)
                 if m:
                     p = f(value, cache)
-                    if p: return p
+                    if p:
+                        return p
 
         # No luck.
         return None
 
     # Give |lookup| the attributes expected of a pretty-printer with
     # subprinters, for enabling and disabling.
     lookup.name = "SpiderMonkey"
     lookup.enabled = True
@@ -315,16 +358,18 @@ def lookup_for_objfile(objfile):
 #
 # to_string(self): format the type's name and address, as GDB would, and then
 #     call a 'summary' method (which the subclass must define) to produce a
 #     description of the referent.
 #
 #     Note that pretty-printers returning a 'string' display hint must not use
 #     this default 'to_string' method, as GDB will take everything it returns,
 #     including the type name and address, as string contents.
+
+
 class Pointer(object):
     def __new__(cls, value, cache):
         # Don't try to provide pretty-printers for NULL pointers.
         if value.type.strip_typedefs().code == gdb.TYPE_CODE_PTR and value == 0:
             return None
         return super(Pointer, cls).__new__(cls)
 
     def __init__(self, value, cache):
@@ -346,26 +391,29 @@ class Pointer(object):
         except gdb.MemoryError as r:
             summary = str(r)
         v = '(%s) %s %s' % (self.value.type, address, summary)
         return v
 
     def summary(self):
         raise NotImplementedError
 
+
 field_enum_value = None
 
 # Given |t|, a gdb.Type instance representing an enum type, return the
 # numeric value of the enum value named |name|.
 #
 # Pre-2012-4-18 versions of GDB store the value of an enum member on the
 # gdb.Field's 'bitpos' attribute; later versions store it on the 'enumval'
 # attribute. This function retrieves the value from either.
+
+
 def enum_value(t, name):
     global field_enum_value
     f = t[name]
     # Monkey-patching is a-okay in polyfills! Just because.
     if not field_enum_value:
         if hasattr(f, 'enumval'):
-            field_enum_value = lambda f: f.enumval
+            def field_enum_value(f): return f.enumval
         else:
-            field_enum_value = lambda f: f.bitpos
+            def field_enum_value(f): return f.bitpos
     return field_enum_value(f)
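# Editorial example call, mirroring JSObject.py earlier in this patch:
#
#   flags = gdb.lookup_type('js::BaseShape::Flag')
#   delegate = enum_value(flags, 'js::BaseShape::DELEGATE')
#
# This yields the same number whether this gdb exposes 'enumval' or the
# older 'bitpos' attribute on gdb.Field.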
--- a/js/src/gdb/mozilla/unwind.py
+++ b/js/src/gdb/mozilla/unwind.py
@@ -25,20 +25,22 @@ except ImportError:
 try:
     from gdb.unwinder import Unwinder
 except ImportError:
     _have_unwinder = False
     # We need something here; it doesn't matter what, as no unwinder
     # will ever be instantiated.
     Unwinder = object
 
+
 def debug(something):
     # print("@@ " + something)
     pass
 
+
 # Maps frametype enum base names to corresponding class.
 SizeOfFramePrefix = {
     'JitFrame_IonJS': 'ExitFrameLayout',
     'JitFrame_BaselineJS': 'JitFrameLayout',
     'JitFrame_BaselineStub': 'BaselineStubFrameLayout',
     'JitFrame_IonStub': 'JitStubFrameLayout',
     'JitFrame_CppToJSJit': 'JitFrameLayout',
     'JitFrame_WasmToJSJit': 'JitFrameLayout',
@@ -46,16 +48,18 @@ SizeOfFramePrefix = {
     'JitFrame_IonAccessorIC': 'IonAccessorICFrameLayout',
     'JitFrame_IonICCall': 'IonICCallFrameLayout',
     'JitFrame_Exit': 'ExitFrameLayout',
     'JitFrame_Bailout': 'JitFrameLayout',
 }
 
 # All types and symbols that we need are attached to an object that we
 # can dispose of as needed.
+
+
 class UnwinderTypeCache(object):
     def __init__(self):
         self.d = None
         self.frame_enum_names = {}
         self.frame_class_types = {}
 
     # We take this bizarre approach to defer trying to look up any
     # symbols until absolutely needed.  Without this, the loading
@@ -112,16 +116,18 @@ class UnwinderTypeCache(object):
             self.frame_class_types[enumval] = class_type.pointer()
 
 # gdb doesn't have a direct way to tell us if a given address is
 # claimed by some shared library or the executable.  See
 # https://sourceware.org/bugzilla/show_bug.cgi?id=19288
 # In the interest of not requiring a patched gdb, instead we read
 # /proc/.../maps.  This only works locally, but maybe could work
 # remotely using "remote get".  FIXME.
+
+
 def parse_proc_maps():
     mapfile = '/proc/' + str(gdb.selected_inferior().pid) + '/maps'
     # Note we only examine executable mappings here.
     matcher = re.compile(r"^([a-fA-F0-9]+)-([a-fA-F0-9]+)\s+..x.\s+\S+\s+\S+\s+\S*(.*)$")
     mappings = []
     with open(mapfile, "r") as inp:
         for line in inp:
             match = matcher.match(line)
@@ -133,30 +139,34 @@ def parse_proc_maps():
             name = match.group(3).strip()
             if name == '' or (name.startswith('[') and name != '[vdso]'):
                 # Skip entries not corresponding to a file.
                 continue
             mappings.append((long(start, 16), long(end, 16)))
     return mappings
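# Editorial example of a /proc/<pid>/maps line the matcher above accepts
# (the permission field must contain 'x' for the mapping to be kept):
#
#   7f0e3c000000-7f0e3c021000 r-xp 00000000 08:01 123456  /usr/lib/ld.so
#
# Groups 1 and 2 are the start and end addresses; group 3, once stripped,
# is the file name used to skip anonymous mappings other than [vdso].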
 
 # A symbol/value pair as expected from gdb frame decorators.
+
+
 class FrameSymbol(object):
     def __init__(self, sym, val):
         self.sym = sym
         self.val = val
 
     def symbol(self):
         return self.sym
 
     def value(self):
         return self.val
 
 # This represents a single JIT frame for the purposes of display.
 # That is, the frame filter creates instances of this when it sees a
 # JIT frame in the stack.
+
+
 class JitFrameDecorator(FrameDecorator):
     def __init__(self, base, info, cache):
         super(JitFrameDecorator, self).__init__(base)
         self.info = info
         self.cache = cache
 
     def _decode_jitframe(self, this_frame):
         calleetoken = long(this_frame['calleeToken_'])
@@ -199,20 +209,22 @@ class JitFrameDecorator(FrameDecorator):
                 script = self._decode_jitframe(this_frame)["script"]
                 if script is not None:
                     obj = script['sourceObject_']['value']
                     # Verify that this is a ScriptSource object.
                     # FIXME should also deal with wrappers here.
                     nativeobj = obj.cast(self.cache.NativeObject)
                     # See bug 987069 and despair.  At least this
                     # approach won't give exceptions.
-                    class_name = nativeobj['group_']['value']['clasp_']['name'].string("ISO-8859-1")
+                    class_name = nativeobj['group_']['value']['clasp_']['name'].string(
+                        "ISO-8859-1")
                     if class_name != "ScriptSource":
                         return FrameDecorator.filename(self)
-                    scriptsourceobj = (nativeobj + 1).cast(self.cache.HeapSlot)[self.cache.SOURCE_SLOT]
+                    scriptsourceobj = (
+                        nativeobj + 1).cast(self.cache.HeapSlot)[self.cache.SOURCE_SLOT]
                     scriptsource = scriptsourceobj['value']['data']['asBits'] << 1
                     scriptsource = scriptsource.cast(self.cache.ScriptSource)
                     return scriptsource['filename_']['mTuple']['mFirstA'].string()
         return FrameDecorator.filename(self)
 
     def frame_args(self):
         this_frame = self.info["this_frame"]
         if this_frame is None:
@@ -236,16 +248,18 @@ class JitFrameDecorator(FrameDecorator):
             if i == 0:
                 name = 'this'
             else:
                 name = 'arg%d' % i
             result.append(FrameSymbol(name, args_ptr[i]))
         return result
 
 # A frame filter for SpiderMonkey.
+
+
 class SpiderMonkeyFrameFilter(object):
     # |state_holder| is either None, or an instance of
     # SpiderMonkeyUnwinder.  If the latter, then this class will
     # reference the |unwinder_state| attribute to find the current
     # unwinder state.
     def __init__(self, cache, state_holder):
         self.name = "SpiderMonkey"
         self.enabled = True
@@ -261,16 +275,18 @@ class SpiderMonkeyFrameFilter(object):
         if info is None:
             return frame
         return JitFrameDecorator(frame, info, self.cache)
 
     def filter(self, frame_iter):
         return imap(self.maybe_wrap_frame, frame_iter)
 
 # A frame id class, as specified by the gdb unwinder API.
+
+
 class SpiderMonkeyFrameId(object):
     def __init__(self, sp, pc):
         self.sp = sp
         self.pc = pc
 
 # This holds all the state needed during a given unwind.  Each time a
 # new unwind is done, a new instance of this class is created.  It
 # keeps track of all the state needed to unwind JIT frames.  Note that
@@ -280,16 +296,18 @@ class SpiderMonkeyFrameId(object):
 # architecture, both because we need to use arch-specific register
 # names, and because entry frame unwinding is arch-specific.
 # See https://sourceware.org/bugzilla/show_bug.cgi?id=19286 for info
 # about the register name issue.
 #
 # Each subclass must define SP_REGISTER, PC_REGISTER, and
 # SENTINEL_REGISTER (see x64UnwinderState for info); and implement
 # unwind_entry_frame_registers.
+
+
 class UnwinderState(object):
     def __init__(self, typecache):
         self.next_sp = None
         self.next_type = None
         self.activation = None
         # An unwinder instance is specific to a thread.  Record the
         # selected thread for later verification.
         self.thread = gdb.selected_thread()
@@ -309,18 +327,18 @@ class UnwinderState(object):
         sp = long(frame.read_register(self.SP_REGISTER))
         if sp in self.frame_map:
             return self.frame_map[sp]
         return None
 
     # Add information about a frame to the frame map.  This map is
     # queried by |self.get_frame|.  |sp| is the frame's stack pointer,
     # and |name| the frame's type as a string, e.g. "JitFrame_Exit".
-    def add_frame(self, sp, name = None, this_frame = None):
-        self.frame_map[long(sp)] = { "name": name, "this_frame": this_frame }
+    def add_frame(self, sp, name=None, this_frame=None):
+        self.frame_map[long(sp)] = {"name": name, "this_frame": this_frame}
 
     # See whether |pc| is claimed by some text mapping.  See
     # |parse_proc_maps| for details on how the decision is made.
     def text_address_claimed(self, pc):
         for (start, end) in self.proc_mappings:
             if (pc >= start and pc <= end):
                 return True
         return False
@@ -403,17 +421,17 @@ class UnwinderState(object):
         this_class_type = self.typecache.frame_class_types[frame_type]
         this_frame = frame.cast(this_class_type)
 
         # Register this frame so the frame filter can find it.  This
         # is registered using SP because we don't have any other good
         # approach -- you can't get the frame id from a gdb.Frame.
         # https://sourceware.org/bugzilla/show_bug.cgi?id=19800
         frame_name = self.typecache.frame_enum_names[frame_type]
-        self.add_frame(sp, name = frame_name, this_frame = this_frame)
+        self.add_frame(sp, name=frame_name, this_frame=this_frame)
 
         # Update internal state for the next unwind.
         self.next_sp = next_sp
         self.next_type = next_type
 
         unwind_info = pending_frame.create_unwind_info(frame_id)
         unwind_info.add_saved_register(self.PC_REGISTER, next_pc)
         unwind_info.add_saved_register(self.SP_REGISTER, next_sp)
@@ -447,17 +465,17 @@ class UnwinderState(object):
         frame_type = self.typecache.JitFrame_Exit
         return self.create_frame(pc, exit_sp, packedExitFP, frame_type, pending_frame)
 
     # A wrapper for unwind_entry_frame_registers that handles
     # architecture-independent boilerplate.
     def unwind_entry_frame(self, pc, pending_frame):
         sp = self.next_sp
         # Notify the frame filter.
-        self.add_frame(sp, name = 'JitFrame_CppToJSJit')
+        self.add_frame(sp, name='JitFrame_CppToJSJit')
         # Make an unwind_info for the per-architecture code to fill in.
         frame_id = SpiderMonkeyFrameId(sp, pc)
         unwind_info = pending_frame.create_unwind_info(frame_id)
         self.unwind_entry_frame_registers(sp, unwind_info)
         self.next_sp = None
         self.next_type = None
         return unwind_info
 
@@ -477,16 +495,18 @@ class UnwinderState(object):
                 return self.unwind_entry_frame(pc, pending_frame)
             return self.unwind_ordinary(pc, pending_frame)
         # Maybe we've found an exit frame.  FIXME I currently don't
         # know how to identify these precisely, so we'll just hope for
         # the time being.
         return self.unwind_exit_frame(pc, pending_frame)
 
 # The UnwinderState subclass for x86-64.
+
+
 class x64UnwinderState(UnwinderState):
     SP_REGISTER = 'rsp'
     PC_REGISTER = 'rip'
 
     # A register unique to this architecture, that is also likely to
     # have been saved in any frame.  The best thing to use here is
     # some arch-specific name for PC or SP.
     SENTINEL_REGISTER = 'rip'
@@ -506,16 +526,18 @@ class x64UnwinderState(UnwinderState):
             sp = sp + 1
             unwind_info.add_saved_register(reg, data)
             if reg is "rbp":
                 unwind_info.add_saved_register(self.SP_REGISTER, sp)
 
 # The unwinder object.  This provides the "user interface" to the JIT
 # unwinder, and also handles constructing or destroying UnwinderState
 # objects as needed.
+
+
 class SpiderMonkeyUnwinder(Unwinder):
     # A list of all the possible unwinders.  See |self.make_unwinder|.
     UNWINDERS = [x64UnwinderState]
 
     def __init__(self, typecache):
         super(SpiderMonkeyUnwinder, self).__init__("SpiderMonkey")
         self.typecache = typecache
         self.unwinder_state = None
@@ -570,16 +592,18 @@ class SpiderMonkeyUnwinder(Unwinder):
             return None
         return self.unwinder_state.unwind(pending_frame)
 
     def invalidate_unwinder_state(self, *args, **kwargs):
         self.unwinder_state = None
 
 # Register the unwinder and frame filter with |objfile|.  If |objfile|
 # is None, register them globally.
+
+
 def register_unwinder(objfile):
     type_cache = UnwinderTypeCache()
     unwinder = None
     # This currently only works on Linux, due to parse_proc_maps.
     if _have_unwinder and platform.system() == "Linux":
         unwinder = SpiderMonkeyUnwinder(type_cache)
         gdb.unwinder.register_unwinder(objfile, unwinder, replace=True)
     # We unconditionally register the frame filter, because at some
--- a/js/src/gdb/progressbar.py
+++ b/js/src/gdb/progressbar.py
@@ -1,15 +1,18 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 # Text progress bar library, like curl or scp.
 
-import sys, datetime, time
+import sys
+import datetime
+import time
+
 
 class ProgressBar(object):
     def __init__(self, label, limit, label_width=12):
         self.label = label
         self.limit = limit
         self.label_width = label_width
         self.cur = 0
         self.t0 = datetime.datetime.now()
@@ -20,29 +23,30 @@ class ProgressBar(object):
 
     def update(self, value):
         self.cur = value
         pct = int(100.0 * self.cur / self.limit)
         barlen = int(1.0 * self.barlen * self.cur / self.limit) - 1
         bar = '='*barlen + '>'
         dt = datetime.datetime.now() - self.t0
         dt = dt.seconds + dt.microseconds * 1e-6
-        line = self.fmt%(self.label[:self.label_width], pct, bar, dt)
+        line = self.fmt % (self.label[:self.label_width], pct, bar, dt)
         self.fullwidth = len(line)
         sys.stdout.write(line)
         sys.stdout.flush()
 
     # Clear the current bar and leave the cursor at the start of the line.
     def clear(self):
         if (self.fullwidth):
             sys.stdout.write('\r' + ' ' * self.fullwidth + '\r')
             self.fullwidth = None
 
     def finish(self):
         self.update(self.limit)
         sys.stdout.write('\n')
 
+
 if __name__ == '__main__':
     pb = ProgressBar('test', 12)
     for i in range(12):
         pb.update(i)
         time.sleep(0.5)
     pb.finish()
--- a/js/src/gdb/run-tests.py
+++ b/js/src/gdb/run-tests.py
@@ -1,23 +1,29 @@
 #!/usr/bin/env python
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 # run-tests.py -- Python harness for GDB SpiderMonkey support
 
-import os, re, subprocess, sys, traceback
+import os
+import re
+import subprocess
+import sys
+import traceback
 from threading import Thread
 
 # From this directory:
 import progressbar
 from taskpool import TaskPool, get_cpu_count
 
 # Backported from Python 3.1 posixpath.py
+
+
 def _relpath(path, start=None):
     """Return a relative version of a path"""
 
     if not path:
         raise ValueError("no path specified")
 
     if start is None:
         start = os.curdir
@@ -28,61 +34,70 @@ def _relpath(path, start=None):
     # Work out how much of the filepath is shared by start and path.
     i = len(os.path.commonprefix([start_list, path_list]))
 
     rel_list = [os.pardir] * (len(start_list)-i) + path_list[i:]
     if not rel_list:
         return os.curdir
     return os.path.join(*rel_list)
 
+
 os.path.relpath = _relpath
 
 # Characters that need to be escaped when used in shell words.
 shell_need_escapes = re.compile(r'[^\w\d%+,-./:=@\'"]', re.DOTALL)
 # Characters that need to be escaped within double-quoted strings.
 shell_dquote_escapes = re.compile(r'[^\w\d%+,-./:=@"]', re.DOTALL)
+
+
 def make_shell_cmd(l):
     def quote(s):
         if shell_need_escapes.search(s):
             if s.find("'") < 0:
                 return "'" + s + "'"
             return '"' + shell_dquote_escapes.sub('\\g<0>', s) + '"'
         return s
 
     return ' '.join([quote(_) for _ in l])
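# Editorial example: make_shell_cmd(['gdb', '--args', 'out dir/js']) returns
#
#   gdb --args 'out dir/js'
#
# since the embedded space forces quoting and, containing no single quote,
# the word takes the cheap '...' form.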
 
 # An instance of this class collects the lists of passing, failing, and
 # timing-out tests, runs the progress bar, and prints a summary at the end.
+
+
 class Summary(object):
 
     class SummaryBar(progressbar.ProgressBar):
         def __init__(self, limit):
             super(Summary.SummaryBar, self).__init__('', limit, 24)
+
         def start(self):
             self.label = '[starting           ]'
             self.update(0)
+
         def counts(self, run, failures, timeouts):
             self.label = '[%4d|%4d|%4d|%4d]' % (run - failures, failures, timeouts, run)
             self.update(run)
 
     def __init__(self, num_tests):
         self.run = 0
         self.failures = []              # kind of judgemental; "unexpecteds"?
         self.timeouts = []
         if not OPTIONS.hide_progress:
             self.bar = Summary.SummaryBar(num_tests)
 
     # Progress bar control.
     def start(self):
         if not OPTIONS.hide_progress:
             self.bar.start()
+
     def update(self):
         if not OPTIONS.hide_progress:
             self.bar.counts(self.run, len(self.failures), len(self.timeouts))
+
     # Call 'thunk' to show some output, while getting the progress bar out
     # of the way.
     def interleave_output(self, thunk):
         if not OPTIONS.hide_progress:
             self.bar.clear()
         thunk()
         self.update()
 
     def passed(self, test):
         self.run += 1
@@ -131,16 +146,17 @@ class Summary(object):
         if self.timeouts:
             print("tests timed out:")
             for test in self.timeouts:
                 test.show(sys.stdout)
 
         if self.failures or self.timeouts:
             sys.exit(2)
 
+
 class Test(TaskPool.Task):
     def __init__(self, path, summary):
         super(Test, self).__init__()
         self.test_path = path           # path to .py test file
         self.summary = summary
 
         # test.name is the name of the test relative to the top of the test
         # directory. This is what we use to report failures and timeouts,
@@ -199,37 +215,43 @@ class Test(TaskPool.Task):
 
     def show(self, out):
         out.write(self.name + '\n')
         if OPTIONS.write_failure_output:
             self.show_cmd(out)
             self.show_output(out)
             out.write('GDB exit code: %r\n' % (self.returncode,))
 
-def find_tests(dir, substring = None):
+
+def find_tests(dir, substring=None):
     ans = []
     for dirpath, _, filenames in os.walk(dir):
         if dirpath == '.':
             continue
         for filename in filenames:
             if not filename.endswith('.py'):
                 continue
             test = os.path.join(dirpath, filename)
             if substring is None or substring in os.path.relpath(test, dir):
                 ans.append(test)
     return ans
 
+
 def build_test_exec(builddir):
     subprocess.check_call(['make'], cwd=builddir)
 
+
 def run_tests(tests, summary):
     pool = TaskPool(tests, job_limit=OPTIONS.workercount, timeout=OPTIONS.timeout)
     pool.run_all()
 
+
 OPTIONS = None
+
+
 def main(argv):
     global OPTIONS
     script_path = os.path.abspath(__file__)
     script_dir = os.path.dirname(script_path)
 
     # OBJDIR is a standalone SpiderMonkey build directory. This is where we
     # find the SpiderMonkey shared library to link against.
     #
@@ -324,17 +346,17 @@ def main(argv):
             exclude_set.update(find_tests(OPTIONS.testdir, exclude))
         test_set -= exclude_set
 
     if not test_set:
         sys.stderr.write("No tests found matching command line arguments.\n")
         sys.exit(1)
 
     summary = Summary(len(test_set))
-    test_list = [ Test(_, summary) for _ in sorted(test_set) ]
+    test_list = [Test(_, summary) for _ in sorted(test_set)]
 
     # Build the test executable from all the .cpp files found in the test
     # directory tree.
     try:
         build_test_exec(OPTIONS.builddir)
     except subprocess.CalledProcessError as err:
         sys.stderr.write("Error building test executable: %s\n" % (err,))
         sys.exit(1)
@@ -345,10 +367,11 @@ def main(argv):
         run_tests(test_list, summary)
         summary.finish()
     except OSError as err:
         sys.stderr.write("Error running tests: %s\n" % (err,))
         sys.exit(1)
 
     sys.exit(0)
 
+
 if __name__ == '__main__':
     main(sys.argv[1:])
--- a/js/src/gdb/taskpool.py
+++ b/js/src/gdb/taskpool.py
@@ -1,21 +1,26 @@
-import fcntl, os, select, time
+import fcntl
+import os
+import select
+import time
 from subprocess import Popen, PIPE
 
 # Run a series of subprocesses. Try to keep up to a certain number going in
 # parallel at any given time. Enforce time limits.
 #
 # This is implemented using non-blocking I/O, and so is Unix-specific.
 #
 # We assume that, if a task closes its standard error, then it's safe to
 # wait for it to terminate. So an ill-behaved task that closes its standard
 # error and then hangs will hang us, as well. However, as it takes special
 # effort to close one's standard error, this seems unlikely to be a
 # problem in practice.
+
+
 class TaskPool(object):
 
     # A task we should run in a subprocess. Users should subclass this and
     # fill in the methods as given.
     class Task(object):
         def __init__(self):
             self.pipe = None
             self.start_time = None
@@ -93,18 +98,18 @@ class TaskPool(object):
                     break
 
                 # How many seconds do we have until the earliest deadline?
                 now = time.time()
                 secs_to_next_deadline = max(min([t.deadline for t in running]) - now, 0)
 
                 # Wait for output or a timeout.
                 stdouts_and_stderrs = ([t.pipe.stdout for t in running]
-                                     + [t.pipe.stderr for t in running])
-                (readable,w,x) = select.select(stdouts_and_stderrs, [], [], secs_to_next_deadline)
+                                       + [t.pipe.stderr for t in running])
+                (readable, w, x) = select.select(stdouts_and_stderrs, [], [], secs_to_next_deadline)
                 finished = set()
                 terminate = set()
                 for t in running:
                     # Since we've placed the pipes in non-blocking mode, these
                     # 'read's will simply return as many bytes as are available,
                     # rather than blocking until they have accumulated the full
                     # amount requested (or reached EOF). The 'read's should
                     # never throw, since 'select' has told us there was
@@ -148,65 +153,74 @@ class TaskPool(object):
                         t.pipe.wait()
                         t.onTimeout()
                         finished.add(t)
                 # Remove the finished tasks from the running set. (Do this here
                 # to avoid mutating the set while iterating over it.)
                 running -= finished
         return None
 
+
 def get_cpu_count():
     """
     Guess at a reasonable parallelism count to set as the default for the
     current machine and run.
     """
     # Python 2.6+
     try:
         import multiprocessing
         return multiprocessing.cpu_count()
-    except (ImportError,NotImplementedError):
+    except (ImportError, NotImplementedError):
         pass
 
     # POSIX
     try:
         res = int(os.sysconf('SC_NPROCESSORS_ONLN'))
         if res > 0:
             return res
-    except (AttributeError,ValueError):
+    except (AttributeError, ValueError):
         pass
 
     # Windows
     try:
         res = int(os.environ['NUMBER_OF_PROCESSORS'])
         if res > 0:
             return res
     except (KeyError, ValueError):
         pass
 
     return 1
 
+
 if __name__ == '__main__':
     # Test TaskPool by using it to implement the unique 'sleep sort' algorithm.
     def sleep_sort(ns, timeout):
-        sorted=[]
+        sorted = []
+
         class SortableTask(TaskPool.Task):
             def __init__(self, n):
                 super(SortableTask, self).__init__()
                 self.n = n
+
             def start(self, pipe, deadline):
                 super(SortableTask, self).start(pipe, deadline)
+
             def cmd(self):
                 return ['sh', '-c', 'echo out; sleep %d; echo err>&2' % (self.n,)]
+
             def onStdout(self, text):
                 print('%d stdout: %r' % (self.n, text))
+
             def onStderr(self, text):
                 print('%d stderr: %r' % (self.n, text))
+
             def onFinished(self, returncode):
                 print('%d (rc=%d)' % (self.n, returncode))
                 sorted.append(self.n)
+
             def onTimeout(self):
                 print('%d timed out' % (self.n,))
 
         p = TaskPool([SortableTask(_) for _ in ns], job_limit=len(ns), timeout=timeout)
         p.run_all()
         return sorted
 
-    print(repr(sleep_sort([1,1,2,3,5,8,13,21,34], 15)))
+    print(repr(sleep_sort([1, 1, 2, 3, 5, 8, 13, 21, 34], 15)))
--- a/js/src/gdb/tests/test-ExecutableAllocator.py
+++ b/js/src/gdb/tests/test-ExecutableAllocator.py
@@ -5,13 +5,14 @@ assert_subprinter_registered('SpiderMonk
 run_fragment('ExecutableAllocator.empty')
 
 assert_pretty('execAlloc', 'ExecutableAllocator([])')
 
 run_fragment('ExecutableAllocator.onepool')
 
 reExecPool = 'ExecutablePool [a-f0-9]{8,}-[a-f0-9]{8,}'
 assert_regexp_pretty('pool', reExecPool)
-assert_regexp_pretty('execAlloc', 'ExecutableAllocator\(\[' +reExecPool+ '\]\)')
+assert_regexp_pretty('execAlloc', 'ExecutableAllocator\(\[' + reExecPool + '\]\)')
 
 run_fragment('ExecutableAllocator.twopools')
 
-assert_regexp_pretty('execAlloc', 'ExecutableAllocator\(\[' + reExecPool + ', ' + reExecPool + '\]\)')
+assert_regexp_pretty(
+    'execAlloc', 'ExecutableAllocator\(\[' + reExecPool + ', ' + reExecPool + '\]\)')
--- a/js/src/gdb/tests/test-Interpreter.py
+++ b/js/src/gdb/tests/test-Interpreter.py
@@ -5,9 +5,10 @@ assert_subprinter_registered('SpiderMonk
 run_fragment('Interpreter.Regs')
 
 assert_pretty('regs', '{ fp_ = , sp = fp_.slots() + 2, pc =  (JSOP_IFEQ) }')
 
 run_fragment('Interpreter.AbstractFramePtr')
 
 assert_pretty('ifptr', 'AbstractFramePtr ((js::InterpreterFrame *) ) = {ptr_ = 146464513}')
 assert_pretty('bfptr', 'AbstractFramePtr ((js::jit::BaselineFrame *) ) = {ptr_ = 3135025122}')
-assert_pretty('rfptr', 'AbstractFramePtr ((js::jit::RematerializedFrame *) ) = {ptr_ = 3669732611}')
+assert_pretty(
+    'rfptr', 'AbstractFramePtr ((js::jit::RematerializedFrame *) ) = {ptr_ = 3669732611}')
--- a/js/src/gdb/tests/test-Root.py
+++ b/js/src/gdb/tests/test-Root.py
@@ -11,17 +11,17 @@ assert_pretty('obj', '(JSObject * const)
 assert_pretty('mutableObj', '(JSObject *)  [object global] delegate')
 
 run_fragment('Root.HeapSlot')
 
 # This depends on implementation details of arrays, but since HeapSlot is
 # not a public type, I'm not sure how to avoid doing *something* ugly.
 assert_pretty('((js::NativeObject *) array.ptr)->elements_[0]', '$JS::Value("plinth")')
 
-run_fragment('Root.barriers');
+run_fragment('Root.barriers')
 
-assert_pretty('prebarriered', '(JSObject *)  [object Object]');
-assert_pretty('heapptr', '(JSObject *)  [object Object]');
-assert_pretty('relocatable', '(JSObject *)  [object Object]');
-assert_pretty('val', '$JS::Value((JSObject *)  [object Object])');
-assert_pretty('heapValue', '$JS::Value((JSObject *)  [object Object])');
-assert_pretty('prebarrieredValue', '$JS::Value((JSObject *)  [object Object])');
-assert_pretty('relocatableValue', '$JS::Value((JSObject *)  [object Object])');
+assert_pretty('prebarriered', '(JSObject *)  [object Object]')
+assert_pretty('heapptr', '(JSObject *)  [object Object]')
+assert_pretty('relocatable', '(JSObject *)  [object Object]')
+assert_pretty('val', '$JS::Value((JSObject *)  [object Object])')
+assert_pretty('heapValue', '$JS::Value((JSObject *)  [object Object])')
+assert_pretty('prebarrieredValue', '$JS::Value((JSObject *)  [object Object])')
+assert_pretty('relocatableValue', '$JS::Value((JSObject *)  [object Object])')
--- a/js/src/gdb/tests/test-prettyprinters.py
+++ b/js/src/gdb/tests/test-prettyprinters.py
@@ -1,22 +1,24 @@
 import mozilla.prettyprinters
 
 run_fragment('prettyprinters.implemented_types')
 
+
 def implemented_type_names(expr):
     v = gdb.parse_and_eval(expr)
     it = mozilla.prettyprinters.implemented_types(v.type)
     return [str(_) for _ in it]
 
+
 assert_eq(implemented_type_names('i'), ['int'])
 assert_eq(implemented_type_names('a'), ['A', 'int'])
 assert_eq(implemented_type_names('b'), ['B', 'A', 'int'])
 assert_eq(implemented_type_names('c'), ['C'])
 assert_eq(implemented_type_names('c_'), ['C_', 'C'])
 assert_eq(implemented_type_names('e'), ['E', 'C', 'D'])
 assert_eq(implemented_type_names('e_'), ['E_', 'E', 'C', 'D'])
 assert_eq(implemented_type_names('f'), ['F', 'C', 'D'])
 assert_eq(implemented_type_names('h'), ['H', 'F', 'G', 'C', 'D'])
 
 # Check that our pretty-printers aren't interfering with printing other types.
 assert_pretty('10', '10')
-assert_pretty('(void*) 0', '') # Because of 'set print address off'
+assert_pretty('(void*) 0', '')  # Because of 'set print address off'
--- a/js/src/gdb/tests/test-unwind.py
+++ b/js/src/gdb/tests/test-unwind.py
@@ -1,12 +1,13 @@
 # Test the unwinder and the frame filter.
 
 import platform
 
+
 def do_unwinder_test():
     # The unwinder is disabled by default for the moment. Turn it on to check
     # that the unwinder works as expected.
     import gdb
     gdb.execute("enable unwinder .* SpiderMonkey")
 
     run_fragment('unwind.simple', 'Something')
 
@@ -42,16 +43,17 @@ def do_unwinder_test():
     assert_eq(found_main, True)
     # Had to have found the entry and exit frames.
     assert_eq(found_exit, True)
     assert_eq(found_entry, True)
     # Had to have found the names of the two JS functions.
     assert_eq(found_inner, True)
     assert_eq(found_outer, True)
 
+
 # Only on the right platforms.
 if platform.machine() == 'x86_64' and platform.system() == 'Linux':
     # Only test when gdb has the unwinder feature.
     try:
         import gdb.unwinder
         import gdb.frames
         do_unwinder_test()
     except:
--- a/js/src/gdb/tests/typedef-printers.py
+++ b/js/src/gdb/tests/typedef-printers.py
@@ -1,14 +1,17 @@
 # Test that we can find pretty-printers for typedef names, not just for
 # struct types and templates.
 
 import mozilla.prettyprinters
 
+
 @mozilla.prettyprinters.pretty_printer('my_typedef')
 class my_typedef(object):
     def __init__(self, value, cache):
         pass
+
     def to_string(self):
         return 'huzzah'
 
+
 run_fragment('typedef_printers.one')
 assert_pretty('i', 'huzzah')
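
The @mozilla.prettyprinters.pretty_printer decorator used above lives in mozilla/prettyprinters.py, which this patch does not show. As an illustration only (this is not the actual mozilla.prettyprinters code), a name-keyed registration decorator of that shape can be as small as:

    printers = {}  # type name -> printer class

    def pretty_printer(type_name):
        def register(cls):
            printers[type_name] = cls
            return cls
        return register

    @pretty_printer('my_typedef')
    class MyTypedef(object):
        def __init__(self, value, cache):
            self.value = value

        def to_string(self):
            return 'huzzah'

    assert printers['my_typedef'] is MyTypedef
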
--- a/js/src/jit-test/jit_test.py
+++ b/js/src/jit-test/jit_test.py
@@ -1,42 +1,55 @@
 #!/usr/bin/env python
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import print_function, unicode_literals
 
-import math, os, platform, posixpath, shlex, shutil, subprocess, sys, traceback
+import math
+import os
+import platform
+import posixpath
+import shlex
+import shutil
+import subprocess
+import sys
+import traceback
+
 
 def add_libdir_to_path():
     from os.path import dirname, exists, join, realpath
     js_src_dir = dirname(dirname(realpath(sys.argv[0])))
     assert exists(join(js_src_dir, 'jsapi.h'))
     sys.path.insert(0, join(js_src_dir, 'lib'))
     sys.path.insert(0, join(js_src_dir, 'tests', 'lib'))
 
+
 add_libdir_to_path()
 
 import jittests
 from tests import get_jitflags, valid_jitflags, get_cpu_count, get_environment_overlay, \
-                  change_env
+    change_env
 
 # Python 3.3 added shutil.which, but we can't use that yet.
+
+
 def which(name):
     if name.find(os.path.sep) != -1:
         return os.path.abspath(name)
 
     for path in os.environ["PATH"].split(os.pathsep):
         full = os.path.join(path, name)
         if os.path.exists(full):
             return os.path.abspath(full)
 
     return name
 
+
 def choose_item(jobs, max_items, display):
     job_count = len(jobs)
 
     # Don't present a choice if there are too many tests
     if job_count > max_items:
         raise Exception('Too many jobs.')
 
     for i, job in enumerate(jobs, 1):
@@ -47,16 +60,17 @@ def choose_item(jobs, max_items, display
         item = int(item)
         if item > job_count or item < 1:
             raise Exception('Input isn\'t between 1 and {}'.format(job_count))
     except ValueError:
         raise Exception('Unrecognized input')
 
     return jobs[item - 1]
 
+
 def main(argv):
     # The [TESTS] optional arguments are paths of test files relative
     # to the jit-test/tests directory.
     from optparse import OptionParser
     op = OptionParser(usage='%prog [options] JS_SHELL [TESTS]')
     op.add_option('-s', '--show-cmd', dest='show_cmd', action='store_true',
                   help='show js shell command run')
     op.add_option('-f', '--show-failed-cmd', dest='show_failed',
@@ -171,17 +185,17 @@ def main(argv):
     js_shell = which(args[0])
     test_args = args[1:]
     test_environment = get_environment_overlay(js_shell)
 
     if not (os.path.isfile(js_shell) and os.access(js_shell, os.X_OK)):
         if (platform.system() != 'Windows' or
             os.path.isfile(js_shell) or not
             os.path.isfile(js_shell + ".exe") or not
-            os.access(js_shell + ".exe", os.X_OK)):
+                os.access(js_shell + ".exe", os.X_OK)):
             op.error('shell is not executable: ' + js_shell)
 
     if jittests.stdio_might_be_broken():
         # Prefer erring on the side of caution and not using stdio if
         # it might be broken on this platform.  The file-redirect
         # fallback should work on any platform, so at worst by
         # guessing wrong we might have slowed down the tests a bit.
         #
@@ -317,17 +331,17 @@ def main(argv):
             sys.exit("Error reading file: " + options.ignore_timeouts)
     else:
         options.ignore_timeouts = set()
 
     prefix = [js_shell] + shlex.split(options.shell_args)
     prologue = os.path.join(jittests.LIB_DIR, 'prologue.js')
     if options.remote:
         prologue = posixpath.join(options.remote_test_root,
-                                'jit-tests', 'jit-tests', 'lib', 'prologue.js')
+                                  'jit-tests', 'jit-tests', 'lib', 'prologue.js')
 
     prefix += ['-f', prologue]
 
     # Clean up any remnants from previous crashes etc
     shutil.rmtree(jittests.JS_CACHE_DIR, ignore_errors=True)
     os.mkdir(jittests.JS_CACHE_DIR)
 
     if options.debugger:
@@ -376,10 +390,11 @@ def main(argv):
     except OSError:
         if not os.path.exists(prefix[0]):
             print("JS shell argument: file does not exist:"
                   " '{}'".format(prefix[0]), file=sys.stderr)
             sys.exit(1)
         else:
             raise
 
+
 if __name__ == '__main__':
     main(sys.argv[1:])
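
The hand-rolled which() above exists only because, per its comment, the harness could not yet rely on Python 3.3's shutil.which. The modern equivalent (a sketch, not part of this patch) differs in one detail: it returns None when nothing is found, where the helper above falls back to the bare name.

    import shutil

    js_shell = shutil.which('js')  # absolute path, or None if not on PATH
    if js_shell is None:
        raise SystemExit('could not find a js shell on PATH')
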
--- a/js/src/jit/GenerateOpcodeFiles.py
+++ b/js/src/jit/GenerateOpcodeFiles.py
@@ -18,40 +18,44 @@ HEADER_TEMPLATE = """\
 /* This file is generated by jit/GenerateOpcodeFiles.py. Do not edit! */
 
 #define %(listname)s(_) \\
 %(ops)s
 
 #endif // %(includeguard)s
 """
 
+
 def get_opcodes(inputs, pat):
-  # Preserve the original order. Use a set to detect duplicates.
-  ops = []
-  ops_set = set()
-  for inputfile in inputs:
-    for line in open(inputfile):
-      match = pat.match(line)
-      if match:
-        op = match.group('name')
-        if op in ops_set:
-          raise Exception("Duplicate opcode {} in {}".format(op, inputfile))
-        ops.append(op)
-        ops_set.add(op)
-  assert len(ops) == len(ops_set)
-  return ops
+    # Preserve the original order. Use a set to detect duplicates.
+    ops = []
+    ops_set = set()
+    for inputfile in inputs:
+        for line in open(inputfile):
+            match = pat.match(line)
+            if match:
+                op = match.group('name')
+                if op in ops_set:
+                    raise Exception("Duplicate opcode {} in {}".format(op, inputfile))
+                ops.append(op)
+                ops_set.add(op)
+    assert len(ops) == len(ops_set)
+    return ops
+
 
 def generate_header(c_out, inputs, pat, includeguard, listname):
-  ops = get_opcodes(inputs, pat)
-  ops_string = '\\\n'.join(['_(' + op + ')' for op in ops])
-  c_out.write(HEADER_TEMPLATE % {
-    'ops': ops_string,
-    'includeguard': includeguard,
-    'listname': listname,
-  })
+    ops = get_opcodes(inputs, pat)
+    ops_string = '\\\n'.join(['_(' + op + ')' for op in ops])
+    c_out.write(HEADER_TEMPLATE % {
+        'ops': ops_string,
+        'includeguard': includeguard,
+        'listname': listname,
+    })
+
 
 def generate_mir_header(c_out, *inputs):
-  pat = re.compile(r"^\s*INSTRUCTION_HEADER(_WITHOUT_TYPEPOLICY)?\((?P<name>\w+)\);?$")
-  generate_header(c_out, inputs, pat, 'jit_MOpcodes_h', 'MIR_OPCODE_LIST')
+    pat = re.compile(r"^\s*INSTRUCTION_HEADER(_WITHOUT_TYPEPOLICY)?\((?P<name>\w+)\);?$")
+    generate_header(c_out, inputs, pat, 'jit_MOpcodes_h', 'MIR_OPCODE_LIST')
+
 
 def generate_lir_header(c_out, *inputs):
-  pat = re.compile(r"^\s*LIR_HEADER\((?P<name>\w+)\);?$")
-  generate_header(c_out, inputs, pat, 'jit_LOpcodes_h', 'LIR_OPCODE_LIST')
+    pat = re.compile(r"^\s*LIR_HEADER\((?P<name>\w+)\);?$")
+    generate_header(c_out, inputs, pat, 'jit_LOpcodes_h', 'LIR_OPCODE_LIST')
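
To see what get_opcodes() extracts, here is the MIR pattern applied to a few made-up header lines (illustration only; the opcode names are hypothetical):

    import re

    pat = re.compile(r"^\s*INSTRUCTION_HEADER(_WITHOUT_TYPEPOLICY)?\((?P<name>\w+)\);?$")

    samples = [
        '    INSTRUCTION_HEADER(Add);',                    # matches -> 'Add'
        '    INSTRUCTION_HEADER_WITHOUT_TYPEPOLICY(Box)',  # matches -> 'Box'
        '    // INSTRUCTION_HEADER(Commented);',           # no match
    ]
    for line in samples:
        m = pat.match(line)
        print(m.group('name') if m else None)
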
--- a/js/src/jit/arm/gen-double-encoder-table.py
+++ b/js/src/jit/arm/gen-double-encoder-table.py
@@ -7,26 +7,29 @@
 DOES NOT get automatically run during the build process.  If you need to
 modify this file (which is unlikely), you must re-run this script:
 
 python gen-double-encoder-table.py > $(topsrcdir)/path/to/DoubleEntryTable.tbl
 """
 
 import operator
 
+
 def rep(bit, count):
     return reduce(operator.ior, [bit << c for c in range(count)])
 
+
 def encodeDouble(value):
     """Generate an ARM ARM 'VFP modified immediate constant' with format:
     aBbbbbbb bbcdefgh 000...
 
     We will return the top 32 bits of the double; the rest are 0."""
     assert (0 <= value) and (value <= 255)
     a = value >> 7
     b = (value >> 6) & 1
     B = int(b == 0)
     cdefgh = value & 0x3f
     return (a << 31) | (B << 30) | (rep(b, 8) << 22) | cdefgh << 16
 
+
+print '/* THIS FILE IS AUTOMATICALLY GENERATED BY gen-double-encoder-table.py.  */'
 for i in range(256):
     print '  { 0x%08x, { %d, %d, 0 } },' % (encodeDouble(i), i & 0xf, i >> 4)
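
Note this file is still Python 2 (print statements, the builtin reduce). For reference, a Python 3 rendering of the encoder with one worked value (a sketch, not part of this patch): value 1 has a=0 and b=0 (so B=1) with cdefgh=1, giving 0x40010000.

    from functools import reduce
    import operator

    def rep(bit, count):
        return reduce(operator.ior, [bit << c for c in range(count)])

    def encode_double(value):
        assert 0 <= value <= 255
        a = value >> 7
        b = (value >> 6) & 1
        B = int(b == 0)
        cdefgh = value & 0x3f
        return (a << 31) | (B << 30) | (rep(b, 8) << 22) | cdefgh << 16

    assert encode_double(1) == 0x40010000
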
--- a/js/src/tests/jstests.py
+++ b/js/src/tests/jstests.py
@@ -6,24 +6,27 @@
 """
 The JS Shell Test Harness.
 
 See the adjacent README.txt for more details.
 """
 
 from __future__ import print_function
 
-import os, sys, textwrap, platform
+import os
+import sys
+import textwrap
+import platform
 from os.path import abspath, dirname, isfile, realpath
 from contextlib import contextmanager
 from copy import copy
 from subprocess import list2cmdline, call
 
 from lib.tests import RefTestCase, get_jitflags, get_cpu_count, \
-                      get_environment_overlay, change_env
+    get_environment_overlay, change_env
 from lib.results import ResultsSink
 from lib.progressbar import ProgressBar
 
 if sys.platform.startswith('linux') or sys.platform.startswith('darwin'):
     from lib.tasks_unix import run_all_tests
 else:
     from lib.tasks_win import run_all_tests
 
@@ -36,34 +39,34 @@ def changedir(dirname):
         yield
     finally:
         os.chdir(pwd)
 
 
 class PathOptions(object):
     def __init__(self, location, requested_paths, excluded_paths):
         self.requested_paths = requested_paths
-        self.excluded_files, self.excluded_dirs = PathOptions._split_files_and_dirs(location, excluded_paths)
+        self.excluded_files, self.excluded_dirs = PathOptions._split_files_and_dirs(
+            location, excluded_paths)
 
     @staticmethod
     def _split_files_and_dirs(location, paths):
         """Split up a set of paths into files and directories"""
         files, dirs = set(), set()
         for path in paths:
             fullpath = os.path.join(location, path)
             if path.endswith('/'):
                 dirs.add(path[:-1])
             elif os.path.isdir(fullpath):
                 dirs.add(path)
             elif os.path.exists(fullpath):
                 files.add(path)
 
         return files, dirs
 
-
     def should_run(self, filename):
         # If any tests are requested by name, skip tests that do not match.
         if self.requested_paths and not any(req in filename for req in self.requested_paths):
             return False
 
         # Skip excluded tests.
         if filename in self.excluded_files:
             return False
@@ -188,18 +191,18 @@ def parse_args():
                          help='Write all not-passed tests to the given file.')
     output_og.add_option('--no-progress', dest='hide_progress',
                          action='store_true',
                          help='Do not show the progress bar.')
     output_og.add_option('--tinderbox', dest='format', action='store_const',
                          const='automation',
                          help='Use automation-parseable output format.')
     output_og.add_option('--format', dest='format', default='none',
-                          type='choice', choices=['automation', 'none'],
-                          help='Output format. Either automation or none'
+                         type='choice', choices=['automation', 'none'],
+                         help='Output format. Either automation or none'
                          ' (default %default).')
     op.add_option_group(output_og)
 
     special_og = OptionGroup(op, "Special",
                              "Special modes that do not run tests.")
     special_og.add_option('--make-manifests', metavar='BASE_TEST_PATH',
                           help='Generate reftest manifest files.')
     op.add_option_group(special_og)
@@ -379,19 +382,19 @@ def load_tests(options, requested_paths,
 
 def main():
     options, prefix, requested_paths, excluded_paths = parse_args()
     if options.js_shell is not None and not (isfile(options.js_shell) and
                                              os.access(options.js_shell, os.X_OK)):
         if (platform.system() != 'Windows' or
             isfile(options.js_shell) or not
             isfile(options.js_shell + ".exe") or not
-            os.access(options.js_shell + ".exe", os.X_OK)):
-           print('Could not find executable shell: ' + options.js_shell)
-           return 1
+                os.access(options.js_shell + ".exe", os.X_OK)):
+            print('Could not find executable shell: ' + options.js_shell)
+            return 1
 
     test_count, test_gen = load_tests(options, requested_paths, excluded_paths)
     test_environment = get_environment_overlay(options.js_shell)
 
     if test_count == 0:
         print('no tests selected')
         return 1
 
@@ -420,10 +423,11 @@ def main():
             results.finish(True)
         except KeyboardInterrupt:
             results.finish(False)
 
         return 0 if results.all_passed() else 1
 
     return 0
 
+
 if __name__ == '__main__':
     sys.exit(main())
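
The two filtering rules visible in PathOptions.should_run earlier in this file's diff can be restated in isolation (a sketch with hypothetical paths; it ignores the directory-exclusion check that follows in the full method):

    requested = {'non262/String'}
    excluded_files = {'non262/String/broken.js'}

    def should_run(filename):
        # Rule 1: if any tests are requested by name, skip non-matches.
        if requested and not any(req in filename for req in requested):
            return False
        # Rule 2: skip explicitly excluded files.
        if filename in excluded_files:
            return False
        return True

    assert should_run('non262/String/match.js')
    assert not should_run('non262/String/broken.js')
    assert not should_run('non262/Array/sort.js')
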
--- a/js/src/tests/lib/jittests.py
+++ b/js/src/tests/lib/jittests.py
@@ -2,17 +2,20 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 
 # jit_test.py -- Python harness for JavaScript trace tests.
 
 from __future__ import print_function
-import os, posixpath, sys, traceback
+import os
+import posixpath
+import sys
+import traceback
 import subprocess
 from collections import namedtuple
 from datetime import datetime
 
 if sys.platform.startswith('linux') or sys.platform.startswith('darwin'):
     from tasks_unix import run_all_tests
 else:
     from tasks_win import run_all_tests
@@ -26,16 +29,18 @@ JS_DIR = os.path.dirname(os.path.dirname
 TOP_SRC_DIR = os.path.dirname(os.path.dirname(JS_DIR))
 TEST_DIR = os.path.join(JS_DIR, 'jit-test', 'tests')
 LIB_DIR = os.path.join(JS_DIR, 'jit-test', 'lib') + os.path.sep
 MODULE_DIR = os.path.join(JS_DIR, 'jit-test', 'modules') + os.path.sep
 JS_CACHE_DIR = os.path.join(JS_DIR, 'jit-test', '.js-cache')
 JS_TESTS_DIR = posixpath.join(JS_DIR, 'tests')
 
 # Backported from Python 3.1 posixpath.py
+
+
 def _relpath(path, start=None):
     """Return a relative version of a path"""
 
     if not path:
         raise ValueError("no path specified")
 
     if start is None:
         start = os.curdir
@@ -46,42 +51,47 @@ def _relpath(path, start=None):
     # Work out how much of the filepath is shared by start and path.
     i = len(os.path.commonprefix([start_list, path_list]))
 
     rel_list = [os.pardir] * (len(start_list)-i) + path_list[i:]
     if not rel_list:
         return os.curdir
     return os.path.join(*rel_list)
 
+
 # Mapping of Python chars to their javascript string representation.
 QUOTE_MAP = {
     '\\': '\\\\',
     '\b': '\\b',
     '\f': '\\f',
     '\n': '\\n',
     '\r': '\\r',
     '\t': '\\t',
     '\v': '\\v'
 }
 
 # Quote the string S, javascript style.
+
+
 def js_quote(quote, s):
     result = quote
     for c in s:
         if c == quote:
             result += '\\' + quote
         elif c in QUOTE_MAP:
             result += QUOTE_MAP[c]
         else:
             result += c
     result += quote
     return result
 
+
 os.path.relpath = _relpath
 
+
 class JitTest:
 
     VALGRIND_CMD = []
     paths = (d for d in os.environ['PATH'].split(os.pathsep))
     valgrinds = (os.path.join(d, 'valgrind') for d in paths)
     if any(os.path.exists(p) for p in valgrinds):
         VALGRIND_CMD = [
             'valgrind', '-q', '--smc-check=all-non-file',
@@ -101,33 +111,33 @@ class JitTest:
         # Path relative to the top mozilla/ directory.
         self.relpath_top = os.path.relpath(path, TOP_SRC_DIR)
 
         # Path relative to mozilla/js/src/jit-test/tests/.
         self.relpath_tests = os.path.relpath(path, TEST_DIR)
 
         self.jitflags = []     # jit flags to enable
         self.slow = False      # True means the test is slow-running
-        self.allow_oom = False # True means that OOM is not considered a failure
-        self.allow_unhandlable_oom = False # True means CrashAtUnhandlableOOM
-                                           # is not considered a failure
-        self.allow_overrecursed = False # True means that hitting the recursion
-                                        # limits is not considered a failure.
+        self.allow_oom = False  # True means that OOM is not considered a failure
+        self.allow_unhandlable_oom = False  # True means CrashAtUnhandlableOOM
+        # is not considered a failure
+        self.allow_overrecursed = False  # True means that hitting the recursion
+        # limits is not considered a failure.
         self.valgrind = False  # True means run under valgrind
-        self.tz_pacific = False # True means force Pacific time for the test
-        self.test_also_noasmjs = False # True means run with and without asm.js
-                                       # enabled.
-        self.test_also_wasm_baseline = False # True means run with and without
-                                       # wasm baseline compiler enabled.
-        self.other_includes = [] # Additional files to include, in addition to prologue.js
-        self.test_also = [] # List of other configurations to test with.
-        self.test_join = [] # List of other configurations to test with all existing variants.
-        self.expect_error = '' # Errors to expect and consider passing
-        self.expect_status = 0 # Exit status to expect from shell
-        self.expect_crash = False # Exit status or error output.
+        self.tz_pacific = False  # True means force Pacific time for the test
+        self.test_also_noasmjs = False  # True means run with and without asm.js
+        # enabled.
+        self.test_also_wasm_baseline = False  # True means run with and without
+        # wasm baseline compiler enabled.
+        self.other_includes = []  # Additional files to include, in addition to prologue.js
+        self.test_also = []  # List of other configurations to test with.
+        self.test_join = []  # List of other configurations to test with all existing variants.
+        self.expect_error = ''  # Errors to expect and consider passing
+        self.expect_status = 0  # Exit status to expect from shell
+        self.expect_crash = False  # Exit status or error output.
         self.is_module = False
         self.test_reflect_stringify = None  # Reflect.stringify implementation to test
 
         # Expected by the test runner. Always true for jit-tests.
         self.enable = True
 
     def copy(self):
         t = JitTest(self.path)
@@ -159,22 +169,21 @@ class JitTest:
     def copy_variants(self, variants):
         # Append variants to be tested in addition to the current set of tests.
         variants = variants + self.test_also
 
         # For each existing variant, duplicate it for each list of options in
         # test_join.  This will multiply the number of variants by 2 for each
         # set of options.
         for join_opts in self.test_join:
-            variants = variants + [ opts + join_opts for opts in variants ];
+            variants = variants + [opts + join_opts for opts in variants]
 
         # For each list of jit flags, make a copy of the test.
         return [self.copy_and_extend_jitflags(v) for v in variants]
 
-
     COOKIE = '|jit-test|'
     CacheDir = JS_CACHE_DIR
     Directives = {}
 
     @classmethod
     def find_directives(cls, file_name):
         meta = ''
         line = open(file_name).readline()
@@ -300,17 +309,17 @@ class JitTest:
             quotechar = '"'
         else:
             quotechar = "'"
 
         # Don't merge the expressions: We want separate -e arguments to avoid
         # semicolons in the command line, bug 1351607.
         exprs = ["const platform={}".format(js_quote(quotechar, sys.platform)),
                  "const libdir={}".format(js_quote(quotechar, libdir)),
-                 "const scriptdir={}".format(js_quote(quotechar, scriptdir_var))];
+                 "const scriptdir={}".format(js_quote(quotechar, scriptdir_var))]
 
         # We may have specified '-a' or '-d' twice: once via --jitflags, once
         # via the "|jit-test|" line.  Remove dups because they are toggles.
         cmd = prefix + ['--js-cache', JitTest.CacheDir]
         cmd += list(set(self.jitflags))
         for expr in exprs:
             cmd += ['-e', expr]
         for inc in self.other_includes:
@@ -328,16 +337,17 @@ class JitTest:
 
         if self.allow_unhandlable_oom or self.expect_crash:
             cmd += ['--suppress-minidump']
 
         return cmd
 
     # The test runner expects this to be set; it is passed to get_command.
     js_cmd_prefix = None
+
     def get_command(self, prefix):
         """Shim for the test runner."""
         return self.command(prefix, LIB_DIR, MODULE_DIR)
 
 
 def find_tests(substring=None):
     ans = []
     for dirpath, dirnames, filenames in os.walk(TEST_DIR):
@@ -351,16 +361,17 @@ def find_tests(substring=None):
             if filename in ('shell.js', 'browser.js'):
                 continue
             test = os.path.join(dirpath, filename)
             if substring is None \
                or substring in os.path.relpath(test, TEST_DIR):
                 ans.append(test)
     return ans
 
+
 def run_test_remote(test, device, prefix, options):
     from mozdevice import ADBDevice, ADBProcessError
 
     if options.test_reflect_stringify:
         raise ValueError("can't run Reflect.stringify tests remotely")
     cmd = test.command(prefix,
                        posixpath.join(options.remote_test_root, 'lib/'),
                        posixpath.join(options.remote_test_root, 'modules/'),
@@ -387,16 +398,17 @@ def run_test_remote(test, device, prefix
         returncode = e.adb_process.exitcode
 
     elapsed = (datetime.now() - start).total_seconds()
 
     # We can't distinguish between stdout and stderr so we pass
     # the same buffer to both.
     return TestOutput(test, cmd, out, out, returncode, elapsed, False)
 
+
 def check_output(out, err, rc, timed_out, test, options):
     if timed_out:
         if os.path.normpath(test.relpath_tests).replace(os.sep, '/') \
                 in options.ignore_timeouts:
             return True
 
         # The shell sometimes hangs on shutdown on Windows 7 and Windows
         # Server 2008. See bug 970063 comment 7 for a description of the
@@ -471,16 +483,17 @@ def check_output(out, err, rc, timed_out
         # forces the exit status.
         if test.expect_status != 0 and options.unusable_error_status:
             return True
 
         return False
 
     return True
 
+
 def print_automation_format(ok, res, slog):
     # Output test failures in a parsable format suitable for automation, eg:
     # TEST-RESULT | filename.js | Failure description (code N, args "--foobar")
     #
     # Example:
     # TEST-PASS | foo/bar/baz.js | (code 0, args "--ion-eager")
     # TEST-UNEXPECTED-FAIL | foo/bar/baz.js | TypeError: or something (code -9, args "--no-ion")
     # INFO exit-status     : 3
@@ -511,16 +524,17 @@ def print_automation_format(ok, res, slo
         return
     print("INFO exit-status     : {}".format(res.rc))
     print("INFO timed-out       : {}".format(res.timed_out))
     for line in res.out.splitlines():
         print("INFO stdout          > " + line.strip())
     for line in res.err.splitlines():
         print("INFO stderr         2> " + line.strip())
 
+
 def print_test_summary(num_tests, failures, complete, doing, options):
     if failures:
         if options.write_failures:
             try:
                 out = open(options.write_failures, 'w')
                 # Don't write duplicate entries when we are doing multiple
                 # failures per job.
                 written = set()
@@ -563,28 +577,30 @@ def print_test_summary(num_tests, failur
     if options.format == 'automation':
         num_failures = len(failures) if failures else 0
         print('Result summary:')
         print('Passed: {:d}'.format(num_tests - num_failures))
         print('Failed: {:d}'.format(num_failures))
 
     return not failures
 
+
 def create_progressbar(num_tests, options):
     if not options.hide_progress and not options.show_cmd \
        and ProgressBar.conservative_isatty():
         fmt = [
             {'value': 'PASS',    'color': 'green'},
             {'value': 'FAIL',    'color': 'red'},
             {'value': 'TIMEOUT', 'color': 'blue'},
             {'value': 'SKIP',    'color': 'brightgray'},
         ]
         return ProgressBar(num_tests, fmt)
     return NullProgressBar()
 
+
 def process_test_results(results, num_tests, pb, options, slog):
     failures = []
     timeouts = 0
     complete = False
     output_dict = {}
     doing = 'before starting'
 
     if num_tests == 0:
@@ -641,32 +657,34 @@ def process_test_results(results, num_te
         complete = True
     except KeyboardInterrupt:
         print("TEST-UNEXPECTED-FAIL | jit_test.py" +
               " : Test execution interrupted by user")
 
     pb.finish(True)
     return print_test_summary(num_tests, failures, complete, doing, options)
 
+
 def run_tests(tests, num_tests, prefix, options, remote=False):
     slog = None
     if options.format == 'automation':
         slog = TestLogger("jittests")
         slog.suite_start()
 
     if remote:
         ok = run_tests_remote(tests, num_tests, prefix, options, slog)
     else:
         ok = run_tests_local(tests, num_tests, prefix, options, slog)
 
     if slog:
         slog.suite_end()
 
     return ok
 
+
 def run_tests_local(tests, num_tests, prefix, options, slog):
     # The jstests tasks runner requires the following options. The names are
     # taken from the jstests options processing code, which are frequently
     # subtly different from the options jit-tests expects. As such, we wrap
     # them here, as needed.
     AdaptorOptions = namedtuple("AdaptorOptions", [
         "worker_count", "passthrough", "timeout", "output_fp",
         "hide_progress", "run_skipped", "show_cmd"])
@@ -676,51 +694,56 @@ def run_tests_local(tests, num_tests, pr
     # The test runner wants the prefix as a static on the Test class.
     JitTest.js_cmd_prefix = prefix
 
     pb = create_progressbar(num_tests, options)
     gen = run_all_tests(tests, prefix, pb, shim_options)
     ok = process_test_results(gen, num_tests, pb, options, slog)
     return ok
 
+
 def get_remote_results(tests, device, prefix, options):
     try:
         for i in xrange(0, options.repeat):
             for test in tests:
                 yield run_test_remote(test, device, prefix, options)
     except Exception as e:
         # After a device error, the device is typically in a
         # state where all further tests will fail so there is no point in
         # continuing here.
         sys.stderr.write("Error running remote tests: {}".format(e.message))
 
+
 def push_libs(options, device):
     # This saves considerable time in pushing unnecessary libraries
     # to the device but needs to be updated if the dependencies change.
     required_libs = ['libnss3.so', 'libmozglue.so', 'libnspr4.so',
                      'libplc4.so', 'libplds4.so']
 
     for file in os.listdir(options.local_lib):
         if file in required_libs:
             remote_file = posixpath.join(options.remote_test_root, file)
             device.push(os.path.join(options.local_lib, file), remote_file)
             device.chmod(remote_file, root=True)
 
+
 def push_progs(options, device, progs):
     for local_file in progs:
         remote_file = posixpath.join(options.remote_test_root,
                                      os.path.basename(local_file))
         device.push(local_file, remote_file)
         device.chmod(remote_file, root=True)
 
+
 def init_remote_dir(device, path, root=True):
     device.rm(path, recursive=True, force=True, root=root)
     device.mkdir(path, parents=True, root=root)
     device.chmod(path, recursive=True, root=root)
 
+
 def run_tests_remote(tests, num_tests, prefix, options, slog):
     # Setup device with everything needed to run our tests.
     from mozdevice import ADBAndroid
     device = ADBAndroid(device=options.device_serial,
                         test_root=options.remote_test_root)
 
     init_remote_dir(device, options.remote_test_root)
 
@@ -748,24 +771,27 @@ def run_tests_remote(tests, num_tests, p
     prefix[0] = os.path.join(options.remote_test_root, 'js')
 
     # Run all tests.
     pb = create_progressbar(num_tests, options)
     gen = get_remote_results(tests, device, prefix, options)
     ok = process_test_results(gen, num_tests, pb, options, slog)
     return ok
 
+
 def platform_might_be_android():
     try:
         # The python package for SL4A provides an |android| module.
         # If that module is present, we're likely in SL4A-python on
         # device.  False positives and negatives are possible,
         # however.
         import android
         return True
     except ImportError:
         return False
 
+
 def stdio_might_be_broken():
     return platform_might_be_android()
 
+
 if __name__ == '__main__':
     print('Use ../jit-test/jit_test.py to run these tests.')
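
js_quote() above builds a JavaScript string literal one character at a time; here is a trimmed-down copy with a worked example (the path is hypothetical, and QUOTE_MAP is abridged from the full table above):

    QUOTE_MAP = {'\\': '\\\\', '\n': '\\n', '\t': '\\t'}  # abridged

    def js_quote(quote, s):
        result = quote
        for c in s:
            if c == quote:
                result += '\\' + quote
            elif c in QUOTE_MAP:
                result += QUOTE_MAP[c]
            else:
                result += c
        return result + quote

    # Build one of the -e expressions the way JitTest.command() does:
    print("const libdir=" + js_quote("'", "/tmp/a'b\n"))
    # -> const libdir='/tmp/a\'b\n'
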
--- a/js/src/tests/lib/manifest.py
+++ b/js/src/tests/lib/manifest.py
@@ -1,30 +1,33 @@
 # Library for JSTest manifests.
 #
 # This includes classes for representing and parsing JS manifests.
 
 from __future__ import print_function
 
-import os, re, sys
+import os
+import re
+import sys
 from subprocess import Popen, PIPE
 
 from tests import RefTestCase
 
 
 def split_path_into_dirs(path):
     dirs = [path]
 
     while True:
         path, tail = os.path.split(path)
         if not tail:
             break
         dirs.append(path)
     return dirs
 
+
 class XULInfo:
     def __init__(self, abi, os, isdebug):
         self.abi = abi
         self.os = os
         self.isdebug = isdebug
         self.browserIsRemote = False
 
     def as_js(self):
@@ -74,16 +77,17 @@ class XULInfo:
                 if key == 'TARGET_XPCOM_ABI':
                     kw['abi'] = val
                 if key == 'OS_TARGET':
                     kw['os'] = val
                 if key == 'MOZ_DEBUG':
                     kw['isdebug'] = (val == '1')
         return cls(**kw)
 
+
 class XULInfoTester:
     def __init__(self, xulinfo, js_bin):
         self.js_prologue = xulinfo.as_js()
         self.js_bin = js_bin
         # Maps JS expr to evaluation result.
         self.cache = {}
 
     def test(self, cond):
@@ -107,21 +111,24 @@ class XULInfoTester:
                 ans = False
             else:
                 raise Exception("Failed to test XUL condition {!r};"
                                 " output was {!r}, stderr was {!r}".format(
                                     cond, out, err))
             self.cache[cond] = ans
         return ans
 
+
 class NullXULInfoTester:
     """Can be used to parse manifests without a JS shell."""
+
     def test(self, cond):
         return False
 
+
 def _parse_one(testcase, terms, xul_tester):
     pos = 0
     parts = terms.split()
     while pos < len(parts):
         if parts[pos] == 'fails':
             testcase.expect = False
             pos += 1
         elif parts[pos] == 'skip':
@@ -166,16 +173,17 @@ def _parse_one(testcase, terms, xul_test
             # This directive marks the test as module code.
             testcase.is_module = True
             pos += 1
         else:
             print('warning: invalid manifest line element "{}"'.format(
                 parts[pos]))
             pos += 1
 
+
 def _build_manifest_script_entry(script_name, test):
     line = []
     properties = []
     if test.terms:
         # Remove jsreftest internal terms.
         terms = " ".join([term for term in test.terms.split()
                           if not (term == "module" or term.startswith("error:"))])
         if terms:
@@ -191,31 +199,33 @@ def _build_manifest_script_entry(script_
     if properties:
         script = ";".join([script] + properties)
     line.append(script)
     if test.comment:
         line.append("#")
         line.append(test.comment)
     return ' '.join(line)
 
+
 def _map_prefixes_left(test_gen):
     """
     Splits tests into a dictionary keyed on the first component of the test
     path, aggregating tests with a common base path into a list.
     """
     byprefix = {}
     for t in test_gen:
         left, sep, remainder = t.path.partition(os.sep)
         if left not in byprefix:
             byprefix[left] = []
         if remainder:
             t.path = remainder
         byprefix[left].append(t)
     return byprefix
 
+
 def _emit_manifest_at(location, relative, test_gen, depth):
     """
     location  - str: absolute path where we want to write the manifest
     relative  - str: relative path from topmost manifest directory to current
     test_gen  - (str): generator of all test paths and directories
     depth     - int: number of dirs we are below the topmost manifest dir
     """
     manifests = _map_prefixes_left(test_gen)
@@ -227,17 +237,18 @@ def _emit_manifest_at(location, relative
         fullpath = os.path.join(location, k)
         if os.path.isdir(fullpath):
             manifest.append("include " + k + "/jstests.list")
             relpath = os.path.join(relative, k)
             _emit_manifest_at(fullpath, relpath, test_list, depth + 1)
         else:
             numTestFiles += 1
             if len(test_list) != 1:
-                import pdb; pdb.set_trace()
+                import pdb
+                pdb.set_trace()
             assert len(test_list) == 1
             line = _build_manifest_script_entry(k, test_list[0])
             manifest.append(line)
 
     # Always present our manifest in sorted order.
     manifest.sort()
 
     # If we have tests, we have to set the url-prefix so reftest can find them.
@@ -246,40 +257,45 @@ def _emit_manifest_at(location, relative
             '../' * depth, relative)] + manifest
 
     fp = open(filename, 'w')
     try:
         fp.write('\n'.join(manifest) + '\n')
     finally:
         fp.close()
 
+
 def make_manifests(location, test_gen):
     _emit_manifest_at(location, '', test_gen, 0)
 
+
 def _find_all_js_files(location):
     for root, dirs, files in os.walk(location):
         root = root[len(location) + 1:]
         for fn in files:
             if fn.endswith('.js'):
                 yield root, fn
 
+
 TEST_HEADER_PATTERN_INLINE = re.compile(r'//\s*\|(.*?)\|\s*(.*?)\s*(--\s*(.*))?$')
-TEST_HEADER_PATTERN_MULTI  = re.compile(r'/\*\s*\|(.*?)\|\s*(.*?)\s*(--\s*(.*))?\*/')
+TEST_HEADER_PATTERN_MULTI = re.compile(r'/\*\s*\|(.*?)\|\s*(.*?)\s*(--\s*(.*))?\*/')
+
 
 def _append_terms_and_comment(testcase, terms, comment):
     if testcase.terms is None:
         testcase.terms = terms
     else:
         testcase.terms += " " + terms
 
     if testcase.comment is None:
         testcase.comment = comment
     elif comment:
         testcase.comment += "; " + comment
 
+
 def _parse_test_header(fullpath, testcase, xul_tester):
     """
     This looks a bit weird.  The reason is that it needs to be efficient, since
     it has to be done on every test
     """
     fp = open(fullpath, 'r')
     try:
         buf = fp.read(512)
@@ -298,16 +314,17 @@ def _parse_test_header(fullpath, testcas
         matches = TEST_HEADER_PATTERN_MULTI.match(buf)
         if not matches:
             return
 
     testcase.tag = matches.group(1)
     _append_terms_and_comment(testcase, matches.group(2), matches.group(4))
     _parse_one(testcase, matches.group(2), xul_tester)
 
+
 def _parse_external_manifest(filename, relpath):
     """
     Reads an external manifest file for test suites whose individual test cases
     can't be decorated with reftest comments.
     filename - str: name of the manifest file
     relpath - str: relative path of the directory containing the manifest
                    within the test suite
     """
@@ -338,32 +355,34 @@ def _parse_external_manifest(filename, r
             entries.append({'path': path, 'terms': matches.group(1),
                             'comment': comment.strip()})
 
     # if one directory name is a prefix of another, we want the shorter one
     # first
     entries.sort(key=lambda x: x["path"])
     return entries
 
+
 def _apply_external_manifests(filename, testcase, entries, xul_tester):
     for entry in entries:
         if filename.startswith(entry["path"]):
             # The reftest spec would require combining the terms (failure types)
             # that may already be defined in the test case with the terms
             # specified in entry; for example, a skip overrides a random, which
             # overrides a fails. Since we don't necessarily know yet in which
             # environment the test cases will be run, we'd also have to
             # consider skip-if, random-if, and fails-if with as-yet unresolved
             # conditions.
             # At this point, we use external manifests only for test cases
             # that can't have their own failure type comments, so we simply
             # use the terms for the most specific path.
             _append_terms_and_comment(testcase, entry["terms"], entry["comment"])
             _parse_one(testcase, entry["terms"], xul_tester)
 
+
 def _is_test_file(path_from_root, basename, filename, path_options):
     # Any file whose basename matches something in this set is ignored.
     EXCLUDED = set(('browser.js', 'shell.js', 'template.js',
                     'user.js', 'js-test-driver-begin.js', 'js-test-driver-end.js'))
 
     # Skip js files in the root test directory.
     if not path_from_root:
         return False
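
_map_prefixes_left() groups tests by the first path component and strips that component from each grouped path. The same partitioning on bare strings (a sketch with hypothetical paths; it assumes a POSIX os.sep):

    import os

    def map_prefixes_left(paths):
        byprefix = {}
        for path in paths:
            left, _, remainder = path.partition(os.sep)
            byprefix.setdefault(left, []).append(remainder or path)
        return byprefix

    print(map_prefixes_left(['non262/String/a.js', 'non262/Array/b.js', 'shell.js']))
    # {'non262': ['String/a.js', 'Array/b.js'], 'shell.js': ['shell.js']}
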
--- a/js/src/tests/lib/progressbar.py
+++ b/js/src/tests/lib/progressbar.py
@@ -4,42 +4,49 @@ from datetime import datetime, timedelta
 import math
 import sys
 
 if sys.platform.startswith('win'):
     from terminal_win import Terminal
 else:
     from terminal_unix import Terminal
 
+
 class NullProgressBar(object):
     def update(self, current, data): pass
+
     def poke(self): pass
+
     def finish(self, complete=True): pass
+
     def beginline(self): pass
+
     def message(self, msg): sys.stdout.write(msg + '\n')
+
     @staticmethod
     def update_granularity(): return timedelta.max
 
+
 class ProgressBar(object):
     def __init__(self, limit, fmt):
         assert self.conservative_isatty()
 
         self.prior = None
         self.atLineStart = True
-        self.counters_fmt = fmt # [{str:str}] Description of how to lay out each
-                                #             field in the counters map.
-        self.limit = limit # int: The value of 'current' equal to 100%.
-        self.limit_digits = int(math.ceil(math.log10(self.limit))) # int: max digits in limit
-        self.t0 = datetime.now() # datetime: The start time.
+        self.counters_fmt = fmt  # [{str:str}] Description of how to lay out each
+        #             field in the counters map.
+        self.limit = limit  # int: The value of 'current' equal to 100%.
+        self.limit_digits = int(math.ceil(math.log10(self.limit)))  # int: max digits in limit
+        self.t0 = datetime.now()  # datetime: The start time.
 
         # Compute the width of the counters and build the format string.
-        self.counters_width = 1 # [
+        self.counters_width = 1  # [
         for layout in self.counters_fmt:
             self.counters_width += self.limit_digits
-            self.counters_width += 1 # | (or ']' for the last one)
+            self.counters_width += 1  # | (or ']' for the last one)
 
         self.barlen = 64 - self.counters_width
 
     @staticmethod
     def update_granularity():
         return timedelta(seconds=0.1)
 
     def update(self, current, data):
@@ -79,17 +86,17 @@ class ProgressBar(object):
     def poke(self):
         if not self.prior:
             return
         self.update(*self.prior)
 
     def finish(self, complete=True):
         if not self.prior:
             sys.stdout.write('No test run... You can try adding'
-                            ' --run-slow-tests or --run-skipped to run more tests\n')
+                             ' --run-slow-tests or --run-skipped to run more tests\n')
             return
         final_count = self.limit if complete else self.prior[0]
         self.update(final_count, self.prior[1])
         sys.stdout.write('\n')
 
     def beginline(self):
         if not self.atLineStart:
             sys.stdout.write('\n')
--- a/js/src/tests/lib/results.py
+++ b/js/src/tests/lib/results.py
@@ -2,19 +2,22 @@ from __future__ import print_function
 
 import pipes
 import re
 
 from progressbar import NullProgressBar, ProgressBar
 from structuredlog import TestLogger
 
 # subprocess.list2cmdline does not properly escape for sh-like shells
+
+
 def escape_cmdline(args):
     return ' '.join([pipes.quote(a) for a in args])
 
+
 class TestOutput:
     """Output from a test run."""
     def __init__(self, test, cmd, out, err, rc, dt, timed_out, extra=None):
         self.test = test   # Test
         self.cmd = cmd     # str:   command line of test
         self.out = out     # str:   stdout
         self.err = err     # str:   stderr
         self.rc = rc       # int:   return code
@@ -27,33 +30,37 @@ class TestOutput:
             return "Timeout"
         lines = self.err.splitlines()
         for line in lines:
             # Skip the asm.js compilation success message.
             if "Successfully compiled asm.js code" not in line:
                 return line
         return "Unknown"
 
+
 class NullTestOutput:
     """Variant of TestOutput that indicates a test was not run."""
+
     def __init__(self, test):
         self.test = test
         self.cmd = ''
         self.out = ''
         self.err = ''
         self.rc = 0
         self.dt = 0.0
         self.timed_out = False
 
+
 class TestResult:
     PASS = 'PASS'
     FAIL = 'FAIL'
     CRASH = 'CRASH'
 
     """Classified result from a test run."""
+
     def __init__(self, test, result, results):
         self.test = test
         self.result = result
         self.results = results
 
     @classmethod
     def from_output(cls, output):
         test = output.test
@@ -98,21 +105,23 @@ class TestResult:
         else:
             if (rc or passes > 0) and failures == 0:
                 result = cls.PASS
             else:
                 result = cls.FAIL
 
         return cls(test, result, results)
 
+
 class TestDuration:
     def __init__(self, test, duration):
         self.test = test
         self.duration = duration
 
+
 class ResultsSink:
     def __init__(self, testsuite, options, testcount):
         self.options = options
         self.fp = options.output_fp
         if self.options.format == 'automation':
             self.slog = TestLogger(testsuite)
             self.slog.suite_start()
 
@@ -161,28 +170,28 @@ class ResultsSink:
                     self.output_dict[output.test.path] = output
 
             if output.timed_out:
                 dev_label = 'TIMEOUTS'
             self.groups.setdefault(dev_label, []).append(result)
 
             if dev_label == 'REGRESSIONS':
                 show_output = self.options.show_output \
-                              or not self.options.no_show_failed
+                    or not self.options.no_show_failed
             elif dev_label == 'TIMEOUTS':
                 show_output = self.options.show_output
             else:
                 show_output = self.options.show_output \
-                              and not self.options.failed_only
+                    and not self.options.failed_only
 
             if dev_label in ('REGRESSIONS', 'TIMEOUTS'):
                 show_cmd = self.options.show_cmd
             else:
                 show_cmd = self.options.show_cmd \
-                           and not self.options.failed_only
+                    and not self.options.failed_only
 
             if show_output or show_cmd:
                 self.pb.beginline()
 
                 if show_output:
                     print('## {}: rc = {:d}, run time = {}'.format(
                         output.test.path, output.rc, output.dt), file=self.fp)
 
@@ -244,17 +253,17 @@ class ResultsSink:
         (TestResult.FAIL,  False, True):  ('TEST-KNOWN-FAIL (EXPECTED RANDOM)',  ''),
         (TestResult.FAIL,  True,  False): ('TEST-UNEXPECTED-FAIL',               'REGRESSIONS'),
         (TestResult.FAIL,  True,  True):  ('TEST-KNOWN-FAIL (EXPECTED RANDOM)',  ''),
 
         (TestResult.PASS,  False, False): ('TEST-UNEXPECTED-PASS',               'FIXES'),
         (TestResult.PASS,  False, True):  ('TEST-PASS (EXPECTED RANDOM)',        ''),
         (TestResult.PASS,  True,  False): ('TEST-PASS',                          ''),
         (TestResult.PASS,  True,  True):  ('TEST-PASS (EXPECTED RANDOM)',        ''),
-        }
+    }
 
     def list(self, completed):
         for label, results in sorted(self.groups.items()):
             if label == '':
                 continue
 
             print(label)
             for result in results:
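
escape_cmdline() quotes each word for sh-like shells, which subprocess.list2cmdline (built for Windows cmd rules) does not do. A self-contained copy with a hypothetical argv:

    import pipes  # Python 2; on Python 3 use shlex.quote instead

    def escape_cmdline(args):
        return ' '.join(pipes.quote(a) for a in args)

    print(escape_cmdline(['js', '-e', 'print("hi; there")']))
    # -> js -e 'print("hi; there")'
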
--- a/js/src/tests/lib/structuredlog.py
+++ b/js/src/tests/lib/structuredlog.py
@@ -1,15 +1,16 @@
 # produce mozlog-compatible log messages, following the spec at https://mozbase.readthedocs.io/en/latest/mozlog.html
 
 import json
 import os
 
 from time import time
 
+
 class TestLogger(object):
     def __init__(self, source, threadname='main'):
         self.template = {
             'source': source,
             'thread': threadname,
             'pid': os.getpid(),
         }
 
--- a/js/src/tests/lib/tasks_unix.py
+++ b/js/src/tests/lib/tasks_unix.py
@@ -1,28 +1,34 @@
 # A unix-oriented process dispatcher.  Uses a single thread with select and
 # waitpid to dispatch tasks.  This avoids several deadlocks that are possible
 # with fork/exec + threads + Python.
 
-import errno, os, select, signal, sys
+import errno
+import os
+import select
+import signal
+import sys
 from datetime import datetime, timedelta
 from progressbar import ProgressBar
 from results import NullTestOutput, TestOutput, escape_cmdline
 
+
 class Task(object):
     def __init__(self, test, prefix, pid, stdout, stderr):
         self.test = test
         self.cmd = test.get_command(prefix)
         self.pid = pid
         self.stdout = stdout
         self.stderr = stderr
         self.start = datetime.now()
         self.out = []
         self.err = []
 
+
 def spawn_test(test, prefix, passthrough, run_skipped, show_cmd):
     """Spawn one child, return a task struct."""
     if not test.enable and not run_skipped:
         return None
 
     cmd = test.get_command(prefix)
     if show_cmd:
         print(escape_cmdline(cmd))
@@ -43,16 +49,17 @@ def spawn_test(test, prefix, passthrough
         os.close(rout)
         os.close(rerr)
 
         os.dup2(wout, 1)
         os.dup2(werr, 2)
 
     os.execvp(cmd[0], cmd)
 
+
 def get_max_wait(tasks, timeout):
     """
     Return the maximum time we can wait before any task should time out.
     """
 
     # If we have a progress-meter, we need to wake up to update it frequently.
     wait = ProgressBar.update_granularity()
 
@@ -64,16 +71,17 @@ def get_max_wait(tasks, timeout):
         for task in tasks:
             remaining = task.start + timeout_delta - now
             if remaining < wait:
                 wait = remaining
 
     # Return the wait time in seconds, clamped between zero and max_wait.
     return max(wait.total_seconds(), 0)
 
+
 def flush_input(fd, frags):
     """
     Read any pages sitting in the file descriptor 'fd' into the list 'frags'.
     """
     rv = os.read(fd, 4096)
     frags.append(rv)
     while len(rv) == 4096:
         # If read() returns a full buffer, it may indicate there was 1 buffer
@@ -81,24 +89,25 @@ def flush_input(fd, frags):
         # before we read again to ensure that we will not block indefinitly.
         readable, _, _ = select.select([fd], [], [], 0)
         if not readable:
             return
 
         rv = os.read(fd, 4096)
         frags.append(rv)
 
+
 def read_input(tasks, timeout):
     """
     Select on input or errors from the given task list for a max of timeout
     seconds.
     """
     rlist = []
     exlist = []
-    outmap = {} # Fast access to fragment list given fd.
+    outmap = {}  # Fast access to fragment list given fd.
     for t in tasks:
         rlist.append(t.stdout)
         rlist.append(t.stderr)
         outmap[t.stdout] = t.out
         outmap[t.stderr] = t.err
         # This will trigger with a close event when the child dies, allowing
         # us to respond immediately and not leave cores idle.
         exlist.append(t.stdout)
@@ -108,45 +117,48 @@ def read_input(tasks, timeout):
         readable, _, _ = select.select(rlist, [], exlist, timeout)
     except OverflowError as e:
         print >> sys.stderr, "timeout value", timeout
         raise
 
     for fd in readable:
         flush_input(fd, outmap[fd])
 
+
 def remove_task(tasks, pid):
     """
     Return a pair with the removed task and the new, modified tasks list.
     """
     index = None
     for i, t in enumerate(tasks):
         if t.pid == pid:
             index = i
             break
     else:
         raise KeyError("No such pid: {}".format(pid))
 
     out = tasks[index]
     tasks.pop(index)
     return out
 
+
 def timed_out(task, timeout):
     """
     Return a timedelta with the amount we are overdue, or False if the timeout
     has not yet been reached (or timeout is falsy, indicating there is no
     timeout).
     """
     if not timeout:
         return False
 
     elapsed = datetime.now() - task.start
     over = elapsed - timedelta(seconds=timeout)
     return over if over.total_seconds() > 0 else False
 
+
 def reap_zombies(tasks, timeout):
     """
     Search for children of this process that have finished. If they are tasks,
     then this routine will clean up the child. This method returns a new task
     list that has had the ended tasks removed, followed by the list of finished
     tasks.
     """
     finished = []
@@ -177,44 +189,46 @@ def reap_zombies(tasks, timeout):
                 ''.join(ended.out),
                 ''.join(ended.err),
                 returncode,
                 (datetime.now() - ended.start).total_seconds(),
                 timed_out(ended, timeout),
                 {'pid': ended.pid}))
     return tasks, finished
 
+
 def kill_undead(tasks, timeout):
     """
     Signal all children that are over the given timeout. Use SIGABRT first to
     generate a stack dump; if a child still hasn't died after another 30
     seconds, kill it with SIGKILL.
     """
     for task in tasks:
         over = timed_out(task, timeout)
         if over:
             if over.total_seconds() < 30:
                 os.kill(task.pid, signal.SIGABRT)
             else:
                 os.kill(task.pid, signal.SIGKILL)
 
+
 def run_all_tests(tests, prefix, pb, options):
     # Copy and reverse for fast pop off end.
     tests = list(tests)
     tests = tests[:]
     tests.reverse()
 
     # The set of currently running tests.
     tasks = []
 
     while len(tests) or len(tasks):
         while len(tests) and len(tasks) < options.worker_count:
             test = tests.pop()
             task = spawn_test(test, prefix,
-                    options.passthrough, options.run_skipped, options.show_cmd)
+                              options.passthrough, options.run_skipped, options.show_cmd)
             if task:
                 tasks.append(task)
             else:
                 yield NullTestOutput(test)
 
         timeout = get_max_wait(tasks, options.timeout)
         read_input(tasks, timeout)
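
kill_undead()'s two-stage escalation is easiest to see with the arithmetic inlined (times are made up; SIGABRT comes first to get a stack dump, SIGKILL once a task is more than 30 seconds overdue):

    import signal
    from datetime import datetime, timedelta

    def overdue(start, timeout_seconds):
        # Same shape as timed_out() above, on a bare start time.
        over = (datetime.now() - start) - timedelta(seconds=timeout_seconds)
        return over if over.total_seconds() > 0 else False

    start = datetime.now() - timedelta(seconds=45)  # child started 45s ago
    over = overdue(start, 10)                       # 10s timeout -> ~35s overdue
    if over:
        sig = signal.SIGABRT if over.total_seconds() < 30 else signal.SIGKILL
        # ~35s overdue here, so kill_undead would escalate to SIGKILL.
        assert sig == signal.SIGKILL
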
 
--- a/js/src/tests/lib/terminal_unix.py
+++ b/js/src/tests/lib/terminal_unix.py
@@ -1,10 +1,11 @@
 import sys
 
+
 class Terminal(object):
     COLOR = {
         'red': '31',
         'green': '32',
         'blue': '34',
         'gray': '37'
     }
     NORMAL_INTENSITY = '1'
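
The table above holds standard ANSI SGR color parameters; combined with the intensity prefix they form escape sequences like the following (a one-line sketch, not this module's API):

    import sys

    # ESC [ 1 ; 31 m selects bold/bright red; ESC [ 0 m resets attributes.
    sys.stdout.write('\x1b[1;31mFAIL\x1b[0m\n')
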
--- a/js/src/tests/lib/terminal_win.py
+++ b/js/src/tests/lib/terminal_win.py
@@ -9,76 +9,83 @@ GetConsoleScreenBufferInfo.
 $Id: color_console.py 534 2009-05-10 04:00:59Z andre $
 """
 
 from ctypes import windll, Structure, c_short, c_ushort, byref
 
 SHORT = c_short
 WORD = c_ushort
 
+
 class COORD(Structure):
     """struct in wincon.h."""
     _fields_ = [
         ("X", SHORT),
         ("Y", SHORT)]
 
+
 class SMALL_RECT(Structure):
     """struct in wincon.h."""
     _fields_ = [
         ("Left", SHORT),
         ("Top", SHORT),
         ("Right", SHORT),
         ("Bottom", SHORT)]
 
+
 class CONSOLE_SCREEN_BUFFER_INFO(Structure):
     """struct in wincon.h."""
     _fields_ = [
         ("dwSize", COORD),
         ("dwCursorPosition", COORD),
         ("wAttributes", WORD),
         ("srWindow", SMALL_RECT),
         ("dwMaximumWindowSize", COORD)]
 
+
 # winbase.h
 STD_INPUT_HANDLE = -10
 STD_OUTPUT_HANDLE = -11
 STD_ERROR_HANDLE = -12
 
 # wincon.h
-FOREGROUND_BLACK     = 0x0000
-FOREGROUND_BLUE      = 0x0001
-FOREGROUND_GREEN     = 0x0002
-FOREGROUND_CYAN      = 0x0003
-FOREGROUND_RED       = 0x0004
-FOREGROUND_MAGENTA   = 0x0005
-FOREGROUND_YELLOW    = 0x0006
-FOREGROUND_GREY      = 0x0007
-FOREGROUND_INTENSITY = 0x0008 # foreground color is intensified.
+FOREGROUND_BLACK = 0x0000
+FOREGROUND_BLUE = 0x0001
+FOREGROUND_GREEN = 0x0002
+FOREGROUND_CYAN = 0x0003
+FOREGROUND_RED = 0x0004
+FOREGROUND_MAGENTA = 0x0005
+FOREGROUND_YELLOW = 0x0006
+FOREGROUND_GREY = 0x0007
+FOREGROUND_INTENSITY = 0x0008  # foreground color is intensified.
 
-BACKGROUND_BLACK     = 0x0000
-BACKGROUND_BLUE      = 0x0010
-BACKGROUND_GREEN     = 0x0020
-BACKGROUND_CYAN      = 0x0030
-BACKGROUND_RED       = 0x0040
-BACKGROUND_MAGENTA   = 0x0050
-BACKGROUND_YELLOW    = 0x0060
-BACKGROUND_GREY      = 0x0070
-BACKGROUND_INTENSITY = 0x0080 # background color is intensified.
+BACKGROUND_BLACK = 0x0000
+BACKGROUND_BLUE = 0x0010
+BACKGROUND_GREEN = 0x0020
+BACKGROUND_CYAN = 0x0030
+BACKGROUND_RED = 0x0040
+BACKGROUND_MAGENTA = 0x0050
+BACKGROUND_YELLOW = 0x0060
+BACKGROUND_GREY = 0x0070
+BACKGROUND_INTENSITY = 0x0080  # background color is intensified.
 
 stdout_handle = windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
 SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
 GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
 
+
 def get_text_attr():
     csbi = CONSOLE_SCREEN_BUFFER_INFO()
     GetConsoleScreenBufferInfo(stdout_handle, byref(csbi))
     return csbi.wAttributes
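 
 # Attribute words combine the FOREGROUND_* and BACKGROUND_* bits with bitwise
 # OR, e.g. FOREGROUND_RED | FOREGROUND_INTENSITY for bright red text.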
 
+
 DEFAULT_COLORS = get_text_attr()
 
+
 class Terminal(object):
     COLOR = {
         'black': 0x0000,
         'blue': 0x0001,
         'green': 0x0002,
         'cyan': 0x0003,
         'red': 0x0004,
         'magenta': 0x0005,
--- a/js/src/tests/lib/tests.py
+++ b/js/src/tests/lib/tests.py
@@ -1,40 +1,43 @@
 # Library for JSTest tests.
 #
 # This contains classes that represent an individual test, including
 # metadata, and know how to run the tests and determine failures.
 
-import datetime, os, sys, time
+import datetime
+import os
+import sys
+import time
 from contextlib import contextmanager
 from subprocess import Popen, PIPE
 from threading import Thread
 
 from results import TestOutput
 
 # When run on tbpl, we run each test multiple times with the following
 # arguments.
 JITFLAGS = {
     'all': [
-        [], # no flags, normal baseline and ion
-        ['--ion-eager', '--ion-offthread-compile=off'], # implies --baseline-eager
+        [],  # no flags, normal baseline and ion
+        ['--ion-eager', '--ion-offthread-compile=off'],  # implies --baseline-eager
         ['--ion-eager', '--ion-offthread-compile=off',
          '--ion-check-range-analysis', '--ion-extra-checks', '--no-sse3', '--no-threads'],
         ['--baseline-eager'],
         ['--no-baseline', '--no-ion'],
     ],
     # used by jit_test.py
     'ion': [
         ['--baseline-eager'],
         ['--ion-eager', '--ion-offthread-compile=off']
     ],
     # Run reduced variants on debug builds, since they take longer to run.
     'debug': [
-        [], # no flags, normal baseline and ion
-        ['--ion-eager', '--ion-offthread-compile=off'], # implies --baseline-eager
+        [],  # no flags, normal baseline and ion
+        ['--ion-eager', '--ion-offthread-compile=off'],  # implies --baseline-eager
         ['--baseline-eager'],
     ],
     # Cover cases useful for tsan. Note that we test --ion-eager without
     # --ion-offthread-compile=off here, because it helps catch races.
     'tsan': [
         [],
         ['--ion-eager', '--ion-check-range-analysis', '--ion-extra-checks', '--no-sse3'],
         ['--no-baseline', '--no-ion'],
@@ -42,31 +45,34 @@ JITFLAGS = {
     'baseline': [
         ['--no-ion'],
     ],
     # Interpreter-only, for tools that cannot handle binary code generation.
     'interp': [
         ['--no-baseline', '--no-asmjs', '--no-wasm', '--no-native-regexp']
     ],
     'none': [
-        [] # no flags, normal baseline and ion
+        []  # no flags, normal baseline and ion
     ]
 }
 
+
 def get_jitflags(variant, **kwargs):
     if variant not in JITFLAGS:
         print('Invalid jitflag: "{}"'.format(variant))
         sys.exit(1)
     if variant == 'none' and 'none' in kwargs:
         return kwargs['none']
     return JITFLAGS[variant]
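 
 # For example, get_jitflags('ion') returns the two 'ion' variants listed
 # above: [['--baseline-eager'], ['--ion-eager', '--ion-offthread-compile=off']].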
 
+
 def valid_jitflags():
     return JITFLAGS.keys()
 
+
 def get_environment_overlay(js_shell):
     """
     Build a dict of additional environment variables that must be set to run
     tests successfully.
     """
     env = {
         # Force Pacific time zone to avoid failures in Date tests.
         'TZ': 'PST8PDT',
@@ -139,24 +145,25 @@ def get_cpu_count():
     except (KeyError, ValueError):
         pass
 
     return 1
 
 
 class RefTestCase(object):
     """A test case consisting of a test and an expected result."""
+
     def __init__(self, path):
         self.path = path     # str:  path of JS file relative to tests root dir
         self.options = []    # [str]: Extra options to pass to the shell
         self.jitflags = []   # [str]: JIT flags to pass to the shell
         self.test_reflect_stringify = None  # str or None: path to
-                                            # reflect-stringify.js file to test
-                                            # instead of actually running tests
-        self.is_module = False # bool: True => test is module code
+        # reflect-stringify.js file to test
+        # instead of actually running tests
+        self.is_module = False  # bool: True => test is module code
         self.enable = True   # bool: True => run test, False => don't run
         self.error = None    # str?: Optional error type
         self.expect = True   # bool: expected result, True => pass
         self.random = False  # bool: True => ignore output as 'random'
         self.slow = False    # bool: True => test may run slowly
 
         # The terms parsed to produce the above properties.
         self.terms = None
@@ -179,26 +186,25 @@ class RefTestCase(object):
             prefix.append(shell_path)
             prefix.append('-f')
 
         prefix.reverse()
         return prefix
 
     def get_command(self, prefix):
         cmd = prefix + self.jitflags + self.options \
-              + RefTestCase.prefix_command(self.path)
+            + RefTestCase.prefix_command(self.path)
         if self.test_reflect_stringify is not None:
             cmd += [self.test_reflect_stringify, "--check", self.path]
         elif self.is_module:
             cmd += ["--module", self.path]
         else:
             cmd += ["-f", self.path]
         return cmd
 
-
     def __str__(self):
         ans = self.path
         if not self.enable:
             ans += ', skip'
         if self.error is not None:
             ans += ', error=' + self.error
         if not self.expect:
             ans += ', fails'
--- a/js/src/tests/non262/String/make-normalize-generateddata-input.py
+++ b/js/src/tests/non262/String/make-normalize-generateddata-input.py
@@ -3,26 +3,31 @@
 """ Usage: make-normalize-generateddata-input.py PATH_TO_MOZILLA_CENTRAL
 
     This script generates test input data for String.prototype.normalize
     from intl/icu/source/data/unidata/NormalizationTest.txt
     to js/src/tests/non262/String/normalize-generateddata-input.js
 """
 
 from __future__ import print_function
-import re, sys
+import re
+import sys
 
 sep_pat = re.compile(' +')
+
+
 def to_code_list(codes):
     return '[' + ', '.join('0x{0}'.format(x) for x in re.split(sep_pat, codes)) + ']'
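 
 # Renders one space-separated hex field as a JS array literal, e.g.
 # to_code_list('0044 0307') -> '[0x0044, 0x0307]'.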
 
+
 def convert(dir):
     ver_pat = re.compile('NormalizationTest-([0-9\.]+)\.txt')
     part_pat = re.compile('^@(Part([0-9]+) .+)$')
-    test_pat = re.compile('^([0-9A-Fa-f ]+);([0-9A-Fa-f ]+);([0-9A-Fa-f ]+);([0-9A-Fa-f ]+);([0-9A-Fa-f ]+);$')
+    test_pat = re.compile(
+        '^([0-9A-Fa-f ]+);([0-9A-Fa-f ]+);([0-9A-Fa-f ]+);([0-9A-Fa-f ]+);([0-9A-Fa-f ]+);$')
     ignore_pat = re.compile('^#|^$')
     js_path = 'js/src/tests/non262/String/normalize-generateddata-input.js'
     txt_path = 'intl/icu/source/data/unidata/NormalizationTest.txt'
 
     part_opened = False
     not_empty = False
     with open('{dir}/{path}'.format(dir=dir, path=txt_path), 'r') as f:
         with open('{dir}/{path}'.format(dir=dir, path=js_path), 'w') as outf:
@@ -58,13 +63,14 @@ def convert(dir):
                     continue
                 m = ignore_pat.search(line)
                 if m:
                     continue
                 print("Unknown line: {0}".format(line), file=sys.stderr)
             if part_opened:
                 outf.write('\n];\n')
 
+
 if __name__ == '__main__':
     if len(sys.argv) < 2:
         print("Usage: make-normalize-generateddata-input.py PATH_TO_MOZILLA_CENTRAL", file=sys.stderr)
         sys.exit(1)
     convert(sys.argv[1])
--- a/js/src/tests/parsemark.py
+++ b/js/src/tests/parsemark.py
@@ -84,17 +84,17 @@ def bench(shellpath, filepath, warmup_ru
         print('Mean:', mean)
         print('Stddev: {:.2f} ({:.2f}% of mean)'.format(
             sigma, sigma / mean * 100))
     return mean, sigma
 
 
 def parsemark(filepaths, fbench, stfu=False):
     """:param fbench: fbench(filename) -> float"""
-    bench_map = {} # {filename: (avg, stddev)}
+    bench_map = {}  # {filename: (avg, stddev)}
     for filepath in filepaths:
         filename = os.path.split(filepath)[-1]
         if not stfu:
             print('Parsemarking {}...'.format(filename))
         bench_map[filename] = fbench(filepath)
     print('{')
     for i, (filename, (avg, stddev)) in enumerate(bench_map.iteritems()):
         assert '"' not in filename
@@ -148,20 +148,21 @@ def main():
     if options.baseline_path:
         if not os.path.isfile(options.baseline_path):
             print('error: baseline file does not exist', file=sys.stderr)
             return -1
         if not compare_bench:
             print('error: JSON support is missing, cannot compare benchmarks',
                   file=sys.stderr)
             return -1
-    benchfile = lambda filepath: bench(shellpath, filepath,
-                                       options.warmup_runs,
-                                       options.counted_runs,
-                                       stfu=options.stfu)
+
+    def benchfile(filepath): return bench(shellpath, filepath,
+                                          options.warmup_runs,
+                                          options.counted_runs,
+                                          stfu=options.stfu)
     bench_map = parsemark(gen_filepaths(dirpath), benchfile, options.stfu)
     if options.baseline_path:
         compare_bench.compare_immediate(bench_map, options.baseline_path)
     return 0
 
 
 if __name__ == '__main__':
     sys.exit(main())
--- a/js/src/tests/test/run.py
+++ b/js/src/tests/test/run.py
@@ -1,28 +1,36 @@
 #!/usr/bin/env python
 # Adapted from https://github.com/tc39/test262/blob/master/tools/generation/test/run.py
 
-import shutil, subprocess, contextlib, tempfile, sys, os, unittest
+import shutil
+import subprocess
+import contextlib
+import tempfile
+import sys
+import os
+import unittest
 
 testDir = os.path.dirname(os.path.relpath(__file__))
 OUT_DIR = os.path.join(testDir, 'out')
 EXPECTED_DIR = os.path.join(testDir, 'expected')
 ex = os.path.join(testDir, '..', 'test262-export.py')
 importExec = os.path.join(testDir, '..', 'test262-update.py')
 test262Url = 'git://github.com/tc39/test262.git'
 
+
 @contextlib.contextmanager
 def TemporaryDirectory():
     tmpDir = tempfile.mkdtemp()
     try:
         yield tmpDir
     finally:
         shutil.rmtree(tmpDir)
 
+
 class TestExport(unittest.TestCase):
     maxDiff = None
 
     def exportScript(self):
         relpath = os.path.relpath(os.path.join(testDir, 'fixtures', 'export'))
         sp = subprocess.Popen(
             [ex, relpath, '--out', OUT_DIR],
             stdout=subprocess.PIPE)
@@ -46,17 +54,17 @@ class TestExport(unittest.TestCase):
                 ['git', '-C', cloneDir, 'rm',
                     'test/language/export/escaped-from.js']
             )
             # Rename test/language/export/escaped-default.js
             subprocess.check_call(
                 ['git', '-C', cloneDir, 'mv',
                     'test/language/export/escaped-default.js',
                     'test/language/export/escaped-foobarbaz.js',
-                ]
+                 ]
             )
             # Copy fixtures files
             fixturesDir = os.path.join(testDir, 'fixtures', 'import', 'files')
             shutil.copytree(fixturesDir, os.path.join(cloneDir, 'test', 'temp42'))
             # Stage and Commit changes
             subprocess.check_call(['git', '-C', cloneDir, 'add', '.'])
             subprocess.check_call(['git', '-C', cloneDir, 'commit', '-m', '"local foo"'])
 
@@ -124,10 +132,11 @@ class TestExport(unittest.TestCase):
         self.assertEqual(returncode, 0)
         self.compareTrees(os.path.join('import', 'files'))
         self.compareContents(
             output,
             os.path.join(testDir, 'expected', 'import', 'output.txt'),
             folder
         )
 
+
 if __name__ == '__main__':
     unittest.main()
--- a/js/src/tests/test262-export.py
+++ b/js/src/tests/test262-export.py
@@ -16,32 +16,34 @@ import sys
 import yaml
 
 from functools import partial
 from itertools import chain, imap
 
 # Skip all common files used to support tests for jstests
 # These files are listed in the README.txt
 SUPPORT_FILES = set(["browser.js", "shell.js", "template.js", "user.js",
-    "js-test-driver-begin.js", "js-test-driver-end.js"])
+                     "js-test-driver-begin.js", "js-test-driver-end.js"])
 
 FRONTMATTER_WRAPPER_PATTERN = re.compile(
     r'/\*\---\n([\s]*)((?:\s|\S)*)[\n\s*]---\*/', flags=re.DOTALL)
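 
 # The pattern captures the indentation and body of a frontmatter comment of
 # the form:
 #     /*---
 #     description: ...
 #     ---*/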
 
+
 def convertTestFile(source, includes):
     """
     Convert a jstest test to a compatible Test262 test file.
     """
 
     source = convertReportCompare(source)
     source = updateMeta(source, includes)
     source = insertCopyrightLines(source)
 
     return source
 
+
 def convertReportCompare(source):
     """
     Captures all the reportCompare calls and converts them accordingly.
 
     Calls where the arguments are the same and one of 0, true, or null will
     be discarded, as they are not necessary for Test262.
 
     Otherwise, reportCompare will be replaced with assert.sameValue, as the
@@ -60,16 +62,17 @@ def convertReportCompare(source):
     newSource = re.sub(
         r'.*reportCompare\s*\(\s*(\w*)\s*,\s*(\w*)\s*(,\s*\S*)?\s*\)\s*;*\s*',
         replaceFn,
         source
     )
 
     return re.sub(r'\breportCompare\b', "assert.sameValue", newSource)
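 
 # For instance, "reportCompare(0, 0);" is discarded entirely, while
 # "reportCompare(expected, actual);" becomes
 # "assert.sameValue(expected, actual);".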
 
+
 def fetchReftestEntries(reftest):
     """
     Collects and stores the entries from the reftest header.
     """
 
     # TODO: fails, slow, skip, random, random-if
 
     features = []
@@ -109,16 +112,17 @@ def fetchReftestEntries(reftest):
 
     return {
         "features": features,
         "error": error,
         "module": module,
         "info": comments
     }
 
+
 def parseHeader(source):
     """
     Parse the source and return it with the header extracted.
     """
     from lib.manifest import TEST_HEADER_PATTERN_INLINE
 
     # Bail early if we do not start with a single comment.
     if not source.startswith("//"):
@@ -132,32 +136,34 @@ def parseHeader(source):
         reftest = matches.group(0)
 
         # Remove the found header from the source;
         # Fetch and return the reftest entries
         return (source.replace(reftest + "\n", ""), fetchReftestEntries(reftest))
 
     return (source, {})
 
+
 def extractMeta(source):
     """
     Capture the frontmatter metadata as yaml if it exists.
     Returns an empty dict if it doesn't.
     """
 
     match = FRONTMATTER_WRAPPER_PATTERN.search(source)
     if not match:
         return {}
 
     indent, frontmatter_lines = match.groups()
 
     unindented = re.sub('^%s' % indent, '', frontmatter_lines)
 
     return yaml.safe_load(unindented)
 
+
 def updateMeta(source, includes):
     """
     Captures the reftest meta and any pre-existing meta and merges them into
     a single dict.
     """
 
     # Extract the reftest data from the source
     source, reftest = parseHeader(source)
@@ -193,25 +199,26 @@ def cleanupMeta(meta):
     for tag in ("features", "flags", "includes"):
         if tag in meta:
             # We need the list back for the yaml dump
             meta[tag] = list(set(meta[tag]))
 
     if "negative" in meta:
         # If the negative tag exists, phase needs to be present and set
         if meta["negative"].get("phase") not in ("early", "runtime"):
-            print("Warning: the negative.phase is not properly set.\n" + \
-                "Ref https://github.com/tc39/test262/blob/master/INTERPRETING.md#negative")
+            print("Warning: the negative.phase is not properly set.\n" +
+                  "Ref https://github.com/tc39/test262/blob/master/INTERPRETING.md#negative")
         # If the negative tag exists, type is required
         if "type" not in meta["negative"]:
-            print("Warning: the negative.type is not set.\n" + \
-                "Ref https://github.com/tc39/test262/blob/master/INTERPRETING.md#negative")
+            print("Warning: the negative.type is not set.\n" +
+                  "Ref https://github.com/tc39/test262/blob/master/INTERPRETING.md#negative")
 
     return meta
 
+
 def mergeMeta(reftest, frontmatter, includes):
     """
     Merge the metadata from reftest and an existing frontmatter and populate
     required frontmatter fields properly.
     """
 
     # Merge the meta from reftest to the frontmatter
 
@@ -242,71 +249,72 @@ def mergeMeta(reftest, frontmatter, incl
                 # From this point, this code can also print a warning asking to
                 # specify the error phase in the generated code or fill the
                 # phase with an empty string.
                 "phase": "early",
                 "type": error
             }
         # Print a warning if the errors don't match
         elif frontmatter["negative"].get("type") != error:
-            print("Warning: The reftest error doesn't match the existing " + \
-                "frontmatter error. %s != %s" % (error,
-                frontmatter["negative"]["type"]))
+            print("Warning: The reftest error doesn't match the existing " +
+                  "frontmatter error. %s != %s" % (error,
+                                                   frontmatter["negative"]["type"]))
 
     # Add the shell specific includes
     if includes:
         frontmatter["includes"] = list(includes)
 
     return frontmatter
 
+
 def insertCopyrightLines(source):
     """
     Insert the copyright lines into the file.
     """
     from datetime import date
 
     lines = []
 
     if not re.match(r'\/\/\s+Copyright.*\. All rights reserved.', source):
         year = date.today().year
         lines.append("// Copyright (C) %s Mozilla Corporation. All rights reserved." % year)
         lines.append("// This code is governed by the BSD license found in the LICENSE file.")
         lines.append("\n")
 
     return "\n".join(lines) + source
 
+
 def insertMeta(source, frontmatter):
     """
     Insert the formatted frontmatter into the file, reusing the existing
     space if any.
     """
     lines = []
 
     lines.append("/*---")
 
     for (key, value) in frontmatter.items():
         if key in ("description", "info"):
             lines.append("%s: |" % key)
             lines.append("  " + yaml.dump(value, encoding="utf8",
-                ).strip().replace('\n...', ''))
+                                          ).strip().replace('\n...', ''))
         else:
             lines.append(yaml.dump({key: value}, encoding="utf8",
-                default_flow_style=False).strip())
+                                   default_flow_style=False).strip())
 
     lines.append("---*/")
 
     match = FRONTMATTER_WRAPPER_PATTERN.search(source)
 
     if match:
         return source.replace(match.group(0), "\n".join(lines))
     else:
         return "\n".join(lines) + source
 
 
-
 def findAndCopyIncludes(dirPath, baseDir, includeDir):
     relPath = os.path.relpath(dirPath, baseDir)
     includes = []
 
     # Recurse down all folders in the relative path until
     # we reach the base directory of shell.js include files.
     # Each directory will have a shell.js file to copy.
     while (relPath):
@@ -322,29 +330,29 @@ def findAndCopyIncludes(dirPath, baseDir
             # If the file exists, add it to the includes list.
             includes.append(includeFileName)
 
             if not os.path.exists(includesPath):
                 shutil.copyfile(shellFile, includesPath)
 
         relPath = os.path.split(relPath)[0]
 
-
     shellFile = os.path.join(baseDir, "shell.js")
     includesPath = os.path.join(includeDir, "shell.js")
     if not os.path.exists(includesPath):
         shutil.copyfile(shellFile, includesPath)
 
     includes.append("shell.js")
 
     if not os.path.exists(includesPath):
         shutil.copyfile(shellFile, includesPath)
 
     return includes
 
+
 def exportTest262(args):
 
     outDir = os.path.abspath(args.out)
     providedSrcs = args.src
     includeShell = args.exportshellincludes
     baseDir = os.getcwd()
 
     # Create the output directory from scratch.
@@ -379,21 +387,21 @@ def exportTest262(args):
             currentOutDir = os.path.join(outDir, "tests", fullRelPath)
 
             # This also creates the own outDir folder
             if not os.path.exists(currentOutDir):
                 os.makedirs(currentOutDir)
 
             for fileName in fileNames:
                 # Skip browser.js files
-                if fileName == "browser.js" or fileName == "shell.js" :
+                if fileName == "browser.js" or fileName == "shell.js":
                     continue
 
                 filePath = os.path.join(dirPath, fileName)
-                testName = os.path.join(fullRelPath, fileName) # captures folder(s)+filename
+                testName = os.path.join(fullRelPath, fileName)  # captures folder(s)+filename
 
                 # Copy non-test files as is.
                 (_, fileExt) = os.path.splitext(fileName)
                 if fileExt != ".js":
                     shutil.copyfile(filePath, os.path.join(currentOutDir, fileName))
                     print("C %s" % testName)
                     continue
 
@@ -407,24 +415,25 @@ def exportTest262(args):
 
                 newSource = convertTestFile(testSource, includes)
 
                 with open(os.path.join(currentOutDir, fileName), "wb") as output:
                     output.write(newSource)
 
                 print("SAVED %s" % testName)
 
+
 if __name__ == "__main__":
     import argparse
 
     # This script must be run from js/src/tests to work correctly.
     if "/".join(os.path.normpath(os.getcwd()).split(os.sep)[-3:]) != "js/src/tests":
         raise RuntimeError("%s must be run from js/src/tests" % sys.argv[0])
 
     parser = argparse.ArgumentParser(description="Export tests to match Test262 file compliance.")
     parser.add_argument("--out", default="test262/export",
                         help="Output directory. Any existing directory will be removed! (default: %(default)s)")
     parser.add_argument("--exportshellincludes", action="store_true",
-                         help="Optionally export shell.js files as includes in exported tests. Only use for testing, do not use for exporting to test262 (test262 tests should have as few dependencies as possible).")
+                        help="Optionally export shell.js files as includes in exported tests. Only use for testing, do not use for exporting to test262 (test262 tests should have as few dependencies as possible).")
     parser.add_argument("src", nargs="+", help="Source folder with test files to export")
     parser.set_defaults(func=exportTest262)
     args = parser.parse_args()
     args.func(args)
--- a/js/src/tests/test262-update.py
+++ b/js/src/tests/test262-update.py
@@ -14,74 +14,79 @@ import tempfile
 import shutil
 import sys
 
 from functools import partial
 from itertools import chain
 
 # Skip all tests which use features not supported in SpiderMonkey.
 UNSUPPORTED_FEATURES = set([
-                            "tail-call-optimization",
-                            "class-fields-public",
-                            "class-fields-private",
-                            "regexp-dotall",
-                            "regexp-lookbehind",
-                            "regexp-named-groups",
-                            "regexp-unicode-property-escapes",
-                            "numeric-separator-literal",
-                            "Intl.Locale",
-                            "String.prototype.matchAll",
-                            "Symbol.matchAll",
-                       ])
+    "tail-call-optimization",
+    "class-fields-public",
+    "class-fields-private",
+    "regexp-dotall",
+    "regexp-lookbehind",
+    "regexp-named-groups",
+    "regexp-unicode-property-escapes",
+    "numeric-separator-literal",
+    "json-superset",
+    "Intl.Locale",
+    "String.prototype.matchAll",
+    "Symbol.matchAll",
+])
 FEATURE_CHECK_NEEDED = {
-                         "Atomics": "!this.hasOwnProperty('Atomics')",
-                         "BigInt": "!this.hasOwnProperty('BigInt')",
-                         "SharedArrayBuffer": "!this.hasOwnProperty('SharedArrayBuffer')",
-                       }
+    "Atomics": "!this.hasOwnProperty('Atomics')",
+    "BigInt": "!this.hasOwnProperty('BigInt')",
+    "SharedArrayBuffer": "!this.hasOwnProperty('SharedArrayBuffer')",
+}
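+
+# Each entry maps a feature name to a JS expression used in skip-if entries,
+# so the test is skipped on builds where the feature is unavailable.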
 RELEASE_OR_BETA = set()
 
+
 @contextlib.contextmanager
 def TemporaryDirectory():
     tmpDir = tempfile.mkdtemp()
     try:
         yield tmpDir
     finally:
         shutil.rmtree(tmpDir)
 
+
 def loadTest262Parser(test262Dir):
     """
     Loads the test262 test record parser.
     """
     import imp
 
     fileObj = None
     try:
         moduleName = "parseTestRecord"
         packagingDir = os.path.join(test262Dir, "tools", "packaging")
         (fileObj, pathName, description) = imp.find_module(moduleName, [packagingDir])
         return imp.load_module(moduleName, fileObj, pathName, description)
     finally:
         if fileObj:
             fileObj.close()
 
+
 def tryParseTestFile(test262parser, source, testName):
     """
     Returns the result of test262parser.parseTestRecord() or None if a parser
     error occurred.
 
     See <https://github.com/tc39/test262/blob/master/INTERPRETING.md> for an
     overview of the returned test attributes.
     """
     try:
         return test262parser.parseTestRecord(source, testName)
     except Exception as err:
         print("Error '%s' in file: %s" % (err, testName), file=sys.stderr)
         print("Please report this error to the test262 GitHub repository!")
         return None
 
+
 def createRefTestEntry(skip, skipIf, error, isModule):
     """
     Creates the |reftest| entry from the input list, or the empty string if
     no reftest entry is required.
     """
 
     terms = []
     comments = []
@@ -101,16 +106,17 @@ def createRefTestEntry(skip, skipIf, err
         terms.append("module")
 
     line = " ".join(terms)
     if comments:
         line += " -- " + ", ".join(comments)
 
     return line
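 
 # The returned line is a space-separated term list, e.g. something like
 # "skip-if(!this.hasOwnProperty('Atomics')) module", optionally followed by
 # " -- " and comma-separated comments.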
 
+
 def createSource(testSource, refTest, prologue, epilogue):
     """
     Returns the post-processed source for |testSource|.
     """
 
     source = []
 
     # Add the |reftest| line.
@@ -126,28 +132,31 @@ def createSource(testSource, refTest, pr
     # Append the test epilogue, i.e. the call to "reportCompare".
     # TODO: Does this conflict with raw tests?
     if epilogue:
         source.append(epilogue.encode("utf-8"))
         source.append(b"")
 
     return b"\n".join(source)
 
+
 def writeTestFile(test262OutDir, testFileName, source):
     """
     Writes the test source to |test262OutDir|.
     """
 
     with io.open(os.path.join(test262OutDir, testFileName), "wb") as output:
         output.write(source)
 
+
 def addSuffixToFileName(fileName, suffix):
     (filePath, ext) = os.path.splitext(fileName)
     return filePath + suffix + ext
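 
 # e.g. addSuffixToFileName('language/foo.js', '-strict') -> 'language/foo-strict.js'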
 
+
 def writeShellAndBrowserFiles(test262OutDir, harnessDir, includesMap, localIncludesMap, relPath):
     """
     Generate the shell.js and browser.js files for the test harness.
     """
 
     # Find all includes from parent directories.
     def findParentIncludes():
         parentIncludes = set()
@@ -184,23 +193,26 @@ def writeShellAndBrowserFiles(test262Out
     # Write the concatenated include sources to shell.js.
     with io.open(os.path.join(test262OutDir, relPath, "shell.js"), "wb") as shellFile:
         shellFile.write(includeSource)
 
     # The browser.js file is always empty for test262 tests.
     with io.open(os.path.join(test262OutDir, relPath, "browser.js"), "wb") as browserFile:
         browserFile.write(b"")
 
+
 def pathStartsWith(path, *args):
     prefix = os.path.join(*args)
     return os.path.commonprefix([path, prefix]) == prefix
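 
 # e.g. pathStartsWith('built-ins/Atomics/wait.js', 'built-ins', 'Atomics') is
 # true: the joined prefix path is compared with os.path.commonprefix.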
 
+
 def fileNameEndsWith(filePath, suffix):
     return os.path.splitext(os.path.basename(filePath))[0].endswith(suffix)
 
+
 def convertTestFile(test262parser, testSource, testName, includeSet, strictTests):
     """
     Convert a test262 test to a compatible jstests test file.
     """
 
     # The test record dictionary, its contents are explained in depth at
     # <https://github.com/tc39/test262/blob/master/INTERPRETING.md>.
     testRec = tryParseTestFile(test262parser, testSource.decode("utf-8"), testName)
@@ -284,17 +296,17 @@ def convertTestFile(test262parser, testS
     else:
         testEpilogue = ""
 
     refTest = createRefTestEntry(refTestSkip, refTestSkipIf, errorType, isModule)
 
     # Don't write a strict-mode variant for raw, module or support files.
     noStrictVariant = raw or isModule or isSupportFile
     assert not (noStrictVariant and (onlyStrict or noStrict)),\
-           "Unexpected onlyStrict or noStrict attribute: %s" % testName
+        "Unexpected onlyStrict or noStrict attribute: %s" % testName
 
     # Write non-strict mode test.
     if noStrictVariant or noStrict or not onlyStrict:
         testPrologue = ""
         nonStrictSource = createSource(testSource, refTest, testPrologue, testEpilogue)
         testFileName = testName
         yield (testFileName, nonStrictSource)
 
@@ -302,16 +314,17 @@ def convertTestFile(test262parser, testS
     if not noStrictVariant and (onlyStrict or (not noStrict and strictTests)):
         testPrologue = "'use strict';"
         strictSource = createSource(testSource, refTest, testPrologue, testEpilogue)
         testFileName = testName
         if not noStrict:
             testFileName = addSuffixToFileName(testFileName, "-strict")
         yield (testFileName, strictSource)
 
+
 def process_test262(test262Dir, test262OutDir, strictTests):
     """
     Process all test262 files and convert them into jstests-compatible tests.
     """
 
     harnessDir = os.path.join(test262Dir, "harness")
     testDir = os.path.join(test262Dir, "test")
     test262parser = loadTest262Parser(test262Dir)
@@ -336,21 +349,21 @@ def process_test262(test262Dir, test262O
 
     # Write the root shell.js file.
     writeShellAndBrowserFiles(test262OutDir, harnessDir, includesMap, localIncludesMap, "")
 
     # Additional explicit includes inserted at well-chosen locations to reduce
     # code duplication in shell.js files.
     explicitIncludes = {}
     explicitIncludes[os.path.join("built-ins", "Atomics")] = ["testAtomics.js",
-        "testTypedArray.js"]
+                                                              "testTypedArray.js"]
     explicitIncludes[os.path.join("built-ins", "DataView")] = ["byteConversionValues.js"]
     explicitIncludes[os.path.join("built-ins", "Promise")] = ["promiseHelper.js"]
     explicitIncludes[os.path.join("built-ins", "TypedArray")] = ["byteConversionValues.js",
-        "detachArrayBuffer.js", "nans.js"]
+                                                                 "detachArrayBuffer.js", "nans.js"]
     explicitIncludes[os.path.join("built-ins", "TypedArrays")] = ["detachArrayBuffer.js"]
 
     # Process all test directories recursively.
     for (dirPath, dirNames, fileNames) in os.walk(testDir):
         relPath = os.path.relpath(dirPath, testDir)
         if relPath == ".":
             continue
 
@@ -378,17 +391,19 @@ def process_test262(test262Dir, test262O
             # Read the original test source and preprocess it for the jstests harness.
             with io.open(filePath, "rb") as testFile:
                 testSource = testFile.read()
 
             for (newFileName, newSource) in convertTestFile(test262parser, testSource, testName, includeSet, strictTests):
                 writeTestFile(test262OutDir, newFileName, newSource)
 
         # Add shell.js and browser.js files for the current directory.
-        writeShellAndBrowserFiles(test262OutDir, harnessDir, includesMap, localIncludesMap, relPath)
+        writeShellAndBrowserFiles(test262OutDir, harnessDir,
+                                  includesMap, localIncludesMap, relPath)
+
 
 def fetch_local_changes(inDir, outDir, srcDir, strictTests):
     """
     Fetch the changes from a local clone of Test262.
 
     1. Get the list of file changes made by the current branch used on Test262 (srcDir).
     2. Copy only the (A)dded, (C)opied, (M)odified, and (R)enamed files to inDir.
     3. inDir is treated like a Test262 checkout, where files will be converted.
@@ -467,16 +482,17 @@ def fetch_local_changes(inDir, outDir, s
     # Reset any older directory in the output using the same branch name
     outDir = os.path.join(outDir, "local", branchName)
     if os.path.isdir(outDir):
         shutil.rmtree(outDir)
     os.makedirs(outDir)
 
     process_test262(inDir, outDir, strictTests)
 
+
 def fetch_pr_files(inDir, outDir, prNumber, strictTests):
     import requests
 
     prTestsOutDir = os.path.join(outDir, "prs", prNumber)
     if os.path.isdir(prTestsOutDir):
         print("Removing folder %s" % prTestsOutDir)
         shutil.rmtree(prTestsOutDir)
     os.makedirs(prTestsOutDir)
@@ -520,16 +536,17 @@ def fetch_pr_files(inDir, outDir, prNumb
         if not os.path.isdir(filePathDirs):
             os.makedirs(filePathDirs)
 
         with io.open(os.path.join(inDir, *filename.split("/")), "wb") as output_file:
             output_file.write(fileText.encode('utf8'))
 
     process_test262(inDir, prTestsOutDir, strictTests)
 
+
 def general_update(inDir, outDir, strictTests):
     import subprocess
 
     restoreLocalTestsDir = False
     restorePrsTestsDir = False
     localTestsOutDir = os.path.join(outDir, "local")
     prsTestsOutDir = os.path.join(outDir, "prs")
 
@@ -562,16 +579,17 @@ def general_update(inDir, outDir, strict
     # Move test262/local back.
     if restoreLocalTestsDir:
         shutil.move(os.path.join(inDir, "local"), outDir)
 
     # Restore test262/prs if necessary after a general Test262 update.
     if restorePrsTestsDir:
         shutil.move(os.path.join(inDir, "prs"), outDir)
 
+
 def update_test262(args):
     import subprocess
 
     url = args.url
     branch = args.branch
     revision = args.revision
     outDir = args.out
     prNumber = args.pull
@@ -584,45 +602,50 @@ def update_test262(args):
 
     # Download the requested branch in a temporary directory.
     with TemporaryDirectory() as inDir:
         # If it's a local import, skip the git clone parts.
         if srcDir:
             return fetch_local_changes(inDir, outDir, srcDir, strictTests)
 
         if revision == "HEAD":
-            subprocess.check_call(["git", "clone", "--depth=1", "--branch=%s" % branch, url, inDir])
+            subprocess.check_call(["git", "clone", "--depth=1",
+                                   "--branch=%s" % branch, url, inDir])
         else:
-            subprocess.check_call(["git", "clone", "--single-branch", "--branch=%s" % branch, url, inDir])
+            subprocess.check_call(["git", "clone", "--single-branch",
+                                   "--branch=%s" % branch, url, inDir])
             subprocess.check_call(["git", "-C", inDir, "reset", "--hard", revision])
 
         # If a PR number is provided, fetches only the new and modified files
         # from that PR. It also creates a new folder for that PR, or replaces
         # it if it already exists, without updating the regular Test262 tests.
         if prNumber:
             return fetch_pr_files(inDir, outDir, prNumber, strictTests)
 
         # Without a PR or a local import, proceed with a regular update.
         general_update(inDir, outDir, strictTests)
 
+
 if __name__ == "__main__":
     import argparse
 
     # This script must be run from js/src/tests to work correctly.
     if "/".join(os.path.normpath(os.getcwd()).split(os.sep)[-3:]) != "js/src/tests":
         raise RuntimeError("%s must be run from js/src/tests" % sys.argv[0])
 
     parser = argparse.ArgumentParser(description="Update the test262 test suite.")
     parser.add_argument("--url", default="git://github.com/tc39/test262.git",
                         help="URL to git repository (default: %(default)s)")
     parser.add_argument("--branch", default="master",
                         help="Git branch (default: %(default)s)")
     parser.add_argument("--revision", default="HEAD",
                         help="Git revision (default: %(default)s)")
     parser.add_argument("--out", default="test262",
                         help="Output directory. Any existing directory will be removed! (default: %(default)s)")
-    parser.add_argument("--pull", help="Import contents from a Pull Request specified by its number")
-    parser.add_argument("--local", help="Import new and modified contents from a local folder, a new folder will be created on local/branch_name")
+    parser.add_argument(
+        "--pull", help="Import contents from a Pull Request specified by its number")
+    parser.add_argument(
+        "--local", help="Import new and modified contents from a local folder, a new folder will be created on local/branch_name")
     parser.add_argument("--strict", default=False, action="store_true",
                         help="Generate additional strict mode tests. Not enabled by default.")
     parser.set_defaults(func=update_test262)
     args = parser.parse_args()
     args.func(args)
--- a/js/src/util/make_unicode.py
+++ b/js/src/util/make_unicode.py
@@ -35,41 +35,43 @@ from zipfile import ZipFile
 if sys.version_info.major == 2:
     from itertools import ifilter as filter, imap as map, izip_longest as zip_longest
     from urllib2 import urlopen
     range = xrange
 else:
     from itertools import zip_longest
     from urllib.request import urlopen
 
+
 class codepoint_dict(dict):
     def name(self, code_point):
         (_, _, name, alias) = self[code_point]
         return '{}{}'.format(name, (' (' + alias + ')' if alias else ''))
 
     def full_name(self, code_point):
         (_, _, name, alias) = self[code_point]
         return 'U+{:04X} {}{}'.format(code_point, name, (' (' + alias + ')' if alias else ''))
 
+
 # ECMAScript 2016
 # §11.2 White Space
 whitespace = [
     # python doesn't support using control character names :(
-    0x9, # CHARACTER TABULATION
-    0xb, # LINE TABULATION
-    0xc, # FORM FEED
+    0x9,  # CHARACTER TABULATION
+    0xb,  # LINE TABULATION
+    0xc,  # FORM FEED
     ord(u'\N{SPACE}'),
     ord(u'\N{NO-BREAK SPACE}'),
-    ord(u'\N{ZERO WIDTH NO-BREAK SPACE}'), # also BOM
+    ord(u'\N{ZERO WIDTH NO-BREAK SPACE}'),  # also BOM
 ]
 
 # §11.3 Line Terminators
 line_terminator = [
-    0xa, # LINE FEED
-    0xd, # CARRIAGE RETURN
+    0xa,  # LINE FEED
+    0xd,  # CARRIAGE RETURN
     ord(u'\N{LINE SEPARATOR}'),
     ord(u'\N{PARAGRAPH SEPARATOR}'),
 ]
 
 # These are also part of IdentifierPart §11.6 Names and Keywords
 compatibility_identifier_part = [
     ord(u'\N{ZERO WIDTH NON-JOINER}'),
     ord(u'\N{ZERO WIDTH JOINER}'),
@@ -99,16 +101,17 @@ mpl_license = """\
 warning_message = """\
 /* Generated by make_unicode.py DO NOT MODIFY */
 """
 
 unicode_version_message = """\
 /* Unicode version: {0} */
 """
 
+
 def read_unicode_data(unicode_data):
     """
         If you want to understand how this wonderful file format works, check out
           Unicode Standard Annex #44 - Unicode Character Database
           http://www.unicode.org/reports/tr44/
     """
 
     reader = csv.reader(unicode_data, delimiter=str(';'))
@@ -125,41 +128,44 @@ def read_unicode_data(unicode_data):
                 row[0] = i
                 row[1] = name[1:-8]
 
                 yield row
         else:
             row[0] = int(row[0], 16)
             yield row
 
+
 def read_case_folding(case_folding):
     for line in case_folding:
         if line == '\n' or line.startswith('#'):
             continue
         row = line.split('; ')
         if row[1] in ['F', 'T']:
             continue
         row[0] = int(row[0], 16)
         row[2] = int(row[2], 16)
         yield row
 
+
 def read_derived_core_properties(derived_core_properties):
     for line in derived_core_properties:
         if line == '\n' or line.startswith('#'):
             continue
         row = line.split('#')[0].split(';')
         char_range = row[0].strip()
         char_property = row[1].strip()
         if '..' not in char_range:
             yield (int(char_range, 16), char_property)
         else:
             [start, end] = char_range.split('..')
             for char in range(int(start, 16), int(end, 16) + 1):
                 yield (char, char_property)
 
+
 def read_special_casing(special_casing):
     # Format:
     # <code>; <lower>; <title>; <upper>; (<condition_list>;)? # <comment>
     for line in special_casing:
         if line == '\n' or line.startswith('#'):
             continue
         row = line.split('#')[0].split(';')
         code = int(row[0].strip(), 16)
@@ -174,46 +180,49 @@ def read_special_casing(special_casing):
             for cond in condition.split(' '):
                 if cond[0].islower():
                     languages.append(cond)
                 else:
                     contexts.append(cond)
             pass
         yield (code, lower, upper, languages, contexts)
 
+
 def int_ranges(ints):
     """ Yields consecutive ranges (inclusive) from integer values. """
     (a, b) = tee(sorted(ints))
     start = next(b)
     for (curr, succ) in zip_longest(a, b):
         if curr + 1 != succ:
             yield (start, curr)
             start = succ
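 
 # e.g. int_ranges([1, 2, 3, 7, 8, 10]) yields (1, 3), (7, 8), (10, 10).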
 
+
 def utf16_encode(code):
     NonBMPMin = 0x10000
     LeadSurrogateMin = 0xD800
     TrailSurrogateMin = 0xDC00
 
     lead = (code - NonBMPMin) // 1024 + LeadSurrogateMin
     trail = ((code - NonBMPMin) % 1024) + TrailSurrogateMin
 
     return lead, trail
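 
 # Worked example: U+1F600 encodes as the surrogate pair (0xD83D, 0xDE00):
 # lead = (0x1F600 - 0x10000) // 1024 + 0xD800 = 0xD83D and
 # trail = (0x1F600 - 0x10000) % 1024 + 0xDC00 = 0xDE00.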
 
+
 def make_non_bmp_convert_macro(out_file, name, convert_map, codepoint_table):
     # Find continuous range in convert_map.
     convert_list = []
     entry = None
     for code in sorted(convert_map.keys()):
         lead, trail = utf16_encode(code)
         converted = convert_map[code]
         diff = converted - code
 
         if (entry and code == entry['code'] + entry['length'] and
-            diff == entry['diff'] and lead == entry['lead']):
+                diff == entry['diff'] and lead == entry['lead']):
             entry['length'] += 1
             continue
 
         entry = {
             'code': code,
             'diff': diff,
             'length': 1,
             'lead': lead,
@@ -239,28 +248,30 @@ def make_non_bmp_convert_macro(out_file,
                                             codepoint_table.full_name(to_code)))
 
     out_file.write('\n'.join(comment))
     out_file.write('\n')
     out_file.write('#define FOR_EACH_NON_BMP_{}(macro) \\\n'.format(name))
     out_file.write(' \\\n'.join(lines))
     out_file.write('\n')
 
+
 def process_derived_core_properties(derived_core_properties):
     id_start = set()
     id_continue = set()
 
     for (char, prop) in read_derived_core_properties(derived_core_properties):
         if prop == 'ID_Start':
             id_start.add(char)
         if prop == 'ID_Continue':
             id_continue.add(char)
 
     return (id_start, id_continue)
 
+
 def process_unicode_data(unicode_data, derived_core_properties):
     dummy = (0, 0, 0)
     table = [dummy]
     cache = {dummy: 0}
     index = [0] * (MAX_BMP + 1)
     same_upper_map = {}
     same_upper_dummy = (0, 0, 0)
     same_upper_table = [same_upper_dummy]
@@ -387,16 +398,17 @@ def process_unicode_data(unicode_data, d
         table, index,
         same_upper_table, same_upper_index,
         non_bmp_lower_map, non_bmp_upper_map,
         non_bmp_space_set,
         non_bmp_id_start_set, non_bmp_id_cont_set,
         codepoint_table, test_space_table,
     )
 
+
 def process_case_folding(case_folding):
     folding_map = {}
     rev_folding_map = {}
     folding_dummy = (0, 0, 0, 0)
     folding_table = [folding_dummy]
     folding_cache = {folding_dummy: 0}
     folding_index = [0] * (MAX_BMP + 1)
 
@@ -468,16 +480,17 @@ def process_case_folding(case_folding):
             folding_table.append(item)
         folding_index[code] = i
     return (
         folding_table, folding_index,
         non_bmp_folding_map, non_bmp_rev_folding_map,
         folding_tests
     )
 
+
 def process_special_casing(special_casing, table, index):
     # Unconditional special casing.
     unconditional_tolower = {}
     unconditional_toupper = {}
 
     # Conditional special casing, language independent.
     conditional_tolower = {}
     conditional_toupper = {}
@@ -581,46 +594,51 @@ def process_special_casing(special_casin
     assert is_equals(["az", "lt", "tr"], sorted(lang_conditional_toupper.keys()))
 
     # Maximum case mapping length is three characters.
     assert max(map(len, chain(
         unconditional_tolower.values(),
         unconditional_toupper.values(),
         map(itemgetter(0), conditional_tolower.values()),
         map(itemgetter(0), conditional_toupper.values()),
-        map(itemgetter(0), chain.from_iterable(d.values() for d in lang_conditional_tolower.values())),
-        map(itemgetter(0), chain.from_iterable(d.values() for d in lang_conditional_toupper.values())),
+        map(itemgetter(0), chain.from_iterable(d.values()
+                                               for d in lang_conditional_tolower.values())),
+        map(itemgetter(0), chain.from_iterable(d.values()
+                                               for d in lang_conditional_toupper.values())),
     ))) <= 3
 
     # Ensure all case mapping contexts are known (see Unicode 9.0, §3.13 Default Case Algorithms).
     assert set([
         'After_I', 'After_Soft_Dotted', 'Final_Sigma', 'More_Above', 'Not_Before_Dot',
     ]).issuperset(set(filter(partial(is_not, None), chain(
         map(itemgetter(1), conditional_tolower.values()),
         map(itemgetter(1), conditional_toupper.values()),
-        map(itemgetter(1), chain.from_iterable(d.values() for d in lang_conditional_tolower.values())),
-        map(itemgetter(1), chain.from_iterable(d.values() for d in lang_conditional_toupper.values())),
+        map(itemgetter(1), chain.from_iterable(d.values()
+                                               for d in lang_conditional_tolower.values())),
+        map(itemgetter(1), chain.from_iterable(d.values()
+                                               for d in lang_conditional_toupper.values())),
     ))))
 
     # Special casing for U+00DF (LATIN SMALL LETTER SHARP S).
-    assert upperCase(0x00DF) == 0x00DF and unconditional_toupper[0x00DF] == [0x0053, 0x0053];
+    assert upperCase(0x00DF) == 0x00DF and unconditional_toupper[0x00DF] == [0x0053, 0x0053]
 
     # Special casing for U+0130 (LATIN CAPITAL LETTER I WITH DOT ABOVE).
     assert unconditional_tolower[0x0130] == [0x0069, 0x0307]
 
     # Special casing for U+03A3 (GREEK CAPITAL LETTER SIGMA).
-    assert lowerCase(0x03A3) == 0x03C3 and conditional_tolower[0x03A3] == ([0x03C2], 'Final_Sigma');
+    assert lowerCase(0x03A3) == 0x03C3 and conditional_tolower[0x03A3] == ([0x03C2], 'Final_Sigma')
 
     return (unconditional_tolower, unconditional_toupper)
 
+
 def make_non_bmp_file(version,
                       non_bmp_lower_map, non_bmp_upper_map,
                       non_bmp_folding_map, non_bmp_rev_folding_map,
                       codepoint_table):
-    file_name = 'UnicodeNonBMP.h';
+    file_name = 'UnicodeNonBMP.h'
     with io.open(file_name, mode='w', encoding='utf-8') as non_bmp_file:
         non_bmp_file.write(mpl_license)
         non_bmp_file.write('\n')
         non_bmp_file.write(warning_message)
         non_bmp_file.write(unicode_version_message.format(version))
         non_bmp_file.write("""
 #ifndef util_UnicodeNonBMP_h
 #define util_UnicodeNonBMP_h
@@ -636,24 +654,27 @@ def make_non_bmp_file(version,
 //                 converted code point
 
 """)
 
         make_non_bmp_convert_macro(non_bmp_file, 'LOWERCASE', non_bmp_lower_map, codepoint_table)
         non_bmp_file.write('\n')
         make_non_bmp_convert_macro(non_bmp_file, 'UPPERCASE', non_bmp_upper_map, codepoint_table)
         non_bmp_file.write('\n')
-        make_non_bmp_convert_macro(non_bmp_file, 'CASE_FOLDING', non_bmp_folding_map, codepoint_table)
+        make_non_bmp_convert_macro(non_bmp_file, 'CASE_FOLDING',
+                                   non_bmp_folding_map, codepoint_table)
         non_bmp_file.write('\n')
-        make_non_bmp_convert_macro(non_bmp_file, 'REV_CASE_FOLDING', non_bmp_rev_folding_map, codepoint_table)
+        make_non_bmp_convert_macro(non_bmp_file, 'REV_CASE_FOLDING',
+                                   non_bmp_rev_folding_map, codepoint_table)
 
         non_bmp_file.write("""
 #endif /* util_UnicodeNonBMP_h */
 """)
 
+
 def write_special_casing_methods(unconditional_toupper, codepoint_table, println):
     def hexlit(n):
         """ Returns C++ hex-literal for |n|. """
         return '0x{:04X}'.format(n)
 
     def describe_range(ranges, depth):
         indent = depth * '    '
         for (start, end) in ranges:
@@ -826,36 +847,40 @@ def write_special_casing_methods(uncondi
         println('}')
 
     write_ChangesWhenUpperCasedSpecialCasing()
     println('')
     write_LengthUpperCaseSpecialCasing()
     println('')
     write_AppendUpperCaseSpecialCasing()
 
+
 def write_ascii_lookup_tables(table, index, write, println):
     def is_id_compat(code):
         return code == ord(u'\N{DOLLAR SIGN}') or code == ord(u'\N{LOW LINE}')
+
     def is_id_start(code):
         (upper, lower, flags) = table[index[code]]
         return (flags & FLAG_UNICODE_ID_START) or is_id_compat(code)
+
     def is_id_continue(code):
         (upper, lower, flags) = table[index[code]]
         return (flags & FLAG_UNICODE_ID_CONTINUE_ONLY) or is_id_start(code)
+
     def is_space(code):
         (upper, lower, flags) = table[index[code]]
         return flags & FLAG_SPACE
 
     def write_entries(name, predicate):
         println('const bool unicode::{}[] = {{'.format(name))
         println('/*       0     1     2     3     4     5     6     7     8     9  */')
         for i in range(0, 13):
             write('/* {0: >2} */'.format(i))
             for j in range(0, 10):
-                code = i * 10 + j;
+                code = i * 10 + j
                 if (code <= 0x7f):
                     write(' {},'.format('true' if predicate(code) else '____'))
             println('')
         println('};')
 
     println('')
     println('#define ____ false')
 
@@ -882,16 +907,17 @@ def write_ascii_lookup_tables(table, ind
 
     println("""
 /* Whitespace chars: '\\t', '\\n', '\\v', '\\f', '\\r', ' '. */""")
     write_entries('js_isspace', is_space)
 
     println('')
     println('#undef ____')
 
+
 def make_bmp_mapping_test(version, codepoint_table, unconditional_tolower, unconditional_toupper):
     def unicodeEsc(n):
         return '\\u{:04X}'.format(n)
 
     file_name = '../tests/non262/String/string-upper-lower-mapping.js'
     with io.open(file_name, mode='w', encoding='utf-8') as output:
         write = partial(print, file=output, sep='', end='')
         println = partial(print, file=output, sep='', end='\n')
@@ -922,40 +948,42 @@ for (var i = 0; i <= 0xffff; i++) {
     assertEq(char.toUpperCase(), info[0]);
     assertEq(char.toLowerCase(), info[1]);
 }
 
 if (typeof reportCompare === "function")
     reportCompare(true, true);
 """)
 
+
 def make_non_bmp_mapping_test(version, non_bmp_upper_map, non_bmp_lower_map, codepoint_table):
     file_name = '../tests/non262/String/string-code-point-upper-lower-mapping.js'
     with io.open(file_name, mode='w', encoding='utf-8') as test_non_bmp_mapping:
         test_non_bmp_mapping.write(warning_message)
         test_non_bmp_mapping.write(unicode_version_message.format(version))
         test_non_bmp_mapping.write(public_domain)
 
         for code in sorted(non_bmp_upper_map.keys()):
             test_non_bmp_mapping.write("""\
 assertEq(String.fromCodePoint(0x{:04X}).toUpperCase().codePointAt(0), 0x{:04X}); // {}, {}
 """.format(code, non_bmp_upper_map[code],
-           codepoint_table.name(code), codepoint_table.name(non_bmp_upper_map[code])))
+                codepoint_table.name(code), codepoint_table.name(non_bmp_upper_map[code])))
 
         for code in sorted(non_bmp_lower_map.keys()):
             test_non_bmp_mapping.write("""\
 assertEq(String.fromCodePoint(0x{:04X}).toLowerCase().codePointAt(0), 0x{:04X}); // {}, {}
 """.format(code, non_bmp_lower_map[code],
-           codepoint_table.name(code), codepoint_table.name(non_bmp_lower_map[code])))
+                codepoint_table.name(code), codepoint_table.name(non_bmp_lower_map[code])))
 
         test_non_bmp_mapping.write("""
 if (typeof reportCompare === "function")
     reportCompare(true, true);
 """)
 
+
 def make_space_test(version, test_space_table, codepoint_table):
     def hex_and_name(c):
         return '    0x{:04X} /* {} */'.format(c, codepoint_table.name(c))
 
     file_name = '../tests/non262/String/string-space-trim.js'
     with io.open(file_name, mode='w', encoding='utf-8') as test_space:
         test_space.write(warning_message)
         test_space.write(unicode_version_message.format(version))
@@ -968,16 +996,17 @@ assertEq(onlySpace.trim(), "");
 assertEq((onlySpace + 'aaaa').trim(), 'aaaa');
 assertEq(('aaaa' + onlySpace).trim(), 'aaaa');
 assertEq((onlySpace + 'aaaa' + onlySpace).trim(), 'aaaa');
 
 if (typeof reportCompare === "function")
     reportCompare(true, true);
 """)
 
+
 def make_regexp_space_test(version, test_space_table, codepoint_table):
     def hex_and_name(c):
         return '    0x{:04X} /* {} */'.format(c, codepoint_table.name(c))
 
     file_name = '../tests/non262/RegExp/character-class-escape-s.js'
     with io.open(file_name, mode='w', encoding='utf-8') as test_space:
         test_space.write(warning_message)
         test_space.write(unicode_version_message.format(version))
@@ -1002,16 +1031,17 @@ assertEq(/^[^\s]+$/u.exec(onlySpace) ===
 assertEq(/^\S+$/u.exec(onlySpace) === null, true);
 assertEq(/^[\S]+$/u.exec(onlySpace) === null, true);
 assertEq(/^[^\S]+$/u.exec(onlySpace) !== null, true);
 
 if (typeof reportCompare === "function")
     reportCompare(true, true);
 """)
 
+
 def make_icase_test(version, folding_tests, codepoint_table):
     def char_hex(c):
         return '0x{:04X}'.format(c)
 
     file_name = '../tests/non262/RegExp/unicode-ignoreCase.js'
     with io.open(file_name, mode='w', encoding='utf-8') as test_icase:
         test_icase.write(warning_message)
         test_icase.write(unicode_version_message.format(version))
@@ -1033,16 +1063,17 @@ function test(code, ...equivs) {
         for args in folding_tests:
             test_icase.write('test({}); // {}\n'.format(', '.join(map(char_hex, args)),
                                                         ', '.join(map(codepoint_table.name, args))))
         test_icase.write("""
 if (typeof reportCompare === "function")
     reportCompare(true, true);
 """)
 
+
 def make_unicode_file(version,
                       table, index,
                       same_upper_table, same_upper_index,
                       folding_table, folding_index,
                       non_bmp_space_set,
                       non_bmp_id_start_set, non_bmp_id_cont_set,
                       unconditional_toupper,
                       codepoint_table):
@@ -1129,16 +1160,17 @@ def make_unicode_file(version,
  *    put index as chunk in cache
  *
  *   push index >> shift to index1
  *
  *  increase shift
  *  stop if you found the best shift
  */
 """
+
     def dump(data, name, println):
         println('const uint8_t unicode::{}[] = {{'.format(name))
 
         line = pad = ' ' * 4
         lines = []
         for entry in data:
             assert entry < 256
             s = str(entry)
@@ -1168,17 +1200,18 @@ def make_unicode_file(version,
 
     def write_supplemental_identifier_method(name, group_set, println):
         println('bool')
         println('js::unicode::{}(uint32_t codePoint)'.format(name))
         println('{')
         for (from_code, to_code) in int_ranges(group_set.keys()):
             println('    if (codePoint >= 0x{:X} && codePoint <= 0x{:X}) // {} .. {}'.format(from_code,
                                                                                              to_code,
-                                                                                             codepoint_table.name(from_code),
+                                                                                             codepoint_table.name(
+                                                                                                 from_code),
                                                                                              codepoint_table.name(to_code)))
             println('        return true;')
         println('    return false;')
         println('}')
         println('')
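
Illustration: for each contiguous code-point range in group_set, the helper
above emits one range check, so the generated C++ has roughly this shape
(range endpoints and names are placeholders, not real table data):

    bool
    js::unicode::IsIdentifierPartNonBMP(uint32_t codePoint)
    {
        if (codePoint >= 0x10000 && codePoint <= 0x1000B) // <first name> .. <last name>
            return true;
        return false;
    }
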
 
     file_name = 'Unicode.cpp'
     with io.open(file_name, 'w', encoding='utf-8') as data_file:
@@ -1222,44 +1255,46 @@ def make_unicode_file(version,
 
         write_supplemental_identifier_method('IsIdentifierPartNonBMP', non_bmp_id_cont_set,
                                              println)
 
         write_special_casing_methods(unconditional_toupper, codepoint_table, println)
 
         write_ascii_lookup_tables(table, index, write, println)
 
+
 def getsize(data):
     """ return smallest possible integer size for the given array """
     maxdata = max(data)
     assert maxdata < 2**32
 
     if maxdata < 256:
         return 1
     elif maxdata < 65536:
         return 2
     else:
         return 4
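
A quick sanity check of the three size classes (inputs chosen for
illustration):

    assert getsize([0, 255]) == 1       # fits in uint8_t
    assert getsize([0, 256]) == 2       # needs uint16_t
    assert getsize([0, 65536]) == 4     # needs uint32_t
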
 
+
 def splitbins(t):
     """t -> (t1, t2, shift).  Split a table to save space.
 
     t is a sequence of ints.  This function can be useful to save space if
     many of the ints are the same.  t1 and t2 are lists of ints, and shift
     is an int, chosen to minimize the combined size of t1 and t2 (in C
     code), and where for each i in range(len(t)),
         t[i] == t2[(t1[i >> shift] << shift) + (i & mask)]
     where mask is a bitmask isolating the last "shift" bits.
     """
 
     def dump(t1, t2, shift, bytes):
         print("%d+%d bins at shift %d; %d bytes" % (
             len(t1), len(t2), shift, bytes), file=sys.stderr)
-        print("Size of original table:", len(t)*getsize(t), \
-            "bytes", file=sys.stderr)
+        print("Size of original table:", len(t)*getsize(t),
+              "bytes", file=sys.stderr)
     n = len(t)-1    # last valid index
     maxshift = 0    # the most we can shift n and still have something left
     if n > 0:
         while n >> 1:
             n >>= 1
             maxshift += 1
     del n
     bytes = sys.maxsize  # smallest total size so far
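
A minimal worked example of the invariant from the docstring (table contents
chosen for illustration; splitbins also prints its size accounting to stderr):

    t = [1, 1, 2, 2, 1, 1, 2, 2]            # many repeated runs compress well
    t1, t2, shift = splitbins(t)
    mask = 2**shift - 1                      # isolates the low "shift" bits
    for i in range(len(t)):
        # two-level lookup: t1 picks the chunk, t2 holds the actual values
        assert t[i] == t2[(t1[i >> shift] << shift) + (i & mask)]
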
@@ -1291,16 +1326,17 @@ def splitbins(t):
     dump(t1, t2, shift, bytes)
 
     # exhaustively verify that the decomposition is correct
     mask = 2**shift - 1
     for i in range(len(t)):
         assert t[i] == t2[(t1[i >> shift] << shift) + (i & mask)]
     return best
 
+
 def make_irregexp_tables(version,
                          table, index,
                          folding_table, folding_index,
                          codepoint_table):
     import string
 
     MAX_ASCII = 0x7F
     MAX_LATIN1 = 0xFF
@@ -1361,18 +1397,18 @@ def make_irregexp_tables(version,
     def write_character_range(println, name, characters):
         char_ranges = list(int_ranges(characters))
         println('')
         println('const int js::irregexp::k{}Ranges[] = {{'.format(name))
         for (start, end) in char_ranges:
             s_name = char_name(start)
             e_name = char_name(end)
             println('    {}, {} + 1, // {}'.format(hex4(start), hex4(end),
-                                                               '{}..{}'.format(s_name, e_name)
-                                                               if start != end else s_name))
+                                                   '{}..{}'.format(s_name, e_name)
+                                                   if start != end else s_name))
         println('    {} + 1'.format(hex4(MAX_BMP)))
         println('};')
         println('const int js::irregexp::k{}RangeCount = {};'.format(name,
                                                                      len(char_ranges) * 2 + 1))
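
Illustration: the array alternates inclusive starts with exclusive ends and is
closed by a MAX_BMP sentinel, so a single-range 'Digit' table would come out
roughly as (a sketch; exact formatting comes from hex4 and char_name):

    const int js::irregexp::kDigitRanges[] = {
        0x0030, 0x0039 + 1, // DIGIT ZERO..DIGIT NINE
        0xFFFF + 1
    };
    const int js::irregexp::kDigitRangeCount = 3;
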
 
     def write_character_test(println, test, consequent, default):
         # Latin1 characters which, when case-mapped through
         # String.prototype.toUpperCase(), canonicalize to a non-Latin1 character.
@@ -1488,16 +1524,17 @@ def make_irregexp_tables(version,
         character_range('Digit', digit_chars)
         character_range('DigitAndSurrogate', digit_chars + surrogate_chars)
 
         character_range('Surrogate', surrogate_chars)
 
         character_range('LineTerminator', line_terminator)
         character_range('LineTerminatorAndSurrogate', line_terminator + surrogate_chars)
 
+
 def update_unicode(args):
     base_path = os.getcwd()
 
     version = args.version
     if version is not None:
         baseurl = 'https://unicode.org/Public'
         if version == 'UNIDATA':
             url = '%s/%s' % (baseurl, version)
@@ -1521,19 +1558,19 @@ def update_unicode(args):
         print('\tAlways make sure you have the newest Unicode files!')
     print('')
 
     def version_from_file(f, fname):
         pat_version = re.compile(r"# %s-(?P<version>\d+\.\d+\.\d+).txt" % fname)
         return pat_version.match(f.readline()).group("version")
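
Upstream Unicode data files begin with a header line naming the file and its
version, which is what the pattern above captures; a minimal check (version
number illustrative):

    pat = re.compile(r"# %s-(?P<version>\d+\.\d+\.\d+).txt" % 'DerivedCoreProperties')
    assert pat.match('# DerivedCoreProperties-10.0.0.txt').group('version') == '10.0.0'
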
 
     with io.open(os.path.join(base_path, 'UnicodeData.txt'), 'r', encoding='utf-8') as unicode_data, \
-         io.open(os.path.join(base_path, 'CaseFolding.txt'), 'r', encoding='utf-8') as case_folding, \
-         io.open(os.path.join(base_path, 'DerivedCoreProperties.txt'), 'r', encoding='utf-8') as derived_core_properties, \
-         io.open(os.path.join(base_path, 'SpecialCasing.txt'), 'r', encoding='utf-8') as special_casing:
+            io.open(os.path.join(base_path, 'CaseFolding.txt'), 'r', encoding='utf-8') as case_folding, \
+            io.open(os.path.join(base_path, 'DerivedCoreProperties.txt'), 'r', encoding='utf-8') as derived_core_properties, \
+            io.open(os.path.join(base_path, 'SpecialCasing.txt'), 'r', encoding='utf-8') as special_casing:
         unicode_version = version_from_file(derived_core_properties, 'DerivedCoreProperties')
 
         print('Processing...')
         (
             table, index,
             same_upper_table, same_upper_index,
             non_bmp_lower_map, non_bmp_upper_map,
             non_bmp_space_set,
@@ -1564,21 +1601,23 @@ def update_unicode(args):
                       codepoint_table)
     make_irregexp_tables(unicode_version,
                          table, index,
                          folding_table, folding_index,
                          codepoint_table)
 
     make_bmp_mapping_test(unicode_version,
                           codepoint_table, unconditional_tolower, unconditional_toupper)
-    make_non_bmp_mapping_test(unicode_version, non_bmp_upper_map, non_bmp_lower_map, codepoint_table)
+    make_non_bmp_mapping_test(unicode_version, non_bmp_upper_map,
+                              non_bmp_lower_map, codepoint_table)
     make_space_test(unicode_version, test_space_table, codepoint_table)
     make_regexp_space_test(unicode_version, test_space_table, codepoint_table)
     make_icase_test(unicode_version, folding_tests, codepoint_table)
 
+
 if __name__ == '__main__':
     import argparse
 
     # This script must be run from js/src/util to work correctly.
     if '/'.join(os.path.normpath(os.getcwd()).split(os.sep)[-3:]) != 'js/src/util':
         raise RuntimeError('%s must be run from js/src/util' % sys.argv[0])
 
     parser = argparse.ArgumentParser(description='Update Unicode data.')
--- a/js/src/vm/make_opcode_doc.py
+++ b/js/src/vm/make_opcode_doc.py
@@ -17,30 +17,33 @@ import sys
 import os
 sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
 import opcode
 
 from xml.sax.saxutils import escape
 
 SOURCE_BASE = 'http://dxr.mozilla.org/mozilla-central/source'
 
+
 def override(value, override_value):
     if override_value != '':
         return override_value
 
     return value
 
+
 def format_flags(flags):
     flags = filter(lambda x: x != 'JOF_BYTE', flags)
     if len(flags) == 0:
         return ''
 
     flags = map(lambda x: x.replace('JOF_', ''), flags)
     return ' ({flags})'.format(flags=', '.join(flags))
 
+
 def print_opcode(opcode):
     names_template = '{name} [-{nuses}, +{ndefs}]{flags}'
     opcodes = sorted([opcode] + opcode.group,
                      key=lambda opcode: opcode.name)
     names = map(lambda code: names_template.format(name=escape(code.name),
                                                    nuses=override(code.nuses,
                                                                   opcode.nuses_override),
                                                    ndefs=override(code.ndefs,
@@ -48,17 +51,17 @@ def print_opcode(opcode):
                                                    flags=format_flags(code.flags)),
                 opcodes)
     if len(opcodes) == 1:
         values = ['{value} (0x{value:02x})'.format(value=opcode.value)]
     else:
         values_template = '{name}: {value} (0x{value:02x})'
         values = map(lambda code: values_template.format(name=escape(code.name),
                                                          value=code.value),
-                    opcodes)
+                     opcodes)
 
     print("""<dt id="{id}">{names}</dt>
 <dd>
 <table class="standard-table">
 <tbody>
 <tr><th>Value</th><td><code>{values}</code></td></tr>
 <tr><th>Operands</th><td><code>{operands}</code></td></tr>
 <tr><th>Length</th><td><code>{length}</code></td></tr>
@@ -72,21 +75,23 @@ def print_opcode(opcode):
 """.format(id=opcodes[0].name,
            names='<br>'.join(names),
            values='<br>'.join(values),
            operands=escape(opcode.operands) or "&nbsp;",
            length=escape(override(opcode.length,
                                   opcode.length_override)),
            stack_uses=escape(opcode.stack_uses) or "&nbsp;",
            stack_defs=escape(opcode.stack_defs) or "&nbsp;",
-           desc=opcode.desc)) # desc is already escaped
+           desc=opcode.desc))  # desc is already escaped
+
 
 id_cache = dict()
 id_count = dict()
 
+
 def make_element_id(category, type=''):
     key = '{}:{}'.format(category, type)
     if key in id_cache:
         return id_cache[key]
 
     if type == '':
         id = category.replace(' ', '_')
     else:
@@ -96,16 +101,17 @@ def make_element_id(category, type=''):
         id_count[id] += 1
         id = '{}_{}'.format(id, id_count[id])
     else:
         id_count[id] = 1
 
     id_cache[key] = id
     return id
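
Illustration of the caching and deduplication above (inputs hypothetical):

    a = make_element_id('Constants')    # -> 'Constants'
    b = make_element_id('Constants')    # same key: served from id_cache, 'Constants'
    # A *different* (category, type) key that happened to produce the same id
    # string would instead get a numeric suffix via id_count, e.g. 'Constants_2'.
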
 
+
 def print_doc(index):
     print("""<div>{{{{SpiderMonkeySidebar("Internals")}}}}</div>
 
 <h2 id="Bytecode_Listing">Bytecode Listing</h2>
 
 <p>This document is automatically generated from
 <a href="{source_base}/js/src/vm/Opcodes.h">Opcodes.h</a> by
 <a href="{source_base}/js/src/vm/make_opcode_doc.py">make_opcode_doc.py</a>.</p>
@@ -119,16 +125,17 @@ def print_doc(index):
                 print('<h4 id="{id}">{name}</h4>'.format(name=type_name,
                                                          id=make_element_id(category_name, type_name)))
             print('<dl>')
             for opcode in sorted(opcodes,
                                  key=lambda opcode: opcode.sort_key):
                 print_opcode(opcode)
             print('</dl>')
 
+
 if __name__ == '__main__':
     if len(sys.argv) < 2:
         print("Usage: make_opcode_doc.py PATH_TO_MOZILLA_CENTRAL",
               file=sys.stderr)
         sys.exit(1)
     dir = sys.argv[1]
 
     try:
--- a/js/src/vm/opcode.py
+++ b/js/src/vm/opcode.py
@@ -2,51 +2,64 @@
 
 from __future__ import print_function
 import re
 import sys
 from xml.sax.saxutils import escape
 
 quoted_pat = re.compile(r"([^A-Za-z0-9]|^)'([^']+)'")
 js_pat = re.compile(r"([^A-Za-z0-9]|^)(JS[A-Z0-9_\*]+)")
+
+
 def codify(text):
     text = re.sub(quoted_pat, '\\1<code>\\2</code>', text)
     text = re.sub(js_pat, '\\1<code>\\2</code>', text)
 
     return text
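
Illustration: single-quoted terms and JS* identifiers both get <code> markup
(input hypothetical):

    assert codify("use 'this' with JSOP_ADD") == \
        "use <code>this</code> with <code>JSOP_ADD</code>"
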
 
+
 space_star_space_pat = re.compile('^\s*\* ?', re.M)
+
+
 def get_comment_body(comment):
     return re.sub(space_star_space_pat, '', comment).split('\n')
 
+
 quote_pat = re.compile('"([^"]+)"')
 str_pat = re.compile('js_([^_]+)_str')
+
+
 def parse_name(s):
     m = quote_pat.search(s)
     if m:
         return m.group(1)
     m = str_pat.search(s)
     if m:
         return m.group(1)
     return s
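
The two accepted spellings, plus the fallthrough (inputs hypothetical):

    assert parse_name('"arguments"') == 'arguments'    # quoted display name
    assert parse_name('js_typeof_str') == 'typeof'     # js_*_str identifier
    assert parse_name('whatever') == 'whatever'        # neither: unchanged
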
 
+
 csv_pat = re.compile(', *')
+
+
 def parse_csv(s):
     a = csv_pat.split(s)
     if len(a) == 1 and a[0] == '':
         return []
     return a
 
+
 def get_stack_count(stack):
     if stack == '':
         return 0
     if '...' in stack:
         return -1
     return len(stack.split(','))
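
Illustration (operand strings hypothetical):

    assert get_stack_count('') == 0           # empty stack description
    assert get_stack_count('obj, id') == 2    # fixed arity: count the entries
    assert get_stack_count('v, ...') == -1    # variadic: arity is unknown
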
 
+
 def parse_index(comment):
     index = []
     current_types = None
     category_name = ''
     category_pat = re.compile('\[([^\]]+)\]')
     for line in get_comment_body(comment):
         m = category_pat.search(line)
         if m:
@@ -68,32 +81,36 @@ def parse_index(comment):
 #    *   Category: {category_name}
 #    *   Type: {type_name}
 #    *   Operands: {operands}
 #    *   Stack: {stack_uses} => {stack_defs}
 #    *   length: {length_override}
 #    *   nuses: {nuses_override}
 #    *   ndefs: {ndefs_override}
 #    */
+
+
 class CommentInfo:
     def __init__(self):
         self.desc = ''
         self.category_name = ''
         self.type_name = ''
         self.operands = ''
         self.stack_uses = ''
         self.stack_defs = ''
         self.length_override = ''
         self.nuses_override = ''
         self.ndefs_override = ''
 
 # Holds the information stored in the macro with the following format:
 #   macro({name}, {value}, {display_name}, {image}, {length}, {nuses}, {ndefs},
 #         {flags})
 # and the information from CommentInfo.
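
An illustrative entry in that shape (field values invented, not taken from the
real Opcodes.h table):

    # macro(name,     value, display_name, image, length, nuses, ndefs, flags)
    macro(JSOP_ADD,   27,    "add",        NULL,  1,      2,     1,     JOF_BYTE)
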
+
+
 class OpcodeInfo:
     def __init__(self, comment_info):
         self.name = ''
         self.value = ''
         self.display_name = ''
         self.image = ''
         self.length = ''
         self.nuses = ''
@@ -123,39 +140,42 @@ class OpcodeInfo:
         #    */
         #   macro(JSOP_SUB, ...)
         #   macro(JSOP_MUL, ...)
         #   macro(JSOP_DIV, ...)
         self.group = []
 
         self.sort_key = ''
 
+
 def find_by_name(list, name):
     for (n, body) in list:
         if n == name:
             return body
 
     return None
 
+
 def add_to_index(index, opcode):
     types = find_by_name(index, opcode.category_name)
     if types is None:
         raise Exception('Category is not listed in index: '
                         '{name}'.format(name=opcode.category_name))
     opcodes = find_by_name(types, opcode.type_name)
     if opcodes is None:
         if opcode.type_name:
             raise Exception('Type is not listed in {category}: '
                             '{name}'.format(category=opcode.category_name,
                                             name=opcode.type_name))
         types.append((opcode.type_name, [opcode]))
         return
 
     opcodes.append(opcode)
 
+
 def format_desc(descs):
     current_type = ''
     desc = ''
     for (type, line) in descs:
         if type != current_type:
             if current_type:
                 desc += '</{name}>\n'.format(name=current_type)
             current_type = type
@@ -163,32 +183,36 @@ def format_desc(descs):
                 desc += '<{name}>'.format(name=current_type)
         if current_type:
             desc += line + '\n'
     if current_type:
         desc += '</{name}>'.format(name=current_type)
 
     return desc
 
+
 tag_pat = re.compile('^\s*[A-Za-z]+:\s*|\s*$')
+
+
 def get_tag_value(line):
     return re.sub(tag_pat, '', line)
 
+
 def get_opcodes(dir):
     iter_pat = re.compile(r"/\*(.*?)\*/"  # either a documentation comment...
                           r"|"
                           r"macro\("      # or a macro(...) call
-                                 r"(?P<name>[^,]+),\s*"
-                                 r"(?P<value>[0-9]+),\s*"
-                                 r"(?P<display_name>[^,]+,)\s*"
-                                 r"(?P<image>[^,]+),\s*"
-                                 r"(?P<length>[0-9\-]+),\s*"
-                                 r"(?P<nuses>[0-9\-]+),\s*"
-                                 r"(?P<ndefs>[0-9\-]+),\s*"
-                                 r"(?P<flags>[^\)]+)"
+                          r"(?P<name>[^,]+),\s*"
+                          r"(?P<value>[0-9]+),\s*"
+                          r"(?P<display_name>[^,]+,)\s*"
+                          r"(?P<image>[^,]+),\s*"
+                          r"(?P<length>[0-9\-]+),\s*"
+                          r"(?P<nuses>[0-9\-]+),\s*"
+                          r"(?P<ndefs>[0-9\-]+),\s*"
+                          r"(?P<flags>[^\)]+)"
                           r"\)", re.S)
     stack_pat = re.compile(r"^(?P<uses>.*?)"
                            r"\s*=>\s*"
                            r"(?P<defs>.*?)$")
 
     opcodes = dict()
     index = []
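
A minimal sketch of how iter_pat is consumed (assumes the pattern above is in
scope; the sample header text is invented):

    sample = '/* Adds two values. */\nmacro(JSOP_ADD, 27, "add", NULL, 1, 2, 1, JOF_BYTE)'
    for m in iter_pat.finditer(sample):
        if m.group(1) is not None:     # alternative 1: a /* ... */ doc comment
            print('comment:', m.group(1).strip())
        else:                          # alternative 2: a macro(...) definition
            print('opcode:', m.group('name'), '->', m.group('flags').strip())
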