Bug 1464869 - Fix flake8/pep8 issue by hand in js/ r=jorendorff
author Sylvestre Ledru <sledru@mozilla.com>
Wed, 06 Jun 2018 13:04:14 +0200
changeset 421548 fd9de8dd00a40f86c3e82ce2c1ec4b182c38be08
parent 421547 4924d6ee1b83a0a00acff3c40b0ba95e4914231c
child 421549 f9a66e05d90ea6e12616e97b1cc52f9727c4ddc7
push id 34097
push user nbeleuzu@mozilla.com
push date Wed, 06 Jun 2018 16:59:16 +0000
treeherder mozilla-central@223e5900fbbb [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers jorendorff
bugs 1464869
milestone 62.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1464869 - Fix flake8/pep8 issue by hand in js/ r=jorendorff MozReview-Commit-ID: 7cbfMkdykXn
js/src/builtin/embedjs.py
js/src/builtin/intl/make_intl_data.py
js/src/ctypes/libffi/generate-darwin-source-and-headers.py
js/src/devtools/automation/autospider.py
js/src/devtools/gc/gc-test.py
js/src/devtools/rootAnalysis/analyze.py
js/src/devtools/rootAnalysis/explain.py
js/src/devtools/rootAnalysis/t/exceptions/test.py
js/src/devtools/rootAnalysis/t/hazards/test.py
js/src/devtools/rootAnalysis/t/sixgill-tree/test.py
js/src/devtools/rootAnalysis/t/suppression/test.py
js/src/devtools/rootAnalysis/t/testlib.py
js/src/gc/GenerateStatsPhases.py
js/src/gdb/lib-for-tests/catcher.py
js/src/gdb/lib-for-tests/prologue.py
js/src/gdb/mozilla/Interpreter.py
js/src/gdb/mozilla/IonGraph.py
js/src/gdb/mozilla/JSObject.py
js/src/gdb/mozilla/JSString.py
js/src/gdb/mozilla/JSSymbol.py
js/src/gdb/mozilla/Root.py
js/src/gdb/mozilla/autoload.py
js/src/gdb/mozilla/jsid.py
js/src/gdb/mozilla/jsval.py
js/src/gdb/mozilla/prettyprinters.py
js/src/gdb/mozilla/unwind.py
js/src/gdb/run-tests.py
js/src/gdb/taskpool.py
js/src/gdb/tests/test-ExecutableAllocator.py
js/src/gdb/tests/test-GCCellPtr.py
js/src/gdb/tests/test-Interpreter.py
js/src/gdb/tests/test-JSObject-null.py
js/src/gdb/tests/test-JSObject.py
js/src/gdb/tests/test-JSString-null.py
js/src/gdb/tests/test-JSString-subclasses.py
js/src/gdb/tests/test-JSString.py
js/src/gdb/tests/test-JSSymbol.py
js/src/gdb/tests/test-Root-null.py
js/src/gdb/tests/test-Root.py
js/src/gdb/tests/test-asmjs.py
js/src/gdb/tests/test-jsid.py
js/src/gdb/tests/test-jsval.py
js/src/gdb/tests/test-prettyprinters.py
js/src/gdb/tests/test-unwind.py
js/src/gdb/tests/typedef-printers.py
js/src/jit-test/jit_test.py
js/src/jit/arm/gen-double-encoder-table.py
js/src/tests/lib/jittests.py
js/src/tests/lib/manifest.py
js/src/tests/lib/progressbar.py
js/src/tests/lib/results.py
js/src/tests/lib/structuredlog.py
js/src/tests/lib/tasks_unix.py
js/src/tests/lib/tests.py
js/src/tests/non262/String/make-normalize-generateddata-input.py
js/src/tests/test/run.py
js/src/tests/test262-export.py
js/src/tests/test262-update.py
js/src/util/make_unicode.py
js/src/vm/make_opcode_doc.py
js/src/vm/opcode.py
--- a/js/src/builtin/embedjs.py
+++ b/js/src/builtin/embedjs.py
@@ -77,17 +77,17 @@ namespace %(namespace)s {
         return %(compressed_total_length)i;
     }
 
     uint32_t GetRawScriptsSize() {
         return %(raw_total_length)i;
     }
 } // selfhosted
 } // js
-"""
+"""  # NOQA: E501
 
 
 def embed(cxx, preprocessorOption, cppflags, msgs, sources, c_out, js_out, namespace, env):
     combinedSources = '\n'.join([msgs] + ['#include "%(s)s"' %
                                           {'s': source} for source in sources])
     args = cppflags + ['-D%(k)s=%(v)s' % {'k': k, 'v': env[k]} for k in env]
     preprocessed = preprocess(cxx, preprocessorOption, combinedSources, args)
     processed = '\n'.join([line for line in preprocessed.splitlines() if
--- a/js/src/builtin/intl/make_intl_data.py
+++ b/js/src/builtin/intl/make_intl_data.py
@@ -226,17 +226,18 @@ def writeMappingsVar(println, mapping, n
             if key != preferred:
                 raise Exception(
                     "Expected '{0}' matches preferred locale '{1}'".format(key, preferred))
             value = '"{0}"'.format(prefix)
         println(u'    "{0}": {1},'.format(key, value))
     println(u"};")
 
 
-def writeMappingsFunction(println, variantMappings, redundantMappings, extlangMappings, description, fileDate, url):
+def writeMappingsFunction(println, variantMappings, redundantMappings, extlangMappings,
+                          description, fileDate, url):
     """ Writes a function definition which performs language tag mapping.
 
         Processes the contents of dictionaries |variantMappings| and
         |redundantMappings| through the |println| function with the given
         function name and a comment with description, fileDate, and URL.
     """
 
     class Subtag(object):
@@ -265,17 +266,17 @@ def writeMappingsFunction(println, varia
                 kind = Subtag.Region
 
             # variant = 5*8alphanum / (DIGIT 3alphanum)
             elif ((len(subtag) in range(5, 8+1) and subtag.isalnum()) or
                   (len(subtag) == 4 and subtag[0].isdigit() and subtag[1:].isalnum())):
                 kind = Subtag.Variant
 
             else:
-                assert False, "unexpected language tag '{}'".format(key)
+                assert False, "unexpected language tag '{}'".format(subtag)
 
             yield (kind, subtag)
 
     def language(tag):
         (kind, subtag) = next(splitSubtags(tag))
         assert kind == Subtag.Language
         return subtag
 
@@ -311,20 +312,21 @@ def writeMappingsFunction(println, varia
             elif kind == Subtag.Script:
                 cond.append('tag.script === "{}"'.format(subtag))
             elif kind == Subtag.Region:
                 cond.append('tag.region === "{}"'.format(subtag))
             else:
                 assert kind == Subtag.Variant
                 if lastVariant is None:
                     cond.append("tag.variants.length >= {}".format(len(variants(tag))))
-                    cond.append('callFunction(ArrayIndexOf, tag.variants, "{}") > -1'.format(subtag))
+                    cond.append('callFunction(ArrayIndexOf, tag.variants, "{}") > -1'.format(
+                        subtag))
                 else:
                     cond.append(
-                        'callFunction(ArrayIndexOf, tag.variants, "{}", callFunction(ArrayIndexOf, tag.variants, "{}") + 1) > -1'.format(subtag, lastVariant))
+                        'callFunction(ArrayIndexOf, tag.variants, "{}", callFunction(ArrayIndexOf, tag.variants, "{}") + 1) > -1'.format(subtag, lastVariant))  # NOQA: E501
                 lastVariant = subtag
 
         # Require exact matches for redundant language tags.
         if tag in redundantMappings:
             tag_it = splitSubtags(tag)
             tag_next = partial(maybeNext, tag_it)
             (tag_kind, _) = tag_next()
 
@@ -429,17 +431,18 @@ def writeMappingsFunction(println, varia
                 println3(u'    if (variant === "{}")'.format(tag_subtag))
                 println3(u"        continue;")
                 (tag_kind, tag_subtag) = tag_next()
             println3(u"    _DefineDataProperty(newVariants, newVariants.length, variant);")
             println3(u"}")
 
             # Add the new variants, unless already present.
             while preferred_kind == Subtag.Variant:
-                println3(u'if (callFunction(ArrayIndexOf, newVariants, "{}") < 0)'.format(preferred_subtag))
+                println3(u'if (callFunction(ArrayIndexOf, newVariants, "{}") < 0)'.format(
+                    preferred_subtag))
                 println3(u'    _DefineDataProperty(newVariants, newVariants.length, "{}");'.format(
                     preferred_subtag))
                 (preferred_kind, preferred_subtag) = preferred_next()
 
             # Update the property.
             println3(u"tag.variants = newVariants;")
 
         # Ensure both language tags were completely processed.
@@ -472,17 +475,18 @@ def writeMappingsFunction(println, varia
 
         preferred_it = splitSubtags(preferred)
         (_, preferred_lang) = next(preferred_it)
 
         # Return true if the mapping is for an extlang language and the extlang
         # mapping table contains an equivalent entry and any trailing elements,
         # if present, are the same.
         return (tag_kind == Subtag.ExtLang and
-                (tag_extlang, {"preferred": preferred_lang, "prefix": tag_lang}) in extlangMappings.items() and
+                (tag_extlang, {"preferred": preferred_lang, "prefix": tag_lang}) in
+                extlangMappings.items() and
                 list(tag_it) == list(preferred_it))
 
     # Create a single mapping for variant and redundant tags, ignoring the
     # entries which are also covered through extlang mappings.
     langTagMappings = {tag: preferred
                        for mapping in [variantMappings, redundantMappings]
                        for (tag, preferred) in mapping.items()
                        if not hasExtlangMapping(tag, preferred)}
@@ -657,17 +661,18 @@ def partition(iterable, *predicates):
     if len(predicates) == 1:
         return (left, right)
     return tuple([left] + list(partition(right, *predicates[1:])))
 
 
 def listIANAFiles(tzdataDir):
     def isTzFile(d, m, f):
         return m(f) and d.isfile(d.resolve(f))
-    return filter(partial(isTzFile, tzdataDir, re.compile("^[a-z0-9]+$").match), tzdataDir.listdir())
+    return filter(partial(isTzFile, tzdataDir, re.compile("^[a-z0-9]+$").match),
+                  tzdataDir.listdir())
 
 
 def readIANAFiles(tzdataDir, files):
     """ Read all IANA time zone files from the given iterable. """
     nameSyntax = "[\w/+\-]+"
     pZone = re.compile(r"Zone\s+(?P<name>%s)\s+.*" % nameSyntax)
     pLink = re.compile(r"Link\s+(?P<target>%s)\s+(?P<name>%s)(?:\s+#.*)?" %
                        (nameSyntax, nameSyntax))
@@ -896,18 +901,18 @@ def readICUTimeZones(icuDir, icuTzDir, i
     (typesZones, typesLinks) = readICUTimeZonesFromTimezoneTypes(icuTzDir)
 
     # Information in zoneinfo64 should be a superset of timezoneTypes.
     def inZoneInfo64(zone): return zone in zoneinfoZones or zone in zoneinfoLinks
 
     # Remove legacy ICU time zones from zoneinfo64 data.
     (legacyZones, legacyLinks) = readICULegacyZones(icuDir)
     zoneinfoZones = {zone for zone in zoneinfoZones if zone not in legacyZones}
-    zoneinfoLinks = {zone: target for (
-        zone, target) in zoneinfoLinks.items() if zone not in legacyLinks}
+    zoneinfoLinks = {zone: target for (zone, target) in zoneinfoLinks.items()
+                     if zone not in legacyLinks}
 
     notFoundInZoneInfo64 = [zone for zone in typesZones if not inZoneInfo64(zone)]
     if notFoundInZoneInfo64:
         raise RuntimeError("Missing time zones in zoneinfo64.txt: %s" % notFoundInZoneInfo64)
 
     notFoundInZoneInfo64 = [zone for zone in typesLinks.keys() if not inZoneInfo64(zone)]
     if notFoundInZoneInfo64:
         raise RuntimeError("Missing time zones in zoneinfo64.txt: %s" % notFoundInZoneInfo64)
@@ -1017,18 +1022,18 @@ def findIncorrectICULinks(ianaZones, ian
     # Links which are only present in ICU?
     additionalTimeZones = [zone for zone in icuLinks.keys() if not isIANATimeZone(zone)]
     if additionalTimeZones:
         raise RuntimeError("Additional links present in ICU, did you forget "
                            "to run intl/update-tzdata.sh? %s" % additionalTimeZones)
 
     result = chain(
         # IANA links which have a different target in ICU.
-        ((zone, target, icuLinks[zone]) for (zone, target)
-         in ianaLinks.items() if isICULink(zone) and target != icuLinks[zone]),
+        ((zone, target, icuLinks[zone]) for (zone, target) in ianaLinks.items()
+         if isICULink(zone) and target != icuLinks[zone]),
 
         # IANA links which are zones in ICU.
         ((zone, target, zone.name) for (zone, target) in ianaLinks.items() if isICUZone(zone))
     )
 
     # Remove unnecessary UTC mappings.
     utcnames = ["Etc/UTC", "Etc/UCT", "Etc/GMT"]
     result = ((zone, target, icuTarget)
@@ -1095,17 +1100,17 @@ def processTimeZones(tzdataDir, icuDir, 
         println(u"const LinkAndTarget ianaLinksCanonicalizedDifferentlyByICU[] = {")
         for (zone, target, icuTarget) in incorrectLinks:
             println(u'    { "%s", "%s" }, // %s [%s]' % (zone, target, icuTarget, zone.filename))
         println(u"};")
         println(u"")
 
         println(u"// Legacy ICU time zones, these are not valid IANA time zone names. We also")
         println(u"// disallow the old and deprecated System V time zones.")
-        println(u"// https://ssl.icu-project.org/repos/icu/trunk/icu4c/source/tools/tzcode/icuzones")
+        println(u"// https://ssl.icu-project.org/repos/icu/trunk/icu4c/source/tools/tzcode/icuzones")  # NOQA: E501
         println(u"const char* const legacyICUTimeZones[] = {")
         for zone in chain(sorted(legacyLinks.keys()), sorted(legacyZones)):
             println(u'    "%s",' % zone)
         println(u"};")
         println(u"")
 
         println(u"} // namespace timezone")
         println(u"} // namespace js")
@@ -1406,17 +1411,18 @@ def updateCurrency(topsrcdir, args):
 
     if filename is not None:
         print("Always make sure you have the newest currency code list file!")
         updateFrom(filename)
     else:
         print("Downloading currency & funds code list...")
         request = UrlRequest(url)
         request.add_header(
-            "User-agent", "Mozilla/5.0 (Mobile; rv:{0}.0) Gecko/{0}.0 Firefox/{0}.0".format(randint(1, 999)))
+            "User-agent", "Mozilla/5.0 (Mobile; rv:{0}.0) Gecko/{0}.0 Firefox/{0}.0".format(
+                randint(1, 999)))
         with closing(urlopen(request)) as currencyFile:
             fname = urlsplit(currencyFile.geturl()).path.split("/")[-1]
             with tempfile.NamedTemporaryFile(suffix=fname) as currencyTmpFile:
                 print("File stored in %s" % currencyTmpFile.name)
                 currencyTmpFile.write(currencyFile.read())
                 currencyTmpFile.flush()
                 updateFrom(currencyTmpFile.name)
 
@@ -1440,17 +1446,18 @@ if __name__ == "__main__":
     subparsers = parser.add_subparsers(help="Select update mode")
 
     parser_tags = subparsers.add_parser("langtags",
                                         help="Update language-subtag-registry")
     parser_tags.add_argument("--url",
                              metavar="URL",
                              default="https://www.iana.org/assignments/language-subtag-registry",
                              type=EnsureHttps,
-                             help="Download url for language-subtag-registry.txt (default: %(default)s)")
+                             help="Download url for language-subtag-registry.txt "
+                             "(default: %(default)s)")
     parser_tags.add_argument("--out",
                              default="LangTagMappingsGenerated.js",
                              help="Output file (default: %(default)s)")
     parser_tags.add_argument("file",
                              nargs="?",
                              help="Local language-subtag-registry.txt file, if omitted uses <URL>")
     parser_tags.set_defaults(func=updateLangTags)
 
@@ -1470,17 +1477,17 @@ if __name__ == "__main__":
     parser_tz.add_argument("--out",
                            default="TimeZoneDataGenerated.h",
                            help="Output file (default: %(default)s)")
     parser_tz.set_defaults(func=partial(updateTzdata, topsrcdir))
 
     parser_currency = subparsers.add_parser("currency", help="Update currency digits mapping")
     parser_currency.add_argument("--url",
                                  metavar="URL",
-                                 default="https://www.currency-iso.org/dam/downloads/lists/list_one.xml",
+                                 default="https://www.currency-iso.org/dam/downloads/lists/list_one.xml",  # NOQA: E501
                                  type=EnsureHttps,
                                  help="Download url for the currency & funds code list (default: "
                                       "%(default)s)")
     parser_currency.add_argument("--out",
                                  default="CurrencyDataGenerated.js",
                                  help="Output file (default: %(default)s)")
     parser_currency.add_argument("file",
                                  nargs="?",
--- a/js/src/ctypes/libffi/generate-darwin-source-and-headers.py
+++ b/js/src/ctypes/libffi/generate-darwin-source-and-headers.py
@@ -120,17 +120,18 @@ def move_file(src_dir, dst_dir, filename
 
 def list_files(src_dir, pattern=None, filelist=None):
     if pattern:
         filelist = glob.iglob(os.path.join(src_dir, pattern))
     for file in filelist:
         yield os.path.basename(file)
 
 
-def copy_files(src_dir, dst_dir, pattern=None, filelist=None, file_suffix=None, prefix=None, suffix=None):
+def copy_files(src_dir, dst_dir, pattern=None, filelist=None, file_suffix=None, prefix=None,
+               suffix=None):
     for filename in list_files(src_dir, pattern=pattern, filelist=filelist):
         move_file(src_dir, dst_dir, filename, file_suffix=file_suffix,
                   prefix=prefix, suffix=suffix)
 
 
 def copy_src_platform_files(platform):
     src_dir = os.path.join('src', platform.src_dir)
     dst_dir = os.path.join(platform.directory, 'src', platform.src_dir)
--- a/js/src/devtools/automation/autospider.py
+++ b/js/src/devtools/automation/autospider.py
@@ -43,17 +43,19 @@ parser = argparse.ArgumentParser(
     description='Run a spidermonkey shell build job')
 parser.add_argument('--verbose', action='store_true', default=AUTOMATION,
                     help="display additional logging info")
 parser.add_argument('--dep', action='store_true',
                     help='do not clobber the objdir before building')
 parser.add_argument('--keep', action='store_true',
                     help='do not delete the sanitizer output directory (for testing)')
 parser.add_argument('--platform', '-p', type=str, metavar='PLATFORM',
-                    default='', help='build platform, including a suffix ("-debug" or "") used by buildbot to override the variant\'s "debug" setting. The platform can be used to specify 32 vs 64 bits.')
+                    default='', help='build platform, including a suffix ("-debug" or "") used '
+                    'by buildbot to override the variant\'s "debug" setting. The platform can be '
+                    'used to specify 32 vs 64 bits.')
 parser.add_argument('--timeout', '-t', type=int, metavar='TIMEOUT',
                     default=10800,
                     help='kill job after TIMEOUT seconds')
 parser.add_argument('--objdir', type=str, metavar='DIR',
                     default=env.get('OBJDIR', os.path.join(DIR.source, 'obj-spider')),
                     help='object directory')
 group = parser.add_mutually_exclusive_group()
 group.add_argument('--optimize', action='store_true',
@@ -77,17 +79,18 @@ group.add_argument('--no-jemalloc', acti
                    dest='jemalloc',
                    help='use the default allocator instead of mozilla\'s jemalloc')
 group.set_defaults(jemalloc=None)
 parser.add_argument('--run-tests', '--tests', type=str, metavar='TESTSUITE',
                     default='',
                     help="comma-separated set of test suites to add to the variant's default set")
 parser.add_argument('--skip-tests', '--skip', type=str, metavar='TESTSUITE',
                     default='',
-                    help="comma-separated set of test suites to remove from the variant's default set")
+                    help="comma-separated set of test suites to remove from the variant's default "
+                    "set")
 parser.add_argument('--build-only', '--build',
                     dest='skip_tests', action='store_const', const='all',
                     help="only do a build, do not run any tests")
 parser.add_argument('--noconf', action='store_true',
                     help="skip running configure when doing a build")
 parser.add_argument('--nobuild', action='store_true',
                     help='Do not do a build. Rerun tests on existing build.')
 parser.add_argument('variant', type=str,
@@ -395,18 +398,18 @@ if not args.nobuild:
     # Generate a configure script from configure.in.
     configure = os.path.join(DIR.js_src, 'configure')
     if need_updating_configure(configure):
         shutil.copyfile(configure + ".in", configure)
         os.chmod(configure, 0755)
 
     # Run configure
     if not args.noconf:
-        run_command(['sh', '-c', posixpath.join(PDIR.js_src, 'configure') +
-                     ' ' + CONFIGURE_ARGS], check=True)
+        run_command(['sh', '-c', posixpath.join(PDIR.js_src, 'configure') + ' ' + CONFIGURE_ARGS],
+                    check=True)
 
     # Run make
     run_command('%s -w %s' % (MAKE, MAKEFLAGS), shell=True, check=True)
 
     if use_minidump:
         # Convert symbols to breakpad format.
         hostdir = os.path.join(OBJDIR, "dist", "host", "bin")
         if not os.path.isdir(hostdir):
@@ -516,17 +519,17 @@ if args.variant == 'msan':
     fullfiles = [os.path.join(OUTDIR, f) for f in files]
 
     # Summarize results
     sites = Counter()
     errors = Counter()
     for filename in fullfiles:
         with open(os.path.join(OUTDIR, filename), 'rb') as fh:
             for line in fh:
-                m = re.match(r'^SUMMARY: \w+Sanitizer: (?:data race|use-of-uninitialized-value) (.*)',
+                m = re.match(r'^SUMMARY: \w+Sanitizer: (?:data race|use-of-uninitialized-value) (.*)',  # NOQA: E501
                              line.strip())
                 if m:
                     # Some reports include file:line:column, some just
                     # file:line. Just in case it's nondeterministic, we will
                     # canonicalize to just the line number.
                     site = re.sub(r'^(\S+?:\d+)(:\d+)* ', r'\1 ', m.group(1))
                     sites[site] += 1
 
--- a/js/src/devtools/gc/gc-test.py
+++ b/js/src/devtools/gc/gc-test.py
@@ -1,25 +1,19 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 # Works with python2.6
 
-import datetime
 import os
-import re
 import sys
-import traceback
 import math
-import string
-import copy
 import json
-import subprocess
-from subprocess import *
+from subprocess import Popen, PIPE
 from operator import itemgetter
 
 
 class Test:
     def __init__(self, path, name):
         self.path = path
         self.name = name
 
@@ -88,54 +82,53 @@ def run_test(test):
 def run_tests(tests, test_dir):
     bench_map = {}
 
     try:
         for i, test in enumerate(tests):
             filename_str = '"%s"' % test.name
             TMax, TAvg, MMax, MAvg, SMax, SAvg = run_test(test)
             bench_map[test.name] = [TMax, TAvg, MMax, MAvg, SMax, SAvg]
-            fmt = '%20s: {"TMax": %4.1f, "TAvg": %4.1f, "MMax": %4.1f, "MAvg": %4.1f, "SMax": %4.1f, "SAvg": %4.1f}'
+            fmt = '%20s: {"TMax": %4.1f, "TAvg": %4.1f, "MMax": %4.1f, "MAvg": %4.1f, "SMax": %4.1f, "SAvg": %4.1f}'  # NOQA: E501
             if (i != len(tests) - 1):
                 fmt += ','
             print(fmt % (filename_str, TMax, TAvg, MMax, MAvg, SMax, MAvg))
     except KeyboardInterrupt:
         print('fail')
 
     return dict((filename, dict(TMax=TMax, TAvg=TAvg, MMax=MMax, MAvg=MAvg, SMax=SMax, SAvg=SAvg))
                 for filename, (TMax, TAvg, MMax, MAvg, SMax, SAvg) in bench_map.iteritems())
 
 
 def compare(current, baseline):
     percent_speedups = []
     for key, current_result in current.iteritems():
         try:
             baseline_result = baseline[key]
         except KeyError:
-            print key, 'missing from baseline'
+            print(key, 'missing from baseline')
             continue
 
         val_getter = itemgetter('TMax', 'TAvg', 'MMax', 'MAvg', 'SMax', 'SAvg')
         BTMax, BTAvg, BMMax, BMAvg, BSMax, BSAvg = val_getter(baseline_result)
         CTMax, CTAvg, CMMax, CMAvg, CSMax, CSAvg = val_getter(current_result)
 
-        fmt = '%30s: %s'
         if CTAvg <= BTAvg:
             speedup = (CTAvg / BTAvg - 1) * 100
             result = 'faster: %6.2f < baseline %6.2f (%+6.2f%%)' % \
                 (CTAvg, BTAvg, speedup)
             percent_speedups.append(speedup)
         else:
             slowdown = (CTAvg / BTAvg - 1) * 100
             result = 'SLOWER: %6.2f > baseline %6.2f (%+6.2f%%) ' % \
                 (CTAvg, BTAvg, slowdown)
             percent_speedups.append(slowdown)
-        print '%30s: %s' % (key, result)
+        print('%30s: %s' % (key, result))
     if percent_speedups:
-        print 'Average speedup: %.2f%%' % avg(percent_speedups)
+        print('Average speedup: %.2f%%' % avg(percent_speedups))
 
 
 if __name__ == '__main__':
     script_path = os.path.abspath(__file__)
     script_dir = os.path.dirname(script_path)
     test_dir = os.path.join(script_dir, 'tests')
 
     from optparse import OptionParser
--- a/js/src/devtools/rootAnalysis/analyze.py
+++ b/js/src/devtools/rootAnalysis/analyze.py
@@ -17,43 +17,44 @@ import sys
 import re
 
 # Python 2/3 version independence polyfills
 
 anystring_t = str if sys.version_info[0] > 2 else basestring
 
 try:
     execfile
-except:
+except Exception:
     def execfile(thefile, globals):
         exec(compile(open(thefile).read(), filename=thefile, mode="exec"), globals)
 
 
 def env(config):
     e = dict(os.environ)
     e['PATH'] = ':'.join(p for p in (config.get('gcc_bin'),
-                                     config.get('sixgill_bin'), e['PATH']) if p)
+                                     config.get('sixgill_bin'),
+                                     e['PATH']) if p)
     e['XDB'] = '%(sixgill_bin)s/xdb.so' % config
     e['SOURCE'] = config['source']
     e['ANALYZED_OBJDIR'] = config['objdir']
     bindir = os.path.dirname(config['js'])
     e['LD_LIBRARY_PATH'] = ':'.join(p for p in (e.get('LD_LIBRARY_PATH'), bindir) if p)
     return e
 
 
 def fill(command, config):
     try:
         return tuple(s % config for s in command)
-    except:
+    except Exception:
         print("Substitution failed:")
         problems = []
         for fragment in command:
             try:
                 fragment % config
-            except:
+            except Exception:
                 problems.append(fragment)
         raise Exception("\n".join(["Substitution failed:"] + ["  %s" % s for s in problems]))
 
 
 def print_command(command, outfile=None, env=None):
     output = ' '.join(command)
     if outfile:
         output += ' > ' + outfile
@@ -236,31 +237,32 @@ parser.add_argument('--jobs', '-j', defa
                     help='number of simultaneous analyzeRoots.js jobs')
 parser.add_argument('--list', const=True, nargs='?', type=bool,
                     help='display available steps')
 parser.add_argument('--buildcommand', '--build', '-b', type=str, nargs='?',
                     help='command to build the tree being analyzed')
 parser.add_argument('--tag', '-t', type=str, nargs='?',
                     help='name of job, also sets build command to "build.<tag>"')
 parser.add_argument('--expect-file', type=str, nargs='?',
-                    help='deprecated option, temporarily still present for backwards compatibility')
+                    help='deprecated option, temporarily still present for backwards '
+                    'compatibility')
 parser.add_argument('--verbose', '-v', action='count', default=1,
                     help='Display cut & paste commands to run individual steps')
 parser.add_argument('--quiet', '-q', action='count', default=0,
                     help='Suppress output')
 
 args = parser.parse_args()
 args.verbose = max(0, args.verbose - args.quiet)
 
 for default in defaults:
     try:
         execfile(default, config)
         if args.verbose:
             print("Loaded %s" % default)
-    except:
+    except Exception:
         pass
 
 data = config.copy()
 
 for k, v in vars(args).items():
     if v is not None:
         data[k] = v
 
@@ -283,17 +285,18 @@ if 'ANALYZED_OBJDIR' in os.environ:
     data['objdir'] = os.environ['ANALYZED_OBJDIR']
 
 if 'SOURCE' in os.environ:
     data['source'] = os.environ['SOURCE']
 
 if data.get('sixgill_bin'):
     if not data.get('source'):
         path = subprocess.check_output(
-            ['sh', '-c', data['sixgill_bin'] + '/xdbkeys file_source.xdb | grep jsapi.cpp']).decode()
+            ['sh', '-c',
+             data['sixgill_bin'] + '/xdbkeys file_source.xdb | grep jsapi.cpp']).decode()
         data['source'] = path.replace("\n", "").replace("/js/src/jsapi.cpp", "")
     if not data.get('objdir'):
         path = subprocess.check_output(
             ['sh', '-c', data['sixgill_bin'] + '/xdbkeys file_source.xdb | grep jsapi.h']).decode()
         data['objdir'] = path.replace("\n", "").replace("/jsapi.h", "")
 
 steps = ['dbs',
          'gcTypes',
@@ -317,17 +320,17 @@ for step in steps:
     command, outfiles = JOBS[step]
     if isinstance(outfiles, anystring_t):
         data[step] = outfiles
     else:
         outfile = 0
         for (i, name) in out_indexes(command):
             data[name] = outfiles[outfile]
             outfile += 1
-        assert len(outfiles) == outfile, 'step \'%s\': mismatched number of output files (%d) and params (%d)' % (
+        assert len(outfiles) == outfile, 'step \'%s\': mismatched number of output files (%d) and params (%d)' % (  # NOQA: E501
             step, outfile, len(outfiles))
 
 if args.step:
     steps = steps[steps.index(args.step):]
 
 if args.upto:
     steps = steps[:steps.index(args.upto)+1]
 
--- a/js/src/devtools/rootAnalysis/explain.py
+++ b/js/src/devtools/rootAnalysis/explain.py
@@ -46,24 +46,24 @@ try:
 
             m = re.match(r'^Function.*takes unsafe address of unrooted', line)
             if m:
                 num_refs += 1
                 print >>refs, line
                 continue
 
             m = re.match(
-                r"^Function.*has unrooted.*of type.*live across GC call ('?)(.*?)('?) at \S+:\d+$", line)
+                r"^Function.*has unrooted.*of type.*live across GC call ('?)(.*?)('?) at \S+:\d+$", line)  # NOQA: E501
             if m:
                 # Function names are surrounded by single quotes. Field calls
                 # are unquoted.
                 current_gcFunction = m.group(2)
                 hazardousGCFunctions[current_gcFunction].append(line)
-                hazardOrder.append((current_gcFunction, len(
-                    hazardousGCFunctions[current_gcFunction]) - 1))
+                hazardOrder.append((current_gcFunction,
+                                    len(hazardousGCFunctions[current_gcFunction]) - 1))
                 num_hazards += 1
                 continue
 
             if current_gcFunction:
                 if not line.strip():
                     # Blank line => end of this hazard
                     current_gcFunction = None
                 else:
@@ -92,14 +92,14 @@ try:
             gcHazards = hazardousGCFunctions[gcFunction]
 
             if gcFunction in gcExplanations:
                 print >>hazards, (gcHazards[index] + gcExplanations[gcFunction])
             else:
                 print >>hazards, gcHazards[index]
 
 except IOError as e:
-    print 'Failed: %s' % str(e)
+    print('Failed: %s' % str(e))
 
 print("Wrote %s" % args.hazards)
 print("Wrote %s" % args.extra)
 print("Wrote %s" % args.refs)
 print("Found %d hazards and %d unsafe references" % (num_hazards, num_refs))
--- a/js/src/devtools/rootAnalysis/t/exceptions/test.py
+++ b/js/src/devtools/rootAnalysis/t/exceptions/test.py
@@ -1,8 +1,10 @@
+# flake8: noqa: F821
+
 test.compile("source.cpp", '-fno-exceptions')
 test.run_analysis_script('gcTypes')
 
 hazards = test.load_hazards()
 assert(len(hazards) == 0)
 
 # If we compile with exceptions, then there *should* be a hazard because
 # AutoSomething::AutoSomething might throw an exception, which would cause the
--- a/js/src/devtools/rootAnalysis/t/hazards/test.py
+++ b/js/src/devtools/rootAnalysis/t/hazards/test.py
@@ -1,8 +1,10 @@
+# flake8: noqa: F821
+
 test.compile("source.cpp")
 test.run_analysis_script('gcTypes')
 
 # gcFunctions should be the inverse, but we get to rely on unmangled names here.
 gcFunctions = test.load_gcFunctions()
 print(gcFunctions)
 assert('void GC()' in gcFunctions)
 assert('void suppressedFunction()' not in gcFunctions)
--- a/js/src/devtools/rootAnalysis/t/sixgill-tree/test.py
+++ b/js/src/devtools/rootAnalysis/t/sixgill-tree/test.py
@@ -1,8 +1,9 @@
+# flake8: noqa: F821
 import re
 
 test.compile("source.cpp")
 test.computeGCTypes()
 body = test.process_body(test.load_db_entry("src_body", re.compile(r'root_arg'))[0])
 
 # Rendering positive and negative integers
 marker1 = body.assignment_line('MARKER1')
--- a/js/src/devtools/rootAnalysis/t/suppression/test.py
+++ b/js/src/devtools/rootAnalysis/t/suppression/test.py
@@ -1,8 +1,9 @@
+# flake8: noqa: F821
 test.compile("source.cpp")
 test.run_analysis_script('gcTypes', upto='gcFunctions')
 
 # The suppressions file uses only mangled names since it's for internal use,
 # though I may change that soon given (1) the unfortunate non-uniqueness of
 # mangled constructor names, and (2) the usefulness of this file for
 # mrgiggles's reporting.
 suppressed = test.load_suppressed_functions()
--- a/js/src/devtools/rootAnalysis/t/testlib.py
+++ b/js/src/devtools/rootAnalysis/t/testlib.py
@@ -30,17 +30,17 @@ class Test(object):
 
     def infile(self, path):
         return os.path.join(self.indir, path)
 
     def binpath(self, prog):
         return os.path.join(self.cfg.sixgill_bin, prog)
 
     def compile(self, source, options=''):
-        cmd = "{CXX} -c {source} -O3 -std=c++11 -fplugin={sixgill} -fplugin-arg-xgill-mangle=1 {options}".format(
+        cmd = "{CXX} -c {source} -O3 -std=c++11 -fplugin={sixgill} -fplugin-arg-xgill-mangle=1 {options}".format(  # NOQA: E501
             source=self.infile(source),
             CXX=self.cfg.cxx, sixgill=self.cfg.sixgill_plugin,
             options=options)
         if self.cfg.verbose:
             print("Running %s" % cmd)
         subprocess.check_call(["sh", "-c", cmd])
 
     def load_db_entry(self, dbname, pattern):
@@ -52,17 +52,18 @@ class Test(object):
                                              universal_newlines=True)
             matches = list(filter(lambda _: re.search(pattern, _), output.splitlines()))
             if len(matches) == 0:
                 raise Exception("entry not found")
             if len(matches) > 1:
                 raise Exception("multiple entries found")
             pattern = matches[0]
 
-        output = subprocess.check_output([self.binpath("xdbfind"), "-json", dbname + ".xdb", pattern],
+        output = subprocess.check_output([self.binpath("xdbfind"), "-json", dbname + ".xdb",
+                                          pattern],
                                          universal_newlines=True)
         return json.loads(output)
 
     def run_analysis_script(self, phase, upto=None):
         open("defaults.py", "w").write('''\
 analysis_scriptdir = '{scriptdir}'
 sixgill_bin = '{bindir}'
 '''.format(scriptdir=scriptdir, bindir=self.cfg.sixgill_bin))
@@ -104,17 +105,17 @@ sixgill_bin = '{bindir}'
         return gctypes
 
     def load_gcFunctions(self):
         return self.load_text_file('gcFunctions.lst', extract=extract_unmangled)
 
     def load_hazards(self):
         def grab_hazard(line):
             m = re.match(
-                r"Function '(.*?)' has unrooted '(.*?)' of type '(.*?)' live across GC call '(.*?)' at (.*)", line)
+                r"Function '(.*?)' has unrooted '(.*?)' of type '(.*?)' live across GC call '(.*?)' at (.*)", line)  # NOQA: E501
             if m:
                 info = list(m.groups())
                 info[0] = info[0].split("$")[-1]
                 info[3] = info[3].split("$")[-1]
                 return HazardSummary(*info)
             return None
 
         return self.load_text_file('rootingHazards.txt', extract=grab_hazard)
--- a/js/src/gc/GenerateStatsPhases.py
+++ b/js/src/gc/GenerateStatsPhases.py
@@ -1,12 +1,14 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
+# flake8: noqa: F821
+
 # Generate graph structures for GC statistics recording.
 #
 # Stats phases are nested and form a directed acyclic graph starting
 # from a set of root phases. Importantly, a phase may appear under more
 # than one parent phase.
 #
 # For example, the following arrangement is possible:
 #
@@ -45,17 +47,16 @@
 #    +---+   +---+   +---+
 #              |       |
 #              v       v
 #            +---+   +---+
 #            | E |   | E'|
 #            +---+   +---+
 
 import re
-import sys
 import collections
 
 
 class PhaseKind():
     def __init__(self, name, descr, bucket, children=[]):
         self.name = name
         self.descr = descr
         self.bucket = bucket
@@ -166,22 +167,21 @@ PhaseKindGraphRoots = [
     PhaseKind("TRACE_HEAP", "Trace Heap", 47, [
         MarkRootsPhaseKind,
     ]),
     PhaseKind("BARRIER", "Barriers", 55, [
         UnmarkGrayPhaseKind
     ])
 ]
 
-# Make a linear list of all unique phases by performing a depth first
-# search on the phase graph starting at the roots.  This will be used to
-# generate the PhaseKind enum.
-
 
 def findAllPhaseKinds():
+    # Make a linear list of all unique phases by performing a depth first
+    # search on the phase graph starting at the roots.  This will be used to
+    # generate the PhaseKind enum.
     phases = []
     seen = set()
 
     def dfs(phase):
         if phase in seen:
             return
         phases.append(phase)
         seen.add(phase)
@@ -190,21 +190,20 @@ def findAllPhaseKinds():
 
     for phase in PhaseKindGraphRoots:
         dfs(phase)
     return phases
 
 
 AllPhaseKinds = findAllPhaseKinds()
 
-# Expand the DAG into a tree, duplicating phases which have more than
-# one parent.
-
 
 class Phase:
+    # Expand the DAG into a tree, duplicating phases which have more than
+    # one parent.
     def __init__(self, phaseKind, parent):
         self.phaseKind = phaseKind
         self.parent = parent
         self.depth = parent.depth + 1 if parent else 0
         self.children = []
         self.nextSibling = None
         self.nextInPhaseKind = None
 
@@ -321,17 +320,17 @@ def generateCpp(out):
     #
     def name(phase):
         return "Phase::" + phase.name if phase else "Phase::NONE"
 
     out.write("static const PhaseTable phases = {\n")
     for phase in AllPhases:
         firstChild = phase.children[0] if phase.children else None
         phaseKind = phase.phaseKind
-        out.write("    /* %s */ PhaseInfo { %s, %s, %s, %s, PhaseKind::%s, %d, \"%s\", \"%s\" },\n" %
+        out.write("    /* %s */ PhaseInfo { %s, %s, %s, %s, PhaseKind::%s, %d, \"%s\", \"%s\" },\n" %  # NOQA: E501
                   (name(phase),
                    name(phase.parent),
                    name(firstChild),
                    name(phase.nextSibling),
                    name(phase.nextInPhaseKind),
                    phaseKind.name,
                    phase.depth,
                    phaseKind.descr,
--- a/js/src/gdb/lib-for-tests/catcher.py
+++ b/js/src/gdb/lib-for-tests/catcher.py
@@ -3,16 +3,17 @@
 # we have --eval-command run prologue.py directly, syntax errors there will
 # lead GDB to exit with no indication anything went wrong.
 #
 # To avert that, we use this very small launcher script to run prologue.py
 # and catch errors.
 #
 # Remember, errors in this file will cause spurious passes, so keep this as
 # simple as possible!
+# flake8: noqa: F821
 
 import os
 import sys
 import traceback
 
 
 def execfile(filename, globs, locs):
     with open(filename) as f:
--- a/js/src/gdb/lib-for-tests/prologue.py
+++ b/js/src/gdb/lib-for-tests/prologue.py
@@ -1,10 +1,11 @@
+# flake8: noqa: F821
+
 import gdb
-import os
 import re
 import sys
 import traceback
 
 # testlibdir is set on the GDB command line, via --eval-command python testlibdir=...
 sys.path[0:0] = [testlibdir]
 
 active_fragment = None
@@ -35,17 +36,17 @@ def assert_eq(actual, expected):
         raise AssertionError("""Unexpected result:
 expected: %r
 actual:   %r""" % (expected, actual))
 
 # Assert that |expected| regex matches |actual| result; if not, complain in a helpful way.
 
 
 def assert_match(actual, expected):
-    if re.match(expected, actual, re.MULTILINE) == None:
+    if re.match(expected, actual, re.MULTILINE) is None:
         raise AssertionError("""Unexpected result:
 expected pattern: %r
 actual:           %r""" % (expected, actual))
 
 # Assert that |value|'s pretty-printed form is |form|. If |value| is a
 # string, then evaluate it with gdb.parse_and_eval to produce a value.
 
 
@@ -81,17 +82,17 @@ def assert_subprinter_registered(printer
                              "'info pretty-printer' says:\n"
                              "%s" % (printer, subprinter, output))
 
 
 enable_bigint = False
 try:
     if gdb.lookup_type('JS::BigInt'):
         enable_bigint = True
-except:
+except Exception:
     pass
 
 # Request full stack traces for Python errors.
 gdb.execute('set python print-stack full')
 
 # Tell GDB not to ask the user about the things we tell it to do.
 gdb.execute('set confirm off', False)
 
--- a/js/src/gdb/mozilla/Interpreter.py
+++ b/js/src/gdb/mozilla/Interpreter.py
@@ -2,20 +2,19 @@
 
 import gdb
 import mozilla.prettyprinters as prettyprinters
 
 prettyprinters.clear_module_printers(__name__)
 
 from mozilla.prettyprinters import pretty_printer
 
-# Cache information about the Interpreter types for this objfile.
-
 
 class InterpreterTypeCache(object):
+    # Cache information about the Interpreter types for this objfile.
     def __init__(self):
         self.tValue = gdb.lookup_type('JS::Value')
         self.tJSOp = gdb.lookup_type('JSOp')
         self.tScriptFrameIterData = gdb.lookup_type('js::ScriptFrameIter::Data')
         self.tInterpreterFrame = gdb.lookup_type('js::InterpreterFrame')
         self.tBaselineFrame = gdb.lookup_type('js::jit::BaselineFrame')
         self.tRematerializedFrame = gdb.lookup_type('js::jit::RematerializedFrame')
         self.tDebugFrame = gdb.lookup_type('js::wasm::DebugFrame')
@@ -35,17 +34,17 @@ class InterpreterRegs(object):
     # trying. No 'children', just 'to_string'.
     def to_string(self):
         fp_ = 'fp_ = {}'.format(self.value['fp_'])
         slots = (self.value['fp_'] + 1).cast(self.itc.tValue.pointer())
         sp = 'sp = fp_.slots() + {}'.format(self.value['sp'] - slots)
         pc = self.value['pc']
         try:
             opcode = pc.dereference().cast(self.itc.tJSOp)
-        except:
+        except Exception:
             opcode = 'bad pc'
         pc = 'pc = {} ({})'.format(pc.cast(self.cache.void_ptr_t), opcode)
         return '{{ {}, {}, {} }}'.format(fp_, sp, pc)
 
 
 @pretty_printer('js::AbstractFramePtr')
 class AbstractFramePtr(object):
     Tag_ScriptFrameIterData = 0x0
--- a/js/src/gdb/mozilla/IonGraph.py
+++ b/js/src/gdb/mozilla/IonGraph.py
@@ -4,17 +4,16 @@ script provide commands to let GDB open 
 any compilation, as they are executed within GDB.
 
 This python script should be sourced within GDB after loading the python scripts
 provided with SpiderMonkey.
 """
 
 import gdb
 import os
-import io
 import subprocess
 import tempfile
 import time
 import mozilla.prettyprinters
 from mozilla.prettyprinters import pretty_printer
 
 # Forget any printers from previous loads of this module.
 mozilla.prettyprinters.clear_module_printers(__name__)
@@ -117,17 +116,17 @@ class DotBinParameter(gdb.Parameter):
 class PngViewerBinParameter(gdb.Parameter):
     set_doc = "Set the path to a png viewer binary, used by iongraph command."
     show_doc = "Show the path to a png viewer binary, used by iongraph command."
 
     def get_set_string(self):
         return "Path to a png viewer binary changed to: %s" % self.value
 
     def get_show_string(self):
-        return "Path to a png viewer binary set to: %s" % value
+        return "Path to a png viewer binary set to: %s" % self.value
 
     def __init__(self):
         super(PngViewerBinParameter, self).__init__(
             "pngviewer-bin", gdb.COMMAND_SUPPORT, gdb.PARAM_FILENAME)
         self.value = os.getenv("GDB_PNGVIEWER", "")
         if self.value == "":
             self.value = search_in_path("xdg-open")
 
@@ -196,32 +195,33 @@ class IonGraphCommand(gdb.Command):
         self.displayMIRGraph(json)
 
     def displayMIRGraph(self, jsonStr):
         png = tempfile.NamedTemporaryFile()
 
         # start all processes in a shell-like equivalent of:
         #   iongraph < json | dot > tmp.png; xdg-open tmp.png
         i = subprocess.Popen([iongraph.value, '--funcnum', '0', '--passnum', '0',
-                              '--out-mir', '-', '-'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+                              '--out-mir', '-', '-'], stdin=subprocess.PIPE,
+                             stdout=subprocess.PIPE)
         d = subprocess.Popen([dot.value, '-Tpng'], stdin=i.stdout, stdout=png)
 
         # Write the json file as the input of the iongraph command.
         i.stdin.write(jsonStr.encode('utf8'))
         i.stdin.close()
         i.stdout.close()
 
         # Wait for iongraph and dot, such that the png file contains all the
         # bits needed to by the png viewer.
         i.wait()
-        output = d.communicate()[0]
+        d.communicate()[0]
 
         # Spawn & detach the png viewer, to which we give the name of the
         # temporary file.  Note, as we do not want to wait on the image viewer,
         # there is a minor race between the removal of the temporary file, which
         # would happen at the next garbage collection cycle, and the start of
         # the png viewer.  We could use a pipe, but unfortunately, this does not
         # seems to be supported by xdg-open.
-        v = subprocess.Popen([pngviewer.value, png.name], stdin=None, stdout=None)
+        subprocess.Popen([pngviewer.value, png.name], stdin=None, stdout=None)
         time.sleep(1)
 
 
 iongraph_cmd = IonGraphCommand()
--- a/js/src/gdb/mozilla/JSObject.py
+++ b/js/src/gdb/mozilla/JSObject.py
@@ -1,13 +1,12 @@
 # Pretty-printers for SpiderMonkey JSObjects.
 
 import re
 import gdb
-import mozilla.JSString
 import mozilla.prettyprinters as prettyprinters
 from mozilla.prettyprinters import ptr_pretty_printer, ref_pretty_printer
 from mozilla.Root import deref
 
 prettyprinters.clear_module_printers(__name__)
 
 
 class JSObjectTypeCache(object):
--- a/js/src/gdb/mozilla/JSString.py
+++ b/js/src/gdb/mozilla/JSString.py
@@ -1,26 +1,25 @@
 # Pretty-printers for SpiderMonkey strings.
 
 import gdb
 import mozilla.prettyprinters
-from mozilla.prettyprinters import pretty_printer, ptr_pretty_printer
+from mozilla.prettyprinters import ptr_pretty_printer
 
 try:
     chr(10000)  # UPPER RIGHT PENCIL
 except ValueError as exc:  # yuck, we are in Python 2.x, so chr() is 8-bit
     chr = unichr  # replace with teh unicodes
 
 # Forget any printers from previous loads of this module.
 mozilla.prettyprinters.clear_module_printers(__name__)
 
-# Cache information about the JSString type for this objfile.
-
 
 class JSStringTypeCache(object):
+    # Cache information about the JSString type for this objfile.
     def __init__(self, cache):
         dummy = gdb.Value(0).cast(cache.JSString_ptr_t)
         self.NON_ATOM_BIT = dummy['NON_ATOM_BIT']
         self.LINEAR_BIT = dummy['LINEAR_BIT']
         self.INLINE_CHARS_BIT = dummy['INLINE_CHARS_BIT']
         self.TYPE_FLAGS_MASK = dummy['TYPE_FLAGS_MASK']
         self.LATIN1_CHARS_BIT = dummy['LATIN1_CHARS_BIT']
 
--- a/js/src/gdb/mozilla/JSSymbol.py
+++ b/js/src/gdb/mozilla/JSSymbol.py
@@ -1,11 +1,10 @@
 # Pretty-printer for SpiderMonkey symbols.
 
-import gdb
 import mozilla.prettyprinters
 from mozilla.prettyprinters import ptr_pretty_printer
 
 # Forget any printers from previous loads of this module.
 mozilla.prettyprinters.clear_module_printers(__name__)
 
 # JS::SymbolCode enumerators
 InSymbolRegistry = 0xfffffffe
--- a/js/src/gdb/mozilla/Root.py
+++ b/js/src/gdb/mozilla/Root.py
@@ -1,23 +1,23 @@
 # Pretty-printers and utilities for SpiderMonkey rooting templates:
 # Rooted, Handle, MutableHandle, etc.
 
 import mozilla.prettyprinters
-from mozilla.prettyprinters import pretty_printer, template_pretty_printer
+from mozilla.prettyprinters import template_pretty_printer
 
 # Forget any printers from previous loads of this module.
 mozilla.prettyprinters.clear_module_printers(__name__)
 
-# Common base class for all the rooting template pretty-printers. All these
-# templates have one member holding the referent (or a pointer to it), so
-# there's not much to it.
-
 
 class Common(object):
+    # Common base class for all the rooting template pretty-printers. All these
+    # templates have one member holding the referent (or a pointer to it), so
+    # there's not much to it.
+
     # The name of the template member holding the referent.
     member = 'ptr'
 
     # If True, this is a handle type, and should be dereferenced. If False,
     # the template member holds the referent directly.
     handle = False
 
     # If True, we should strip typedefs from our referent type. (Rooted<T>
@@ -75,20 +75,19 @@ class Handle(Common):
 class MutableHandle(Common):
     handle = True
 
 
 @template_pretty_printer("js::BarrieredBase")
 class BarrieredBase(Common):
     member = 'value'
 
-# Return the referent of a HeapPtr, Rooted, or Handle.
-
 
 def deref(root):
+    # Return the referent of a HeapPtr, Rooted, or Handle.
     tag = root.type.strip_typedefs().tag
     if not tag:
         raise TypeError("Can't dereference type with no structure tag: %s" % (root.type,))
     elif tag.startswith('js::HeapPtr<'):
         return root['value']
     elif tag.startswith('JS::Rooted<'):
         return root['ptr']
     elif tag.startswith('JS::Handle<'):
--- a/js/src/gdb/mozilla/autoload.py
+++ b/js/src/gdb/mozilla/autoload.py
@@ -16,20 +16,19 @@ import mozilla.JSString
 import mozilla.JSSymbol
 import mozilla.Root
 import mozilla.jsid
 import mozilla.jsval
 import mozilla.unwind
 
 # The user may have personal pretty-printers. Get those, too, if they exist.
 try:
-    import my_mozilla_printers
+    import my_mozilla_printers  # NOQA: F401
 except ImportError:
     pass
 
-# Register our pretty-printers with |objfile|.
-
 
 def register(objfile):
+    # Register our pretty-printers with |objfile|.
     lookup = mozilla.prettyprinters.lookup_for_objfile(objfile)
     if lookup:
         gdb.printing.register_pretty_printer(objfile, lookup, replace=True)
     mozilla.unwind.register_unwinder(objfile)
--- a/js/src/gdb/mozilla/jsid.py
+++ b/js/src/gdb/mozilla/jsid.py
@@ -52,22 +52,21 @@ class jsid(object):
             if bits == jsid.TYPE_SYMBOL:
                 return "JSID_EMPTY"
             body = ((bits & ~jsid.TYPE_MASK)
                     .cast(self.cache.JSSymbol_ptr_t))
         else:
             body = "<unrecognized>"
         return '$jsid(%s)' % (body,)
 
-# Hard-code the referent type pretty-printer for jsid roots and handles.
-# See the comment for mozilla.Root.Common.__init__.
-
 
 @pretty_printer('JS::Rooted<long>')
 def RootedJSID(value, cache):
+    # Hard-code the referent type pretty-printer for jsid roots and handles.
+    # See the comment for mozilla.Root.Common.__init__.
     return mozilla.Root.Rooted(value, cache, jsid)
 
 
 @pretty_printer('JS::Handle<long>')
 def HandleJSID(value, cache):
     return mozilla.Root.Handle(value, cache, jsid)
 
 
--- a/js/src/gdb/mozilla/jsval.py
+++ b/js/src/gdb/mozilla/jsval.py
@@ -1,14 +1,14 @@
 # Pretty-printers for SpiderMonkey's JS::Value.
 
 import gdb
 import gdb.types
 import mozilla.prettyprinters
-from mozilla.prettyprinters import pretty_printer, ptr_pretty_printer
+from mozilla.prettyprinters import pretty_printer
 
 # Forget any printers from previous loads of this module.
 mozilla.prettyprinters.clear_module_printers(__name__)
 
 # Summary of the JS::Value type:
 #
 # JS::Value is a 64-bit discriminated union, with JSString*, JSObject*, IEEE
 # 64-bit floating-point, and 32-bit integer branches (and a few others).
@@ -83,22 +83,21 @@ class Box(object):
 
     # Return this value as a 32-bit integer, double, or address.
     def as_uint32(self): raise NotImplementedError
 
     def as_double(self): raise NotImplementedError
 
     def as_address(self): raise NotImplementedError
 
-# Packed non-number boxing --- the format used on x86_64. It would be nice to
-# simply call Value::toInt32, etc. here, but the debugger is likely to see many
-# Values, and doing several inferior calls for each one seems like a bad idea.
-
 
 class Punbox(Box):
+    # Packed non-number boxing --- the format used on x86_64. It would be nice to
+    # simply call Value::toInt32, etc. here, but the debugger is likely to see many
+    # Values, and doing several inferior calls for each one seems like a bad idea.
 
     FULL_WIDTH = 64
     TAG_SHIFT = 47
     PAYLOAD_MASK = (1 << TAG_SHIFT) - 1
     TAG_MASK = (1 << (FULL_WIDTH - TAG_SHIFT)) - 1
     TAG_MAX_DOUBLE = 0x1fff0
     TAG_TYPE_MASK = 0x0000f
 
@@ -125,20 +124,20 @@ class Nunbox(Box):
         if tag < Nunbox.TAG_CLEAR:
             return self.jtc.DOUBLE
         return tag & Nunbox.TAG_TYPE_MASK
 
     def as_uint32(self): return int(self.asBits & Nunbox.PAYLOAD_MASK)
 
     def as_address(self): return gdb.Value(self.asBits & Nunbox.PAYLOAD_MASK)
 
-# Cache information about the Value type for this objfile.
-
 
 class JSValueTypeCache(object):
+    # Cache information about the Value type for this objfile.
+
     def __init__(self, cache):
         # Capture the tag values.
         d = gdb.types.make_enum_dict(gdb.lookup_type('JSValueType'))
 
         # The enum keys are prefixed when building with some compilers (clang at
         # a minimum), so use a helper function to handle either key format.
         def get(key):
             val = d.get(key)
@@ -157,17 +156,17 @@ class JSValueTypeCache(object):
         self.OBJECT = get('JSVAL_TYPE_OBJECT')
 
         self.enable_bigint = False
         try:
             # Looking up the tag will throw an exception if BigInt is not
             # enabled.
             self.BIGINT = get('JSVAL_TYPE_BIGINT')
             self.enable_bigint = True
-        except:
+        except Exception:
             pass
 
         # Let self.magic_names be an array whose i'th element is the name of
         # the i'th magic value.
         d = gdb.types.make_enum_dict(gdb.lookup_type('JSWhyMagic'))
         self.magic_names = list(range(max(d.values()) + 1))
         for (k, v) in d.items():
             self.magic_names[v] = k
--- a/js/src/gdb/mozilla/prettyprinters.py
+++ b/js/src/gdb/mozilla/prettyprinters.py
@@ -4,24 +4,23 @@ import gdb
 import re
 
 # Decorators for declaring pretty-printers.
 #
 # In each case, the decoratee should be a SpiderMonkey-style pretty-printer
 # factory, taking both a gdb.Value instance and a TypeCache instance as
 # arguments; see TypeCache, below.
 
-# Check that |fn| hasn't been registered as a pretty-printer under some
-# other name already. (The 'enabled' flags used by GDB's
-# 'enable/disable/info pretty-printer' commands are simply stored as
-# properties of the function objects themselves, so a single function
-# object can't carry the 'enabled' flags for two different printers.)
-
 
 def check_for_reused_pretty_printer(fn):
+    # Check that |fn| hasn't been registered as a pretty-printer under some
+    # other name already. (The 'enabled' flags used by GDB's
+    # 'enable/disable/info pretty-printer' commands are simply stored as
+    # properties of the function objects themselves, so a single function
+    # object can't carry the 'enabled' flags for two different printers.)
     if hasattr(fn, 'enabled'):
         raise RuntimeError("pretty-printer function %r registered more than once" % fn)
 
 
 # a dictionary mapping gdb.Type tags to pretty-printer functions.
 printers_by_tag = {}
 
 # A decorator: add the decoratee as a pretty-printer lookup function for types
--- a/js/src/gdb/mozilla/unwind.py
+++ b/js/src/gdb/mozilla/unwind.py
@@ -46,21 +46,21 @@ SizeOfFramePrefix = {
     'JitFrame_WasmToJSJit': 'JitFrameLayout',
     'JitFrame_Rectifier': 'RectifierFrameLayout',
     'JitFrame_IonAccessorIC': 'IonAccessorICFrameLayout',
     'JitFrame_IonICCall': 'IonICCallFrameLayout',
     'JitFrame_Exit': 'ExitFrameLayout',
     'JitFrame_Bailout': 'JitFrameLayout',
 }
 
-# All types and symbols that we need are attached to an object that we
-# can dispose of as needed.
-
 
 class UnwinderTypeCache(object):
+    # All types and symbols that we need are attached to an object that we
+    # can dispose of as needed.
+
     def __init__(self):
         self.d = None
         self.frame_enum_names = {}
         self.frame_class_types = {}
 
     # We take this bizarre approach to defer trying to look up any
     # symbols until absolutely needed.  Without this, the loading
     # approach taken by the gdb-tests would cause spurious exceptions.
@@ -169,17 +169,18 @@ class JitFrameDecorator(FrameDecorator):
         self.cache = cache
 
     def _decode_jitframe(self, this_frame):
         calleetoken = long(this_frame['calleeToken_'])
         tag = calleetoken & 3
         calleetoken = calleetoken ^ tag
         function = None
         script = None
-        if tag == self.cache.CalleeToken_Function or tag == self.cache.CalleeToken_FunctionConstructing:
+        if (tag == self.cache.CalleeToken_Function or
+            tag == self.cache.CalleeToken_FunctionConstructing):
             fptr = gdb.Value(calleetoken).cast(self.cache.JSFunction)
             try:
                 atom = fptr['atom_']
                 if atom:
                     function = str(atom)
             except gdb.MemoryError:
                 function = "(could not read function name)"
             script = fptr['u']['i']['s']['script_']
@@ -340,17 +341,17 @@ class UnwinderState(object):
     def text_address_claimed(self, pc):
         for (start, end) in self.proc_mappings:
             if (pc >= start and pc <= end):
                 return True
         return False
 
     # See whether |pc| is claimed by the Jit.
     def is_jit_address(self, pc):
-        if self.proc_mappings != None:
+        if self.proc_mappings is not None:
             return not self.text_address_claimed(pc)
 
         cx = self.get_tls_context()
         runtime = cx['runtime_']['value']
         if long(runtime.address) == 0:
             return False
 
         jitRuntime = runtime['jitRuntime_']
@@ -571,17 +572,17 @@ class SpiderMonkeyUnwinder(Unwinder):
     def make_unwinder(self, pending_frame):
         # gdb doesn't provide a good way to find the architecture.
         # See https://sourceware.org/bugzilla/show_bug.cgi?id=19399
         # So, we look at each known architecture and see if the
         # corresponding "unique register" is known.
         for unwinder in self.UNWINDERS:
             try:
                 pending_frame.read_register(unwinder.SENTINEL_REGISTER)
-            except:
+            except Exception:
                 # Failed to read the register, so let's keep going.
                 # This is more fragile than it might seem, because it
                 # fails if the sentinel register wasn't saved in the
                 # previous frame.
                 continue
             return unwinder(self.typecache)
         return None
 
--- a/js/src/gdb/run-tests.py
+++ b/js/src/gdb/run-tests.py
@@ -4,27 +4,25 @@
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 # run-tests.py -- Python harness for GDB SpiderMonkey support
 
 import os
 import re
 import subprocess
 import sys
-import traceback
-from threading import Thread
 
 # From this directory:
 import progressbar
 from taskpool import TaskPool, get_cpu_count
 
-# Backported from Python 3.1 posixpath.py
-
 
 def _relpath(path, start=None):
+    # Backported from Python 3.1 posixpath.py
+
     """Return a relative version of a path"""
 
     if not path:
         raise ValueError("no path specified")
 
     if start is None:
         start = os.curdir
 
@@ -53,20 +51,19 @@ def make_shell_cmd(l):
         if shell_need_escapes.search(s):
             if s.find("'") < 0:
                 return "'" + s + "'"
             return '"' + shell_dquote_escapes.sub('\\g<0>', s) + '"'
         return s
 
     return ' '.join([quote(_) for _ in l])
 
+
 # An instance of this class collects the lists of passing, failing, and
 # timing-out tests, runs the progress bar, and prints a summary at the end.
-
-
 class Summary(object):
 
     class SummaryBar(progressbar.ProgressBar):
         def __init__(self, limit):
             super(Summary.SummaryBar, self).__init__('', limit, 24)
 
         def start(self):
             self.label = '[starting           ]'
@@ -172,17 +169,18 @@ class Test(TaskPool.Task):
         return [OPTIONS.gdb_executable,
                 '-nw',          # Don't create a window (unnecessary?)
                 '-nx',          # Don't read .gdbinit.
                 '--ex', 'add-auto-load-safe-path %s' % (OPTIONS.bindir,),
                 '--ex', 'set env LD_LIBRARY_PATH %s' % (OPTIONS.bindir,),
                 '--ex', 'file %s' % (os.path.join(OPTIONS.bindir, 'gdb-tests'),),
                 '--eval-command', 'python testlibdir=%r' % (testlibdir,),
                 '--eval-command', 'python testscript=%r' % (self.test_path,),
-                '--eval-command', 'python exec(open(%r).read())' % os.path.join(testlibdir, 'catcher.py')]
+                '--eval-command', 'python exec(open(%r).read())' % os.path.join(testlibdir,
+                                                                                'catcher.py')]
 
     def start(self, pipe, deadline):
         super(Test, self).start(pipe, deadline)
         if OPTIONS.show_cmd:
             self.summary.interleave_output(lambda: self.show_cmd(sys.stdout))
 
     def onStdout(self, text):
         self.stdout += text
@@ -274,17 +272,18 @@ def main(argv):
     op.add_option('--worklist', dest='worklist', metavar='FILE',
                   help='Read tests to run from [FILE] (or run all if [FILE] not found);\n'
                        'write failures back to [FILE]')
     op.add_option('-r', '--read-tests', dest='read_tests', metavar='FILE',
                   help='Run test files listed in [FILE]')
     op.add_option('-w', '--write-failures', dest='write_failures', metavar='FILE',
                   help='Write failing tests to [FILE]')
     op.add_option('--write-failure-output', dest='write_failure_output', action='store_true',
-                  help='With --write-failures=FILE, additionally write the output of failed tests to [FILE]')
+                  help='With --write-failures=FILE, additionally write the output of failed '
+                  'tests to [FILE]')
     op.add_option('--gdb', dest='gdb_executable', metavar='EXECUTABLE', default='gdb',
                   help='Run tests with [EXECUTABLE], rather than plain \'gdb\'.')
     op.add_option('--srcdir', dest='srcdir',
                   default=os.path.abspath(os.path.join(script_dir, '..')),
                   help='Use SpiderMonkey sources in [SRCDIR].')
     op.add_option('--testdir', dest='testdir', default=os.path.join(script_dir, 'tests'),
                   help='Find tests in [TESTDIR].')
     op.add_option('--builddir', dest='builddir',
--- a/js/src/gdb/taskpool.py
+++ b/js/src/gdb/taskpool.py
@@ -1,27 +1,28 @@
+# flake8: noqa: F821
+
 import fcntl
 import os
 import select
 import time
 from subprocess import Popen, PIPE
 
-# Run a series of subprocesses. Try to keep up to a certain number going in
-# parallel at any given time. Enforce time limits.
-#
-# This is implemented using non-blocking I/O, and so is Unix-specific.
-#
-# We assume that, if a task closes its standard error, then it's safe to
-# wait for it to terminate. So an ill-behaved task that closes its standard
-# output and then hangs will hang us, as well. However, as it takes special
-# effort to close one's standard output, this seems unlikely to be a
-# problem in practice.
-
 
 class TaskPool(object):
+    # Run a series of subprocesses. Try to keep up to a certain number going in
+    # parallel at any given time. Enforce time limits.
+    #
+    # This is implemented using non-blocking I/O, and so is Unix-specific.
+    #
+    # We assume that, if a task closes its standard error, then it's safe to
+    # wait for it to terminate. So an ill-behaved task that closes its standard
+    # output and then hangs will hang us, as well. However, as it takes special
+    # effort to close one's standard output, this seems unlikely to be a
+    # problem in practice.
 
     # A task we should run in a subprocess. Users should subclass this and
     # fill in the methods as given.
     class Task(object):
         def __init__(self):
             self.pipe = None
             self.start_time = None
 
@@ -71,45 +72,46 @@ class TaskPool(object):
         self.next_pending = next(self.pending, None)
 
     def run_all(self):
         # The currently running tasks: a set of Task instances.
         running = set()
         with open(os.devnull, 'r') as devnull:
             while True:
                 while len(running) < self.job_limit and self.next_pending:
-                    t = self.next_pending
-                    p = Popen(t.cmd(), bufsize=16384,
+                    task = self.next_pending
+                    p = Popen(task.cmd(), bufsize=16384,
                               stdin=devnull, stdout=PIPE, stderr=PIPE,
                               cwd=self.cwd)
 
                     # Put the stdout and stderr pipes in non-blocking mode. See
                     # the post-'select' code below for details.
                     flags = fcntl.fcntl(p.stdout, fcntl.F_GETFL)
                     fcntl.fcntl(p.stdout, fcntl.F_SETFL, flags | os.O_NONBLOCK)
                     flags = fcntl.fcntl(p.stderr, fcntl.F_GETFL)
                     fcntl.fcntl(p.stderr, fcntl.F_SETFL, flags | os.O_NONBLOCK)
 
-                    t.start(p, time.time() + self.timeout)
-                    running.add(t)
+                    task.start(p, time.time() + self.timeout)
+                    running.add(task)
                     self.next_pending = next(self.pending, None)
 
                 # If we have no tasks running, and the above wasn't able to
                 # start any new ones, then we must be done!
                 if not running:
                     break
 
                 # How many seconds do we have until the earliest deadline?
                 now = time.time()
                 secs_to_next_deadline = max(min([t.deadline for t in running]) - now, 0)
 
                 # Wait for output or a timeout.
                 stdouts_and_stderrs = ([t.pipe.stdout for t in running]
                                        + [t.pipe.stderr for t in running])
-                (readable, w, x) = select.select(stdouts_and_stderrs, [], [], secs_to_next_deadline)
+                (readable, w, x) = select.select(stdouts_and_stderrs, [], [],
+                                                 secs_to_next_deadline)
                 finished = set()
                 terminate = set()
                 for t in running:
                     # Since we've placed the pipes in non-blocking mode, these
                     # 'read's will simply return as many bytes as are available,
                     # rather than blocking until they have accumulated the full
                     # amount requested (or reached EOF). The 'read's should
                     # never throw, since 'select' has told us there was
--- a/js/src/gdb/tests/test-ExecutableAllocator.py
+++ b/js/src/gdb/tests/test-ExecutableAllocator.py
@@ -1,9 +1,12 @@
 # Tests for ExecutableAllocator pretty-printing
+# Ignore flake8 errors "undefined name 'assert_regexp_pretty'"
+# As it is caused by the way we instantiate this file
+# flake8: noqa: F821
 
 assert_subprinter_registered('SpiderMonkey', 'JS::GCCellPtr')
 
 run_fragment('ExecutableAllocator.empty')
 
 assert_pretty('execAlloc', 'ExecutableAllocator([])')
 
 run_fragment('ExecutableAllocator.onepool')
--- a/js/src/gdb/tests/test-GCCellPtr.py
+++ b/js/src/gdb/tests/test-GCCellPtr.py
@@ -1,9 +1,10 @@
 # Tests for GCCellPtr pretty-printing
+# flake8: noqa: F821
 
 assert_subprinter_registered('SpiderMonkey', 'JS::GCCellPtr')
 
 run_fragment('GCCellPtr.simple')
 
 assert_pretty('nulll', 'JS::GCCellPtr(nullptr)')
 assert_pretty('object', 'JS::GCCellPtr((JSObject*) )')
 assert_pretty('string', 'JS::GCCellPtr((JSString*) )')
--- a/js/src/gdb/tests/test-Interpreter.py
+++ b/js/src/gdb/tests/test-Interpreter.py
@@ -1,9 +1,12 @@
 # Test printing interpreter internal data structures.
+# Ignore flake8 errors "undefined name 'assert_pretty'"
+# As it is caused by the way we instantiate this file
+# flake8: noqa: F821
 
 assert_subprinter_registered('SpiderMonkey', 'js::InterpreterRegs')
 
 run_fragment('Interpreter.Regs')
 
 assert_pretty('regs', '{ fp_ = , sp = fp_.slots() + 2, pc =  (JSOP_IFEQ) }')
 
 run_fragment('Interpreter.AbstractFramePtr')
--- a/js/src/gdb/tests/test-JSObject-null.py
+++ b/js/src/gdb/tests/test-JSObject-null.py
@@ -1,6 +1,8 @@
+# flake8: noqa: F821
+
 gdb.execute('set print address on')
 
 run_fragment('JSObject.null')
 
 assert_pretty('null', '0x0')
 assert_pretty('nullRaw', '0x0')
--- a/js/src/gdb/tests/test-JSObject.py
+++ b/js/src/gdb/tests/test-JSObject.py
@@ -1,9 +1,10 @@
 # Printing JSObjects.
+# flake8: noqa: F821
 
 assert_subprinter_registered('SpiderMonkey', 'ptr-to-JSObject')
 assert_subprinter_registered('SpiderMonkey', 'ref-to-JSObject')
 
 run_fragment('JSObject.simple')
 
 # These patterns look a little strange because of prologue.py's 'set print
 # address off', which avoids putting varying addresses in the output. After
--- a/js/src/gdb/tests/test-JSString-null.py
+++ b/js/src/gdb/tests/test-JSString-null.py
@@ -1,6 +1,8 @@
+# flake8: noqa: F821
+
 gdb.execute('set print address on')
 
 run_fragment('JSString.null')
 
 assert_pretty('null', '0x0')
 assert_pretty('nullRaw', '0x0')
--- a/js/src/gdb/tests/test-JSString-subclasses.py
+++ b/js/src/gdb/tests/test-JSString-subclasses.py
@@ -1,5 +1,7 @@
+# flake8: noqa: F821
+
 # We can print pointers to subclasses of JSString.
 
 run_fragment('JSString.subclasses')
 
 assert_pretty('flat', '"Hi!"')
--- a/js/src/gdb/tests/test-JSString.py
+++ b/js/src/gdb/tests/test-JSString.py
@@ -1,9 +1,10 @@
 # Printing JSStrings.
+# flake8: noqa: F821
 
 assert_subprinter_registered('SpiderMonkey', 'ptr-to-JSString')
 run_fragment('JSString.simple')
 
 assert_pretty('empty', '""')
 assert_pretty('x', '"x"')
 assert_pretty('z', '"z"')
 assert_pretty('xz', '"xz"')
--- a/js/src/gdb/tests/test-JSSymbol.py
+++ b/js/src/gdb/tests/test-JSSymbol.py
@@ -1,9 +1,10 @@
 # Printing JS::Symbols.
+# flake8: noqa: F821
 
 assert_subprinter_registered('SpiderMonkey', 'ptr-to-JS::Symbol')
 
 run_fragment('JSSymbol.simple')
 
 assert_pretty('unique', 'Symbol()')
 assert_pretty('unique_with_desc', 'Symbol("Hello!")')
 assert_pretty('registry', 'Symbol.for("Hello!")')
--- a/js/src/gdb/tests/test-Root-null.py
+++ b/js/src/gdb/tests/test-Root-null.py
@@ -7,14 +7,15 @@
 #
 # Thus, if the pretty-printer for JS::Rooted simply returns the referent as
 # a replacement value (which seems reasonable enough, if you want the
 # pretty-printer to be completely transparent), and the referent is a null
 # pointer, it prints as nothing at all.
 #
 # This test ensures that the JS::Rooted pretty-printer doesn't make that
 # mistake.
+# flake8: noqa: F821
 
 gdb.execute('set print address on')
 
 run_fragment('Root.null')
 
 assert_pretty('null', '0x0')
--- a/js/src/gdb/tests/test-Root.py
+++ b/js/src/gdb/tests/test-Root.py
@@ -1,9 +1,12 @@
 # Test printing Handles.
+# Ignore flake8 errors "undefined name 'assert_pretty'"
+# As it is caused by the way we instantiate this file
+# flake8: noqa: F821
 
 assert_subprinter_registered('SpiderMonkey', 'instantiations-of-JS::Rooted')
 assert_subprinter_registered('SpiderMonkey', 'instantiations-of-JS::Handle')
 assert_subprinter_registered('SpiderMonkey', 'instantiations-of-JS::MutableHandle')
 assert_subprinter_registered('SpiderMonkey', 'instantiations-of-js::BarrieredBase')
 
 run_fragment('Root.handle')
 
--- a/js/src/gdb/tests/test-asmjs.py
+++ b/js/src/gdb/tests/test-asmjs.py
@@ -1,13 +1,14 @@
 # Test for special asmjs SIGSEGV-handling.
 #
 # Expected behavior is for the asm.js code in the following fragment to trigger
 # SIGSEGV. The code in js/src/gdb/mozilla/asmjs.py should prevent GDB from
 # handling that signal.
+# flake8: noqa: F821
 
 run_fragment('asmjs.segfault')
 
 # If SIGSEGV handling is broken, GDB would have stopped at the SIGSEGV signal.
 # The breakpoint would not have hit, and run_fragment would have thrown.
 #
 # So if we get here, and the asm.js code actually ran, we win.
 
--- a/js/src/gdb/tests/test-jsid.py
+++ b/js/src/gdb/tests/test-jsid.py
@@ -1,9 +1,10 @@
 # Tests for jsid pretty-printing
+# flake8: noqa: F821
 
 assert_subprinter_registered('SpiderMonkey', 'jsid')
 
 run_fragment('jsid.simple')
 
 assert_pretty('string_id', '$jsid("moon")')
 assert_pretty('int_id', '$jsid(1729)')
 unique_symbol_pretty = str(gdb.parse_and_eval('unique_symbol_id')).split('@')[0]
--- a/js/src/gdb/tests/test-jsval.py
+++ b/js/src/gdb/tests/test-jsval.py
@@ -1,9 +1,10 @@
 # Basic unit tests for jsval pretty-printer.
+# flake8: noqa: F821
 
 assert_subprinter_registered('SpiderMonkey', 'JS::Value')
 
 run_fragment('jsval.simple')
 
 assert_pretty('fortytwo', '$JS::Int32Value(42)')
 assert_pretty('fortytwoD', '$JS::DoubleValue(42)')
 assert_pretty('negone', '$JS::Int32Value(-1)')
--- a/js/src/gdb/tests/test-prettyprinters.py
+++ b/js/src/gdb/tests/test-prettyprinters.py
@@ -1,8 +1,12 @@
+# Ignore flake8 errors "undefined name 'assert_pretty'"
+# As it is caused by the way we instantiate this file
+# flake8: noqa: F821
+
 import mozilla.prettyprinters
 
 run_fragment('prettyprinters.implemented_types')
 
 
 def implemented_type_names(expr):
     v = gdb.parse_and_eval(expr)
     it = mozilla.prettyprinters.implemented_types(v.type)
--- a/js/src/gdb/tests/test-unwind.py
+++ b/js/src/gdb/tests/test-unwind.py
@@ -1,10 +1,10 @@
 # Test the unwinder and the frame filter.
-
+# flake8: noqa: F821
 import platform
 
 
 def do_unwinder_test():
     # The unwinder is disabled by default for the moment. Turn it on to check
     # that the unwinder works as expected.
     import gdb
     gdb.execute("enable unwinder .* SpiderMonkey")
@@ -48,13 +48,12 @@ def do_unwinder_test():
     assert_eq(found_inner, True)
     assert_eq(found_outer, True)
 
 
 # Only on the right platforms.
 if platform.machine() == 'x86_64' and platform.system() == 'Linux':
     # Only test when gdb has the unwinder feature.
     try:
-        import gdb.unwinder
-        import gdb.frames
+        import gdb.unwinder  # NOQA: F401
         do_unwinder_test()
-    except:
+    except Exception:
         pass
--- a/js/src/gdb/tests/typedef-printers.py
+++ b/js/src/gdb/tests/typedef-printers.py
@@ -1,10 +1,11 @@
 # Test that we can find pretty-printers for typedef names, not just for
 # struct types and templates.
+# flake8: noqa: F821
 
 import mozilla.prettyprinters
 
 
 @mozilla.prettyprinters.pretty_printer('my_typedef')
 class my_typedef(object):
     def __init__(self, value, cache):
         pass
--- a/js/src/jit-test/jit_test.py
+++ b/js/src/jit-test/jit_test.py
@@ -267,19 +267,19 @@ def main(argv):
 
         # Prevent code coverage test that expects coverage
         # to be off when it starts.
         options.exclude += [os.path.join('debug', 'Script-getOffsetsCoverage-02.js')]
 
     if options.exclude_from:
         with open(options.exclude_from) as fh:
             for line in fh:
-                line = line.strip()
-                if not line.startswith("#") and len(line):
-                    options.exclude.append(line)
+                line_exclude = line.strip()
+                if not line_exclude.startswith("#") and len(line_exclude):
+                    options.exclude.append(line_exclude)
 
     if options.exclude:
         exclude_list = []
         for exclude in options.exclude:
             exclude_list += jittests.find_tests(exclude)
         test_list = [test for test in test_list
                      if test not in set(exclude_list)]
 
--- a/js/src/jit/arm/gen-double-encoder-table.py
+++ b/js/src/jit/arm/gen-double-encoder-table.py
@@ -25,11 +25,11 @@ def encodeDouble(value):
     assert (0 <= value) and (value <= 255)
     a = value >> 7
     b = (value >> 6) & 1
     B = int(b == 0)
     cdefgh = value & 0x3f
     return (a << 31) | (B << 30) | (rep(b, 8) << 22) | cdefgh << 16
 
 
-print '/* THIS FILE IS AUTOMATICALLY GENERATED BY gen-double-encode-table.py.  */'
+print('/* THIS FILE IS AUTOMATICALLY GENERATED BY gen-double-encode-table.py.  */')
 for i in range(256):
-    print '  { 0x%08x, { %d, %d, 0 } },' % (encodeDouble(i), i & 0xf, i >> 4)
+    print('  { 0x%08x, { %d, %d, 0 } },' % (encodeDouble(i), i & 0xf, i >> 4))
--- a/js/src/tests/lib/jittests.py
+++ b/js/src/tests/lib/jittests.py
@@ -109,37 +109,50 @@ class JitTest:
         self.path = path
 
         # Path relative to the top mozilla/ directory.
         self.relpath_top = os.path.relpath(path, TOP_SRC_DIR)
 
         # Path relative to mozilla/js/src/jit-test/tests/.
         self.relpath_tests = os.path.relpath(path, TEST_DIR)
 
-        self.jitflags = []     # jit flags to enable
-        self.slow = False      # True means the test is slow-running
-        self.allow_oom = False  # True means that OOM is not considered a failure
-        self.allow_unhandlable_oom = False  # True means CrashAtUnhandlableOOM
-        # is not considered a failure
-        self.allow_overrecursed = False  # True means that hitting recursion the
-        # limits is not considered a failure.
-        self.valgrind = False  # True means run under valgrind
-        self.tz_pacific = False  # True means force Pacific time for the test
-        self.test_also_noasmjs = False  # True means run with and without asm.js
+        # jit flags to enable
+        self.jitflags = []
+        # True means the test is slow-running
+        self.slow = False
+        # True means that OOM is not considered a failure
+        self.allow_oom = False
+        # True means CrashAtUnhandlableOOM is not considered a failure
+        self.allow_unhandlable_oom = False
+        # True means that hitting the recursion limits is not considered a failure.
+        self.allow_overrecursed = False
+        # True means run under valgrind
+        self.valgrind = False
+        # True means force Pacific time for the test
+        self.tz_pacific = False
+        # True means run with and without asm.js
+        self.test_also_noasmjs = False
         # enabled.
-        self.test_also_wasm_baseline = False  # True means run with and and without
-        # wasm baseline compiler enabled.
-        self.other_includes = []  # Additional files to include, in addition to prologue.js
-        self.test_also = []  # List of other configurations to test with.
-        self.test_join = []  # List of other configurations to test with all existing variants.
-        self.expect_error = ''  # Errors to expect and consider passing
-        self.expect_status = 0  # Exit status to expect from shell
-        self.expect_crash = False  # Exit status or error output.
+        # True means run with and without wasm baseline compiler enabled.
+        self.test_also_wasm_baseline = False
+        # Additional files to include, in addition to prologue.js
+        self.other_includes = []
+        # List of other configurations to test with.
+        self.test_also = []
+        # List of other configurations to test with all existing variants.
+        self.test_join = []
+        # Errors to expect and consider passing
+        self.expect_error = ''
+        # Exit status to expect from shell
+        self.expect_status = 0
+        # Exit status or error output.
+        self.expect_crash = False
         self.is_module = False
-        self.test_reflect_stringify = None  # Reflect.stringify implementation to test
+        # Reflect.stringify implementation to test
+        self.test_reflect_stringify = None
 
         # Expected by the test runner. Always true for jit-tests.
         self.enable = True
 
     def copy(self):
         t = JitTest(self.path)
         t.jitflags = self.jitflags[:]
         t.slow = self.slow
@@ -778,17 +791,17 @@ def run_tests_remote(tests, num_tests, p
 
 
 def platform_might_be_android():
     try:
         # The python package for SL4A provides an |android| module.
         # If that module is present, we're likely in SL4A-python on
         # device.  False positives and negatives are possible,
         # however.
-        import android
+        import android  # NOQA: F401
         return True
     except ImportError:
         return False
 
 
 def stdio_might_be_broken():
     return platform_might_be_android()
 
--- a/js/src/tests/lib/manifest.py
+++ b/js/src/tests/lib/manifest.py
@@ -56,17 +56,17 @@ class XULInfo:
 
         path = None
         for dir in dirs:
             _path = os.path.join(dir, 'config/autoconf.mk')
             if os.path.isfile(_path):
                 path = _path
                 break
 
-        if path == None:
+        if path is None:
             print("Can't find config/autoconf.mk on a directory containing"
                   " the JS shell (searched from {})".format(jsdir))
             sys.exit(1)
 
         # Read the values.
         val_re = re.compile(r'(TARGET_XPCOM_ABI|OS_TARGET|MOZ_DEBUG)\s*=\s*(.*)')
         kw = {'isdebug': False}
         for line in open(path):
@@ -421,15 +421,14 @@ def load_reftests(location, path_options
     for root, basename in _find_all_js_files(location):
         # Get the full path and relative location of the file.
         filename = os.path.join(root, basename)
         if not _is_test_file(root, basename, filename, path_options):
             continue
 
         # Skip empty files.
         fullpath = os.path.join(location, filename)
-        statbuf = os.stat(fullpath)
 
         testcase = RefTestCase(filename)
         _apply_external_manifests(filename, testcase, externalManifestEntries,
                                   xul_tester)
         _parse_test_header(fullpath, testcase, xul_tester)
         yield testcase
--- a/js/src/tests/lib/progressbar.py
+++ b/js/src/tests/lib/progressbar.py
@@ -26,27 +26,31 @@ class NullProgressBar(object):
 
 
 class ProgressBar(object):
     def __init__(self, limit, fmt):
         assert self.conservative_isatty()
 
         self.prior = None
         self.atLineStart = True
-        self.counters_fmt = fmt  # [{str:str}] Describtion of how to lay out each
-        #             field in the counters map.
-        self.limit = limit  # int: The value of 'current' equal to 100%.
-        self.limit_digits = int(math.ceil(math.log10(self.limit)))  # int: max digits in limit
-        self.t0 = datetime.now()  # datetime: The start time.
+        # [{str:str}] Description of how to lay out each field in the counters map.
+        self.counters_fmt = fmt
+        # int: The value of 'current' equal to 100%.
+        self.limit = limit
+        # int: max digits in limit
+        self.limit_digits = int(math.ceil(math.log10(self.limit)))
+        # datetime: The start time.
+        self.t0 = datetime.now()
 
         # Compute the width of the counters and build the format string.
         self.counters_width = 1  # [
         for layout in self.counters_fmt:
             self.counters_width += self.limit_digits
-            self.counters_width += 1  # | (or ']' for the last one)
+            # | (or ']' for the last one)
+            self.counters_width += 1
 
         self.barlen = 64 - self.counters_width
 
     @staticmethod
     def update_granularity():
         return timedelta(seconds=0.1)
 
     def update(self, current, data):
@@ -111,13 +115,13 @@ class ProgressBar(object):
     def conservative_isatty():
         """
         Prefer erring on the side of caution and not using terminal commands if
         the current output stream may be a file.  We explicitly check for the
         Android platform because terminal commands work poorly over ADB's
         redirection.
         """
         try:
-            import android
+            import android  # NOQA: F401
             return False
         except ImportError:
             return sys.stdout.isatty()
         return False
--- a/js/src/tests/lib/results.py
+++ b/js/src/tests/lib/results.py
@@ -17,18 +17,18 @@ class TestOutput:
     """Output from a test run."""
     def __init__(self, test, cmd, out, err, rc, dt, timed_out, extra=None):
         self.test = test   # Test
         self.cmd = cmd     # str:   command line of test
         self.out = out     # str:   stdout
         self.err = err     # str:   stderr
         self.rc = rc       # int:   return code
         self.dt = dt       # float: run time
-        self.timed_out = timed_out # bool: did the test time out
-        self.extra = extra # includes the pid on some platforms
+        self.timed_out = timed_out  # bool: did the test time out
+        self.extra = extra  # includes the pid on some platforms
 
     def describe_failure(self):
         if self.timed_out:
             return "Timeout"
         lines = self.err.splitlines()
         for line in lines:
             # Skip the asm.js compilation success message.
             if "Successfully compiled asm.js code" not in line:
@@ -92,17 +92,17 @@ class TestResult:
                     expected_rcs.append(int(m.group(1)))
 
         if test.error is not None:
             expected_rcs.append(3)
             if test.error not in err:
                 failures += 1
                 results.append((cls.FAIL, "Expected uncaught error: {}".format(test.error)))
 
-        if rc and not rc in expected_rcs:
+        if rc and rc not in expected_rcs:
             if rc == 3:
                 result = cls.FAIL
             else:
                 result = cls.CRASH
         else:
             if (rc or passes > 0) and failures == 0:
                 result = cls.PASS
             else:
@@ -217,17 +217,20 @@ class ResultsSink:
                         tup = (sub_ok, result.test.expect, result.test.random)
                         label = self.LABELS[tup][0]
                         if label == 'TEST-UNEXPECTED-PASS':
                             label = 'TEST-PASS (EXPECTED RANDOM)'
                         self.print_automation_result(
                             label, result.test, time=output.dt,
                             message=msg)
                 tup = (result.result, result.test.expect, result.test.random)
-                self.print_automation_result(self.LABELS[tup][0], result.test, time=output.dt, extra=getattr(output, 'extra', None))
+                self.print_automation_result(self.LABELS[tup][0],
+                                             result.test,
+                                             time=output.dt,
+                                             extra=getattr(output, 'extra', None))
                 return
 
             if dev_label:
                 def singular(label):
                     return "FIXED" if label == "FIXES" else label[:-1]
                 self.pb.message("{} - {}".format(singular(dev_label),
                                                  output.test.path))
 
@@ -314,17 +317,17 @@ class ResultsSink:
             result += " | " + message
         if skip:
             result += ' | (SKIP)'
         if time > self.options.timeout:
             result += ' | (TIMEOUT)'
         result += ' [{:.1f} s]'.format(time)
         print(result)
 
-        details = { 'extra': extra.copy() if extra else {} }
+        details = {'extra': extra.copy() if extra else {}}
         if self.options.shell_args:
             details['extra']['shell_args'] = self.options.shell_args
         details['extra']['jitflags'] = test.jitflags
         if message:
             details['message'] = message
         status = 'FAIL' if 'TEST-UNEXPECTED' in label else 'PASS'
 
         self.slog.test(test.path, status, time or 0, **details)
--- a/js/src/tests/lib/structuredlog.py
+++ b/js/src/tests/lib/structuredlog.py
@@ -1,9 +1,10 @@
-# produce mozlog-compatible log messages, following the spec at https://mozbase.readthedocs.io/en/latest/mozlog.html
+# produce mozlog-compatible log messages, following the spec at
+# https://mozbase.readthedocs.io/en/latest/mozlog.html
 
 import json
 import os
 
 from time import time
 
 
 class TestLogger(object):
--- a/js/src/tests/lib/tasks_unix.py
+++ b/js/src/tests/lib/tasks_unix.py
@@ -110,17 +110,17 @@ def read_input(tasks, timeout):
         outmap[t.stderr] = t.err
         # This will trigger with a close event when the child dies, allowing
         # us to respond immediately and not leave cores idle.
         exlist.append(t.stdout)
 
     readable = []
     try:
         readable, _, _ = select.select(rlist, [], exlist, timeout)
-    except OverflowError as e:
+    except OverflowError:
         print >> sys.stderr, "timeout value", timeout
         raise
 
     for fd in readable:
         flush_input(fd, outmap[fd])
 
 
 def remove_task(tasks, pid):
--- a/js/src/tests/lib/tests.py
+++ b/js/src/tests/lib/tests.py
@@ -1,22 +1,16 @@
 # Library for JSTest tests.
 #
 # This contains classes that represent an individual test, including
 # metadata, and know how to run the tests and determine failures.
 
-import datetime
 import os
 import sys
-import time
 from contextlib import contextmanager
-from subprocess import Popen, PIPE
-from threading import Thread
-
-from results import TestOutput
 
 # When run on tbpl, we run each test multiple times with the following
 # arguments.
 JITFLAGS = {
     'all': [
         [],  # no flags, normal baseline and ion
         ['--ion-eager', '--ion-offthread-compile=off'],  # implies --baseline-eager
         ['--ion-eager', '--ion-offthread-compile=off',
@@ -147,28 +141,37 @@ def get_cpu_count():
 
     return 1
 
 
 class RefTestCase(object):
     """A test case consisting of a test and an expected result."""
 
     def __init__(self, path):
-        self.path = path     # str:  path of JS file relative to tests root dir
-        self.options = []    # [str]: Extra options to pass to the shell
-        self.jitflags = []   # [str]: JIT flags to pass to the shell
-        self.test_reflect_stringify = None  # str or None: path to
-        # reflect-stringify.js file to test
+        # str:  path of JS file relative to tests root dir
+        self.path = path
+        # [str]: Extra options to pass to the shell
+        self.options = []
+        # [str]: JIT flags to pass to the shell
+        self.jitflags = []
+        # str or None: path to reflect-stringify.js file to test
         # instead of actually running tests
-        self.is_module = False  # bool: True => test is module code
-        self.enable = True   # bool: True => run test, False => don't run
-        self.error = None    # str?: Optional error type
-        self.expect = True   # bool: expected result, True => pass
-        self.random = False  # bool: True => ignore output as 'random'
-        self.slow = False    # bool: True => test may run slowly
+        self.test_reflect_stringify = None
+        # bool: True => test is module code
+        self.is_module = False
+        # bool: True => run test, False => don't run
+        self.enable = True
+        # str?: Optional error type
+        self.error = None
+        # bool: expected result, True => pass
+        self.expect = True
+        # bool: True => ignore output as 'random'
+        self.random = False
+        # bool: True => test may run slowly
+        self.slow = False
 
         # The terms parsed to produce the above properties.
         self.terms = None
 
         # The tag between |...| in the test header.
         self.tag = None
 
         # Anything occuring after -- in the test header.
--- a/js/src/tests/non262/String/make-normalize-generateddata-input.py
+++ b/js/src/tests/non262/String/make-normalize-generateddata-input.py
@@ -32,17 +32,17 @@ def convert(dir):
     with open('{dir}/{path}'.format(dir=dir, path=txt_path), 'r') as f:
         with open('{dir}/{path}'.format(dir=dir, path=js_path), 'w') as outf:
             for line in f:
                 m = test_pat.search(line)
                 if m:
                     if not_empty:
                         outf.write(',')
                     outf.write('\n')
-                    pat = '{{ source: {source}, NFC: {NFC}, NFD: {NFD}, NFKC: {NFKC}, NFKD: {NFKD} }}'
+                    pat = '{{ source: {source}, NFC: {NFC}, NFD: {NFD}, NFKC: {NFKC}, NFKD: {NFKD} }}'  # NOQA: E501
                     outf.write(pat.format(source=to_code_list(m.group(1)),
                                           NFC=to_code_list(m.group(2)),
                                           NFD=to_code_list(m.group(3)),
                                           NFKC=to_code_list(m.group(4)),
                                           NFKD=to_code_list(m.group(5))))
                     not_empty = True
                     continue
                 m = part_pat.search(line)
@@ -66,11 +66,12 @@ def convert(dir):
                     continue
                 print("Unknown line: {0}".format(line), file=sys.stderr)
             if part_opened:
                 outf.write('\n];\n')
 
 
 if __name__ == '__main__':
     if len(sys.argv) < 2:
-        print("Usage: make-normalize-generateddata-input.py PATH_TO_MOZILLA_CENTRAL", file=sys.stderr)
+        print("Usage: make-normalize-generateddata-input.py PATH_TO_MOZILLA_CENTRAL",
+              file=sys.stderr)
         sys.exit(1)
     convert(sys.argv[1])
--- a/js/src/tests/test/run.py
+++ b/js/src/tests/test/run.py
@@ -1,16 +1,15 @@
 #!/usr/bin/env python
 # Adapted from https://github.com/tc39/test262/blob/master/tools/generation/test/run.py
 
 import shutil
 import subprocess
 import contextlib
 import tempfile
-import sys
 import os
 import unittest
 
 testDir = os.path.dirname(os.path.relpath(__file__))
 OUT_DIR = os.path.join(testDir, 'out')
 EXPECTED_DIR = os.path.join(testDir, 'expected')
 ex = os.path.join(testDir, '..', 'test262-export.py')
 importExec = os.path.join(testDir, '..', 'test262-update.py')
--- a/js/src/tests/test262-export.py
+++ b/js/src/tests/test262-export.py
@@ -2,27 +2,22 @@
 # -*- coding: utf-8 -*-
 #
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import print_function
 
-import contextlib
 import os
 import re
-import tempfile
 import shutil
 import sys
 import yaml
 
-from functools import partial
-from itertools import chain, imap
-
 # Skip all common files used to support tests for jstests
 # These files are listed in the README.txt
 SUPPORT_FILES = set(["browser.js", "shell.js", "template.js", "user.js",
                      "js-test-driver-begin.js", "js-test-driver-end.js"])
 
 FRONTMATTER_WRAPPER_PATTERN = re.compile(
     r'/\*\---\n([\s]*)((?:\s|\S)*)[\n\s*]---\*/', flags=re.DOTALL)
 
@@ -425,15 +420,18 @@ if __name__ == "__main__":
     import argparse
 
     # This script must be run from js/src/tests to work correctly.
     if "/".join(os.path.normpath(os.getcwd()).split(os.sep)[-3:]) != "js/src/tests":
         raise RuntimeError("%s must be run from js/src/tests" % sys.argv[0])
 
     parser = argparse.ArgumentParser(description="Export tests to match Test262 file compliance.")
     parser.add_argument("--out", default="test262/export",
-                        help="Output directory. Any existing directory will be removed! (default: %(default)s)")
+                        help="Output directory. Any existing directory will be removed! "
+                        "(default: %(default)s)")
     parser.add_argument("--exportshellincludes", action="store_true",
-                        help="Optionally export shell.js files as includes in exported tests. Only use for testing, do not use for exporting to test262 (test262 tests should have as few dependencies as possible).")
+                        help="Optionally export shell.js files as includes in exported tests. "
+                        "Only use for testing, do not use for exporting to test262 (test262 tests "
+                        "should have as few dependencies as possible).")
     parser.add_argument("src", nargs="+", help="Source folder with test files to export")
     parser.set_defaults(func=exportTest262)
     args = parser.parse_args()
     args.func(args)
--- a/js/src/tests/test262-update.py
+++ b/js/src/tests/test262-update.py
@@ -22,17 +22,16 @@ UNSUPPORTED_FEATURES = set([
     "tail-call-optimization",
     "class-fields-public",
     "class-fields-private",
     "regexp-dotall",
     "regexp-lookbehind",
     "regexp-named-groups",
     "regexp-unicode-property-escapes",
     "numeric-separator-literal",
-    "json-superset",
     "Intl.Locale",
     "String.prototype.matchAll",
     "Symbol.matchAll",
 ])
 FEATURE_CHECK_NEEDED = {
     "Atomics": "!this.hasOwnProperty('Atomics')",
     "BigInt": "!this.hasOwnProperty('BigInt')",
     "SharedArrayBuffer": "!this.hasOwnProperty('SharedArrayBuffer')",
@@ -172,17 +171,18 @@ def writeShellAndBrowserFiles(test262Out
     def findIncludes():
         parentIncludes = findParentIncludes()
         for include in includesMap[relPath]:
             if include not in parentIncludes:
                 yield include
 
     def readIncludeFile(filePath):
         with io.open(filePath, "rb") as includeFile:
-            return b"// file: %s\n%s" % (os.path.basename(filePath).encode("utf-8"), includeFile.read())
+            return b"// file: %s\n%s" % (os.path.basename(filePath).encode("utf-8"),
+                                         includeFile.read())
 
     localIncludes = localIncludesMap[relPath] if relPath in localIncludesMap else []
 
     # Concatenate all includes files.
     includeSource = b"\n".join(map(readIncludeFile, chain(
         # The requested include files.
         map(partial(os.path.join, harnessDir), sorted(findIncludes())),
 
@@ -275,18 +275,20 @@ def convertTestFile(test262parser, testS
         else:
             releaseOrBeta = [f for f in testRec["features"] if f in RELEASE_OR_BETA]
             if releaseOrBeta:
                 refTestSkipIf.append(("release_or_beta",
                                       "%s is not released yet" % ",".join(releaseOrBeta)))
 
             featureCheckNeeded = [f for f in testRec["features"] if f in FEATURE_CHECK_NEEDED]
             if featureCheckNeeded:
-                refTestSkipIf.append(("||".join([FEATURE_CHECK_NEEDED[f] for f in featureCheckNeeded]),
-                                      "%s is not enabled unconditionally" % ",".join(featureCheckNeeded)))
+                refTestSkipIf.append(("||".join([FEATURE_CHECK_NEEDED[f]
+                                                 for f in featureCheckNeeded]),
+                                      "%s is not enabled unconditionally" % ",".join(
+                                          featureCheckNeeded)))
 
     # Includes for every test file in a directory is collected in a single
     # shell.js file per directory level. This is done to avoid adding all
     # test harness files to the top level shell.js file.
     if "includes" in testRec:
         assert not raw, "Raw test with includes: %s" % testName
         includeSet.update(testRec["includes"])
 
@@ -363,17 +365,18 @@ def process_test262(test262Dir, test262O
 
     # Process all test directories recursively.
     for (dirPath, dirNames, fileNames) in os.walk(testDir):
         relPath = os.path.relpath(dirPath, testDir)
         if relPath == ".":
             continue
 
         # Skip creating a "prs" directory if it already exists
-        if relPath not in ("prs", "local") and not os.path.exists(os.path.join(test262OutDir, relPath)):
+        if relPath not in ("prs", "local") and not os.path.exists(os.path.join(test262OutDir,
+                                                                               relPath)):
             os.makedirs(os.path.join(test262OutDir, relPath))
 
         includeSet = set()
         includesMap[relPath] = includeSet
 
         if relPath in explicitIncludes:
             includeSet.update(explicitIncludes[relPath])
 
@@ -387,17 +390,18 @@ def process_test262(test262Dir, test262O
             if fileExt != ".js":
                 shutil.copyfile(filePath, os.path.join(test262OutDir, testName))
                 continue
 
             # Read the original test source and preprocess it for the jstests harness.
             with io.open(filePath, "rb") as testFile:
                 testSource = testFile.read()
 
-            for (newFileName, newSource) in convertTestFile(test262parser, testSource, testName, includeSet, strictTests):
+            for (newFileName, newSource) in convertTestFile(test262parser, testSource, testName,
+                                                            includeSet, strictTests):
                 writeTestFile(test262OutDir, newFileName, newSource)
 
         # Add shell.js and browers.js files for the current directory.
         writeShellAndBrowserFiles(test262OutDir, harnessDir,
                                   includesMap, localIncludesMap, relPath)
 
 
 def fetch_local_changes(inDir, outDir, srcDir, strictTests):
@@ -416,17 +420,18 @@ def fetch_local_changes(inDir, outDir, s
 
     # Checks for unstaged or non committed files. A clean branch provides a clean status.
     status = subprocess.check_output(
         ("git -C %s status --porcelain" % srcDir).split(" ")
     )
 
     if status.strip():
         raise RuntimeError(
-            "Please commit files and cleanup the local test262 folder before importing files.\nCurrent status: \n%s"
+            "Please commit files and cleanup the local test262 folder before importing files.\n"
+            "Current status: \n%s"
             % status)
 
     # Captures the branch name to be used on the output
     branchName = subprocess.check_output(
         ("git -C %s rev-parse --abbrev-ref HEAD" % srcDir).split(" ")).split("\n")[0]
 
     # Fetches the file names to import
     files = subprocess.check_output(
@@ -634,18 +639,20 @@ if __name__ == "__main__":
     parser = argparse.ArgumentParser(description="Update the test262 test suite.")
     parser.add_argument("--url", default="git://github.com/tc39/test262.git",
                         help="URL to git repository (default: %(default)s)")
     parser.add_argument("--branch", default="master",
                         help="Git branch (default: %(default)s)")
     parser.add_argument("--revision", default="HEAD",
                         help="Git revision (default: %(default)s)")
     parser.add_argument("--out", default="test262",
-                        help="Output directory. Any existing directory will be removed! (default: %(default)s)")
-    parser.add_argument(
-        "--pull", help="Import contents from a Pull Request specified by its number")
-    parser.add_argument(
-        "--local", help="Import new and modified contents from a local folder, a new folder will be created on local/branch_name")
+                        help="Output directory. Any existing directory will be removed! "
+                        "(default: %(default)s)")
+    parser.add_argument("--pull",
+                        help="Import contents from a Pull Request specified by its number")
+    parser.add_argument("--local",
+                        help="Import new and modified contents from a local folder, a new folder "
+                        "will be created on local/branch_name")
     parser.add_argument("--strict", default=False, action="store_true",
                         help="Generate additional strict mode tests. Not enabled by default.")
     parser.set_defaults(func=update_test262)
     args = parser.parse_args()
     args.func(args)
--- a/js/src/util/make_unicode.py
+++ b/js/src/util/make_unicode.py
@@ -23,17 +23,17 @@ from __future__ import print_function, u
 
 import csv
 import io
 import re
 import os
 import sys
 from contextlib import closing
 from functools import partial
-from itertools import chain, groupby, tee
+from itertools import chain, tee
 from operator import is_not, itemgetter
 from zipfile import ZipFile
 
 if sys.version_info.major == 2:
     from itertools import ifilter as filter, imap as map, izip_longest as zip_longest
     from urllib2 import urlopen
     range = xrange
 else:
@@ -825,25 +825,26 @@ def write_special_casing_methods(uncondi
         println('    MOZ_ASSERT_UNREACHABLE("Bad character input.");')
         println('    return 0;')
 
         println('}')
 
     def write_AppendUpperCaseSpecialCasing():
         """ Slow case: Special casing character was found, append its mapping characters. """
         println('void')
-        println('js::unicode::AppendUpperCaseSpecialCasing(char16_t ch, char16_t* elements, size_t* index)')
+        println('js::unicode::AppendUpperCaseSpecialCasing(char16_t ch, char16_t* elements, size_t* index)')  # NOQA: E501
         println('{')
 
         println('    switch(ch) {')
         for (code, converted) in sorted(unconditional_toupper.items(), key=itemgetter(0)):
             println('      case {}: // {}'.format(hexlit(code), codepoint_table.name(code)))
             for ch in converted:
-                println('        elements[(*index)++] = {}; // {}'.format(hexlit(ch),
-                                                                          codepoint_table.name(ch)))
+                println('        elements[(*index)++] = {}; // {}'
+                        .format(hexlit(ch),
+                                codepoint_table.name(ch)))
             println('        return;')
         println('    }')
         println('')
         println('    MOZ_ASSERT_UNREACHABLE("Bad character input.");')
 
         println('}')
 
     write_ChangesWhenUpperCasedSpecialCasing()
@@ -1057,17 +1058,18 @@ function test(code, ...equivs) {
   var ans = String.fromCodePoint(code) + equivs.map(c => String.fromCodePoint(c)).join("");
   assertEqArray(codeRe.exec("<" + ans + ">"), [ans]);
   codeRe = new RegExp("[" + String.fromCodePoint(code) + "]+", "iu");
   assertEqArray(codeRe.exec("<" + ans + ">"), [ans]);
 }
 """)
         for args in folding_tests:
             test_icase.write('test({}); // {}\n'.format(', '.join(map(char_hex, args)),
-                                                        ', '.join(map(codepoint_table.name, args))))
+                                                        ', '.join(map(codepoint_table.name,
+                                                                      args))))
         test_icase.write("""
 if (typeof reportCompare === "function")
     reportCompare(true, true);
 """)
 
 
 def make_unicode_file(version,
                       table, index,
@@ -1079,17 +1081,18 @@ def make_unicode_file(version,
                       codepoint_table):
     index1, index2, shift = splitbins(index)
 
     # Don't forget to update CharInfo in Unicode.h if you need to change this
     assert shift == 6
 
     same_upper_index1, same_upper_index2, same_upper_shift = splitbins(same_upper_index)
 
-    # Don't forget to update CodepointsWithSameUpperCaseInfo in Unicode.h if you need to change this
+    # Don't forget to update CodepointsWithSameUpperCaseInfo in Unicode.h if you need
+    # to change this
     assert same_upper_shift == 6
 
     folding_index1, folding_index2, folding_shift = splitbins(folding_index)
 
     # Don't forget to update CaseFoldInfo in Unicode.h if you need to change this
     assert folding_shift == 6
 
     # verify correctness
@@ -1198,21 +1201,21 @@ def make_unicode_file(version,
         dump(idx2, idx2_name, println)
         println('')
 
     def write_supplemental_identifier_method(name, group_set, println):
         println('bool')
         println('js::unicode::{}(uint32_t codePoint)'.format(name))
         println('{')
         for (from_code, to_code) in int_ranges(group_set.keys()):
-            println('    if (codePoint >= 0x{:X} && codePoint <= 0x{:X}) // {} .. {}'.format(from_code,
-                                                                                             to_code,
-                                                                                             codepoint_table.name(
-                                                                                                 from_code),
-                                                                                             codepoint_table.name(to_code)))
+            println('    if (codePoint >= 0x{:X} && codePoint <= 0x{:X}) // {} .. {}'
+                    .format(from_code,
+                            to_code,
+                            codepoint_table.name(from_code),
+                            codepoint_table.name(to_code)))
             println('        return true;')
         println('    return false;')
         println('}')
         println('')
 
     file_name = 'Unicode.cpp'
     with io.open(file_name, 'w', encoding='utf-8') as data_file:
         write = partial(print, file=data_file, sep='', end='')
@@ -1488,17 +1491,18 @@ def make_irregexp_tables(version,
         assert all(ch <= MAX_ASCII for ch in digit_chars)
 
         # Characters in \w, 21.2.2.12 CharacterClassEscape.
         word_chars = [ord(ch) for ch in string.digits + string.ascii_letters + '_']
         assert all(ch <= MAX_ASCII for ch in word_chars)
 
         # Characters which case-fold to characters in \w.
         ignorecase_word_chars = (word_chars +
-                                 [ch for ch in range(MAX_ASCII + 1, MAX_BMP + 1) if casefolds_to_ascii(ch)])
+                                 [ch for ch in range(MAX_ASCII + 1, MAX_BMP + 1)
+                                  if casefolds_to_ascii(ch)])
 
         # Surrogate characters.
         surrogate_chars = [ch for ch in range(LEAD_SURROGATE_MIN, TRAIL_SURROGATE_MAX + 1)]
 
         write(warning_message)
         write(unicode_version_message.format(version))
         println('#include "irregexp/RegExpCharacters.h"')
         println('')
@@ -1546,31 +1550,38 @@ def update_unicode(args):
         print('\tVersion: %s' % version)
         print('\tDownload url: %s' % url)
 
         request_url = '{}/UCD.zip'.format(url)
         with closing(urlopen(request_url)) as downloaded_file:
             downloaded_data = io.BytesIO(downloaded_file.read())
 
         with ZipFile(downloaded_data) as zip_file:
-            for fname in ['UnicodeData.txt', 'CaseFolding.txt', 'DerivedCoreProperties.txt', 'SpecialCasing.txt']:
+            for fname in ['UnicodeData.txt',
+                          'CaseFolding.txt',
+                          'DerivedCoreProperties.txt',
+                          'SpecialCasing.txt']:
                 zip_file.extract(fname, path=base_path)
     else:
         print('\tUsing local files.')
         print('\tAlways make sure you have the newest Unicode files!')
     print('')
 
     def version_from_file(f, fname):
         pat_version = re.compile(r"# %s-(?P<version>\d+\.\d+\.\d+).txt" % fname)
         return pat_version.match(f.readline()).group("version")
 
-    with io.open(os.path.join(base_path, 'UnicodeData.txt'), 'r', encoding='utf-8') as unicode_data, \
-            io.open(os.path.join(base_path, 'CaseFolding.txt'), 'r', encoding='utf-8') as case_folding, \
-            io.open(os.path.join(base_path, 'DerivedCoreProperties.txt'), 'r', encoding='utf-8') as derived_core_properties, \
-            io.open(os.path.join(base_path, 'SpecialCasing.txt'), 'r', encoding='utf-8') as special_casing:
+    with io.open(os.path.join(base_path, 'UnicodeData.txt'),
+                 'r', encoding='utf-8') as unicode_data, \
+            io.open(os.path.join(base_path, 'CaseFolding.txt'),
+                    'r', encoding='utf-8') as case_folding, \
+            io.open(os.path.join(base_path, 'DerivedCoreProperties.txt'),
+                    'r', encoding='utf-8') as derived_core_properties, \
+            io.open(os.path.join(base_path, 'SpecialCasing.txt'),
+                    'r', encoding='utf-8') as special_casing:
         unicode_version = version_from_file(derived_core_properties, 'DerivedCoreProperties')
 
         print('Processing...')
         (
             table, index,
             same_upper_table, same_upper_index,
             non_bmp_lower_map, non_bmp_upper_map,
             non_bmp_space_set,
--- a/js/src/vm/make_opcode_doc.py
+++ b/js/src/vm/make_opcode_doc.py
@@ -6,17 +6,16 @@
     from js/src/vm/Opcodes.h.
 
     Output is written to stdout and should be pasted into the following
     MDN page:
     https://developer.mozilla.org/en-US/docs/SpiderMonkey/Internals/Bytecode
 """
 
 from __future__ import print_function
-import re
 import sys
 
 import os
 sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
 import opcode
 
 from xml.sax.saxutils import escape
 
@@ -117,22 +116,23 @@ def print_doc(index):
 <a href="{source_base}/js/src/vm/make_opcode_doc.py">make_opcode_doc.py</a>.</p>
 """.format(source_base=SOURCE_BASE))
 
     for (category_name, types) in index:
         print('<h3 id="{id}">{name}</h3>'.format(name=category_name,
                                                  id=make_element_id(category_name)))
         for (type_name, opcodes) in types:
             if type_name:
-                print('<h4 id="{id}">{name}</h4>'.format(name=type_name,
-                                                         id=make_element_id(category_name, type_name)))
+                print('<h4 id="{id}">{name}</h4>'.format(
+                    name=type_name,
+                    id=make_element_id(category_name, type_name)))
             print('<dl>')
-            for opcode in sorted(opcodes,
-                                 key=lambda opcode: opcode.sort_key):
-                print_opcode(opcode)
+            for opcode_ in sorted(opcodes,
+                                  key=lambda opcode: opcode.sort_key):
+                print_opcode(opcode_)
             print('</dl>')
 
 
 if __name__ == '__main__':
     if len(sys.argv) < 2:
         print("Usage: make_opcode_doc.py PATH_TO_MOZILLA_CENTRAL",
               file=sys.stderr)
         sys.exit(1)
--- a/js/src/vm/opcode.py
+++ b/js/src/vm/opcode.py
@@ -1,13 +1,12 @@
 #!/usr/bin/python -B
 
 from __future__ import print_function
 import re
-import sys
 from xml.sax.saxutils import escape
 
 quoted_pat = re.compile(r"([^A-Za-z0-9]|^)'([^']+)'")
 js_pat = re.compile(r"([^A-Za-z0-9]|^)(JS[A-Z0-9_\*]+)")
 
 
 def codify(text):
     text = re.sub(quoted_pat, '\\1<code>\\2</code>', text)