bug 1463425 - autopep8 on build/ r=gps
author: Sylvestre Ledru <sledru@mozilla.com>
Mon, 21 May 2018 23:56:34 +0200
changeset 419596 c37fb2bf78d66c664f17af47a6d0f838e54850a2
parent 419595 02159d1ec622088538ee4a5d1d9af5043e8c787f
child 419597 afa720beeefb08b8f0c3e2a7f88fde4530eddd69
push id: 34040
push user: ebalazs@mozilla.com
push date: Thu, 24 May 2018 09:37:05 +0000
treeherder: mozilla-central@c411ccb6bb4a [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: gps
bugs: 1463425
milestone: 62.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
bug 1463425 - autopep8 on build/ r=gps MozReview-Commit-ID: ETzx4HsjbEF
build/clang-plugin/import_mozilla_checks.py
build/mobile/remoteautomation.py
build/pgo/genpgocert.py
build/unix/elfhack/inject/copy_source.py
build/unix/rewrite_asan_dylib.py
build/util/count_ctors.py
build/valgrind/mach_commands.py
build/valgrind/output_handler.py
build/win32/autobinscope.py
build/win32/pgomerge.py
--- a/build/clang-plugin/import_mozilla_checks.py
+++ b/build/clang-plugin/import_mozilla_checks.py
@@ -7,16 +7,17 @@ import os
 import re
 import sys
 import glob
 import shutil
 import errno
 
 import ThirdPartyPaths
 
+
 def copy_dir_contents(src, dest):
     for f in glob.glob("%s/*" % src):
         try:
             destname = "%s/%s" % (dest, os.path.basename(f))
             if os.path.isdir(f):
                 shutil.copytree(f, destname)
             else:
                 shutil.copy2(f, destname)
@@ -27,21 +28,22 @@ def copy_dir_contents(src, dest):
                 if os.path.isdir(f):
                     copy_dir_contents(f, destname)
                 else:
                     os.remove(destname)
                     shutil.copy2(f, destname)
             else:
                 raise Exception('Directory not copied. Error: %s' % e)
 
+
 def write_cmake(module_path):
-  names = map(lambda f: '  ' + os.path.basename(f),
-              glob.glob("%s/*.cpp" % module_path))
-  with open(os.path.join(module_path, 'CMakeLists.txt'), 'wb') as f:
-    f.write("""set(LLVM_LINK_COMPONENTS support)
+    names = map(lambda f: '  ' + os.path.basename(f),
+                glob.glob("%s/*.cpp" % module_path))
+    with open(os.path.join(module_path, 'CMakeLists.txt'), 'wb') as f:
+        f.write("""set(LLVM_LINK_COMPONENTS support)
 
 add_definitions( -DCLANG_TIDY )
 add_definitions( -DHAVE_NEW_ASTMATCHER_NAMES )
 
 add_clang_library(clangTidyMozillaModule
   ThirdPartyPaths.cpp
 %(names)s
 
@@ -50,97 +52,100 @@ add_clang_library(clangTidyMozillaModule
   clangASTMatchers
   clangBasic
   clangLex
   clangTidy
   clangTidyReadabilityModule
   clangTidyUtils
   )""" % {'names': "\n".join(names)})
 
-def add_item_to_cmake_section(cmake_path, section, library):
-  with open(cmake_path, 'r') as f:
-    lines = f.readlines()
-  f.close()
 
-  libs = []
-  seen_target_libs = False
-  for line in lines:
-    if line.find(section) > -1:
-      seen_target_libs = True
-    elif seen_target_libs:
-      if line.find(')') > -1:
-        break
-      else:
-        libs.append(line.strip())
-  libs.append(library)
-  libs = sorted(libs, key = lambda s: s.lower())
+def add_item_to_cmake_section(cmake_path, section, library):
+    with open(cmake_path, 'r') as f:
+        lines = f.readlines()
+    f.close()
 
-  with open(cmake_path, 'wb') as f:
+    libs = []
     seen_target_libs = False
     for line in lines:
-      if line.find(section) > -1:
-        seen_target_libs = True
-        f.write(line)
-        f.writelines(map(lambda p: '  ' + p + '\n', libs))
-        continue
-      elif seen_target_libs:
-        if line.find(')') > -1:
-          seen_target_libs = False
-        else:
-          continue
-      f.write(line)
+        if line.find(section) > -1:
+            seen_target_libs = True
+        elif seen_target_libs:
+            if line.find(')') > -1:
+                break
+            else:
+                libs.append(line.strip())
+    libs.append(library)
+    libs = sorted(libs, key=lambda s: s.lower())
 
-  f.close()
+    with open(cmake_path, 'wb') as f:
+        seen_target_libs = False
+        for line in lines:
+            if line.find(section) > -1:
+                seen_target_libs = True
+                f.write(line)
+                f.writelines(map(lambda p: '  ' + p + '\n', libs))
+                continue
+            elif seen_target_libs:
+                if line.find(')') > -1:
+                    seen_target_libs = False
+                else:
+                    continue
+            f.write(line)
+
+    f.close()
 
 
 def write_third_party_paths(mozilla_path, module_path):
-  tpp_txt = os.path.join(mozilla_path, '../../tools/rewriting/ThirdPartyPaths.txt')
-  with open(os.path.join(module_path, 'ThirdPartyPaths.cpp'), 'w') as f:
-    ThirdPartyPaths.generate(f, tpp_txt)
+    tpp_txt = os.path.join(
+        mozilla_path, '../../tools/rewriting/ThirdPartyPaths.txt')
+    with open(os.path.join(module_path, 'ThirdPartyPaths.cpp'), 'w') as f:
+        ThirdPartyPaths.generate(f, tpp_txt)
 
 
 def do_import(mozilla_path, clang_tidy_path):
-  module = 'mozilla'
-  module_path = os.path.join(clang_tidy_path, module)
-  if not os.path.isdir(module_path):
-      os.mkdir(module_path)
+    module = 'mozilla'
+    module_path = os.path.join(clang_tidy_path, module)
+    if not os.path.isdir(module_path):
+        os.mkdir(module_path)
 
-  copy_dir_contents(mozilla_path, module_path)
-  write_third_party_paths(mozilla_path, module_path)
-  write_cmake(module_path)
-  add_item_to_cmake_section(os.path.join(module_path, '..', 'plugin',
-                                         'CMakeLists.txt'),
-                            'LINK_LIBS', 'clangTidyMozillaModule')
-  add_item_to_cmake_section(os.path.join(module_path, '..', 'tool',
-                                         'CMakeLists.txt'),
-                            'target_link_libraries', 'clangTidyMozillaModule')
-  with open(os.path.join(module_path, '..', 'CMakeLists.txt'), 'a') as f:
-    f.write('add_subdirectory(%s)\n' % module)
-  with open(os.path.join(module_path, '..', 'tool', 'ClangTidyMain.cpp'), 'a') as f:
-    f.write('''
+    copy_dir_contents(mozilla_path, module_path)
+    write_third_party_paths(mozilla_path, module_path)
+    write_cmake(module_path)
+    add_item_to_cmake_section(os.path.join(module_path, '..', 'plugin',
+                                           'CMakeLists.txt'),
+                              'LINK_LIBS', 'clangTidyMozillaModule')
+    add_item_to_cmake_section(os.path.join(module_path, '..', 'tool',
+                                           'CMakeLists.txt'),
+                              'target_link_libraries', 'clangTidyMozillaModule')
+    with open(os.path.join(module_path, '..', 'CMakeLists.txt'), 'a') as f:
+        f.write('add_subdirectory(%s)\n' % module)
+    with open(os.path.join(module_path, '..', 'tool', 'ClangTidyMain.cpp'), 'a') as f:
+        f.write('''
 // This anchor is used to force the linker to link the MozillaModule.
 extern volatile int MozillaModuleAnchorSource;
 static int LLVM_ATTRIBUTE_UNUSED MozillaModuleAnchorDestination =
           MozillaModuleAnchorSource;
 ''')
 
+
 def main():
-  if len(sys.argv) != 3:
-    print """\
+    if len(sys.argv) != 3:
+        print """\
 Usage: import_mozilla_checks.py <mozilla-clang-plugin-path> <clang-tidy-path>
 Imports the Mozilla static analysis checks into a clang-tidy source tree.
 """
 
-    return
+        return
 
-  mozilla_path = sys.argv[1]
-  if not os.path.isdir(mozilla_path):
-      print "Invalid path to mozilla clang plugin"
+    mozilla_path = sys.argv[1]
+    if not os.path.isdir(mozilla_path):
+        print "Invalid path to mozilla clang plugin"
 
-  clang_tidy_path = sys.argv[2]
-  if not os.path.isdir(mozilla_path):
-      print "Invalid path to clang-tidy source directory"
+    clang_tidy_path = sys.argv[2]
+    if not os.path.isdir(mozilla_path):
+        print "Invalid path to clang-tidy source directory"
 
-  do_import(mozilla_path, clang_tidy_path)
+    do_import(mozilla_path, clang_tidy_path)
 
 
 if __name__ == '__main__':
-  main()
+    main()
--- a/build/mobile/remoteautomation.py
+++ b/build/mobile/remoteautomation.py
@@ -13,29 +13,30 @@ import shutil
 import sys
 
 from automation import Automation
 from mozlog import get_default_logger
 from mozscreenshot import dump_screen
 import mozcrash
 
 # signatures for logcat messages that we don't care about much
-fennecLogcatFilters = [ "The character encoding of the HTML document was not declared",
-                        "Use of Mutation Events is deprecated. Use MutationObserver instead.",
-                        "Unexpected value from nativeGetEnabledTags: 0" ]
+fennecLogcatFilters = ["The character encoding of the HTML document was not declared",
+                       "Use of Mutation Events is deprecated. Use MutationObserver instead.",
+                       "Unexpected value from nativeGetEnabledTags: 0"]
+
 
 class RemoteAutomation(Automation):
 
-    def __init__(self, device, appName = '', remoteProfile = None, remoteLog = None,
+    def __init__(self, device, appName='', remoteProfile=None, remoteLog=None,
                  processArgs=None):
         self._device = device
         self._appName = appName
         self._remoteProfile = remoteProfile
         self._remoteLog = remoteLog
-        self._processArgs = processArgs or {};
+        self._processArgs = processArgs or {}
 
         self.lastTestSeen = "remoteautomation.py"
         Automation.__init__(self)
 
     # Set up what we need for the remote environment
     def environment(self, env=None, xrePath=None, crashreporter=True, debugger=False, lsanPath=None, ubsanPath=None):
         # Because we are running remote, we don't want to mimic the local env
         # so no copying of os.environ
@@ -74,27 +75,28 @@ class RemoteAutomation(Automation):
 
     def waitForFinish(self, proc, utilityPath, timeout, maxTime, startTime, debuggerInfo, symbolsPath, outputHandler=None):
         """ Wait for tests to finish.
             If maxTime seconds elapse or no output is detected for timeout
             seconds, kill the process and fail the test.
         """
         proc.utilityPath = utilityPath
         # maxTime is used to override the default timeout, we should honor that
-        status = proc.wait(timeout = maxTime, noOutputTimeout = timeout)
+        status = proc.wait(timeout=maxTime, noOutputTimeout=timeout)
         self.lastTestSeen = proc.getLastTestSeen
 
         topActivity = self._device.get_top_activity(timeout=60)
         if topActivity == proc.procName:
             print "Browser unexpectedly found running. Killing..."
             proc.kill(True)
         if status == 1:
             if maxTime:
                 print "TEST-UNEXPECTED-FAIL | %s | application ran for longer than " \
-                      "allowed maximum time of %s seconds" % (self.lastTestSeen, maxTime)
+                      "allowed maximum time of %s seconds" % (
+                          self.lastTestSeen, maxTime)
             else:
                 print "TEST-UNEXPECTED-FAIL | %s | application ran for longer than " \
                       "allowed maximum time" % (self.lastTestSeen)
         if status == 2:
             print "TEST-UNEXPECTED-FAIL | %s | application timed out after %d seconds with no output" \
                 % (self.lastTestSeen, int(timeout))
 
         return status
@@ -123,17 +125,18 @@ class RemoteAutomation(Automation):
                 self.deleteANRs()
             except Exception as e:
                 print "Error pulling %s: %s" % (traces, str(e))
         else:
             print "%s not found" % traces
 
     def deleteTombstones(self):
         # delete any tombstone files from device
-        self._device.rm("/data/tombstones", force=True, recursive=True, root=True)
+        self._device.rm("/data/tombstones", force=True,
+                        recursive=True, root=True)
 
     def checkForTombstones(self):
         # pull any tombstones from device and move to MOZ_UPLOAD_DIR
         remoteDir = "/data/tombstones"
         uploadDir = os.environ.get('MOZ_UPLOAD_DIR', None)
         if uploadDir:
             if not os.path.exists(uploadDir):
                 os.mkdir(uploadDir)
@@ -155,19 +158,21 @@ class RemoteAutomation(Automation):
                 print "%s does not exist; tombstone check skipped" % remoteDir
         else:
             print "MOZ_UPLOAD_DIR not defined; tombstone check skipped"
 
     def checkForCrashes(self, directory, symbolsPath):
         self.checkForANRs()
         self.checkForTombstones()
 
-        logcat = self._device.get_logcat(filter_out_regexps=fennecLogcatFilters)
+        logcat = self._device.get_logcat(
+            filter_out_regexps=fennecLogcatFilters)
 
-        javaException = mozcrash.check_for_java_exception(logcat, test_name=self.lastTestSeen)
+        javaException = mozcrash.check_for_java_exception(
+            logcat, test_name=self.lastTestSeen)
         if javaException:
             return True
 
         # If crash reporting is disabled (MOZ_CRASHREPORTER!=1), we can't say
         # anything.
         if not self.CRASHREPORTER:
             return False
 
@@ -179,53 +184,56 @@ class RemoteAutomation(Automation):
                 # minidumps directory is automatically created when Fennec
                 # (first) starts, so its lack of presence is a hint that
                 # something went wrong.
                 print "Automation Error: No crash directory (%s) found on remote device" % remoteCrashDir
                 return True
             self._device.pull(remoteCrashDir, dumpDir)
 
             logger = get_default_logger()
-            crashed = mozcrash.log_crashes(logger, dumpDir, symbolsPath, test=self.lastTestSeen)
+            crashed = mozcrash.log_crashes(
+                logger, dumpDir, symbolsPath, test=self.lastTestSeen)
 
         finally:
             try:
                 shutil.rmtree(dumpDir)
             except Exception as e:
-                print "WARNING: unable to remove directory %s: %s" % (dumpDir, str(e))
+                print "WARNING: unable to remove directory %s: %s" % (
+                    dumpDir, str(e))
         return crashed
 
     def buildCommandLine(self, app, debuggerInfo, profileDir, testURL, extraArgs):
         # If remote profile is specified, use that instead
         if self._remoteProfile:
             profileDir = self._remoteProfile
 
         # Hack for robocop, if app is "am" and extraArgs contains the rest of the stuff, lets
         # assume extraArgs is all we need
         if app == "am" and extraArgs[0] in ('instrument', 'start'):
             return app, extraArgs
 
-        cmd, args = Automation.buildCommandLine(self, app, debuggerInfo, profileDir, testURL, extraArgs)
+        cmd, args = Automation.buildCommandLine(
+            self, app, debuggerInfo, profileDir, testURL, extraArgs)
         try:
             args.remove('-foreground')
         except:
             pass
         return app, args
 
-    def Process(self, cmd, stdout = None, stderr = None, env = None, cwd = None):
+    def Process(self, cmd, stdout=None, stderr=None, env=None, cwd=None):
         return self.RProcess(self._device, cmd, self._remoteLog, env, cwd, self._appName,
                              **self._processArgs)
 
     class RProcess(object):
         def __init__(self, device, cmd, stdout=None, env=None, cwd=None, app=None,
                      messageLogger=None, counts=None):
             self.device = device
             self.lastTestSeen = "remoteautomation.py"
             self.messageLogger = messageLogger
-            self.proc =  stdout
+            self.proc = stdout
             self.procName = cmd[0].split(posixpath.sep)[-1]
             self.stdoutlen = 0
             self.utilityPath = None
 
             self.counts = counts
             if self.counts is not None:
                 self.counts['pass'] = 0
                 self.counts['fail'] = 0
@@ -246,17 +254,18 @@ class RemoteAutomation(Automation):
                     url = None
                 else:
                     args = args[:-1]
                 if 'geckoview' in app:
                     activity = "TestRunnerActivity"
                     self.device.launch_activity(app, activity, e10s=True, moz_env=env,
                                                 extra_args=args, url=url)
                 else:
-                    self.device.launch_fennec(app, moz_env=env, extra_args=args, url=url)
+                    self.device.launch_fennec(
+                        app, moz_env=env, extra_args=args, url=url)
 
             # Setting timeout at 1 hour since on a remote device this takes much longer.
             # Temporarily increased to 90 minutes because no more chunks can be created.
             self.timeout = 5400
 
             # Used to buffer log messages until we meet a line break
             self.logBuffer = ""
 
@@ -274,26 +283,28 @@ class RemoteAutomation(Automation):
         def read_stdout(self):
             """
             Fetch the full remote log file, log any new content and return True if new
             content processed.
             """
             if not self.device.is_file(self.proc):
                 return False
             try:
-                newLogContent = self.device.get_file(self.proc, offset=self.stdoutlen)
+                newLogContent = self.device.get_file(
+                    self.proc, offset=self.stdoutlen)
             except Exception:
                 return False
             if not newLogContent:
                 return False
 
             self.stdoutlen += len(newLogContent)
 
             if self.messageLogger is None:
-                testStartFilenames = re.findall(r"TEST-START \| ([^\s]*)", newLogContent)
+                testStartFilenames = re.findall(
+                    r"TEST-START \| ([^\s]*)", newLogContent)
                 if testStartFilenames:
                     self.lastTestSeen = testStartFilenames[-1]
                 print newLogContent
                 return True
 
             self.logBuffer += newLogContent
             lines = self.logBuffer.split('\n')
             lines = [l for l in lines if l]
@@ -339,26 +350,26 @@ class RemoteAutomation(Automation):
             return self.lastTestSeen
 
         # Wait for the remote process to end (or for its activity to go to background).
         # While waiting, periodically retrieve the process output and print it.
         # If the process is still running after *timeout* seconds, return 1;
         # If the process is still running but no output is received in *noOutputTimeout*
         # seconds, return 2;
         # Else, once the process exits/goes to background, return 0.
-        def wait(self, timeout = None, noOutputTimeout = None):
+        def wait(self, timeout=None, noOutputTimeout=None):
             timer = 0
             noOutputTimer = 0
             interval = 10
             if timeout == None:
                 timeout = self.timeout
             status = 0
             top = self.procName
             slowLog = False
-            endTime = datetime.datetime.now() + datetime.timedelta(seconds = timeout)
+            endTime = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
             while top == self.procName:
                 # Get log updates on each interval, but if it is taking
                 # too long, only do it every 60 seconds
                 hasOutput = False
                 if (not slowLog) or (timer % 60 == 0):
                     startRead = datetime.datetime.now()
                     hasOutput = self.read_stdout()
                     if (datetime.datetime.now() - startRead) > datetime.timedelta(seconds=5):
@@ -380,17 +391,17 @@ class RemoteAutomation(Automation):
                     top = self.device.get_top_activity(timeout=60)
                     if top is None:
                         print "Failed to get top activity, retrying, once..."
                         top = self.device.get_top_activity(timeout=60)
             # Flush anything added to stdout during the sleep
             self.read_stdout()
             return status
 
-        def kill(self, stagedShutdown = False):
+        def kill(self, stagedShutdown=False):
             if self.utilityPath:
                 # Take a screenshot to capture the screen state just before
                 # the application is killed. There are on-device screenshot
                 # options but they rarely work well with Firefox on the
                 # Android emulator. dump_screen provides an effective
                 # screenshot of the emulator and its host desktop.
                 dump_screen(self.utilityPath, get_default_logger())
             if stagedShutdown:
--- a/build/pgo/genpgocert.py
+++ b/build/pgo/genpgocert.py
@@ -17,167 +17,185 @@ import sys
 import tempfile
 import distutils
 
 from mozbuild.base import MozbuildObject
 from mozfile import NamedTemporaryFile, TemporaryDirectory
 from mozprofile.permissions import ServerLocations
 
 dbFiles = [
-  re.compile("^cert[0-9]+\.db$"),
-  re.compile("^key[0-9]+\.db$"),
-  re.compile("^secmod\.db$")
+    re.compile("^cert[0-9]+\.db$"),
+    re.compile("^key[0-9]+\.db$"),
+    re.compile("^secmod\.db$")
 ]
 
+
 def unlinkDbFiles(path):
-  for root, dirs, files in os.walk(path):
-    for name in files:
-      for dbFile in dbFiles:
-        if dbFile.match(name) and os.path.exists(os.path.join(root, name)):
-          os.unlink(os.path.join(root, name))
+    for root, dirs, files in os.walk(path):
+        for name in files:
+            for dbFile in dbFiles:
+                if dbFile.match(name) and os.path.exists(os.path.join(root, name)):
+                    os.unlink(os.path.join(root, name))
+
 
 def dbFilesExist(path):
-  for root, dirs, files in os.walk(path):
-    for name in files:
-      for dbFile in dbFiles:
-        if dbFile.match(name) and os.path.exists(os.path.join(root, name)):
-          return True
-  return False
+    for root, dirs, files in os.walk(path):
+        for name in files:
+            for dbFile in dbFiles:
+                if dbFile.match(name) and os.path.exists(os.path.join(root, name)):
+                    return True
+    return False
+
 
-def runUtil(util, args, inputdata = None, outputstream = None):
-  env = os.environ.copy()
-  if mozinfo.os == "linux":
-    pathvar = "LD_LIBRARY_PATH"
-    app_path = os.path.dirname(util)
-    if pathvar in env:
-      env[pathvar] = "%s%s%s" % (app_path, os.pathsep, env[pathvar])
-    else:
-      env[pathvar] = app_path
-  proc = subprocess.Popen([util] + args, env=env,
-                          stdin=subprocess.PIPE if inputdata else None,
-                          stdout=outputstream)
-  proc.communicate(inputdata)
-  return proc.returncode
+def runUtil(util, args, inputdata=None, outputstream=None):
+    env = os.environ.copy()
+    if mozinfo.os == "linux":
+        pathvar = "LD_LIBRARY_PATH"
+        app_path = os.path.dirname(util)
+        if pathvar in env:
+            env[pathvar] = "%s%s%s" % (app_path, os.pathsep, env[pathvar])
+        else:
+            env[pathvar] = app_path
+    proc = subprocess.Popen([util] + args, env=env,
+                            stdin=subprocess.PIPE if inputdata else None,
+                            stdout=outputstream)
+    proc.communicate(inputdata)
+    return proc.returncode
+
 
 def createRandomFile(randomFile):
-  for count in xrange(0, 2048):
-    randomFile.write(chr(random.randint(0, 255)))
+    for count in xrange(0, 2048):
+        randomFile.write(chr(random.randint(0, 255)))
+
 
 def writeCertspecForServerLocations(fd):
     locations = ServerLocations(os.path.join(build.topsrcdir,
                                              "build", "pgo",
                                              "server-locations.txt"))
-    SAN=[]
+    SAN = []
     for loc in [i for i in iter(locations) if i.scheme == "https" and "nocert" not in i.options]:
-      customCertOption = False
-      customCertRE = re.compile("^cert=(?:\w+)")
-      for _ in [i for i in loc.options if customCertRE.match(i)]:
-        customCertOption = True
-        break
+        customCertOption = False
+        customCertRE = re.compile("^cert=(?:\w+)")
+        for _ in [i for i in loc.options if customCertRE.match(i)]:
+            customCertOption = True
+            break
 
-      if not customCertOption:
-        SAN.append(loc.host)
+        if not customCertOption:
+            SAN.append(loc.host)
 
     fd.write("issuer:printableString/CN=Temporary Certificate Authority/O=Mozilla Testing/OU=Profile Guided Optimization\n")
     fd.write("subject:{}\n".format(SAN[0]))
     fd.write("extension:subjectAlternativeName:{}\n".format(",".join(SAN)))
 
+
 def constructCertDatabase(build, srcDir):
-  certutil = build.get_binary_path(what="certutil")
-  pk12util = build.get_binary_path(what="pk12util")
-  openssl = distutils.spawn.find_executable("openssl")
-  pycert = os.path.join(build.topsrcdir, "security", "manager", "ssl", "tests",
-                        "unit", "pycert.py")
-  pykey = os.path.join(build.topsrcdir, "security", "manager", "ssl", "tests",
-                        "unit", "pykey.py")
-
+    certutil = build.get_binary_path(what="certutil")
+    pk12util = build.get_binary_path(what="pk12util")
+    openssl = distutils.spawn.find_executable("openssl")
+    pycert = os.path.join(build.topsrcdir, "security", "manager", "ssl", "tests",
+                          "unit", "pycert.py")
+    pykey = os.path.join(build.topsrcdir, "security", "manager", "ssl", "tests",
+                         "unit", "pykey.py")
 
-  with NamedTemporaryFile() as pwfile, NamedTemporaryFile() as rndfile, TemporaryDirectory() as pemfolder:
-    pgoCAPath = os.path.join(srcDir, "pgoca.p12")
+    with NamedTemporaryFile() as pwfile, NamedTemporaryFile() as rndfile, TemporaryDirectory() as pemfolder:
+        pgoCAPath = os.path.join(srcDir, "pgoca.p12")
 
-    pwfile.write("\n")
-    pwfile.flush()
+        pwfile.write("\n")
+        pwfile.flush()
 
-    if dbFilesExist(srcDir):
-      # Make sure all DB files from src are really deleted
-      unlinkDbFiles(srcDir)
+        if dbFilesExist(srcDir):
+            # Make sure all DB files from src are really deleted
+            unlinkDbFiles(srcDir)
 
-    # Copy  all .certspec and .keyspec files to a temporary directory
-    for root, dirs, files in os.walk(srcDir):
-      for spec in [i for i in files if i.endswith(".certspec") or i.endswith(".keyspec")]:
-        shutil.copyfile(os.path.join(root, spec), os.path.join(pemfolder, spec))
+        # Copy  all .certspec and .keyspec files to a temporary directory
+        for root, dirs, files in os.walk(srcDir):
+            for spec in [i for i in files if i.endswith(".certspec") or i.endswith(".keyspec")]:
+                shutil.copyfile(os.path.join(root, spec),
+                                os.path.join(pemfolder, spec))
 
-    # Write a certspec for the "server-locations.txt" file to that temporary directory
-    pgoserver_certspec = os.path.join(pemfolder, "pgoserver.certspec")
-    if os.path.exists(pgoserver_certspec):
-      raise Exception("{} already exists, which isn't allowed".format(pgoserver_certspec))
-    with open(pgoserver_certspec, "w") as fd:
-      writeCertspecForServerLocations(fd)
+        # Write a certspec for the "server-locations.txt" file to that temporary directory
+        pgoserver_certspec = os.path.join(pemfolder, "pgoserver.certspec")
+        if os.path.exists(pgoserver_certspec):
+            raise Exception(
+                "{} already exists, which isn't allowed".format(pgoserver_certspec))
+        with open(pgoserver_certspec, "w") as fd:
+            writeCertspecForServerLocations(fd)
 
-    # Generate certs for all certspecs
-    for root, dirs, files in os.walk(pemfolder):
-      for certspec in [i for i in files if i.endswith(".certspec")]:
-        name = certspec.split(".certspec")[0]
-        pem = os.path.join(pemfolder, "{}.cert.pem".format(name))
+        # Generate certs for all certspecs
+        for root, dirs, files in os.walk(pemfolder):
+            for certspec in [i for i in files if i.endswith(".certspec")]:
+                name = certspec.split(".certspec")[0]
+                pem = os.path.join(pemfolder, "{}.cert.pem".format(name))
+
+                print("Generating public certificate {} (pem={})".format(name, pem))
 
-        print("Generating public certificate {} (pem={})".format(name, pem))
-
-        with open(os.path.join(root, certspec), "r") as certspec_file:
-          certspec_data = certspec_file.read()
-          with open(pem, "w") as pem_file:
-            status = runUtil(pycert, [], inputdata=certspec_data, outputstream=pem_file)
-            if status:
-              return status
+                with open(os.path.join(root, certspec), "r") as certspec_file:
+                    certspec_data = certspec_file.read()
+                    with open(pem, "w") as pem_file:
+                        status = runUtil(
+                            pycert, [], inputdata=certspec_data, outputstream=pem_file)
+                        if status:
+                            return status
 
-        status = runUtil(certutil, ["-A", "-n", name, "-t", "P,,", "-i", pem, "-d", srcDir, "-f", pwfile.name])
-        if status:
-          return status
+                status = runUtil(certutil, [
+                                 "-A", "-n", name, "-t", "P,,", "-i", pem, "-d", srcDir, "-f", pwfile.name])
+                if status:
+                    return status
 
+            for keyspec in [i for i in files if i.endswith(".keyspec")]:
+                parts = keyspec.split(".")
+                name = parts[0]
+                key_type = parts[1]
+                if key_type not in ["ca", "client", "server"]:
+                    raise Exception("{}: keyspec filenames must be of the form XXX.client.keyspec or XXX.ca.keyspec (key_type={})".format(
+                        keyspec, key_type))
+                key_pem = os.path.join(pemfolder, "{}.key.pem".format(name))
 
-      for keyspec in [i for i in files if i.endswith(".keyspec")]:
-        parts = keyspec.split(".")
-        name = parts[0]
-        key_type = parts[1]
-        if key_type not in ["ca", "client", "server"]:
-          raise Exception("{}: keyspec filenames must be of the form XXX.client.keyspec or XXX.ca.keyspec (key_type={})".format(keyspec, key_type))
-        key_pem = os.path.join(pemfolder, "{}.key.pem".format(name))
+                print("Generating private key {} (pem={})".format(name, key_pem))
 
-        print("Generating private key {} (pem={})".format(name, key_pem))
+                with open(os.path.join(root, keyspec), "r") as keyspec_file:
+                    keyspec_data = keyspec_file.read()
+                    with open(key_pem, "w") as pem_file:
+                        status = runUtil(
+                            pykey, [], inputdata=keyspec_data, outputstream=pem_file)
+                        if status:
+                            return status
 
-        with open(os.path.join(root, keyspec), "r") as keyspec_file:
-          keyspec_data = keyspec_file.read()
-          with open(key_pem, "w") as pem_file:
-            status = runUtil(pykey, [], inputdata=keyspec_data, outputstream=pem_file)
-            if status:
-              return status
+                cert_pem = os.path.join(pemfolder, "{}.cert.pem".format(name))
+                if not os.path.exists(cert_pem):
+                    raise Exception("There has to be a corresponding certificate named {} for the keyspec {}".format(
+                        cert_pem, keyspec))
 
-        cert_pem = os.path.join(pemfolder, "{}.cert.pem".format(name))
-        if not os.path.exists(cert_pem):
-          raise Exception("There has to be a corresponding certificate named {} for the keyspec {}".format(cert_pem, keyspec))
+                p12 = os.path.join(pemfolder, "{}.key.p12".format(name))
+                print("Converting private key {} to PKCS12 (p12={})".format(
+                    key_pem, p12))
+                status = runUtil(openssl, ["pkcs12", "-export", "-inkey", key_pem, "-in",
+                                           cert_pem, "-name", name, "-out", p12, "-passout", "file:"+pwfile.name])
+                if status:
+                    return status
 
-        p12 = os.path.join(pemfolder, "{}.key.p12".format(name))
-        print("Converting private key {} to PKCS12 (p12={})".format(key_pem, p12))
-        status = runUtil(openssl, ["pkcs12", "-export", "-inkey", key_pem, "-in", cert_pem, "-name", name, "-out", p12, "-passout", "file:"+pwfile.name])
-        if status:
-          return status
+                print("Importing private key {} to database".format(key_pem))
+                status = runUtil(
+                    pk12util, ["-i", p12, "-d", srcDir, "-w", pwfile.name, "-k", pwfile.name])
+                if status:
+                    return status
 
-        print("Importing private key {} to database".format(key_pem))
-        status = runUtil(pk12util, ["-i", p12, "-d", srcDir, "-w", pwfile.name, "-k", pwfile.name])
-        if status:
-          return status
+                if key_type == "ca":
+                    shutil.copyfile(cert_pem, os.path.join(
+                        srcDir, "{}.ca".format(name)))
+                elif key_type == "client":
+                    shutil.copyfile(p12, os.path.join(
+                        srcDir, "{}.client".format(name)))
+                elif key_type == "server":
+                    pass  # Nothing to do for server keys
+                else:
+                    raise Exception(
+                        "State error: Unknown keyspec key_type: {}".format(key_type))
 
-        if key_type == "ca":
-          shutil.copyfile(cert_pem, os.path.join(srcDir, "{}.ca".format(name)))
-        elif key_type == "client":
-          shutil.copyfile(p12, os.path.join(srcDir, "{}.client".format(name)))
-        elif key_type == "server":
-          pass # Nothing to do for server keys
-        else:
-          raise Exception("State error: Unknown keyspec key_type: {}".format(key_type))
+    return 0
 
-  return 0
 
 build = MozbuildObject.from_environment()
 certdir = os.path.join(build.topsrcdir, "build", "pgo", "certs")
 certificateStatus = constructCertDatabase(build, certdir)
 if certificateStatus:
-  print "TEST-UNEXPECTED-FAIL | SSL Server Certificate generation"
+    print "TEST-UNEXPECTED-FAIL | SSL Server Certificate generation"
 sys.exit(certificateStatus)
--- a/build/unix/elfhack/inject/copy_source.py
+++ b/build/unix/elfhack/inject/copy_source.py
@@ -1,9 +1,10 @@
 # -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
 # vim: set filetype=python:
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
+
 def copy(out_file, in_path):
     with open(in_path, 'r') as fh:
         out_file.write(fh.read())
--- a/build/unix/rewrite_asan_dylib.py
+++ b/build/unix/rewrite_asan_dylib.py
@@ -11,17 +11,18 @@ from buildconfig import substs
 
 '''
 Scans the given directories for binaries referencing the AddressSanitizer
 runtime library, copies it to the main directory and rewrites binaries to not
 reference it with absolute paths but with @executable_path instead.
 '''
 
 # This is the dylib we're looking for
-DYLIB_NAME='libclang_rt.asan_osx_dynamic.dylib'
+DYLIB_NAME = 'libclang_rt.asan_osx_dynamic.dylib'
+
 
 def resolve_rpath(filename):
     otoolOut = subprocess.check_output([substs['OTOOL'], '-l', filename])
     currentCmd = None
 
     # The lines we need to find look like this:
     # ...
     # Load command 22
@@ -42,63 +43,71 @@ def resolve_rpath(filename):
             if pathMatch is not None:
                 path = pathMatch.group(1)
                 if os.path.isdir(path):
                     return path
 
     sys.stderr.write('@rpath could not be resolved from %s\n' % filename)
     sys.exit(1)
 
+
 def scan_directory(path):
     dylibCopied = False
 
     for root, subdirs, files in os.walk(path):
         for filename in files:
             filename = os.path.join(root, filename)
 
             # Skip all files that aren't either dylibs or executable
             if not (filename.endswith('.dylib') or os.access(filename, os.X_OK)):
                 continue
 
             try:
-                otoolOut = subprocess.check_output([substs['OTOOL'], '-L', filename])
+                otoolOut = subprocess.check_output(
+                    [substs['OTOOL'], '-L', filename])
             except Exception:
                 # Errors are expected on non-mach executables, ignore them and continue
                 continue
 
             for line in otoolOut.splitlines():
                 if DYLIB_NAME in line:
                     absDylibPath = line.split()[0]
 
                     # Don't try to rewrite binaries twice
                     if absDylibPath.startswith('@executable_path/'):
                         continue
 
                     if not dylibCopied:
                         if absDylibPath.startswith('@rpath/'):
                             rpath = resolve_rpath(filename)
-                            copyDylibPath = absDylibPath.replace('@rpath', rpath)
+                            copyDylibPath = absDylibPath.replace(
+                                '@rpath', rpath)
                         else:
                             copyDylibPath = absDylibPath
 
                         if os.path.isfile(copyDylibPath):
                             # Copy the runtime once to the main directory, which is passed
                             # as the argument to this function.
                             shutil.copy(copyDylibPath, path)
 
                             # Now rewrite the library itself
-                            subprocess.check_call([substs['INSTALL_NAME_TOOL'], '-id', '@executable_path/' + DYLIB_NAME, os.path.join(path, DYLIB_NAME)])
+                            subprocess.check_call(
+                                [substs['INSTALL_NAME_TOOL'], '-id', '@executable_path/' + DYLIB_NAME, os.path.join(path, DYLIB_NAME)])
                             dylibCopied = True
                         else:
-                            sys.stderr.write('dylib path in %s was not found at: %s\n' % (filename, copyDylibPath))
+                            sys.stderr.write('dylib path in %s was not found at: %s\n' % (
+                                filename, copyDylibPath))
 
                     # Now use install_name_tool to rewrite the path in our binary
-                    relpath = '' if path == root else os.path.relpath(path, root) + '/'
-                    subprocess.check_call([substs['INSTALL_NAME_TOOL'], '-change', absDylibPath, '@executable_path/' + relpath + DYLIB_NAME, filename])
+                    relpath = '' if path == root else os.path.relpath(
+                        path, root) + '/'
+                    subprocess.check_call([substs['INSTALL_NAME_TOOL'], '-change',
+                                           absDylibPath, '@executable_path/' + relpath + DYLIB_NAME, filename])
                     break
 
     if not dylibCopied:
         sys.stderr.write('%s could not be found\n' % DYLIB_NAME)
         sys.exit(1)
 
+
 if __name__ == '__main__':
     for d in sys.argv[1:]:
         scan_directory(d)
--- a/build/util/count_ctors.py
+++ b/build/util/count_ctors.py
@@ -1,16 +1,17 @@
 
 #!/usr/bin/python
 import json
 
 import re
 import subprocess
 import sys
 
+
 def count_ctors(filename):
     proc = subprocess.Popen(
         ['readelf', '-W', '-S', filename], stdout=subprocess.PIPE)
 
     # Some versions of ld produce both .init_array and .ctors.  So we have
     # to check for both.
     n_init_array_ctors = 0
     have_init_array = False
@@ -43,24 +44,24 @@ def count_ctors(filename):
     if have_ctors:
         return n_ctors_ctors
 
     # We didn't find anything; somebody switched initialization mechanisms on
     # us, or the binary is completely busted.  Complain either way.
     print >>sys.stderr, "Couldn't find .init_array or .ctors in", filename
     sys.exit(1)
 
+
 if __name__ == '__main__':
     for f in sys.argv[1:]:
         perfherder_data = {
             "framework": {"name": "build_metrics"},
             "suites": [{
                 "name": "compiler_metrics",
                 "subtests": [{
                     "name": "num_static_constructors",
                     "value": count_ctors(f),
                     "alertChangeType": "absolute",
                     "alertThreshold": 3
                 }]}
             ]
         }
         print "PERFHERDER_DATA: %s" % json.dumps(perfherder_data)
-
--- a/build/valgrind/mach_commands.py
+++ b/build/valgrind/mach_commands.py
@@ -27,27 +27,28 @@ def is_valgrind_build(cls):
     return 'MOZ_VALGRIND' in defines and 'MOZ_MEMORY' not in defines
 
 
 @CommandProvider
 class MachCommands(MachCommandBase):
     '''
     Run Valgrind tests.
     '''
+
     def __init__(self, context):
         MachCommandBase.__init__(self, context)
 
     @Command('valgrind-test', category='testing',
-        conditions=[conditions.is_firefox, is_valgrind_build],
-        description='Run the Valgrind test job (memory-related errors).')
+             conditions=[conditions.is_firefox, is_valgrind_build],
+             description='Run the Valgrind test job (memory-related errors).')
     @CommandArgument('--suppressions', default=[], action='append',
-        metavar='FILENAME',
-        help='Specify a suppression file for Valgrind to use. Use '
-            '--suppression multiple times to specify multiple suppression '
-            'files.')
+                     metavar='FILENAME',
+                     help='Specify a suppression file for Valgrind to use. Use '
+                     '--suppression multiple times to specify multiple suppression '
+                     'files.')
     def valgrind_test(self, suppressions):
         import sys
         import tempfile
 
         from mozbuild.base import MozbuildObject
         from mozfile import TemporaryDirectory
         from mozhttpd import MozHttpd
         from mozprofile import FirefoxProfile, Preferences
@@ -60,18 +61,19 @@ class MachCommands(MachCommandBase):
         build_dir = os.path.join(self.topsrcdir, 'build')
 
         # XXX: currently we just use the PGO inputs for Valgrind runs.  This may
         # change in the future.
         httpd = MozHttpd(docroot=os.path.join(build_dir, 'pgo'))
         httpd.start(block=False)
 
         with TemporaryDirectory() as profilePath:
-            #TODO: refactor this into mozprofile
-            profile_data_dir = os.path.join(self.topsrcdir, 'testing', 'profiles')
+            # TODO: refactor this into mozprofile
+            profile_data_dir = os.path.join(
+                self.topsrcdir, 'testing', 'profiles')
             with open(os.path.join(profile_data_dir, 'profiles.json'), 'r') as fh:
                 base_profiles = json.load(fh)['valgrind']
 
             prefpaths = [os.path.join(profile_data_dir, profile, 'user.js')
                          for profile in base_profiles]
             prefs = {}
             for path in prefpaths:
                 prefs.update(Preferences.read_prefs(path))
@@ -79,17 +81,18 @@ class MachCommands(MachCommandBase):
             interpolation = {
                 'server': '%s:%d' % httpd.httpd.server_address,
             }
             for k, v in prefs.items():
                 if isinstance(v, string_types):
                     v = v.format(**interpolation)
                 prefs[k] = Preferences.cast(v)
 
-            quitter = os.path.join(self.topsrcdir, 'tools', 'quitter', 'quitter@mozilla.org.xpi')
+            quitter = os.path.join(
+                self.topsrcdir, 'tools', 'quitter', 'quitter@mozilla.org.xpi')
 
             locations = ServerLocations()
             locations.add_host(host='127.0.0.1',
                                port=httpd.httpd.server_port,
                                options='primary')
 
             profile = FirefoxProfile(profile=profilePath,
                                      preferences=prefs,
--- a/build/valgrind/output_handler.py
+++ b/build/valgrind/output_handler.py
@@ -2,16 +2,17 @@
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import print_function, unicode_literals
 
 import logging
 import re
 
+
 class OutputHandler(object):
     '''
     A class for handling Valgrind output.
 
     Valgrind errors look like this:
 
     ==60741== 40 (24 direct, 16 indirect) bytes in 1 blocks are definitely lost in loss record 2,746 of 5,235
     ==60741==    at 0x4C26B43: calloc (vg_replace_malloc.c:593)
@@ -99,18 +100,17 @@ class OutputHandler(object):
             if self.number_of_stack_entries_to_get != 0:
                 self.curr_location += ' / '
             else:
                 # We've finished getting the first few stack entries. Print the
                 # failure message and the buffered lines, and then reset state.
                 self.logger(logging.ERROR, 'valgrind-error-msg',
                             {'error': self.curr_error,
                              'location': self.curr_location},
-                             'TEST-UNEXPECTED-FAIL | valgrind-test | {error} at {location}')
+                            'TEST-UNEXPECTED-FAIL | valgrind-test | {error} at {location}')
                 for b in self.buffered_lines:
                     self.log(b)
                 self.curr_error = None
                 self.curr_location = None
                 self.buffered_lines = None
 
         if re.match(self.re_suppression, line):
             self.suppression_count += 1
-
--- a/build/win32/autobinscope.py
+++ b/build/win32/autobinscope.py
@@ -14,84 +14,87 @@
 import sys
 import subprocess
 import os
 
 BINSCOPE_OUTPUT_LOGFILE = r".\binscope_xml_output.log"
 
 # usage
 if len(sys.argv) < 3:
-  print """usage : autobinscope.by path_to_binary path_to_symbols [log_file_path]"
+    print """usage : autobinscope.py path_to_binary path_to_symbols [log_file_path]"
 		log_file_path is optional, log will be written to .\binscope_xml_output.log by default"""
-  sys.exit(0)
+    sys.exit(0)
 
 binary_path = sys.argv[1]
 symbol_path = sys.argv[2]
 
 if len(sys.argv) == 4:
-  log_file_path = sys.argv[3]
+    log_file_path = sys.argv[3]
 else:
-  log_file_path = BINSCOPE_OUTPUT_LOGFILE
-  
+    log_file_path = BINSCOPE_OUTPUT_LOGFILE
+
 # execute binscope against the binary, using the BINSCOPE environment
 # variable as the path to binscope.exe
 try:
-  binscope_path = os.environ['BINSCOPE']
+    binscope_path = os.environ['BINSCOPE']
 except KeyError:
-  print "TEST-UNEXPECTED-FAIL | autobinscope.py | BINSCOPE environment variable is not set, can't check DEP/ASLR etc. status."
-  sys.exit(0)
-  
-try:    
-  proc = subprocess.Popen([
-    binscope_path,
-    "/NoLogo",
-    "/Target", binary_path,
-    "/SymPath", symbol_path,
-    "/Checks", "ATLVersionCheck",
-    "/Checks", "ATLVulnCheck",
-    # We do not ship in the Windows Store
-    "/SkippedChecks", "AppContainerCheck",
-    # The CompilerVersionCheck doesn't like clang-cl (we would need to set MinimumCompilerVersion)
-    # But we check the compiler in our build system anyway, so this doesn't seem useful
-    "/SkippedChecks", "CompilerVersionCheck",
-    "/Checks", "DBCheck",
-    "/Checks", "DefaultGSCookieCheck",
-    "/Checks", "ExecutableImportsCheck",
-    # FunctonPointersCheck is disabled per bug 1014002
-    "/SkippedChecks", "FunctionPointersCheck",
-    # GSCheck doesn't know how to deal with Rust libs
-    "/SkippedChecks", "GSCheck",
-    "/Checks", "GSFriendlyInitCheck",
-    # We are not safebuffers-clean, bug 1449951
-    "/SkippedChecks", "GSFunctionSafeBuffersCheck",
-    "/Checks", "HighEntropyVACheck",
-    "/Checks", "NXCheck",
-    "/Checks", "RSA32Check",
-    "/Checks", "SafeSEHCheck",
-    "/Checks", "SharedSectionCheck",
-    "/Checks", "VB6Check",
-    "/Checks", "WXCheck"
+    print "TEST-UNEXPECTED-FAIL | autobinscope.py | BINSCOPE environment variable is not set, can't check DEP/ASLR etc. status."
+    sys.exit(0)
+
+try:
+    proc = subprocess.Popen([
+        binscope_path,
+        "/NoLogo",
+        "/Target", binary_path,
+        "/SymPath", symbol_path,
+        "/Checks", "ATLVersionCheck",
+        "/Checks", "ATLVulnCheck",
+        # We do not ship in the Windows Store
+        "/SkippedChecks", "AppContainerCheck",
+        # The CompilerVersionCheck doesn't like clang-cl (we would need to set MinimumCompilerVersion)
+        # But we check the compiler in our build system anyway, so this doesn't seem useful
+        "/SkippedChecks", "CompilerVersionCheck",
+        "/Checks", "DBCheck",
+        "/Checks", "DefaultGSCookieCheck",
+        "/Checks", "ExecutableImportsCheck",
+        # FunctonPointersCheck is disabled per bug 1014002
+        "/SkippedChecks", "FunctionPointersCheck",
+        # GSCheck doesn't know how to deal with Rust libs
+        "/SkippedChecks", "GSCheck",
+        "/Checks", "GSFriendlyInitCheck",
+        # We are not safebuffers-clean, bug 1449951
+        "/SkippedChecks", "GSFunctionSafeBuffersCheck",
+        "/Checks", "HighEntropyVACheck",
+        "/Checks", "NXCheck",
+        "/Checks", "RSA32Check",
+        "/Checks", "SafeSEHCheck",
+        "/Checks", "SharedSectionCheck",
+        "/Checks", "VB6Check",
+        "/Checks", "WXCheck"
     ], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
 
-except WindowsError, (errno, strerror): 
-  if errno != 2 and errno != 3:
-    print "TEST-UNEXPECTED-FAIL | autobinscope.py | Unexpected error %d : %s" (errno, strerror)
-    sys.exit(0)
-  else:
-    print "TEST-UNEXPECTED-FAIL | autobinscope.py | Could not locate binscope at location : %s\n" % binscope_path
-    sys.exit(0)
+except WindowsError, (errno, strerror):
+    if errno != 2 and errno != 3:
+        print "TEST-UNEXPECTED-FAIL | autobinscope.py | Unexpected error %d : %s" % (
+            errno, strerror)
+        sys.exit(0)
+    else:
+        print "TEST-UNEXPECTED-FAIL | autobinscope.py | Could not locate binscope at location : %s\n" % binscope_path
+        sys.exit(0)
 
 proc.wait()
 
 output = proc.communicate()[1].decode('utf-8').splitlines()
 
 errors = 0
 for line in output:
-  print(line)
-  if 'error' in line:
-    errors += 1
+    print(line)
+    if 'error' in line:
+        errors += 1
 
 if proc.returncode != 0:
-  print "TEST-UNEXPECTED-FAIL | autobinscope.py | Binscope returned error code %d for file %s" % (proc.returncode, binary_path)
+    print "TEST-UNEXPECTED-FAIL | autobinscope.py | Binscope returned error code %d for file %s" % (
+        proc.returncode, binary_path)
 elif errors != 0:
-  print "TEST-UNEXPECTED-FAIL | autobinscope.py | Binscope reported %d error(s) for file %s" % (errors, binary_path)
+    print "TEST-UNEXPECTED-FAIL | autobinscope.py | Binscope reported %d error(s) for file %s" % (
+        errors, binary_path)
 else:
-  print "TEST-PASS | autobinscope.py | %s succeeded" % binary_path
+    print "TEST-PASS | autobinscope.py | %s succeeded" % binary_path
--- a/build/win32/pgomerge.py
+++ b/build/win32/pgomerge.py
@@ -3,42 +3,47 @@
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 # Usage: pgomerge.py <binary basename> <dist/bin>
 # Gathers .pgc files from dist/bin and merges them into
 # $PWD/$basename.pgd using pgomgr, then deletes them.
 # No errors if any of these files don't exist.
 
-import sys, os, os.path, subprocess
+import sys
+import os
+import os.path
+import subprocess
 if not sys.platform == "win32":
     raise Exception("This script was only meant for Windows.")
 
+
 def MergePGOFiles(basename, pgddir, pgcdir):
-  """Merge pgc files produced from an instrumented binary
-     into the pgd file for the second pass of profile-guided optimization
-     with MSVC.  |basename| is the name of the DLL or EXE without the
-     extension.  |pgddir| is the path that contains <basename>.pgd
-     (should be the objdir it was built in).  |pgcdir| is the path
-     containing basename!N.pgc files, which is probably dist/bin.
-     Calls pgomgr to merge each pgc file into the pgd, then deletes
-     the pgc files."""
-  if not os.path.isdir(pgddir) or not os.path.isdir(pgcdir):
-    return
-  pgdfile = os.path.abspath(os.path.join(pgddir, basename + ".pgd"))
-  if not os.path.isfile(pgdfile):
-    return
-  for file in os.listdir(pgcdir):
-    if file.startswith(basename+"!") and file.endswith(".pgc"):
-      try:
-        pgcfile = os.path.normpath(os.path.join(pgcdir, file))
-        subprocess.call(['pgomgr', '-merge',
-                         pgcfile,
-                         pgdfile])
-        os.remove(pgcfile)
-      except OSError:
-        pass
+    """Merge pgc files produced from an instrumented binary
+       into the pgd file for the second pass of profile-guided optimization
+       with MSVC.  |basename| is the name of the DLL or EXE without the
+       extension.  |pgddir| is the path that contains <basename>.pgd
+       (should be the objdir it was built in).  |pgcdir| is the path
+       containing basename!N.pgc files, which is probably dist/bin.
+       Calls pgomgr to merge each pgc file into the pgd, then deletes
+       the pgc files."""
+    if not os.path.isdir(pgddir) or not os.path.isdir(pgcdir):
+        return
+    pgdfile = os.path.abspath(os.path.join(pgddir, basename + ".pgd"))
+    if not os.path.isfile(pgdfile):
+        return
+    for file in os.listdir(pgcdir):
+        if file.startswith(basename+"!") and file.endswith(".pgc"):
+            try:
+                pgcfile = os.path.normpath(os.path.join(pgcdir, file))
+                subprocess.call(['pgomgr', '-merge',
+                                 pgcfile,
+                                 pgdfile])
+                os.remove(pgcfile)
+            except OSError:
+                pass
+
 
 if __name__ == '__main__':
-  if len(sys.argv) != 3:
-      print >>sys.stderr, "Usage: pgomerge.py <binary basename> <dist/bin>"
-      sys.exit(1)
-  MergePGOFiles(sys.argv[1], os.getcwd(), sys.argv[2])
+    if len(sys.argv) != 3:
+        print >>sys.stderr, "Usage: pgomerge.py <binary basename> <dist/bin>"
+        sys.exit(1)
+    MergePGOFiles(sys.argv[1], os.getcwd(), sys.argv[2])