Bug 1494091: [mozharness] Lint mozharness package; r=Callek
author: Tom Prince <mozilla@hocat.ca>
Tue, 02 Oct 2018 18:32:03 +0000
changeset 450416 192acee588a64054cc615dd8ede045098fc17fe2
parent 450415 c5cfe5943dde8f887c89b28aa116e903cfab58ca
child 450417 e0c96dab418a5224a39e116f6056c13d993cca35
push id: 234
push user: mozilla@hocat.ca
push date: Sat, 10 Nov 2018 00:08:05 +0000
treeherder: mozilla-esr60@f2569ad7bd92 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: Callek
bugs: 1494091
milestone: 60.3.1
Bug 1494091: [mozharness] Lint mozharness package; r=Callek Differential Revision: https://phabricator.services.mozilla.com/D6837
.flake8
testing/mozharness/mozharness/base/config.py
testing/mozharness/mozharness/base/errors.py
testing/mozharness/mozharness/base/log.py
testing/mozharness/mozharness/base/python.py
testing/mozharness/mozharness/base/script.py
testing/mozharness/mozharness/base/transfer.py
testing/mozharness/mozharness/base/vcs/gittool.py
testing/mozharness/mozharness/base/vcs/mercurial.py
testing/mozharness/mozharness/lib/python/authentication.py
testing/mozharness/mozharness/mozilla/building/buildbase.py
testing/mozharness/mozharness/mozilla/checksums.py
testing/mozharness/mozharness/mozilla/l10n/locales.py
testing/mozharness/mozharness/mozilla/l10n/multi_locale_build.py
testing/mozharness/mozharness/mozilla/merkle.py
testing/mozharness/mozharness/mozilla/secrets.py
testing/mozharness/mozharness/mozilla/updates/__init__.py
tools/lint/flake8.yml
--- a/.flake8
+++ b/.flake8
@@ -1,6 +1,7 @@
 [flake8]
 # See http://pep8.readthedocs.io/en/latest/intro.html#configuration
 ignore = E121, E123, E126, E129, E133, E226, E241, E242, E704, W503, E402, E741
 max-line-length = 99
 exclude =
     testing/mochitest/pywebsocket,
+    testing/mozharness/mozharness/mozilla/testing
--- a/testing/mozharness/mozharness/base/config.py
+++ b/testing/mozharness/mozharness/base/config.py
@@ -73,16 +73,17 @@ def make_immutable(item):
     else:
         result = item
     return result
 
 
 class LockedTuple(tuple):
     def __new__(cls, items):
         return tuple.__new__(cls, (make_immutable(x) for x in items))
+
     def __deepcopy__(self, memo):
         return [deepcopy(elem, memo) for elem in self]
 
 
 # ReadOnlyDict {{{1
 class ReadOnlyDict(dict):
     def __init__(self, dictionary):
         self._lock = False
@@ -130,17 +131,22 @@ class ReadOnlyDict(dict):
         memo[id(self)] = result
         for k, v in self.__dict__.items():
             setattr(result, k, deepcopy(v, memo))
         result._lock = False
         for k, v in self.items():
             result[k] = deepcopy(v, memo)
         return result
 
-DEFAULT_CONFIG_PATH = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "configs")
+
+DEFAULT_CONFIG_PATH = os.path.join(
+    os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
+    "configs",
+)
+
 
 # parse_config_file {{{1
 def parse_config_file(file_name, quiet=False, search_path=None,
                       config_dict_name="config"):
     """Read a config file and return a dictionary.
     """
     file_path = None
     if os.path.exists(file_name):
@@ -161,17 +167,18 @@ def parse_config_file(file_name, quiet=F
         config = local_dict[config_dict_name]
     elif file_name.endswith('.json'):
         fh = open(file_path)
         config = {}
         json_config = json.load(fh)
         config = dict(json_config)
         fh.close()
     else:
-        raise RuntimeError("Unknown config file type %s! (config files must end in .json or .py)" % file_name)
+        raise RuntimeError(
+            "Unknown config file type %s! (config files must end in .json or .py)" % file_name)
     # TODO return file_path
     return config
 
 
 def download_config_file(url, file_name):
     n = 0
     attempts = 5
     sleeptime = 60
@@ -253,17 +260,17 @@ class BaseConfig(object):
             # parse sys.argv which in this case would be the command line
             # options specified to run the tests, e.g. nosetests -v. Clearly,
             # the options passed to nosetests (such as -v) should not be
             # interpreted by mozharness as mozharness options, so we specify
             # a dummy command line with no options, so that the parser does
             # not add anything from the test invocation command line
             # arguments to the mozharness options.
             if option_args is None:
-                option_args=['dummy_mozharness_script_with_no_command_line_options.py']
+                option_args = ['dummy_mozharness_script_with_no_command_line_options.py']
         if config_options is None:
             config_options = []
         self._create_config_parser(config_options, usage)
         # we allow manually passing of option args for things like nosetests
         self.parse_args(args=option_args)
 
     def get_read_only_config(self):
         return ReadOnlyDict(self._config)
@@ -403,17 +410,17 @@ class BaseConfig(object):
                 if not quiet:
                     print("Invalid action %s not in %s!" % (action,
                                                             self.all_actions))
                 raise SystemExit(-1)
         return action_list
 
     def verify_actions_order(self, action_list):
         try:
-            indexes = [ self.all_actions.index(elt) for elt in action_list ]
+            indexes = [self.all_actions.index(elt) for elt in action_list]
             sorted_indexes = sorted(indexes)
             for i in range(len(indexes)):
                 if indexes[i] != sorted_indexes[i]:
                     print(("Action %s comes in different order in %s\n" +
                            "than in %s") % (action_list[i], action_list, self.all_actions))
                     raise SystemExit(-1)
         except ValueError as e:
             print("Invalid action found: " + str(e))
@@ -451,17 +458,20 @@ class BaseConfig(object):
                     file_name = os.path.basename(cf)
                     file_path = os.path.join(os.getcwd(), file_name)
                     download_config_file(cf, file_path)
                     all_cfg_files_and_dicts.append(
                         (file_path, parse_config_file(file_path, search_path=["."]))
                     )
                 else:
                     all_cfg_files_and_dicts.append(
-                        (cf, parse_config_file(cf, search_path=config_paths + [DEFAULT_CONFIG_PATH]))
+                        (cf, parse_config_file(
+                            cf,
+                            search_path=config_paths + [DEFAULT_CONFIG_PATH]
+                        ))
                     )
             except Exception:
                 if cf in options.opt_config_files:
                     print(
                         "WARNING: optional config file not found %s" % cf
                     )
                 else:
                     raise
--- a/testing/mozharness/mozharness/base/errors.py
+++ b/testing/mozharness/mozharness/base/errors.py
@@ -23,16 +23,17 @@ import re
 
 from mozharness.base.log import DEBUG, WARNING, ERROR, CRITICAL, FATAL
 
 
 # Exceptions
 class VCSException(Exception):
     pass
 
+
 # ErrorLists {{{1
 BaseErrorList = [{
     'substr': r'''command not found''',
     'level': ERROR
 }]
 
 # For ssh, scp, rsync over ssh
 SSHErrorList = BaseErrorList + [{
@@ -123,17 +124,18 @@ PythonErrorList = BaseErrorList + [
     {'regex': re.compile(r'''raise \w*Exception: '''), 'level': CRITICAL},
     {'regex': re.compile(r'''raise \w*Error: '''), 'level': CRITICAL},
 ]
 
 VirtualenvErrorList = [
     {'substr': r'''not found or a compiler error:''', 'level': WARNING},
     {'regex': re.compile('''\d+: error: '''), 'level': ERROR},
     {'regex': re.compile('''\d+: warning: '''), 'level': WARNING},
-    {'regex': re.compile(r'''Downloading .* \(.*\): *([0-9]+%)? *[0-9\.]+[kmKM]b'''), 'level': DEBUG},
+    {'regex': re.compile(
+        r'''Downloading .* \(.*\): *([0-9]+%)? *[0-9\.]+[kmKM]b'''), 'level': DEBUG},
 ] + PythonErrorList
 
 
 # We may need to have various MakefileErrorLists for differing amounts of
 # warning-ignoring-ness.
 MakefileErrorList = BaseErrorList + PythonErrorList + [
     {'substr': r'''No rule to make target ''', 'level': ERROR},
     {'regex': re.compile(r'''akefile.*was not found\.'''), 'level': ERROR},
@@ -161,25 +163,27 @@ ADBErrorList = BaseErrorList + [
     {'substr': r'''error: protocol fault''', 'level': ERROR},
     {'substr': r'''unable to connect to ''', 'level': ERROR},
 ]
 
 JarsignerErrorList = [{
     'substr': r'''command not found''',
     'level': FATAL
 }, {
-    'substr': r'''jarsigner error: java.lang.RuntimeException: keystore load: Keystore was tampered with, or password was incorrect''',
+    'substr': r'''jarsigner error: java.lang.RuntimeException: keystore load: '''
+              r'''Keystore was tampered with, or password was incorrect''',
     'level': FATAL,
     'explanation': r'''The store passphrase is probably incorrect!''',
 }, {
     'regex': re.compile(r'''jarsigner: key associated with .* not a private key'''),
     'level': FATAL,
     'explanation': r'''The key passphrase is probably incorrect!''',
 }, {
-    'regex': re.compile(r'''jarsigner error: java.lang.RuntimeException: keystore load: .* .No such file or directory'''),
+    'regex': re.compile(r'''jarsigner error: java.lang.RuntimeException: '''
+                        r'''keystore load: .* .No such file or directory'''),
     'level': FATAL,
     'explanation': r'''The keystore doesn't exist!''',
 }, {
     'substr': r'''jarsigner: unable to open jar file:''',
     'level': FATAL,
     'explanation': r'''The apk is missing!''',
 }]
 
--- a/testing/mozharness/mozharness/base/log.py
+++ b/testing/mozharness/mozharness/base/log.py
@@ -687,12 +687,13 @@ def numeric_log_level(level):
     Args:
         level (str): log level name to convert.
 
     Returns:
         int: numeric value of the log level name.
     """
     return LOG_LEVELS[level]
 
+
 # __main__ {{{1
 if __name__ == '__main__':
     """ Useless comparison, due to the `pass` keyword on its body"""
     pass
--- a/testing/mozharness/mozharness/base/python.py
+++ b/testing/mozharness/mozharness/base/python.py
@@ -26,16 +26,17 @@ from mozharness.base.script import (
 from mozharness.base.errors import VirtualenvErrorList
 from mozharness.base.log import WARNING, FATAL
 
 external_tools_path = os.path.join(
     os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__))),
     'external_tools',
 )
 
+
 def get_tlsv1_post():
     # Monkeypatch to work around SSL errors in non-bleeding-edge Python.
     # Taken from https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
     import requests
     from requests.packages.urllib3.poolmanager import PoolManager
     import ssl
 
     class TLSV1Adapter(requests.adapters.HTTPAdapter):
@@ -43,16 +44,17 @@ def get_tlsv1_post():
             self.poolmanager = PoolManager(num_pools=connections,
                                            maxsize=maxsize,
                                            block=block,
                                            ssl_version=ssl.PROTOCOL_TLSv1)
     s = requests.Session()
     s.mount('https://', TLSV1Adapter())
     return s.post
 
+
 # Virtualenv {{{1
 virtualenv_config_options = [
     [["--virtualenv-path"], {
         "action": "store",
         "dest": "virtualenv_path",
         "default": "venv",
         "help": "Specify the path to the virtualenv top level directory"
     }],
@@ -129,17 +131,18 @@ class VirtualenvMixin(object):
         c['virtualenv_path'] is set; otherwise return the binary name.
         Otherwise return None
         """
         if binary not in self.python_paths:
             bin_dir = 'bin'
             if self._is_windows():
                 bin_dir = 'Scripts'
             virtualenv_path = self.query_virtualenv_path()
-            self.python_paths[binary] = os.path.abspath(os.path.join(virtualenv_path, bin_dir, binary))
+            self.python_paths[binary] = os.path.abspath(
+                os.path.join(virtualenv_path, bin_dir, binary))
 
         return self.python_paths[binary]
 
     def query_python_site_packages_path(self):
         if self.site_packages_path:
             return self.site_packages_path
         python = self.query_python_path()
         self.site_packages_path = self.get_output_from_command(
@@ -156,19 +159,22 @@ class VirtualenvMixin(object):
         packages = {}
 
         if pip_freeze_output is None:
             # get the output from `pip freeze`
             pip = self.query_python_path("pip")
             if not pip:
                 self.log("package_versions: Program pip not in path", level=error_level)
                 return {}
-            pip_freeze_output = self.get_output_from_command([pip, "freeze"], silent=True, ignore_errors=True)
+            pip_freeze_output = self.get_output_from_command(
+                [pip, "freeze"], silent=True, ignore_errors=True)
             if not isinstance(pip_freeze_output, basestring):
-                self.fatal("package_versions: Error encountered running `pip freeze`: %s" % pip_freeze_output)
+                self.fatal(
+                    "package_versions: Error encountered running `pip freeze`: "
+                    + pip_freeze_output)
 
         for line in pip_freeze_output.splitlines():
             # parse the output into package, version
             line = line.strip()
             if not line:
                 # whitespace
                 continue
             if line.startswith('-'):
@@ -239,17 +245,19 @@ class VirtualenvMixin(object):
                 self.fatal("module parameter required with install_method='easy_install'")
             if requirements:
                 # Install pip requirements files separately, since they're
                 # not understood by easy_install.
                 self.install_module(requirements=requirements,
                                     install_method='pip')
             command = [self.query_python_path(), '-m', 'easy_install']
         else:
-            self.fatal("install_module() doesn't understand an install_method of %s!" % install_method)
+            self.fatal(
+                "install_module() doesn't understand an install_method of %s!"
+                % install_method)
 
         for link in c.get('find_links', []):
             parsed = urlparse.urlparse(link)
 
             try:
                 socket.gethostbyname(parsed.hostname)
             except socket.gaierror as e:
                 self.info('error resolving %s (ignoring): %s' %
@@ -259,17 +267,19 @@ class VirtualenvMixin(object):
             command.extend(["--find-links", link])
 
         # module_url can be None if only specifying requirements files
         if module_url:
             if editable:
                 if install_method in (None, 'pip'):
                     command += ['-e']
                 else:
-                    self.fatal("editable installs not supported for install_method %s" % install_method)
+                    self.fatal(
+                        "editable installs not supported for install_method %s"
+                        % install_method)
             command += [module_url]
 
         # If we're only installing a single requirements file, use
         # the file's directory as cwd, so relative paths work correctly.
         cwd = dirs['abs_work_dir']
         if not module and len(requirements) == 1:
             cwd = os.path.dirname(requirements[0])
 
@@ -277,17 +287,20 @@ class VirtualenvMixin(object):
         # Allow for errors while building modules, but require a
         # return status of 0.
         self.retry(
             self.run_command,
             # None will cause default value to be used
             attempts=1 if optional else None,
             good_statuses=(0,),
             error_level=WARNING if optional else FATAL,
-            error_message='Could not install python package: ' + quoted_command + ' failed after %(attempts)d tries!',
+            error_message=(
+                'Could not install python package: '
+                + quoted_command + ' failed after %(attempts)d tries!'
+            ),
             args=[command, ],
             kwargs={
                 'error_list': VirtualenvErrorList,
                 'cwd': cwd,
                 'env': env,
                 # WARNING only since retry will raise final FATAL if all
                 # retry attempts are unsuccessful - and we only want
                 # an ERROR of FATAL if *no* retry attempt works
@@ -357,17 +370,19 @@ class VirtualenvMixin(object):
         # only use --alway-copy when not using Redhat.
         if self._is_redhat():
             self.warning("creating virtualenv without --always-copy "
                          "due to issues on Redhat derived distros")
         else:
             virtualenv_options.append('--always-copy')
 
         if os.path.exists(self.query_python_path()):
-            self.info("Virtualenv %s appears to already exist; skipping virtualenv creation." % self.query_python_path())
+            self.info(
+                "Virtualenv %s appears to already exist; "
+                "skipping virtualenv creation." % self.query_python_path())
         else:
             self.mkdir_p(dirs['abs_work_dir'])
             self.run_command(virtualenv + virtualenv_options + [venv_path],
                              cwd=dirs['abs_work_dir'],
                              error_list=VirtualenvErrorList,
                              partial_env={'VIRTUALENV_NO_DOWNLOAD': "1"},
                              halt_on_failure=True)
 
@@ -583,17 +598,20 @@ class ResourceMonitoringMixin(Perfherder
             message = '{prefix} - Wall time: {duration:.0f}s; ' \
                 'CPU: {cpu_percent}; ' \
                 'Read bytes: {io_read_bytes}; Write bytes: {io_write_bytes}; ' \
                 'Read time: {io_read_time}; Write time: {io_write_time}'
 
             # XXX Some test harnesses are complaining about a string being
             # being fed into a 'f' formatter. This will help diagnose the
             # issue.
-            cpu_percent_str = str(round(cpu_percent)) + '%' if cpu_percent else "Can't collect data"
+            if cpu_percent:
+                cpu_percent_str = str(round(cpu_percent)) + '%'
+            else:
+                cpu_percent_str = "Can't collect data"
 
             try:
                 self.info(
                     message.format(
                         prefix=prefix, duration=duration,
                         cpu_percent=cpu_percent_str, io_read_bytes=io.read_bytes,
                         io_write_bytes=io.write_bytes, io_read_time=io.read_time,
                         io_write_time=io.write_time
--- a/testing/mozharness/mozharness/base/script.py
+++ b/testing/mozharness/mozharness/base/script.py
@@ -1,10 +1,8 @@
-
-#!/usr/bin/env python
 # ***** BEGIN LICENSE BLOCK *****
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
 # You can obtain one at http://mozilla.org/MPL/2.0/.
 # ***** END LICENSE BLOCK *****
 """Generic script objects.
 
 script.py, along with config.py and log.py, represents the core of
@@ -325,19 +323,20 @@ class ScriptMixin(PlatformMixin):
             if name == '.' or name == '..':
                 continue
             full_name = os.path.join(path, name)
 
             if file_attr & win32file.FILE_ATTRIBUTE_DIRECTORY:
                 self._rmtree_windows(full_name)
             else:
                 try:
-                    win32file.SetFileAttributesW('\\\\?\\' + full_name, win32file.FILE_ATTRIBUTE_NORMAL)
+                    win32file.SetFileAttributesW(
+                        '\\\\?\\' + full_name, win32file.FILE_ATTRIBUTE_NORMAL)
                     win32file.DeleteFile('\\\\?\\' + full_name)
-                except:
+                except Exception:
                     # DeleteFile fails on long paths, del /f /q works just fine
                     self.run_command('del /F /Q "%s"' % full_name)
 
         win32file.RemoveDirectory('\\\\?\\' + path)
 
     def get_filename_from_url(self, url):
         """ parse a filename base on an url.
 
@@ -373,18 +372,16 @@ class ScriptMixin(PlatformMixin):
 
         .. _urllib2.urlopen:
         https://docs.python.org/2/library/urllib2.html#urllib2.urlopen
         """
         # http://bugs.python.org/issue13359 - urllib2 does not automatically quote the URL
         url_quoted = urllib2.quote(url, safe='%/:=&?~#+!$,;\'@()*[]|')
         return urllib2.urlopen(url_quoted, **kwargs)
 
-
-
     def fetch_url_into_memory(self, url):
         ''' Downloads a file from a url into memory instead of disk.
 
         Args:
             url (str): URL path where the file to be downloaded is located.
 
         Raises:
             IOError: When the url points to a file on disk and cannot be found
@@ -406,17 +403,18 @@ class ScriptMixin(PlatformMixin):
             content_length = os.stat(path).st_size
 
             # In case we're referrencing a file without file://
             if parsed_url.scheme == '':
                 url = 'file://%s' % os.path.abspath(url)
                 parsed_url = urlparse.urlparse(url)
 
         request = urllib2.Request(url)
-        # When calling fetch_url_into_memory() you should retry when we raise one of these exceptions:
+        # When calling fetch_url_into_memory() you should retry when we raise
+        # one of these exceptions:
         # * Bug 1300663 - HTTPError: HTTP Error 404: Not Found
         # * Bug 1300413 - HTTPError: HTTP Error 500: Internal Server Error
         # * Bug 1300943 - HTTPError: HTTP Error 503: Service Unavailable
         # * Bug 1300953 - URLError: <urlopen error [Errno -2] Name or service not known>
         # * Bug 1301594 - URLError: <urlopen error [Errno 10054] An existing connection was ...
         # * Bug 1301597 - URLError: <urlopen error [Errno 8] _ssl.c:504: EOF occurred in ...
         # * Bug 1301855 - URLError: <urlopen error [Errno 60] Operation timed out>
         # * Bug 1302237 - URLError: <urlopen error [Errno 104] Connection reset by peer>
@@ -431,37 +429,37 @@ class ScriptMixin(PlatformMixin):
         response_body = response.read()
         response_body_size = len(response_body)
 
         self.info('Content-Length response header: {}'.format(content_length))
         self.info('Bytes received: {}'.format(response_body_size))
 
         if response_body_size != content_length:
             raise ContentLengthMismatch(
-                'The retrieved Content-Length header declares a body length of {} bytes, while we actually retrieved {} bytes'.format(
+                'The retrieved Content-Length header declares a body length '
+                'of {} bytes, while we actually retrieved {} bytes'.format(
                     content_length, response_body_size)
             )
 
         if response.info().get('Content-Encoding') == 'gzip':
             self.info('Content-Encoding is "gzip", so decompressing response body')
             # See http://www.zlib.net/manual.html#Advanced
             # section "ZEXTERN int ZEXPORT inflateInit2 OF....":
             #   Add 32 to windowBits to enable zlib and gzip decoding with automatic
             #   header detection, or add 16 to decode only the gzip format (the zlib
             #   format will return a Z_DATA_ERROR).
             # Adding 16 since we only wish to support gzip encoding.
-            file_contents = zlib.decompress(response_body, zlib.MAX_WBITS|16)
+            file_contents = zlib.decompress(response_body, zlib.MAX_WBITS | 16)
         else:
             file_contents = response_body
 
         # Use BytesIO instead of StringIO
         # http://stackoverflow.com/questions/34162017/unzip-buffer-with-python/34162395#34162395
         return BytesIO(file_contents)
 
-
     def _download_file(self, url, file_name):
         """ Helper function for download_file()
         Additionaly this function logs all exceptions as warnings before
         re-raising them
 
         Args:
             url (str): string containing the URL with the file location
             file_name (str): name of the file where the downloaded file
@@ -497,17 +495,19 @@ class ScriptMixin(PlatformMixin):
                 # file, and delete the compressed version.
                 local_file = open(file_name + '.gz', 'wb')
             else:
                 local_file = open(file_name, 'wb')
             while True:
                 block = f.read(1024 ** 2)
                 if not block:
                     if f_length is not None and got_length != f_length:
-                        raise urllib2.URLError("Download incomplete; content-length was %d, but only received %d" % (f_length, got_length))
+                        raise urllib2.URLError(
+                            "Download incomplete; content-length was %d, "
+                            "but only received %d" % (f_length, got_length))
                     break
                 local_file.write(block)
                 if f_length is not None:
                     got_length += len(block)
             local_file.close()
             if f.info().get('Content-Encoding') == 'gzip':
                 # Decompress file into target location, then remove compressed version
                 with open(file_name, 'wb') as f_out:
@@ -579,26 +579,24 @@ class ScriptMixin(PlatformMixin):
             kwargs = {"url": url, "file_name": file_name}
 
         return self.retry(
             download_func,
             kwargs=kwargs,
             **retry_args
         )
 
-
     def _filter_entries(self, namelist, extract_dirs):
         """Filter entries of the archive based on the specified list of to extract dirs."""
         filter_partial = functools.partial(fnmatch.filter, namelist)
         entries = itertools.chain(*map(filter_partial, extract_dirs or ['*']))
 
         for entry in entries:
             yield entry
 
-
     def unzip(self, compressed_file, extract_to, extract_dirs='*', verbose=False):
         """This method allows to extract a zip file without writing to disk first.
 
         Args:
             compressed_file (object): File-like object with the contents of a compressed zip file.
             extract_to (str): where to extract the compressed file.
             extract_dirs (list, optional): directories inside the archive file to extract.
                                            Defaults to '*'.
@@ -630,29 +628,27 @@ class ScriptMixin(PlatformMixin):
                     # Only set permissions if attributes are available. Otherwise all
                     # permissions will be removed eg. on Windows.
                     if mode:
                         os.chmod(fname, mode)
 
                 except KeyError:
                     self.warning('{} was not found in the zip file'.format(entry))
 
-
     def deflate(self, compressed_file, mode, extract_to='.', *args, **kwargs):
         """This method allows to extract a compressed file from a tar, tar.bz2 and tar.gz files.
 
         Args:
             compressed_file (object): File-like object with the contents of a compressed file.
             mode (str): string of the form 'filemode[:compression]' (e.g. 'r:gz' or 'r:bz2')
             extract_to (str, optional): where to extract the compressed file.
         """
         t = tarfile.open(fileobj=compressed_file, mode=mode)
         t.extractall(path=extract_to)
 
-
     def download_unpack(self, url, extract_to='.', extract_dirs='*', verbose=False):
         """Generic method to download and extract a compressed file without writing it to disk first.
 
         Args:
             url (str): URL where the file to be downloaded is located.
             extract_to (str, optional): directory where the downloaded file will
                                         be extracted to.
             extract_dirs (list, optional): directories inside the archive to extract.
@@ -739,17 +735,16 @@ class ScriptMixin(PlatformMixin):
         #    Let's unpack the file
         function, kwargs = _determine_extraction_method_and_kwargs(url)
         try:
             function(**kwargs)
         except zipfile.BadZipfile:
             # Dump the exception and exit
             self.exception(level=FATAL)
 
-
     def load_json_url(self, url, error_level=None, *args, **kwargs):
         """ Returns a json object from a url (it retries). """
         contents = self._retry_download(
             url=url, error_level=error_level, *args, **kwargs
         )
         return json.loads(contents.read())
 
     # http://www.techniqal.com/blog/2008/07/31/python-file-read-write-with-urllib2/
@@ -839,17 +834,19 @@ class ScriptMixin(PlatformMixin):
 
         .. _stat:
         https://docs.python.org/2/library/os.html#os.chmod
         """
 
         self.info("Chmoding %s to %s" % (path, str(oct(mode))))
         os.chmod(path, mode)
 
-    def copyfile(self, src, dest, log_level=INFO, error_level=ERROR, copystat=False, compress=False):
+    def copyfile(
+        self, src, dest, log_level=INFO, error_level=ERROR, copystat=False, compress=False
+    ):
         """ copy or compress `src` into `dest`.
 
         Args:
             src (str): filepath to copy.
             dest (str): filepath where to move the content to.
             log_level (str, optional): log level to use for normal operation. Defaults to
                                       `INFO`
             error_level (str, optional): log level to use on error. Defaults to `ERROR`
@@ -1303,35 +1300,38 @@ class ScriptMixin(PlatformMixin):
                         found = True
                 else:
                     self.log("a exes %s dict's value is not a string, list, or tuple. Got key "
                              "%s and value %s" % (exe_name, name, str(path)), level=error_level)
                 if found:
                     exe = path
                     break
             else:
-                self.log("query_exe was a searchable dict but an existing path could not be "
-                         "determined. Tried searching in paths: %s" % (str(exe)), level=error_level)
+                self.log("query_exe was a searchable dict but an existing "
+                         "path could not be determined. Tried searching in "
+                         "paths: %s" % (str(exe)), level=error_level)
                 return None
         elif isinstance(exe, list) or isinstance(exe, tuple):
             exe = [x % repl_dict for x in exe]
         elif isinstance(exe, str):
             exe = exe % repl_dict
         else:
             self.log("query_exe: %s is not a list, tuple, dict, or string: "
                      "%s!" % (exe_name, str(exe)), level=error_level)
             return exe
         if return_type == "list":
             if isinstance(exe, str):
                 exe = [exe]
         elif return_type == "string":
             if isinstance(exe, list):
                 exe = subprocess.list2cmdline(exe)
         elif return_type is not None:
-            self.log("Unknown return_type type %s requested in query_exe!" % return_type, level=error_level)
+            self.log(
+                "Unknown return_type type %s requested in query_exe!"
+                % return_type, level=error_level)
         return exe
 
     def run_command(self, command, cwd=None, error_list=None,
                     halt_on_failure=False, success_codes=None,
                     env=None, partial_env=None, return_type='status',
                     throw_exception=False, output_parser=None,
                     output_timeout=None, fatal_exit_code=2,
                     error_level=ERROR, **kwargs):
@@ -1412,17 +1412,19 @@ class ScriptMixin(PlatformMixin):
             parser = output_parser
 
         try:
             if output_timeout:
                 def processOutput(line):
                     parser.add_lines(line)
 
                 def onTimeout():
-                    self.info("Automation Error: mozprocess timed out after %s seconds running %s" % (str(output_timeout), str(command)))
+                    self.info(
+                        "Automation Error: mozprocess timed out after "
+                        "%s seconds running %s" % (str(output_timeout), str(command)))
 
                 p = ProcessHandler(command,
                                    shell=shell,
                                    env=env,
                                    cwd=cwd,
                                    storeOutput=False,
                                    onTimeout=(onTimeout,),
                                    processOutputLine=[processOutput])
@@ -1587,17 +1589,19 @@ class ScriptMixin(PlatformMixin):
         shell = True
         if isinstance(command, list):
             shell = False
 
         p = subprocess.Popen(command, shell=shell, stdout=tmp_stdout,
                              cwd=cwd, stderr=tmp_stderr, env=env, bufsize=0)
         # XXX: changed from self.debug to self.log due to this error:
         #      TypeError: debug() takes exactly 1 argument (2 given)
-        self.log("Temporary files: %s and %s" % (tmp_stdout_filename, tmp_stderr_filename), level=DEBUG)
+        self.log(
+            "Temporary files: %s and %s"
+            % (tmp_stdout_filename, tmp_stderr_filename), level=DEBUG)
         p.wait()
         tmp_stdout.close()
         tmp_stderr.close()
         return_level = DEBUG
         output = None
         if return_type == 'output' or not silent:
             if os.path.exists(tmp_stdout_filename) and os.path.getsize(tmp_stdout_filename):
                 output = self.read_from_file(tmp_stdout_filename,
@@ -2017,18 +2021,18 @@ class BaseScript(ScriptMixin, LogMixin, 
                         continue
 
                     try:
                         self.info("Running post-action listener: %s" % fn)
                         method = getattr(self, fn)
                         method(action, success=False)
                     except Exception:
                         self.error("An additional exception occurred during "
-                                   "post-action for %s: %s" % (action,
-                                   traceback.format_exc()))
+                                   "post-action for %s: %s"
+                                   % (action, traceback.format_exc()))
 
                 self.fatal("Aborting due to exception in pre-action listener.")
 
         # We always run post action listeners, even if the main routine failed.
         success = False
         try:
             self.info("Running main action method: %s" % method_name)
             self._possibly_run_method("preflight_%s" % method_name)
@@ -2312,13 +2316,8 @@ class BaseScript(ScriptMixin, LogMixin, 
     def return_code(self):
         return self._return_code
 
     @return_code.setter
     def return_code(self, code):
         old_return_code, self._return_code = self._return_code, code
         if old_return_code != code:
             self.warning("setting return code to %d" % code)
-
-# __main__ {{{1
-if __name__ == '__main__':
-    """ Useless comparison, due to the `pass` keyword on its body"""
-    pass
--- a/testing/mozharness/mozharness/base/transfer.py
+++ b/testing/mozharness/mozharness/base/transfer.py
@@ -29,27 +29,27 @@ class TransferMixin(object):
     """
     def load_json_from_url(self, url, timeout=30, log_level=DEBUG):
         self.log("Attempting to download %s; timeout=%i" % (url, timeout),
                  level=log_level)
         try:
             r = urllib2.urlopen(url, timeout=timeout)
             j = json.load(r)
             self.log(pprint.pformat(j), level=log_level)
-        except:
+        except BaseException:
             self.exception(message="Unable to download %s!" % url)
             raise
         return j
 
     def scp_upload_directory(self, local_path, ssh_key, ssh_user,
                              remote_host, remote_path,
                              scp_options=None,
                              error_level=ERROR,
                              create_remote_directory=True,
-                            ):
+                             ):
         """
         Create a remote directory and upload the contents of
         a local directory to it via scp only
 
         Returns:
             None: on success
               -1: if local_path is not a directory
               -2: if the remote_directory cannot be created
@@ -72,18 +72,22 @@ class TransferMixin(object):
                 'level': ERROR
             }] + SSHErrorList
             if self.run_command([ssh, '-oIdentityFile=%s' % ssh_key,
                                  '%s@%s' % (ssh_user, remote_host),
                                  'mkdir', '-p', remote_path],
                                 cwd=dirs['abs_work_dir'],
                                 return_type='num_errors',
                                 error_list=mkdir_error_list):
-                self.log("Unable to create remote directory %s:%s!" % (remote_host, remote_path), level=error_level)
+                self.log(
+                    "Unable to create remote directory %s:%s!"
+                    % (remote_host, remote_path), level=error_level)
                 return -2
         if self.run_command([scp, '-oIdentityFile=%s' % ssh_key,
                              scp_options, '.',
                              '%s@%s:%s/' % (ssh_user, remote_host, remote_path)],
                             cwd=local_path,
                             return_type='num_errors',
                             error_list=SSHErrorList):
-            self.log("Unable to scp %s to %s:%s!" % (local_path, remote_host, remote_path), level=error_level)
-            return -3
\ No newline at end of file
+            self.log(
+                "Unable to scp %s to %s:%s!"
+                % (local_path, remote_host, remote_path), level=error_level)
+            return -3
--- a/testing/mozharness/mozharness/base/vcs/gittool.py
+++ b/testing/mozharness/mozharness/base/vcs/gittool.py
@@ -74,17 +74,18 @@ class GittoolVCS(ScriptMixin, LogMixin):
         cmd = self.gittool[:]
         if branch:
             cmd.extend(['-b', branch])
         if revision:
             cmd.extend(['-r', revision])
         if clean:
             cmd.append('--clean')
 
-        for base_mirror_url in self.config.get('gittool_base_mirror_urls', self.config.get('vcs_base_mirror_urls', [])):
+        for base_mirror_url in self.config.get(
+                'gittool_base_mirror_urls', self.config.get('vcs_base_mirror_urls', [])):
             bits = urlparse.urlparse(repo)
             mirror_url = urlparse.urljoin(base_mirror_url, bits.path)
             cmd.extend(['--mirror', mirror_url])
 
         cmd.extend([repo, dest])
         parser = GittoolParser(config=self.config, log_obj=self.log_obj,
                                error_list=GitErrorList)
         retval = self.run_command(cmd, error_list=GitErrorList, env=env, output_parser=parser)
--- a/testing/mozharness/mozharness/base/vcs/mercurial.py
+++ b/testing/mozharness/mozharness/base/vcs/mercurial.py
@@ -284,17 +284,18 @@ class MercurialVCS(ScriptMixin, LogMixin
         """Check for outgoing changesets present in a repo"""
         self.info("Checking for outgoing changesets from %s to %s." % (src, remote))
         cmd = self.hg + ['-q', 'out', '--template', '{node} {branches}\n']
         cmd.extend(self.common_args(**kwargs))
         cmd.append(remote)
         if os.path.exists(src):
             try:
                 revs = []
-                for line in self.get_output_from_command(cmd, cwd=src, throw_exception=True).rstrip().split("\n"):
+                for line in self.get_output_from_command(
+                        cmd, cwd=src, throw_exception=True).rstrip().split("\n"):
                     try:
                         rev, branch = line.split()
                     # Mercurial displays no branch at all if the revision
                     # is on "default"
                     except ValueError:
                         rev = line.rstrip()
                         branch = "default"
                     revs.append((rev, branch))
--- a/testing/mozharness/mozharness/lib/python/authentication.py
+++ b/testing/mozharness/mozharness/lib/python/authentication.py
@@ -7,16 +7,17 @@
 """module for http authentication operations"""
 import getpass
 import os
 
 CREDENTIALS_PATH = os.path.expanduser("~/.mozilla/credentials.cfg")
 DIRNAME = os.path.dirname(CREDENTIALS_PATH)
 LDAP_PASSWORD = None
 
+
 def get_credentials():
     """ Returns http credentials.
 
     The user's email address is stored on disk (for convenience in the future)
     while the password is requested from the user on first invocation.
     """
     global LDAP_PASSWORD
     if not os.path.exists(DIRNAME):
@@ -41,13 +42,14 @@ def get_credentials():
         os.chmod(CREDENTIALS_PATH, 0600)
 
     if not LDAP_PASSWORD:
         print "Please enter your LDAP password (we won't store it):"
         LDAP_PASSWORD = getpass.getpass()
 
     return https_username, LDAP_PASSWORD
 
+
 def get_credentials_path():
     if os.path.isfile(CREDENTIALS_PATH):
         get_credentials()
 
     return CREDENTIALS_PATH
--- a/testing/mozharness/mozharness/mozilla/building/buildbase.py
+++ b/testing/mozharness/mozharness/mozilla/building/buildbase.py
@@ -18,25 +18,26 @@ import time
 import uuid
 import copy
 import glob
 
 # import the power of mozharness ;)
 import sys
 from datetime import datetime
 import re
-from mozharness.base.config import BaseConfig, parse_config_file, DEFAULT_CONFIG_PATH
+from mozharness.base.config import (
+    BaseConfig, parse_config_file, DEFAULT_CONFIG_PATH,
+)
 from mozharness.base.log import ERROR, OutputParser, FATAL
 from mozharness.base.script import PostScriptRun
 from mozharness.base.vcs.vcsbase import MercurialScript
 from mozharness.mozilla.automation import (
     AutomationMixin,
     EXIT_STATUS_DICT,
     TBPL_STATUS_DICT,
-    TBPL_EXCEPTION,
     TBPL_FAILURE,
     TBPL_RETRY,
     TBPL_WARNING,
     TBPL_SUCCESS,
     TBPL_WORST_LEVEL_TUPLE,
 )
 from mozharness.mozilla.secrets import SecretsMixin
 from mozharness.mozilla.testing.errors import TinderBoxPrintRe
@@ -57,17 +58,17 @@ ERROR_MSGS = {
 Please make sure that "repo" is in your config.',
     'comments_undetermined': '"comments" could not be determined. This may be \
 because it was a forced build.',
     'tooltool_manifest_undetermined': '"tooltool_manifest_src" not set, \
 Skipping run_tooltool...',
 }
 
 
-### Output Parsers
+# Output Parsers
 
 TBPL_UPLOAD_ERRORS = [
     {
         'regex': re.compile("Connection timed out"),
         'level': TBPL_RETRY,
     },
     {
         'regex': re.compile("Connection reset by peer"),
@@ -159,21 +160,23 @@ class CheckTestCompleteParser(OutputPars
         # Print the summary.
         summary = tbox_print_summary(self.pass_count,
                                      self.fail_count,
                                      self.leaked)
         self.info("TinderboxPrint: check<br/>%s\n" % summary)
 
         return self.tbpl_status
 
+
 class MozconfigPathError(Exception):
     """
     There was an error getting a mozconfig path from a mozharness config.
     """
 
+
 def get_mozconfig_path(script, config, dirs):
     """
     Get the path to the mozconfig file to use from a mozharness config.
 
     :param script: The object to interact with the filesystem through.
     :type script: ScriptMixin:
 
     :param config: The mozharness config to inspect.
@@ -253,31 +256,29 @@ class BuildingConfig(BaseConfig):
         all_config_dicts = []
         # important config files
         variant_cfg_file = branch_cfg_file = pool_cfg_file = ''
 
         # we want to make the order in which the options were given
         # not matter. ie: you can supply --branch before --build-pool
         # or vice versa and the hierarchy will not be different
 
-        #### The order from highest precedence to lowest is:
-        ## There can only be one of these...
+        # The order from highest precedence to lowest is:
+        # There can only be one of these...
         # 1) build_pool: this can be either staging, pre-prod, and prod cfgs
         # 2) branch: eg: mozilla-central, cedar, cypress, etc
         # 3) build_variant: these could be known like asan and debug
         #                   or a custom config
-        ##
-        ## There can be many of these
+        #
+        # There can be many of these
         # 4) all other configs: these are any configs that are passed with
         #                       --cfg and --opt-cfg. There order is kept in
         #                       which they were passed on the cmd line. This
         #                       behaviour is maintains what happens by default
         #                       in mozharness
-        ##
-        ####
 
         # so, let's first assign the configs that hold a known position of
         # importance (1 through 3)
         for i, cf in enumerate(all_config_files):
             if options.build_pool:
                 if cf == BuildOptionParser.build_pool_cfg_file:
                     pool_cfg_file = all_config_files[i]
 
@@ -315,18 +316,19 @@ class BuildingConfig(BaseConfig):
                                                search_path=config_paths + [DEFAULT_CONFIG_PATH])
             if branch_configs.get(options.branch or ""):
                 all_config_dicts.append(
                     (branch_cfg_file, branch_configs[options.branch])
                 )
         if pool_cfg_file:
             # take only the specific pool. If we are here, the pool
             # must be present
-            build_pool_configs = parse_config_file(pool_cfg_file,
-                                                   search_path=config_paths + [DEFAULT_CONFIG_PATH])
+            build_pool_configs = parse_config_file(
+                pool_cfg_file,
+                search_path=config_paths + [DEFAULT_CONFIG_PATH])
             all_config_dicts.append(
                 (pool_cfg_file, build_pool_configs[options.build_pool])
             )
         return all_config_dicts
 
 
 # noinspection PyUnusedLocal
 class BuildOptionParser(object):
@@ -356,36 +358,38 @@ class BuildOptionParser(object):
         'debug': 'builds/releng_sub_%s_configs/%s_debug.py',
         'fuzzing-debug': 'builds/releng_sub_%s_configs/%s_fuzzing_debug.py',
         'asan-and-debug': 'builds/releng_sub_%s_configs/%s_asan_and_debug.py',
         'asan-tc-and-debug': 'builds/releng_sub_%s_configs/%s_asan_tc_and_debug.py',
         'stat-and-debug': 'builds/releng_sub_%s_configs/%s_stat_and_debug.py',
         'code-coverage': 'builds/releng_sub_%s_configs/%s_code_coverage.py',
         'source': 'builds/releng_sub_%s_configs/%s_source.py',
         'noopt-debug': 'builds/releng_sub_%s_configs/%s_noopt_debug.py',
-        'api-16-gradle-dependencies': 'builds/releng_sub_%s_configs/%s_api_16_gradle_dependencies.py',
+        'api-16-gradle-dependencies':
+            'builds/releng_sub_%s_configs/%s_api_16_gradle_dependencies.py',
         'api-16': 'builds/releng_sub_%s_configs/%s_api_16.py',
         'api-16-artifact': 'builds/releng_sub_%s_configs/%s_api_16_artifact.py',
         'api-16-debug': 'builds/releng_sub_%s_configs/%s_api_16_debug.py',
         'api-16-debug-artifact': 'builds/releng_sub_%s_configs/%s_api_16_debug_artifact.py',
         'api-16-gradle': 'builds/releng_sub_%s_configs/%s_api_16_gradle.py',
         'api-16-gradle-artifact': 'builds/releng_sub_%s_configs/%s_api_16_gradle_artifact.py',
-        'api-16-without-google-play-services': 'builds/releng_sub_%s_configs/%s_api_16_without_google_play_services.py',
+        'api-16-without-google-play-services':
+            'builds/releng_sub_%s_configs/%s_api_16_without_google_play_services.py',
         'rusttests': 'builds/releng_sub_%s_configs/%s_rusttests.py',
         'rusttests-debug': 'builds/releng_sub_%s_configs/%s_rusttests_debug.py',
         'x86': 'builds/releng_sub_%s_configs/%s_x86.py',
         'x86-artifact': 'builds/releng_sub_%s_configs/%s_x86_artifact.py',
         'api-16-partner-sample1': 'builds/releng_sub_%s_configs/%s_api_16_partner_sample1.py',
         'aarch64': 'builds/releng_sub_%s_configs/%s_aarch64.py',
         'android-test': 'builds/releng_sub_%s_configs/%s_test.py',
         'android-checkstyle': 'builds/releng_sub_%s_configs/%s_checkstyle.py',
         'android-lint': 'builds/releng_sub_%s_configs/%s_lint.py',
         'android-findbugs': 'builds/releng_sub_%s_configs/%s_findbugs.py',
         'android-geckoview-docs': 'builds/releng_sub_%s_configs/%s_geckoview_docs.py',
-        'valgrind' : 'builds/releng_sub_%s_configs/%s_valgrind.py',
+        'valgrind': 'builds/releng_sub_%s_configs/%s_valgrind.py',
         'artifact': 'builds/releng_sub_%s_configs/%s_artifact.py',
         'debug-artifact': 'builds/releng_sub_%s_configs/%s_debug_artifact.py',
         'devedition': 'builds/releng_sub_%s_configs/%s_devedition.py',
         'dmd': 'builds/releng_sub_%s_configs/%s_dmd.py',
     }
     build_pool_cfg_file = 'builds/build_pool_specifics.py'
     branch_cfg_file = 'builds/branch_specifics.py'
 
@@ -754,23 +758,22 @@ or run without that action (ie: --no-{ac
                 sys.executable, os.path.join(dirs['abs_src_dir'], 'mach'), 'python',
                 print_conf_setting_path, app_ini_path,
                 'App', prop
             ]
             env = self.query_build_env()
             # dirs['abs_obj_dir'] can be different from env['MOZ_OBJDIR'] on
             # mac, and that confuses mach.
             del env['MOZ_OBJDIR']
-            return self.get_output_from_command(cmd,
-                cwd=dirs['abs_obj_dir'], env=env)
+            return self.get_output_from_command(
+                cmd, cwd=dirs['abs_obj_dir'], env=env)
         else:
             return None
 
     def query_buildid(self):
-        c = self.config
         if self.buildid:
             return self.buildid
 
         # for taskcluster, we pass MOZ_BUILD_DATE into mozharness as an
         # environment variable, only to have it pass the same value out with
         # the same name.
         buildid = os.environ.get('MOZ_BUILD_DATE')
 
@@ -1118,17 +1121,18 @@ or run without that action (ie: --no-{ac
     def build(self):
         """builds application."""
         env = self.query_build_env()
         env.update(self.query_mach_build_env())
 
         dirs = self.query_abs_dirs()
 
         if 'MOZILLABUILD' in os.environ:
-            # We found many issues with intermittent build failures when not invoking mach via bash.
+            # We found many issues with intermittent build failures when not
+            # invoking mach via bash.
             # See bug 1364651 before considering changing.
             mach = [
                 os.path.join(os.environ['MOZILLABUILD'], 'msys', 'bin', 'bash.exe'),
                 os.path.join(dirs['abs_src_dir'], 'mach')
             ]
         else:
             mach = [sys.executable, 'mach']
 
@@ -1192,34 +1196,36 @@ or run without that action (ie: --no-{ac
             'echo-variable-PACKAGE',
             'AB_CD=multi',
         ]
         package_filename = self.get_output_from_command(
             package_cmd,
             cwd=objdir,
         )
         if not package_filename:
-            self.fatal("Unable to determine the package filename for the multi-l10n build. Was trying to run: %s" % package_cmd)
+            self.fatal(
+                "Unable to determine the package filename for the multi-l10n build. "
+                "Was trying to run: %s" % package_cmd)
 
         self.info('Multi-l10n package filename is: %s' % package_filename)
 
         parser = MakeUploadOutputParser(config=self.config,
                                         log_obj=self.log_obj,
                                         )
         upload_cmd = ['make', 'upload', 'AB_CD=multi']
         self.run_command(upload_cmd,
                          env=self.query_mach_build_env(multiLocale=False),
                          cwd=objdir, halt_on_failure=True,
                          output_parser=parser)
         upload_files_cmd = [
             'make',
             'echo-variable-UPLOAD_FILES',
             'AB_CD=multi',
         ]
-        output = self.get_output_from_command(
+        self.get_output_from_command(
             upload_files_cmd,
             cwd=objdir,
         )
 
     def postflight_build(self):
         """grabs properties from post build and calls ccache -s"""
         # A list of argument lists.  Better names gratefully accepted!
         mach_commands = self.config.get('postflight_build_mach_commands', [])
@@ -1355,17 +1361,16 @@ or run without that action (ie: --no-{ac
                 continue
             data['subtests'].append({
                 'name': phase['name'],
                 'value': phase['duration'],
             })
 
         return data
 
-
     def _load_sccache_stats(self):
         stats_file = os.path.join(
             self.query_abs_dirs()['abs_obj_dir'], 'sccache-stats.json'
         )
         if not os.path.exists(stats_file):
             self.info('%s does not exist; not loading sccache stats' % stats_file)
             return
 
@@ -1450,33 +1455,33 @@ or run without that action (ie: --no-{ac
                             self.fatal('should not see %s (%s) multiple times!'
                                        % (name, path))
                         subtests[name] = size
                 for name in subtests:
                     self.info('Size of %s: %s bytes' % (name,
                                                         subtests[name]))
                     size_measurements.append(
                         {'name': name, 'value': subtests[name]})
-            except:
+            except Exception:
                 self.info('Unable to search %s for component sizes.' % installer)
                 size_measurements = []
 
         if not installer_size and not size_measurements:
             return
 
         # We want to always collect metrics. But alerts for installer size are
         # only use for builds with ship. So nix the alerts for builds we don't
         # ship.
         def filter_alert(alert):
             if not self._is_configuration_shipped():
                 alert['shouldAlert'] = False
 
             return alert
 
-        if installer.endswith('.apk'): # Android
+        if installer.endswith('.apk'):  # Android
             yield filter_alert({
                 "name": "installer size",
                 "value": installer_size,
                 "alertChangeType": "absolute",
                 "alertThreshold": (200 * 1024),
                 "subtests": size_measurements
             })
         else:
--- a/testing/mozharness/mozharness/mozilla/checksums.py
+++ b/testing/mozharness/mozharness/mozilla/checksums.py
@@ -1,21 +1,30 @@
 def parse_checksums_file(checksums):
-    """Parses checksums files that the build system generates and uploads:
-        https://hg.mozilla.org/mozilla-central/file/default/build/checksums.py"""
+    """
+    Parses checksums files that the build system generates and uploads:
+    https://hg.mozilla.org/mozilla-central/file/default/build/checksums.py
+    """
     fileInfo = {}
     for line in checksums.splitlines():
         hash_, type_, size, file_ = line.split(None, 3)
         size = int(size)
         if size < 0:
             raise ValueError("Found negative value (%d) for size." % size)
         if file_ not in fileInfo:
             fileInfo[file_] = {"hashes": {}}
         # If the file already exists, make sure that the size matches the
         # previous entry.
         elif fileInfo[file_]['size'] != size:
-            raise ValueError("Found different sizes for same file %s (%s and %s)" % (file_, fileInfo[file_]['size'], size))
+            raise ValueError(
+                "Found different sizes for same file %s (%s and %s)"
+                % (file_, fileInfo[file_]['size'], size))
         # Same goes for the hash.
-        elif type_ in fileInfo[file_]['hashes'] and fileInfo[file_]['hashes'][type_] != hash_:
-            raise ValueError("Found different %s hashes for same file %s (%s and %s)" % (type_, file_, fileInfo[file_]['hashes'][type_], hash_))
+        elif (
+            type_ in fileInfo[file_]['hashes']
+            and fileInfo[file_]['hashes'][type_] != hash_
+        ):
+            raise ValueError(
+                "Found different %s hashes for same file %s (%s and %s)"
+                % (type_, file_, fileInfo[file_]['hashes'][type_], hash_))
         fileInfo[file_]['size'] = size
         fileInfo[file_]['hashes'][type_] = hash_
     return fileInfo
--- a/testing/mozharness/mozharness/mozilla/l10n/locales.py
+++ b/testing/mozharness/mozharness/mozilla/l10n/locales.py
@@ -4,17 +4,16 @@
 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
 # You can obtain one at http://mozilla.org/MPL/2.0/.
 # ***** END LICENSE BLOCK *****
 """Localization.
 """
 
 import os
 import pprint
-import sys
 
 from mozharness.base.config import parse_config_file
 
 
 # LocalesMixin {{{1
 class LocalesMixin(object):
     def __init__(self, **kwargs):
         """ Mixins generally don't have an __init__.
--- a/testing/mozharness/mozharness/mozilla/l10n/multi_locale_build.py
+++ b/testing/mozharness/mozharness/mozilla/l10n/multi_locale_build.py
@@ -80,17 +80,16 @@ class MultiLocaleBuild(LocalesMixin, Mer
                                               'summary'],
                                  require_config_file=require_config_file)
 
     def query_l10n_env(self):
         return self.query_env()
 
     # pull_locale_source() defined in LocalesMixin.
 
-
     def add_locales(self):
         dirs = self.query_abs_dirs()
         locales = self.query_locales()
 
         for locale in locales:
             command = 'make chrome-%s L10NBASEDIR=%s' % (locale, dirs['abs_l10n_dir'])
             status = self._process_command(command=command,
                                            cwd=dirs['abs_locales_dir'],
@@ -142,11 +141,12 @@ class MultiLocaleBuild(LocalesMixin, Mer
 
     def _process_command(self, **kwargs):
         """Stub wrapper function that allows us to call scratchbox in
            MaemoMultiLocaleBuild.
 
         """
         return self.run_command(**kwargs)
 
+
 # __main__ {{{1
 if __name__ == '__main__':
     pass
--- a/testing/mozharness/mozharness/mozilla/merkle.py
+++ b/testing/mozharness/mozharness/mozilla/merkle.py
@@ -1,24 +1,26 @@
-#!/usr/bin/env python
+import struct
 
-import struct
 
 def _round2(n):
     k = 1
     while k < n:
         k <<= 1
     return k >> 1
 
+
 def _leaf_hash(hash_fn, leaf):
     return hash_fn(b'\x00' + leaf).digest()
 
+
 def _pair_hash(hash_fn, left, right):
     return hash_fn(b'\x01' + left + right).digest()
 
+
 class InclusionProof:
     """
     Represents a Merkle inclusion proof for purposes of serialization,
     deserialization, and verification of the proof.  The format for inclusion
     proofs in RFC 6962-bis is as follows:
 
         opaque LogID<2..127>;
         opaque NodeHash<32..2^8-1>;
@@ -51,17 +53,17 @@ class InclusionProof:
     @staticmethod
     def from_rfc6962_bis(serialized):
         start = 0
         read = 1
         if len(serialized) < start + read:
             raise Exception('Inclusion proof too short for log ID header')
         log_id_len, = struct.unpack('B', serialized[start:start+read])
         start += read
-        start += log_id_len # Ignore the log ID itself
+        start += log_id_len  # Ignore the log ID itself
 
         read = 8 + 8 + 2
         if len(serialized) < start + read:
             raise Exception('Inclusion proof too short for middle section')
         tree_size, leaf_index, path_len = struct.unpack('!QQH', serialized[start:start+read])
         start += read
 
         path_elements = []
@@ -113,20 +115,20 @@ class InclusionProof:
         for i, elem in enumerate(self.path_elements):
             if lr[i]:
                 node = _pair_hash(hash_fn, node, elem)
             else:
                 node = _pair_hash(hash_fn, elem, node)
 
         return node
 
-
     def verify(self, hash_fn, leaf, leaf_index, tree_size, tree_head):
         return self._expected_head(hash_fn, leaf, leaf_index, tree_size) == tree_head
 
+
 class MerkleTree:
     """
     Implements a Merkle tree on a set of data items following the
     structure defined in RFC 6962-bis.  This allows us to create a
     single hash value that summarizes the data (the 'head'), and an
     'inclusion proof' for each element that connects it to the head.
 
     https://tools.ietf.org/html/draft-ietf-trans-rfc6962-bis-24
--- a/testing/mozharness/mozharness/mozilla/secrets.py
+++ b/testing/mozharness/mozharness/mozilla/secrets.py
@@ -3,20 +3,18 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
 # You can obtain one at http://mozilla.org/MPL/2.0/.
 # ***** END LICENSE BLOCK *****
 """Support for fetching secrets from the secrets API
 """
 
 import os
-import mozharness
 import urllib2
 import json
-from mozharness.base.log import ERROR
 
 
 class SecretsMixin(object):
 
     def _fetch_secret(self, secret_name):
         self.info("fetching secret {} from API".format(secret_name))
         # fetch from http://taskcluster, which points to the taskcluster proxy
         # within a taskcluster task.  Outside of that environment, do not
deleted file mode 100644
--- a/tools/lint/flake8.yml
+++ b/tools/lint/flake8.yml
@@ -21,18 +21,17 @@ flake8:
         - testing/mach_commands.py
         - testing/marionette/client
         - testing/marionette/harness
         - testing/marionette/puppeteer
         - testing/mochitest
         - testing/mozbase
         - testing/mozharness/configs
         - testing/mozharness/mozfile
-        - testing/mozharness/mozharness/mozilla/mar.py
-        - testing/mozharness/mozharness/mozilla/tooltool.py
+        - testing/mozharness/mozharness
         - testing/mozharness/mozinfo
         - testing/mozharness/scripts
         - testing/remotecppunittests.py
         - testing/runcppunittests.py
         - testing/talos/
         - testing/xpcshell
         - toolkit/components/telemetry
         - toolkit/crashreporter/tools/upload_symbols.py