Bug 1464869 - Fix flake8/pep8 issue by hand in testing/mozharness/mozharness/mozilla/testing/ r=jmaher
authorSylvestre Ledru <sledru@mozilla.com>
Fri, 01 Jun 2018 12:20:53 +0200
changeset 421552 c019186dbab51795d7a4465dccd9bf354bb12c36
parent 421551 6576d030231342d51625ac712a9ffc5d29ff9f30
child 421553 10e8e89771c9793a0240cc05e0dc05faa04e8d36
push id34097
push usernbeleuzu@mozilla.com
push dateWed, 06 Jun 2018 16:59:16 +0000
treeherdermozilla-central@223e5900fbbb [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersjmaher
bugs1464869
milestone62.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1464869 - Fix flake8/pep8 issue by hand in testing/mozharness/mozharness/mozilla/testing/ r=jmaher MozReview-Commit-ID: BXZarR8VsKC
testing/mozharness/mozharness/mozilla/testing/codecoverage.py
testing/mozharness/mozharness/mozilla/testing/errors.py
testing/mozharness/mozharness/mozilla/testing/firefox_ui_tests.py
testing/mozharness/mozharness/mozilla/testing/per_test_base.py
testing/mozharness/mozharness/mozilla/testing/raptor.py
testing/mozharness/mozharness/mozilla/testing/talos.py
testing/mozharness/mozharness/mozilla/testing/testbase.py
testing/mozharness/mozharness/mozilla/testing/unittest.py
--- a/testing/mozharness/mozharness/mozilla/testing/codecoverage.py
+++ b/testing/mozharness/mozharness/mozilla/testing/codecoverage.py
@@ -122,17 +122,18 @@ class CodeCoverageMixin(SingleTestMixin)
             tar_file = 'grcov-linux-x86_64.tar.bz2'
         elif mozinfo.os == 'win':
             platform = 'win32'
             tar_file = 'grcov-win-i686.tar.bz2'
         elif mozinfo.os == 'mac':
             platform = 'macosx64'
             tar_file = 'grcov-osx-x86_64.tar.bz2'
 
-        manifest = os.path.join(dirs.get('abs_test_install_dir', os.path.join(dirs['abs_work_dir'], 'tests')),
+        manifest = os.path.join(dirs.get('abs_test_install_dir',
+                                         os.path.join(dirs['abs_work_dir'], 'tests')),
                                 'config/tooltool-manifests/%s/ccov.manifest' % platform)
 
         self.tooltool_fetch(
             manifest=manifest,
             output_dir=self.grcov_dir,
             cache=self.config.get('tooltool_cache')
         )
 
@@ -164,26 +165,26 @@ class CodeCoverageMixin(SingleTestMixin)
 
         # Add baseline code coverage collection tests
         baseline_tests = {
             '.html': {
                 'test': 'testing/mochitest/baselinecoverage/plain/test_baselinecoverage.html',
                 'suite': 'plain'
             },
             '.js': {
-                'test': 'testing/mochitest/baselinecoverage/browser_chrome/browser_baselinecoverage.js',
+                'test': 'testing/mochitest/baselinecoverage/browser_chrome/browser_baselinecoverage.js',  # NOQA: E501
                 'suite': 'browser-chrome'
             },
             '.xul': {
                 'test': 'testing/mochitest/baselinecoverage/chrome/test_baselinecoverage.xul',
                 'suite': 'chrome'
             }
         }
 
-        wpt_baseline_test = 'tests/web-platform/mozilla/tests/baselinecoverage/wpt_baselinecoverage.html'
+        wpt_baseline_test = 'tests/web-platform/mozilla/tests/baselinecoverage/wpt_baselinecoverage.html'  # NOQA: E501
         if self.config.get('per_test_category') == "web-platform":
             if 'testharness' not in self.suites:
                 self.suites['testharness'] = []
             if wpt_baseline_test not in self.suites['testharness']:
                 self.suites["testharness"].append(wpt_baseline_test)
             return
 
         # Go through all the tests and find all
@@ -233,17 +234,22 @@ class CodeCoverageMixin(SingleTestMixin)
         if not self.code_coverage_enabled:
             return
 
         if self.per_test_coverage:
             return
 
         self.gcov_dir, self.jsvm_dir = self.set_coverage_env(os.environ)
 
-    def parse_coverage_artifacts(self, gcov_dir, jsvm_dir, merge=False, output_format='lcov', filter_covered=False):
+    def parse_coverage_artifacts(self,
+                                 gcov_dir,
+                                 jsvm_dir,
+                                 merge=False,
+                                 output_format='lcov',
+                                 filter_covered=False):
         jsvm_output_file = 'jsvm_lcov_output.info'
         grcov_output_file = 'grcov_lcov_output.info'
 
         dirs = self.query_abs_dirs()
 
         sys.path.append(dirs['abs_test_install_dir'])
         sys.path.append(os.path.join(dirs['abs_test_install_dir'], 'mozbuild/codecoverage'))
 
@@ -384,17 +390,18 @@ class CodeCoverageMixin(SingleTestMixin)
                         if 'baselinecoverage' in test:
                             # Don't keep the baseline coverage
                             continue
                         else:
                             # Get test coverage
                             with open(grcov_file, 'r') as f:
                                 report = json.load(f)
 
-                            # Remove uncovered files, as they are unneeded for per-test coverage purposes.
+                            # Remove uncovered files, as they are unneeded for per-test
+                            # coverage purposes.
                             report['source_files'] = [
                                 sf for sf in report['source_files'] if self.is_covered(sf)]
 
                             # Get baseline coverage
                             baseline_coverage = {}
                             if self.config.get('per_test_category') == "web-platform":
                                 baseline_coverage = baseline_tests_cov['.html']
                             else:
@@ -421,18 +428,18 @@ class CodeCoverageMixin(SingleTestMixin)
                         z.write(grcov_file)
             return
 
         del os.environ['GCOV_PREFIX_STRIP']
         del os.environ['GCOV_PREFIX']
         del os.environ['JS_CODE_COVERAGE_OUTPUT_DIR']
 
         if not self.ccov_upload_disabled:
-            grcov_output_file, jsvm_output_file = self.parse_coverage_artifacts(
-                self.gcov_dir, self.jsvm_dir)
+            grcov_output_file, jsvm_output_file = self.parse_coverage_artifacts(self.gcov_dir,
+                                                                                self.jsvm_dir)
 
             # Zip the grcov output and upload it.
             grcov_zip_path = os.path.join(dirs['abs_blob_upload_dir'], 'code-coverage-grcov.zip')
             with zipfile.ZipFile(grcov_zip_path, 'w', zipfile.ZIP_DEFLATED) as z:
                 z.write(grcov_output_file)
 
             # Zip the JSVM coverage data and upload it.
             jsvm_zip_path = os.path.join(dirs['abs_blob_upload_dir'], 'code-coverage-jsvm.zip')
--- a/testing/mozharness/mozharness/mozilla/testing/errors.py
+++ b/testing/mozharness/mozharness/mozilla/testing/errors.py
@@ -14,17 +14,17 @@ whether IGNORE, DEBUG, INFO, WARNING, ER
 
 """
 
 import re
 from mozharness.base.log import INFO, WARNING, ERROR
 
 # ErrorLists {{{1
 _mochitest_summary = {
-    'regex': re.compile(r'''(\d+ INFO (Passed|Failed|Todo):\ +(\d+)|\t(Passed|Failed|Todo): (\d+))'''),
+    'regex': re.compile(r'''(\d+ INFO (Passed|Failed|Todo):\ +(\d+)|\t(Passed|Failed|Todo): (\d+))'''),  # NOQA: E501
     'pass_group': "Passed",
     'fail_group': "Failed",
     'known_fail_group': "Todo",
 }
 
 TinderBoxPrintRe = {
     "mochitest_summary": _mochitest_summary,
     "mochitest-chrome_summary": _mochitest_summary,
@@ -34,35 +34,35 @@ TinderBoxPrintRe = {
     "mochitest-plain-gpu_summary": _mochitest_summary,
     "marionette_summary": {
         'regex': re.compile(r'''(passed|failed|todo):\ +(\d+)'''),
         'pass_group': "passed",
         'fail_group': "failed",
         'known_fail_group': "todo",
     },
     "reftest_summary": {
-        'regex': re.compile(r'''REFTEST INFO \| (Successful|Unexpected|Known problems): (\d+) \('''),
+        'regex': re.compile(r'''REFTEST INFO \| (Successful|Unexpected|Known problems): (\d+) \('''),  # NOQA: E501
         'pass_group': "Successful",
         'fail_group': "Unexpected",
         'known_fail_group': "Known problems",
     },
     "crashtest_summary": {
-        'regex': re.compile(r'''REFTEST INFO \| (Successful|Unexpected|Known problems): (\d+) \('''),
+        'regex': re.compile(r'''REFTEST INFO \| (Successful|Unexpected|Known problems): (\d+) \('''),  # NOQA: E501
         'pass_group': "Successful",
         'fail_group': "Unexpected",
         'known_fail_group': "Known problems",
     },
     "xpcshell_summary": {
         'regex': re.compile(r'''INFO \| (Passed|Failed|Todo): (\d+)'''),
         'pass_group': "Passed",
         'fail_group': "Failed",
         'known_fail_group': "Todo",
     },
     "jsreftest_summary": {
-        'regex': re.compile(r'''REFTEST INFO \| (Successful|Unexpected|Known problems): (\d+) \('''),
+        'regex': re.compile(r'''REFTEST INFO \| (Successful|Unexpected|Known problems): (\d+) \('''),  # NOQA: E501
         'pass_group': "Successful",
         'fail_group': "Unexpected",
         'known_fail_group': "Known problems",
     },
     "robocop_summary": _mochitest_summary,
     "instrumentation_summary": _mochitest_summary,
     "cppunittest_summary": {
         'regex': re.compile(r'''cppunittests INFO \| (Passed|Failed): (\d+)'''),
@@ -103,19 +103,19 @@ TinderBoxPrintRe = {
     "geckoview-junit_summary": {
         'regex': re.compile(r'''(Passed|Failed): (\d+)'''),
         'pass_group': "Passed",
         'fail_group': "Failed",
         'known_fail_group': None,
     },
 
     "harness_error": {
-        'full_regex': re.compile(r"(?:TEST-UNEXPECTED-FAIL|PROCESS-CRASH) \| .* \| (application crashed|missing output line for total leaks!|negative leaks caught!|\d+ bytes leaked)"),
+        'full_regex': re.compile(r"(?:TEST-UNEXPECTED-FAIL|PROCESS-CRASH) \| .* \| (application crashed|missing output line for total leaks!|negative leaks caught!|\d+ bytes leaked)"),  # NOQA: E501
         'minimum_regex': re.compile(r'''(TEST-UNEXPECTED|PROCESS-CRASH)'''),
-        'retry_regex': re.compile(r'''(FAIL-SHOULD-RETRY|No space left on device|ADBError|ADBProcessError|ADBTimeoutError|DMError|Connection to the other side was lost in a non-clean fashion|program finished with exit code 80|INFRA-ERROR|twisted.spread.pb.PBConnectionLost|_dl_open: Assertion|Timeout exceeded for _runCmd call)''')
+        'retry_regex': re.compile(r'''(FAIL-SHOULD-RETRY|No space left on device|ADBError|ADBProcessError|ADBTimeoutError|DMError|Connection to the other side was lost in a non-clean fashion|program finished with exit code 80|INFRA-ERROR|twisted.spread.pb.PBConnectionLost|_dl_open: Assertion|Timeout exceeded for _runCmd call)''')  # NOQA: E501
     },
 }
 
 TestPassed = [
     {'regex': re.compile('''(TEST-INFO|TEST-KNOWN-FAIL|TEST-PASS|INFO \| )'''), 'level': INFO},
 ]
 
 HarnessErrorList = [
--- a/testing/mozharness/mozharness/mozilla/testing/firefox_ui_tests.py
+++ b/testing/mozharness/mozharness/mozilla/testing/firefox_ui_tests.py
@@ -5,18 +5,17 @@
 # You can obtain one at http://mozilla.org/MPL/2.0/.
 # ***** END LICENSE BLOCK *****
 
 
 import copy
 import os
 import sys
 
-from mozharness.base.log import FATAL, WARNING
-from mozharness.base.python import PostScriptRun, PreScriptAction
+from mozharness.base.python import PreScriptAction
 from mozharness.mozilla.structuredlog import StructuredOutputParser
 from mozharness.mozilla.testing.testbase import (
     TestingMixin,
     testing_config_options,
 )
 from mozharness.mozilla.testing.codecoverage import (
     CodeCoverageMixin,
     code_coverage_config_options
@@ -25,17 +24,18 @@ from mozharness.mozilla.vcstools import 
 
 
 # General command line arguments for Firefox ui tests
 firefox_ui_tests_config_options = [
     [["--allow-software-gl-layers"], {
         "action": "store_true",
         "dest": "allow_software_gl_layers",
         "default": False,
-        "help": "Permits a software GL implementation (such as LLVMPipe) to use the GL compositor.",
+        "help": "Permits a software GL implementation (such as LLVMPipe) to use the GL "
+        "compositor.",
     }],
     [["--enable-webrender"], {
         "action": "store_true",
         "dest": "enable_webrender",
         "default": False,
         "help": "Tries to enable the WebRender compositor.",
     }],
     [['--dry-run'], {
@@ -256,17 +256,18 @@ class FirefoxUITests(TestingMixin, VCSTo
 
         # Set further environment settings
         env = env or self.query_env()
         env.update({'MINIDUMP_SAVE_PATH': dirs['abs_blob_upload_dir']})
         if self.query_minidump_stackwalk():
             env.update({'MINIDUMP_STACKWALK': self.minidump_stackwalk_path})
         env['RUST_BACKTRACE'] = 'full'
 
-        # If code coverage is enabled, set GCOV_PREFIX and JS_CODE_COVERAGE_OUTPUT_DIR env variables
+        # If code coverage is enabled, set GCOV_PREFIX and JS_CODE_COVERAGE_OUTPUT_DIR
+        # env variables
         if self.config.get('code_coverage'):
             env['GCOV_PREFIX'] = self.gcov_dir
             env['JS_CODE_COVERAGE_OUTPUT_DIR'] = self.jsvm_dir
 
         if self.config['allow_software_gl_layers']:
             env['MOZ_LAYERS_ALLOW_SOFTWARE_GL'] = '1'
         if self.config['enable_webrender']:
             env['MOZ_WEBRENDER'] = '1'
--- a/testing/mozharness/mozharness/mozilla/testing/per_test_base.py
+++ b/testing/mozharness/mozharness/mozilla/testing/per_test_base.py
@@ -1,24 +1,22 @@
 #!/usr/bin/env python
 # ***** BEGIN LICENSE BLOCK *****
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
 # You can obtain one at http://mozilla.org/MPL/2.0/.
 # ***** END LICENSE BLOCK *****
 
-import argparse
 import math
 import os
 import posixpath
 import re
 import sys
 import mozinfo
 from manifestparser import TestManifest
-from mozharness.base.script import PostScriptAction
 
 
 class SingleTestMixin(object):
     """Utility functions for per-test testing like test verification and per-test coverage."""
 
     def __init__(self):
         self.suites = {}
         self.tests_downloaded = False
@@ -58,17 +56,19 @@ class SingleTestMixin(object):
         ]
         sys.path.append(dirs['abs_reftest_dir'])
         import manifest
         self.reftest_test_dir = os.path.join(dirs['abs_reftest_dir'], 'tests')
         for (path, suite, subsuite) in ref_manifests:
             if os.path.exists(path):
                 man = manifest.ReftestManifest()
                 man.load(path)
-                tests_by_path.update({os.path.relpath(t, self.reftest_test_dir): (suite, subsuite) for t in man.files})
+                tests_by_path.update({
+                    os.path.relpath(t, self.reftest_test_dir): (suite, subsuite) for t in man.files
+                })
                 self.info("Per-test run updated with manifest %s" % path)
 
         suite = 'jsreftest'
         self.jsreftest_test_dir = os.path.join(dirs['abs_test_install_dir'], 'jsreftest', 'tests')
         path = os.path.join(self.jsreftest_test_dir, 'jstests.list')
         if os.path.exists(path):
             man = manifest.ReftestManifest()
             man.load(path)
@@ -194,17 +194,17 @@ class SingleTestMixin(object):
             for c in contents['changesets']:
                 self.info(" {cset} {desc}".format(
                     cset=c['node'][0:12],
                     desc=c['desc'].splitlines()[0].encode('ascii', 'ignore')))
                 changed_files |= set(c['files'])
 
         if self.config.get('per_test_category') == "web-platform":
             self._find_wpt_tests(dirs, changed_files)
-        elif self.config.get('gpu_required') == True:
+        elif self.config.get('gpu_required'):
             self._find_misc_tests(dirs, changed_files, gpu=True)
         else:
             self._find_misc_tests(dirs, changed_files)
 
         # per test mode run specific tests from any given test suite
         # _find_*_tests organizes tests to run into suites so we can
         # run each suite at a time
 
--- a/testing/mozharness/mozharness/mozilla/testing/raptor.py
+++ b/testing/mozharness/mozharness/mozilla/testing/raptor.py
@@ -4,27 +4,26 @@
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import copy
 import json
 import os
 import re
 import sys
+import subprocess
 
 from shutil import copyfile
 
 import mozharness
 
-from mozharness.base.config import parse_config_file
 from mozharness.base.errors import PythonErrorList
-from mozharness.base.log import OutputParser, DEBUG, ERROR, CRITICAL, INFO, WARNING
+from mozharness.base.log import OutputParser, DEBUG, ERROR, CRITICAL, INFO
 from mozharness.base.python import Python3Virtualenv
 from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
-from mozharness.mozilla.tooltool import TooltoolMixin
 from mozharness.base.vcs.vcsbase import MercurialScript
 from mozharness.mozilla.testing.codecoverage import (
     CodeCoverageMixin,
     code_coverage_config_options
 )
 
 scripts_path = os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__)))
 external_tools_path = os.path.join(scripts_path, 'external_tools')
@@ -33,17 +32,18 @@ RaptorErrorList = PythonErrorList + [
     {'regex': re.compile(r'''run-as: Package '.*' is unknown'''), 'level': DEBUG},
     {'substr': r'''FAIL: Busted:''', 'level': CRITICAL},
     {'substr': r'''FAIL: failed to cleanup''', 'level': ERROR},
     {'substr': r'''erfConfigurator.py: Unknown error''', 'level': CRITICAL},
     {'substr': r'''raptorError''', 'level': CRITICAL},
     {'regex': re.compile(r'''No machine_name called '.*' can be found'''), 'level': CRITICAL},
     {'substr': r"""No such file or directory: 'browser_output.txt'""",
      'level': CRITICAL,
-     'explanation': r"""Most likely the browser failed to launch, or the test was otherwise unsuccessful in even starting."""},
+     'explanation': "Most likely the browser failed to launch, or the test was otherwise "
+     "unsuccessful in even starting."},
 ]
 
 
 class Raptor(TestingMixin, MercurialScript, Python3Virtualenv, CodeCoverageMixin):
     """
     install and run raptor tests
     """
     config_options = [
@@ -91,23 +91,27 @@ class Raptor(TestingMixin, MercurialScri
         self.raptor_json_url = self.config.get("raptor_json_url")
         self.raptor_json = self.config.get("raptor_json")
         self.raptor_json_config = self.config.get("raptor_json_config")
         self.repo_path = self.config.get("repo_path")
         self.obj_path = self.config.get("obj_path")
         self.tests = None
         self.gecko_profile = self.config.get('gecko_profile')
         self.gecko_profile_interval = self.config.get('gecko_profile_interval')
-        self.mitmproxy_rel_bin = None  # some platforms download a mitmproxy release binary
-        self.mitmproxy_pageset = None  # zip file found on tooltool that contains all of the mitmproxy recordings
-        self.mitmproxy_recordings_file_list = self.config.get(
-            'mitmproxy', None)  # files inside the recording set
-        self.mitmdump = None  # path to mitmdump tool itself, in py3 venv
+        # some platforms download a mitmproxy release binary
+        self.mitmproxy_rel_bin = None
+        # zip file found on tooltool that contains all of the mitmproxy recordings
+        self.mitmproxy_pageset = None
+        # files inside the recording set
+        self.mitmproxy_recordings_file_list = self.config.get('mitmproxy', None)
+        # path to mitmdump tool itself, in py3 venv
+        self.mitmdump = None
 
-    # We accept some configuration options from the try commit message in the format mozharness: <options>
+    # We accept some configuration options from the try commit message in the
+    # format mozharness: <options>
     # Example try commit message:
     #   mozharness: --geckoProfile try: <stuff>
     def query_gecko_profile_options(self):
         gecko_results = []
         # if gecko_profile is set, we add that to the raptor options
         if self.gecko_profile:
             gecko_results.append('--geckoProfile')
             if self.gecko_profile_interval:
@@ -115,29 +119,30 @@ class Raptor(TestingMixin, MercurialScri
                     ['--geckoProfileInterval', str(self.gecko_profile_interval)]
                 )
         return gecko_results
 
     def query_abs_dirs(self):
         if self.abs_dirs:
             return self.abs_dirs
         abs_dirs = super(Raptor, self).query_abs_dirs()
-        abs_dirs['abs_blob_upload_dir'] = os.path.join(
-            abs_dirs['abs_work_dir'], 'blobber_upload_dir')
+        abs_dirs['abs_blob_upload_dir'] = os.path.join(abs_dirs['abs_work_dir'],
+                                                       'blobber_upload_dir')
         abs_dirs['abs_test_install_dir'] = os.path.join(abs_dirs['abs_work_dir'], 'tests')
         self.abs_dirs = abs_dirs
         return self.abs_dirs
 
     def raptor_options(self, args=None, **kw):
         """return options to raptor"""
         # binary path
         binary_path = self.binary_path or self.config.get('binary_path')
         if not binary_path:
-            self.fatal(
-                "Raptor requires a path to the binary.  You can specify binary_path or add download-and-extract to your action list.")
+            msg = ("Raptor requires a path to the binary. You can specify binary_path "
+                   "or add download-and-extract to your action list.")
+            self.fatal(msg)
         # raptor options
         if binary_path.endswith('.exe'):
             binary_path = binary_path[:-4]
         options = []
         kw_options = {'binary': binary_path}
         # options overwritten from **kw
         if 'test' in self.config:
             kw_options['test'] = self.config['test']
@@ -345,24 +350,16 @@ class Raptor(TestingMixin, MercurialScri
                                                 output_timeout=output_timeout,
                                                 output_parser=parser,
                                                 env=env)
         if parser.minidump_output:
             self.info("Looking at the minidump files for debugging purposes...")
             for item in parser.minidump_output:
                 self.run_command(["ls", "-l", item])
 
-        if self.return_code not in [0]:
-            # update the worst log level
-            log_level = ERROR
-            if self.return_code == 1:
-                log_level = WARNING
-            if self.return_code == 4:
-                log_level = WARNING
-
-        elif '--no-upload-results' not in options:
+        if '--no-upload-results' not in options:
             if not self.gecko_profile:
                 self._validate_treeherder_data(parser)
                 if not self.run_local:
                     # copy results to upload dir so they are included as an artifact
                     self.info("copying raptor results to upload dir:")
                     dest = os.path.join(env['MOZ_UPLOAD_DIR'], 'perfherder-data.json')
                     self.info(str(dest))
--- a/testing/mozharness/mozharness/mozilla/testing/talos.py
+++ b/testing/mozharness/mozharness/mozilla/testing/talos.py
@@ -43,17 +43,18 @@ TalosErrorList = PythonErrorList + [
     {'substr': r'''FAIL: Graph server unreachable''', 'level': CRITICAL},
     {'substr': r'''FAIL: Busted:''', 'level': CRITICAL},
     {'substr': r'''FAIL: failed to cleanup''', 'level': ERROR},
     {'substr': r'''erfConfigurator.py: Unknown error''', 'level': CRITICAL},
     {'substr': r'''talosError''', 'level': CRITICAL},
     {'regex': re.compile(r'''No machine_name called '.*' can be found'''), 'level': CRITICAL},
     {'substr': r"""No such file or directory: 'browser_output.txt'""",
      'level': CRITICAL,
-     'explanation': r"""Most likely the browser failed to launch, or the test was otherwise unsuccessful in even starting."""},
+     'explanation': "Most likely the browser failed to launch, or the test was otherwise "
+     "unsuccessful in even starting."},
 ]
 
 # TODO: check for running processes on script invocation
 
 
 class TalosOutputParser(OutputParser):
     minidump_regex = re.compile(r'''talosError: "error executing: '(\S+) (\S+) (\S+)'"''')
     RE_PERF_DATA = re.compile(r'.*PERFHERDER_DATA:\s+(\{.*\})')
@@ -141,17 +142,18 @@ class Talos(TestingMixin, MercurialScrip
             "type": "int",
             "default": 0,
             "help": "The interval between samples taken by the profiler (milliseconds)"
         }],
         [["--e10s"], {
             "dest": "e10s",
             "action": "store_true",
             "default": False,
-            "help": "we should have --disable-e10s, but instead we assume non-e10s and use --e10s to help"
+            "help": "we should have --disable-e10s, but instead we assume non-e10s and use "
+            "--e10s to help"
         }],
         [["--enable-webrender"], {
             "action": "store_true",
             "dest": "enable_webrender",
             "default": False,
             "help": "Tries to enable the WebRender compositor.",
         }],
     ] + testing_config_options + copy.deepcopy(code_coverage_config_options)
@@ -187,24 +189,27 @@ class Talos(TestingMixin, MercurialScrip
         self.repo_path = self.config.get("repo_path")
         self.obj_path = self.config.get("obj_path")
         self.tests = None
         self.gecko_profile = self.config.get('gecko_profile') or \
             "--geckoProfile" in self.config.get("talos_extra_options", [])
         self.gecko_profile_interval = self.config.get('gecko_profile_interval')
         self.pagesets_name = None
         self.benchmark_zip = None
-        self.mitmproxy_rel_bin = None  # some platforms download a mitmproxy release binary
+        # some platforms download a mitmproxy release binary
+        self.mitmproxy_rel_bin = None
         # zip file found on tooltool that contains all of the mitmproxy recordings
         self.mitmproxy_recording_set = None
-        self.mitmproxy_recordings_file_list = self.config.get(
-            'mitmproxy', None)  # files inside the recording set
-        self.mitmdump = None  # path to mitdump tool itself, in py3 venv
+        # files inside the recording set
+        self.mitmproxy_recordings_file_list = self.config.get('mitmproxy', None)
+        # path to mitmdump tool itself, in py3 venv
+        self.mitmdump = None
 
-    # We accept some configuration options from the try commit message in the format mozharness: <options>
+    # We accept some configuration options from the try commit message in the format
+    # mozharness: <options>
     # Example try commit message:
     #   mozharness: --geckoProfile try: <stuff>
     def query_gecko_profile_options(self):
         gecko_results = []
         # finally, if gecko_profile is set, we add that to the talos options
         if self.gecko_profile:
             gecko_results.append('--geckoProfile')
             if self.gecko_profile_interval:
@@ -212,18 +217,18 @@ class Talos(TestingMixin, MercurialScrip
                     ['--geckoProfileInterval', str(self.gecko_profile_interval)]
                 )
         return gecko_results
 
     def query_abs_dirs(self):
         if self.abs_dirs:
             return self.abs_dirs
         abs_dirs = super(Talos, self).query_abs_dirs()
-        abs_dirs['abs_blob_upload_dir'] = os.path.join(
-            abs_dirs['abs_work_dir'], 'blobber_upload_dir')
+        abs_dirs['abs_blob_upload_dir'] = os.path.join(abs_dirs['abs_work_dir'],
+                                                       'blobber_upload_dir')
         abs_dirs['abs_test_install_dir'] = os.path.join(abs_dirs['abs_work_dir'], 'tests')
         self.abs_dirs = abs_dirs
         return self.abs_dirs
 
     def query_talos_json_config(self):
         """Return the talos json config."""
         if self.talos_json_config:
             return self.talos_json_config
@@ -265,17 +270,18 @@ class Talos(TestingMixin, MercurialScrip
             talos_opts = self.talos_json_config['suites'][self.suite].get('talos_options', None)
             for index, val in enumerate(talos_opts):
                 if val == '--mitmproxy':
                     self.mitmproxy_recordings_file_list = talos_opts[index + 1]
             return self.mitmproxy_recordings_file_list
 
     def get_suite_from_test(self):
         """ Retrieve the talos suite name from a given talos test name."""
-        # running locally, single test name provided instead of suite; go through tests and find suite name
+        # running locally, single test name provided instead of suite; go through tests and
+        # find suite name
         suite_name = None
         if self.query_talos_json_config():
             if '-a' in self.config['talos_extra_options']:
                 test_name_index = self.config['talos_extra_options'].index('-a') + 1
             if '--activeTests' in self.config['talos_extra_options']:
                 test_name_index = self.config['talos_extra_options'].index('--activeTests') + 1
             if test_name_index < len(self.config['talos_extra_options']):
                 test_name = self.config['talos_extra_options'][test_name_index]
@@ -287,26 +293,27 @@ class Talos(TestingMixin, MercurialScrip
                 self.fatal("Test name is missing or invalid")
         else:
             self.fatal("Talos json config not found, cannot verify suite")
         return suite_name
 
     def validate_suite(self):
         """ Ensure suite name is a valid talos suite. """
         if self.query_talos_json_config() and self.suite is not None:
-            if not self.suite in self.talos_json_config.get('suites'):
+            if self.suite not in self.talos_json_config.get('suites'):
                 self.fatal("Suite '%s' is not valid (not found in talos json config)" % self.suite)
 
     def talos_options(self, args=None, **kw):
         """return options to talos"""
         # binary path
         binary_path = self.binary_path or self.config.get('binary_path')
         if not binary_path:
-            self.fatal(
-                "Talos requires a path to the binary.  You can specify binary_path or add download-and-extract to your action list.")
+            msg = ("Talos requires a path to the binary.  You can specify binary_path "
+                   "or add download-and-extract to your action list.")
+            self.fatal(msg)
 
         # talos options
         options = []
         # talos can't gather data if the process name ends with '.exe'
         if binary_path.endswith('.exe'):
             binary_path = binary_path[:-4]
         # options overwritten from **kw
         kw_options = {'executablePath': binary_path}
@@ -356,39 +363,40 @@ class Talos(TestingMixin, MercurialScrip
         self.talos_path = os.path.join(
             self.query_abs_dirs()['abs_test_install_dir'], 'talos'
         )
 
         # need to determine if talos pageset is required to be downloaded
         if self.config.get('run_local') and 'talos_extra_options' in self.config:
             # talos initiated locally, get and verify test/suite from cmd line
             self.talos_path = os.path.dirname(self.talos_json)
-            if '-a' in self.config['talos_extra_options'] or '--activeTests' in self.config['talos_extra_options']:
+            if ('-a' in self.config['talos_extra_options'] or
+                '--activeTests' in self.config['talos_extra_options']):
                 # test name (-a or --activeTests) specified, find out what suite it is a part of
                 self.suite = self.get_suite_from_test()
             elif '--suite' in self.config['talos_extra_options']:
                 # --suite specified, get suite from cmd line and ensure is valid
                 suite_name_index = self.config['talos_extra_options'].index('--suite') + 1
                 if suite_name_index < len(self.config['talos_extra_options']):
                     self.suite = self.config['talos_extra_options'][suite_name_index]
                     self.validate_suite()
                 else:
                     self.fatal("Suite name not provided")
         else:
             # talos initiated in production via mozharness
             self.suite = self.config['suite']
 
         tooltool_artifacts = []
         if self.query_pagesets_name():
-            tooltool_artifacts.append(
-                {'name': self.pagesets_name, 'manifest': self.pagesets_name_manifest})
+            tooltool_artifacts.append({'name': self.pagesets_name,
+                                       'manifest': self.pagesets_name_manifest})
 
         if self.query_benchmark_zip():
-            tooltool_artifacts.append(
-                {'name': self.benchmark_zip, 'manifest': self.benchmark_zip_manifest})
+            tooltool_artifacts.append({'name': self.benchmark_zip,
+                                       'manifest': self.benchmark_zip_manifest})
 
         # now that have the suite name, check if artifact is required, if so download it
         # the --no-download option will override this
         for artifact in tooltool_artifacts:
             if '--no-download' not in self.config.get('talos_extra_options', []):
                 self.info("Downloading %s with tooltool..." % artifact)
                 self.src_talos_webdir = os.path.join(self.talos_path, 'talos')
                 src_talos_pageset = os.path.join(self.src_talos_webdir, 'tests')
@@ -401,17 +409,18 @@ class Talos(TestingMixin, MercurialScrip
                     )
                     archive = os.path.join(src_talos_pageset, artifact['name'])
                     unzip = self.query_exe('unzip')
                     unzip_cmd = [unzip, '-q', '-o', archive, '-d', src_talos_pageset]
                     self.run_command(unzip_cmd, halt_on_failure=True)
                 else:
                     self.info("%s already available" % artifact)
             else:
-                self.info("Not downloading %s because the no-download option was specified" % artifact)
+                self.info("Not downloading %s because the no-download option was specified" %
+                          artifact)
 
         # if running webkit tests locally, need to copy webkit source into talos/tests
         if self.config.get('run_local') and ('speedometer' in self.suite or
                                              'stylebench' in self.suite or
                                              'motionmark' in self.suite):
             self.get_webkit_source()
 
     def get_webkit_source(self):
@@ -423,17 +432,17 @@ class Talos(TestingMixin, MercurialScrip
                             'PerformanceTests')
 
         if os.path.exists(dest):
             shutil.rmtree(dest)
 
         self.info("Copying webkit benchmarks from %s to %s" % (src, dest))
         try:
             shutil.copytree(src, dest)
-        except:
+        except Exception:
             self.critical("Error copying webkit benchmarks from %s to %s" % (src, dest))
 
     def setup_mitmproxy(self):
         """Some talos tests require the use of mitmproxy to playback the pages,
         set it up here.
         """
         if not self.query_mitmproxy_recording_set():
             self.info("Skipping: mitmproxy is not required")
@@ -492,28 +501,29 @@ class Talos(TestingMixin, MercurialScrip
                         _platform = 'osx'
                     else:
                         _platform = 'linux64'
                     self.query_mitmproxy_rel_bin(_platform)
                     if self.mitmproxy_rel_bin is None:
                         self.fatal("Aborting: mitmproxy_release_bin_osx not found in talos.json")
                     self.download_mitmproxy_binary(_platform)
                 else:
-                    self.info("Not downloading mitmproxy rel binary because no-download was specified")
+                    self.info("Not downloading mitmproxy rel binary because no-download was "
+                              "specified")
             self.info('The mitmdump macosx binary is found at: %s' % self.mitmdump)
         self.run_command([self.mitmdump, '--version'], env=self.query_env())
 
     def query_mitmproxy_rel_bin(self, platform):
         """Mitmproxy requires external playback archives to be downloaded and extracted"""
         if self.mitmproxy_rel_bin:
             return self.mitmproxy_rel_bin
         if self.query_talos_json_config() and self.suite is not None:
             config_key = "mitmproxy_release_bin_" + platform
-            self.mitmproxy_rel_bin = self.talos_json_config['suites'][self.suite].get(
-                config_key, False)
+            self.mitmproxy_rel_bin = self.talos_json_config['suites'][self.suite].get(config_key,
+                                                                                      False)
             return self.mitmproxy_rel_bin
 
     def download_mitmproxy_binary(self, platform):
         """Download the mitmproxy release binary from tooltool"""
         self.info("Downloading the mitmproxy release binary using tooltool")
         dest = os.path.join(self.talos_path, 'talos', 'mitmproxy')
         _manifest = "mitmproxy-rel-bin-%s.manifest" % platform
         manifest_file = os.path.join(self.talos_path, 'talos', 'mitmproxy', _manifest)
@@ -530,18 +540,18 @@ class Talos(TestingMixin, MercurialScrip
             unzip_cmd = [tar, '-xvzf', archive, '-C', dest]
             self.run_command(unzip_cmd, halt_on_failure=True)
 
     def query_mitmproxy_recording_set(self):
         """Mitmproxy requires external playback archives to be downloaded and extracted"""
         if self.mitmproxy_recording_set:
             return self.mitmproxy_recording_set
         if self.query_talos_json_config() and self.suite is not None:
-            self.mitmproxy_recording_set = self.talos_json_config['suites'][self.suite].get(
-                'mitmproxy_recording_set', False)
+            self.mitmproxy_recording_set = (
+                self.talos_json_config['suites'][self.suite].get('mitmproxy_recording_set', False))
             return self.mitmproxy_recording_set
 
     def download_mitmproxy_recording_set(self):
         """Download the set of mitmproxy recording files that will be played back"""
         self.info("Downloading the mitmproxy recording set using tooltool")
         dest = os.path.join(self.talos_path, 'talos', 'mitmproxy')
         manifest_file = os.path.join(self.talos_path, 'talos',
                                      'mitmproxy', 'mitmproxy-playback-set.manifest')
@@ -636,25 +646,25 @@ class Talos(TestingMixin, MercurialScrip
         schema_path = os.path.join(external_tools_path,
                                    'performance-artifact-schema.json')
         self.info("Validating PERFHERDER_DATA against %s" % schema_path)
         try:
             with open(schema_path) as f:
                 schema = json.load(f)
             data = json.loads(parser.found_perf_data[0])
             jsonschema.validate(data, schema)
-        except:
+        except Exception:
             self.exception("Error while validating PERFHERDER_DATA")
             parser.update_worst_log_and_tbpl_levels(WARNING, TBPL_WARNING)
 
-    def _artifact_perf_data(self, dest):
+    def _artifact_perf_data(self, parser, dest):
         src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'local.json')
         try:
             shutil.copyfile(src, dest)
-        except:
+        except Exception:
             self.critical("Error copying results %s to upload dir %s" % (src, dest))
             parser.update_worst_log_and_tbpl_levels(CRITICAL, TBPL_FAILURE)
 
     def run_tests(self, args=None, **kw):
         """run Talos tests"""
 
         # get talos options
         options = self.talos_options(args=args, **kw)
@@ -742,17 +752,17 @@ class Talos(TestingMixin, MercurialScrip
 
             parser.update_worst_log_and_tbpl_levels(log_level, tbpl_level)
         elif '--no-upload-results' not in options:
             if not self.gecko_profile:
                 self._validate_treeherder_data(parser)
                 if not self.run_local:
                     # copy results to upload dir so they are included as an artifact
                     dest = os.path.join(env['MOZ_UPLOAD_DIR'], 'perfherder-data.json')
-                    self._artifact_perf_data(dest)
+                    self._artifact_perf_data(parser, dest)
 
         self.record_status(parser.worst_tbpl_status,
                            level=parser.worst_log_level)
 
     def fetch_python3(self):
         manifest_file = os.path.join(
             self.talos_path,
             'talos',
--- a/testing/mozharness/mozharness/mozilla/testing/testbase.py
+++ b/testing/mozharness/mozharness/mozilla/testing/testbase.py
@@ -3,21 +3,18 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
 # You can obtain one at http://mozilla.org/MPL/2.0/.
 # ***** END LICENSE BLOCK *****
 
 import copy
 import os
 import platform
-import pprint
-import re
 import urllib2
 import json
-import socket
 from urlparse import urlparse, ParseResult
 
 from mozharness.base.errors import BaseErrorList
 from mozharness.base.log import FATAL, WARNING
 from mozharness.base.python import (
     ResourceMonitoringMixin,
     VirtualenvMixin,
     virtualenv_config_options,
@@ -53,17 +50,18 @@ testing_config_options = [
       "dest": "installer_url",
       "default": None,
       "help": "URL to the installer to install",
       }],
     [["--installer-path"],
      {"action": "store",
       "dest": "installer_path",
       "default": None,
-      "help": "Path to the installer to install.  This is set automatically if run with --download-and-extract.",
+      "help": "Path to the installer to install. "
+      "This is set automatically if run with --download-and-extract.",
       }],
     [["--binary-path"],
      {"action": "store",
       "dest": "binary_path",
       "default": None,
       "help": "Path to installed binary.  This is set automatically if run with --install.",
       }],
     [["--exe-suffix"],
@@ -219,18 +217,18 @@ class TestingMixin(VirtualenvMixin, Auto
                     self.info("Replacing url %s -> %s" % (url, new_url))
                     return new_url
             return url
 
         if c.get("installer_url") is None:
             self.exception("You must use --installer-url with developer_config.py")
         if c.get("require_test_zip"):
             if not c.get('test_url') and not c.get('test_packages_url'):
-                self.exception(
-                    "You must use --test-url or --test-packages-url with developer_config.py")
+                self.exception("You must use --test-url or --test-packages-url with "
+                               "developer_config.py")
 
         c["installer_url"] = _replace_url(c["installer_url"], c["replace_urls"])
         if c.get("test_url"):
             c["test_url"] = _replace_url(c["test_url"], c["replace_urls"])
         if c.get("test_packages_url"):
             c["test_packages_url"] = _replace_url(c["test_packages_url"], c["replace_urls"])
 
         for key, value in self.config.iteritems():
@@ -252,17 +250,18 @@ class TestingMixin(VirtualenvMixin, Auto
             if not hasattr(self, "https_username"):
                 self.info("NOTICE: Files downloaded from outside of "
                           "Release Engineering network require LDAP "
                           "credentials.")
 
             self.https_username, self.https_password = get_credentials()
             # This creates a password manager
             passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
-            # Because we have put None at the start it will use this username/password combination from here on
+            # Because we have put None at the start it will use this username/password
+            # combination from here on
             passman.add_password(None, url, self.https_username, self.https_password)
             authhandler = urllib2.HTTPBasicAuthHandler(passman)
 
             return urllib2.build_opener(authhandler).open(url, **kwargs)
 
         # If we have the developer_run flag enabled then we will switch
         # URLs to the right place and enable http authentication
         if "developer_config.py" in self.config["config_files"]:
@@ -276,17 +275,19 @@ class TestingMixin(VirtualenvMixin, Auto
 
     def preflight_download_and_extract(self):
         message = ""
         if not self.installer_url:
             message += """installer_url isn't set!
 
 You can set this by specifying --installer-url URL
 """
-        if self.config.get("require_test_zip") and not self.test_url and not self.test_packages_url:
+        if (self.config.get("require_test_zip") and
+            not self.test_url and
+            not self.test_packages_url):
             message += """test_url isn't set!
 
 You can set this by specifying --test-url URL
 """
         if message:
             self.fatal(message + "Can't run download-and-extract... exiting")
 
     def _read_packages_manifest(self):
@@ -619,18 +620,19 @@ Did you run with --create-virtualenv? Is
 
         abs_nodejs_path = os.path.join(dirs['abs_work_dir'], nodejs_path)
 
         if os.path.exists(abs_nodejs_path):
             if self.platform_name() not in ('win32', 'win64'):
                 self.chmod(abs_nodejs_path, 0755)
             self.nodejs_path = abs_nodejs_path
         else:
-            self.warning(
-                "nodejs path was given but couldn't be found. Tried looking in '%s'" % abs_nodejs_path)
+            msg = """nodejs path was given but couldn't be found. Tried looking in '%s'""" % \
+                abs_nodejs_path
+            self.warning(msg)
             self.record_status(TBPL_WARNING, WARNING)
 
         return self.nodejs_path
 
     def query_minidump_stackwalk(self, manifest=None):
         if self.minidump_stackwalk_path:
             return self.minidump_stackwalk_path
 
--- a/testing/mozharness/mozharness/mozilla/testing/unittest.py
+++ b/testing/mozharness/mozharness/mozilla/testing/unittest.py
@@ -1,16 +1,15 @@
 #!/usr/bin/env python
 # ***** BEGIN LICENSE BLOCK *****
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
 # You can obtain one at http://mozilla.org/MPL/2.0/.
 # ***** END LICENSE BLOCK *****
 
-import os
 import re
 
 from mozharness.mozilla.testing.errors import TinderBoxPrintRe
 from mozharness.base.log import OutputParser, WARNING, INFO, CRITICAL, ERROR
 from mozharness.mozilla.automation import TBPL_WARNING, TBPL_FAILURE, TBPL_RETRY
 from mozharness.mozilla.automation import TBPL_SUCCESS, TBPL_WORST_LEVEL_TUPLE
 
 SUITE_CATEGORIES = ['mochitest', 'reftest', 'xpcshell']