Merge mozilla-central to mozilla-inbound. a=merge on a CLOSED TREE
author: Razvan Maries <rmaries@mozilla.com>
Tue, 01 Jan 2019 11:52:51 +0200
changeset 509338 9264db4a86077ae257815435c887303cb1230d86
parent 509337 c8faeeccdb0eb21465e878d98b766fcf194e517d (current diff)
parent 509336 34970df803855000ad8ad935765ba1b4ea9f9050 (diff)
child 509339 c762e770dec4e6e968809dd22bcc59f27d1a4a1b
push id: 10547
push user: ffxbld-merge
push date: Mon, 21 Jan 2019 13:03:58 +0000
treeherder: mozilla-beta@24ec1916bffe [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: merge
milestone: 66.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Merge mozilla-central to mozilla-inbound. a=merge on a CLOSED TREE
--- a/gfx/webrender_bindings/revision.txt
+++ b/gfx/webrender_bindings/revision.txt
@@ -1,1 +1,1 @@
-8b8ca774f555aeb8ef99743196593a192298dd60
+a970cbaa88a7516758046c8be016c34627d31355
--- a/gfx/wr/webrender/src/display_list_flattener.rs
+++ b/gfx/wr/webrender/src/display_list_flattener.rs
@@ -258,31 +258,46 @@ impl<'a> DisplayListFlattener<'a> {
         //  [trailing root primitives]
         //
         // This step is typically very quick, because there are only
         // a small number of items in the root stacking context, since
         // most of the content is embedded in its own picture.
         //
 
         // Find the first primitive which has the desired scroll root.
+        let mut first_index = None;
         let mut main_scroll_root = None;
 
-        let first_index = primitives.iter().position(|instance| {
+        for (i, instance) in primitives.iter().enumerate() {
             let scroll_root = self.find_scroll_root(
                 instance.spatial_node_index,
             );
 
-            if scroll_root == ROOT_SPATIAL_NODE_INDEX {
-                false
-            } else {
-                debug_assert!(main_scroll_root.is_none());
-                main_scroll_root = Some(scroll_root);
-                true
+            if scroll_root != ROOT_SPATIAL_NODE_INDEX {
+                // If we find multiple scroll roots in this page, then skip
+                // picture caching for now. In future, we can handle picture
+                // caching on these sites by creating a tile cache per
+                // scroll root, or (more likely) selecting the common parent
+                // scroll root between the detected scroll roots.
+                match main_scroll_root {
+                    Some(main_scroll_root) => {
+                        if main_scroll_root != scroll_root {
+                            return;
+                        }
+                    }
+                    None => {
+                        main_scroll_root = Some(scroll_root);
+                    }
+                }
+
+                if first_index.is_none() {
+                    first_index = Some(i);
+                }
             }
-        });
+        }
 
         let main_scroll_root = match main_scroll_root {
             Some(main_scroll_root) => main_scroll_root,
             None => ROOT_SPATIAL_NODE_INDEX,
         };
 
         // Get the list of existing primitives in the main stacking context.
         let mut old_prim_list = mem::replace(
--- a/gfx/wr/webrender/src/renderer.rs
+++ b/gfx/wr/webrender/src/renderer.rs
@@ -1358,28 +1358,37 @@ impl VertexDataTexture {
         }
 
         let width =
             (MAX_VERTEX_TEXTURE_WIDTH - (MAX_VERTEX_TEXTURE_WIDTH % texels_per_item)) as i32;
         let needed_height = (data.len() / items_per_row) as i32;
         let existing_height = self.texture.as_ref().map_or(0, |t| t.get_dimensions().height);
 
         // Create a new texture if needed.
-        if needed_height > existing_height {
+        //
+        // These textures are generally very small, which is why we don't bother
+        // with incremental updates and just re-upload every frame. For most pages
+        // they're one row each, and on stress tests like css-francine they end up
+        // in the 6-14 range. So we size the texture tightly to what we need (usually
+        // 1), and shrink it if the waste would be more than 10 rows. This helps
+        // with memory overhead, especially because there are several instances of
+        // these textures per Renderer.
+        if needed_height > existing_height || needed_height + 10 < existing_height {
             // Drop the existing texture, if any.
             if let Some(t) = self.texture.take() {
                 device.delete_texture(t);
             }
-            let new_height = (needed_height + 127) & !127;
 
             let texture = device.create_texture(
                 TextureTarget::Default,
                 self.format,
                 width,
-                new_height,
+                // Ensure height is at least two to work around
+                // https://bugs.chromium.org/p/angleproject/issues/detail?id=3039
+                needed_height.max(2),
                 TextureFilter::Nearest,
                 None,
                 1,
             );
             self.texture = Some(texture);
         }
 
         let rect = DeviceIntRect::new(
--- a/taskcluster/docker/image_builder/build-image.sh
+++ b/taskcluster/docker/image_builder/build-image.sh
@@ -33,18 +33,18 @@ if [ -n "$DOCKER_IMAGE_PARENT" ]; then
     LOAD_COMMAND="\
       /builds/worker/checkouts/gecko/mach taskcluster-load-image \
       --task-id \"$DOCKER_IMAGE_PARENT_TASK\" \
       -t \"$DOCKER_IMAGE_PARENT\" && "
 fi
 
 # Build image
 run-task \
-  --vcs-checkout "/builds/worker/checkouts/gecko" \
-  --sparse-profile build/sparse-profiles/docker-image \
+  --gecko-checkout "/builds/worker/checkouts/gecko" \
+  --gecko-sparse-profile build/sparse-profiles/docker-image \
   -- \
   sh -x -c "$LOAD_COMMAND \
   /builds/worker/checkouts/gecko/mach taskcluster-build-image \
   -t \"$IMAGE_NAME:$HASH\" \
   \"$IMAGE_NAME\""
 
 # Create artifact folder (note that this must occur after run-task)
 mkdir -p /builds/worker/workspace/artifacts
--- a/taskcluster/scripts/run-task
+++ b/taskcluster/scripts/run-task
@@ -497,16 +497,84 @@ def fetch_artifacts():
         print(FETCH_CONTENT_NOT_FOUND)
         sys.exit(1)
 
     cmd = [fetch_content, 'task-artifacts']
     subprocess.run(cmd, check=True, env=os.environ)
     print_line(b'fetches', b'finished fetching artifacts\n')
 
 
+def add_vcs_arguments(parser, project, name):
+    """Adds arguments to ArgumentParser to control VCS options for a project."""
+
+    parser.add_argument('--%s-checkout' % project,
+                        help='Directory where %s checkout should be created' %
+                             name)
+    parser.add_argument('--%s-sparse-profile' % project,
+                        help='Path to sparse profile for %s checkout' % name)
+
+
+def collect_vcs_options(args, project):
+    checkout = getattr(args, '%s_checkout' % project)
+    sparse_profile = getattr(args, '%s_sparse_profile' % project)
+
+    env_prefix = project.upper()
+
+    base_repo = os.environ.get('%s_BASE_REPOSITORY' % env_prefix)
+    head_repo = os.environ.get('%s_HEAD_REPOSITORY' % env_prefix)
+    revision = os.environ.get('%s_HEAD_REV' % env_prefix)
+    branch = os.environ.get('%s_HEAD_REF' % env_prefix)
+
+    store_path = os.environ.get('HG_STORE_PATH')
+
+    # Expand ~ in some paths.
+    if checkout:
+        checkout = os.path.expanduser(checkout)
+    if store_path:
+        store_path = os.path.expanduser(store_path)
+
+    # Some callers set the base repository to mozilla-central for historical
+    # reasons. Switch to mozilla-unified because robustcheckout works best
+    # with it.
+    if base_repo == 'https://hg.mozilla.org/mozilla-central':
+        base_repo = 'https://hg.mozilla.org/mozilla-unified'
+
+    return {
+        'store-path': store_path,
+        'project': project,
+        'env-prefix': env_prefix,
+        'checkout': checkout,
+        'sparse-profile': sparse_profile,
+        'base-repo': base_repo,
+        'head-repo': head_repo,
+        'revision': revision,
+        'branch': branch,
+    }
+
+
+def vcs_checkout_from_args(args, project):
+    options = collect_vcs_options(args, project)
+
+    if not options['checkout']:
+        if options['branch'] and not options['revision']:
+            print('task should be defined in terms of non-symbolic revision')
+            sys.exit(1)
+        return
+
+    os.environ['%s_HEAD_REV' % options['env-prefix']] = vcs_checkout(
+        options['head-repo'],
+        options['checkout'],
+        options['store-path'],
+        base_repo=options['base-repo'],
+        revision=options['revision'],
+        fetch_hgfingerprint=args.fetch_hgfingerprint,
+        branch=options['branch'],
+        sparse_profile=options['sparse-profile'])
+
+
 def main(args):
     print_line(b'setup', b'run-task started in %s\n' % os.getcwd().encode('utf-8'))
     running_as_root = IS_POSIX and os.getuid() == 0
 
     # Arguments up to '--' are ours. After are for the main task
     # to be executed.
     try:
         i = args.index('--')
@@ -514,38 +582,26 @@ def main(args):
         task_args = args[i + 1:]
     except ValueError:
         our_args = args
         task_args = []
 
     parser = argparse.ArgumentParser()
     parser.add_argument('--user', default='worker', help='user to run as')
     parser.add_argument('--group', default='worker', help='group to run as')
-    parser.add_argument('--vcs-checkout',
-                        help='Directory where Gecko checkout should be created')
-    parser.add_argument('--sparse-profile',
-                        help='Path to sparse checkout profile to use')
-    parser.add_argument('--comm-checkout',
-                        help='Directory where Comm checkout should be created')
-    parser.add_argument('--tools-checkout',
-                        help='Directory where build/tools checkout should be created')
+
+    add_vcs_arguments(parser, 'gecko', 'Firefox')
+    add_vcs_arguments(parser, 'comm', 'Comm')
+
     parser.add_argument('--fetch-hgfingerprint', action='store_true',
                         help='Fetch the latest hgfingerprint from the secrets store, '
                         'using the taskclsuerProxy')
 
     args = parser.parse_args(our_args)
 
-    # expand ~ in some paths
-    if args.vcs_checkout:
-        args.vcs_checkout = os.path.expanduser(args.vcs_checkout)
-    if args.tools_checkout:
-        args.tools_checkout = os.path.expanduser(args.tools_checkout)
-    if 'HG_STORE_PATH' in os.environ:
-        os.environ['HG_STORE_PATH'] = os.path.expanduser(os.environ['HG_STORE_PATH'])
-
     uid = gid = gids = None
     if IS_POSIX and running_as_root:
         user, group, gids = get_posix_user_group(args.user, args.group)
         uid = user.pw_uid
         gid = group.gr_gid
 
     if running_as_root and os.path.exists("/dev/kvm"):
         # Ensure kvm permissions for worker, required for Android x86
@@ -665,90 +721,35 @@ def main(args):
             os.makedirs(store_path)
         except OSError as e:
             if e.errno != errno.EEXIST:
                 raise
 
         if running_as_root:
             os.chown(store_path, uid, gid)
 
-    prepare_checkout_dir(args.vcs_checkout)
-    prepare_checkout_dir(args.tools_checkout)
-    if args.vcs_checkout or args.tools_checkout or args.comm_checkout:
+    prepare_checkout_dir(args.gecko_checkout)
+    if args.gecko_checkout or args.comm_checkout:
         prepare_hg_store_path()
 
     if IS_POSIX and running_as_root:
         # Drop permissions to requested user.
         # This code is modeled after what `sudo` was observed to do in a Docker
         # container. We do not bother calling setrlimit() because containers have
         # their own limits.
         print_line(b'setup', b'running as %s:%s\n' % (
             args.user.encode('utf-8'), args.group.encode('utf-8')))
 
         os.setgroups(gids)
         os.umask(0o22)
         os.setresgid(gid, gid, gid)
         os.setresuid(uid, uid, uid)
 
-    # Checkout the repository, setting the GECKO_HEAD_REV to the current
-    # revision hash. Revision hashes have priority over symbolic revisions. We
-    # disallow running tasks with symbolic revisions unless they have been
-    # resolved by a checkout.
-    if args.vcs_checkout:
-        base_repo = os.environ.get('GECKO_BASE_REPOSITORY')
-        # Some callers set the base repository to mozilla-central for historical
-        # reasons. Switch to mozilla-unified because robustcheckout works best
-        # with it.
-        if base_repo == 'https://hg.mozilla.org/mozilla-central':
-            base_repo = 'https://hg.mozilla.org/mozilla-unified'
-
-        os.environ['GECKO_HEAD_REV'] = vcs_checkout(
-            os.environ['GECKO_HEAD_REPOSITORY'],
-            args.vcs_checkout,
-            os.environ['HG_STORE_PATH'],
-            fetch_hgfingerprint=args.fetch_hgfingerprint,
-            base_repo=base_repo,
-            revision=os.environ.get('GECKO_HEAD_REV'),
-            branch=os.environ.get('GECKO_HEAD_REF'),
-            sparse_profile=args.sparse_profile)
-
-    elif not os.environ.get('GECKO_HEAD_REV') and \
-            os.environ.get('GECKO_HEAD_REF'):
-        print('task should be defined in terms of non-symbolic revision')
-        return 1
-
-    if args.tools_checkout:
-        vcs_checkout('https://hg.mozilla.org/build/tools',
-                     args.tools_checkout,
-                     os.environ['HG_STORE_PATH'],
-                     fetch_hgfingerprint=args.fetch_hgfingerprint,
-                     # Always check out the latest commit on default branch.
-                     # This is non-deterministic!
-                     branch='default')
-
-    # Checkout the repository, setting the COMM_HEAD_REV to the current
-    # revision hash. Revision hashes have priority over symbolic revisions. We
-    # disallow running tasks with symbolic revisions unless they have been
-    # resolved by a checkout.
-    if args.comm_checkout:
-        base_repo = os.environ.get('COMM_BASE_REPOSITORY')
-
-        os.environ['COMM_HEAD_REV'] = vcs_checkout(
-            os.environ['COMM_HEAD_REPOSITORY'],
-            args.comm_checkout,
-            os.environ['HG_STORE_PATH'],
-            fetch_hgfingerprint=args.fetch_hgfingerprint,
-            base_repo=base_repo,
-            revision=os.environ.get('COMM_HEAD_REV'),
-            branch=os.environ.get('COMM_HEAD_REF'))
-
-    elif not os.environ.get('COMM_HEAD_REV') and \
-            os.environ.get('COMM_HEAD_REF'):
-        print('task should be defined in terms of non-symbolic revision')
-        return 1
+    vcs_checkout_from_args(args, 'gecko')
+    vcs_checkout_from_args(args, 'comm')
 
     try:
         if 'GECKO_PATH' in os.environ:
             os.environ['GECKO_PATH'] = os.path.abspath(os.environ['GECKO_PATH'])
 
         if 'MOZ_FETCHES' in os.environ:
             fetch_artifacts()
 
--- a/taskcluster/taskgraph/transforms/job/hazard.py
+++ b/taskcluster/taskgraph/transforms/job/hazard.py
@@ -64,12 +64,12 @@ def docker_worker_hazard(config, job, ta
         env['MOZCONFIG'] = run['mozconfig']
 
     # build-haz-linux.sh needs this otherwise it assumes the checkout is in
     # the workspace.
     env['GECKO_DIR'] = '{workdir}/checkouts/gecko'.format(**run)
 
     worker['command'] = [
         '{workdir}/bin/run-task'.format(**run),
-        '--vcs-checkout', '{workdir}/checkouts/gecko'.format(**run),
+        '--gecko-checkout', '{workdir}/checkouts/gecko'.format(**run),
         '--',
         '/bin/bash', '-c', run['command']
     ]
--- a/taskcluster/taskgraph/transforms/job/mozharness.py
+++ b/taskcluster/taskgraph/transforms/job/mozharness.py
@@ -214,18 +214,17 @@ def mozharness_on_docker_worker_setup(co
 
     # Retry if mozharness returns TBPL_RETRY
     worker['retry-exit-status'] = [4]
 
     docker_worker_setup_secrets(config, job, taskdesc)
 
     command = [
         '{workdir}/bin/run-task'.format(**run),
-        '--vcs-checkout', env['GECKO_PATH'],
-        '--tools-checkout', '{workdir}/workspace/build/tools'.format(**run),
+        '--gecko-checkout', env['GECKO_PATH'],
     ]
     if run['comm-checkout']:
         command.append('--comm-checkout={workdir}/workspace/build/src/comm'.format(**run))
 
     command += [
         '--',
         '{workdir}/workspace/build/src/{script}'.format(
             workdir=run['workdir'],
--- a/taskcluster/taskgraph/transforms/job/mozharness_test.py
+++ b/taskcluster/taskgraph/transforms/job/mozharness_test.py
@@ -135,17 +135,17 @@ def mozharness_test_on_docker(config, jo
 
     # Support vcs checkouts regardless of whether the task runs from
     # source or not in case it is needed on an interactive loaner.
     support_vcs_checkout(config, job, taskdesc)
 
     # If we have a source checkout, run mozharness from it instead of
     # downloading a zip file with the same content.
     if test['checkout']:
-        command.extend(['--vcs-checkout', '{workdir}/checkouts/gecko'.format(**run)])
+        command.extend(['--gecko-checkout', '{workdir}/checkouts/gecko'.format(**run)])
         env['MOZHARNESS_PATH'] = '{workdir}/checkouts/gecko/testing/mozharness'.format(**run)
     else:
         env['MOZHARNESS_URL'] = {'task-reference': mozharness_url}
 
     command.extend([
         '--',
         '{workdir}/bin/test-linux.sh'.format(**run),
     ])
--- a/taskcluster/taskgraph/transforms/job/run_task.py
+++ b/taskcluster/taskgraph/transforms/job/run_task.py
@@ -41,20 +41,20 @@ run_task_schema = Schema({
 })
 
 
 def common_setup(config, job, taskdesc, command):
     run = job['run']
     if run['checkout']:
         support_vcs_checkout(config, job, taskdesc,
                              sparse=bool(run['sparse-profile']))
-        command.append('--vcs-checkout={}'.format(taskdesc['worker']['env']['GECKO_PATH']))
+        command.append('--gecko-checkout={}'.format(taskdesc['worker']['env']['GECKO_PATH']))
 
     if run['sparse-profile']:
-        command.append('--sparse-profile=build/sparse-profiles/%s' %
+        command.append('--gecko-sparse-profile=build/sparse-profiles/%s' %
                        run['sparse-profile'])
 
     taskdesc['worker'].setdefault('env', {})['MOZ_SCM_LEVEL'] = config.params['level']
 
 
 worker_defaults = {
     'cache-dotcache': False,
     'checkout': True,
--- a/taskcluster/taskgraph/transforms/job/spidermonkey.py
+++ b/taskcluster/taskgraph/transforms/job/spidermonkey.py
@@ -68,17 +68,17 @@ def docker_worker_spidermonkey(config, j
         script = "build-sm-package.sh"
     elif run['using'] == 'spidermonkey-mozjs-crate':
         script = "build-sm-mozjs-crate.sh"
     elif run['using'] == 'spidermonkey-rust-bindings':
         script = "build-sm-rust-bindings.sh"
 
     worker['command'] = [
         '{workdir}/bin/run-task'.format(**run),
-        '--vcs-checkout', '{workdir}/workspace/build/src'.format(**run),
+        '--gecko-checkout', '{workdir}/workspace/build/src'.format(**run),
         '--',
         '/bin/bash',
         '-c',
         'cd {workdir} && workspace/build/src/taskcluster/scripts/builder/{script}'.format(
             workdir=run['workdir'], script=script)
     ]
 
 
--- a/taskcluster/taskgraph/transforms/job/toolchain.py
+++ b/taskcluster/taskgraph/transforms/job/toolchain.py
@@ -147,22 +147,22 @@ def docker_worker_toolchain(config, job,
         wrapper = ''
 
     args = run.get('arguments', '')
     if args:
         args = ' ' + shell_quote(*args)
 
     sparse_profile = []
     if run.get('sparse-profile'):
-        sparse_profile = ['--sparse-profile=build/sparse-profiles/{}'
+        sparse_profile = ['--gecko-sparse-profile=build/sparse-profiles/{}'
                           .format(run['sparse-profile'])]
 
     worker['command'] = [
         '{workdir}/bin/run-task'.format(**run),
-        '--vcs-checkout={}'.format(gecko_path),
+        '--gecko-checkout={}'.format(gecko_path),
     ] + sparse_profile + [
         '--',
         'bash',
         '-c',
         'cd {} && '
         '{}workspace/build/src/taskcluster/scripts/misc/{}{}'.format(
             run['workdir'], wrapper, run['script'], args)
     ]
--- a/taskcluster/taskgraph/transforms/task.py
+++ b/taskcluster/taskgraph/transforms/task.py
@@ -1937,21 +1937,22 @@ def check_run_task_caches(config, tasks)
         if run_task:
             for arg in command[1:]:
                 if not isinstance(arg, basestring):
                     continue
 
                 if arg == '--':
                     break
 
-                if arg.startswith('--sparse-profile'):
+                if arg.startswith('--gecko-sparse-profile'):
                     if '=' not in arg:
                         raise Exception(
-                            '{} is specifying `--sparse-profile` to run-task as two arguments. '
-                            'Unable to determine if the sparse profile exists.'.format(
+                            '{} is specifying `--gecko-sparse-profile` to run-task '
+                            'as two arguments. Unable to determine if the sparse '
+                            'profile exists.'.format(
                                 task['label']))
                     _, sparse_profile = arg.split('=', 1)
                     if not os.path.exists(os.path.join(GECKO, sparse_profile)):
                         raise Exception(
                             '{} is using non-existant sparse profile {}.'.format(
                                 task['label'], sparse_profile))
                     require_sparse_cache = True
                     break
--- a/testing/gtest/bench.py
+++ b/testing/gtest/bench.py
@@ -1,11 +1,14 @@
 #!/usr/bin/env python3
 
-import sys, subprocess, json, statistics
+import sys
+import subprocess
+import json
+import statistics
 
 proc = subprocess.Popen(["./mach", "gtest", sys.argv[1]], stdout=subprocess.PIPE)
 for line in proc.stdout:
     if line.startswith(b"PERFHERDER_DATA:"):
         data = json.loads(line[len("PERFHERDER_DATA:"):].decode("utf8"))
         for suite in data["suites"]:
             for subtest in suite["subtests"]:
                 print("%4d.%03d ± %6s ms    %s.%s" % (
--- a/testing/gtest/rungtests.py
+++ b/testing/gtest/rungtests.py
@@ -13,16 +13,17 @@ import sys
 import mozcrash
 import mozinfo
 import mozlog
 import mozprocess
 from mozrunner.utils import get_stack_fixer_function
 
 log = mozlog.unstructured.getLogger('gtest')
 
+
 class GTests(object):
     # Time (seconds) to wait for test process to complete
     TEST_PROC_TIMEOUT = 1200
     # Time (seconds) in which process will be killed if it produces no output.
     TEST_PROC_NO_OUTPUT_TIMEOUT = 300
 
     def run_gtest(self, prog, xre_path, cwd, symbols_path=None,
                   utility_path=None):
@@ -49,25 +50,25 @@ class GTests(object):
         if cwd and not os.path.isdir(cwd):
             os.makedirs(cwd)
 
         stream_output = mozprocess.StreamOutput(sys.stdout)
         process_output = stream_output
         if utility_path:
             stack_fixer = get_stack_fixer_function(utility_path, symbols_path)
             if stack_fixer:
-                process_output = lambda line: stream_output(stack_fixer(line))
-
+                def f(line): return stream_output(stack_fixer(line))
+                process_output = f
 
         proc = mozprocess.ProcessHandler([prog, "-unittest",
                                          "--gtest_death_test_style=threadsafe"],
                                          cwd=cwd,
                                          env=env,
                                          processOutputLine=process_output)
-        #TODO: After bug 811320 is fixed, don't let .run() kill the process,
+        # TODO: After bug 811320 is fixed, don't let .run() kill the process,
         # instead use a timeout in .wait() and then kill to get a stack.
         proc.run(timeout=GTests.TEST_PROC_TIMEOUT,
                  outputTimeout=GTests.TEST_PROC_NO_OUTPUT_TIMEOUT)
         proc.wait()
         if proc.timedOut:
             if proc.outputTimedOut:
                 log.testFail("gtest | timed out after %d seconds without output",
                              GTests.TEST_PROC_NO_OUTPUT_TIMEOUT)
@@ -78,45 +79,46 @@ class GTests(object):
         if mozcrash.check_for_crashes(cwd, symbols_path, test_name="gtest"):
             # mozcrash will output the log failure line for us.
             return False
         result = proc.proc.returncode == 0
         if not result:
             log.testFail("gtest | test failed with return code %d", proc.proc.returncode)
         return result
 
-    def build_core_environment(self, env = {}):
+    def build_core_environment(self, env={}):
         """
         Add environment variables likely to be used across all platforms, including remote systems.
         """
         env["MOZ_XRE_DIR"] = self.xre_path
         env["MOZ_GMP_PATH"] = os.pathsep.join(
             os.path.join(self.xre_path, p, "1.0")
             for p in ('gmp-fake', 'gmp-fakeopenh264')
         )
         env["XPCOM_DEBUG_BREAK"] = "stack-and-abort"
         env["MOZ_CRASHREPORTER_NO_REPORT"] = "1"
         env["MOZ_CRASHREPORTER"] = "1"
         env["MOZ_RUN_GTEST"] = "1"
         # Normally we run with GTest default output, override this to use the TBPL test format.
         env["MOZ_TBPL_PARSER"] = "1"
 
         if not mozinfo.has_sandbox:
-          # Bug 1082193 - This is horrible. Our linux build boxes run CentOS 6,
-          # which is too old to support sandboxing. Disable sandbox for gtests
-          # on machines which don't support sandboxing until they can be
-          # upgraded, or gtests are run on test machines instead.
-          env["MOZ_DISABLE_GMP_SANDBOX"] = "1"
+            # Bug 1082193 - This is horrible. Our linux build boxes run CentOS 6,
+            # which is too old to support sandboxing. Disable sandbox for gtests
+            # on machines which don't support sandboxing until they can be
+            # upgraded, or gtests are run on test machines instead.
+            env["MOZ_DISABLE_GMP_SANDBOX"] = "1"
 
         return env
 
     def build_environment(self):
         """
-        Create and return a dictionary of all the appropriate env variables and values.
-        On a remote system, we overload this to set different values and are missing things like os.environ and PATH.
+        Create and return a dictionary of all the appropriate env variables
+        and values. On a remote system, we overload this to set different
+        values and are missing things like os.environ and PATH.
         """
         if not os.path.isdir(self.xre_path):
             raise Exception("xre_path does not exist: %s", self.xre_path)
         env = dict(os.environ)
         env = self.build_core_environment(env)
         env["PERFHERDER_ALERTING_ENABLED"] = "1"
         pathvar = ""
         if mozinfo.os == "linux":
@@ -143,47 +145,54 @@ class GTests(object):
                 env["ASAN_SYMBOLIZER_PATH"] = llvmsym
                 log.info("gtest | ASan using symbolizer at %s", llvmsym)
             else:
                 # This should be |testFail| instead of |info|. See bug 1050891.
                 log.info("gtest | Failed to find ASan symbolizer at %s", llvmsym)
 
         return env
 
+
 class gtestOptions(OptionParser):
     def __init__(self):
         OptionParser.__init__(self)
         self.add_option("--cwd",
                         dest="cwd",
                         default=os.getcwd(),
-                        help="absolute path to directory from which to run the binary")
+                        help="absolute path to directory from which "
+                             "to run the binary")
         self.add_option("--xre-path",
                         dest="xre_path",
                         default=None,
-                        help="absolute path to directory containing XRE (probably xulrunner)")
+                        help="absolute path to directory containing XRE "
+                             "(probably xulrunner)")
         self.add_option("--symbols-path",
                         dest="symbols_path",
                         default=None,
-                        help="absolute path to directory containing breakpad symbols, or the URL of a zip file containing symbols")
+                        help="absolute path to directory containing breakpad "
+                             "symbols, or the URL of a zip file containing "
+                             "symbols")
         self.add_option("--utility-path",
                         dest="utility_path",
                         default=None,
                         help="path to a directory containing utility program binaries")
 
+
 def update_mozinfo():
     """walk up directories to find mozinfo.json update the info"""
     path = os.path.abspath(os.path.realpath(os.path.dirname(__file__)))
     dirs = set()
     while path != os.path.expanduser('~'):
         if path in dirs:
             break
         dirs.add(path)
         path = os.path.split(path)[0]
     mozinfo.find_and_update_from_json(*dirs)
 
+
 def main():
     parser = gtestOptions()
     options, args = parser.parse_args()
     if not args:
         print >>sys.stderr, """Usage: %s <binary>""" % sys.argv[0]
         sys.exit(1)
     if not options.xre_path:
         print >>sys.stderr, """Error: --xre-path is required"""
@@ -200,11 +209,11 @@ def main():
                                   options.cwd,
                                   symbols_path=options.symbols_path,
                                   utility_path=options.utility_path)
     except Exception, e:
         log.error(str(e))
         result = False
     sys.exit(0 if result else 1)
 
+
 if __name__ == '__main__':
     main()
-
--- a/tools/lint/cpp-virtual-final.yml
+++ b/tools/lint/cpp-virtual-final.yml
@@ -1,28 +1,25 @@
 ---
 cpp-virtual-final:
     description: "Virtual function declarations should specify only one of
                   `virtual`, `final`, or `override`"
     level: error
     include: ['.']
-    exclude:
-        # Third-party code with cpp-virtual-final warnings:
-        - '**/node_modules'
-    extensions: ['cpp', 'h', 'mm']
+    extensions: ['cc', 'cpp', 'h', 'mm']
     type: regex
     #
     # This lint warns about:
     #
     #   virtual void Bad1() final
     #   void Bad2() final override
     #   void Bad3() override final
     #
     # Caveats: This lint ...
     #
     #   * Doesn't warn about `virtual void NotBad() override` at this time
-    #     because there are 8000+ instances.
+    #     because there are currently 6963 instances of this pattern.
     #
     #   * Doesn't warn about function declarations that span multiple lines
     #     because the regex can't match across line breaks.
     #
     #           virtual    )    final  |  final  override  |  override  final
     payload: ^ *virtual .+\).+\bfinal\b|\bfinal +override\b|\boverride +final\b
--- a/tools/lint/flake8.yml
+++ b/tools/lint/flake8.yml
@@ -27,16 +27,17 @@ flake8:
         - python/mozversioncontrol
         - python/safety
         - security/
         - security/manager
         - taskcluster
         - testing/awsy
         - testing/firefox-ui
         - testing/geckodriver
+        - testing/gtest
         - testing/mach_commands.py
         - testing/marionette/client
         - testing/marionette/harness
         - testing/marionette/puppeteer
         - testing/mochitest
         - testing/mozbase
         - testing/mozharness/configs
         - testing/mozharness/mozfile