Bug 1431753 - Add a test-coverage test suite, similar to test-verify, that runs tests in isolation in coverage mode. r=gbrown,jmaher
author      Marco Castelluccio <mcastelluccio@mozilla.com>
date        Thu, 19 Apr 2018 19:19:31 +0200
changeset   468227 4ab22db3a416642d05354e782a1088990cd2bcd3
parent      468226 8ce15c227c6600eb6235a6e99c4a1f7882be3227
child       468228 9520b795d6f3ff81df01cc0158d7227a06dc568e
push id     9165
push user   asasaki@mozilla.com
push date   Thu, 26 Apr 2018 21:04:54 +0000
treeherder  mozilla-beta@064c3804de2e
reviewers   gbrown, jmaher
bugs        1431753
milestone   61.0a1
python/mozbuild/mozbuild/schedules.py
taskcluster/ci/test/misc.yml
taskcluster/ci/test/test-sets.yml
taskcluster/ci/test/web-platform.yml
taskcluster/taskgraph/transforms/tests.py
testing/mozharness/configs/web_platform_tests/prod_config.py
testing/mozharness/configs/web_platform_tests/prod_config_windows.py
testing/mozharness/configs/web_platform_tests/prod_config_windows_taskcluster.py
testing/mozharness/mozharness/mozilla/testing/codecoverage.py
testing/mozharness/mozharness/mozilla/testing/per_test_base.py
testing/mozharness/mozharness/mozilla/testing/verify_tools.py
testing/mozharness/scripts/android_emulator_unittest.py
testing/mozharness/scripts/desktop_unittest.py
testing/mozharness/scripts/web_platform_tests.py
--- a/python/mozbuild/mozbuild/schedules.py
+++ b/python/mozbuild/mozbuild/schedules.py
@@ -19,16 +19,18 @@ INCLUSIVE_COMPONENTS = [
     'docs',
     'py-lint',
     'js-lint',
     'yaml-lint',
     # inclusive test suites -- these *only* run when certain files have changed
     'jittest',
     'test-verify',
     'test-verify-wpt',
+    'test-coverage',
+    'test-coverage-wpt',
     'jsreftest',
 ]
 INCLUSIVE_COMPONENTS = sorted(INCLUSIVE_COMPONENTS)
 
 # Exclusive components are those which are scheduled by default, but for which
 # some files *only* affect that component.  For example, most files affect all
 # platforms, but platform-specific files exclusively affect a single platform.
 # These components are assigned to `SCHEDULES.exclusive` with `=`.
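
The two new entries make test-coverage an inclusive component: it is scheduled only when a changed file is annotated with it. A minimal moz.build sketch of such an annotation (hypothetical file pattern, not part of this patch; moz.build files are Python):

    # Hypothetical moz.build annotation: changes to files matching the pattern
    # would schedule the inclusive 'test-coverage' component declared above.
    with Files('testing/mozharness/**'):
        SCHEDULES.inclusive += ['test-coverage']
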
--- a/taskcluster/ci/test/misc.yml
+++ b/taskcluster/ci/test/misc.yml
@@ -92,8 +92,47 @@ test-verify:
                     - remove_executables.py
                 macosx.*:
                     - unittests/mac_unittest.py
                 windows.*:
                     - unittests/win_taskcluster_unittest.py
         no-read-buildbot-config: true
         extra-options:
             - --verify
+
+test-coverage:
+    description: "Per-test coverage"
+    suite: test-coverage
+    treeherder-symbol: TC
+    loopback-video: true
+    instance-size: default
+    max-run-time: 10800
+    allow-software-gl-layers: false
+    run-on-projects:
+        by-test-platform:
+            # do not run on beta or release: usually just confirms earlier results
+            linux64-ccov/.*: ['trunk', 'try']
+            windows10-64-ccov/debug: ['trunk', 'try']
+            default: []
+    tier:
+        by-test-platform:
+            windows10-64-asan.*: 3
+            default: 2
+    mozharness:
+        script:
+            by-test-platform:
+                android.*: android_emulator_unittest.py
+                default: desktop_unittest.py
+        config:
+            by-test-platform:
+                android.*:
+                    - android/android_common.py
+                    - android/androidarm_4_3.py
+                linux.*:
+                    - unittests/linux_unittest.py
+                    - remove_executables.py
+                macosx.*:
+                    - unittests/mac_unittest.py
+                windows.*:
+                    - unittests/win_taskcluster_unittest.py
+        no-read-buildbot-config: true
+        extra-options:
+            - --per-test-coverage
--- a/taskcluster/ci/test/test-sets.yml
+++ b/taskcluster/ci/test/test-sets.yml
@@ -32,16 +32,18 @@ common-tests:
     - mochitest-clipboard
     - mochitest-devtools-chrome
     - mochitest-gpu
     - mochitest-media
     - mochitest-webgl
     - reftest
     - reftest-no-accel
     - telemetry-tests-client
+    - test-coverage
+    - test-coverage-wpt
     - test-verify
     - test-verify-wpt
     - xpcshell
 
 web-platform-tests:
     - web-platform-tests
     - web-platform-tests-reftests
     - web-platform-tests-wdspec
@@ -180,16 +182,18 @@ windows-tests:
     - mochitest-chrome
     - mochitest-clipboard
     - mochitest-devtools-chrome
     - mochitest-gpu
     - mochitest-media
     - mochitest-webgl
     - reftest
     - reftest-no-accel
+    - test-coverage
+    - test-coverage-wpt
     - test-verify
     - test-verify-wpt
     - web-platform-tests
     - web-platform-tests-reftests
     - xpcshell
 
 windows-talos:
     - talos-chrome
--- a/taskcluster/ci/test/web-platform.yml
+++ b/taskcluster/ci/test/web-platform.yml
@@ -158,8 +158,27 @@ test-verify-wpt:
             default: ['trunk', 'try']
     tier:
         by-test-platform:
             windows10-64-asan.*: 3
             default: 2
     mozharness:
         extra-options:
             - --verify
+
+test-coverage-wpt:
+    description: "Per web-platform test coverage"
+    suite: test-coverage-wpt
+    treeherder-symbol: TCw
+    max-run-time: 10800
+    run-on-projects:
+        by-test-platform:
+            # do not run on beta or release: usually just confirms earlier results
+            linux64-ccov/.*: ['trunk', 'try']
+            windows10-64-ccov/debug: ['trunk', 'try']
+            default: []
+    tier:
+        by-test-platform:
+            windows10-64-asan.*: 3
+            default: 2
+    mozharness:
+        extra-options:
+            - --per-test-coverage
--- a/taskcluster/taskgraph/transforms/tests.py
+++ b/taskcluster/taskgraph/transforms/tests.py
@@ -668,17 +668,17 @@ def handle_suite_category(config, tests)
             suite = flavor = test['suite']
 
         test.setdefault('attributes', {})
         test['attributes']['unittest_suite'] = suite
         test['attributes']['unittest_flavor'] = flavor
 
         script = test['mozharness']['script']
         category_arg = None
-        if suite == 'test-verify':
+        if suite == 'test-verify' or suite == 'test-coverage':
             pass
         elif script == 'android_emulator_unittest.py':
             category_arg = '--test-suite'
         elif script == 'desktop_unittest.py':
             category_arg = '--{}-suite'.format(suite)
 
         if category_arg:
             test['mozharness'].setdefault('extra-options', [])
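
The branch above exempts the per-test suites from the usual suite-category flag, since the mozharness scripts derive the suites to run from the changed files themselves. A standalone sketch of the branch's behavior (illustrative only, not part of the patch):

    # Mirrors the transform's branch: per-test suites pass no category flag.
    def category_arg_for(suite, script):
        if suite in ('test-verify', 'test-coverage'):
            return None
        if script == 'android_emulator_unittest.py':
            return '--test-suite'
        if script == 'desktop_unittest.py':
            return '--{}-suite'.format(suite)
        return None

    assert category_arg_for('test-coverage', 'desktop_unittest.py') is None
    assert category_arg_for('mochitest', 'desktop_unittest.py') == '--mochitest-suite'
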
--- a/testing/mozharness/configs/web_platform_tests/prod_config.py
+++ b/testing/mozharness/configs/web_platform_tests/prod_config.py
@@ -31,11 +31,11 @@ config = {
 
     "blob_uploader_auth_file" : os.path.join(os.getcwd(), "oauth.txt"),
 
     "download_minidump_stackwalk": True,
 
     # this would normally be in "exes", but "exes" is clobbered by remove_executables
     "geckodriver": "%(abs_test_bin_dir)s/geckodriver",
 
-    "verify_category": "web-platform",
+    "per_test_category": "web-platform",
 }
 
--- a/testing/mozharness/configs/web_platform_tests/prod_config_windows.py
+++ b/testing/mozharness/configs/web_platform_tests/prod_config_windows.py
@@ -37,10 +37,10 @@ config = {
     "default_blob_upload_servers": [
          "https://blobupload.elasticbeanstalk.com",
     ],
 
     "blob_uploader_auth_file" : os.path.join(os.getcwd(), "oauth.txt"),
 
     "download_minidump_stackwalk": True,
 
-    "verify_category": "web-platform",
+    "per_test_category": "web-platform",
 }
--- a/testing/mozharness/configs/web_platform_tests/prod_config_windows_taskcluster.py
+++ b/testing/mozharness/configs/web_platform_tests/prod_config_windows_taskcluster.py
@@ -59,10 +59,10 @@ config = {
             'enabled': True
         }
     ],
 
     "blob_uploader_auth_file" : 'C:/builds/oauth.txt',
 
     "download_minidump_stackwalk": True,
 
-    "verify_category": "web-platform",
+    "per_test_category": "web-platform",
 }
--- a/testing/mozharness/mozharness/mozilla/testing/codecoverage.py
+++ b/testing/mozharness/mozharness/mozilla/testing/codecoverage.py
@@ -9,65 +9,83 @@ import sys
 import tarfile
 import tempfile
 
 import mozinfo
 from mozharness.base.script import (
     PreScriptAction,
     PostScriptAction,
 )
+from mozharness.mozilla.testing.per_test_base import SingleTestMixin
+
 
 _here = os.path.abspath(os.path.dirname(__file__))
 _tooltool_path = os.path.normpath(os.path.join(_here, '..', '..', '..',
                                                'external_tools',
                                                'tooltool.py'))
 
 code_coverage_config_options = [
     [["--code-coverage"],
      {"action": "store_true",
       "dest": "code_coverage",
       "default": False,
       "help": "Whether gcov c++ code coverage should be run."
       }],
+    [["--per-test-coverage"],
+     {"action": "store_true",
+      "dest": "per_test_coverage",
+      "default": False,
+      "help": "Whether per-test coverage should be collected."
+      }],
     [["--disable-ccov-upload"],
      {"action": "store_true",
       "dest": "disable_ccov_upload",
       "default": False,
       "help": "Whether test run should package and upload code coverage data."
       }],
     [["--jsd-code-coverage"],
      {"action": "store_true",
       "dest": "jsd_code_coverage",
       "default": False,
       "help": "Whether JSDebugger code coverage should be run."
       }],
 ]
 
 
-class CodeCoverageMixin(object):
+class CodeCoverageMixin(SingleTestMixin):
     """
     Mixin for setting GCOV_PREFIX during test execution, packaging up
     the resulting .gcda files and uploading them to blobber.
     """
     gcov_dir = None
     jsvm_dir = None
     prefix = None
 
+    def __init__(self):
+        super(CodeCoverageMixin, self).__init__()
+
     @property
     def code_coverage_enabled(self):
         try:
             if self.config.get('code_coverage'):
                 return True
 
             # XXX workaround because bug 1110465 is hard
             return 'ccov' in self.buildbot_config['properties']['stage_platform']
         except (AttributeError, KeyError, TypeError):
             return False
 
     @property
+    def per_test_coverage(self):
+        try:
+            return bool(self.config.get('per_test_coverage'))
+        except (AttributeError, KeyError, TypeError):
+            return False
+
+    @property
     def ccov_upload_disabled(self):
         try:
             if self.config.get('disable_ccov_upload'):
                 return True
             return False
         except (AttributeError, KeyError, TypeError):
             return False
 
@@ -77,40 +95,32 @@ class CodeCoverageMixin(object):
             if self.config.get('jsd_code_coverage'):
                 return True
 
             # XXX workaround because bug 1110465 is hard
             return 'jsdcov' in self.buildbot_config['properties']['stage_platform']
         except (AttributeError, KeyError, TypeError):
             return False
 
-    @PreScriptAction('run-tests')
-    def _set_gcov_prefix(self, action):
+    @PostScriptAction('download-and-extract')
+    def setup_coverage_tools(self, action, success=None):
         if not self.code_coverage_enabled:
             return
 
         if mozinfo.os == 'linux':
             self.prefix = '/builds/worker/workspace/build/src/'
             strip_count = self.prefix.count('/')
         elif mozinfo.os == 'win':
             self.prefix = 'z:/build/build/src/'
             # Add 1 as on Windows the path where the compiler tries to write the
             # gcda files has an additional 'obj-firefox' component.
             strip_count = self.prefix.count('/') + 1
 
         os.environ['GCOV_PREFIX_STRIP'] = str(strip_count)
 
-        # Set the GCOV directory.
-        self.gcov_dir = tempfile.mkdtemp()
-        os.environ['GCOV_PREFIX'] = self.gcov_dir
-
-        # Set JSVM directory.
-        self.jsvm_dir = tempfile.mkdtemp()
-        os.environ['JS_CODE_COVERAGE_OUTPUT_DIR'] = self.jsvm_dir
-
         # Install grcov on the test machine
         # Get the path to the build machines gcno files.
         self.url_to_gcno = self.query_build_dir_url('target.code-coverage-gcno.zip')
         self.url_to_chrome_map = self.query_build_dir_url('chrome-map.json')
         dirs = self.query_abs_dirs()
 
         # Create the grcov directory, get the tooltool manifest, and finally
         # download and unpack the grcov binary.
@@ -128,16 +138,107 @@ class CodeCoverageMixin(object):
 
         cmd = [sys.executable, _tooltool_path, '--url', 'https://tooltool.mozilla-releng.net/', 'fetch', \
             '-m', manifest, '-o', '-c', '/builds/worker/tooltool-cache']
         self.run_command(cmd, cwd=self.grcov_dir)
 
         with tarfile.open(os.path.join(self.grcov_dir, tar_file)) as tar:
             tar.extractall(self.grcov_dir)
 
+        # Download the gcno archive from the build machine.
+        self.download_file(self.url_to_gcno, parent_dir=self.grcov_dir)
+
+        # Download the chrome-map.json file from the build machine.
+        self.download_file(self.url_to_chrome_map, parent_dir=self.grcov_dir)
+
+    @PostScriptAction('download-and-extract')
+    def find_tests_for_coverage(self, action, success=None):
+        """
+           For each file modified on this push, determine if the modified file
+           is a test, by searching test manifests. Populate self.suites
+           with test files, organized by suite.
+
+           This depends on test manifests, so can only run after test zips have
+           been downloaded and extracted.
+        """
+        if not self.per_test_coverage:
+            return
+
+        self.find_modified_tests()
+        # TODO: Add tests that haven't been run for a while (a week? N pushes?)
+
+    @property
+    def coverage_args(self):
+        return []
+
+    def set_coverage_env(self, env):
+        # Set the GCOV directory.
+        gcov_dir = tempfile.mkdtemp()
+        env['GCOV_PREFIX'] = gcov_dir
+
+        # Set JSVM directory.
+        jsvm_dir = tempfile.mkdtemp()
+        env['JS_CODE_COVERAGE_OUTPUT_DIR'] = jsvm_dir
+
+        return (gcov_dir, jsvm_dir)
+
+    @PreScriptAction('run-tests')
+    def _set_gcov_prefix(self, action):
+        if not self.code_coverage_enabled:
+            return
+
+        if self.per_test_coverage:
+            return
+
+        self.gcov_dir, self.jsvm_dir = self.set_coverage_env(os.environ)
+
+    def parse_coverage_artifacts(self, gcov_dir, jsvm_dir):
+        jsvm_output_file = 'jsvm_lcov_output.info'
+        grcov_output_file = 'grcov_lcov_output.info'
+
+        dirs = self.query_abs_dirs()
+
+        # Zip gcda files (they will be passed as input to grcov).
+        file_path_gcda = os.path.join(os.getcwd(), 'code-coverage-gcda.zip')
+        self.run_command(['zip', '-q', '-0', '-r', file_path_gcda, '.'], cwd=gcov_dir)
+
+        sys.path.append(dirs['abs_test_install_dir'])
+        sys.path.append(os.path.join(dirs['abs_test_install_dir'], 'mozbuild/codecoverage'))
+
+        from lcov_rewriter import LcovFileRewriter
+        jsvm_files = [os.path.join(jsvm_dir, e) for e in os.listdir(jsvm_dir)]
+        rewriter = LcovFileRewriter(os.path.join(self.grcov_dir, 'chrome-map.json'))
+        rewriter.rewrite_files(jsvm_files, jsvm_output_file, '')
+
+        # Run grcov on the zipped .gcno and .gcda files.
+        grcov_command = [
+            os.path.join(self.grcov_dir, 'grcov'),
+            '-t', 'lcov',
+            '-p', self.prefix,
+            '--ignore-dir', 'gcc*',
+            '--ignore-dir', 'vs2017_*',
+            os.path.join(self.grcov_dir, 'target.code-coverage-gcno.zip'), file_path_gcda
+        ]
+
+        if mozinfo.os == 'win':
+            grcov_command += ['--llvm']
+
+        # get_output_from_command returns a tuple: the path to the lcov output
+        # and the path to the standard error output.
+        tmp_output_file, _ = self.get_output_from_command(
+            grcov_command,
+            silent=True,
+            save_tmpfiles=True,
+            return_type='files',
+            throw_exception=True,
+        )
+        shutil.move(tmp_output_file, grcov_output_file)
+
+        return grcov_output_file, jsvm_output_file
+
     @PostScriptAction('run-tests')
     def _package_coverage_data(self, action, success=None):
         if self.jsd_code_coverage_enabled:
             # Setup the command for compression
             dirs = self.query_abs_dirs()
             jsdcov_dir = dirs['abs_blob_upload_dir']
             zipFile = os.path.join(jsdcov_dir, "jsdcov_artifacts.zip")
             command = ["zip", "-r", "-q", zipFile, ".", "-i", "jscov*.json"]
@@ -151,72 +252,33 @@ class CodeCoverageMixin(object):
                     os.remove(os.path.join(jsdcov_dir, filename))
 
             self.info("Completed compression of JSDCov artifacts!")
             self.info("Path to JSDCov compressed artifacts: " + zipFile)
 
         if not self.code_coverage_enabled:
             return
 
+        if self.per_test_coverage:
+            return
+
         del os.environ['GCOV_PREFIX_STRIP']
         del os.environ['GCOV_PREFIX']
         del os.environ['JS_CODE_COVERAGE_OUTPUT_DIR']
 
         if not self.ccov_upload_disabled:
-            dirs = self.query_abs_dirs()
-
-            # Zip gcda files (will be given in input to grcov).
-            file_path_gcda = os.path.join(os.getcwd(), 'code-coverage-gcda.zip')
-            self.run_command(['zip', '-q', '-0', '-r', file_path_gcda, '.'], cwd=self.gcov_dir)
-
-            sys.path.append(dirs['abs_test_install_dir'])
-            sys.path.append(os.path.join(dirs['abs_test_install_dir'], 'mozbuild/codecoverage'))
-
-            # Download the chrome-map.json file from the build machine.
-            self.download_file(self.url_to_chrome_map)
-
-            from lcov_rewriter import LcovFileRewriter
-            jsvm_files = [os.path.join(self.jsvm_dir, e) for e in os.listdir(self.jsvm_dir)]
-            rewriter = LcovFileRewriter('chrome-map.json')
-            rewriter.rewrite_files(jsvm_files, 'jsvm_lcov_output.info', '')
-
-            # Package JSVM coverage data.
-            file_path_jsvm = os.path.join(dirs['abs_blob_upload_dir'], 'code-coverage-jsvm.zip')
-            self.run_command(['zip', '-q', file_path_jsvm, 'jsvm_lcov_output.info'])
-
-            # GRCOV post-processing
-            # Download the gcno from the build machine.
-            self.download_file(self.url_to_gcno, parent_dir=self.grcov_dir)
+            grcov_output_file, jsvm_output_file = self.parse_coverage_artifacts(self.gcov_dir, self.jsvm_dir)
 
-            # Run grcov on the zipped .gcno and .gcda files.
-            grcov_command = [
-                os.path.join(self.grcov_dir, 'grcov'),
-                '-t', 'lcov',
-                '-p', self.prefix,
-                '--ignore-dir', 'gcc*',
-                '--ignore-dir', 'vs2017_*',
-                os.path.join(self.grcov_dir, 'target.code-coverage-gcno.zip'), file_path_gcda
-            ]
-
-            if mozinfo.os == 'win':
-                grcov_command += ['--llvm']
-
-            # 'grcov_output' will be a tuple, the first variable is the path to the lcov output,
-            # the other is the path to the standard error output.
-            grcov_output, _ = self.get_output_from_command(
-                grcov_command,
-                silent=True,
-                save_tmpfiles=True,
-                return_type='files',
-                throw_exception=True,
-            )
-            output_file_name = 'grcov_lcov_output.info'
-            shutil.move(grcov_output, os.path.join(self.grcov_dir, output_file_name))
+            dirs = self.query_abs_dirs()
 
             # Zip the grcov output and upload it.
             self.run_command(
-                ['zip', '-q', os.path.join(dirs['abs_blob_upload_dir'], 'code-coverage-grcov.zip'), output_file_name],
-                cwd=self.grcov_dir
+                ['zip', '-q', os.path.join(dirs['abs_blob_upload_dir'], 'code-coverage-grcov.zip'), grcov_output_file]
+            )
+
+            # Zip the JSVM coverage data and upload it.
+            self.run_command(
+                ['zip', '-q', os.path.join(dirs['abs_blob_upload_dir'], 'code-coverage-jsvm.zip'), jsvm_output_file]
             )
 
         shutil.rmtree(self.gcov_dir)
         shutil.rmtree(self.jsvm_dir)
         shutil.rmtree(self.grcov_dir)
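
Together, set_coverage_env and parse_coverage_artifacts give each test its own coverage cycle: fresh GCOV/JSVM output directories before the harness starts, converted to lcov afterwards. A self-contained sketch of that cycle (the harness call and the test list are stand-ins, not the patch's code):

    import os
    import shutil
    import subprocess
    import tempfile

    def set_coverage_env(env):
        # As in CodeCoverageMixin.set_coverage_env: route compiler (.gcda) and
        # JS engine coverage output to fresh temporary directories.
        gcov_dir = tempfile.mkdtemp()
        env['GCOV_PREFIX'] = gcov_dir
        jsvm_dir = tempfile.mkdtemp()
        env['JS_CODE_COVERAGE_OUTPUT_DIR'] = jsvm_dir
        return gcov_dir, jsvm_dir

    for per_test_args in [['test_example.html']]:  # hypothetical test list
        env = os.environ.copy()
        gcov_dir, jsvm_dir = set_coverage_env(env)
        subprocess.call(['true'], env=env)  # stand-in for the test harness
        # parse_coverage_artifacts(gcov_dir, jsvm_dir) would zip the .gcda
        # files and run grcov here.
        shutil.rmtree(gcov_dir)
        shutil.rmtree(jsvm_dir)
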
--- a/testing/mozharness/mozharness/mozilla/testing/per_test_base.py
+++ b/testing/mozharness/mozharness/mozilla/testing/per_test_base.py
@@ -9,32 +9,23 @@ import argparse
 import os
 import posixpath
 import re
 import sys
 import mozinfo
 from manifestparser import TestManifest
 from mozharness.base.script import PostScriptAction
 
-verify_config_options = [
-    [["--verify"],
-     {"action": "store_true",
-      "dest": "verify",
-      "default": "False",
-      "help": "Run additional verification on modified tests."
-      }],
-]
 
-
-class VerifyToolsMixin(object):
-    """Utility functions for test verification."""
+class SingleTestMixin(object):
+    """Utility functions for per-test testing like test verification and per-test coverage."""
 
     def __init__(self):
-        self.verify_suites = {}
-        self.verify_downloaded = False
+        self.suites = {}
+        self.tests_downloaded = False
         self.reftest_test_dir = None
         self.jsreftest_test_dir = None
 
     def _find_misc_tests(self, dirs, changed_files):
         manifests = [
             (os.path.join(dirs['abs_mochitest_dir'], 'tests', 'mochitest.ini'), 'plain'),
             (os.path.join(dirs['abs_mochitest_dir'], 'chrome', 'chrome.ini'), 'chrome'),
             (os.path.join(dirs['abs_mochitest_dir'], 'browser', 'browser-chrome.ini'), 'browser-chrome'),
@@ -42,39 +33,39 @@ class VerifyToolsMixin(object):
             (os.path.join(dirs['abs_xpcshell_dir'], 'tests', 'xpcshell.ini'), 'xpcshell'),
         ]
         tests_by_path = {}
         for (path, suite) in manifests:
             if os.path.exists(path):
                 man = TestManifest([path], strict=False)
                 active = man.active_tests(exists=False, disabled=True, filters=[], **mozinfo.info)
                 # Remove disabled tests. Also, remove tests with the same path as
-                # disabled tests, even if they are not disabled, since test-verify
+                # disabled tests, even if they are not disabled, since per-test mode
                 # specifies tests by path (it cannot distinguish between two or more
                 # tests with the same path specified in multiple manifests).
                 disabled = [t['relpath'] for t in active if 'disabled' in t]
                 new_by_path = {t['relpath']:(suite,t.get('subsuite')) \
                                for t in active if 'disabled' not in t and \
                                t['relpath'] not in disabled}
                 tests_by_path.update(new_by_path)
-                self.info("Verification updated with manifest %s" % path)
+                self.info("Per-test run updated with manifest %s" % path)
 
         ref_manifests = [
             (os.path.join(dirs['abs_reftest_dir'], 'tests', 'layout', 'reftests', 'reftest.list'), 'reftest'),
             (os.path.join(dirs['abs_reftest_dir'], 'tests', 'testing', 'crashtest', 'crashtests.list'), 'crashtest'),
         ]
         sys.path.append(dirs['abs_reftest_dir'])
         import manifest
         self.reftest_test_dir = os.path.join(dirs['abs_reftest_dir'], 'tests')
         for (path, suite) in ref_manifests:
             if os.path.exists(path):
                 man = manifest.ReftestManifest()
                 man.load(path)
                 tests_by_path.update({os.path.relpath(t,self.reftest_test_dir):(suite,None) for t in man.files})
-                self.info("Verification updated with manifest %s" % path)
+                self.info("Per-test run updated with manifest %s" % path)
 
         suite = 'jsreftest'
         self.jsreftest_test_dir = os.path.join(dirs['abs_test_install_dir'], 'jsreftest', 'tests')
         path = os.path.join(self.jsreftest_test_dir, 'jstests.list')
         if os.path.exists(path):
             man = manifest.ReftestManifest()
             man.load(path)
             for t in man.files:
@@ -84,47 +75,47 @@ class VerifyToolsMixin(object):
                 #    js/src/tests/test262/.../some_test.js
                 epos = t.find('=')
                 if epos > 0:
                     relpath = t[epos+1:]
                     relpath = os.path.join('js', 'src', 'tests', relpath)
                     tests_by_path.update({relpath:(suite,None)})
                 else:
                     self.warning("unexpected jsreftest test format: %s" % str(t))
-            self.info("Verification updated with manifest %s" % path)
+            self.info("Per-test run updated with manifest %s" % path)
 
         # for each changed file, determine if it is a test file, and what suite it is in
         for file in changed_files:
             # manifest paths use os.sep (like backslash on Windows) but
             # automation-relevance uses posixpath.sep
             file = file.replace(posixpath.sep, os.sep)
             entry = tests_by_path.get(file)
             if entry:
-                self.info("Verification found test %s" % file)
+                self.info("Per-test run found test %s" % file)
                 subsuite_mapping = {
                     ('browser-chrome', 'clipboard') : 'browser-chrome-clipboard',
                     ('chrome', 'clipboard') : 'chrome-clipboard',
                     ('plain', 'clipboard') : 'plain-clipboard',
                     ('browser-chrome', 'devtools') : 'mochitest-devtools-chrome',
                     ('browser-chrome', 'gpu') : 'browser-chrome-gpu',
                     ('browser-chrome', 'screenshots') : 'browser-chrome-screenshots',
                     ('chrome', 'gpu') : 'chrome-gpu',
                     ('plain', 'gpu') : 'plain-gpu',
                     ('plain', 'media') : 'mochitest-media',
                     ('plain', 'webgl') : 'mochitest-gl',
                 }
                 if entry in subsuite_mapping:
                     suite = subsuite_mapping[entry]
                 else:
                     suite = entry[0]
-                suite_files = self.verify_suites.get(suite)
+                suite_files = self.suites.get(suite)
                 if not suite_files:
                     suite_files = []
                 suite_files.append(file)
-                self.verify_suites[suite] = suite_files
+                self.suites[suite] = suite_files
 
     def _find_wpt_tests(self, dirs, changed_files):
         # Setup sys.path to include all the dependencies required to import
         # the web-platform-tests manifest parser. web-platform-tests provides
         # the localpaths.py to do the path manipulation, which we load,
         # providing the __file__ variable so it can resolve the relative
         # paths correctly.
         paths_file = os.path.join(dirs['abs_wpttest_dir'],
@@ -141,163 +132,160 @@ class VerifyToolsMixin(object):
             if type not in ["testharness", "reftest", "wdspec"]:
                 continue
             repo_path = os.path.join(repo_tests_path, path)
             # manifest paths use os.sep (like backslash on Windows) but
             # automation-relevance uses posixpath.sep
             repo_path = repo_path.replace(os.sep, posixpath.sep)
             if repo_path in changed_files:
                 self.info("found web-platform test file '%s', type %s" % (path, type))
-                suite_files = self.verify_suites.get(type)
+                suite_files = self.suites.get(type)
                 if not suite_files:
                     suite_files = []
                 path = os.path.join(tests_path, path)
                 suite_files.append(path)
-                self.verify_suites[type] = suite_files
+                self.suites[type] = suite_files
 
-    @PostScriptAction('download-and-extract')
-    def find_tests_for_verification(self, action, success=None):
+    def find_modified_tests(self):
         """
            For each file modified on this push, determine if the modified file
-           is a test, by searching test manifests. Populate self.verify_suites
+           is a test, by searching test manifests. Populate self.suites
            with test files, organized by suite.
 
            This depends on test manifests, so can only run after test zips have
            been downloaded and extracted.
         """
-
-        if self.config.get('verify') != True:
-            return
-
         repository = os.environ.get("GECKO_HEAD_REPOSITORY")
         revision = os.environ.get("GECKO_HEAD_REV")
         if not repository or not revision:
-            self.warning("unable to verify tests: no repo or revision!")
+            self.warning("unable to run tests in per-test mode: no repo or revision!")
             return []
 
         def get_automationrelevance():
             response = self.load_json_url(url)
             return response
 
         dirs = self.query_abs_dirs()
         mozinfo.find_and_update_from_json(dirs['abs_test_install_dir'])
         e10s = self.config.get('e10s', False)
         mozinfo.update({"e10s": e10s})
         headless = self.config.get('headless', False)
         mozinfo.update({"headless": headless})
         # FIXME(emilio): Need to update test expectations.
         mozinfo.update({'stylo': True})
         mozinfo.update({'verify': True})
-        self.info("Verification using mozinfo: %s" % str(mozinfo.info))
+        self.info("Per-test run using mozinfo: %s" % str(mozinfo.info))
 
         # determine which files were changed on this push
         url = '%s/json-automationrelevance/%s' % (repository.rstrip('/'), revision)
         contents = self.retry(get_automationrelevance, attempts=2, sleeptime=10)
         changed_files = set()
         for c in contents['changesets']:
             self.info(" {cset} {desc}".format(
                 cset=c['node'][0:12],
                 desc=c['desc'].splitlines()[0].encode('ascii', 'ignore')))
             changed_files |= set(c['files'])
 
-        if self.config.get('verify_category') == "web-platform":
+        if self.config.get('per_test_category') == "web-platform":
             self._find_wpt_tests(dirs, changed_files)
         else:
             self._find_misc_tests(dirs, changed_files)
 
-        self.verify_downloaded = True
+        self.tests_downloaded = True
 
-    def query_verify_args(self, suite):
+    def query_args(self, suite):
         """
            For the specified suite, return an array of command line arguments to
-           be passed to test harnesses when running in verify mode.
+           be passed to test harnesses when running in per-test mode.
 
            Each array element is an array of command line arguments for a modified
            test in the suite.
         """
-
-        # Limit each test harness run to 15 minutes, to avoid task timeouts
-        # when verifying long-running tests.
-        MAX_TIME_PER_TEST = 900
+        # not in verify or per-test coverage mode: run once, with no additional args
+        if not self.per_test_coverage and not self.verify_enabled:
+            return [[]]
 
-        if self.config.get('verify') != True:
-            # not in verify mode: run once, with no additional args
-            args = [[]]
-        else:
-            # in verify mode, run nothing by default (unsupported suite or no files modified)
-            args = []
-            # otherwise, run once for each file in requested suite
-            references = re.compile(r"(-ref|-notref|-noref|-noref.)\.")
-            files = []
-            jsreftest_extra_dir = os.path.join('js', 'src', 'tests')
-            # For some suites, the test path needs to be updated before passing to
-            # the test harness.
-            for file in self.verify_suites.get(suite):
-                if (self.config.get('verify_category') != "web-platform" and
-                    suite in ['reftest', 'crashtest']):
-                    file = os.path.join(self.reftest_test_dir, file)
-                elif (self.config.get('verify_category') != "web-platform" and
-                      suite == 'jsreftest'):
-                    file = os.path.relpath(file, jsreftest_extra_dir)
-                    file = os.path.join(self.jsreftest_test_dir, file)
-                file = file.replace(os.sep, posixpath.sep)
-                files.append(file)
-            for file in files:
-                if self.config.get('verify_category') == "web-platform":
-                    args.append(['--verify-log-full', '--verify', file])
-                else:
-                    if suite == 'reftest':
-                        # Special handling for modified reftest reference files:
-                        #  - if both test and reference modified, verify the test file
-                        #  - if only reference modified, verify the test file
-                        nonref = references.sub('.', file)
-                        if nonref != file:
-                            file = None
-                            if nonref not in files and os.path.exists(nonref):
-                                file = nonref
-                    if file:
-                        args.append(['--verify-max-time=%d' % MAX_TIME_PER_TEST, '--verify', file])
-            self.info("Verification file(s) for '%s': %s" % (suite, files))
+        references = re.compile(r"(-ref|-notref|-noref|-noref.)\.")
+        files = []
+        jsreftest_extra_dir = os.path.join('js', 'src', 'tests')
+        # For some suites, the test path needs to be updated before passing to
+        # the test harness.
+        for file in self.suites.get(suite):
+            if (self.config.get('per_test_category') != "web-platform" and
+                suite in ['reftest', 'crashtest']):
+                file = os.path.join(self.reftest_test_dir, file)
+                if suite == 'reftest':
+                    # Special handling for modified reftest reference files:
+                    #  - if both test and reference modified, run the test file
+                    #  - if only reference modified, run the test file
+                    nonref = references.sub('.', file)
+                    if nonref != file:
+                        file = None
+                        if nonref not in files and os.path.exists(nonref):
+                            file = nonref
+            elif (self.config.get('per_test_category') != "web-platform" and
+                  suite == 'jsreftest'):
+                file = os.path.relpath(file, jsreftest_extra_dir)
+                file = os.path.join(self.jsreftest_test_dir, file)
+
+            if file is None:
+                continue
+
+            file = file.replace(os.sep, posixpath.sep)
+            files.append(file)
+
+        self.info("Per-test file(s) for '%s': %s" % (suite, files))
+
+        args = []
+        for file in files:
+            cur = []
+
+            cur.extend(self.coverage_args)
+            cur.extend(self.verify_args)
+
+            cur.append(file)
+            args.append(cur)
+
         return args
 
-    def query_verify_category_suites(self, category, all_suites):
+    def query_per_test_category_suites(self, category, all_suites):
         """
-           In verify mode, determine which suites are active, for the given
+           In per-test mode, determine which suites are active, for the given
            suite category.
         """
         suites = None
-        if self.config.get('verify') == True:
-            if self.config.get('verify_category') == "web-platform":
-                suites = self.verify_suites.keys()
-            elif all_suites and self.verify_downloaded:
+        if self.verify_enabled or self.per_test_coverage:
+            if self.config.get('per_test_category') == "web-platform":
+                suites = self.suites.keys()
+            elif all_suites and self.tests_downloaded:
                 suites = dict((key, all_suites.get(key)) for key in
-                    self.verify_suites if key in all_suites.keys())
+                    self.suites if key in all_suites.keys())
             else:
                 # Until test zips are downloaded, manifests are not available,
                 # so it is not possible to determine which suites are active/
-                # required for verification; assume all suites from supported
+                # required for per-test mode; assume all suites from supported
                 # suite categories are required.
                 if category in ['mochitest', 'xpcshell', 'reftest']:
                     suites = all_suites
         return suites
 
-    def log_verify_status(self, test_name, tbpl_status, log_level):
+    def log_per_test_status(self, test_name, tbpl_status, log_level):
         """
-           Log verification status of a single test. This will display in the
-           Job Details pane in treeherder - a convenient summary of verification.
+           Log status of a single test. This will display in the
+           Job Details pane in treeherder - a convenient summary of per-test mode.
            Special test name formatting is needed because treeherder truncates
            lines that are too long, and may remove duplicates after truncation.
         """
         max_test_name_len = 40
         if len(test_name) > max_test_name_len:
             head = test_name
             new = ""
             previous = None
             max_test_name_len = max_test_name_len - len('.../')
             while len(new) < max_test_name_len:
                 head, tail = os.path.split(head)
                 previous = new
                 new = os.path.join(tail, new)
             test_name = os.path.join('...', previous or new)
             test_name = test_name.rstrip(os.path.sep)
-        self.log("TinderboxPrint: Verification of %s<br/>: %s" %
+        self.log("TinderboxPrint: Per-test run of %s<br/>: %s" %
                  (test_name, tbpl_status), level=log_level)
 
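
One subtlety in query_args above is the reftest reference handling: when only a reference file changes, the corresponding test file runs instead. Illustrative example (assumed paths):

    # A push touching only layout/reftests/foo-ref.html runs
    #   layout/reftests/foo.html
    # provided foo.html exists and was not itself in the modified set;
    # if both files changed, foo.html runs once and foo-ref.html is skipped.
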
new file mode 100644
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/testing/verify_tools.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+from mozharness.base.script import PostScriptAction
+from mozharness.mozilla.testing.per_test_base import SingleTestMixin
+
+
+verify_config_options = [
+    [["--verify"],
+     {"action": "store_true",
+      "dest": "verify",
+      "default": False,
+      "help": "Run additional verification on modified tests."
+      }],
+]
+
+
+class VerifyToolsMixin(SingleTestMixin):
+    """Utility functions for test verification."""
+
+    def __init__(self):
+        super(VerifyToolsMixin, self).__init__()
+
+    @property
+    def verify_enabled(self):
+        try:
+            return bool(self.config.get('verify'))
+        except (AttributeError, KeyError, TypeError):
+            return False
+
+    @PostScriptAction('download-and-extract')
+    def find_tests_for_verification(self, action, success=None):
+        """
+           For each file modified on this push, determine if the modified file
+           is a test, by searching test manifests. Populate self.suites
+           with test files, organized by suite.
+
+           This depends on test manifests, so can only run after test zips have
+           been downloaded and extracted.
+        """
+
+        if not self.verify_enabled:
+            return
+
+        self.find_modified_tests()
+
+    @property
+    def verify_args(self):
+        if not self.verify_enabled:
+            return []
+
+        # Limit each test harness run to 15 minutes, to avoid task timeouts
+        # when executing long-running tests.
+        MAX_TIME_PER_TEST = 900
+
+        if self.config.get('per_test_category') == "web-platform":
+            args = ['--verify-log-full']
+        else:
+            args = ['--verify-max-time=%d' % MAX_TIME_PER_TEST]
+
+        args.append('--verify')
+
+        return args
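
Combined with coverage_args from CodeCoverageMixin (an empty list in the base implementation), query_args in SingleTestMixin builds one argument list per modified test. Illustrative values, assuming a single modified test dom/tests/test_foo.html:

    # verify mode, desktop suite:
    #   [['--verify-max-time=900', '--verify', 'dom/tests/test_foo.html']]
    # verify mode, web-platform:
    #   [['--verify-log-full', '--verify', 'dom/tests/test_foo.html']]
    # per-test coverage mode (no extra harness flags):
    #   [['dom/tests/test_foo.html']]
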
--- a/testing/mozharness/scripts/android_emulator_unittest.py
+++ b/testing/mozharness/scripts/android_emulator_unittest.py
@@ -22,19 +22,21 @@ sys.path.insert(1, os.path.dirname(sys.p
 from mozprocess import ProcessHandler
 
 from mozharness.base.log import FATAL
 from mozharness.base.script import BaseScript, PreScriptAction, PostScriptAction
 from mozharness.mozilla.buildbot import TBPL_RETRY, EXIT_STATUS_DICT
 from mozharness.mozilla.mozbase import MozbaseMixin
 from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
 from mozharness.mozilla.testing.unittest import EmulatorMixin
+from mozharness.mozilla.testing.codecoverage import CodeCoverageMixin
 
 
-class AndroidEmulatorTest(TestingMixin, EmulatorMixin, BaseScript, MozbaseMixin):
+class AndroidEmulatorTest(TestingMixin, EmulatorMixin, BaseScript, MozbaseMixin,
+                          CodeCoverageMixin):
     config_options = [[
         ["--test-suite"],
         {"action": "store",
          "dest": "test_suite",
          "default": None
          }
     ], [
         ["--total-chunk"],
@@ -466,17 +468,17 @@ class AndroidEmulatorTest(TestingMixin, 
         else:
             if self.this_chunk is not None:
                 cmd.extend(['--this-chunk', self.this_chunk])
             if self.total_chunks is not None:
                 cmd.extend(['--total-chunks', self.total_chunks])
 
         try_options, try_tests = self.try_args(self.test_suite)
         cmd.extend(try_options)
-        if self.config.get('verify') is not True:
+        if not self.verify_enabled and not self.per_test_coverage:
             cmd.extend(self.query_tests_args(
                 self.config["suite_definitions"][self.test_suite].get("tests"),
                 None,
                 try_tests))
 
         return cmd
 
     def _get_repo_url(self, path):
@@ -735,82 +737,82 @@ class AndroidEmulatorTest(TestingMixin, 
                            (self.robocop_path, self.emulator["name"]),
                            EXIT_STATUS_DICT[TBPL_RETRY])
 
         self.info("Finished installing apps for %s" % self.emulator["name"])
 
     def _query_suites(self):
         if self.test_suite:
             return [(self.test_suite, self.test_suite)]
-        # test-verification: determine test suites to be verified
+        # per-test mode: determine test suites to run
         all = [('mochitest', {'plain': 'mochitest',
                               'chrome': 'mochitest-chrome',
                               'plain-clipboard': 'mochitest-plain-clipboard',
                               'plain-gpu': 'mochitest-plain-gpu'}),
                ('reftest', {'reftest': 'reftest', 'crashtest': 'crashtest'}),
                ('xpcshell', {'xpcshell': 'xpcshell'})]
         suites = []
         for (category, all_suites) in all:
-            cat_suites = self.query_verify_category_suites(category, all_suites)
+            cat_suites = self.query_per_test_category_suites(category, all_suites)
             for k in cat_suites.keys():
                 suites.append((k, cat_suites[k]))
         return suites
 
     def _query_suite_categories(self):
         if self.test_suite:
             categories = [self.test_suite]
         else:
-            # test-verification
+            # per-test mode
             categories = ['mochitest', 'reftest', 'xpcshell']
         return categories
 
     def run_tests(self):
         """
         Run the tests
         """
         self.start_time = datetime.datetime.now()
-        max_verify_time = datetime.timedelta(minutes=60)
+        max_per_test_time = datetime.timedelta(minutes=60)
 
-        verify_args = []
+        per_test_args = []
         suites = self._query_suites()
         minidump = self.query_minidump_stackwalk()
-        for (verify_suite, suite) in suites:
+        for (per_test_suite, suite) in suites:
             self.test_suite = suite
 
             cmd = self._build_command()
 
             try:
                 cwd = self._query_tests_dir()
             except Exception:
                 self.fatal("Don't know how to run --test-suite '%s'!" % self.test_suite)
             env = self.query_env()
             if minidump:
                 env['MINIDUMP_STACKWALK'] = minidump
             env['MOZ_UPLOAD_DIR'] = self.query_abs_dirs()['abs_blob_upload_dir']
             env['MINIDUMP_SAVE_PATH'] = self.query_abs_dirs()['abs_blob_upload_dir']
             env['RUST_BACKTRACE'] = 'full'
 
-            for verify_args in self.query_verify_args(verify_suite):
-                if (datetime.datetime.now() - self.start_time) > max_verify_time:
-                    # Verification has run out of time. That is okay! Stop running
-                    # tests so that a task timeout is not triggered, and so that
+            for per_test_args in self.query_args(per_test_suite):
+                if (datetime.datetime.now() - self.start_time) > max_per_test_time:
+                    # The per-test run is out of time. That is okay! Stop running
+                    # tests so that a task timeout is not triggered, and so that
                     # (partial) results are made available in a timely manner.
-                    self.info("TinderboxPrint: Verification too long: "
-                              "Not all tests were verified.<br/>")
-                    # Signal verify time exceeded, to break out of suites and
+                    self.info("TinderboxPrint: Running tests took too long: "
+                              "Not all tests were executed.<br/>")
+                    # Signal per-test time exceeded, to break out of suites and
                     # suite categories loops also.
                     return False
 
                 final_cmd = copy.copy(cmd)
-                if len(verify_args) > 0:
-                    # in verify mode, remove any chunk arguments from command
+                if len(per_test_args) > 0:
+                    # in per-test mode, remove any chunk arguments from command
                     for arg in final_cmd:
                         if 'total-chunk' in arg or 'this-chunk' in arg:
                             final_cmd.remove(arg)
-                final_cmd.extend(verify_args)
+                final_cmd.extend(per_test_args)
 
                 self.info("Running on %s the command %s" % (self.emulator["name"],
                           subprocess.list2cmdline(final_cmd)))
                 self.info("##### %s log begins" % self.test_suite)
 
                 suite_category = self.test_suite
                 parser = self.get_test_output_parser(
                     suite_category,
@@ -818,19 +820,19 @@ class AndroidEmulatorTest(TestingMixin, 
                     log_obj=self.log_obj,
                     error_list=[])
                 self.run_command(final_cmd, cwd=cwd, env=env, output_parser=parser)
                 tbpl_status, log_level = parser.evaluate_parser(0)
                 parser.append_tinderboxprint_line(self.test_suite)
 
                 self.info("##### %s log ends" % self.test_suite)
 
-                if len(verify_args) > 0:
+                if len(per_test_args) > 0:
                     self.buildbot_status(tbpl_status, level=log_level)
-                    self.log_verify_status(verify_args[-1], tbpl_status, log_level)
+                    self.log_per_test_status(per_test_args[-1], tbpl_status, log_level)
                 else:
                     self.buildbot_status(tbpl_status, level=log_level)
                     self.log("The %s suite: %s ran with return status: %s" %
                              (suite_category, suite, tbpl_status), level=log_level)
 
     @PostScriptAction('run-tests')
     def stop_emulator(self, action, success=None):
         '''
--- a/testing/mozharness/scripts/desktop_unittest.py
+++ b/testing/mozharness/scripts/desktop_unittest.py
@@ -474,17 +474,17 @@ class DesktopUnittest(TestingMixin, Merc
                 # suites gets a dict of everything from all_suites where a key
                 # is also in specified_suites
                 suites = dict((key, all_suites.get(key)) for key in
                               specified_suites if key in all_suites.keys())
         else:
             if c.get('run_all_suites'):  # needed if you dont specify any suites
                 suites = all_suites
             else:
-                suites = self.query_verify_category_suites(category, all_suites)
+                suites = self.query_per_test_category_suites(category, all_suites)
 
         return suites
 
     def _query_try_flavor(self, category, suite):
         flavors = {
             "mochitest": [("plain.*", "mochitest"),
                           ("browser-chrome.*", "browser-chrome"),
                           ("mochitest-devtools-chrome.*", "devtools-chrome"),
@@ -762,19 +762,19 @@ class DesktopUnittest(TestingMixin, Merc
 
     def _run_category_suites(self, suite_category):
         """run suite(s) to a specific category"""
         dirs = self.query_abs_dirs()
         suites = self._query_specified_suites(suite_category)
         abs_app_dir = self.query_abs_app_dir()
         abs_res_dir = self.query_abs_res_dir()
 
-        max_verify_time = timedelta(minutes=60)
-        max_verify_tests = 10
-        verified_tests = 0
+        max_per_test_time = timedelta(minutes=60)
+        max_per_test_tests = 10
+        executed_tests = 0
 
         if suites:
             self.info('#### Running %s suites' % suite_category)
             for suite in suites:
                 abs_base_cmd = self._query_abs_base_cmd(suite_category, suite)
                 cmd = abs_base_cmd[:]
                 replace_dict = {
                     'abs_app_dir': abs_app_dir,
@@ -784,17 +784,17 @@ class DesktopUnittest(TestingMixin, Merc
                     'abs_res_dir': abs_res_dir,
                 }
                 options_list = []
                 env = {
                     'TEST_SUITE': suite
                 }
                 if isinstance(suites[suite], dict):
                     options_list = suites[suite].get('options', [])
-                    if self.config.get('verify') is True:
+                    if self.verify_enabled or self.per_test_coverage:
                         tests_list = []
                     else:
                         tests_list = suites[suite].get('tests', [])
                     env = copy.deepcopy(suites[suite].get('env', {}))
                 else:
                     options_list = suites[suite]
                     tests_list = []
 
@@ -843,43 +843,66 @@ class DesktopUnittest(TestingMixin, Merc
                 if self.config['single_stylo_traversal']:
                     env['STYLO_THREADS'] = '1'
                 else:
                     env['STYLO_THREADS'] = '4'
 
                 env = self.query_env(partial_env=env, log_level=INFO)
                 cmd_timeout = self.get_timeout_for_category(suite_category)
 
-                for verify_args in self.query_verify_args(suite):
-                    if (datetime.now() - self.start_time) > max_verify_time:
-                        # Verification has run out of time. That is okay! Stop running
-                        # tests so that a task timeout is not triggered, and so that
+                # Run basic startup/shutdown test to collect baseline coverage.
+                # This way, after we run a test, we can generate a diff between the
+                # full coverage of the test and the baseline coverage and only get
+                # the coverage data specific to the test.
+                if self.per_test_coverage:
+                    gcov_dir, jsvm_dir = self.set_coverage_env(env)
+                    # TODO: Run basic startup/shutdown test to collect baseline coverage.
+                    # grcov_file, jsvm_file = self.parse_coverage_artifacts(gcov_dir, jsvm_dir)
+                    # shutil.rmtree(gcov_dir)
+                    # shutil.rmtree(jsvm_dir)
+                    # TODO: Parse coverage report
+
+                for per_test_args in self.query_args(suite):
+                    if (datetime.now() - self.start_time) > max_per_test_time:
+                        # The per-test run is out of time. That is okay! Stop running
+                        # tests so that a task timeout is not triggered, and so that
                         # (partial) results are made available in a timely manner.
-                        self.info("TinderboxPrint: Verification too long: Not all tests "
-                                  "were verified.<br/>")
-                        # Signal verify time exceeded, to break out of suites and
+                        self.info("TinderboxPrint: Running tests took too long: Not all tests "
+                                  "were executed.<br/>")
+                        # Signal per-test time exceeded, to break out of suites and
                         # suite categories loops also.
                         return False
-                    if verified_tests >= max_verify_tests:
+                    if executed_tests >= max_per_test_tests:
                         # When changesets are merged between trees or many tests are
                         # otherwise updated at once, there probably is not enough time
-                        # to verify all tests, and attempting to do so may cause other
+                        # to run all tests, and attempting to do so may cause other
                         # problems, such as generating too much log output.
                         self.info("TinderboxPrint: Too many modified tests: Not all tests "
-                                  "were verified.<br/>")
+                                  "were executed.<br/>")
                         return False
-                    verified_tests = verified_tests + 1
+                    executed_tests = executed_tests + 1
 
                     final_cmd = copy.copy(cmd)
-                    final_cmd.extend(verify_args)
+                    final_cmd.extend(per_test_args)
+
+                    if self.per_test_coverage:
+                        gcov_dir, jsvm_dir = self.set_coverage_env(env)
+
                     return_code = self.run_command(final_cmd, cwd=dirs['abs_work_dir'],
                                                    output_timeout=cmd_timeout,
                                                    output_parser=parser,
                                                    env=env)
 
+                    if self.per_test_coverage:
+                        grcov_file, jsvm_file = self.parse_coverage_artifacts(gcov_dir, jsvm_dir)
+                        shutil.rmtree(gcov_dir)
+                        shutil.rmtree(jsvm_dir)
+                        # TODO: Parse coverage report
+                        # TODO: Diff this coverage report with the baseline one
+
                     # mochitest, reftest, and xpcshell suites do not return
                     # appropriate return codes. Therefore, we must parse the output
                     # to determine what the tbpl_status and worst_log_level must
                     # be. We do this by:
                     # 1) checking to see if our mozharness script ran into any
                     #    errors itself with 'num_errors' <- OutputParser
                     # 2) if num_errors is 0 then we look in the subclassed 'parser'
                     #    findings for harness/suite errors <- DesktopUnittestOutputParser
@@ -890,18 +913,18 @@ class DesktopUnittest(TestingMixin, Merc
                         # bug 1120644
                         success_codes = [0, 1]
 
                     tbpl_status, log_level = parser.evaluate_parser(return_code,
                                                                     success_codes=success_codes)
                     parser.append_tinderboxprint_line(suite_name)
 
                     self.buildbot_status(tbpl_status, level=log_level)
-                    if len(verify_args) > 0:
-                        self.log_verify_status(verify_args[-1], tbpl_status, log_level)
+                    if len(per_test_args) > 0:
+                        self.log_per_test_status(per_test_args[-1], tbpl_status, log_level)
                     else:
                         self.log("The %s suite: %s ran with return status: %s" %
                                  (suite_category, suite, tbpl_status), level=log_level)
         else:
             self.debug('There were no suites to run for %s' % suite_category)
         return True
 
 
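The TODOs above leave the baseline diffing unimplemented. A hypothetical sketch of that step, assuming each report has been parsed into a {source_path: set(covered_line_numbers)} mapping:

    def diff_coverage(test_cov, baseline_cov):
        # Keep only the lines the test itself exercised: covered lines not
        # already hit by the baseline startup/shutdown run.
        result = {}
        for path, lines in test_cov.items():
            specific = lines - baseline_cov.get(path, set())
            if specific:
                result[path] = specific
        return result
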
--- a/testing/mozharness/scripts/web_platform_tests.py
+++ b/testing/mozharness/scripts/web_platform_tests.py
@@ -1,16 +1,17 @@
 #!/usr/bin/env python
 # ***** BEGIN LICENSE BLOCK *****
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
 # You can obtain one at http://mozilla.org/MPL/2.0/.
 # ***** END LICENSE BLOCK *****
 import copy
 import os
+import shutil
 import sys
 
 from datetime import datetime, timedelta
 
 # load modules from parent dir
 sys.path.insert(1, os.path.dirname(sys.path[0]))
 
 from mozharness.base.errors import BaseErrorList
@@ -313,64 +314,87 @@ class WebPlatformTest(TestingMixin, Merc
         if self.config['single_stylo_traversal']:
             env['STYLO_THREADS'] = '1'
         else:
             env['STYLO_THREADS'] = '4'
 
         env = self.query_env(partial_env=env, log_level=INFO)
 
         start_time = datetime.now()
-        max_verify_time = timedelta(minutes=60)
-        max_verify_tests = 10
-        verified_tests = 0
+        max_per_test_time = timedelta(minutes=60)
+        max_per_test_tests = 10
+        executed_tests = 0
 
-        if self.config.get("verify") is True:
-            verify_suites = self.query_verify_category_suites(None, None)
-            if "wdspec" in verify_suites:
+        if self.per_test_coverage or self.verify_enabled:
+            suites = self.query_per_test_category_suites(None, None)
+            if "wdspec" in suites:
                 # geckodriver is required for wdspec, but not always available
                 geckodriver_path = self._query_geckodriver()
                 if not geckodriver_path or not os.path.isfile(geckodriver_path):
-                    verify_suites.remove("wdspec")
-                    self.info("Test verification skipping 'wdspec' tests - no geckodriver")
+                    suites.remove("wdspec")
+                    self.info("Skipping 'wdspec' tests - no geckodriver")
         else:
             test_types = self.config.get("test_type", [])
-            verify_suites = [None]
-        for verify_suite in verify_suites:
-            if verify_suite:
-                test_types = [verify_suite]
-            for verify_args in self.query_verify_args(verify_suite):
-                if (datetime.now() - start_time) > max_verify_time:
-                    # Verification has run out of time. That is okay! Stop running
-                    # tests so that a task timeout is not triggered, and so that
+            suites = [None]
+        for suite in suites:
+            if suite:
+                test_types = [suite]
+
+            # Run basic startup/shutdown test to collect baseline coverage.
+            # This way, after we run a test, we can generate a diff between the
+            # full coverage of the test and the baseline coverage and only get
+            # the coverage data specific to the test.
+            if self.per_test_coverage:
+                gcov_dir, jsvm_dir = self.set_coverage_env(env)
+                # TODO: Run basic startup/shutdown test to collect baseline coverage.
+                # grcov_file, jsvm_file = self.parse_coverage_artifacts(gcov_dir, jsvm_dir)
+                # shutil.rmtree(gcov_dir)
+                # shutil.rmtree(jsvm_dir)
+                # TODO: Parse coverage report
+
+            for per_test_args in self.query_args(suite):
+                if (datetime.now() - start_time) > max_per_test_time:
+                    # The per-test run is out of time. That is okay! Stop running
+                    # tests so that a task timeout is not triggered, and so that
                     # (partial) results are made available in a timely manner.
-                    self.info("TinderboxPrint: Verification too long: Not all tests "
-                              "were verified.<br/>")
+                    self.info("TinderboxPrint: Running tests took too long: Not all tests "
+                              "were executed.<br/>")
                     return
-                if verified_tests >= max_verify_tests:
+                if executed_tests >= max_per_test_tests:
                     # When changesets are merged between trees or many tests are
                     # otherwise updated at once, there probably is not enough time
-                    # to verify all tests, and attempting to do so may cause other
+                    # to run all tests, and attempting to do so may cause other
                     # problems, such as generating too much log output.
                     self.info("TinderboxPrint: Too many modified tests: Not all tests "
-                              "were verified.<br/>")
+                              "were executed.<br/>")
                     return
-                verified_tests = verified_tests + 1
+                executed_tests = executed_tests + 1
 
                 cmd = self._query_cmd(test_types)
-                cmd.extend(verify_args)
+                cmd.extend(per_test_args)
+
+                if self.per_test_coverage:
+                    gcov_dir, jsvm_dir = self.set_coverage_env(env)
 
                 return_code = self.run_command(cmd,
                                                cwd=dirs['abs_work_dir'],
                                                output_timeout=1000,
                                                output_parser=parser,
                                                env=env)
 
+                if self.per_test_coverage:
+                    grcov_file, jsvm_file = self.parse_coverage_artifacts(gcov_dir, jsvm_dir)
+                    shutil.rmtree(gcov_dir)
+                    shutil.rmtree(jsvm_dir)
+                    # TODO: Parse coverage report
+                    # TODO: Diff this coverage report with the baseline one
+
                 tbpl_status, log_level = parser.evaluate_parser(return_code)
                 self.buildbot_status(tbpl_status, level=log_level)
 
-                if len(verify_args) > 0:
-                    self.log_verify_status(verify_args[-1], tbpl_status, log_level)
+                if len(per_test_args) > 0:
+                    self.log_per_test_status(per_test_args[-1], tbpl_status, log_level)
 
 
 # main {{{1
 if __name__ == '__main__':
     web_platform_tests = WebPlatformTest()
     web_platform_tests.run_and_exit()