Backed out changeset 53b1fa0faa6d (bug 1696251) for breaking the static-analysis integration. a=backout
author: Butkovits Atila <abutkovits@mozilla.com>
Thu, 23 Sep 2021 13:06:40 +0300
changeset 593004 c38d34be7c3f757510f037fef5a78ea08af32980
parent 593003 907ebdeb89a26949ecce30dbf3c065cba8595e25
child 593005 ba378c62cecf6a3048baddde1f9c7d61b45a2914
child 593036 672fd0dd542fd067e7dd811e548244512b8f4c6b
push id: 38818
push user: abutkovits@mozilla.com
push date: Thu, 23 Sep 2021 10:09:06 +0000
treeherder: mozilla-central@c38d34be7c3f [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: backout
bugs: 1696251
milestone: 94.0a1
backs out: 53b1fa0faa6d78f7cf72f9cbe3dc85221a04eea3
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Backed out changeset 53b1fa0faa6d (bug 1696251) for breaking the static-analysis integration. a=backout
build/valgrind/mach_commands.py
devtools/shared/css/generated/mach_commands.py
dom/bindings/mach_commands.py
js/src/devtools/rootAnalysis/mach_commands.py
layout/tools/reftest/mach_commands.py
layout/tools/reftest/mach_test_package_commands.py
mobile/android/mach_commands.py
python/mach/docs/commands.rst
python/mach/docs/settings.rst
python/mach/docs/telemetry.rst
python/mach/mach/commands/commandinfo.py
python/mach/mach/commands/settings.py
python/mach/mach/decorators.py
python/mach/mach/registrar.py
python/mach/mach/test/providers/basic.py
python/mach/mach/test/providers/commands.py
python/mach/mach/test/providers/conditions.py
python/mach/mach/test/providers/conditions_invalid.py
python/mach/mach/test/providers/throw.py
python/mach/mach/test/test_conditions.py
python/mach/mach/test/test_decorators.py
python/mach_commands.py
python/mozboot/mozboot/mach_commands.py
python/mozbuild/mozbuild/artifact_commands.py
python/mozbuild/mozbuild/backend/mach_commands.py
python/mozbuild/mozbuild/build_commands.py
python/mozbuild/mozbuild/code_analysis/mach_commands.py
python/mozbuild/mozbuild/compilation/codecomplete.py
python/mozbuild/mozbuild/frontend/mach_commands.py
python/mozbuild/mozbuild/mach_commands.py
python/mozbuild/mozbuild/test/code_analysis/test_mach_commands.py
python/mozbuild/mozbuild/vendor/mach_commands.py
python/mozperftest/mozperftest/mach_commands.py
python/mozperftest/mozperftest/tests/test_mach_commands.py
python/mozrelease/mozrelease/mach_commands.py
remote/mach_commands.py
security/manager/tools/mach_commands.py
taskcluster/mach_commands.py
testing/awsy/mach_commands.py
testing/condprofile/mach_commands.py
testing/firefox-ui/mach_commands.py
testing/geckodriver/mach_commands.py
testing/gtest/mach_test_package_commands.py
testing/mach_commands.py
testing/marionette/mach_commands.py
testing/marionette/mach_test_package_commands.py
testing/mochitest/mach_commands.py
testing/mochitest/mach_test_package_commands.py
testing/mozharness/mach_commands.py
testing/raptor/mach_commands.py
testing/talos/mach_commands.py
testing/tps/mach_commands.py
testing/web-platform/mach_commands.py
testing/web-platform/mach_test_package_commands.py
testing/xpcshell/mach_commands.py
testing/xpcshell/mach_test_package_commands.py
toolkit/components/glean/build_scripts/mach_commands.py
toolkit/components/telemetry/tests/marionette/mach_commands.py
tools/browsertime/mach_commands.py
tools/compare-locales/mach_commands.py
tools/lint/mach_commands.py
tools/mach_commands.py
tools/moztreedocs/mach_commands.py
tools/phabricator/mach_commands.py
tools/power/mach_commands.py
tools/tryselect/mach_commands.py
tools/vcs/mach_commands.py
--- a/build/valgrind/mach_commands.py
+++ b/build/valgrind/mach_commands.py
@@ -7,234 +7,238 @@ from __future__ import absolute_import, 
 import json
 import logging
 import mozinfo
 import os
 
 from mach.decorators import (
     Command,
     CommandArgument,
+    CommandProvider,
 )
 from mozbuild.base import (
+    MachCommandBase,
     MachCommandConditions as conditions,
     BinaryNotFoundException,
 )
 
 
 def is_valgrind_build(cls):
     """Must be a build with --enable-valgrind and --disable-jemalloc."""
     defines = cls.config_environment.defines
     return "MOZ_VALGRIND" in defines and "MOZ_MEMORY" not in defines
 
 
-@Command(
-    "valgrind-test",
-    category="testing",
-    conditions=[conditions.is_firefox_or_thunderbird, is_valgrind_build],
-    description="Run the Valgrind test job (memory-related errors).",
-)
-@CommandArgument(
-    "--suppressions",
-    default=[],
-    action="append",
-    metavar="FILENAME",
-    help="Specify a suppression file for Valgrind to use. Use "
-    "--suppression multiple times to specify multiple suppression "
-    "files.",
-)
-def valgrind_test(command_context, suppressions):
-    """
-    Run Valgrind tests.
-    """
+@CommandProvider
+class MachCommands(MachCommandBase):
+    @Command(
+        "valgrind-test",
+        category="testing",
+        conditions=[conditions.is_firefox_or_thunderbird, is_valgrind_build],
+        description="Run the Valgrind test job (memory-related errors).",
+    )
+    @CommandArgument(
+        "--suppressions",
+        default=[],
+        action="append",
+        metavar="FILENAME",
+        help="Specify a suppression file for Valgrind to use. Use "
+        "--suppression multiple times to specify multiple suppression "
+        "files.",
+    )
+    def valgrind_test(self, command_context, suppressions):
+        """
+        Run Valgrind tests.
+        """
 
-    from mozfile import TemporaryDirectory
-    from mozhttpd import MozHttpd
-    from mozprofile import FirefoxProfile, Preferences
-    from mozprofile.permissions import ServerLocations
-    from mozrunner import FirefoxRunner
-    from mozrunner.utils import findInPath
-    from six import string_types
-    from valgrind.output_handler import OutputHandler
+        from mozfile import TemporaryDirectory
+        from mozhttpd import MozHttpd
+        from mozprofile import FirefoxProfile, Preferences
+        from mozprofile.permissions import ServerLocations
+        from mozrunner import FirefoxRunner
+        from mozrunner.utils import findInPath
+        from six import string_types
+        from valgrind.output_handler import OutputHandler
 
-    build_dir = os.path.join(command_context.topsrcdir, "build")
+        build_dir = os.path.join(command_context.topsrcdir, "build")
 
-    # XXX: currently we just use the PGO inputs for Valgrind runs.  This may
-    # change in the future.
-    httpd = MozHttpd(docroot=os.path.join(build_dir, "pgo"))
-    httpd.start(block=False)
+        # XXX: currently we just use the PGO inputs for Valgrind runs.  This may
+        # change in the future.
+        httpd = MozHttpd(docroot=os.path.join(build_dir, "pgo"))
+        httpd.start(block=False)
 
-    with TemporaryDirectory() as profilePath:
-        # TODO: refactor this into mozprofile
-        profile_data_dir = os.path.join(
-            command_context.topsrcdir, "testing", "profiles"
-        )
-        with open(os.path.join(profile_data_dir, "profiles.json"), "r") as fh:
-            base_profiles = json.load(fh)["valgrind"]
+        with TemporaryDirectory() as profilePath:
+            # TODO: refactor this into mozprofile
+            profile_data_dir = os.path.join(
+                command_context.topsrcdir, "testing", "profiles"
+            )
+            with open(os.path.join(profile_data_dir, "profiles.json"), "r") as fh:
+                base_profiles = json.load(fh)["valgrind"]
 
-        prefpaths = [
-            os.path.join(profile_data_dir, profile, "user.js")
-            for profile in base_profiles
-        ]
-        prefs = {}
-        for path in prefpaths:
-            prefs.update(Preferences.read_prefs(path))
+            prefpaths = [
+                os.path.join(profile_data_dir, profile, "user.js")
+                for profile in base_profiles
+            ]
+            prefs = {}
+            for path in prefpaths:
+                prefs.update(Preferences.read_prefs(path))
 
-        interpolation = {
-            "server": "%s:%d" % httpd.httpd.server_address,
-        }
-        for k, v in prefs.items():
-            if isinstance(v, string_types):
-                v = v.format(**interpolation)
-            prefs[k] = Preferences.cast(v)
+            interpolation = {
+                "server": "%s:%d" % httpd.httpd.server_address,
+            }
+            for k, v in prefs.items():
+                if isinstance(v, string_types):
+                    v = v.format(**interpolation)
+                prefs[k] = Preferences.cast(v)
 
-        quitter = os.path.join(
-            command_context.topsrcdir, "tools", "quitter", "quitter@mozilla.org.xpi"
-        )
+            quitter = os.path.join(
+                command_context.topsrcdir, "tools", "quitter", "quitter@mozilla.org.xpi"
+            )
 
-        locations = ServerLocations()
-        locations.add_host(
-            host="127.0.0.1", port=httpd.httpd.server_port, options="primary"
-        )
+            locations = ServerLocations()
+            locations.add_host(
+                host="127.0.0.1", port=httpd.httpd.server_port, options="primary"
+            )
 
-        profile = FirefoxProfile(
-            profile=profilePath,
-            preferences=prefs,
-            addons=[quitter],
-            locations=locations,
-        )
+            profile = FirefoxProfile(
+                profile=profilePath,
+                preferences=prefs,
+                addons=[quitter],
+                locations=locations,
+            )
 
-        firefox_args = [httpd.get_url()]
+            firefox_args = [httpd.get_url()]
 
-        env = os.environ.copy()
-        env["G_SLICE"] = "always-malloc"
-        env["MOZ_CC_RUN_DURING_SHUTDOWN"] = "1"
-        env["MOZ_CRASHREPORTER_NO_REPORT"] = "1"
-        env["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"
-        env["XPCOM_DEBUG_BREAK"] = "warn"
+            env = os.environ.copy()
+            env["G_SLICE"] = "always-malloc"
+            env["MOZ_CC_RUN_DURING_SHUTDOWN"] = "1"
+            env["MOZ_CRASHREPORTER_NO_REPORT"] = "1"
+            env["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"
+            env["XPCOM_DEBUG_BREAK"] = "warn"
 
-        outputHandler = OutputHandler(command_context.log)
-        kp_kwargs = {
-            "processOutputLine": [outputHandler],
-            "universal_newlines": True,
-        }
+            outputHandler = OutputHandler(command_context.log)
+            kp_kwargs = {
+                "processOutputLine": [outputHandler],
+                "universal_newlines": True,
+            }
 
-        valgrind = "valgrind"
-        if not os.path.exists(valgrind):
-            valgrind = findInPath(valgrind)
+            valgrind = "valgrind"
+            if not os.path.exists(valgrind):
+                valgrind = findInPath(valgrind)
 
-        valgrind_args = [
-            valgrind,
-            "--sym-offsets=yes",
-            "--smc-check=all-non-file",
-            "--vex-iropt-register-updates=allregs-at-mem-access",
-            "--gen-suppressions=all",
-            "--num-callers=36",
-            "--leak-check=full",
-            "--show-possibly-lost=no",
-            "--track-origins=yes",
-            "--trace-children=yes",
-            "-v",  # Enable verbosity to get the list of used suppressions
-            # Avoid excessive delays in the presence of spinlocks.
-            # See bug 1309851.
-            "--fair-sched=yes",
-            # Keep debuginfo after library unmap.  See bug 1382280.
-            "--keep-debuginfo=yes",
-            # Reduce noise level on rustc and/or LLVM compiled code.
-            # See bug 1365915
-            "--expensive-definedness-checks=yes",
-            # Compensate for the compiler inlining `new` but not `delete`
-            # or vice versa.
-            "--show-mismatched-frees=no",
-        ]
+            valgrind_args = [
+                valgrind,
+                "--sym-offsets=yes",
+                "--smc-check=all-non-file",
+                "--vex-iropt-register-updates=allregs-at-mem-access",
+                "--gen-suppressions=all",
+                "--num-callers=36",
+                "--leak-check=full",
+                "--show-possibly-lost=no",
+                "--track-origins=yes",
+                "--trace-children=yes",
+                "-v",  # Enable verbosity to get the list of used suppressions
+                # Avoid excessive delays in the presence of spinlocks.
+                # See bug 1309851.
+                "--fair-sched=yes",
+                # Keep debuginfo after library unmap.  See bug 1382280.
+                "--keep-debuginfo=yes",
+                # Reduce noise level on rustc and/or LLVM compiled code.
+                # See bug 1365915
+                "--expensive-definedness-checks=yes",
+                # Compensate for the compiler inlining `new` but not `delete`
+                # or vice versa.
+                "--show-mismatched-frees=no",
+            ]
 
-        for s in suppressions:
-            valgrind_args.append("--suppressions=" + s)
+            for s in suppressions:
+                valgrind_args.append("--suppressions=" + s)
 
-        supps_dir = os.path.join(build_dir, "valgrind")
-        supps_file1 = os.path.join(supps_dir, "cross-architecture.sup")
-        valgrind_args.append("--suppressions=" + supps_file1)
+            supps_dir = os.path.join(build_dir, "valgrind")
+            supps_file1 = os.path.join(supps_dir, "cross-architecture.sup")
+            valgrind_args.append("--suppressions=" + supps_file1)
 
-        if mozinfo.os == "linux":
-            machtype = {
-                "x86_64": "x86_64-pc-linux-gnu",
-                "x86": "i386-pc-linux-gnu",
-            }.get(mozinfo.processor)
-            if machtype:
-                supps_file2 = os.path.join(supps_dir, machtype + ".sup")
-                if os.path.isfile(supps_file2):
-                    valgrind_args.append("--suppressions=" + supps_file2)
+            if mozinfo.os == "linux":
+                machtype = {
+                    "x86_64": "x86_64-pc-linux-gnu",
+                    "x86": "i386-pc-linux-gnu",
+                }.get(mozinfo.processor)
+                if machtype:
+                    supps_file2 = os.path.join(supps_dir, machtype + ".sup")
+                    if os.path.isfile(supps_file2):
+                        valgrind_args.append("--suppressions=" + supps_file2)
 
-        exitcode = None
-        timeout = 1800
-        binary_not_found_exception = None
-        try:
-            runner = FirefoxRunner(
-                profile=profile,
-                binary=command_context.get_binary_path(),
-                cmdargs=firefox_args,
-                env=env,
-                process_args=kp_kwargs,
-            )
-            runner.start(debug_args=valgrind_args)
-            exitcode = runner.wait(timeout=timeout)
-        except BinaryNotFoundException as e:
-            binary_not_found_exception = e
-        finally:
-            errs = outputHandler.error_count
-            supps = outputHandler.suppression_count
-            if errs != supps:
-                status = 1  # turns the TBPL job orange
-                command_context.log(
-                    logging.ERROR,
-                    "valgrind-fail-parsing",
-                    {"errs": errs, "supps": supps},
-                    "TEST-UNEXPECTED-FAIL | valgrind-test | error parsing: {errs} errors "
-                    "seen, but {supps} generated suppressions seen",
-                )
-
-            elif errs == 0:
-                status = 0
-                command_context.log(
-                    logging.INFO,
-                    "valgrind-pass",
-                    {},
-                    "TEST-PASS | valgrind-test | valgrind found no errors",
+            exitcode = None
+            timeout = 1800
+            binary_not_found_exception = None
+            try:
+                runner = FirefoxRunner(
+                    profile=profile,
+                    binary=command_context.get_binary_path(),
+                    cmdargs=firefox_args,
+                    env=env,
+                    process_args=kp_kwargs,
                 )
-            else:
-                status = 1  # turns the TBPL job orange
-                # We've already printed details of the errors.
+                runner.start(debug_args=valgrind_args)
+                exitcode = runner.wait(timeout=timeout)
+            except BinaryNotFoundException as e:
+                binary_not_found_exception = e
+            finally:
+                errs = outputHandler.error_count
+                supps = outputHandler.suppression_count
+                if errs != supps:
+                    status = 1  # turns the TBPL job orange
+                    command_context.log(
+                        logging.ERROR,
+                        "valgrind-fail-parsing",
+                        {"errs": errs, "supps": supps},
+                        "TEST-UNEXPECTED-FAIL | valgrind-test | error parsing: {errs} errors "
+                        "seen, but {supps} generated suppressions seen",
+                    )
+
+                elif errs == 0:
+                    status = 0
+                    command_context.log(
+                        logging.INFO,
+                        "valgrind-pass",
+                        {},
+                        "TEST-PASS | valgrind-test | valgrind found no errors",
+                    )
+                else:
+                    status = 1  # turns the TBPL job orange
+                    # We've already printed details of the errors.
 
-            if binary_not_found_exception:
-                status = 2  # turns the TBPL job red
-                command_context.log(
-                    logging.ERROR,
-                    "valgrind-fail-errors",
-                    {"error": str(binary_not_found_exception)},
-                    "TEST-UNEXPECTED-FAIL | valgrind-test | {error}",
-                )
-                command_context.log(
-                    logging.INFO,
-                    "valgrind-fail-errors",
-                    {"help": binary_not_found_exception.help()},
-                    "{help}",
-                )
-            elif exitcode is None:
-                status = 2  # turns the TBPL job red
-                command_context.log(
-                    logging.ERROR,
-                    "valgrind-fail-timeout",
-                    {"timeout": timeout},
-                    "TEST-UNEXPECTED-FAIL | valgrind-test | Valgrind timed out "
-                    "(reached {timeout} second limit)",
-                )
-            elif exitcode != 0:
-                status = 2  # turns the TBPL job red
-                command_context.log(
-                    logging.ERROR,
-                    "valgrind-fail-errors",
-                    {"exitcode": exitcode},
-                    "TEST-UNEXPECTED-FAIL | valgrind-test | non-zero exit code "
-                    "from Valgrind: {exitcode}",
-                )
+                if binary_not_found_exception:
+                    status = 2  # turns the TBPL job red
+                    command_context.log(
+                        logging.ERROR,
+                        "valgrind-fail-errors",
+                        {"error": str(binary_not_found_exception)},
+                        "TEST-UNEXPECTED-FAIL | valgrind-test | {error}",
+                    )
+                    command_context.log(
+                        logging.INFO,
+                        "valgrind-fail-errors",
+                        {"help": binary_not_found_exception.help()},
+                        "{help}",
+                    )
+                elif exitcode is None:
+                    status = 2  # turns the TBPL job red
+                    command_context.log(
+                        logging.ERROR,
+                        "valgrind-fail-timeout",
+                        {"timeout": timeout},
+                        "TEST-UNEXPECTED-FAIL | valgrind-test | Valgrind timed out "
+                        "(reached {timeout} second limit)",
+                    )
+                elif exitcode != 0:
+                    status = 2  # turns the TBPL job red
+                    command_context.log(
+                        logging.ERROR,
+                        "valgrind-fail-errors",
+                        {"exitcode": exitcode},
+                        "TEST-UNEXPECTED-FAIL | valgrind-test | non-zero exit code "
+                        "from Valgrind: {exitcode}",
+                    )
 
-            httpd.stop()
+                httpd.stop()
 
-        return status
+            return status
--- a/devtools/shared/css/generated/mach_commands.py
+++ b/devtools/shared/css/generated/mach_commands.py
@@ -14,109 +14,111 @@ import logging
 import os
 import runpy
 import sys
 import string
 import subprocess
 from mozbuild import shellutil
 from mozbuild.base import (
     MozbuildObject,
+    MachCommandBase,
     BinaryNotFoundException,
 )
 from mach.decorators import (
+    CommandProvider,
     Command,
 )
 
 
 def resolve_path(start, relativePath):
     """Helper to resolve a path from a start, and a relative path"""
     return os.path.normpath(os.path.join(start, relativePath))
 
 
 def stringify(obj):
     """Helper to stringify to JSON"""
     return json.dumps(obj, sort_keys=True, indent=2, separators=(",", ": "))
 
 
-@Command(
-    "devtools-css-db",
-    category="post-build",
-    description="Rebuild the devtool's static css properties database.",
-)
-def generate_css_db(command_context):
-    """Generate the static css properties database for devtools and write it to file."""
+@CommandProvider
+class MachCommands(MachCommandBase):
+    @Command(
+        "devtools-css-db",
+        category="post-build",
+        description="Rebuild the devtool's static css properties database.",
+    )
+    def generate_css_db(self, command_context):
+        """Generate the static css properties database for devtools and write it to file."""
 
-    print("Re-generating the css properties database...")
-    db = get_properties_db_from_xpcshell(command_context)
-    if not db:
-        return 1
+        print("Re-generating the css properties database...")
+        db = self.get_properties_db_from_xpcshell(command_context)
+        if not db:
+            return 1
 
-    output_template(
-        command_context,
-        {
-            "preferences": stringify(db["preferences"]),
-            "cssProperties": stringify(db["cssProperties"]),
-            "pseudoElements": stringify(db["pseudoElements"]),
-        },
-    )
-
+        self.output_template(
+            command_context,
+            {
+                "preferences": stringify(db["preferences"]),
+                "cssProperties": stringify(db["cssProperties"]),
+                "pseudoElements": stringify(db["pseudoElements"]),
+            },
+        )
 
-def get_properties_db_from_xpcshell(command_context):
-    """Generate the static css properties db for devtools from an xpcshell script."""
-    build = MozbuildObject.from_environment()
+    def get_properties_db_from_xpcshell(self, command_context):
+        """Generate the static css properties db for devtools from an xpcshell script."""
+        build = MozbuildObject.from_environment()
 
-    # Get the paths
-    script_path = resolve_path(
-        command_context.topsrcdir,
-        "devtools/shared/css/generated/generate-properties-db.js",
-    )
-    gre_path = resolve_path(command_context.topobjdir, "dist/bin")
-    browser_path = resolve_path(command_context.topobjdir, "dist/bin/browser")
-    try:
-        xpcshell_path = build.get_binary_path(what="xpcshell")
-    except BinaryNotFoundException as e:
-        command_context.log(
-            logging.ERROR, "devtools-css-db", {"error": str(e)}, "ERROR: {error}"
+        # Get the paths
+        script_path = resolve_path(
+            command_context.topsrcdir,
+            "devtools/shared/css/generated/generate-properties-db.js",
         )
-        command_context.log(
-            logging.INFO, "devtools-css-db", {"help": e.help()}, "{help}"
-        )
-        return None
-
-    print(browser_path)
+        gre_path = resolve_path(command_context.topobjdir, "dist/bin")
+        browser_path = resolve_path(command_context.topobjdir, "dist/bin/browser")
+        try:
+            xpcshell_path = build.get_binary_path(what="xpcshell")
+        except BinaryNotFoundException as e:
+            command_context.log(
+                logging.ERROR, "devtools-css-db", {"error": str(e)}, "ERROR: {error}"
+            )
+            command_context.log(
+                logging.INFO, "devtools-css-db", {"help": e.help()}, "{help}"
+            )
+            return None
 
-    sub_env = dict(os.environ)
-    if sys.platform.startswith("linux"):
-        sub_env["LD_LIBRARY_PATH"] = gre_path
+        print(browser_path)
 
-    # Run the xcpshell script, and set the appdir flag to the browser path so that
-    # we have the proper dependencies for requiring the loader.
-    contents = subprocess.check_output(
-        [xpcshell_path, "-g", gre_path, "-a", browser_path, script_path],
-        env=sub_env,
-    )
-    # Extract just the output between the delimiters as the xpcshell output can
-    # have extra output that we don't want.
-    contents = contents.decode().split("DEVTOOLS_CSS_DB_DELIMITER")[1]
+        sub_env = dict(os.environ)
+        if sys.platform.startswith("linux"):
+            sub_env["LD_LIBRARY_PATH"] = gre_path
 
-    return json.loads(contents)
+        # Run the xcpshell script, and set the appdir flag to the browser path so that
+        # we have the proper dependencies for requiring the loader.
+        contents = subprocess.check_output(
+            [xpcshell_path, "-g", gre_path, "-a", browser_path, script_path],
+            env=sub_env,
+        )
+        # Extract just the output between the delimiters as the xpcshell output can
+        # have extra output that we don't want.
+        contents = contents.decode().split("DEVTOOLS_CSS_DB_DELIMITER")[1]
 
+        return json.loads(contents)
 
-def output_template(command_context, substitutions):
-    """Output a the properties-db.js from a template."""
-    js_template_path = resolve_path(
-        command_context.topsrcdir,
-        "devtools/shared/css/generated/properties-db.js.in",
-    )
-    destination_path = resolve_path(
-        command_context.topsrcdir, "devtools/shared/css/generated/properties-db.js"
-    )
+    def output_template(self, command_context, substitutions):
+        """Output a the properties-db.js from a template."""
+        js_template_path = resolve_path(
+            command_context.topsrcdir,
+            "devtools/shared/css/generated/properties-db.js.in",
+        )
+        destination_path = resolve_path(
+            command_context.topsrcdir, "devtools/shared/css/generated/properties-db.js"
+        )
 
-    with open(js_template_path, "rb") as handle:
-        js_template = handle.read().decode()
+        with open(js_template_path, "rb") as handle:
+            js_template = handle.read().decode()
 
-    preamble = "/* THIS IS AN AUTOGENERATED FILE.  DO NOT EDIT */\n\n"
-    contents = string.Template(js_template).substitute(substitutions)
+        preamble = "/* THIS IS AN AUTOGENERATED FILE.  DO NOT EDIT */\n\n"
+        contents = string.Template(js_template).substitute(substitutions)
 
-    with open(destination_path, "wb") as destination:
-        destination.write(preamble.encode() + contents.encode())
+        with open(destination_path, "wb") as destination:
+            destination.write(preamble.encode() + contents.encode())
 
-    print("The database was successfully generated at " + destination_path)
+        print("The database was successfully generated at " + destination_path)
--- a/dom/bindings/mach_commands.py
+++ b/dom/bindings/mach_commands.py
@@ -4,65 +4,70 @@
 
 from __future__ import absolute_import, unicode_literals
 
 import os
 import sys
 
 from mach.decorators import (
     CommandArgument,
+    CommandProvider,
     Command,
 )
 
+from mozbuild.base import MachCommandBase
 from mozbuild.util import mkdir
 
 
 def get_test_parser():
     import runtests
 
     return runtests.get_parser
 
 
-@Command(
-    "webidl-example",
-    category="misc",
-    description="Generate example files for a WebIDL interface.",
-)
-@CommandArgument(
-    "interface", nargs="+", help="Interface(s) whose examples to generate."
-)
-def webidl_example(command_context, interface):
-    from mozwebidlcodegen import BuildSystemWebIDL
+@CommandProvider
+class WebIDLProvider(MachCommandBase):
+    @Command(
+        "webidl-example",
+        category="misc",
+        description="Generate example files for a WebIDL interface.",
+    )
+    @CommandArgument(
+        "interface", nargs="+", help="Interface(s) whose examples to generate."
+    )
+    def webidl_example(self, command_context, interface):
+        from mozwebidlcodegen import BuildSystemWebIDL
 
-    manager = command_context._spawn(BuildSystemWebIDL).manager
-    for i in interface:
-        manager.generate_example_files(i)
-
+        manager = command_context._spawn(BuildSystemWebIDL).manager
+        for i in interface:
+            manager.generate_example_files(i)
 
-@Command(
-    "webidl-parser-test",
-    category="testing",
-    parser=get_test_parser,
-    description="Run WebIDL tests (Interface Browser parser).",
-)
-def webidl_test(command_context, **kwargs):
-    sys.path.insert(0, os.path.join(command_context.topsrcdir, "other-licenses", "ply"))
+    @Command(
+        "webidl-parser-test",
+        category="testing",
+        parser=get_test_parser,
+        description="Run WebIDL tests (Interface Browser parser).",
+    )
+    def webidl_test(self, command_context, **kwargs):
+        sys.path.insert(
+            0, os.path.join(command_context.topsrcdir, "other-licenses", "ply")
+        )
 
-    # Ensure the topobjdir exists. On a Taskcluster test run there won't be
-    # an objdir yet.
-    mkdir(command_context.topobjdir)
+        # Ensure the topobjdir exists. On a Taskcluster test run there won't be
+        # an objdir yet.
+        mkdir(command_context.topobjdir)
 
-    # Make sure we drop our cached grammar bits in the objdir, not
-    # wherever we happen to be running from.
-    os.chdir(command_context.topobjdir)
+        # Make sure we drop our cached grammar bits in the objdir, not
+        # wherever we happen to be running from.
+        os.chdir(command_context.topobjdir)
 
-    if kwargs["verbose"] is None:
-        kwargs["verbose"] = False
+        if kwargs["verbose"] is None:
+            kwargs["verbose"] = False
 
-    # Now we're going to create the cached grammar file in the
-    # objdir.  But we're going to try loading it as a python
-    # module, so we need to make sure the objdir is in our search
-    # path.
-    sys.path.insert(0, command_context.topobjdir)
+        # Now we're going to create the cached grammar file in the
+        # objdir.  But we're going to try loading it as a python
+        # module, so we need to make sure the objdir is in our search
+        # path.
+        sys.path.insert(0, command_context.topobjdir)
 
-    import runtests
+        import runtests
 
-    return runtests.run_tests(kwargs["tests"], verbose=kwargs["verbose"])
+        return runtests.run_tests(kwargs["tests"], verbose=kwargs["verbose"])
--- a/js/src/devtools/rootAnalysis/mach_commands.py
+++ b/js/src/devtools/rootAnalysis/mach_commands.py
@@ -11,22 +11,24 @@ import argparse
 import json
 import os
 import sys
 import textwrap
 
 from mach.base import FailedCommandError, MachError
 from mach.decorators import (
     CommandArgument,
+    CommandProvider,
     Command,
     SubCommand,
 )
 from mach.registrar import Registrar
 
 from mozbuild.mozconfig import MozconfigLoader
+from mozbuild.base import MachCommandBase
 
 # Command files like this are listed in build/mach_initialize.py in alphabetical
 # order, but we need to access commands earlier in the sorted order to grab
 # their arguments. Force them to load now.
 import mozbuild.artifact_commands  # NOQA: F401
 import mozbuild.build_commands  # NOQA: F401
 
 
@@ -50,370 +52,360 @@ def inherit_command_args(command, subcom
                 )
             )
         func._mach_command.arguments.extend(handler.arguments)
         return func
 
     return inherited
 
 
-def state_dir():
-    return os.environ.get("MOZBUILD_STATE_PATH", os.path.expanduser("~/.mozbuild"))
-
-
-def tools_dir():
-    if os.environ.get("MOZ_FETCHES_DIR"):
-        # In automation, tools are provided by toolchain dependencies.
-        return os.path.join(os.environ["HOME"], os.environ["MOZ_FETCHES_DIR"])
-
-    # In development, `mach hazard bootstrap` installs the tools separately
-    # to avoid colliding with the "main" compiler versions, which can
-    # change separately (and the precompiled sixgill and compiler version
-    # must match exactly).
-    return os.path.join(state_dir(), "hazard-tools")
-
-
-def sixgill_dir():
-    return os.path.join(tools_dir(), "sixgill")
-
-
-def gcc_dir():
-    return os.path.join(tools_dir(), "gcc")
-
-
-def script_dir(command_context):
-    return os.path.join(command_context.topsrcdir, "js/src/devtools/rootAnalysis")
+@CommandProvider
+class MachCommands(MachCommandBase):
+    def state_dir(self):
+        return os.environ.get("MOZBUILD_STATE_PATH", os.path.expanduser("~/.mozbuild"))
 
-
-def get_work_dir(command_context, application, given):
-    if given is not None:
-        return given
-    return os.path.join(command_context.topsrcdir, "haz-" + application)
-
-
-def ensure_dir_exists(dir):
-    os.makedirs(dir, exist_ok=True)
-    return dir
-
+    def tools_dir(self):
+        if os.environ.get("MOZ_FETCHES_DIR"):
+            # In automation, tools are provided by toolchain dependencies.
+            return os.path.join(os.environ["HOME"], os.environ["MOZ_FETCHES_DIR"])
 
-# Force the use of hazard-compatible installs of tools.
-def setup_env_for_tools(env):
-    gccbin = os.path.join(gcc_dir(), "bin")
-    env["CC"] = os.path.join(gccbin, "gcc")
-    env["CXX"] = os.path.join(gccbin, "g++")
-    env["PATH"] = "{sixgill_dir}/usr/bin:{gccbin}:{PATH}".format(
-        sixgill_dir=sixgill_dir(), gccbin=gccbin, PATH=env["PATH"]
-    )
+        # In development, `mach hazard bootstrap` installs the tools separately
+        # to avoid colliding with the "main" compiler versions, which can
+        # change separately (and the precompiled sixgill and compiler version
+        # must match exactly).
+        return os.path.join(self.state_dir(), "hazard-tools")
 
-
-def setup_env_for_shell(env, shell):
-    """Add JS shell directory to dynamic lib search path"""
-    for var in ("LD_LIBRARY_PATH", "DYLD_LIBRARY_PATH"):
-        env[var] = ":".join(p for p in (env.get(var), os.path.dirname(shell)) if p)
-
+    def sixgill_dir(self):
+        return os.path.join(self.tools_dir(), "sixgill")
 
-@Command(
-    "hazards",
-    category="build",
-    order="declaration",
-    description="Commands for running the static analysis for GC rooting hazards",
-)
-def hazards(command_context):
-    """Commands related to performing the GC rooting hazard analysis"""
-    print("See `mach hazards --help` for a list of subcommands")
-
+    def gcc_dir(self):
+        return os.path.join(self.tools_dir(), "gcc")
 
-@inherit_command_args("artifact", "toolchain")
-@SubCommand(
-    "hazards",
-    "bootstrap",
-    description="Install prerequisites for the hazard analysis",
-)
-def bootstrap(command_context, **kwargs):
-    orig_dir = os.getcwd()
-    os.chdir(ensure_dir_exists(tools_dir()))
-    try:
-        kwargs["from_build"] = ("linux64-gcc-sixgill", "linux64-gcc-9")
-        command_context._mach_context.commands.dispatch(
-            "artifact", command_context._mach_context, subcommand="toolchain", **kwargs
-        )
-    finally:
-        os.chdir(orig_dir)
+    def script_dir(self, command_context):
+        return os.path.join(command_context.topsrcdir, "js/src/devtools/rootAnalysis")
 
+    def get_work_dir(self, command_context, application, given):
+        if given is not None:
+            return given
+        return os.path.join(command_context.topsrcdir, "haz-" + application)
 
-@inherit_command_args("build")
-@SubCommand(
-    "hazards", "build-shell", description="Build a shell for the hazard analysis"
-)
-@CommandArgument(
-    "--mozconfig",
-    default=None,
-    metavar="FILENAME",
-    help="Build with the given mozconfig.",
-)
-def build_shell(command_context, **kwargs):
-    """Build a JS shell to use to run the rooting hazard analysis."""
-    # The JS shell requires some specific configuration settings to execute
-    # the hazard analysis code, and configuration is done via mozconfig.
-    # Subprocesses find MOZCONFIG in the environment, so we can't just
-    # modify the settings in this process's loaded version. Pass it through
-    # the environment.
+    def ensure_dir_exists(self, dir):
+        os.makedirs(dir, exist_ok=True)
+        return dir
 
-    default_mozconfig = "js/src/devtools/rootAnalysis/mozconfig.haz_shell"
-    mozconfig_path = (
-        kwargs.pop("mozconfig", None)
-        or os.environ.get("MOZCONFIG")
-        or default_mozconfig
-    )
-    mozconfig_path = os.path.join(command_context.topsrcdir, mozconfig_path)
-    loader = MozconfigLoader(command_context.topsrcdir)
-    mozconfig = loader.read_mozconfig(mozconfig_path)
-
-    # Validate the mozconfig settings in case the user overrode the default.
-    configure_args = mozconfig["configure_args"]
-    if "--enable-ctypes" not in configure_args:
-        raise FailedCommandError(
-            "ctypes required in hazard JS shell, mozconfig=" + mozconfig_path
+    # Force the use of hazard-compatible installs of tools.
+    def setup_env_for_tools(self, env):
+        gccbin = os.path.join(self.gcc_dir(), "bin")
+        env["CC"] = os.path.join(gccbin, "gcc")
+        env["CXX"] = os.path.join(gccbin, "g++")
+        env["PATH"] = "{sixgill_dir}/usr/bin:{gccbin}:{PATH}".format(
+            sixgill_dir=self.sixgill_dir(), gccbin=gccbin, PATH=env["PATH"]
         )
 
-    # Transmit the mozconfig location to build subprocesses.
-    os.environ["MOZCONFIG"] = mozconfig_path
+    def setup_env_for_shell(self, env, shell):
+        """Add JS shell directory to dynamic lib search path"""
+        for var in ("LD_LIBRARY_PATH", "DYLD_LIBRARY_PATH"):
+            env[var] = ":".join(p for p in (env.get(var), os.path.dirname(shell)) if p)
 
-    setup_env_for_tools(os.environ)
+    @Command(
+        "hazards",
+        category="build",
+        order="declaration",
+        description="Commands for running the static analysis for GC rooting hazards",
+    )
+    def hazards(self, command_context):
+        """Commands related to performing the GC rooting hazard analysis"""
+        print("See `mach hazards --help` for a list of subcommands")
 
-    # Set a default objdir for the shell, for developer builds.
-    os.environ.setdefault(
-        "MOZ_OBJDIR", os.path.join(command_context.topsrcdir, "obj-haz-shell")
+    @inherit_command_args("artifact", "toolchain")
+    @SubCommand(
+        "hazards",
+        "bootstrap",
+        description="Install prerequisites for the hazard analysis",
     )
-
-    return command_context._mach_context.commands.dispatch(
-        "build", command_context._mach_context, **kwargs
-    )
-
+    def bootstrap(self, command_context, **kwargs):
+        orig_dir = os.getcwd()
+        os.chdir(self.ensure_dir_exists(self.tools_dir()))
+        try:
+            kwargs["from_build"] = ("linux64-gcc-sixgill", "linux64-gcc-9")
+            command_context._mach_context.commands.dispatch(
+                "artifact",
+                command_context._mach_context,
+                subcommand="toolchain",
+                **kwargs
+            )
+        finally:
+            os.chdir(orig_dir)
 
-def read_json_file(filename):
-    with open(filename) as fh:
-        return json.load(fh)
-
-
-def ensure_shell(command_context, objdir):
-    if objdir is None:
-        objdir = os.path.join(command_context.topsrcdir, "obj-haz-shell")
+    @inherit_command_args("build")
+    @SubCommand(
+        "hazards", "build-shell", description="Build a shell for the hazard analysis"
+    )
+    @CommandArgument(
+        "--mozconfig",
+        default=None,
+        metavar="FILENAME",
+        help="Build with the given mozconfig.",
+    )
+    def build_shell(self, command_context, **kwargs):
+        """Build a JS shell to use to run the rooting hazard analysis."""
+        # The JS shell requires some specific configuration settings to execute
+        # the hazard analysis code, and configuration is done via mozconfig.
+        # Subprocesses find MOZCONFIG in the environment, so we can't just
+        # modify the settings in this process's loaded version. Pass it through
+        # the environment.
 
-    try:
-        binaries = read_json_file(os.path.join(objdir, "binaries.json"))
-        info = [b for b in binaries["programs"] if b["program"] == "js"][0]
-        return os.path.join(objdir, info["install_target"], "js")
-    except (OSError, KeyError):
-        raise FailedCommandError(
-            """\
-no shell found in %s -- must build the JS shell with `mach hazards build-shell` first"""
-            % objdir
+        default_mozconfig = "js/src/devtools/rootAnalysis/mozconfig.haz_shell"
+        mozconfig_path = (
+            kwargs.pop("mozconfig", None)
+            or os.environ.get("MOZCONFIG")
+            or default_mozconfig
+        )
+        mozconfig_path = os.path.join(command_context.topsrcdir, mozconfig_path)
+        loader = MozconfigLoader(command_context.topsrcdir)
+        mozconfig = loader.read_mozconfig(mozconfig_path)
+
+        # Validate the mozconfig settings in case the user overrode the default.
+        configure_args = mozconfig["configure_args"]
+        if "--enable-ctypes" not in configure_args:
+            raise FailedCommandError(
+                "ctypes required in hazard JS shell, mozconfig=" + mozconfig_path
+            )
+
+        # Transmit the mozconfig location to build subprocesses.
+        os.environ["MOZCONFIG"] = mozconfig_path
+
+        self.setup_env_for_tools(os.environ)
+
+        # Set a default objdir for the shell, for developer builds.
+        os.environ.setdefault(
+            "MOZ_OBJDIR", os.path.join(command_context.topsrcdir, "obj-haz-shell")
+        )
+
+        return command_context._mach_context.commands.dispatch(
+            "build", command_context._mach_context, **kwargs
         )
 
+    def read_json_file(self, filename):
+        with open(filename) as fh:
+            return json.load(fh)
 
-@inherit_command_args("build")
-@SubCommand(
-    "hazards",
-    "gather",
-    description="Gather analysis data by compiling the given application",
-)
-@CommandArgument(
-    "--application", default="browser", help="Build the given application."
-)
-@CommandArgument(
-    "--haz-objdir", default=None, help="Write object files to this directory."
-)
-@CommandArgument(
-    "--work-dir", default=None, help="Directory for output and working files."
-)
-def gather_hazard_data(command_context, **kwargs):
-    """Gather analysis information by compiling the tree"""
-    application = kwargs["application"]
-    objdir = kwargs["haz_objdir"]
-    if objdir is None:
-        objdir = os.environ.get("HAZ_OBJDIR")
-    if objdir is None:
-        objdir = os.path.join(command_context.topsrcdir, "obj-analyzed-" + application)
+    def ensure_shell(self, command_context, objdir):
+        if objdir is None:
+            objdir = os.path.join(command_context.topsrcdir, "obj-haz-shell")
+
+        try:
+            binaries = self.read_json_file(os.path.join(objdir, "binaries.json"))
+            info = [b for b in binaries["programs"] if b["program"] == "js"][0]
+            return os.path.join(objdir, info["install_target"], "js")
+        except (OSError, KeyError):
+            raise FailedCommandError(
+                """\
+no shell found in %s -- must build the JS shell with `mach hazards build-shell` first"""
+                % objdir
+            )
 
-    work_dir = get_work_dir(command_context, application, kwargs["work_dir"])
-    ensure_dir_exists(work_dir)
-    with open(os.path.join(work_dir, "defaults.py"), "wt") as fh:
-        data = textwrap.dedent(
-            """\
-            analysis_scriptdir = "{script_dir}"
-            objdir = "{objdir}"
-            source = "{srcdir}"
-            sixgill = "{sixgill_dir}/usr/libexec/sixgill"
-            sixgill_bin = "{sixgill_dir}/usr/bin"
-            gcc_bin = "{gcc_dir}/bin"
-        """
-        ).format(
-            script_dir=script_dir(command_context),
-            objdir=objdir,
-            srcdir=command_context.topsrcdir,
-            sixgill_dir=sixgill_dir(),
-            gcc_dir=gcc_dir(),
-        )
-        fh.write(data)
+    @inherit_command_args("build")
+    @SubCommand(
+        "hazards",
+        "gather",
+        description="Gather analysis data by compiling the given application",
+    )
+    @CommandArgument(
+        "--application", default="browser", help="Build the given application."
+    )
+    @CommandArgument(
+        "--haz-objdir", default=None, help="Write object files to this directory."
+    )
+    @CommandArgument(
+        "--work-dir", default=None, help="Directory for output and working files."
+    )
+    def gather_hazard_data(self, command_context, **kwargs):
+        """Gather analysis information by compiling the tree"""
+        application = kwargs["application"]
+        objdir = kwargs["haz_objdir"]
+        if objdir is None:
+            objdir = os.environ.get("HAZ_OBJDIR")
+        if objdir is None:
+            objdir = os.path.join(
+                command_context.topsrcdir, "obj-analyzed-" + application
+            )
+
+        work_dir = self.get_work_dir(command_context, application, kwargs["work_dir"])
+        self.ensure_dir_exists(work_dir)
+        with open(os.path.join(work_dir, "defaults.py"), "wt") as fh:
+            data = textwrap.dedent(
+                """\
+                analysis_scriptdir = "{script_dir}"
+                objdir = "{objdir}"
+                source = "{srcdir}"
+                sixgill = "{sixgill_dir}/usr/libexec/sixgill"
+                sixgill_bin = "{sixgill_dir}/usr/bin"
+                gcc_bin = "{gcc_dir}/bin"
+            """
+            ).format(
+                script_dir=self.script_dir(command_context),
+                objdir=objdir,
+                srcdir=command_context.topsrcdir,
+                sixgill_dir=self.sixgill_dir(),
+                gcc_dir=self.gcc_dir(),
+            )
+            fh.write(data)
 
-    buildscript = " ".join(
-        [
-            command_context.topsrcdir + "/mach hazards compile",
-            "--job-size=3.0",  # Conservatively estimate 3GB/process
-            "--application=" + application,
-            "--haz-objdir=" + objdir,
+        buildscript = " ".join(
+            [
+                command_context.topsrcdir + "/mach hazards compile",
+                "--job-size=3.0",  # Conservatively estimate 3GB/process
+                "--application=" + application,
+                "--haz-objdir=" + objdir,
+            ]
+        )
+        args = [
+            sys.executable,
+            os.path.join(self.script_dir(command_context), "analyze.py"),
+            "dbs",
+            "--upto",
+            "dbs",
+            "-v",
+            "--buildcommand=" + buildscript,
         ]
+
+        return command_context.run_process(args=args, cwd=work_dir, pass_thru=True)
+
+    @inherit_command_args("build")
+    @SubCommand("hazards", "compile", description=argparse.SUPPRESS)
+    @CommandArgument(
+        "--mozconfig",
+        default=None,
+        metavar="FILENAME",
+        help="Build with the given mozconfig.",
     )
-    args = [
-        sys.executable,
-        os.path.join(script_dir(command_context), "analyze.py"),
-        "dbs",
-        "--upto",
-        "dbs",
-        "-v",
-        "--buildcommand=" + buildscript,
-    ]
+    @CommandArgument(
+        "--application", default="browser", help="Build the given application."
+    )
+    @CommandArgument(
+        "--haz-objdir",
+        default=os.environ.get("HAZ_OBJDIR"),
+        help="Write object files to this directory.",
+    )
+    def inner_compile(self, command_context, **kwargs):
+        """Build a source tree and gather analysis information while running
+        under the influence of the analysis collection server."""
 
-    return command_context.run_process(args=args, cwd=work_dir, pass_thru=True)
-
+        env = os.environ
 
-@inherit_command_args("build")
-@SubCommand("hazards", "compile", description=argparse.SUPPRESS)
-@CommandArgument(
-    "--mozconfig",
-    default=None,
-    metavar="FILENAME",
-    help="Build with the given mozconfig.",
-)
-@CommandArgument(
-    "--application", default="browser", help="Build the given application."
-)
-@CommandArgument(
-    "--haz-objdir",
-    default=os.environ.get("HAZ_OBJDIR"),
-    help="Write object files to this directory.",
-)
-def inner_compile(command_context, **kwargs):
-    """Build a source tree and gather analysis information while running
-    under the influence of the analysis collection server."""
+        # Check whether we are running underneath the manager (and therefore
+        # have a server to talk to).
+        if "XGILL_CONFIG" not in env:
+            raise Exception(
+                "no sixgill manager detected. `mach hazards compile` "
+                + "should only be run from `mach hazards gather`"
+            )
+
+        app = kwargs.pop("application")
+        default_mozconfig = "js/src/devtools/rootAnalysis/mozconfig.%s" % app
+        mozconfig_path = (
+            kwargs.pop("mozconfig", None) or env.get("MOZCONFIG") or default_mozconfig
+        )
+        mozconfig_path = os.path.join(command_context.topsrcdir, mozconfig_path)
+
+        # Validate the mozconfig.
 
-    env = os.environ
+        # Require an explicit --enable-application=APP (even if you just
+        # want to build the default browser application.)
+        loader = MozconfigLoader(command_context.topsrcdir)
+        mozconfig = loader.read_mozconfig(mozconfig_path)
+        configure_args = mozconfig["configure_args"]
+        if "--enable-application=%s" % app not in configure_args:
+            raise Exception("mozconfig %s builds wrong project" % mozconfig_path)
+        if not any("--with-compiler-wrapper" in a for a in configure_args):
+            raise Exception("mozconfig must wrap compiles")
+
+        # Communicate mozconfig to build subprocesses.
+        env["MOZCONFIG"] = os.path.join(command_context.topsrcdir, mozconfig_path)
 
-    # Check whether we are running underneath the manager (and therefore
-    # have a server to talk to).
-    if "XGILL_CONFIG" not in env:
-        raise Exception(
-            "no sixgill manager detected. `mach hazards compile` "
-            + "should only be run from `mach hazards gather`"
+        # hazard mozconfigs need to find binaries in .mozbuild
+        env["MOZBUILD_STATE_PATH"] = self.state_dir()
+
+        # Suppress the gathering of sources, to save disk space and memory.
+        env["XGILL_NO_SOURCE"] = "1"
+
+        self.setup_env_for_tools(env)
+
+        if "haz_objdir" in kwargs:
+            env["MOZ_OBJDIR"] = kwargs.pop("haz_objdir")
+
+        return command_context._mach_context.commands.dispatch(
+            "build", command_context._mach_context, **kwargs
         )
 
-    app = kwargs.pop("application")
-    default_mozconfig = "js/src/devtools/rootAnalysis/mozconfig.%s" % app
-    mozconfig_path = (
-        kwargs.pop("mozconfig", None) or env.get("MOZCONFIG") or default_mozconfig
+    @SubCommand(
+        "hazards", "analyze", description="Analyzed gathered data for rooting hazards"
+    )
+    @CommandArgument(
+        "--application",
+        default="browser",
+        help="Analyze the output for the given application.",
     )
-    mozconfig_path = os.path.join(command_context.topsrcdir, mozconfig_path)
-
-    # Validate the mozconfig.
-
-    # Require an explicit --enable-application=APP (even if you just
-    # want to build the default browser application.)
-    loader = MozconfigLoader(command_context.topsrcdir)
-    mozconfig = loader.read_mozconfig(mozconfig_path)
-    configure_args = mozconfig["configure_args"]
-    if "--enable-application=%s" % app not in configure_args:
-        raise Exception("mozconfig %s builds wrong project" % mozconfig_path)
-    if not any("--with-compiler-wrapper" in a for a in configure_args):
-        raise Exception("mozconfig must wrap compiles")
-
-    # Communicate mozconfig to build subprocesses.
-    env["MOZCONFIG"] = os.path.join(command_context.topsrcdir, mozconfig_path)
-
-    # hazard mozconfigs need to find binaries in .mozbuild
-    env["MOZBUILD_STATE_PATH"] = state_dir()
-
-    # Suppress the gathering of sources, to save disk space and memory.
-    env["XGILL_NO_SOURCE"] = "1"
-
-    setup_env_for_tools(env)
-
-    if "haz_objdir" in kwargs:
-        env["MOZ_OBJDIR"] = kwargs.pop("haz_objdir")
-
-    return command_context._mach_context.commands.dispatch(
-        "build", command_context._mach_context, **kwargs
+    @CommandArgument(
+        "--shell-objdir",
+        default=None,
+        help="objdir containing the optimized JS shell for running the analysis.",
+    )
+    @CommandArgument(
+        "--work-dir", default=None, help="Directory for output and working files."
     )
+    @CommandArgument(
+        "extra",
+        nargs=argparse.REMAINDER,
+        help="Remaining non-optional arguments to analyze.py script",
+    )
+    def analyze(self, command_context, application, shell_objdir, work_dir, extra):
+        """Analyzed gathered data for rooting hazards"""
 
+        shell = self.ensure_shell(command_context, shell_objdir)
+        args = [
+            os.path.join(self.script_dir(command_context), "analyze.py"),
+            "--js",
+            shell,
+        ]
+        if extra:
+            args += extra
+        else:
+            args += [
+                "gcTypes",
+                "-v",
+            ]
 
-@SubCommand(
-    "hazards", "analyze", description="Analyzed gathered data for rooting hazards"
-)
-@CommandArgument(
-    "--application",
-    default="browser",
-    help="Analyze the output for the given application.",
-)
-@CommandArgument(
-    "--shell-objdir",
-    default=None,
-    help="objdir containing the optimized JS shell for running the analysis.",
-)
-@CommandArgument(
-    "--work-dir", default=None, help="Directory for output and working files."
-)
-@CommandArgument(
-    "extra",
-    nargs=argparse.REMAINDER,
-    help="Remaining non-optional arguments to analyze.py script",
-)
-def analyze(command_context, application, shell_objdir, work_dir, extra):
-    """Analyzed gathered data for rooting hazards"""
+        self.setup_env_for_tools(os.environ)
+        self.setup_env_for_shell(os.environ, shell)
+
+        work_dir = self.get_work_dir(command_context, application, work_dir)
+        return command_context.run_process(args=args, cwd=work_dir, pass_thru=True)
 
-    shell = ensure_shell(command_context, shell_objdir)
-    args = [
-        os.path.join(script_dir(command_context), "analyze.py"),
-        "--js",
-        shell,
-    ]
-    if extra:
-        args += extra
-    else:
-        args += [
-            "gcTypes",
+    @SubCommand(
+        "hazards",
+        "self-test",
+        description="Run a self-test to verify hazards are detected",
+    )
+    @CommandArgument(
+        "--shell-objdir",
+        default=None,
+        help="objdir containing the optimized JS shell for running the analysis.",
+    )
+    def self_test(self, command_context, shell_objdir):
+        """Analyzed gathered data for rooting hazards"""
+        shell = self.ensure_shell(command_context, shell_objdir)
+        args = [
+            os.path.join(self.script_dir(command_context), "run-test.py"),
             "-v",
+            "--js",
+            shell,
+            "--sixgill",
+            os.path.join(self.tools_dir(), "sixgill"),
+            "--gccdir",
+            self.gcc_dir(),
         ]
 
-    setup_env_for_tools(os.environ)
-    setup_env_for_shell(os.environ, shell)
-
-    work_dir = get_work_dir(command_context, application, work_dir)
-    return command_context.run_process(args=args, cwd=work_dir, pass_thru=True)
-
+        self.setup_env_for_tools(os.environ)
+        self.setup_env_for_shell(os.environ, shell)
 
-@SubCommand(
-    "hazards",
-    "self-test",
-    description="Run a self-test to verify hazards are detected",
-)
-@CommandArgument(
-    "--shell-objdir",
-    default=None,
-    help="objdir containing the optimized JS shell for running the analysis.",
-)
-def self_test(command_context, shell_objdir):
-    """Analyzed gathered data for rooting hazards"""
-    shell = ensure_shell(command_context, shell_objdir)
-    args = [
-        os.path.join(script_dir(command_context), "run-test.py"),
-        "-v",
-        "--js",
-        shell,
-        "--sixgill",
-        os.path.join(tools_dir(), "sixgill"),
-        "--gccdir",
-        gcc_dir(),
-    ]
-
-    setup_env_for_tools(os.environ)
-    setup_env_for_shell(os.environ, shell)
-
-    return command_context.run_process(args=args, pass_thru=True)
+        return command_context.run_process(args=args, pass_thru=True)
--- a/layout/tools/reftest/mach_commands.py
+++ b/layout/tools/reftest/mach_commands.py
@@ -5,21 +5,23 @@
 from __future__ import absolute_import, unicode_literals, print_function
 
 import os
 import re
 import sys
 from argparse import Namespace
 
 from mozbuild.base import (
+    MachCommandBase,
     MachCommandConditions as conditions,
     MozbuildObject,
 )
 
 from mach.decorators import (
+    CommandProvider,
     Command,
 )
 
 
 parser = None
 
 
 class ReftestRunner(MozbuildObject):
@@ -221,81 +223,82 @@ def get_parser():
     build_obj = MozbuildObject.from_environment(cwd=here)
     if conditions.is_android(build_obj):
         parser = reftestcommandline.RemoteArgumentsParser()
     else:
         parser = reftestcommandline.DesktopArgumentsParser()
     return parser
 
 
-@Command(
-    "reftest",
-    category="testing",
-    description="Run reftests (layout and graphics correctness).",
-    parser=get_parser,
-)
-def run_reftest(command_context, **kwargs):
-    kwargs["suite"] = "reftest"
-    return _run_reftest(command_context, **kwargs)
-
+@CommandProvider
+class MachCommands(MachCommandBase):
+    @Command(
+        "reftest",
+        category="testing",
+        description="Run reftests (layout and graphics correctness).",
+        parser=get_parser,
+    )
+    def run_reftest(self, command_context, **kwargs):
+        kwargs["suite"] = "reftest"
+        return self._run_reftest(command_context, **kwargs)
 
-@Command(
-    "jstestbrowser",
-    category="testing",
-    description="Run js/src/tests in the browser.",
-    parser=get_parser,
-)
-def run_jstestbrowser(command_context, **kwargs):
-    if "--enable-js-shell" not in command_context.mozconfig["configure_args"]:
-        raise Exception(
-            "jstestbrowser requires --enable-js-shell be specified in mozconfig."
+    @Command(
+        "jstestbrowser",
+        category="testing",
+        description="Run js/src/tests in the browser.",
+        parser=get_parser,
+    )
+    def run_jstestbrowser(self, command_context, **kwargs):
+        if "--enable-js-shell" not in command_context.mozconfig["configure_args"]:
+            raise Exception(
+                "jstestbrowser requires --enable-js-shell be specified in mozconfig."
+            )
+        command_context._mach_context.commands.dispatch(
+            "build", command_context._mach_context, what=["stage-jstests"]
         )
-    command_context._mach_context.commands.dispatch(
-        "build", command_context._mach_context, what=["stage-jstests"]
-    )
-    kwargs["suite"] = "jstestbrowser"
-    return _run_reftest(command_context, **kwargs)
-
+        kwargs["suite"] = "jstestbrowser"
+        return self._run_reftest(command_context, **kwargs)
 
-@Command(
-    "crashtest",
-    category="testing",
-    description="Run crashtests (Check if crashes on a page).",
-    parser=get_parser,
-)
-def run_crashtest(command_context, **kwargs):
-    kwargs["suite"] = "crashtest"
-    return _run_reftest(command_context, **kwargs)
+    @Command(
+        "crashtest",
+        category="testing",
+        description="Run crashtests (Check if crashes on a page).",
+        parser=get_parser,
+    )
+    def run_crashtest(self, command_context, **kwargs):
+        kwargs["suite"] = "crashtest"
+        return self._run_reftest(command_context, **kwargs)
 
+    def _run_reftest(self, command_context, **kwargs):
+        kwargs["topsrcdir"] = command_context.topsrcdir
+        process_test_objects(kwargs)
+        reftest = command_context._spawn(ReftestRunner)
+        # Unstructured logging must be enabled prior to calling
+        # adb which uses an unstructured logger in its constructor.
+        reftest.log_manager.enable_unstructured()
+        if conditions.is_android(command_context):
+            from mozrunner.devices.android_device import (
+                verify_android_device,
+                InstallIntent,
+            )
 
-def _run_reftest(command_context, **kwargs):
-    kwargs["topsrcdir"] = command_context.topsrcdir
-    process_test_objects(kwargs)
-    reftest = command_context._spawn(ReftestRunner)
-    # Unstructured logging must be enabled prior to calling
-    # adb which uses an unstructured logger in its constructor.
-    reftest.log_manager.enable_unstructured()
-    if conditions.is_android(command_context):
-        from mozrunner.devices.android_device import (
-            verify_android_device,
-            InstallIntent,
-        )
-
-        install = InstallIntent.NO if kwargs.get("no_install") else InstallIntent.YES
-        verbose = False
-        if (
-            kwargs.get("log_mach_verbose")
-            or kwargs.get("log_tbpl_level") == "debug"
-            or kwargs.get("log_mach_level") == "debug"
-            or kwargs.get("log_raw_level") == "debug"
-        ):
-            verbose = True
-        verify_android_device(
-            command_context,
-            install=install,
-            xre=True,
-            network=True,
-            app=kwargs["app"],
-            device_serial=kwargs["deviceSerial"],
-            verbose=verbose,
-        )
-        return reftest.run_android_test(**kwargs)
-    return reftest.run_desktop_test(**kwargs)
+            install = (
+                InstallIntent.NO if kwargs.get("no_install") else InstallIntent.YES
+            )
+            verbose = False
+            if (
+                kwargs.get("log_mach_verbose")
+                or kwargs.get("log_tbpl_level") == "debug"
+                or kwargs.get("log_mach_level") == "debug"
+                or kwargs.get("log_raw_level") == "debug"
+            ):
+                verbose = True
+            verify_android_device(
+                command_context,
+                install=install,
+                xre=True,
+                network=True,
+                app=kwargs["app"],
+                device_serial=kwargs["deviceSerial"],
+                verbose=verbose,
+            )
+            return reftest.run_android_test(**kwargs)
+        return reftest.run_desktop_test(**kwargs)
--- a/layout/tools/reftest/mach_test_package_commands.py
+++ b/layout/tools/reftest/mach_test_package_commands.py
@@ -5,18 +5,20 @@
 from __future__ import absolute_import, unicode_literals, print_function
 
 import os
 import sys
 from argparse import Namespace
 from functools import partial
 
 from mach.decorators import (
+    CommandProvider,
     Command,
 )
+from mozbuild.base import MachCommandBase
 
 here = os.path.abspath(os.path.dirname(__file__))
 logger = None
 
 
 def run_reftest(context, **kwargs):
     import mozinfo
     from mozlog.commandline import setup_logging
@@ -100,18 +102,20 @@ def setup_argument_parser():
     if mozinfo.info.get("buildapp") == "mobile/android":
         parser = reftestcommandline.RemoteArgumentsParser()
     else:
         parser = reftestcommandline.DesktopArgumentsParser()
     add_global_arguments(parser)
     return parser
 
 
-@Command(
-    "reftest",
-    category="testing",
-    description="Run the reftest harness.",
-    parser=setup_argument_parser,
-)
-def reftest(command_context, **kwargs):
-    command_context._mach_context.activate_mozharness_venv()
-    kwargs["suite"] = "reftest"
-    return run_reftest(command_context._mach_context, **kwargs)
+@CommandProvider
+class ReftestCommands(MachCommandBase):
+    @Command(
+        "reftest",
+        category="testing",
+        description="Run the reftest harness.",
+        parser=setup_argument_parser,
+    )
+    def reftest(self, command_context, **kwargs):
+        command_context._mach_context.activate_mozharness_venv()
+        kwargs["suite"] = "reftest"
+        return run_reftest(command_context._mach_context, **kwargs)
--- a/mobile/android/mach_commands.py
+++ b/mobile/android/mach_commands.py
@@ -6,25 +6,27 @@ from __future__ import absolute_import, 
 
 import argparse
 import logging
 import os
 
 import mozpack.path as mozpath
 
 from mozbuild.base import (
+    MachCommandBase,
     MachCommandConditions as conditions,
 )
 
 from mozbuild.shellutil import (
     split as shell_split,
 )
 
 from mach.decorators import (
     CommandArgument,
+    CommandProvider,
     Command,
     SubCommand,
 )
 
 
 # Mach's conditions facility doesn't support subcommands.  Print a
 # deprecation message ourselves instead.
 LINT_DEPRECATION_MESSAGE = """
@@ -42,560 +44,569 @@ def REMOVED(cls):
     """Command no longer exists! Use the Gradle configuration rooted in the top source directory
     instead.
 
     See https://developer.mozilla.org/en-US/docs/Simple_Firefox_for_Android_build#Developing_Firefox_for_Android_in_Android_Studio_or_IDEA_IntelliJ.  # NOQA: E501
     """
     return False
 
 
-@Command(
-    "android",
-    category="devenv",
-    description="Run Android-specific commands.",
-    conditions=[conditions.is_android],
-)
-def android(command_context):
-    pass
-
-
-@SubCommand(
-    "android",
-    "assemble-app",
-    """Assemble Firefox for Android.
-    See http://firefox-source-docs.mozilla.org/build/buildsystem/toolchains.html#firefox-for-android-with-gradle""",  # NOQA: E501
-)
-@CommandArgument("args", nargs=argparse.REMAINDER)
-def android_assemble_app(command_context, args):
-    ret = gradle(
-        command_context,
-        command_context.substs["GRADLE_ANDROID_APP_TASKS"] + ["-x", "lint"] + args,
-        verbose=True,
+@CommandProvider
+class MachCommands(MachCommandBase):
+    @Command(
+        "android",
+        category="devenv",
+        description="Run Android-specific commands.",
+        conditions=[conditions.is_android],
     )
-
-    return ret
-
-
-@SubCommand(
-    "android",
-    "generate-sdk-bindings",
-    """Generate SDK bindings used when building GeckoView.""",
-)
-@CommandArgument(
-    "inputs",
-    nargs="+",
-    help="config files, like [/path/to/ClassName-classes.txt]+",
-)
-@CommandArgument("args", nargs=argparse.REMAINDER)
-def android_generate_sdk_bindings(command_context, inputs, args):
-    import itertools
-
-    def stem(input):
-        # Turn "/path/to/ClassName-classes.txt" into "ClassName".
-        return os.path.basename(input).rsplit("-classes.txt", 1)[0]
-
-    bindings_inputs = list(itertools.chain(*((input, stem(input)) for input in inputs)))
-    bindings_args = "-Pgenerate_sdk_bindings_args={}".format(";".join(bindings_inputs))
-
-    ret = gradle(
-        command_context,
-        command_context.substs["GRADLE_ANDROID_GENERATE_SDK_BINDINGS_TASKS"]
-        + [bindings_args]
-        + args,
-        verbose=True,
-    )
-
-    return ret
-
+    def android(self, command_context):
+        pass
 
-@SubCommand(
-    "android",
-    "generate-generated-jni-wrappers",
-    """Generate GeckoView JNI wrappers used when building GeckoView.""",
-)
-@CommandArgument("args", nargs=argparse.REMAINDER)
-def android_generate_generated_jni_wrappers(command_context, args):
-    ret = gradle(
-        command_context,
-        command_context.substs["GRADLE_ANDROID_GENERATE_GENERATED_JNI_WRAPPERS_TASKS"]
-        + args,
-        verbose=True,
+    @SubCommand(
+        "android",
+        "assemble-app",
+        """Assemble Firefox for Android.
+        See http://firefox-source-docs.mozilla.org/build/buildsystem/toolchains.html#firefox-for-android-with-gradle""",  # NOQA: E501
     )
-
-    return ret
-
-
-@SubCommand(
-    "android",
-    "api-lint",
-    """Run Android api-lint.
-REMOVED/DEPRECATED: Use 'mach lint --linter android-api-lint'.""",
-)
-def android_apilint_REMOVED(command_context):
-    print(LINT_DEPRECATION_MESSAGE)
-    return 1
-
+    @CommandArgument("args", nargs=argparse.REMAINDER)
+    def android_assemble_app(self, command_context, args):
+        ret = self.gradle(
+            command_context,
+            command_context.substs["GRADLE_ANDROID_APP_TASKS"] + ["-x", "lint"] + args,
+            verbose=True,
+        )
 
-@SubCommand(
-    "android",
-    "test",
-    """Run Android test.
-REMOVED/DEPRECATED: Use 'mach lint --linter android-test'.""",
-)
-def android_test_REMOVED(command_context):
-    print(LINT_DEPRECATION_MESSAGE)
-    return 1
-
-
-@SubCommand(
-    "android",
-    "lint",
-    """Run Android lint.
-REMOVED/DEPRECATED: Use 'mach lint --linter android-lint'.""",
-)
-def android_lint_REMOVED(command_context):
-    print(LINT_DEPRECATION_MESSAGE)
-    return 1
-
-
-@SubCommand(
-    "android",
-    "checkstyle",
-    """Run Android checkstyle.
-REMOVED/DEPRECATED: Use 'mach lint --linter android-checkstyle'.""",
-)
-def android_checkstyle_REMOVED(command_context):
-    print(LINT_DEPRECATION_MESSAGE)
-    return 1
-
+        return ret
 
-@SubCommand(
-    "android",
-    "gradle-dependencies",
-    """Collect Android Gradle dependencies.
-    See http://firefox-source-docs.mozilla.org/build/buildsystem/toolchains.html#firefox-for-android-with-gradle""",  # NOQA: E501
-)
-@CommandArgument("args", nargs=argparse.REMAINDER)
-def android_gradle_dependencies(command_context, args):
-    # We don't want to gate producing dependency archives on clean
-    # lint or checkstyle, particularly because toolchain versions
-    # can change the outputs for those processes.
-    gradle(
-        command_context,
-        command_context.substs["GRADLE_ANDROID_DEPENDENCIES_TASKS"]
-        + ["--continue"]
-        + args,
-        verbose=True,
+    @SubCommand(
+        "android",
+        "generate-sdk-bindings",
+        """Generate SDK bindings used when building GeckoView.""",
     )
-
-    return 0
-
+    @CommandArgument(
+        "inputs",
+        nargs="+",
+        help="config files, " "like [/path/to/ClassName-classes.txt]+",
+    )
+    @CommandArgument("args", nargs=argparse.REMAINDER)
+    def android_generate_sdk_bindings(self, command_context, inputs, args):
+        import itertools
 
-@SubCommand(
-    "android",
-    "archive-geckoview",
-    """Create GeckoView archives.
-    See http://firefox-source-docs.mozilla.org/build/buildsystem/toolchains.html#firefox-for-android-with-gradle""",  # NOQA: E501
-)
-@CommandArgument("args", nargs=argparse.REMAINDER)
-def android_archive_geckoview(command_context, args):
-    ret = gradle(
-        command_context,
-        command_context.substs["GRADLE_ANDROID_ARCHIVE_GECKOVIEW_TASKS"] + args,
-        verbose=True,
-    )
-
-    return ret
-
+        def stem(input):
+            # Turn "/path/to/ClassName-classes.txt" into "ClassName".
+            return os.path.basename(input).rsplit("-classes.txt", 1)[0]
 
-@SubCommand("android", "build-geckoview_example", """Build geckoview_example """)
-@CommandArgument("args", nargs=argparse.REMAINDER)
-def android_build_geckoview_example(command_context, args):
-    gradle(
-        command_context,
-        command_context.substs["GRADLE_ANDROID_BUILD_GECKOVIEW_EXAMPLE_TASKS"] + args,
-        verbose=True,
-    )
-
-    print(
-        "Execute `mach android install-geckoview_example` "
-        "to push the geckoview_example and test APKs to a device."
-    )
-
-    return 0
-
+        bindings_inputs = list(
+            itertools.chain(*((input, stem(input)) for input in inputs))
+        )
+        bindings_args = "-Pgenerate_sdk_bindings_args={}".format(
+            ";".join(bindings_inputs)
+        )
 
-@SubCommand("android", "install-geckoview_example", """Install geckoview_example """)
-@CommandArgument("args", nargs=argparse.REMAINDER)
-def android_install_geckoview_example(command_context, args):
-    gradle(
-        command_context,
-        command_context.substs["GRADLE_ANDROID_INSTALL_GECKOVIEW_EXAMPLE_TASKS"] + args,
-        verbose=True,
-    )
+        ret = self.gradle(
+            command_context,
+            command_context.substs["GRADLE_ANDROID_GENERATE_SDK_BINDINGS_TASKS"]
+            + [bindings_args]
+            + args,
+            verbose=True,
+        )
 
-    print(
-        "Execute `mach android build-geckoview_example` "
-        "to just build the geckoview_example and test APKs."
-    )
-
-    return 0
-
+        return ret
 
-@SubCommand(
-    "android",
-    "geckoview-docs",
-    """Create GeckoView javadoc and optionally upload to Github""",
-)
-@CommandArgument("--archive", action="store_true", help="Generate a javadoc archive.")
-@CommandArgument(
-    "--upload",
-    metavar="USER/REPO",
-    help="Upload geckoview documentation to Github, using the specified USER/REPO.",
-)
-@CommandArgument(
-    "--upload-branch",
-    metavar="BRANCH[/PATH]",
-    default="gh-pages",
-    help="Use the specified branch/path for documentation commits.",
-)
-@CommandArgument(
-    "--javadoc-path",
-    metavar="/PATH",
-    default="javadoc",
-    help="Use the specified path for javadoc commits.",
-)
-@CommandArgument(
-    "--upload-message",
-    metavar="MSG",
-    default="GeckoView docs upload",
-    help="Use the specified message for commits.",
-)
-def android_geckoview_docs(
-    command_context,
-    archive,
-    upload,
-    upload_branch,
-    javadoc_path,
-    upload_message,
-):
+    @SubCommand(
+        "android",
+        "generate-generated-jni-wrappers",
+        """Generate GeckoView JNI wrappers used when building GeckoView.""",
+    )
+    @CommandArgument("args", nargs=argparse.REMAINDER)
+    def android_generate_generated_jni_wrappers(self, command_context, args):
+        ret = self.gradle(
+            command_context,
+            command_context.substs[
+                "GRADLE_ANDROID_GENERATE_GENERATED_JNI_WRAPPERS_TASKS"
+            ]
+            + args,
+            verbose=True,
+        )
 
-    tasks = (
-        command_context.substs["GRADLE_ANDROID_GECKOVIEW_DOCS_ARCHIVE_TASKS"]
-        if archive or upload
-        else command_context.substs["GRADLE_ANDROID_GECKOVIEW_DOCS_TASKS"]
-    )
-
-    ret = gradle(command_context, tasks, verbose=True)
-    if ret or not upload:
         return ret
 
-    # Upload to Github.
-    fmt = {
-        "level": os.environ.get("MOZ_SCM_LEVEL", "0"),
-        "project": os.environ.get("MH_BRANCH", "unknown"),
-        "revision": os.environ.get("GECKO_HEAD_REV", "tip"),
-    }
-    env = {}
+    @SubCommand(
+        "android",
+        "api-lint",
+        """Run Android api-lint.
+REMOVED/DEPRECATED: Use 'mach lint --linter android-api-lint'.""",
+    )
+    def android_apilint_REMOVED(self, command_context):
+        print(LINT_DEPRECATION_MESSAGE)
+        return 1
+
+    @SubCommand(
+        "android",
+        "test",
+        """Run Android test.
+REMOVED/DEPRECATED: Use 'mach lint --linter android-test'.""",
+    )
+    def android_test_REMOVED(self, command_context):
+        print(LINT_DEPRECATION_MESSAGE)
+        return 1
+
+    @SubCommand(
+        "android",
+        "lint",
+        """Run Android lint.
+REMOVED/DEPRECATED: Use 'mach lint --linter android-lint'.""",
+    )
+    def android_lint_REMOVED(self, command_context):
+        print(LINT_DEPRECATION_MESSAGE)
+        return 1
+
+    @SubCommand(
+        "android",
+        "checkstyle",
+        """Run Android checkstyle.
+REMOVED/DEPRECATED: Use 'mach lint --linter android-checkstyle'.""",
+    )
+    def android_checkstyle_REMOVED(self, command_context):
+        print(LINT_DEPRECATION_MESSAGE)
+        return 1
 
-    # In order to push to GitHub from TaskCluster, we store a private key
-    # in the TaskCluster secrets store in the format {"content": "<KEY>"},
-    # and the corresponding public key as a writable deploy key for the
-    # destination repo on GitHub.
-    secret = os.environ.get("GECKOVIEW_DOCS_UPLOAD_SECRET", "").format(**fmt)
-    if secret:
-        # Set up a private key from the secrets store if applicable.
-        import requests
+    @SubCommand(
+        "android",
+        "gradle-dependencies",
+        """Collect Android Gradle dependencies.
+        See http://firefox-source-docs.mozilla.org/build/buildsystem/toolchains.html#firefox-for-android-with-gradle""",  # NOQA: E501
+    )
+    @CommandArgument("args", nargs=argparse.REMAINDER)
+    def android_gradle_dependencies(self, command_context, args):
+        # We don't want to gate producing dependency archives on clean
+        # lint or checkstyle, particularly because toolchain versions
+        # can change the outputs for those processes.
+        self.gradle(
+            command_context,
+            command_context.substs["GRADLE_ANDROID_DEPENDENCIES_TASKS"]
+            + ["--continue"]
+            + args,
+            verbose=True,
+        )
+
+        return 0
 
-        req = requests.get("http://taskcluster/secrets/v1/secret/" + secret)
-        req.raise_for_status()
+    @SubCommand(
+        "android",
+        "archive-geckoview",
+        """Create GeckoView archives.
+        See http://firefox-source-docs.mozilla.org/build/buildsystem/toolchains.html#firefox-for-android-with-gradle""",  # NOQA: E501
+    )
+    @CommandArgument("args", nargs=argparse.REMAINDER)
+    def android_archive_geckoview(self, command_context, args):
+        ret = self.gradle(
+            command_context,
+            command_context.substs["GRADLE_ANDROID_ARCHIVE_GECKOVIEW_TASKS"] + args,
+            verbose=True,
+        )
+
+        return ret
 
-        keyfile = mozpath.abspath("gv-docs-upload-key")
-        with open(keyfile, "w") as f:
-            os.chmod(keyfile, 0o600)
-            f.write(req.json()["secret"]["content"])
+    @SubCommand("android", "build-geckoview_example", """Build geckoview_example """)
+    @CommandArgument("args", nargs=argparse.REMAINDER)
+    def android_build_geckoview_example(self, command_context, args):
+        self.gradle(
+            command_context,
+            command_context.substs["GRADLE_ANDROID_BUILD_GECKOVIEW_EXAMPLE_TASKS"]
+            + args,
+            verbose=True,
+        )
 
-        # Turn off strict host key checking so ssh does not complain about
-        # unknown github.com host. We're not pushing anything sensitive, so
-        # it's okay to not check GitHub's host keys.
-        env["GIT_SSH_COMMAND"] = 'ssh -i "%s" -o StrictHostKeyChecking=no' % keyfile
+        print(
+            "Execute `mach android install-geckoview_example` "
+            "to push the geckoview_example and test APKs to a device."
+        )
+
+        return 0
 
-    # Clone remote repo.
-    branch = upload_branch.format(**fmt)
-    repo_url = "git@github.com:%s.git" % upload
-    repo_path = mozpath.abspath("gv-docs-repo")
-    command_context.run_process(
-        [
-            "git",
-            "clone",
-            "--branch",
-            upload_branch,
-            "--depth",
-            "1",
-            repo_url,
-            repo_path,
-        ],
-        append_env=env,
-        pass_thru=True,
+    @SubCommand(
+        "android", "install-geckoview_example", """Install geckoview_example """
     )
-    env["GIT_DIR"] = mozpath.join(repo_path, ".git")
-    env["GIT_WORK_TREE"] = repo_path
-    env["GIT_AUTHOR_NAME"] = env["GIT_COMMITTER_NAME"] = "GeckoView Docs Bot"
-    env["GIT_AUTHOR_EMAIL"] = env["GIT_COMMITTER_EMAIL"] = "nobody@mozilla.com"
+    @CommandArgument("args", nargs=argparse.REMAINDER)
+    def android_install_geckoview_example(self, command_context, args):
+        self.gradle(
+            command_context,
+            command_context.substs["GRADLE_ANDROID_INSTALL_GECKOVIEW_EXAMPLE_TASKS"]
+            + args,
+            verbose=True,
+        )
 
-    # Copy over user documentation.
-    import mozfile
+        print(
+            "Execute `mach android build-geckoview_example` "
+            "to just build the geckoview_example and test APKs."
+        )
+
+        return 0
 
-    # Extract new javadoc to specified directory inside repo.
-    src_tar = mozpath.join(
-        command_context.topobjdir,
-        "gradle",
-        "build",
-        "mobile",
+    @SubCommand(
         "android",
-        "geckoview",
-        "libs",
-        "geckoview-javadoc.jar",
+        "geckoview-docs",
+        """Create GeckoView javadoc and optionally upload to Github""",
+    )
+    @CommandArgument(
+        "--archive", action="store_true", help="Generate a javadoc archive."
+    )
+    @CommandArgument(
+        "--upload",
+        metavar="USER/REPO",
+        help="Upload geckoview documentation to Github, "
+        "using the specified USER/REPO.",
+    )
+    @CommandArgument(
+        "--upload-branch",
+        metavar="BRANCH[/PATH]",
+        default="gh-pages",
+        help="Use the specified branch/path for documentation commits.",
     )
-    dst_path = mozpath.join(repo_path, javadoc_path.format(**fmt))
-    mozfile.remove(dst_path)
-    mozfile.extract_zip(src_tar, dst_path)
+    @CommandArgument(
+        "--javadoc-path",
+        metavar="/PATH",
+        default="javadoc",
+        help="Use the specified path for javadoc commits.",
+    )
+    @CommandArgument(
+        "--upload-message",
+        metavar="MSG",
+        default="GeckoView docs upload",
+        help="Use the specified message for commits.",
+    )
+    def android_geckoview_docs(
+        self,
+        command_context,
+        archive,
+        upload,
+        upload_branch,
+        javadoc_path,
+        upload_message,
+    ):
+
+        tasks = (
+            command_context.substs["GRADLE_ANDROID_GECKOVIEW_DOCS_ARCHIVE_TASKS"]
+            if archive or upload
+            else command_context.substs["GRADLE_ANDROID_GECKOVIEW_DOCS_TASKS"]
+        )
 
-    # Commit and push.
-    command_context.run_process(["git", "add", "--all"], append_env=env, pass_thru=True)
-    if (
+        ret = self.gradle(command_context, tasks, verbose=True)
+        if ret or not upload:
+            return ret
+
+        # Upload to Github.
+        fmt = {
+            "level": os.environ.get("MOZ_SCM_LEVEL", "0"),
+            "project": os.environ.get("MH_BRANCH", "unknown"),
+            "revision": os.environ.get("GECKO_HEAD_REV", "tip"),
+        }
+        env = {}
+
+        # In order to push to GitHub from TaskCluster, we store a private key
+        # in the TaskCluster secrets store in the format {"content": "<KEY>"},
+        # and the corresponding public key as a writable deploy key for the
+        # destination repo on GitHub.
+        secret = os.environ.get("GECKOVIEW_DOCS_UPLOAD_SECRET", "").format(**fmt)
+        if secret:
+            # Set up a private key from the secrets store if applicable.
+            import requests
+
+            req = requests.get("http://taskcluster/secrets/v1/secret/" + secret)
+            req.raise_for_status()
+
+            keyfile = mozpath.abspath("gv-docs-upload-key")
+            with open(keyfile, "w") as f:
+                os.chmod(keyfile, 0o600)
+                f.write(req.json()["secret"]["content"])
+
+            # Turn off strict host key checking so ssh does not complain about
+            # unknown github.com host. We're not pushing anything sensitive, so
+            # it's okay to not check GitHub's host keys.
+            env["GIT_SSH_COMMAND"] = 'ssh -i "%s" -o StrictHostKeyChecking=no' % keyfile
+
+        # Clone remote repo.
+        branch = upload_branch.format(**fmt)
+        repo_url = "git@github.com:%s.git" % upload
+        repo_path = mozpath.abspath("gv-docs-repo")
         command_context.run_process(
-            ["git", "diff", "--cached", "--quiet"],
-            append_env=env,
-            pass_thru=True,
-            ensure_exit_code=False,
-        )
-        != 0
-    ):
-        # We have something to commit.
-        command_context.run_process(
-            ["git", "commit", "--message", upload_message.format(**fmt)],
+            [
+                "git",
+                "clone",
+                "--branch",
+                upload_branch,
+                "--depth",
+                "1",
+                repo_url,
+                repo_path,
+            ],
             append_env=env,
             pass_thru=True,
         )
+        env["GIT_DIR"] = mozpath.join(repo_path, ".git")
+        env["GIT_WORK_TREE"] = repo_path
+        env["GIT_AUTHOR_NAME"] = env["GIT_COMMITTER_NAME"] = "GeckoView Docs Bot"
+        env["GIT_AUTHOR_EMAIL"] = env["GIT_COMMITTER_EMAIL"] = "nobody@mozilla.com"
+
+        # Copy over user documentation.
+        import mozfile
+
+        # Extract new javadoc to specified directory inside repo.
+        src_tar = mozpath.join(
+            command_context.topobjdir,
+            "gradle",
+            "build",
+            "mobile",
+            "android",
+            "geckoview",
+            "libs",
+            "geckoview-javadoc.jar",
+        )
+        dst_path = mozpath.join(repo_path, javadoc_path.format(**fmt))
+        mozfile.remove(dst_path)
+        mozfile.extract_zip(src_tar, dst_path)
+
+        # Commit and push.
         command_context.run_process(
-            ["git", "push", "origin", branch], append_env=env, pass_thru=True
+            ["git", "add", "--all"], append_env=env, pass_thru=True
+        )
+        if (
+            command_context.run_process(
+                ["git", "diff", "--cached", "--quiet"],
+                append_env=env,
+                pass_thru=True,
+                ensure_exit_code=False,
+            )
+            != 0
+        ):
+            # We have something to commit.
+            command_context.run_process(
+                ["git", "commit", "--message", upload_message.format(**fmt)],
+                append_env=env,
+                pass_thru=True,
+            )
+            command_context.run_process(
+                ["git", "push", "origin", branch], append_env=env, pass_thru=True
+            )
+
+        mozfile.remove(repo_path)
+        if secret:
+            mozfile.remove(keyfile)
+        return 0
+
+    @Command(
+        "gradle",
+        category="devenv",
+        description="Run gradle.",
+        conditions=[conditions.is_android],
+    )
+    @CommandArgument(
+        "-v",
+        "--verbose",
+        action="store_true",
+        help="Verbose output for what commands the build is running.",
+    )
+    @CommandArgument("args", nargs=argparse.REMAINDER)
+    def gradle(self, command_context, args, verbose=False):
+        if not verbose:
+            # Avoid logging the command
+            command_context.log_manager.terminal_handler.setLevel(logging.CRITICAL)
+
+        # In automation, JAVA_HOME is set via mozconfig, which needs
+        # to be specially handled in each mach command. This turns
+        # $JAVA_HOME/bin/java into $JAVA_HOME.
+        java_home = os.path.dirname(os.path.dirname(command_context.substs["JAVA"]))
+
+        gradle_flags = command_context.substs.get("GRADLE_FLAGS", "") or os.environ.get(
+            "GRADLE_FLAGS", ""
+        )
+        gradle_flags = shell_split(gradle_flags)
+
+        # We force the Gradle JVM to run with the UTF-8 encoding, since we
+        # filter strings.xml, which is really UTF-8; the ellipsis character is
+        # replaced with ??? in some encodings (including ASCII).  It's not yet
+        # possible to filter with encodings in Gradle
+        # (https://github.com/gradle/gradle/pull/520) and it's challenging to
+        # do our filtering with Gradle's Ant support.  Moreover, all of the
+        # Android tools expect UTF-8: see
+        # http://tools.android.com/knownissues/encoding.  See
+        # http://stackoverflow.com/a/21267635 for discussion of this approach.
+        #
+        # It's not even enough to set the encoding just for Gradle; it
+        # needs to be for JVMs spawned by Gradle as well.  This
+        # happens during the maven deployment generating the GeckoView
+        # documents; this works around "error: unmappable character
+        # for encoding ASCII" in exoplayer2.  See
+        # https://discuss.gradle.org/t/unmappable-character-for-encoding-ascii-when-building-a-utf-8-project/10692/11  # NOQA: E501
+        # and especially https://stackoverflow.com/a/21755671.
+
+        if command_context.substs.get("MOZ_AUTOMATION"):
+            gradle_flags += ["--console=plain"]
+
+        env = os.environ.copy()
+        env.update(
+            {
+                "GRADLE_OPTS": "-Dfile.encoding=utf-8",
+                "JAVA_HOME": java_home,
+                "JAVA_TOOL_OPTIONS": "-Dfile.encoding=utf-8",
+            }
+        )
+        # Set ANDROID_SDK_ROOT if --with-android-sdk was set.
+        # See https://bugzilla.mozilla.org/show_bug.cgi?id=1576471
+        android_sdk_root = command_context.substs.get("ANDROID_SDK_ROOT", "")
+        if android_sdk_root:
+            env["ANDROID_SDK_ROOT"] = android_sdk_root
+
+        return command_context.run_process(
+            [command_context.substs["GRADLE"]] + gradle_flags + args,
+            explicit_env=env,
+            pass_thru=True,  # Allow user to run gradle interactively.
+            ensure_exit_code=False,  # Don't throw on non-zero exit code.
+            cwd=mozpath.join(command_context.topsrcdir),
         )
 
-    mozfile.remove(repo_path)
-    if secret:
-        mozfile.remove(keyfile)
-    return 0
-
-
-@Command(
-    "gradle",
-    category="devenv",
-    description="Run gradle.",
-    conditions=[conditions.is_android],
-)
-@CommandArgument(
-    "-v",
-    "--verbose",
-    action="store_true",
-    help="Verbose output for what commands the build is running.",
-)
-@CommandArgument("args", nargs=argparse.REMAINDER)
-def gradle(command_context, args, verbose=False):
-    if not verbose:
-        # Avoid logging the command
-        command_context.log_manager.terminal_handler.setLevel(logging.CRITICAL)
-
-    # In automation, JAVA_HOME is set via mozconfig, which needs
-    # to be specially handled in each mach command. This turns
-    # $JAVA_HOME/bin/java into $JAVA_HOME.
-    java_home = os.path.dirname(os.path.dirname(command_context.substs["JAVA"]))
-
-    gradle_flags = command_context.substs.get("GRADLE_FLAGS", "") or os.environ.get(
-        "GRADLE_FLAGS", ""
-    )
-    gradle_flags = shell_split(gradle_flags)
-
-    # We force the Gradle JVM to run with the UTF-8 encoding, since we
-    # filter strings.xml, which is really UTF-8; the ellipsis character is
-    # replaced with ??? in some encodings (including ASCII).  It's not yet
-    # possible to filter with encodings in Gradle
-    # (https://github.com/gradle/gradle/pull/520) and it's challenging to
-    # do our filtering with Gradle's Ant support.  Moreover, all of the
-    # Android tools expect UTF-8: see
-    # http://tools.android.com/knownissues/encoding.  See
-    # http://stackoverflow.com/a/21267635 for discussion of this approach.
-    #
-    # It's not even enough to set the encoding just for Gradle; it
-    # needs to be for JVMs spawned by Gradle as well.  This
-    # happens during the maven deployment generating the GeckoView
-    # documents; this works around "error: unmappable character
-    # for encoding ASCII" in exoplayer2.  See
-    # https://discuss.gradle.org/t/unmappable-character-for-encoding-ascii-when-building-a-utf-8-project/10692/11  # NOQA: E501
-    # and especially https://stackoverflow.com/a/21755671.
-
-    if command_context.substs.get("MOZ_AUTOMATION"):
-        gradle_flags += ["--console=plain"]
-
-    env = os.environ.copy()
-    env.update(
-        {
-            "GRADLE_OPTS": "-Dfile.encoding=utf-8",
-            "JAVA_HOME": java_home,
-            "JAVA_TOOL_OPTIONS": "-Dfile.encoding=utf-8",
-        }
-    )
-    # Set ANDROID_SDK_ROOT if --with-android-sdk was set.
-    # See https://bugzilla.mozilla.org/show_bug.cgi?id=1576471
-    android_sdk_root = command_context.substs.get("ANDROID_SDK_ROOT", "")
-    if android_sdk_root:
-        env["ANDROID_SDK_ROOT"] = android_sdk_root
-
-    return command_context.run_process(
-        [command_context.substs["GRADLE"]] + gradle_flags + args,
-        explicit_env=env,
-        pass_thru=True,  # Allow user to run gradle interactively.
-        ensure_exit_code=False,  # Don't throw on non-zero exit code.
-        cwd=mozpath.join(command_context.topsrcdir),
-    )
-
-
-@Command("gradle-install", category="devenv", conditions=[REMOVED])
-def gradle_install_REMOVED(command_context):
-    pass
+    @Command("gradle-install", category="devenv", conditions=[REMOVED])
+    def gradle_install_REMOVED(self, command_context):
+        pass
 
 
-@Command(
-    "android-emulator",
-    category="devenv",
-    conditions=[],
-    description="Run the Android emulator with an AVD from test automation. "
-    "Environment variable MOZ_EMULATOR_COMMAND_ARGS, if present, will "
-    "over-ride the command line arguments used to launch the emulator.",
-)
-@CommandArgument(
-    "--version",
-    metavar="VERSION",
-    choices=["arm", "x86_64"],
-    help="Specify which AVD to run in emulator. "
-    'One of "arm" (Android supporting armv7 binaries), or '
-    '"x86_64" (Android supporting x86 or x86_64 binaries, '
-    "recommended for most applications). "
-    'By default, "arm" will be used if the current build environment '
-    'architecture is arm; otherwise "x86_64".',
-)
-@CommandArgument("--wait", action="store_true", help="Wait for emulator to be closed.")
-@CommandArgument("--gpu", help="Over-ride the emulator -gpu argument.")
-@CommandArgument(
-    "--verbose", action="store_true", help="Log informative status messages."
-)
-def emulator(
-    command_context,
-    version,
-    wait=False,
-    gpu=None,
-    verbose=False,
-):
-    """
-    Run the Android emulator with one of the AVDs used in the Mozilla
-    automated test environment. If necessary, the AVD is fetched from
-    the taskcluster server and installed.
-    """
-    from mozrunner.devices.android_device import AndroidEmulator
-
-    emulator = AndroidEmulator(
-        version,
-        verbose,
-        substs=command_context.substs,
-        device_serial="emulator-5554",
+@CommandProvider
+class AndroidEmulatorCommands(MachCommandBase):
+    @Command(
+        "android-emulator",
+        category="devenv",
+        conditions=[],
+        description="Run the Android emulator with an AVD from test automation. "
+        "Environment variable MOZ_EMULATOR_COMMAND_ARGS, if present, will "
+        "over-ride the command line arguments used to launch the emulator.",
+    )
+    @CommandArgument(
+        "--version",
+        metavar="VERSION",
+        choices=["arm", "x86_64"],
+        help="Specify which AVD to run in emulator. "
+        'One of "arm" (Android supporting armv7 binaries), or '
+        '"x86_64" (Android supporting x86 or x86_64 binaries, '
+        "recommended for most applications). "
+        'By default, "arm" will be used if the current build environment '
+        'architecture is arm; otherwise "x86_64".',
+    )
+    @CommandArgument(
+        "--wait", action="store_true", help="Wait for emulator to be closed."
+    )
+    @CommandArgument("--gpu", help="Over-ride the emulator -gpu argument.")
+    @CommandArgument(
+        "--verbose", action="store_true", help="Log informative status messages."
     )
-    if emulator.is_running():
-        # It is possible to run multiple emulators simultaneously, but:
-        #  - if more than one emulator is using the same avd, errors may
-        #    occur due to locked resources;
-        #  - additional parameters must be specified when running tests,
-        #    to select a specific device.
-        # To avoid these complications, allow just one emulator at a time.
-        command_context.log(
-            logging.ERROR,
-            "emulator",
-            {},
-            "An Android emulator is already running.\n"
-            "Close the existing emulator and re-run this command.",
-        )
-        return 1
-
-    if not emulator.check_avd():
-        command_context.log(
-            logging.WARN,
-            "emulator",
-            {},
-            "AVD not found. Please run |mach bootstrap|.",
-        )
-        return 2
+    def emulator(
+        self,
+        command_context,
+        version,
+        wait=False,
+        gpu=None,
+        verbose=False,
+    ):
+        """
+        Run the Android emulator with one of the AVDs used in the Mozilla
+        automated test environment. If necessary, the AVD is fetched from
+        the taskcluster server and installed.
+        """
+        from mozrunner.devices.android_device import AndroidEmulator
 
-    if not emulator.is_available():
-        command_context.log(
-            logging.WARN,
-            "emulator",
-            {},
-            "Emulator binary not found.\n"
-            "Install the Android SDK and make sure 'emulator' is in your PATH.",
+        emulator = AndroidEmulator(
+            version,
+            verbose,
+            substs=command_context.substs,
+            device_serial="emulator-5554",
         )
-        return 2
+        if emulator.is_running():
+            # It is possible to run multiple emulators simultaneously, but:
+            #  - if more than one emulator is using the same avd, errors may
+            #    occur due to locked resources;
+            #  - additional parameters must be specified when running tests,
+            #    to select a specific device.
+            # To avoid these complications, allow just one emulator at a time.
+            command_context.log(
+                logging.ERROR,
+                "emulator",
+                {},
+                "An Android emulator is already running.\n"
+                "Close the existing emulator and re-run this command.",
+            )
+            return 1
 
-    command_context.log(
-        logging.INFO,
-        "emulator",
-        {},
-        "Starting Android emulator running %s..." % emulator.get_avd_description(),
-    )
-    emulator.start(gpu)
-    if emulator.wait_for_start():
-        command_context.log(
-            logging.INFO, "emulator", {}, "Android emulator is running."
-        )
-    else:
-        # This is unusual but the emulator may still function.
-        command_context.log(
-            logging.WARN,
-            "emulator",
-            {},
-            "Unable to verify that emulator is running.",
-        )
+        if not emulator.check_avd():
+            command_context.log(
+                logging.WARN,
+                "emulator",
+                {},
+                "AVD not found. Please run |mach bootstrap|.",
+            )
+            return 2
 
-    if conditions.is_android(command_context):
+        if not emulator.is_available():
+            command_context.log(
+                logging.WARN,
+                "emulator",
+                {},
+                "Emulator binary not found.\n"
+                "Install the Android SDK and make sure 'emulator' is in your PATH.",
+            )
+            return 2
+
         command_context.log(
             logging.INFO,
             "emulator",
             {},
-            "Use 'mach install' to install or update Firefox on your emulator.",
+            "Starting Android emulator running %s..." % emulator.get_avd_description(),
         )
-    else:
-        command_context.log(
-            logging.WARN,
-            "emulator",
-            {},
-            "No Firefox for Android build detected.\n"
-            "Switch to a Firefox for Android build context or use 'mach bootstrap'\n"
-            "to setup an Android build environment.",
-        )
+        emulator.start(gpu)
+        if emulator.wait_for_start():
+            command_context.log(
+                logging.INFO, "emulator", {}, "Android emulator is running."
+            )
+        else:
+            # This is unusual but the emulator may still function.
+            command_context.log(
+                logging.WARN,
+                "emulator",
+                {},
+                "Unable to verify that emulator is running.",
+            )
 
-    if wait:
-        command_context.log(
-            logging.INFO, "emulator", {}, "Waiting for Android emulator to close..."
-        )
-        rc = emulator.wait()
-        if rc is not None:
+        if conditions.is_android(command_context):
             command_context.log(
                 logging.INFO,
                 "emulator",
                 {},
-                "Android emulator completed with return code %d." % rc,
+                "Use 'mach install' to install or update Firefox on your emulator.",
             )
         else:
             command_context.log(
                 logging.WARN,
                 "emulator",
                 {},
-                "Unable to retrieve Android emulator return code.",
+                "No Firefox for Android build detected.\n"
+                "Switch to a Firefox for Android build context or use 'mach bootstrap'\n"
+                "to setup an Android build environment.",
+            )
+
+        if wait:
+            command_context.log(
+                logging.INFO, "emulator", {}, "Waiting for Android emulator to close..."
             )
-    return 0
+            rc = emulator.wait()
+            if rc is not None:
+                command_context.log(
+                    logging.INFO,
+                    "emulator",
+                    {},
+                    "Android emulator completed with return code %d." % rc,
+                )
+            else:
+                command_context.log(
+                    logging.WARN,
+                    "emulator",
+                    {},
+                    "Unable to retrieve Android emulator return code.",
+                )
+        return 0
--- a/python/mach/docs/commands.rst
+++ b/python/mach/docs/commands.rst
@@ -4,54 +4,64 @@
 Implementing Commands
 =====================
 
 Mach commands are defined via Python decorators.
 
 All the relevant decorators are defined in the *mach.decorators* module.
 The important decorators are as follows:
 
+:py:func:`CommandProvider <mach.decorators.CommandProvider>`
+  A class decorator that denotes that a class contains mach
+  commands. The decorator takes no arguments.
+
 :py:func:`Command <mach.decorators.Command>`
-  A function decorator that denotes that the function should be called when
+  A method decorator that denotes that the method should be called when
   the specified command is requested. The decorator takes a command name
   as its first argument and a number of additional arguments to
-  configure the behavior of the command. The decorated function must take a
-  ``command_context`` argument as its first.
+  configure the behavior of the command. The decorated method must take a
+  ``command_context`` argument as its first (after ``self``).
   ``command_context`` is a properly configured instance of a ``MozbuildObject``
   subclass, meaning it can be used for accessing things like the current config
   and running processes.
 
 :py:func:`CommandArgument <mach.decorators.CommandArgument>`
-  A function decorator that defines an argument to the command. Its
+  A method decorator that defines an argument to the command. Its
   arguments are essentially proxied to ArgumentParser.add_argument()
 
 :py:func:`SubCommand <mach.decorators.SubCommand>`
-  A function decorator that denotes that the function should be a
+  A method decorator that denotes that the method should be a
   sub-command to an existing ``@Command``. The decorator takes the
   parent command name as its first argument and the sub-command name
   as its second argument.
 
   ``@CommandArgument`` can be used on ``@SubCommand`` instances just
   like they can on ``@Command`` instances.
 
+Classes with the ``@CommandProvider`` decorator **must** subclass
+``MachCommandBase`` and have a compatible ``__init__`` method.
 
 Here is a complete example:
 
 .. code-block:: python
 
    from mach.decorators import (
        CommandArgument,
+       CommandProvider,
        Command,
    )
+   from mozbuild.base import MachCommandBase
 
-    @Command('doit', help='Do ALL OF THE THINGS.')
-    @CommandArgument('--force', '-f', action='store_true',
-        help='Force doing it.')
-    def doit(command_context, force=False):
-        # Do stuff here.
+   @CommandProvider
+   class MyClass(MachCommandBase):
+       @Command('doit', help='Do ALL OF THE THINGS.')
+       @CommandArgument('--force', '-f', action='store_true',
+           help='Force doing it.')
+       def doit(self, command_context, force=False):
+           # Do stuff here.
 
 When the module is loaded, the decorators tell mach about all handlers.
 When mach runs, it takes the assembled metadata from these handlers and
 hooks it up to the command line driver. Under the hood, arguments passed
 to the decorators are being used to help mach parse command arguments,
 formulate arguments to the methods, etc. See the documentation in the
 :py:mod:`mach.base` module for more.
 
@@ -64,37 +74,47 @@ Conditionally Filtering Commands
 Sometimes it might only make sense to run a command given a certain
 context. For example, running tests only makes sense if the product
 they are testing has been built, and said build is available. To make
 sure a command is only runnable from within a correct context, you can
 define a series of conditions on the
 :py:func:`Command <mach.decorators.Command>` decorator.
 
 A condition is simply a function that takes an instance of the
-:py:func:`mozbuild.base.MachCommandBase` class as an argument, and
+:py:func:`mach.decorators.CommandProvider` class as an argument, and
 returns ``True`` or ``False``. If any of the conditions defined on a
 command return ``False``, the command will not be runnable. The
 docstring of a condition function is used in error messages, to explain
 why the command cannot currently be run.
 
 Here is an example:
 
 .. code-block:: python
 
    from mach.decorators import (
+       CommandProvider,
        Command,
    )
 
    def build_available(cls):
        """The build needs to be available."""
        return cls.build_path is not None
 
-   @Command('run_tests', conditions=[build_available])
-   def run_tests(command_context):
-       # Do stuff here.
+   @CommandProvider
+   class MyClass(MachCommandBase):
+       def __init__(self, *args, **kwargs):
+           super(MyClass, self).__init__(*args, **kwargs)
+           self.build_path = ...
+
+       @Command('run_tests', conditions=[build_available])
+       def run_tests(self, command_context):
+           # Do stuff here.
+
+It is important to make sure that any state needed by the condition is
+available to instances of the command provider.
 
 By default all commands without any conditions applied will be runnable,
 but it is possible to change this behaviour by setting
 ``require_conditions`` to ``True``:
 
 .. code-block:: python
 
    m = mach.main.Mach()
--- a/python/mach/docs/settings.rst
+++ b/python/mach/docs/settings.rst
@@ -111,27 +111,33 @@ Accessing Settings
 Now that the settings are defined and documented, they're accessible from
 individual mach commands from the mach command context.
 For example:
 
 .. code-block:: python
 
     from mach.decorators import (
         Command,
+        CommandProvider,
         SettingsProvider,
     )
     from mozbuild.base import MachCommandBase
 
     @SettingsProvider
     class ExampleSettings(object):
         config_settings = [
             ('a.b', 'string', 'desc', 'default'),
             ('foo.bar', 'string', 'desc',),
             ('foo.baz', 'int', 'desc', 0, {'choices': set([0,1,2])}),
         ]
 
-    @Command('command', category='misc',
-             description='Prints a setting')
-    def command(command_context):
-        settings = command_context._mach_context.settings
-        print(settings.a.b)
-        for option in settings.foo:
-            print(settings.foo[option])
+    @CommandProvider
+    class Commands(MachCommandBase):
+        def __init__(self, *args, **kwargs):
+            super(Commands, self).__init__(*args, **kwargs)
+            self.settings = self._mach_context.settings
+
+        @Command('command', category='misc',
+                 description='Prints a setting')
+        def command(self):
+            print(self.settings.a.b)
+            for option in self.settings.foo:
+                print(self.settings.foo[option])
--- a/python/mach/docs/telemetry.rst
+++ b/python/mach/docs/telemetry.rst
@@ -14,24 +14,26 @@ These files are all documented in a sing
    metrics
 
 Adding Metrics to a new Command
 ===============================
 
 If you would like to submit telemetry metrics from your mach ``@Command``, you should take two steps:
 
 #. Parameterize your ``@Command`` annotation with ``metrics_path``.
-#. Use the ``command_context.metrics`` handle provided by ``MachCommandBase``
+#. Use the ``self.metrics`` handle provided by ``MachCommandBase``
 
 For example::
 
     METRICS_PATH = os.path.abspath(os.path.join(__file__, '..', '..', 'metrics.yaml'))
 
-    @Command('custom-command', metrics_path=METRICS_PATH)
-    def custom_command(command_context):
-        command_context.metrics.custom.foo.set('bar')
+    @CommandProvider
+    class CustomCommand(MachCommandBase):
+        @Command('custom-command', metrics_path=METRICS_PATH)
+        def custom_command(self):
+            self.metrics.custom.foo.set('bar')
 
 Updating Generated Metrics Docs
 ===============================
 
 When a ``metrics.yaml`` is added/changed/removed, :ref:`the metrics document<metrics>` will need to be updated::
 
     ./mach doc mach-telemetry
--- a/python/mach/mach/commands/commandinfo.py
+++ b/python/mach/mach/commands/commandinfo.py
@@ -8,17 +8,18 @@ import argparse
 import os
 import re
 import subprocess
 import sys
 from itertools import chain
 
 import attr
 
-from mach.decorators import Command, CommandArgument, SubCommand
+from mach.decorators import CommandProvider, Command, CommandArgument, SubCommand
+from mozbuild.base import MachCommandBase
 from mozbuild.util import memoize
 
 here = os.path.abspath(os.path.dirname(__file__))
 COMPLETION_TEMPLATES_DIR = os.path.join(here, "completion_templates")
 
 
 @attr.s
 class CommandInfo(object):
@@ -31,462 +32,463 @@ class CommandInfo(object):
 
 def render_template(shell, context):
     filename = "{}.template".format(shell)
     with open(os.path.join(COMPLETION_TEMPLATES_DIR, filename)) as fh:
         template = fh.read()
     return template % context
 
 
-@memoize
-def command_handlers(command_context):
-    """A dictionary of command handlers keyed by command name."""
-    return command_context._mach_context.commands.command_handlers
-
-
-@memoize
-def commands(command_context):
-    """A sorted list of all command names."""
-    return sorted(command_handlers(command_context))
-
+@CommandProvider
+class BuiltinCommands(MachCommandBase):
+    @memoize
+    def command_handlers(self, command_context):
+        """A dictionary of command handlers keyed by command name."""
+        return command_context._mach_context.commands.command_handlers
 
-def _get_parser_options(parser):
-    options = {}
-    for action in parser._actions:
-        # ignore positional args
-        if not action.option_strings:
-            continue
+    @memoize
+    def commands(self, command_context):
+        """A sorted list of all command names."""
+        return sorted(self.command_handlers(command_context))
 
-        # ignore suppressed args
-        if action.help == argparse.SUPPRESS:
-            continue
-
-        options[tuple(action.option_strings)] = action.help or ""
-    return options
-
+    def _get_parser_options(self, parser):
+        options = {}
+        for action in parser._actions:
+            # ignore positional args
+            if not action.option_strings:
+                continue
 
-@memoize
-def global_options(command_context):
-    """Return a dict of global options.
+            # ignore suppressed args
+            if action.help == argparse.SUPPRESS:
+                continue
 
-    Of the form `{("-o", "--option"): "description"}`.
-    """
-    for group in command_context._mach_context.global_parser._action_groups:
-        if group.title == "Global Arguments":
-            return _get_parser_options(group)
-
-
-@memoize
-def _get_handler_options(handler):
-    """Return a dict of options for the given handler.
+            options[tuple(action.option_strings)] = action.help or ""
+        return options
 
-    Of the form `{("-o", "--option"): "description"}`.
-    """
-    options = {}
-    for option_strings, val in handler.arguments:
-        # ignore positional args
-        if option_strings[0][0] != "-":
-            continue
+    @memoize
+    def global_options(self, command_context):
+        """Return a dict of global options.
 
-        options[tuple(option_strings)] = val.get("help", "")
+        Of the form `{("-o", "--option"): "description"}`.
+        """
+        for group in command_context._mach_context.global_parser._action_groups:
+            if group.title == "Global Arguments":
+                return self._get_parser_options(group)
 
-    if handler._parser:
-        options.update(_get_parser_options(handler.parser))
-
-    return options
+    @memoize
+    def _get_handler_options(self, handler):
+        """Return a dict of options for the given handler.
 
+        Of the form `{("-o", "--option"): "description"}`.
+        """
+        options = {}
+        for option_strings, val in handler.arguments:
+            # ignore positional args
+            if option_strings[0][0] != "-":
+                continue
 
-def _get_handler_info(handler):
-    try:
-        options = _get_handler_options(handler)
-    except (Exception, SystemExit):
-        # We don't want misbehaving commands to break tab completion,
-        # ignore any exceptions.
-        options = {}
+            options[tuple(option_strings)] = val.get("help", "")
 
-    subcommands = []
-    for sub in sorted(handler.subcommand_handlers):
-        subcommands.append(_get_handler_info(handler.subcommand_handlers[sub]))
+        if handler._parser:
+            options.update(self._get_parser_options(handler.parser))
+
+        return options
 
-    return CommandInfo(
-        name=handler.name,
-        description=handler.description or "",
-        options=options,
-        subcommands=subcommands,
-        subcommand=handler.subcommand,
-    )
-
+    def _get_handler_info(self, handler):
+        try:
+            options = self._get_handler_options(handler)
+        except (Exception, SystemExit):
+            # We don't want misbehaving commands to break tab completion,
+            # ignore any exceptions.
+            options = {}
 
-@memoize
-def commands_info(command_context):
-    """Return a list of CommandInfo objects for each command."""
-    commands_info = []
-    # Loop over self.commands() rather than self.command_handlers().items() for
-    # alphabetical order.
-    for c in commands(command_context):
-        commands_info.append(_get_handler_info(command_handlers(command_context)[c]))
-    return commands_info
+        subcommands = []
+        for sub in sorted(handler.subcommand_handlers):
+            subcommands.append(self._get_handler_info(handler.subcommand_handlers[sub]))
 
-
-@Command("mach-commands", category="misc", description="List all mach commands.")
-def run_commands(command_context):
-    print("\n".join(commands(command_context)))
-
+        return CommandInfo(
+            name=handler.name,
+            description=handler.description or "",
+            options=options,
+            subcommands=subcommands,
+            subcommand=handler.subcommand,
+        )
 
-@Command(
-    "mach-debug-commands",
-    category="misc",
-    description="Show info about available mach commands.",
-)
-@CommandArgument(
-    "match",
-    metavar="MATCH",
-    default=None,
-    nargs="?",
-    help="Only display commands containing given substring.",
-)
-def run_debug_commands(command_context, match=None):
-    import inspect
+    @memoize
+    def commands_info(self, command_context):
+        """Return a list of CommandInfo objects for each command."""
+        commands_info = []
+        # Loop over self.commands() rather than self.command_handlers().items() for
+        # alphabetical order.
+        for c in self.commands(command_context):
+            commands_info.append(
+                self._get_handler_info(self.command_handlers(command_context)[c])
+            )
+        return commands_info
 
-    for command, handler in command_handlers(command_context).items():
-        if match and match not in command:
-            continue
+    @Command("mach-commands", category="misc", description="List all mach commands.")
+    def run_commands(self, command_context):
+        print("\n".join(self.commands(command_context)))
 
-        cls = handler.cls
-        method = getattr(cls, getattr(handler, "method"))
-
-        print(command)
-        print("=" * len(command))
-        print("")
-        print("File: %s" % inspect.getsourcefile(method))
-        print("Class: %s" % cls.__name__)
-        print("Method: %s" % handler.method)
-        print("")
-
+    @Command(
+        "mach-debug-commands",
+        category="misc",
+        description="Show info about available mach commands.",
+    )
+    @CommandArgument(
+        "match",
+        metavar="MATCH",
+        default=None,
+        nargs="?",
+        help="Only display commands containing given substring.",
+    )
+    def run_debug_commands(self, command_context, match=None):
+        import inspect
 
-@Command(
-    "mach-completion",
-    category="misc",
-    description="Prints a list of completion strings for the specified command.",
-)
-@CommandArgument(
-    "args", default=None, nargs=argparse.REMAINDER, help="Command to complete."
-)
-def run_completion(command_context, args):
-    if not args:
-        print("\n".join(commands(command_context)))
-        return
+        for command, handler in self.command_handlers(command_context).items():
+            if match and match not in command:
+                continue
+
+            cls = handler.cls
+            method = getattr(cls, getattr(handler, "method"))
 
-    is_help = "help" in args
-    command = None
-    for i, arg in enumerate(args):
-        if arg in commands(command_context):
-            command = arg
-            args = args[i + 1 :]
-            break
-
-    # If no command is typed yet, just offer the commands.
-    if not command:
-        print("\n".join(commands(command_context)))
-        return
+            print(command)
+            print("=" * len(command))
+            print("")
+            print("File: %s" % inspect.getsourcefile(method))
+            print("Class: %s" % cls.__name__)
+            print("Method: %s" % handler.method)
+            print("")
 
-    handler = command_handlers(command_context)[command]
-    # If a subcommand was typed, update the handler.
-    for arg in args:
-        if arg in handler.subcommand_handlers:
-            handler = handler.subcommand_handlers[arg]
-            break
+    @Command(
+        "mach-completion",
+        category="misc",
+        description="Prints a list of completion strings for the specified command.",
+    )
+    @CommandArgument(
+        "args", default=None, nargs=argparse.REMAINDER, help="Command to complete."
+    )
+    def run_completion(self, command_context, args):
+        if not args:
+            print("\n".join(self.commands(command_context)))
+            return
 
-    targets = sorted(handler.subcommand_handlers.keys())
-    if is_help:
-        print("\n".join(targets))
-        return
-
-    targets.append("help")
-    targets.extend(chain(*_get_handler_options(handler).keys()))
-    print("\n".join(targets))
-
+        is_help = "help" in args
+        command = None
+        for i, arg in enumerate(args):
+            if arg in self.commands(command_context):
+                command = arg
+                args = args[i + 1 :]
+                break
 
-def _zsh_describe(value, description=None):
-    value = '"' + value.replace(":", "\\:")
-    if description:
-        description = subprocess.list2cmdline(
-            [re.sub(r'(["\'#&;`|*?~<>^()\[\]{}$\\\x0A\xFF])', r"\\\1", description)]
-        ).lstrip('"')
+        # If no command is typed yet, just offer the commands.
+        if not command:
+            print("\n".join(self.commands(command_context)))
+            return
+
+        handler = self.command_handlers(command_context)[command]
+        # If a subcommand was typed, update the handler.
+        for arg in args:
+            if arg in handler.subcommand_handlers:
+                handler = handler.subcommand_handlers[arg]
+                break
 
-        if description.endswith('"') and not description.endswith(r"\""):
-            description = description[:-1]
-
-        value += ":{}".format(description)
+        targets = sorted(handler.subcommand_handlers.keys())
+        if is_help:
+            print("\n".join(targets))
+            return
 
-    value += '"'
+        targets.append("help")
+        targets.extend(chain(*self._get_handler_options(handler).keys()))
+        print("\n".join(targets))
 
-    return value
-
+    def _zsh_describe(self, value, description=None):
+        value = '"' + value.replace(":", "\\:")
+        if description:
+            description = subprocess.list2cmdline(
+                [re.sub(r'(["\'#&;`|*?~<>^()\[\]{}$\\\x0A\xFF])', r"\\\1", description)]
+            ).lstrip('"')
 
-@SubCommand(
-    "mach-completion",
-    "bash",
-    description="Print mach completion script for bash shell",
-)
-@CommandArgument(
-    "-f",
-    "--file",
-    dest="outfile",
-    default=None,
-    help="File path to save completion script.",
-)
-def completion_bash(command_context, outfile):
-    commands_subcommands = []
-    case_options = []
-    case_subcommands = []
-    for i, cmd in enumerate(commands_info(command_context)):
-        # Build case statement for options.
-        options = []
-        for opt_strs, description in cmd.options.items():
-            for opt in opt_strs:
-                options.append(_zsh_describe(opt, None).strip('"'))
+            if description.endswith('"') and not description.endswith(r"\""):
+                description = description[:-1]
+
+            value += ":{}".format(description)
+
+        value += '"'
+
+        return value
 
-        if options:
-            case_options.append(
-                "\n".join(
-                    [
-                        "            ({})".format(cmd.name),
-                        '            opts="${{opts}} {}"'.format(" ".join(options)),
-                        "            ;;",
-                        "",
-                    ]
-                )
-            )
-
-        # Build case statement for subcommand options.
-        for sub in cmd.subcommands:
+    @SubCommand(
+        "mach-completion",
+        "bash",
+        description="Print mach completion script for bash shell",
+    )
+    @CommandArgument(
+        "-f",
+        "--file",
+        dest="outfile",
+        default=None,
+        help="File path to save completion script.",
+    )
+    def completion_bash(self, command_context, outfile):
+        commands_subcommands = []
+        case_options = []
+        case_subcommands = []
+        for i, cmd in enumerate(self.commands_info(command_context)):
+            # Build case statement for options.
             options = []
-            for opt_strs, description in sub.options.items():
+            for opt_strs, description in cmd.options.items():
                 for opt in opt_strs:
-                    options.append(_zsh_describe(opt, None))
+                    options.append(self._zsh_describe(opt, None).strip('"'))
 
             if options:
                 case_options.append(
                     "\n".join(
                         [
-                            '            ("{} {}")'.format(sub.name, sub.subcommand),
+                            "            ({})".format(cmd.name),
                             '            opts="${{opts}} {}"'.format(" ".join(options)),
                             "            ;;",
                             "",
                         ]
                     )
                 )
 
-        # Build case statement for subcommands.
-        subcommands = [_zsh_describe(s.subcommand, None) for s in cmd.subcommands]
-        if subcommands:
-            commands_subcommands.append(
-                '[{}]=" {} "'.format(
-                    cmd.name, " ".join([h.subcommand for h in cmd.subcommands])
-                )
-            )
+            # Build case statement for subcommand options.
+            for sub in cmd.subcommands:
+                options = []
+                for opt_strs, description in sub.options.items():
+                    for opt in opt_strs:
+                        options.append(self._zsh_describe(opt, None))
 
-            case_subcommands.append(
-                "\n".join(
-                    [
-                        "            ({})".format(cmd.name),
-                        '            subs="${{subs}} {}"'.format(" ".join(subcommands)),
-                        "            ;;",
-                        "",
-                    ]
-                )
-            )
+                if options:
+                    case_options.append(
+                        "\n".join(
+                            [
+                                '            ("{} {}")'.format(
+                                    sub.name, sub.subcommand
+                                ),
+                                '            opts="${{opts}} {}"'.format(
+                                    " ".join(options)
+                                ),
+                                "            ;;",
+                                "",
+                            ]
+                        )
+                    )
 
-    globalopts = [
-        opt for opt_strs in global_options(command_context) for opt in opt_strs
-    ]
-    context = {
-        "case_options": "\n".join(case_options),
-        "case_subcommands": "\n".join(case_subcommands),
-        "commands": " ".join(commands(command_context)),
-        "commands_subcommands": " ".join(sorted(commands_subcommands)),
-        "globalopts": " ".join(sorted(globalopts)),
-    }
-
-    outfile = open(outfile, "w") if outfile else sys.stdout
-    print(render_template("bash", context), file=outfile)
-
+            # Build case statement for subcommands.
+            subcommands = [
+                self._zsh_describe(s.subcommand, None) for s in cmd.subcommands
+            ]
+            if subcommands:
+                commands_subcommands.append(
+                    '[{}]=" {} "'.format(
+                        cmd.name, " ".join([h.subcommand for h in cmd.subcommands])
+                    )
+                )
 
-@SubCommand(
-    "mach-completion",
-    "zsh",
-    description="Print mach completion script for zsh shell",
-)
-@CommandArgument(
-    "-f",
-    "--file",
-    dest="outfile",
-    default=None,
-    help="File path to save completion script.",
-)
-def completion_zsh(command_context, outfile):
-    commands_descriptions = []
-    commands_subcommands = []
-    case_options = []
-    case_subcommands = []
-    for i, cmd in enumerate(commands_info(command_context)):
-        commands_descriptions.append(_zsh_describe(cmd.name, cmd.description))
+                case_subcommands.append(
+                    "\n".join(
+                        [
+                            "            ({})".format(cmd.name),
+                            '            subs="${{subs}} {}"'.format(
+                                " ".join(subcommands)
+                            ),
+                            "            ;;",
+                            "",
+                        ]
+                    )
+                )
+
+        globalopts = [
+            opt for opt_strs in self.global_options(command_context) for opt in opt_strs
+        ]
+        context = {
+            "case_options": "\n".join(case_options),
+            "case_subcommands": "\n".join(case_subcommands),
+            "commands": " ".join(self.commands(command_context)),
+            "commands_subcommands": " ".join(sorted(commands_subcommands)),
+            "globalopts": " ".join(sorted(globalopts)),
+        }
 
-        # Build case statement for options.
-        options = []
-        for opt_strs, description in cmd.options.items():
-            for opt in opt_strs:
-                options.append(_zsh_describe(opt, description))
+        outfile = open(outfile, "w") if outfile else sys.stdout
+        print(render_template("bash", context), file=outfile)
 
-        if options:
-            case_options.append(
-                "\n".join(
-                    [
-                        "            ({})".format(cmd.name),
-                        "            opts+=({})".format(" ".join(options)),
-                        "            ;;",
-                        "",
-                    ]
-                )
-            )
+    @SubCommand(
+        "mach-completion",
+        "zsh",
+        description="Print mach completion script for zsh shell",
+    )
+    @CommandArgument(
+        "-f",
+        "--file",
+        dest="outfile",
+        default=None,
+        help="File path to save completion script.",
+    )
+    def completion_zsh(self, command_context, outfile):
+        commands_descriptions = []
+        commands_subcommands = []
+        case_options = []
+        case_subcommands = []
+        for i, cmd in enumerate(self.commands_info(command_context)):
+            commands_descriptions.append(self._zsh_describe(cmd.name, cmd.description))
 
-        # Build case statement for subcommand options.
-        for sub in cmd.subcommands:
+            # Build case statement for options.
             options = []
-            for opt_strs, description in sub.options.items():
+            for opt_strs, description in cmd.options.items():
                 for opt in opt_strs:
-                    options.append(_zsh_describe(opt, description))
+                    options.append(self._zsh_describe(opt, description))
 
             if options:
                 case_options.append(
                     "\n".join(
                         [
-                            "            ({} {})".format(sub.name, sub.subcommand),
+                            "            ({})".format(cmd.name),
                             "            opts+=({})".format(" ".join(options)),
                             "            ;;",
                             "",
                         ]
                     )
                 )
 
-        # Build case statement for subcommands.
-        subcommands = [
-            _zsh_describe(s.subcommand, s.description) for s in cmd.subcommands
-        ]
-        if subcommands:
-            commands_subcommands.append(
-                '[{}]=" {} "'.format(
-                    cmd.name, " ".join([h.subcommand for h in cmd.subcommands])
+            # Build case statement for subcommand options.
+            for sub in cmd.subcommands:
+                options = []
+                for opt_strs, description in sub.options.items():
+                    for opt in opt_strs:
+                        options.append(self._zsh_describe(opt, description))
+
+                if options:
+                    case_options.append(
+                        "\n".join(
+                            [
+                                "            ({} {})".format(sub.name, sub.subcommand),
+                                "            opts+=({})".format(" ".join(options)),
+                                "            ;;",
+                                "",
+                            ]
+                        )
+                    )
+
+            # Build case statement for subcommands.
+            subcommands = [
+                self._zsh_describe(s.subcommand, s.description) for s in cmd.subcommands
+            ]
+            if subcommands:
+                commands_subcommands.append(
+                    '[{}]=" {} "'.format(
+                        cmd.name, " ".join([h.subcommand for h in cmd.subcommands])
+                    )
+                )
+
+                case_subcommands.append(
+                    "\n".join(
+                        [
+                            "            ({})".format(cmd.name),
+                            "            subs+=({})".format(" ".join(subcommands)),
+                            "            ;;",
+                            "",
+                        ]
+                    )
                 )
+
+        globalopts = []
+        for opt_strings, description in self.global_options(command_context).items():
+            for opt in opt_strings:
+                globalopts.append(self._zsh_describe(opt, description))
+
+        context = {
+            "case_options": "\n".join(case_options),
+            "case_subcommands": "\n".join(case_subcommands),
+            "commands": " ".join(sorted(commands_descriptions)),
+            "commands_subcommands": " ".join(sorted(commands_subcommands)),
+            "globalopts": " ".join(sorted(globalopts)),
+        }
+
+        outfile = open(outfile, "w") if outfile else sys.stdout
+        print(render_template("zsh", context), file=outfile)
+
+    @SubCommand(
+        "mach-completion",
+        "fish",
+        description="Print mach completion script for fish shell",
+    )
+    @CommandArgument(
+        "-f",
+        "--file",
+        dest="outfile",
+        default=None,
+        help="File path to save completion script.",
+    )
+    def completion_fish(self, command_context, outfile):
+        def _append_opt_strs(comp, opt_strs):
+            for opt in opt_strs:
+                if opt.startswith("--"):
+                    comp += " -l {}".format(opt[2:])
+                elif opt.startswith("-"):
+                    comp += " -s {}".format(opt[1:])
+            return comp
+
+        globalopts = []
+        for opt_strs, description in self.global_options(command_context).items():
+            comp = (
+                "complete -c mach -n '__fish_mach_complete_no_command' "
+                "-d '{}'".format(description.replace("'", "\\'"))
             )
+            comp = _append_opt_strs(comp, opt_strs)
+            globalopts.append(comp)
 
-            case_subcommands.append(
-                "\n".join(
-                    [
-                        "            ({})".format(cmd.name),
-                        "            subs+=({})".format(" ".join(subcommands)),
-                        "            ;;",
-                        "",
-                    ]
-                )
+        cmds = []
+        cmds_opts = []
+        for i, cmd in enumerate(self.commands_info(command_context)):
+            cmds.append(
+                "complete -c mach -f -n '__fish_mach_complete_no_command' "
+                "-a {} -d '{}'".format(cmd.name, cmd.description.replace("'", "\\'"))
             )
 
-    globalopts = []
-    for opt_strings, description in global_options(command_context).items():
-        for opt in opt_strings:
-            globalopts.append(_zsh_describe(opt, description))
-
-    context = {
-        "case_options": "\n".join(case_options),
-        "case_subcommands": "\n".join(case_subcommands),
-        "commands": " ".join(sorted(commands_descriptions)),
-        "commands_subcommands": " ".join(sorted(commands_subcommands)),
-        "globalopts": " ".join(sorted(globalopts)),
-    }
-
-    outfile = open(outfile, "w") if outfile else sys.stdout
-    print(render_template("zsh", context), file=outfile)
-
+            cmds_opts += ["# {}".format(cmd.name)]
 
-@SubCommand(
-    "mach-completion",
-    "fish",
-    description="Print mach completion script for fish shell",
-)
-@CommandArgument(
-    "-f",
-    "--file",
-    dest="outfile",
-    default=None,
-    help="File path to save completion script.",
-)
-def completion_fish(command_context, outfile):
-    def _append_opt_strs(comp, opt_strs):
-        for opt in opt_strs:
-            if opt.startswith("--"):
-                comp += " -l {}".format(opt[2:])
-            elif opt.startswith("-"):
-                comp += " -s {}".format(opt[1:])
-        return comp
-
-    globalopts = []
-    for opt_strs, description in global_options(command_context).items():
-        comp = (
-            "complete -c mach -n '__fish_mach_complete_no_command' "
-            "-d '{}'".format(description.replace("'", "\\'"))
-        )
-        comp = _append_opt_strs(comp, opt_strs)
-        globalopts.append(comp)
-
-    cmds = []
-    cmds_opts = []
-    for i, cmd in enumerate(commands_info(command_context)):
-        cmds.append(
-            "complete -c mach -f -n '__fish_mach_complete_no_command' "
-            "-a {} -d '{}'".format(cmd.name, cmd.description.replace("'", "\\'"))
-        )
-
-        cmds_opts += ["# {}".format(cmd.name)]
-
-        subcommands = " ".join([s.subcommand for s in cmd.subcommands])
-        for opt_strs, description in cmd.options.items():
-            comp = (
-                "complete -c mach -A -n '__fish_mach_complete_command {} {}' "
-                "-d '{}'".format(cmd.name, subcommands, description.replace("'", "\\'"))
-            )
-            comp = _append_opt_strs(comp, opt_strs)
-            cmds_opts.append(comp)
-
-        for sub in cmd.subcommands:
-
-            for opt_strs, description in sub.options.items():
+            subcommands = " ".join([s.subcommand for s in cmd.subcommands])
+            for opt_strs, description in cmd.options.items():
                 comp = (
-                    "complete -c mach -A -n '__fish_mach_complete_subcommand {} {}' "
+                    "complete -c mach -A -n '__fish_mach_complete_command {} {}' "
                     "-d '{}'".format(
-                        sub.name, sub.subcommand, description.replace("'", "\\'")
+                        cmd.name, subcommands, description.replace("'", "\\'")
                     )
                 )
                 comp = _append_opt_strs(comp, opt_strs)
                 cmds_opts.append(comp)
 
-            description = sub.description or ""
-            description = description.replace("'", "\\'")
-            comp = (
-                "complete -c mach -A -n '__fish_mach_complete_command {} {}' "
-                "-d '{}' -a {}".format(
-                    cmd.name, subcommands, description, sub.subcommand
-                )
-            )
-            cmds_opts.append(comp)
+            for sub in cmd.subcommands:
+
+                for opt_strs, description in sub.options.items():
+                    comp = (
+                        "complete -c mach -A -n '__fish_mach_complete_subcommand {} {}' "
+                        "-d '{}'".format(
+                            sub.name, sub.subcommand, description.replace("'", "\\'")
+                        )
+                    )
+                    comp = _append_opt_strs(comp, opt_strs)
+                    cmds_opts.append(comp)
 
-        if i < len(commands(command_context)) - 1:
-            cmds_opts.append("")
+                description = sub.description or ""
+                description = description.replace("'", "\\'")
+                comp = (
+                    "complete -c mach -A -n '__fish_mach_complete_command {} {}' "
+                    "-d '{}' -a {}".format(
+                        cmd.name, subcommands, description, sub.subcommand
+                    )
+                )
+                cmds_opts.append(comp)
 
-    context = {
-        "commands": " ".join(commands(command_context)),
-        "command_completions": "\n".join(cmds),
-        "command_option_completions": "\n".join(cmds_opts),
-        "global_option_completions": "\n".join(globalopts),
-    }
+            if i < len(self.commands(command_context)) - 1:
+                cmds_opts.append("")
 
-    outfile = open(outfile, "w") if outfile else sys.stdout
-    print(render_template("fish", context), file=outfile)
+        context = {
+            "commands": " ".join(self.commands(command_context)),
+            "command_completions": "\n".join(cmds),
+            "command_option_completions": "\n".join(cmds_opts),
+            "global_option_completions": "\n".join(globalopts),
+        }
+
+        outfile = open(outfile, "w") if outfile else sys.stdout
+        print(render_template("fish", context), file=outfile)
--- a/python/mach/mach/commands/settings.py
+++ b/python/mach/mach/commands/settings.py
@@ -2,52 +2,59 @@
 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
 # You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 from textwrap import TextWrapper
 
 from mach.config import TYPE_CLASSES
-from mach.decorators import CommandArgument, Command
-
-
-# Interact with settings for mach.
-
-# Currently, we only provide functionality to view what settings are
-# available. In the future, this module will be used to modify settings, help
-# people create configs via a wizard, etc.
+from mach.decorators import CommandArgument, CommandProvider, Command
+from mozbuild.base import MachCommandBase
 
 
-@Command("settings", category="devenv", description="Show available config settings.")
-@CommandArgument(
-    "-l",
-    "--list",
-    dest="short",
-    action="store_true",
-    help="Show settings in a concise list",
-)
-def run_settings(command_context, short=None):
-    """List available settings."""
-    types = {v: k for k, v in TYPE_CLASSES.items()}
-    wrapper = TextWrapper(initial_indent="# ", subsequent_indent="# ")
-    for i, section in enumerate(sorted(command_context._mach_context.settings)):
-        if not short:
-            print("%s[%s]" % ("" if i == 0 else "\n", section))
+@CommandProvider
+class Settings(MachCommandBase):
+    """Interact with settings for mach.
+
+    Currently, we only provide functionality to view what settings are
+    available. In the future, this module will be used to modify settings, help
+    people create configs via a wizard, etc.
+    """
 
-        for option in sorted(command_context._mach_context.settings[section]._settings):
-            meta = command_context._mach_context.settings[section].get_meta(option)
-            desc = meta["description"]
-
-            if short:
-                print("%s.%s -- %s" % (section, option, desc.splitlines()[0]))
-                continue
+    @Command(
+        "settings", category="devenv", description="Show available config settings."
+    )
+    @CommandArgument(
+        "-l",
+        "--list",
+        dest="short",
+        action="store_true",
+        help="Show settings in a concise list",
+    )
+    def run_settings(self, command_context, short=None):
+        """List available settings."""
+        types = {v: k for k, v in TYPE_CLASSES.items()}
+        wrapper = TextWrapper(initial_indent="# ", subsequent_indent="# ")
+        for i, section in enumerate(sorted(command_context._mach_context.settings)):
+            if not short:
+                print("%s[%s]" % ("" if i == 0 else "\n", section))
 
-            if option == "*":
-                option = "<option>"
+            for option in sorted(
+                command_context._mach_context.settings[section]._settings
+            ):
+                meta = command_context._mach_context.settings[section].get_meta(option)
+                desc = meta["description"]
+
+                if short:
+                    print("%s.%s -- %s" % (section, option, desc.splitlines()[0]))
+                    continue
 
-            if "choices" in meta:
-                value = "{%s}" % ", ".join(meta["choices"])
-            else:
-                value = "<%s>" % types[meta["type_cls"]]
+                if option == "*":
+                    option = "<option>"
 
-            print(wrapper.fill(desc))
-            print(";%s=%s" % (option, value))
+                if "choices" in meta:
+                    value = "{%s}" % ", ".join(meta["choices"])
+                else:
+                    value = "<%s>" % types[meta["type_cls"]]
+
+                print(wrapper.fill(desc))
+                print(";%s=%s" % (option, value))
--- a/python/mach/mach/decorators.py
+++ b/python/mach/mach/decorators.py
@@ -26,22 +26,28 @@ class _MachCommand(object):
         "_parser",
         "arguments",
         "argument_group_names",
         "virtualenv_name",
         "ok_if_tests_disabled",
         # By default, subcommands will be sorted. If this is set to
         # 'declaration', they will be left in declaration order.
         "order",
-        # This is the function or callable that will be called when
-        # the command is invoked
-        "func",
+        # Describes how dispatch is performed.
+        # The Python class providing the command. This is the class type not
+        # an instance of the class. Mach will instantiate a new instance of
+        # the class if the command is executed.
+        "cls",
         # The path to the `metrics.yaml` file that describes data that telemetry will
         # gather for this command. This path is optional.
         "metrics_path",
+        # The name of the method providing the command. In other words, this
+        # is the str name of the attribute on the class type corresponding to
+        # the name of the function.
+        "method",
         # Dict of string to _MachCommand defining sub-commands for this
         # command.
         "subcommand_handlers",
         # For subcommands, the global order that the subcommand's declaration
         # was seen.
         "decl_order",
     )
 
@@ -68,92 +74,129 @@ class _MachCommand(object):
         self.virtualenv_name = virtualenv_name
         self.order = order
         if ok_if_tests_disabled and category != "testing":
             raise ValueError(
                 "ok_if_tests_disabled should only be set for " "`testing` mach commands"
             )
         self.ok_if_tests_disabled = ok_if_tests_disabled
 
-        self.func = None
+        self.cls = None
         self.metrics_path = None
+        self.method = None
         self.subcommand_handlers = {}
         self.decl_order = None
 
     def create_instance(self, context, virtualenv_name):
         metrics = None
         if self.metrics_path:
             metrics = context.telemetry.metrics(self.metrics_path)
-
-        # This ensures the resulting class is defined inside `mach` so that logging
-        # works as expected, and has a meaningful name
-        subclass = type(self.name, (MachCommandBase,), {})
-        return subclass(context, virtualenv_name=virtualenv_name, metrics=metrics)
+        return self.cls(context, virtualenv_name=virtualenv_name, metrics=metrics)
 
     @property
     def parser(self):
         # Creating CLI parsers at command dispatch time can be expensive. Make
         # it possible to lazy load them by using functions.
         if callable(self._parser):
             self._parser = self._parser()
 
         return self._parser
 
     @property
     def docstring(self):
-        return self.func.__doc__
+        return self.cls.__dict__[self.method].__doc__
 
     def __ior__(self, other):
         if not isinstance(other, _MachCommand):
             raise ValueError("can only operate on _MachCommand instances")
 
         for a in self.__slots__:
             if not getattr(self, a):
                 setattr(self, a, getattr(other, a))
 
         return self
 
-    def register(self, func):
-        """Register the command in the Registrar with the function to be called on invocation."""
-        if not self.subcommand:
-            if not self.conditions and Registrar.require_conditions:
-                return
+
+def CommandProvider(cls):
+    if not issubclass(cls, MachCommandBase):
+        raise MachError(
+            "Mach command provider class %s must be a subclass of "
+            "mozbuild.base.MachComandBase" % cls.__name__
+        )
+
+    seen_commands = set()
+
+    # We scan __dict__ because we only care about the classes' own attributes,
+    # not inherited ones. If we did inherited attributes, we could potentially
+    # define commands multiple times. We also sort keys so commands defined in
+    # the same class are grouped in a sane order.
+    command_methods = sorted(
+        [
+            (name, value._mach_command)
+            for name, value in cls.__dict__.items()
+            if hasattr(value, "_mach_command")
+        ]
+    )
 
-            msg = (
-                "Mach command '%s' implemented incorrectly. "
-                + "Conditions argument must take a list "
-                + "of functions. Found %s instead."
-            )
+    for method, command in command_methods:
+        # Ignore subcommands for now: we handle them later.
+        if command.subcommand:
+            continue
+
+        seen_commands.add(command.name)
+
+        if not command.conditions and Registrar.require_conditions:
+            continue
 
-            if not isinstance(self.conditions, collections.abc.Iterable):
-                msg = msg % (self.name, type(self.conditions))
+        msg = (
+            "Mach command '%s' implemented incorrectly. "
+            + "Conditions argument must take a list "
+            + "of functions. Found %s instead."
+        )
+
+        if not isinstance(command.conditions, collections.abc.Iterable):
+            msg = msg % (command.name, type(command.conditions))
+            raise MachError(msg)
+
+        for c in command.conditions:
+            if not hasattr(c, "__call__"):
+                msg = msg % (command.name, type(c))
                 raise MachError(msg)
 
-            for c in self.conditions:
-                if not hasattr(c, "__call__"):
-                    msg = msg % (self.name, type(c))
-                    raise MachError(msg)
+        command.cls = cls
+        command.method = method
+
+        Registrar.register_command_handler(command)
 
-            self.func = func
-
-            Registrar.register_command_handler(self)
+    # Now do another pass to get sub-commands. We do this in two passes so
+    # we can check the parent command existence without having to hold
+    # state and reconcile after traversal.
+    for method, command in command_methods:
+        # It is a regular command.
+        if not command.subcommand:
+            continue
 
-        else:
-            if self.name not in Registrar.command_handlers:
-                raise MachError(
-                    "Command referenced by sub-command does not exist: %s" % self.name
-                )
+        if command.name not in seen_commands:
+            raise MachError(
+                "Command referenced by sub-command does not exist: %s" % command.name
+            )
+
+        if command.name not in Registrar.command_handlers:
+            continue
 
-            self.func = func
-            parent = Registrar.command_handlers[self.name]
+        command.cls = cls
+        command.method = method
+        parent = Registrar.command_handlers[command.name]
 
-            if self.subcommand in parent.subcommand_handlers:
-                raise MachError("sub-command already defined: %s" % self.subcommand)
+        if command.subcommand in parent.subcommand_handlers:
+            raise MachError("sub-command already defined: %s" % command.subcommand)
 
-            parent.subcommand_handlers[self.subcommand] = self
+        parent.subcommand_handlers[command.subcommand] = command
+
+    return cls
 
 
 class Command(object):
     """Decorator for functions or methods that provide a mach command.
 
     The decorator accepts arguments that define basic attributes of the
     command. The following arguments are recognized:
 
@@ -177,17 +220,16 @@ class Command(object):
         self._mach_command = _MachCommand(name=name, **kwargs)
         self._mach_command.metrics_path = metrics_path
 
     def __call__(self, func):
         if not hasattr(func, "_mach_command"):
             func._mach_command = _MachCommand()
 
         func._mach_command |= self._mach_command
-        func._mach_command.register(func)
 
         return func
 
 
 class SubCommand(object):
     """Decorator for functions or methods that provide a sub-command.
 
     Mach commands can have sub-commands. e.g. ``mach command foo`` or
@@ -218,17 +260,16 @@ class SubCommand(object):
 
         self._mach_command.metrics_path = metrics_path
 
     def __call__(self, func):
         if not hasattr(func, "_mach_command"):
             func._mach_command = _MachCommand()
 
         func._mach_command |= self._mach_command
-        func._mach_command.register(func)
 
         return func
 
 
 class CommandArgument(object):
     """Decorator for additional arguments to mach subcommands.
 
     This decorator should be used to add arguments to mach commands. Arguments
--- a/python/mach/mach/registrar.py
+++ b/python/mach/mach/registrar.py
@@ -90,17 +90,17 @@ class MachRegistrar(object):
         fail_conditions = MachRegistrar._fail_conditions(handler, instance)
         if fail_conditions:
             print(
                 MachRegistrar._condition_failed_message(handler.name, fail_conditions)
             )
             return 1
 
         self.command_depth += 1
-        fn = handler.func
+        fn = getattr(instance, handler.method)
 
         start_time = time.time()
 
         if debug_command:
             import pdb
 
             result = pdb.runcall(fn, instance, **kwargs)
         else:
--- a/python/mach/mach/test/providers/basic.py
+++ b/python/mach/mach/test/providers/basic.py
@@ -1,21 +1,24 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 from __future__ import absolute_import
 from __future__ import unicode_literals
 
 from mach.decorators import (
     CommandArgument,
+    CommandProvider,
     Command,
 )
+from mozbuild.base import MachCommandBase
 
 
-@Command("cmd_foo", category="testing")
-def run_foo(command_context):
-    pass
-
+@CommandProvider
+class ConditionsProvider(MachCommandBase):
+    @Command("cmd_foo", category="testing")
+    def run_foo(self, command_context):
+        pass
 
-@Command("cmd_bar", category="testing")
-@CommandArgument("--baz", action="store_true", help="Run with baz")
-def run_bar(command_context, baz=None):
-    pass
+    @Command("cmd_bar", category="testing")
+    @CommandArgument("--baz", action="store_true", help="Run with baz")
+    def run_bar(self, command_context, baz=None):
+        pass
--- a/python/mach/mach/test/providers/commands.py
+++ b/python/mach/mach/test/providers/commands.py
@@ -3,36 +3,43 @@
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, unicode_literals
 
 from functools import partial
 
 from mach.decorators import (
     CommandArgument,
+    CommandProvider,
     Command,
 )
+from mozbuild.base import MachCommandBase
 
 
 def is_foo(cls):
     """Foo must be true"""
     return cls.foo
 
 
 def is_bar(val, cls):
     """Bar must equal val"""
     return cls.bar == val
 
 
-@Command("cmd_foo", category="testing")
-@CommandArgument("--arg", default=None, help="Argument help.")
-def run_foo(command_context):
-    pass
+@CommandProvider
+class MachCommands(MachCommandBase):
+    foo = True
+    bar = False
 
+    @Command("cmd_foo", category="testing")
+    @CommandArgument("--arg", default=None, help="Argument help.")
+    def run_foo(self, command_context):
+        pass
 
-@Command("cmd_bar", category="testing", conditions=[partial(is_bar, False)])
-def run_bar(command_context):
-    pass
+    @Command("cmd_bar", category="testing", conditions=[partial(is_bar, False)])
+    def run_bar(self, command_context):
+        pass
 
-
-@Command("cmd_foobar", category="testing", conditions=[is_foo, partial(is_bar, True)])
-def run_foobar(command_context):
-    pass
+    @Command(
+        "cmd_foobar", category="testing", conditions=[is_foo, partial(is_bar, True)]
+    )
+    def run_foobar(self, command_context):
+        pass
--- a/python/mach/mach/test/providers/conditions.py
+++ b/python/mach/mach/test/providers/conditions.py
@@ -1,60 +1,55 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import
 from __future__ import unicode_literals
 
 from mach.decorators import (
+    CommandProvider,
     Command,
 )
-
-
-def is_true(cls):
-    return True
+from mozbuild.base import MachCommandBase
 
 
-def is_false(cls):
-    return False
+def is_foo(cls):
+    """Foo must be true"""
+    return cls.foo
 
 
-@Command("cmd_condition_true", category="testing", conditions=[is_true])
-def run_condition_true(self, command_context):
-    pass
-
-
-@Command("cmd_condition_false", category="testing", conditions=[is_false])
-def run_condition_false(self, command_context):
-    pass
+def is_bar(cls):
+    """Bar must be true"""
+    return cls.bar
 
 
-@Command(
-    "cmd_condition_true_and_false", category="testing", conditions=[is_true, is_false]
-)
-def run_condition_true_and_false(self, command_context):
-    pass
+@CommandProvider
+class ConditionsProvider(MachCommandBase):
+    foo = True
+    bar = False
 
+    @Command("cmd_foo", category="testing", conditions=[is_foo])
+    def run_foo(self, command_context):
+        pass
 
-def is_ctx_foo(cls):
-    """Foo must be true"""
-    return cls._mach_context.foo
+    @Command("cmd_bar", category="testing", conditions=[is_bar])
+    def run_bar(self, command_context):
+        pass
+
+    @Command("cmd_foobar", category="testing", conditions=[is_foo, is_bar])
+    def run_foobar(self, command_context):
+        pass
 
 
-def is_ctx_bar(cls):
-    """Bar must be true"""
-    return cls._mach_context.bar
-
-
-@Command("cmd_foo_ctx", category="testing", conditions=[is_ctx_foo])
-def run_foo_ctx(self, command_context):
-    pass
+@CommandProvider
+class ConditionsContextProvider(MachCommandBase):
+    @Command("cmd_foo_ctx", category="testing", conditions=[is_foo])
+    def run_foo(self, command_context):
+        pass
 
-
-@Command("cmd_bar_ctx", category="testing", conditions=[is_ctx_bar])
-def run_bar_ctx(self, command_context):
-    pass
+    @Command("cmd_bar_ctx", category="testing", conditions=[is_bar])
+    def run_bar(self, command_context):
+        pass
 
-
-@Command("cmd_foobar_ctx", category="testing", conditions=[is_ctx_foo, is_ctx_bar])
-def run_foobar_ctx(self, command_context):
-    pass
+    @Command("cmd_foobar_ctx", category="testing", conditions=[is_foo, is_bar])
+    def run_foobar(self, command_context):
+        pass
--- a/python/mach/mach/test/providers/conditions_invalid.py
+++ b/python/mach/mach/test/providers/conditions_invalid.py
@@ -1,15 +1,19 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import
 from __future__ import unicode_literals
 
 from mach.decorators import (
+    CommandProvider,
     Command,
 )
+from mozbuild.base import MachCommandBase
 
 
-@Command("cmd_foo", category="testing", conditions=["invalid"])
-def run_foo(command_context):
-    pass
+@CommandProvider
+class ConditionsProvider(MachCommandBase):
+    @Command("cmd_foo", category="testing", conditions=["invalid"])
+    def run_foo(self, command_context):
+        pass
--- a/python/mach/mach/test/providers/throw.py
+++ b/python/mach/mach/test/providers/throw.py
@@ -2,23 +2,26 @@
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import
 from __future__ import unicode_literals
 
 from mach.decorators import (
     CommandArgument,
+    CommandProvider,
     Command,
 )
 from mach.test.providers import throw2
+from mozbuild.base import MachCommandBase
 
 
-@Command("throw", category="testing")
-@CommandArgument("--message", "-m", default="General Error")
-def throw(command_context, message):
-    raise Exception(message)
+@CommandProvider
+class TestCommandProvider(MachCommandBase):
+    @Command("throw", category="testing")
+    @CommandArgument("--message", "-m", default="General Error")
+    def throw(self, command_context, message):
+        raise Exception(message)
 
-
-@Command("throw_deep", category="testing")
-@CommandArgument("--message", "-m", default="General Error")
-def throw_deep(command_context, message):
-    throw2.throw_deep(message)
+    @Command("throw_deep", category="testing")
+    @CommandArgument("--message", "-m", default="General Error")
+    def throw_deep(self, command_context, message):
+        throw2.throw_deep(message)
--- a/python/mach/mach/test/test_conditions.py
+++ b/python/mach/mach/test/test_conditions.py
@@ -44,29 +44,29 @@ class TestConditions(TestBase):
     """Tests for conditionally filtering commands."""
 
     def _run(self, args, context_handler=_populate_bare_context):
         return self._run_mach(args, "conditions.py", context_handler=context_handler)
 
     def test_conditions_pass(self):
         """Test that a command which passes its conditions is runnable."""
 
-        self.assertEquals((0, "", ""), self._run(["cmd_condition_true"]))
+        self.assertEquals((0, "", ""), self._run(["cmd_foo"]))
         self.assertEquals((0, "", ""), self._run(["cmd_foo_ctx"], _populate_context))
 
     def test_invalid_context_message(self):
         """Test that commands which do not pass all their conditions
         print the proper failure message."""
 
         def is_bar():
             """Bar must be true"""
 
         fail_conditions = [is_bar]
 
-        for name in ("cmd_condition_false", "cmd_condition_true_and_false"):
+        for name in ("cmd_bar", "cmd_foobar"):
             result, stdout, stderr = self._run([name])
             self.assertEquals(1, result)
 
             fail_msg = Registrar._condition_failed_message(name, fail_conditions)
             self.assertEquals(fail_msg.rstrip(), stdout.rstrip())
 
         for name in ("cmd_bar_ctx", "cmd_foobar_ctx"):
             result, stdout, stderr = self._run([name], _populate_context)
@@ -85,18 +85,18 @@ class TestConditions(TestBase):
             m.load_commands_from_file,
             os.path.join(PROVIDER_DIR, "conditions_invalid.py"),
         )
 
     def test_help_message(self):
         """Test that commands that are not runnable do not show up in help."""
 
         result, stdout, stderr = self._run(["help"], _populate_context)
-        self.assertIn("cmd_condition_true", stdout)
-        self.assertNotIn("cmd_condition_false", stdout)
-        self.assertNotIn("cmd_condition_true_and_false", stdout)
+        self.assertIn("cmd_foo", stdout)
+        self.assertNotIn("cmd_bar", stdout)
+        self.assertNotIn("cmd_foobar", stdout)
         self.assertIn("cmd_foo_ctx", stdout)
         self.assertNotIn("cmd_bar_ctx", stdout)
         self.assertNotIn("cmd_foobar_ctx", stdout)
 
 
 if __name__ == "__main__":
     main()
--- a/python/mach/mach/test/test_decorators.py
+++ b/python/mach/mach/test/test_decorators.py
@@ -4,22 +4,23 @@
 
 from __future__ import absolute_import, unicode_literals
 
 import os
 
 import pytest
 from unittest.mock import Mock
 
+from mozbuild.base import MachCommandBase
 from mozunit import main
 
 import mach.registrar
 import mach.decorators
 from mach.base import MachError
-from mach.decorators import CommandArgument, Command, SubCommand
+from mach.decorators import CommandArgument, CommandProvider, Command, SubCommand
 
 
 @pytest.fixture
 def registrar(monkeypatch):
     test_registrar = mach.registrar.MachRegistrar()
     test_registrar.register_category(
         "testing", "Mach unittest", "Testing for mach decorators"
     )
@@ -27,91 +28,101 @@ def registrar(monkeypatch):
     return test_registrar
 
 
 def test_register_command_with_argument(registrar):
     inner_function = Mock()
     context = Mock()
     context.cwd = "."
 
-    @Command("cmd_foo", category="testing")
-    @CommandArgument("--arg", default=None, help="Argument help.")
-    def run_foo(command_context, arg):
-        inner_function(arg)
+    @CommandProvider
+    class CommandFoo(MachCommandBase):
+        @Command("cmd_foo", category="testing")
+        @CommandArgument("--arg", default=None, help="Argument help.")
+        def run_foo(self, command_context, arg):
+            inner_function(arg)
 
     registrar.dispatch("cmd_foo", context, arg="argument")
 
     inner_function.assert_called_with("argument")
 
 
 def test_register_command_with_metrics_path(registrar):
     context = Mock()
     context.cwd = "."
 
     metrics_path = "metrics/path"
     metrics_mock = Mock()
     context.telemetry.metrics.return_value = metrics_mock
 
-    @Command("cmd_foo", category="testing", metrics_path=metrics_path)
-    def run_foo(command_context):
-        assert command_context.metrics == metrics_mock
+    @CommandProvider
+    class CommandFoo(MachCommandBase):
+        @Command("cmd_foo", category="testing", metrics_path=metrics_path)
+        def run_foo(self, command_context):
+            assert command_context.metrics == metrics_mock
 
-    @SubCommand("cmd_foo", "sub_foo", metrics_path=metrics_path + "2")
-    def run_subfoo(command_context):
-        assert command_context.metrics == metrics_mock
+        @SubCommand("cmd_foo", "sub_foo", metrics_path=metrics_path + "2")
+        def run_subfoo(self, command_context):
+            assert command_context.metrics == metrics_mock
 
     registrar.dispatch("cmd_foo", context)
 
     context.telemetry.metrics.assert_called_with(metrics_path)
     assert context.handler.metrics_path == metrics_path
 
     registrar.dispatch("cmd_foo", context, subcommand="sub_foo")
     assert context.handler.metrics_path == metrics_path + "2"
 
 
 def test_register_command_sets_up_class_at_runtime(registrar):
     inner_function = Mock()
 
     context = Mock()
     context.cwd = "."
 
-    # We test that the virtualenv is set up properly dynamically on
-    # the instance that actually runs the command.
-    @Command("cmd_foo", category="testing", virtualenv_name="env_foo")
-    def run_foo(command_context):
-        assert (
-            os.path.basename(command_context.virtualenv_manager.virtualenv_root)
-            == "env_foo"
-        )
-        inner_function("foo")
+    # Inside the following class, we test that the virtualenv is set up properly
+    # dynamically on the instance that actually runs the command.
+    @CommandProvider
+    class CommandFoo(MachCommandBase):
+        @Command("cmd_foo", category="testing", virtualenv_name="env_foo")
+        def run_foo(self, command_context):
+            assert (
+                os.path.basename(command_context.virtualenv_manager.virtualenv_root)
+                == "env_foo"
+            )
+            inner_function("foo")
 
-    @Command("cmd_bar", category="testing", virtualenv_name="env_bar")
-    def run_bar(command_context):
-        assert (
-            os.path.basename(command_context.virtualenv_manager.virtualenv_root)
-            == "env_bar"
-        )
-        inner_function("bar")
+        @Command("cmd_bar", category="testing", virtualenv_name="env_bar")
+        def run_bar(self, command_context):
+            assert (
+                os.path.basename(command_context.virtualenv_manager.virtualenv_root)
+                == "env_bar"
+            )
+            inner_function("bar")
 
     registrar.dispatch("cmd_foo", context)
     inner_function.assert_called_with("foo")
     registrar.dispatch("cmd_bar", context)
     inner_function.assert_called_with("bar")
 
 
 def test_cannot_create_command_nonexisting_category(registrar):
     with pytest.raises(MachError):
 
-        @Command("cmd_foo", category="bar")
-        def run_foo(command_context):
-            pass
+        @CommandProvider
+        class CommandFoo(MachCommandBase):
+            @Command("cmd_foo", category="bar")
+            def run_foo(self, command_context):
+                pass
 
 
 def test_subcommand_requires_parent_to_exist(registrar):
     with pytest.raises(MachError):
 
-        @SubCommand("sub_foo", "foo")
-        def run_foo(command_context):
-            pass
+        @CommandProvider
+        class CommandFoo(MachCommandBase):
+            @SubCommand("sub_foo", "foo")
+            def run_foo(self, command_context):
+                pass
 
 
 if __name__ == "__main__":
     main()
--- a/python/mach_commands.py
+++ b/python/mach_commands.py
@@ -15,352 +15,358 @@ import six
 
 from concurrent.futures import ThreadPoolExecutor, as_completed, thread
 
 import mozinfo
 from mozfile import which
 from manifestparser import TestManifest
 from manifestparser import filters as mpf
 
+from mozbuild.base import MachCommandBase
 
-from mach.decorators import CommandArgument, Command
+from mach.decorators import CommandArgument, CommandProvider, Command
 from mach.util import UserError
 
 here = os.path.abspath(os.path.dirname(__file__))
 
 
-@Command("python", category="devenv", description="Run Python.")
-@CommandArgument(
-    "--no-virtualenv", action="store_true", help="Do not set up a virtualenv"
-)
-@CommandArgument(
-    "--no-activate", action="store_true", help="Do not activate the virtualenv"
-)
-@CommandArgument(
-    "--exec-file", default=None, help="Execute this Python file using `exec`"
-)
-@CommandArgument(
-    "--ipython",
-    action="store_true",
-    default=False,
-    help="Use ipython instead of the default Python REPL.",
-)
-@CommandArgument(
-    "--requirements",
-    default=None,
-    help="Install this requirements file before running Python",
-)
-@CommandArgument("args", nargs=argparse.REMAINDER)
-def python(
-    command_context,
-    no_virtualenv,
-    no_activate,
-    exec_file,
-    ipython,
-    requirements,
-    args,
-):
-    # Avoid logging the command
-    command_context.log_manager.terminal_handler.setLevel(logging.CRITICAL)
+@CommandProvider
+class MachCommands(MachCommandBase):
+    @Command("python", category="devenv", description="Run Python.")
+    @CommandArgument(
+        "--no-virtualenv", action="store_true", help="Do not set up a virtualenv"
+    )
+    @CommandArgument(
+        "--no-activate", action="store_true", help="Do not activate the virtualenv"
+    )
+    @CommandArgument(
+        "--exec-file", default=None, help="Execute this Python file using `exec`"
+    )
+    @CommandArgument(
+        "--ipython",
+        action="store_true",
+        default=False,
+        help="Use ipython instead of the default Python REPL.",
+    )
+    @CommandArgument(
+        "--requirements",
+        default=None,
+        help="Install this requirements file before running Python",
+    )
+    @CommandArgument("args", nargs=argparse.REMAINDER)
+    def python(
+        self,
+        command_context,
+        no_virtualenv,
+        no_activate,
+        exec_file,
+        ipython,
+        requirements,
+        args,
+    ):
+        # Avoid logging the command
+        command_context.log_manager.terminal_handler.setLevel(logging.CRITICAL)
+
+        # Note: subprocess requires native strings in os.environ on Windows.
+        append_env = {"PYTHONDONTWRITEBYTECODE": str("1")}
+
+        if requirements and no_virtualenv:
+            raise UserError("Cannot pass both --requirements and --no-virtualenv.")
+
+        if no_virtualenv:
+            from mach_initialize import mach_sys_path
+
+            python_path = sys.executable
+            append_env["PYTHONPATH"] = os.pathsep.join(
+                mach_sys_path(command_context.topsrcdir)
+            )
+        else:
+            command_context.virtualenv_manager.ensure()
+            if not no_activate:
+                command_context.virtualenv_manager.activate()
+            python_path = command_context.virtualenv_manager.python_path
+            if requirements:
+                command_context.virtualenv_manager.install_pip_requirements(
+                    requirements, require_hashes=False
+                )
+
+        if exec_file:
+            exec(open(exec_file).read())
+            return 0
+
+        if ipython:
+            bindir = os.path.dirname(python_path)
+            python_path = which("ipython", path=bindir)
+            if not python_path:
+                if not no_virtualenv:
+                    # Use `_run_pip` directly rather than `install_pip_package` to bypass
+                    # `req.check_if_exists()` which may detect a system installed ipython.
+                    command_context.virtualenv_manager._run_pip(["install", "ipython"])
+                    python_path = which("ipython", path=bindir)
+
+                if not python_path:
+                    print("error: could not detect or install ipython")
+                    return 1
+
+        return command_context.run_process(
+            [python_path] + args,
+            pass_thru=True,  # Allow user to run Python interactively.
+            ensure_exit_code=False,  # Don't throw on non-zero exit code.
+            python_unbuffered=False,  # Leave input buffered.
+            append_env=append_env,
+        )
+
+    @Command(
+        "python-test",
+        category="testing",
+        virtualenv_name="python-test",
+        description="Run Python unit tests with pytest.",
+    )
+    @CommandArgument(
+        "-v", "--verbose", default=False, action="store_true", help="Verbose output."
+    )
+    @CommandArgument(
+        "-j",
+        "--jobs",
+        default=None,
+        type=int,
+        help="Number of concurrent jobs to run. Default is the number of CPUs "
+        "in the system.",
+    )
+    @CommandArgument(
+        "-x",
+        "--exitfirst",
+        default=False,
+        action="store_true",
+        help="Runs all tests sequentially and breaks at the first failure.",
+    )
+    @CommandArgument(
+        "--subsuite",
+        default=None,
+        help=(
+            "Python subsuite to run. If not specified, all subsuites are run. "
+            "Use the string `default` to only run tests without a subsuite."
+        ),
+    )
+    @CommandArgument(
+        "tests",
+        nargs="*",
+        metavar="TEST",
+        help=(
+            "Tests to run. Each test can be a single file or a directory. "
+            "Default test resolution relies on PYTHON_UNITTEST_MANIFESTS."
+        ),
+    )
+    @CommandArgument(
+        "extra",
+        nargs=argparse.REMAINDER,
+        metavar="PYTEST ARGS",
+        help=(
+            "Arguments that aren't recognized by mach. These will be "
+            "passed as it is to pytest"
+        ),
+    )
+    def python_test(self, command_context, *args, **kwargs):
+        try:
+            tempdir = str(tempfile.mkdtemp(suffix="-python-test"))
+            if six.PY2:
+                os.environ[b"PYTHON_TEST_TMP"] = tempdir
+            else:
+                os.environ["PYTHON_TEST_TMP"] = tempdir
+            return self.run_python_tests(command_context, *args, **kwargs)
+        finally:
+            import mozfile
+
+            mozfile.remove(tempdir)
+
+    def run_python_tests(
+        self,
+        command_context,
+        tests=None,
+        test_objects=None,
+        subsuite=None,
+        verbose=False,
+        jobs=None,
+        exitfirst=False,
+        extra=None,
+        **kwargs
+    ):
+
+        command_context.activate_virtualenv()
+        if test_objects is None:
+            from moztest.resolve import TestResolver
 
-    # Note: subprocess requires native strings in os.environ on Windows.
-    append_env = {"PYTHONDONTWRITEBYTECODE": str("1")}
+            resolver = command_context._spawn(TestResolver)
+            # If we were given test paths, try to find tests matching them.
+            test_objects = resolver.resolve_tests(paths=tests, flavor="python")
+        else:
+            # We've received test_objects from |mach test|. We need to ignore
+            # the subsuite because python-tests don't use this key like other
+            # harnesses do and |mach test| doesn't realize this.
+            subsuite = None
+
+        mp = TestManifest()
+        mp.tests.extend(test_objects)
+
+        filters = []
+        if subsuite == "default":
+            filters.append(mpf.subsuite(None))
+        elif subsuite:
+            filters.append(mpf.subsuite(subsuite))
+
+        tests = mp.active_tests(
+            filters=filters,
+            disabled=False,
+            python=command_context.virtualenv_manager.version_info()[0],
+            **mozinfo.info
+        )
+
+        if not tests:
+            submsg = "for subsuite '{}' ".format(subsuite) if subsuite else ""
+            message = (
+                "TEST-UNEXPECTED-FAIL | No tests collected "
+                + "{}(Not in PYTHON_UNITTEST_MANIFESTS?)".format(submsg)
+            )
+            command_context.log(logging.WARN, "python-test", {}, message)
+            return 1
+
+        parallel = []
+        sequential = []
+        os.environ.setdefault("PYTEST_ADDOPTS", "")
+
+        if extra:
+            os.environ["PYTEST_ADDOPTS"] += " " + " ".join(extra)
 
-    if requirements and no_virtualenv:
-        raise UserError("Cannot pass both --requirements and --no-virtualenv.")
+        installed_requirements = set()
+        for test in tests:
+            if (
+                test.get("requirements")
+                and test["requirements"] not in installed_requirements
+            ):
+                command_context.virtualenv_manager.install_pip_requirements(
+                    test["requirements"], quiet=True
+                )
+                installed_requirements.add(test["requirements"])
+
+        if exitfirst:
+            sequential = tests
+            os.environ["PYTEST_ADDOPTS"] += " -x"
+        else:
+            for test in tests:
+                if test.get("sequential"):
+                    sequential.append(test)
+                else:
+                    parallel.append(test)
 
-    if no_virtualenv:
-        from mach_initialize import mach_sys_path
+        jobs = jobs or cpu_count()
+
+        return_code = 0
+
+        def on_test_finished(result):
+            output, ret, test_path = result
+
+            for line in output:
+                command_context.log(
+                    logging.INFO, "python-test", {"line": line.rstrip()}, "{line}"
+                )
+
+            if ret and not return_code:
+                command_context.log(
+                    logging.ERROR,
+                    "python-test",
+                    {"test_path": test_path, "ret": ret},
+                    "Setting retcode to {ret} from {test_path}",
+                )
+            return return_code or ret
 
-        python_path = sys.executable
-        append_env["PYTHONPATH"] = os.pathsep.join(
-            mach_sys_path(command_context.topsrcdir)
+        with ThreadPoolExecutor(max_workers=jobs) as executor:
+            futures = [
+                executor.submit(
+                    self._run_python_test, command_context, test, jobs, verbose
+                )
+                for test in parallel
+            ]
+
+            try:
+                for future in as_completed(futures):
+                    return_code = on_test_finished(future.result())
+            except KeyboardInterrupt:
+                # Hack to force stop currently running threads.
+                # https://gist.github.com/clchiou/f2608cbe54403edb0b13
+                executor._threads.clear()
+                thread._threads_queues.clear()
+                raise
+
+        for test in sequential:
+            return_code = on_test_finished(
+                self._run_python_test(command_context, test, jobs, verbose)
+            )
+            if return_code and exitfirst:
+                break
+
+        command_context.log(
+            logging.INFO,
+            "python-test",
+            {"return_code": return_code},
+            "Return code from mach python-test: {return_code}",
         )
-    else:
-        command_context.virtualenv_manager.ensure()
-        if not no_activate:
-            command_context.virtualenv_manager.activate()
-        python_path = command_context.virtualenv_manager.python_path
-        if requirements:
-            command_context.virtualenv_manager.install_pip_requirements(
-                requirements, require_hashes=False
+        return return_code
+
+    def _run_python_test(self, command_context, test, jobs, verbose):
+        from mozprocess import ProcessHandler
+
+        output = []
+
+        def _log(line):
+            # Buffer messages if more than one worker to avoid interleaving
+            if jobs > 1:
+                output.append(line)
+            else:
+                command_context.log(
+                    logging.INFO, "python-test", {"line": line.rstrip()}, "{line}"
+                )
+
+        file_displayed_test = []  # used as boolean
+
+        def _line_handler(line):
+            line = six.ensure_str(line)
+            if not file_displayed_test:
+                output = (
+                    "Ran" in line or "collected" in line or line.startswith("TEST-")
+                )
+                if output:
+                    file_displayed_test.append(True)
+
+            # Hack to make sure treeherder highlights pytest failures
+            if "FAILED" in line.rsplit(" ", 1)[-1]:
+                line = line.replace("FAILED", "TEST-UNEXPECTED-FAIL")
+
+            _log(line)
+
+        _log(test["path"])
+        python = command_context.virtualenv_manager.python_path
+        cmd = [python, test["path"]]
+        env = os.environ.copy()
+        if six.PY2:
+            env[b"PYTHONDONTWRITEBYTECODE"] = b"1"
+        else:
+            env["PYTHONDONTWRITEBYTECODE"] = "1"
+
+        proc = ProcessHandler(
+            cmd, env=env, processOutputLine=_line_handler, storeOutput=False
+        )
+        proc.run()
+
+        return_code = proc.wait()
+
+        if not file_displayed_test:
+            _log(
+                "TEST-UNEXPECTED-FAIL | No test output (missing mozunit.main() "
+                "call?): {}".format(test["path"])
             )
 
-    if exec_file:
-        exec(open(exec_file).read())
-        return 0
-
-    if ipython:
-        bindir = os.path.dirname(python_path)
-        python_path = which("ipython", path=bindir)
-        if not python_path:
-            if not no_virtualenv:
-                # Use `_run_pip` directly rather than `install_pip_package` to bypass
-                # `req.check_if_exists()` which may detect a system installed ipython.
-                command_context.virtualenv_manager._run_pip(["install", "ipython"])
-                python_path = which("ipython", path=bindir)
-
-            if not python_path:
-                print("error: could not detect or install ipython")
-                return 1
-
-    return command_context.run_process(
-        [python_path] + args,
-        pass_thru=True,  # Allow user to run Python interactively.
-        ensure_exit_code=False,  # Don't throw on non-zero exit code.
-        python_unbuffered=False,  # Leave input buffered.
-        append_env=append_env,
-    )
-
-
-@Command(
-    "python-test",
-    category="testing",
-    virtualenv_name="python-test",
-    description="Run Python unit tests with pytest.",
-)
-@CommandArgument(
-    "-v", "--verbose", default=False, action="store_true", help="Verbose output."
-)
-@CommandArgument(
-    "-j",
-    "--jobs",
-    default=None,
-    type=int,
-    help="Number of concurrent jobs to run. Default is the number of CPUs "
-    "in the system.",
-)
-@CommandArgument(
-    "-x",
-    "--exitfirst",
-    default=False,
-    action="store_true",
-    help="Runs all tests sequentially and breaks at the first failure.",
-)
-@CommandArgument(
-    "--subsuite",
-    default=None,
-    help=(
-        "Python subsuite to run. If not specified, all subsuites are run. "
-        "Use the string `default` to only run tests without a subsuite."
-    ),
-)
-@CommandArgument(
-    "tests",
-    nargs="*",
-    metavar="TEST",
-    help=(
-        "Tests to run. Each test can be a single file or a directory. "
-        "Default test resolution relies on PYTHON_UNITTEST_MANIFESTS."
-    ),
-)
-@CommandArgument(
-    "extra",
-    nargs=argparse.REMAINDER,
-    metavar="PYTEST ARGS",
-    help=(
-        "Arguments that aren't recognized by mach. These will be "
-        "passed as it is to pytest"
-    ),
-)
-def python_test(command_context, *args, **kwargs):
-    try:
-        tempdir = str(tempfile.mkdtemp(suffix="-python-test"))
-        if six.PY2:
-            os.environ[b"PYTHON_TEST_TMP"] = tempdir
-        else:
-            os.environ["PYTHON_TEST_TMP"] = tempdir
-        return run_python_tests(command_context, *args, **kwargs)
-    finally:
-        import mozfile
-
-        mozfile.remove(tempdir)
-
-
-def run_python_tests(
-    command_context,
-    tests=None,
-    test_objects=None,
-    subsuite=None,
-    verbose=False,
-    jobs=None,
-    exitfirst=False,
-    extra=None,
-    **kwargs
-):
-
-    command_context.activate_virtualenv()
-    if test_objects is None:
-        from moztest.resolve import TestResolver
-
-        resolver = command_context._spawn(TestResolver)
-        # If we were given test paths, try to find tests matching them.
-        test_objects = resolver.resolve_tests(paths=tests, flavor="python")
-    else:
-        # We've received test_objects from |mach test|. We need to ignore
-        # the subsuite because python-tests don't use this key like other
-        # harnesses do and |mach test| doesn't realize this.
-        subsuite = None
-
-    mp = TestManifest()
-    mp.tests.extend(test_objects)
-
-    filters = []
-    if subsuite == "default":
-        filters.append(mpf.subsuite(None))
-    elif subsuite:
-        filters.append(mpf.subsuite(subsuite))
-
-    tests = mp.active_tests(
-        filters=filters,
-        disabled=False,
-        python=command_context.virtualenv_manager.version_info()[0],
-        **mozinfo.info
-    )
+        if verbose:
+            if return_code != 0:
+                _log("Test failed: {}".format(test["path"]))
+            else:
+                _log("Test passed: {}".format(test["path"]))
 
-    if not tests:
-        submsg = "for subsuite '{}' ".format(subsuite) if subsuite else ""
-        message = (
-            "TEST-UNEXPECTED-FAIL | No tests collected "
-            + "{}(Not in PYTHON_UNITTEST_MANIFESTS?)".format(submsg)
-        )
-        command_context.log(logging.WARN, "python-test", {}, message)
-        return 1
-
-    parallel = []
-    sequential = []
-    os.environ.setdefault("PYTEST_ADDOPTS", "")
-
-    if extra:
-        os.environ["PYTEST_ADDOPTS"] += " " + " ".join(extra)
-
-    installed_requirements = set()
-    for test in tests:
-        if (
-            test.get("requirements")
-            and test["requirements"] not in installed_requirements
-        ):
-            command_context.virtualenv_manager.install_pip_requirements(
-                test["requirements"], quiet=True
-            )
-            installed_requirements.add(test["requirements"])
-
-    if exitfirst:
-        sequential = tests
-        os.environ["PYTEST_ADDOPTS"] += " -x"
-    else:
-        for test in tests:
-            if test.get("sequential"):
-                sequential.append(test)
-            else:
-                parallel.append(test)
-
-    jobs = jobs or cpu_count()
-
-    return_code = 0
-
-    def on_test_finished(result):
-        output, ret, test_path = result
-
-        for line in output:
-            command_context.log(
-                logging.INFO, "python-test", {"line": line.rstrip()}, "{line}"
-            )
-
-        if ret and not return_code:
-            command_context.log(
-                logging.ERROR,
-                "python-test",
-                {"test_path": test_path, "ret": ret},
-                "Setting retcode to {ret} from {test_path}",
-            )
-        return return_code or ret
-
-    with ThreadPoolExecutor(max_workers=jobs) as executor:
-        futures = [
-            executor.submit(_run_python_test, command_context, test, jobs, verbose)
-            for test in parallel
-        ]
-
-        try:
-            for future in as_completed(futures):
-                return_code = on_test_finished(future.result())
-        except KeyboardInterrupt:
-            # Hack to force stop currently running threads.
-            # https://gist.github.com/clchiou/f2608cbe54403edb0b13
-            executor._threads.clear()
-            thread._threads_queues.clear()
-            raise
-
-    for test in sequential:
-        return_code = on_test_finished(
-            _run_python_test(command_context, test, jobs, verbose)
-        )
-        if return_code and exitfirst:
-            break
-
-    command_context.log(
-        logging.INFO,
-        "python-test",
-        {"return_code": return_code},
-        "Return code from mach python-test: {return_code}",
-    )
-    return return_code
-
-
-def _run_python_test(command_context, test, jobs, verbose):
-    from mozprocess import ProcessHandler
-
-    output = []
-
-    def _log(line):
-        # Buffer messages if more than one worker to avoid interleaving
-        if jobs > 1:
-            output.append(line)
-        else:
-            command_context.log(
-                logging.INFO, "python-test", {"line": line.rstrip()}, "{line}"
-            )
-
-    file_displayed_test = []  # used as boolean
-
-    def _line_handler(line):
-        line = six.ensure_str(line)
-        if not file_displayed_test:
-            output = "Ran" in line or "collected" in line or line.startswith("TEST-")
-            if output:
-                file_displayed_test.append(True)
-
-        # Hack to make sure treeherder highlights pytest failures
-        if "FAILED" in line.rsplit(" ", 1)[-1]:
-            line = line.replace("FAILED", "TEST-UNEXPECTED-FAIL")
-
-        _log(line)
-
-    _log(test["path"])
-    python = command_context.virtualenv_manager.python_path
-    cmd = [python, test["path"]]
-    env = os.environ.copy()
-    if six.PY2:
-        env[b"PYTHONDONTWRITEBYTECODE"] = b"1"
-    else:
-        env["PYTHONDONTWRITEBYTECODE"] = "1"
-
-    proc = ProcessHandler(
-        cmd, env=env, processOutputLine=_line_handler, storeOutput=False
-    )
-    proc.run()
-
-    return_code = proc.wait()
-
-    if not file_displayed_test:
-        _log(
-            "TEST-UNEXPECTED-FAIL | No test output (missing mozunit.main() "
-            "call?): {}".format(test["path"])
-        )
-
-    if verbose:
-        if return_code != 0:
-            _log("Test failed: {}".format(test["path"]))
-        else:
-            _log("Test passed: {}".format(test["path"]))
-
-    return output, return_code, test["path"]
+        return output, return_code, test["path"]
--- a/python/mozboot/mozboot/mach_commands.py
+++ b/python/mozboot/mozboot/mach_commands.py
@@ -2,107 +2,118 @@
 # License, v. 2.0. If a copy of the MPL was not distributed with this,
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import errno
 import sys
 
-from mach.decorators import CommandArgument, Command
+from mach.decorators import CommandArgument, CommandProvider, Command
+from mozbuild.base import MachCommandBase
 from mozboot.bootstrap import APPLICATIONS
 
 
-@Command(
-    "bootstrap",
-    category="devenv",
-    description="Install required system packages for building.",
-)
-@CommandArgument(
-    "--application-choice",
-    choices=list(APPLICATIONS.keys()) + list(APPLICATIONS.values()),
-    default=None,
-    help="Pass in an application choice instead of using the default "
-    "interactive prompt.",
-)
-@CommandArgument(
-    "--no-system-changes",
-    dest="no_system_changes",
-    action="store_true",
-    help="Only execute actions that leave the system configuration alone.",
-)
-def bootstrap(command_context, application_choice=None, no_system_changes=False):
-    """Bootstrap system and mach for optimal development experience."""
-    from mozboot.bootstrap import Bootstrapper
+@CommandProvider
+class Bootstrap(MachCommandBase):
+    @Command(
+        "bootstrap",
+        category="devenv",
+        description="Install required system packages for building.",
+    )
+    @CommandArgument(
+        "--application-choice",
+        choices=list(APPLICATIONS.keys()) + list(APPLICATIONS.values()),
+        default=None,
+        help="Pass in an application choice instead of using the default "
+        "interactive prompt.",
+    )
+    @CommandArgument(
+        "--no-system-changes",
+        dest="no_system_changes",
+        action="store_true",
+        help="Only execute actions that leave the system " "configuration alone.",
+    )
+    def bootstrap(
+        self, command_context, application_choice=None, no_system_changes=False
+    ):
+        """Bootstrap system and mach for optimal development experience."""
+        from mozboot.bootstrap import Bootstrapper
 
-    bootstrapper = Bootstrapper(
-        choice=application_choice,
-        no_interactive=not command_context._mach_context.is_interactive,
-        no_system_changes=no_system_changes,
-        mach_context=command_context._mach_context,
-    )
-    bootstrapper.bootstrap(command_context.settings)
+        bootstrapper = Bootstrapper(
+            choice=application_choice,
+            no_interactive=not command_context._mach_context.is_interactive,
+            no_system_changes=no_system_changes,
+            mach_context=command_context._mach_context,
+        )
+        bootstrapper.bootstrap(command_context.settings)
 
 
-@Command(
-    "vcs-setup",
-    category="devenv",
-    description="Help configure a VCS for optimal development.",
-)
-@CommandArgument(
-    "-u",
-    "--update-only",
-    action="store_true",
-    help="Only update recommended extensions, don't run the wizard.",
-)
-def vcs_setup(command_context, update_only=False):
-    """Ensure a Version Control System (Mercurial or Git) is optimally
-    configured.
+@CommandProvider
+class VersionControlCommands(MachCommandBase):
+    @Command(
+        "vcs-setup",
+        category="devenv",
+        description="Help configure a VCS for optimal development.",
+    )
+    @CommandArgument(
+        "-u",
+        "--update-only",
+        action="store_true",
+        help="Only update recommended extensions, don't run the wizard.",
+    )
+    def vcs_setup(self, command_context, update_only=False):
+        """Ensure a Version Control System (Mercurial or Git) is optimally
+        configured.
 
-    This command will inspect your VCS configuration and
-    guide you through an interactive wizard helping you configure the
-    VCS for optimal use on Mozilla projects.
+        This command will inspect your VCS configuration and
+        guide you through an interactive wizard helping you configure the
+        VCS for optimal use on Mozilla projects.
 
-    User choice is respected: no changes are made without explicit
-    confirmation from you.
+        User choice is respected: no changes are made without explicit
+        confirmation from you.
 
-    If "--update-only" is used, the interactive wizard is disabled
-    and this command only ensures that remote repositories providing
-    VCS extensions are up to date.
-    """
-    import mozboot.bootstrap as bootstrap
-    import mozversioncontrol
-    from mozfile import which
+        If "--update-only" is used, the interactive wizard is disabled
+        and this command only ensures that remote repositories providing
+        VCS extensions are up to date.
+        """
+        import mozboot.bootstrap as bootstrap
+        import mozversioncontrol
+        from mozfile import which
 
-    repo = mozversioncontrol.get_repository_object(command_context._mach_context.topdir)
-    tool = "hg"
-    if repo.name == "git":
-        tool = "git"
-
-    # "hg" is an executable script with a shebang, which will be found by
-    # which. We need to pass a win32 executable to the function because we
-    # spawn a process from it.
-    if sys.platform in ("win32", "msys"):
-        tool += ".exe"
-
-    vcs = which(tool)
-    if not vcs:
-        raise OSError(errno.ENOENT, "Could not find {} on $PATH".format(tool))
-
-    if update_only:
+        repo = mozversioncontrol.get_repository_object(
+            command_context._mach_context.topdir
+        )
+        tool = "hg"
         if repo.name == "git":
-            bootstrap.update_git_tools(
-                vcs,
-                command_context._mach_context.state_dir,
-                command_context._mach_context.topdir,
-            )
+            tool = "git"
+
+        # "hg" is an executable script with a shebang, which will be found by
+        # which. We need to pass a win32 executable to the function because we
+        # spawn a process from it.
+        if sys.platform in ("win32", "msys"):
+            tool += ".exe"
+
+        vcs = which(tool)
+        if not vcs:
+            raise OSError(errno.ENOENT, "Could not find {} on $PATH".format(tool))
+
+        if update_only:
+            if repo.name == "git":
+                bootstrap.update_git_tools(
+                    vcs,
+                    command_context._mach_context.state_dir,
+                    command_context._mach_context.topdir,
+                )
+            else:
+                bootstrap.update_vct(vcs, command_context._mach_context.state_dir)
         else:
-            bootstrap.update_vct(vcs, command_context._mach_context.state_dir)
-    else:
-        if repo.name == "git":
-            bootstrap.configure_git(
-                vcs,
-                which("git-cinnabar"),
-                command_context._mach_context.state_dir,
-                command_context._mach_context.topdir,
-            )
-        else:
-            bootstrap.configure_mercurial(vcs, command_context._mach_context.state_dir)
+            if repo.name == "git":
+                bootstrap.configure_git(
+                    vcs,
+                    which("git-cinnabar"),
+                    command_context._mach_context.state_dir,
+                    command_context._mach_context.topdir,
+                )
+            else:
+                bootstrap.configure_mercurial(
+                    vcs, command_context._mach_context.state_dir
+                )
--- a/python/mozbuild/mozbuild/artifact_commands.py
+++ b/python/mozbuild/mozbuild/artifact_commands.py
@@ -8,19 +8,19 @@ import hashlib
 import json
 import logging
 import os
 import shutil
 import six
 
 from collections import OrderedDict
 
-from mach.decorators import CommandArgument, Command, SubCommand
+from mach.decorators import CommandArgument, CommandProvider, Command, SubCommand
 from mozbuild.artifact_builds import JOB_CHOICES
-from mozbuild.base import MachCommandConditions as conditions
+from mozbuild.base import MachCommandBase, MachCommandConditions as conditions
 from mozbuild.util import ensureParentDir
 import mozversioncontrol
 
 
 _COULD_NOT_FIND_ARTIFACTS_TEMPLATE = (
     "ERROR!!!!!! Could not find artifacts for a toolchain build named "
     "`{build}`. Local commits, dirty/stale files, and other changes in your "
     "checkout may cause this error. Make sure you are on a fresh, current "
@@ -50,547 +50,555 @@ class ArtifactSubCommand(SubCommand):
                 "--verbose", "-v", action="store_true", help="Print verbose output."
             ),
         ]
         for arg in args:
             after = arg(after)
         return after
 
 
-# Fetch and install binary artifacts from Mozilla automation.
-
-
-@Command(
-    "artifact",
-    category="post-build",
-    description="Use pre-built artifacts to build Firefox.",
-)
-def artifact(command_context):
-    """Download, cache, and install pre-built binary artifacts to build Firefox.
+@CommandProvider
+class PackageFrontend(MachCommandBase):
+    """Fetch and install binary artifacts from Mozilla automation."""
 
-    Use |mach build| as normal to freshen your installed binary libraries:
-    artifact builds automatically download, cache, and install binary
-    artifacts from Mozilla automation, replacing whatever may be in your
-    object directory.  Use |mach artifact last| to see what binary artifacts
-    were last used.
+    @Command(
+        "artifact",
+        category="post-build",
+        description="Use pre-built artifacts to build Firefox.",
+    )
+    def artifact(self, command_context):
+        """Download, cache, and install pre-built binary artifacts to build Firefox.
 
-    Never build libxul again!
-
-    """
-    pass
-
+        Use |mach build| as normal to freshen your installed binary libraries:
+        artifact builds automatically download, cache, and install binary
+        artifacts from Mozilla automation, replacing whatever may be in your
+        object directory.  Use |mach artifact last| to see what binary artifacts
+        were last used.
 
-def _make_artifacts(
-    command_context,
-    tree=None,
-    job=None,
-    skip_cache=False,
-    download_tests=True,
-    download_symbols=False,
-    download_host_bins=False,
-    download_maven_zip=False,
-    no_process=False,
-):
-    state_dir = command_context._mach_context.state_dir
-    cache_dir = os.path.join(state_dir, "package-frontend")
+        Never build libxul again!
 
-    hg = None
-    if conditions.is_hg(command_context):
-        hg = command_context.substs["HG"]
+        """
+        pass
 
-    git = None
-    if conditions.is_git(command_context):
-        git = command_context.substs["GIT"]
-
-    # If we're building Thunderbird, we should be checking for comm-central artifacts.
-    topsrcdir = command_context.substs.get("commtopsrcdir", command_context.topsrcdir)
-
-    if download_maven_zip:
-        if download_tests:
-            raise ValueError("--maven-zip requires --no-tests")
-        if download_symbols:
-            raise ValueError("--maven-zip requires no --symbols")
-        if download_host_bins:
-            raise ValueError("--maven-zip requires no --host-bins")
-        if not no_process:
-            raise ValueError("--maven-zip requires --no-process")
-
-    from mozbuild.artifacts import Artifacts
+    def _make_artifacts(
+        self,
+        command_context,
+        tree=None,
+        job=None,
+        skip_cache=False,
+        download_tests=True,
+        download_symbols=False,
+        download_host_bins=False,
+        download_maven_zip=False,
+        no_process=False,
+    ):
+        state_dir = command_context._mach_context.state_dir
+        cache_dir = os.path.join(state_dir, "package-frontend")
 
-    artifacts = Artifacts(
-        tree,
-        command_context.substs,
-        command_context.defines,
-        job,
-        log=command_context.log,
-        cache_dir=cache_dir,
-        skip_cache=skip_cache,
-        hg=hg,
-        git=git,
-        topsrcdir=topsrcdir,
-        download_tests=download_tests,
-        download_symbols=download_symbols,
-        download_host_bins=download_host_bins,
-        download_maven_zip=download_maven_zip,
-        no_process=no_process,
-        mozbuild=command_context,
-    )
-    return artifacts
+        hg = None
+        if conditions.is_hg(command_context):
+            hg = command_context.substs["HG"]
 
+        git = None
+        if conditions.is_git(command_context):
+            git = command_context.substs["GIT"]
+
+        # If we're building Thunderbird, we should be checking for comm-central artifacts.
+        topsrcdir = command_context.substs.get(
+            "commtopsrcdir", command_context.topsrcdir
+        )
 
-@ArtifactSubCommand("artifact", "install", "Install a good pre-built artifact.")
-@CommandArgument(
-    "source",
-    metavar="SRC",
-    nargs="?",
-    type=str,
-    help="Where to fetch and install artifacts from.  Can be omitted, in "
-    "which case the current hg repository is inspected; an hg revision; "
-    "a remote URL; or a local file.",
-    default=None,
-)
-@CommandArgument(
-    "--skip-cache",
-    action="store_true",
-    help="Skip all local caches to force re-fetching remote artifacts.",
-    default=False,
-)
-@CommandArgument("--no-tests", action="store_true", help="Don't install tests.")
-@CommandArgument("--symbols", nargs="?", action=SymbolsAction, help="Download symbols.")
-@CommandArgument("--host-bins", action="store_true", help="Download host binaries.")
-@CommandArgument("--distdir", help="Where to install artifacts to.")
-@CommandArgument(
-    "--no-process",
-    action="store_true",
-    help="Don't process (unpack) artifact packages, just download them.",
-)
-@CommandArgument(
-    "--maven-zip", action="store_true", help="Download Maven zip (Android-only)."
-)
-def artifact_install(
-    command_context,
-    source=None,
-    skip_cache=False,
-    tree=None,
-    job=None,
-    verbose=False,
-    no_tests=False,
-    symbols=False,
-    host_bins=False,
-    distdir=None,
-    no_process=False,
-    maven_zip=False,
-):
-    command_context._set_log_level(verbose)
-    artifacts = _make_artifacts(
-        command_context,
-        tree=tree,
-        job=job,
-        skip_cache=skip_cache,
-        download_tests=not no_tests,
-        download_symbols=symbols,
-        download_host_bins=host_bins,
-        download_maven_zip=maven_zip,
-        no_process=no_process,
-    )
+        if download_maven_zip:
+            if download_tests:
+                raise ValueError("--maven-zip requires --no-tests")
+            if download_symbols:
+                raise ValueError("--maven-zip requires no --symbols")
+            if download_host_bins:
+                raise ValueError("--maven-zip requires no --host-bins")
+            if not no_process:
+                raise ValueError("--maven-zip requires --no-process")
+
+        from mozbuild.artifacts import Artifacts
 
-    return artifacts.install_from(source, distdir or command_context.distdir)
-
+        artifacts = Artifacts(
+            tree,
+            command_context.substs,
+            command_context.defines,
+            job,
+            log=command_context.log,
+            cache_dir=cache_dir,
+            skip_cache=skip_cache,
+            hg=hg,
+            git=git,
+            topsrcdir=topsrcdir,
+            download_tests=download_tests,
+            download_symbols=download_symbols,
+            download_host_bins=download_host_bins,
+            download_maven_zip=download_maven_zip,
+            no_process=no_process,
+            mozbuild=command_context,
+        )
+        return artifacts
 
-@ArtifactSubCommand(
-    "artifact",
-    "clear-cache",
-    "Delete local artifacts and reset local artifact cache.",
-)
-def artifact_clear_cache(command_context, tree=None, job=None, verbose=False):
-    command_context._set_log_level(verbose)
-    artifacts = _make_artifacts(command_context, tree=tree, job=job)
-    artifacts.clear_cache()
-    return 0
-
+    @ArtifactSubCommand("artifact", "install", "Install a good pre-built artifact.")
+    @CommandArgument(
+        "source",
+        metavar="SRC",
+        nargs="?",
+        type=str,
+        help="Where to fetch and install artifacts from.  Can be omitted, in "
+        "which case the current hg repository is inspected; an hg revision; "
+        "a remote URL; or a local file.",
+        default=None,
+    )
+    @CommandArgument(
+        "--skip-cache",
+        action="store_true",
+        help="Skip all local caches to force re-fetching remote artifacts.",
+        default=False,
+    )
+    @CommandArgument("--no-tests", action="store_true", help="Don't install tests.")
+    @CommandArgument(
+        "--symbols", nargs="?", action=SymbolsAction, help="Download symbols."
+    )
+    @CommandArgument("--host-bins", action="store_true", help="Download host binaries.")
+    @CommandArgument("--distdir", help="Where to install artifacts to.")
+    @CommandArgument(
+        "--no-process",
+        action="store_true",
+        help="Don't process (unpack) artifact packages, just download them.",
+    )
+    @CommandArgument(
+        "--maven-zip", action="store_true", help="Download Maven zip (Android-only)."
+    )
+    def artifact_install(
+        self,
+        command_context,
+        source=None,
+        skip_cache=False,
+        tree=None,
+        job=None,
+        verbose=False,
+        no_tests=False,
+        symbols=False,
+        host_bins=False,
+        distdir=None,
+        no_process=False,
+        maven_zip=False,
+    ):
+        command_context._set_log_level(verbose)
+        artifacts = self._make_artifacts(
+            command_context,
+            tree=tree,
+            job=job,
+            skip_cache=skip_cache,
+            download_tests=not no_tests,
+            download_symbols=symbols,
+            download_host_bins=host_bins,
+            download_maven_zip=maven_zip,
+            no_process=no_process,
+        )
 
-@SubCommand("artifact", "toolchain")
-@CommandArgument("--verbose", "-v", action="store_true", help="Print verbose output.")
-@CommandArgument(
-    "--cache-dir",
-    metavar="DIR",
-    help="Directory where to store the artifacts cache",
-)
-@CommandArgument(
-    "--skip-cache",
-    action="store_true",
-    help="Skip all local caches to force re-fetching remote artifacts.",
-    default=False,
-)
-@CommandArgument(
-    "--from-build",
-    metavar="BUILD",
-    nargs="+",
-    help="Download toolchains resulting from the given build(s); "
-    "BUILD is a name of a toolchain task, e.g. linux64-clang",
-)
-@CommandArgument(
-    "--tooltool-manifest",
-    metavar="MANIFEST",
-    help="Explicit tooltool manifest to process",
-)
-@CommandArgument(
-    "--no-unpack", action="store_true", help="Do not unpack any downloaded file"
-)
-@CommandArgument(
-    "--retry", type=int, default=4, help="Number of times to retry failed downloads"
-)
-@CommandArgument(
-    "--bootstrap",
-    action="store_true",
-    help="Whether this is being called from bootstrap. "
-    "This verifies the toolchain is annotated as a toolchain used for local development.",
-)
-@CommandArgument(
-    "--artifact-manifest",
-    metavar="FILE",
-    help="Store a manifest about the downloaded taskcluster artifacts",
-)
-def artifact_toolchain(
-    command_context,
-    verbose=False,
-    cache_dir=None,
-    skip_cache=False,
-    from_build=(),
-    tooltool_manifest=None,
-    no_unpack=False,
-    retry=0,
-    bootstrap=False,
-    artifact_manifest=None,
-):
-    """Download, cache and install pre-built toolchains."""
-    from mozbuild.artifacts import ArtifactCache
-    from mozbuild.action.tooltool import FileRecord, open_manifest, unpack_file
-    import redo
-    import requests
-    import time
+        return artifacts.install_from(source, distdir or command_context.distdir)
 
-    from taskgraph.util.taskcluster import get_artifact_url
+    @ArtifactSubCommand(
+        "artifact",
+        "clear-cache",
+        "Delete local artifacts and reset local artifact cache.",
+    )
+    def artifact_clear_cache(self, command_context, tree=None, job=None, verbose=False):
+        command_context._set_log_level(verbose)
+        artifacts = self._make_artifacts(command_context, tree=tree, job=job)
+        artifacts.clear_cache()
+        return 0
 
-    start = time.time()
-    command_context._set_log_level(verbose)
-    # Normally, we'd use command_context.log_manager.enable_unstructured(),
-    # but that enables all logging, while we only really want tooltool's
-    # and it also makes structured log output twice.
-    # So we manually do what it does, and limit that to the tooltool
-    # logger.
-    if command_context.log_manager.terminal_handler:
-        logging.getLogger("mozbuild.action.tooltool").addHandler(
-            command_context.log_manager.terminal_handler
-        )
-        logging.getLogger("redo").addHandler(
-            command_context.log_manager.terminal_handler
-        )
-        command_context.log_manager.terminal_handler.addFilter(
-            command_context.log_manager.structured_filter
-        )
-    if not cache_dir:
-        cache_dir = os.path.join(command_context._mach_context.state_dir, "toolchains")
-
-    tooltool_host = os.environ.get("TOOLTOOL_HOST", "tooltool.mozilla-releng.net")
-    taskcluster_proxy_url = os.environ.get("TASKCLUSTER_PROXY_URL")
-    if taskcluster_proxy_url:
-        tooltool_url = "{}/{}".format(taskcluster_proxy_url, tooltool_host)
-    else:
-        tooltool_url = "https://{}".format(tooltool_host)
-
-    cache = ArtifactCache(
-        cache_dir=cache_dir, log=command_context.log, skip_cache=skip_cache
+    @SubCommand("artifact", "toolchain")
+    @CommandArgument(
+        "--verbose", "-v", action="store_true", help="Print verbose output."
+    )
+    @CommandArgument(
+        "--cache-dir",
+        metavar="DIR",
+        help="Directory where to store the artifacts cache",
+    )
+    @CommandArgument(
+        "--skip-cache",
+        action="store_true",
+        help="Skip all local caches to force re-fetching remote artifacts.",
+        default=False,
+    )
+    @CommandArgument(
+        "--from-build",
+        metavar="BUILD",
+        nargs="+",
+        help="Download toolchains resulting from the given build(s); "
+        "BUILD is a name of a toolchain task, e.g. linux64-clang",
+    )
+    @CommandArgument(
+        "--tooltool-manifest",
+        metavar="MANIFEST",
+        help="Explicit tooltool manifest to process",
+    )
+    @CommandArgument(
+        "--no-unpack", action="store_true", help="Do not unpack any downloaded file"
     )
-
-    class DownloadRecord(FileRecord):
-        def __init__(self, url, *args, **kwargs):
-            super(DownloadRecord, self).__init__(*args, **kwargs)
-            self.url = url
-            self.basename = self.filename
+    @CommandArgument(
+        "--retry", type=int, default=4, help="Number of times to retry failed downloads"
+    )
+    @CommandArgument(
+        "--bootstrap",
+        action="store_true",
+        help="Whether this is being called from bootstrap. "
+        "This verifies the toolchain is annotated as a toolchain used for local development.",
+    )
+    @CommandArgument(
+        "--artifact-manifest",
+        metavar="FILE",
+        help="Store a manifest about the downloaded taskcluster artifacts",
+    )
+    def artifact_toolchain(
+        self,
+        command_context,
+        verbose=False,
+        cache_dir=None,
+        skip_cache=False,
+        from_build=(),
+        tooltool_manifest=None,
+        no_unpack=False,
+        retry=0,
+        bootstrap=False,
+        artifact_manifest=None,
+    ):
+        """Download, cache and install pre-built toolchains."""
+        from mozbuild.artifacts import ArtifactCache
+        from mozbuild.action.tooltool import FileRecord, open_manifest, unpack_file
+        import redo
+        import requests
+        import time
 
-        def fetch_with(self, cache):
-            self.filename = cache.fetch(self.url)
-            return self.filename
-
-        def validate(self):
-            if self.size is None and self.digest is None:
-                return True
-            return super(DownloadRecord, self).validate()
+        from taskgraph.util.taskcluster import get_artifact_url
 
-    class ArtifactRecord(DownloadRecord):
-        def __init__(self, task_id, artifact_name):
-            for _ in redo.retrier(attempts=retry + 1, sleeptime=60):
-                cot = cache._download_manager.session.get(
-                    get_artifact_url(task_id, "public/chain-of-trust.json")
-                )
-                if cot.status_code >= 500:
-                    continue
-                cot.raise_for_status()
-                break
-            else:
-                cot.raise_for_status()
-
-            digest = algorithm = None
-            data = json.loads(cot.text)
-            for algorithm, digest in (
-                data.get("artifacts", {}).get(artifact_name, {}).items()
-            ):
-                pass
-
-            name = os.path.basename(artifact_name)
-            artifact_url = get_artifact_url(
-                task_id,
-                artifact_name,
-                use_proxy=not artifact_name.startswith("public/"),
+        start = time.time()
+        command_context._set_log_level(verbose)
+        # Normally, we'd use command_context.log_manager.enable_unstructured(),
+        # but that enables all logging, while we only really want tooltool's
+        # and it also makes structured log output twice.
+        # So we manually do what it does, and limit that to the tooltool
+        # logger.
+        if command_context.log_manager.terminal_handler:
+            logging.getLogger("mozbuild.action.tooltool").addHandler(
+                command_context.log_manager.terminal_handler
             )
-            super(ArtifactRecord, self).__init__(
-                artifact_url, name, None, digest, algorithm, unpack=True
+            logging.getLogger("redo").addHandler(
+                command_context.log_manager.terminal_handler
+            )
+            command_context.log_manager.terminal_handler.addFilter(
+                command_context.log_manager.structured_filter
+            )
+        if not cache_dir:
+            cache_dir = os.path.join(
+                command_context._mach_context.state_dir, "toolchains"
             )
 
-    records = OrderedDict()
-    downloaded = []
+        tooltool_host = os.environ.get("TOOLTOOL_HOST", "tooltool.mozilla-releng.net")
+        taskcluster_proxy_url = os.environ.get("TASKCLUSTER_PROXY_URL")
+        if taskcluster_proxy_url:
+            tooltool_url = "{}/{}".format(taskcluster_proxy_url, tooltool_host)
+        else:
+            tooltool_url = "https://{}".format(tooltool_host)
+
+        cache = ArtifactCache(
+            cache_dir=cache_dir, log=command_context.log, skip_cache=skip_cache
+        )
 
-    if tooltool_manifest:
-        manifest = open_manifest(tooltool_manifest)
-        for record in manifest.file_records:
-            url = "{}/{}/{}".format(tooltool_url, record.algorithm, record.digest)
-            records[record.filename] = DownloadRecord(
-                url,
-                record.filename,
-                record.size,
-                record.digest,
-                record.algorithm,
-                unpack=record.unpack,
-                version=record.version,
-                visibility=record.visibility,
-            )
+        class DownloadRecord(FileRecord):
+            def __init__(self, url, *args, **kwargs):
+                super(DownloadRecord, self).__init__(*args, **kwargs)
+                self.url = url
+                self.basename = self.filename
+
+            def fetch_with(self, cache):
+                self.filename = cache.fetch(self.url)
+                return self.filename
+
+            def validate(self):
+                if self.size is None and self.digest is None:
+                    return True
+                return super(DownloadRecord, self).validate()
 
-    if from_build:
-        if "MOZ_AUTOMATION" in os.environ:
-            command_context.log(
-                logging.ERROR,
-                "artifact",
-                {},
-                "Do not use --from-build in automation; all dependencies "
-                "should be determined in the decision task.",
-            )
-            return 1
-        from taskgraph.optimize.strategies import IndexSearch
-        from mozbuild.toolchains import toolchain_task_definitions
+        class ArtifactRecord(DownloadRecord):
+            def __init__(self, task_id, artifact_name):
+                for _ in redo.retrier(attempts=retry + 1, sleeptime=60):
+                    cot = cache._download_manager.session.get(
+                        get_artifact_url(task_id, "public/chain-of-trust.json")
+                    )
+                    if cot.status_code >= 500:
+                        continue
+                    cot.raise_for_status()
+                    break
+                else:
+                    cot.raise_for_status()
+
+                digest = algorithm = None
+                data = json.loads(cot.text)
+                for algorithm, digest in (
+                    data.get("artifacts", {}).get(artifact_name, {}).items()
+                ):
+                    pass
 
-        tasks = toolchain_task_definitions()
+                name = os.path.basename(artifact_name)
+                artifact_url = get_artifact_url(
+                    task_id,
+                    artifact_name,
+                    use_proxy=not artifact_name.startswith("public/"),
+                )
+                super(ArtifactRecord, self).__init__(
+                    artifact_url, name, None, digest, algorithm, unpack=True
+                )
 
-        for b in from_build:
-            user_value = b
+        records = OrderedDict()
+        downloaded = []
 
-            if not b.startswith("toolchain-"):
-                b = "toolchain-{}".format(b)
+        if tooltool_manifest:
+            manifest = open_manifest(tooltool_manifest)
+            for record in manifest.file_records:
+                url = "{}/{}/{}".format(tooltool_url, record.algorithm, record.digest)
+                records[record.filename] = DownloadRecord(
+                    url,
+                    record.filename,
+                    record.size,
+                    record.digest,
+                    record.algorithm,
+                    unpack=record.unpack,
+                    version=record.version,
+                    visibility=record.visibility,
+                )
 
-            task = tasks.get(b)
-            if not task:
+        if from_build:
+            if "MOZ_AUTOMATION" in os.environ:
                 command_context.log(
                     logging.ERROR,
                     "artifact",
-                    {"build": user_value},
-                    "Could not find a toolchain build named `{build}`",
-                )
-                return 1
-
-            # Ensure that toolchains installed by `mach bootstrap` have the
-            # `local-toolchain attribute set. Taskgraph ensures that these
-            # are built on trunk projects, so the task will be available to
-            # install here.
-            if bootstrap and not task.attributes.get("local-toolchain"):
-                command_context.log(
-                    logging.ERROR,
-                    "artifact",
-                    {"build": user_value},
-                    "Toolchain `{build}` is not annotated as used for local development.",
+                    {},
+                    "Do not use --from-build in automation; all dependencies "
+                    "should be determined in the decision task.",
                 )
                 return 1
+            from taskgraph.optimize.strategies import IndexSearch
+            from mozbuild.toolchains import toolchain_task_definitions
 
-            artifact_name = task.attributes.get("toolchain-artifact")
-            command_context.log(
-                logging.DEBUG,
-                "artifact",
-                {
-                    "name": artifact_name,
-                    "index": task.optimization.get("index-search"),
-                },
-                "Searching for {name} in {index}",
-            )
-            deadline = None
-            task_id = IndexSearch().should_replace_task(
-                task, {}, deadline, task.optimization.get("index-search", [])
-            )
-            if task_id in (True, False) or not artifact_name:
-                command_context.log(
-                    logging.ERROR,
-                    "artifact",
-                    {"build": user_value},
-                    _COULD_NOT_FIND_ARTIFACTS_TEMPLATE,
-                )
-                # Get and print some helpful info for diagnosis.
-                repo = mozversioncontrol.get_repository_object(
-                    command_context.topsrcdir
-                )
-                changed_files = set(repo.get_outgoing_files()) | set(
-                    repo.get_changed_files()
-                )
-                if changed_files:
-                    command_context.log(
-                        logging.ERROR,
-                        "artifact",
-                        {},
-                        "Hint: consider reverting your local changes "
-                        "to the following files: %s" % sorted(changed_files),
-                    )
-                if "TASKCLUSTER_ROOT_URL" in os.environ:
+            tasks = toolchain_task_definitions()
+
+            for b in from_build:
+                user_value = b
+
+                if not b.startswith("toolchain-"):
+                    b = "toolchain-{}".format(b)
+
+                task = tasks.get(b)
+                if not task:
                     command_context.log(
                         logging.ERROR,
                         "artifact",
                         {"build": user_value},
-                        "Due to the environment variable TASKCLUSTER_ROOT_URL "
-                        "being set, the artifacts were expected to be found "
-                        "on {}. If this was unintended, unset "
-                        "TASKCLUSTER_ROOT_URL and try again.".format(
-                            os.environ["TASKCLUSTER_ROOT_URL"]
-                        ),
+                        "Could not find a toolchain build named `{build}`",
+                    )
+                    return 1
+
+                # Ensure that toolchains installed by `mach bootstrap` have the
+                # `local-toolchain attribute set. Taskgraph ensures that these
+                # are built on trunk projects, so the task will be available to
+                # install here.
+                if bootstrap and not task.attributes.get("local-toolchain"):
+                    command_context.log(
+                        logging.ERROR,
+                        "artifact",
+                        {"build": user_value},
+                        "Toolchain `{build}` is not annotated as used for local development.",
+                    )
+                    return 1
+
+                artifact_name = task.attributes.get("toolchain-artifact")
+                command_context.log(
+                    logging.DEBUG,
+                    "artifact",
+                    {
+                        "name": artifact_name,
+                        "index": task.optimization.get("index-search"),
+                    },
+                    "Searching for {name} in {index}",
+                )
+                deadline = None
+                task_id = IndexSearch().should_replace_task(
+                    task, {}, deadline, task.optimization.get("index-search", [])
+                )
+                if task_id in (True, False) or not artifact_name:
+                    command_context.log(
+                        logging.ERROR,
+                        "artifact",
+                        {"build": user_value},
+                        _COULD_NOT_FIND_ARTIFACTS_TEMPLATE,
+                    )
+                    # Get and print some helpful info for diagnosis.
+                    repo = mozversioncontrol.get_repository_object(
+                        command_context.topsrcdir
+                    )
+                    changed_files = set(repo.get_outgoing_files()) | set(
+                        repo.get_changed_files()
                     )
+                    if changed_files:
+                        command_context.log(
+                            logging.ERROR,
+                            "artifact",
+                            {},
+                            "Hint: consider reverting your local changes "
+                            "to the following files: %s" % sorted(changed_files),
+                        )
+                    if "TASKCLUSTER_ROOT_URL" in os.environ:
+                        command_context.log(
+                            logging.ERROR,
+                            "artifact",
+                            {"build": user_value},
+                            "Due to the environment variable TASKCLUSTER_ROOT_URL "
+                            "being set, the artifacts were expected to be found "
+                            "on {}. If this was unintended, unset "
+                            "TASKCLUSTER_ROOT_URL and try again.".format(
+                                os.environ["TASKCLUSTER_ROOT_URL"]
+                            ),
+                        )
+                    return 1
+
+                command_context.log(
+                    logging.DEBUG,
+                    "artifact",
+                    {"name": artifact_name, "task_id": task_id},
+                    "Found {name} in {task_id}",
+                )
+
+                record = ArtifactRecord(task_id, artifact_name)
+                records[record.filename] = record
+
+        for record in six.itervalues(records):
+            command_context.log(
+                logging.INFO,
+                "artifact",
+                {"name": record.basename},
+                "Setting up artifact {name}",
+            )
+            valid = False
+            # sleeptime is 60 per retry.py, used by tooltool_wrapper.sh
+            for attempt, _ in enumerate(redo.retrier(attempts=retry + 1, sleeptime=60)):
+                try:
+                    record.fetch_with(cache)
+                except (
+                    requests.exceptions.HTTPError,
+                    requests.exceptions.ChunkedEncodingError,
+                    requests.exceptions.ConnectionError,
+                ) as e:
+
+                    if isinstance(e, requests.exceptions.HTTPError):
+                        # The relengapi proxy likes to return error 400 bad request
+                        # which seems improbably to be due to our (simple) GET
+                        # being borked.
+                        status = e.response.status_code
+                        should_retry = status >= 500 or status == 400
+                    else:
+                        should_retry = True
+
+                    if should_retry or attempt < retry:
+                        level = logging.WARN
+                    else:
+                        level = logging.ERROR
+                    command_context.log(level, "artifact", {}, str(e))
+                    if not should_retry:
+                        break
+                    if attempt < retry:
+                        command_context.log(
+                            logging.INFO, "artifact", {}, "Will retry in a moment..."
+                        )
+                    continue
+                try:
+                    valid = record.validate()
+                except Exception:
+                    pass
+                if not valid:
+                    os.unlink(record.filename)
+                    if attempt < retry:
+                        command_context.log(
+                            logging.INFO,
+                            "artifact",
+                            {},
+                            "Corrupt download. Will retry in a moment...",
+                        )
+                    continue
+
+                downloaded.append(record)
+                break
+
+            if not valid:
+                command_context.log(
+                    logging.ERROR,
+                    "artifact",
+                    {"name": record.basename},
+                    "Failed to download {name}",
+                )
                 return 1
 
+        artifacts = {} if artifact_manifest else None
+
+        for record in downloaded:
+            local = os.path.join(os.getcwd(), record.basename)
+            if os.path.exists(local):
+                os.unlink(local)
+            # unpack_file needs the file with its final name to work
+            # (https://github.com/mozilla/build-tooltool/issues/38), so we
+            # need to copy it, even though we remove it later. Use hard links
+            # when possible.
+            try:
+                os.link(record.filename, local)
+            except Exception:
+                shutil.copy(record.filename, local)
+            # Keep a sha256 of each downloaded file, for the chain-of-trust
+            # validation.
+            if artifact_manifest is not None:
+                with open(local, "rb") as fh:
+                    h = hashlib.sha256()
+                    while True:
+                        data = fh.read(1024 * 1024)
+                        if not data:
+                            break
+                        h.update(data)
+                artifacts[record.url] = {"sha256": h.hexdigest()}
+            if record.unpack and not no_unpack:
+                unpack_file(local)
+                os.unlink(local)
+
+        if not downloaded:
+            command_context.log(logging.ERROR, "artifact", {}, "Nothing to download")
+
+        if artifacts:
+            ensureParentDir(artifact_manifest)
+            with open(artifact_manifest, "w") as fh:
+                json.dump(artifacts, fh, indent=4, sort_keys=True)
+
+        if "MOZ_AUTOMATION" in os.environ:
+            end = time.time()
+
+            perfherder_data = {
+                "framework": {"name": "build_metrics"},
+                "suites": [
+                    {
+                        "name": "mach_artifact_toolchain",
+                        "value": end - start,
+                        "lowerIsBetter": True,
+                        "shouldAlert": False,
+                        "subtests": [],
+                    }
+                ],
+            }
             command_context.log(
-                logging.DEBUG,
-                "artifact",
-                {"name": artifact_name, "task_id": task_id},
-                "Found {name} in {task_id}",
+                logging.INFO,
+                "perfherder",
+                {"data": json.dumps(perfherder_data)},
+                "PERFHERDER_DATA: {data}",
             )
 
-            record = ArtifactRecord(task_id, artifact_name)
-            records[record.filename] = record
-
-    for record in six.itervalues(records):
-        command_context.log(
-            logging.INFO,
-            "artifact",
-            {"name": record.basename},
-            "Setting up artifact {name}",
-        )
-        valid = False
-        # sleeptime is 60 per retry.py, used by tooltool_wrapper.sh
-        for attempt, _ in enumerate(redo.retrier(attempts=retry + 1, sleeptime=60)):
-            try:
-                record.fetch_with(cache)
-            except (
-                requests.exceptions.HTTPError,
-                requests.exceptions.ChunkedEncodingError,
-                requests.exceptions.ConnectionError,
-            ) as e:
-
-                if isinstance(e, requests.exceptions.HTTPError):
-                    # The relengapi proxy likes to return error 400 bad request
-                    # which seems improbably to be due to our (simple) GET
-                    # being borked.
-                    status = e.response.status_code
-                    should_retry = status >= 500 or status == 400
-                else:
-                    should_retry = True
-
-                if should_retry or attempt < retry:
-                    level = logging.WARN
-                else:
-                    level = logging.ERROR
-                command_context.log(level, "artifact", {}, str(e))
-                if not should_retry:
-                    break
-                if attempt < retry:
-                    command_context.log(
-                        logging.INFO, "artifact", {}, "Will retry in a moment..."
-                    )
-                continue
-            try:
-                valid = record.validate()
-            except Exception:
-                pass
-            if not valid:
-                os.unlink(record.filename)
-                if attempt < retry:
-                    command_context.log(
-                        logging.INFO,
-                        "artifact",
-                        {},
-                        "Corrupt download. Will retry in a moment...",
-                    )
-                continue
-
-            downloaded.append(record)
-            break
-
-        if not valid:
-            command_context.log(
-                logging.ERROR,
-                "artifact",
-                {"name": record.basename},
-                "Failed to download {name}",
-            )
-            return 1
-
-    artifacts = {} if artifact_manifest else None
-
-    for record in downloaded:
-        local = os.path.join(os.getcwd(), record.basename)
-        if os.path.exists(local):
-            os.unlink(local)
-        # unpack_file needs the file with its final name to work
-        # (https://github.com/mozilla/build-tooltool/issues/38), so we
-        # need to copy it, even though we remove it later. Use hard links
-        # when possible.
-        try:
-            os.link(record.filename, local)
-        except Exception:
-            shutil.copy(record.filename, local)
-        # Keep a sha256 of each downloaded file, for the chain-of-trust
-        # validation.
-        if artifact_manifest is not None:
-            with open(local, "rb") as fh:
-                h = hashlib.sha256()
-                while True:
-                    data = fh.read(1024 * 1024)
-                    if not data:
-                        break
-                    h.update(data)
-            artifacts[record.url] = {"sha256": h.hexdigest()}
-        if record.unpack and not no_unpack:
-            unpack_file(local)
-            os.unlink(local)
-
-    if not downloaded:
-        command_context.log(logging.ERROR, "artifact", {}, "Nothing to download")
-
-    if artifacts:
-        ensureParentDir(artifact_manifest)
-        with open(artifact_manifest, "w") as fh:
-            json.dump(artifacts, fh, indent=4, sort_keys=True)
-
-    if "MOZ_AUTOMATION" in os.environ:
-        end = time.time()
-
-        perfherder_data = {
-            "framework": {"name": "build_metrics"},
-            "suites": [
-                {
-                    "name": "mach_artifact_toolchain",
-                    "value": end - start,
-                    "lowerIsBetter": True,
-                    "shouldAlert": False,
-                    "subtests": [],
-                }
-            ],
-        }
-        command_context.log(
-            logging.INFO,
-            "perfherder",
-            {"data": json.dumps(perfherder_data)},
-            "PERFHERDER_DATA: {data}",
-        )
-
-    return 0
+        return 0
--- a/python/mozbuild/mozbuild/backend/mach_commands.py
+++ b/python/mozbuild/mozbuild/backend/mach_commands.py
@@ -4,382 +4,384 @@
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import argparse
 import logging
 import os
 import subprocess
 
-from mozbuild import build_commands
+from mozbuild.base import MachCommandBase
+from mozbuild.build_commands import Build
 
 from mozfile import which
-from mach.decorators import CommandArgument, Command
+from mach.decorators import CommandArgument, CommandProvider, Command
 
 import mozpack.path as mozpath
 
 
-@Command(
-    "ide",
-    category="devenv",
-    description="Generate a project and launch an IDE.",
-    virtualenv_name="build",
-)
-@CommandArgument("ide", choices=["eclipse", "visualstudio", "vscode"])
-@CommandArgument("args", nargs=argparse.REMAINDER)
-def run(command_context, ide, args):
-    if ide == "eclipse":
-        backend = "CppEclipse"
-    elif ide == "visualstudio":
-        backend = "VisualStudio"
-    elif ide == "vscode":
-        backend = "Clangd"
-
-    if ide == "eclipse" and not which("eclipse"):
-        command_context.log(
-            logging.ERROR,
-            "ide",
-            {},
-            "Eclipse CDT 8.4 or later must be installed in your PATH.",
-        )
-        command_context.log(
-            logging.ERROR,
-            "ide",
-            {},
-            "Download: http://www.eclipse.org/cdt/downloads.php",
-        )
-        return 1
+@CommandProvider
+class MachCommands(MachCommandBase):
+    @Command(
+        "ide",
+        category="devenv",
+        description="Generate a project and launch an IDE.",
+        virtualenv_name="build",
+    )
+    @CommandArgument("ide", choices=["eclipse", "visualstudio", "vscode"])
+    @CommandArgument("args", nargs=argparse.REMAINDER)
+    def run(self, command_context, ide, args):
+        if ide == "eclipse":
+            backend = "CppEclipse"
+        elif ide == "visualstudio":
+            backend = "VisualStudio"
+        elif ide == "vscode":
+            backend = "Clangd"
 
-    if ide == "vscode":
-        # Check if platform has VSCode installed
-        vscode_cmd = find_vscode_cmd(command_context)
-        if vscode_cmd is None:
-            choice = prompt_bool(
-                "VSCode cannot be found, and may not be installed. Proceed?"
+        if ide == "eclipse" and not which("eclipse"):
+            command_context.log(
+                logging.ERROR,
+                "ide",
+                {},
+                "Eclipse CDT 8.4 or later must be installed in your PATH.",
             )
-            if not choice:
-                return 1
-
-        rc = build_commands.configure(command_context)
-
-        if rc != 0:
-            return rc
+            command_context.log(
+                logging.ERROR,
+                "ide",
+                {},
+                "Download: http://www.eclipse.org/cdt/downloads.php",
+            )
+            return 1
 
-        # First install what we can through install manifests.
-        rc = build_commands._run_make(
-            directory=command_context.topobjdir,
-            target="pre-export",
-            line_handler=None,
-        )
-        if rc != 0:
-            return rc
+        if ide == "vscode":
+            # Check if platform has VSCode installed
+            vscode_cmd = self.find_vscode_cmd(command_context)
+            if vscode_cmd is None:
+                choice = prompt_bool(
+                    "VSCode cannot be found, and may not be installed. Proceed?"
+                )
+                if not choice:
+                    return 1
 
-        # Then build the rest of the build dependencies by running the full
-        # export target, because we can't do anything better.
-        for target in ("export", "pre-compile"):
-            rc = build_commands._run_make(
+            # Create the Build environment to configure the tree
+            builder = Build(command_context._mach_context, None)
+
+            rc = builder.configure(command_context)
+            if rc != 0:
+                return rc
+
+            # First install what we can through install manifests.
+            rc = builder._run_make(
                 directory=command_context.topobjdir,
-                target=target,
+                target="pre-export",
                 line_handler=None,
             )
             if rc != 0:
                 return rc
-    else:
-        # Here we refresh the whole build. 'build export' is sufficient here and is
-        # probably more correct but it's also nice having a single target to get a fully
-        # built and indexed project (gives a easy target to use before go out to lunch).
-        res = command_context._mach_context.commands.dispatch(
-            "build", command_context._mach_context
+
+            # Then build the rest of the build dependencies by running the full
+            # export target, because we can't do anything better.
+            for target in ("export", "pre-compile"):
+                rc = builder._run_make(
+                    directory=command_context.topobjdir,
+                    target=target,
+                    line_handler=None,
+                )
+                if rc != 0:
+                    return rc
+        else:
+            # Here we refresh the whole build. 'build export' is sufficient here and is
+            # probably more correct but it's also nice having a single target to get a fully
+            # built and indexed project (gives a easy target to use before go out to lunch).
+            res = command_context._mach_context.commands.dispatch(
+                "build", command_context._mach_context
+            )
+            if res != 0:
+                return 1
+
+        # Generate or refresh the IDE backend.
+        python = command_context.virtualenv_manager.python_path
+        config_status = os.path.join(command_context.topobjdir, "config.status")
+        args = [python, config_status, "--backend=%s" % backend]
+        res = command_context._run_command_in_objdir(
+            args=args, pass_thru=True, ensure_exit_code=False
         )
         if res != 0:
             return 1
 
-    # Generate or refresh the IDE backend.
-    python = command_context.virtualenv_manager.python_path
-    config_status = os.path.join(command_context.topobjdir, "config.status")
-    args = [python, config_status, "--backend=%s" % backend]
-    res = command_context._run_command_in_objdir(
-        args=args, pass_thru=True, ensure_exit_code=False
-    )
-    if res != 0:
-        return 1
+        if ide == "eclipse":
+            eclipse_workspace_dir = self.get_eclipse_workspace_path(command_context)
+            subprocess.check_call(["eclipse", "-data", eclipse_workspace_dir])
+        elif ide == "visualstudio":
+            visual_studio_workspace_dir = self.get_visualstudio_workspace_path(
+                command_context
+            )
+            subprocess.call(["explorer.exe", visual_studio_workspace_dir])
+        elif ide == "vscode":
+            return self.setup_vscode(command_context, vscode_cmd)
+
+    def get_eclipse_workspace_path(self, command_context):
+        from mozbuild.backend.cpp_eclipse import CppEclipseBackend
 
-    if ide == "eclipse":
-        eclipse_workspace_dir = get_eclipse_workspace_path(command_context)
-        subprocess.check_call(["eclipse", "-data", eclipse_workspace_dir])
-    elif ide == "visualstudio":
-        visual_studio_workspace_dir = get_visualstudio_workspace_path(command_context)
-        subprocess.call(["explorer.exe", visual_studio_workspace_dir])
-    elif ide == "vscode":
-        return setup_vscode(command_context, vscode_cmd)
+        return CppEclipseBackend.get_workspace_path(
+            command_context.topsrcdir, command_context.topobjdir
+        )
+
+    def get_visualstudio_workspace_path(self, command_context):
+        return os.path.normpath(
+            os.path.join(command_context.topobjdir, "msvc", "mozilla.sln")
+        )
 
+    def find_vscode_cmd(self, command_context):
+        import shutil
+
+        # Try to look up the `code` binary on $PATH, and use it if present. This
+        # should catch cases like being run from within a vscode-remote shell,
+        # even if vscode itself is also installed on the remote host.
+        path = shutil.which("code")
+        if path is not None:
+            return [path]
 
-def get_eclipse_workspace_path(command_context):
-    from mozbuild.backend.cpp_eclipse import CppEclipseBackend
-
-    return CppEclipseBackend.get_workspace_path(
-        command_context.topsrcdir, command_context.topobjdir
-    )
-
+        # If the binary wasn't on $PATH, try to find it in a variety of other
+        # well-known install locations based on the current platform.
+        if "linux" in command_context.platform[0]:
+            cmd_and_path = [
+                {"path": "/usr/local/bin/code", "cmd": ["/usr/local/bin/code"]},
+                {"path": "/snap/bin/code", "cmd": ["/snap/bin/code"]},
+                {"path": "/usr/bin/code", "cmd": ["/usr/bin/code"]},
+                {"path": "/usr/bin/code-insiders", "cmd": ["/usr/bin/code-insiders"]},
+            ]
+        elif "macos" in command_context.platform[0]:
+            cmd_and_path = [
+                {"path": "/usr/local/bin/code", "cmd": ["/usr/local/bin/code"]},
+                {
+                    "path": "/Applications/Visual Studio Code.app",
+                    "cmd": ["open", "/Applications/Visual Studio Code.app", "--args"],
+                },
+                {
+                    "path": "/Applications/Visual Studio Code - Insiders.app",
+                    "cmd": [
+                        "open",
+                        "/Applications/Visual Studio Code - Insiders.app",
+                        "--args",
+                    ],
+                },
+            ]
+        elif "win64" in command_context.platform[0]:
+            from pathlib import Path
 
-def get_visualstudio_workspace_path(command_context):
-    return os.path.normpath(
-        os.path.join(command_context.topobjdir, "msvc", "mozilla.sln")
-    )
-
+            vscode_path = mozpath.join(
+                str(Path.home()),
+                "AppData",
+                "Local",
+                "Programs",
+                "Microsoft VS Code",
+                "Code.exe",
+            )
+            vscode_insiders_path = mozpath.join(
+                str(Path.home()),
+                "AppData",
+                "Local",
+                "Programs",
+                "Microsoft VS Code Insiders",
+                "Code - Insiders.exe",
+            )
+            cmd_and_path = [
+                {"path": vscode_path, "cmd": [vscode_path]},
+                {"path": vscode_insiders_path, "cmd": [vscode_insiders_path]},
+            ]
 
-def find_vscode_cmd(command_context):
-    import shutil
+        # Did we guess the path?
+        for element in cmd_and_path:
+            if os.path.exists(element["path"]):
+                return element["cmd"]
+
+        # Path cannot be found
+        return None
+
+    def setup_vscode(self, command_context, vscode_cmd):
+        vscode_settings = mozpath.join(
+            command_context.topsrcdir, ".vscode", "settings.json"
+        )
 
-    # Try to look up the `code` binary on $PATH, and use it if present. This
-    # should catch cases like being run from within a vscode-remote shell,
-    # even if vscode itself is also installed on the remote host.
-    path = shutil.which("code")
-    if path is not None:
-        return [path]
+        clangd_cc_path = mozpath.join(command_context.topobjdir, "clangd")
+
+        # Verify if the required files are present
+        clang_tools_path = mozpath.join(
+            command_context._mach_context.state_dir, "clang-tools"
+        )
+        clang_tidy_bin = mozpath.join(clang_tools_path, "clang-tidy", "bin")
+
+        clangd_path = mozpath.join(
+            clang_tidy_bin,
+            "clangd" + command_context.config_environment.substs.get("BIN_SUFFIX", ""),
+        )
 
-    # If the binary wasn't on $PATH, try to find it in a variety of other
-    # well-known install locations based on the current platform.
-    if "linux" in command_context.platform[0]:
-        cmd_and_path = [
-            {"path": "/usr/local/bin/code", "cmd": ["/usr/local/bin/code"]},
-            {"path": "/snap/bin/code", "cmd": ["/snap/bin/code"]},
-            {"path": "/usr/bin/code", "cmd": ["/usr/bin/code"]},
-            {"path": "/usr/bin/code-insiders", "cmd": ["/usr/bin/code-insiders"]},
-        ]
-    elif "macos" in command_context.platform[0]:
-        cmd_and_path = [
-            {"path": "/usr/local/bin/code", "cmd": ["/usr/local/bin/code"]},
-            {
-                "path": "/Applications/Visual Studio Code.app",
-                "cmd": ["open", "/Applications/Visual Studio Code.app", "--args"],
-            },
-            {
-                "path": "/Applications/Visual Studio Code - Insiders.app",
-                "cmd": [
-                    "open",
-                    "/Applications/Visual Studio Code - Insiders.app",
-                    "--args",
-                ],
-            },
-        ]
-    elif "win64" in command_context.platform[0]:
-        from pathlib import Path
+        if not os.path.exists(clangd_path):
+            command_context.log(
+                logging.ERROR,
+                "ide",
+                {},
+                "Unable to locate clangd in {}.".format(clang_tidy_bin),
+            )
+            rc = self._get_clang_tools(command_context, clang_tools_path)
+
+            if rc != 0:
+                return rc
+
+        import multiprocessing
+        import json
+        import difflib
+        from mozbuild.code_analysis.utils import ClangTidyConfig
+
+        clang_tidy_cfg = ClangTidyConfig(command_context.topsrcdir)
+
+        clangd_json = {
+            "clangd.path": clangd_path,
+            "clangd.arguments": [
+                "--compile-commands-dir",
+                clangd_cc_path,
+                "-j",
+                str(multiprocessing.cpu_count() // 2),
+                "--limit-results",
+                "0",
+                "--completion-style",
+                "detailed",
+                "--background-index",
+                "--all-scopes-completion",
+                "--log",
+                "info",
+                "--pch-storage",
+                "memory",
+                "--clang-tidy",
+                "--clang-tidy-checks",
+                ",".join(clang_tidy_cfg.checks),
+            ],
+        }
+
+        # Load the existing .vscode/settings.json file, to check if if needs to
+        # be created or updated.
+        try:
+            with open(vscode_settings) as fh:
+                old_settings_str = fh.read()
+        except FileNotFoundError:
+            print("Configuration for {} will be created.".format(vscode_settings))
+            old_settings_str = None
 
-        vscode_path = mozpath.join(
-            str(Path.home()),
-            "AppData",
-            "Local",
-            "Programs",
-            "Microsoft VS Code",
-            "Code.exe",
-        )
-        vscode_insiders_path = mozpath.join(
-            str(Path.home()),
-            "AppData",
-            "Local",
-            "Programs",
-            "Microsoft VS Code Insiders",
-            "Code - Insiders.exe",
-        )
-        cmd_and_path = [
-            {"path": vscode_path, "cmd": [vscode_path]},
-            {"path": vscode_insiders_path, "cmd": [vscode_insiders_path]},
-        ]
+        if old_settings_str is None:
+            # No old settings exist
+            with open(vscode_settings, "w") as fh:
+                json.dump(clangd_json, fh, indent=4)
+        else:
+            # Merge our new settings with the existing settings, and check if we
+            # need to make changes. Only prompt & write out the updated config
+            # file if settings actually changed.
+            try:
+                old_settings = json.loads(old_settings_str)
+                prompt_prefix = ""
+            except ValueError:
+                old_settings = {}
+                prompt_prefix = (
+                    "\n**WARNING**: Parsing of existing settings file failed. "
+                    "Existing settings will be lost!"
+                )
 
-    # Did we guess the path?
-    for element in cmd_and_path:
-        if os.path.exists(element["path"]):
-            return element["cmd"]
+            settings = {**old_settings, **clangd_json}
 
-    # Path cannot be found
-    return None
-
-
-def setup_vscode(command_context, vscode_cmd):
-    vscode_settings = mozpath.join(
-        command_context.topsrcdir, ".vscode", "settings.json"
-    )
-
-    clangd_cc_path = mozpath.join(command_context.topobjdir, "clangd")
+            if old_settings != settings:
+                # Prompt the user with a diff of the changes we're going to make
+                new_settings_str = json.dumps(settings, indent=4)
+                print(
+                    "\nThe following modifications to {settings} will occur:\n{diff}".format(
+                        settings=vscode_settings,
+                        diff="".join(
+                            difflib.unified_diff(
+                                old_settings_str.splitlines(keepends=True),
+                                new_settings_str.splitlines(keepends=True),
+                                "a/.vscode/settings.json",
+                                "b/.vscode/settings.json",
+                                n=30,
+                            )
+                        ),
+                    )
+                )
+                choice = prompt_bool(
+                    "{}\nProceed with modifications to {}?".format(
+                        prompt_prefix, vscode_settings
+                    )
+                )
+                if not choice:
+                    return 1
 
-    # Verify if the required files are present
-    clang_tools_path = mozpath.join(
-        command_context._mach_context.state_dir, "clang-tools"
-    )
-    clang_tidy_bin = mozpath.join(clang_tools_path, "clang-tidy", "bin")
+                with open(vscode_settings, "w") as fh:
+                    fh.write(new_settings_str)
 
-    clangd_path = mozpath.join(
-        clang_tidy_bin,
-        "clangd" + command_context.config_environment.substs.get("BIN_SUFFIX", ""),
-    )
+        # Open vscode with new configuration, or ask the user to do so if the
+        # binary was not found.
+        if vscode_cmd is None:
+            print(
+                "Please open VS Code manually and load directory: {}".format(
+                    command_context.topsrcdir
+                )
+            )
+            return 0
 
-    if not os.path.exists(clangd_path):
-        command_context.log(
-            logging.ERROR,
-            "ide",
-            {},
-            "Unable to locate clangd in {}.".format(clang_tidy_bin),
-        )
-        rc = _get_clang_tools(command_context, clang_tools_path)
+        rc = subprocess.call(vscode_cmd + [command_context.topsrcdir])
 
         if rc != 0:
+            command_context.log(
+                logging.ERROR,
+                "ide",
+                {},
+                "Unable to open VS Code. Please open VS Code manually and load "
+                "directory: {}".format(command_context.topsrcdir),
+            )
             return rc
 
-    import multiprocessing
-    import json
-    import difflib
-    from mozbuild.code_analysis.utils import ClangTidyConfig
-
-    clang_tidy_cfg = ClangTidyConfig(command_context.topsrcdir)
-
-    clangd_json = {
-        "clangd.path": clangd_path,
-        "clangd.arguments": [
-            "--compile-commands-dir",
-            clangd_cc_path,
-            "-j",
-            str(multiprocessing.cpu_count() // 2),
-            "--limit-results",
-            "0",
-            "--completion-style",
-            "detailed",
-            "--background-index",
-            "--all-scopes-completion",
-            "--log",
-            "info",
-            "--pch-storage",
-            "memory",
-            "--clang-tidy",
-            "--clang-tidy-checks",
-            ",".join(clang_tidy_cfg.checks),
-        ],
-    }
-
-    # Load the existing .vscode/settings.json file, to check if if needs to
-    # be created or updated.
-    try:
-        with open(vscode_settings) as fh:
-            old_settings_str = fh.read()
-    except FileNotFoundError:
-        print("Configuration for {} will be created.".format(vscode_settings))
-        old_settings_str = None
-
-    if old_settings_str is None:
-        # No old settings exist
-        with open(vscode_settings, "w") as fh:
-            json.dump(clangd_json, fh, indent=4)
-    else:
-        # Merge our new settings with the existing settings, and check if we
-        # need to make changes. Only prompt & write out the updated config
-        # file if settings actually changed.
-        try:
-            old_settings = json.loads(old_settings_str)
-            prompt_prefix = ""
-        except ValueError:
-            old_settings = {}
-            prompt_prefix = (
-                "\n**WARNING**: Parsing of existing settings file failed. "
-                "Existing settings will be lost!"
-            )
-
-        settings = {**old_settings, **clangd_json}
-
-        if old_settings != settings:
-            # Prompt the user with a diff of the changes we're going to make
-            new_settings_str = json.dumps(settings, indent=4)
-            print(
-                "\nThe following modifications to {settings} will occur:\n{diff}".format(
-                    settings=vscode_settings,
-                    diff="".join(
-                        difflib.unified_diff(
-                            old_settings_str.splitlines(keepends=True),
-                            new_settings_str.splitlines(keepends=True),
-                            "a/.vscode/settings.json",
-                            "b/.vscode/settings.json",
-                            n=30,
-                        )
-                    ),
-                )
-            )
-            choice = prompt_bool(
-                "{}\nProceed with modifications to {}?".format(
-                    prompt_prefix, vscode_settings
-                )
-            )
-            if not choice:
-                return 1
-
-            with open(vscode_settings, "w") as fh:
-                fh.write(new_settings_str)
-
-    # Open vscode with new configuration, or ask the user to do so if the
-    # binary was not found.
-    if vscode_cmd is None:
-        print(
-            "Please open VS Code manually and load directory: {}".format(
-                command_context.topsrcdir
-            )
-        )
         return 0
 
-    rc = subprocess.call(vscode_cmd + [command_context.topsrcdir])
+    def _get_clang_tools(self, command_context, clang_tools_path):
 
-    if rc != 0:
-        command_context.log(
-            logging.ERROR,
-            "ide",
-            {},
-            "Unable to open VS Code. Please open VS Code manually and load "
-            "directory: {}".format(command_context.topsrcdir),
-        )
-        return rc
+        import shutil
+
+        if os.path.isdir(clang_tools_path):
+            shutil.rmtree(clang_tools_path)
 
-    return 0
-
-
-def _get_clang_tools(command_context, clang_tools_path):
-
-    import shutil
+        # Create base directory where we store clang binary
+        os.mkdir(clang_tools_path)
 
-    if os.path.isdir(clang_tools_path):
-        shutil.rmtree(clang_tools_path)
+        from mozbuild.artifact_commands import PackageFrontend
 
-    # Create base directory where we store clang binary
-    os.mkdir(clang_tools_path)
+        _artifact_manager = PackageFrontend(command_context._mach_context)
 
-    from mozbuild.artifact_commands import PackageFrontend
+        job, _ = command_context.platform
 
-    _artifact_manager = PackageFrontend(command_context._mach_context)
-
-    job, _ = command_context.platform
+        if job is None:
+            command_context.log(
+                logging.ERROR,
+                "ide",
+                {},
+                "The current platform isn't supported. "
+                "Currently only the following platforms are "
+                "supported: win32/win64, linux64 and macosx64.",
+            )
+            return 1
 
-    if job is None:
-        command_context.log(
-            logging.ERROR,
-            "ide",
-            {},
-            "The current platform isn't supported. "
-            "Currently only the following platforms are "
-            "supported: win32/win64, linux64 and macosx64.",
+        job += "-clang-tidy"
+
+        # We want to unpack data in the clang-tidy mozbuild folder
+        currentWorkingDir = os.getcwd()
+        os.chdir(clang_tools_path)
+        rc = _artifact_manager.artifact_toolchain(
+            command_context, verbose=False, from_build=[job], no_unpack=False, retry=0
         )
-        return 1
-
-    job += "-clang-tidy"
+        # Change back the cwd
+        os.chdir(currentWorkingDir)
 
-    # We want to unpack data in the clang-tidy mozbuild folder
-    currentWorkingDir = os.getcwd()
-    os.chdir(clang_tools_path)
-    rc = _artifact_manager.artifact_toolchain(
-        command_context, verbose=False, from_build=[job], no_unpack=False, retry=0
-    )
-    # Change back the cwd
-    os.chdir(currentWorkingDir)
-
-    return rc
+        return rc
 
 
 def prompt_bool(prompt, limit=5):
     """ Prompts the user with prompt and requires a boolean value. """
     from distutils.util import strtobool
 
     for _ in range(limit):
         try:
--- a/python/mozbuild/mozbuild/build_commands.py
+++ b/python/mozbuild/mozbuild/build_commands.py
@@ -3,18 +3,19 @@
 # file, # You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import argparse
 import os
 import subprocess
 
-from mach.decorators import CommandArgument, Command
+from mach.decorators import CommandArgument, CommandProvider, Command
 
+from mozbuild.base import MachCommandBase
 from mozbuild.util import MOZBUILD_METRICS_PATH
 from mozbuild.mozconfig import MozconfigLoader
 import mozpack.path as mozpath
 
 from mozbuild.backend import backends
 
 BUILD_WHAT_HELP = """
 What to build. Can be a top-level make target or a relative directory. If
@@ -63,302 +64,310 @@ def _set_priority(priority, verbose):
         return False
 
     psutil.Process().nice(prio_class_val)
     if verbose:
         print(f"psutil.Process().nice(psutil.{prio_class})")
     return True
 
 
-# Interface to build the tree.
+@CommandProvider
+class Build(MachCommandBase):
+    """Interface to build the tree."""
 
+    @Command(
+        "build",
+        category="build",
+        description="Build the tree.",
+        metrics_path=MOZBUILD_METRICS_PATH,
+        virtualenv_name="build",
+    )
+    @CommandArgument(
+        "--jobs",
+        "-j",
+        default="0",
+        metavar="jobs",
+        type=int,
+        help="Number of concurrent jobs to run. Default is based on the number of "
+        "CPUs and the estimated size of the jobs (see --job-size).",
+    )
+    @CommandArgument(
+        "--job-size",
+        default="0",
+        metavar="size",
+        type=float,
+        help="Estimated RAM required, in GiB, for each parallel job. Used to "
+        "compute a default number of concurrent jobs.",
+    )
+    @CommandArgument(
+        "-C",
+        "--directory",
+        default=None,
+        help="Change to a subdirectory of the build directory first.",
+    )
+    @CommandArgument("what", default=None, nargs="*", help=BUILD_WHAT_HELP)
+    @CommandArgument(
+        "-v",
+        "--verbose",
+        action="store_true",
+        help="Verbose output for what commands the build is running.",
+    )
+    @CommandArgument(
+        "--keep-going",
+        action="store_true",
+        help="Keep building after an error has occurred",
+    )
+    @CommandArgument(
+        "--priority",
+        default="less",
+        metavar="priority",
+        type=str,
+        help="idle/less/normal/more/high. (Default less)",
+    )
+    def build(
+        self,
+        command_context,
+        what=None,
+        jobs=0,
+        job_size=0,
+        directory=None,
+        verbose=False,
+        keep_going=False,
+        priority="less",
+    ):
+        """Build the source tree.
+
+        With no arguments, this will perform a full build.
+
+        Positional arguments define targets to build. These can be make targets
+        or patterns like "<dir>/<target>" to indicate a make target within a
+        directory.
+
+        There are a few special targets that can be used to perform a partial
+        build faster than what `mach build` would perform:
 
-@Command(
-    "build",
-    category="build",
-    description="Build the tree.",
-    metrics_path=MOZBUILD_METRICS_PATH,
-    virtualenv_name="build",
-)
-@CommandArgument(
-    "--jobs",
-    "-j",
-    default="0",
-    metavar="jobs",
-    type=int,
-    help="Number of concurrent jobs to run. Default is based on the number of "
-    "CPUs and the estimated size of the jobs (see --job-size).",
-)
-@CommandArgument(
-    "--job-size",
-    default="0",
-    metavar="size",
-    type=float,
-    help="Estimated RAM required, in GiB, for each parallel job. Used to "
-    "compute a default number of concurrent jobs.",
-)
-@CommandArgument(
-    "-C",
-    "--directory",
-    default=None,
-    help="Change to a subdirectory of the build directory first.",
-)
-@CommandArgument("what", default=None, nargs="*", help=BUILD_WHAT_HELP)
-@CommandArgument(
-    "-v",
-    "--verbose",
-    action="store_true",
-    help="Verbose output for what commands the build is running.",
-)
-@CommandArgument(
-    "--keep-going",
-    action="store_true",
-    help="Keep building after an error has occurred",
-)
-@CommandArgument(
-    "--priority",
-    default="less",
-    metavar="priority",
-    type=str,
-    help="idle/less/normal/more/high. (Default less)",
-)
-def build(
-    command_context,
-    what=None,
-    jobs=0,
-    job_size=0,
-    directory=None,
-    verbose=False,
-    keep_going=False,
-    priority="less",
-):
-    """Build the source tree.
+        * binaries - compiles and links all C/C++ sources and produces shared
+          libraries and executables (binaries).
+
+        * faster - builds JavaScript, XUL, CSS, etc files.
+
+        "binaries" and "faster" almost fully complement each other. However,
+        there are build actions not captured by either. If things don't appear to
+        be rebuilding, perform a vanilla `mach build` to rebuild the world.
+        """
+        from mozbuild.controller.building import BuildDriver
+
+        command_context.log_manager.enable_all_structured_loggers()
+
+        loader = MozconfigLoader(command_context.topsrcdir)
+        mozconfig = loader.read_mozconfig(loader.AUTODETECT)
+        configure_args = mozconfig["configure_args"]
+        doing_pgo = configure_args and "MOZ_PGO=1" in configure_args
+        # Force verbosity on automation.
+        verbose = verbose or bool(os.environ.get("MOZ_AUTOMATION", False))
+        append_env = None
+
+        # By setting the current process's priority, by default our child processes
+        # will also inherit this same priority.
+        if not _set_priority(priority, verbose):
+            print("--priority not supported on this platform.")
+
+        if doing_pgo:
+            if what:
+                raise Exception(
+                    "Cannot specify targets (%s) in MOZ_PGO=1 builds" % what
+                )
+            instr = command_context._spawn(BuildDriver)
+            orig_topobjdir = instr._topobjdir
+            instr._topobjdir = mozpath.join(instr._topobjdir, "instrumented")
 
-    With no arguments, this will perform a full build.
-
-    Positional arguments define targets to build. These can be make targets
-    or patterns like "<dir>/<target>" to indicate a make target within a
-    directory.
-
-    There are a few special targets that can be used to perform a partial
-    build faster than what `mach build` would perform:
-
-    * binaries - compiles and links all C/C++ sources and produces shared
-      libraries and executables (binaries).
-
-    * faster - builds JavaScript, XUL, CSS, etc files.
-
-    "binaries" and "faster" almost fully complement each other. However,
-    there are build actions not captured by either. If things don't appear to
-    be rebuilding, perform a vanilla `mach build` to rebuild the world.
-    """
-    from mozbuild.controller.building import BuildDriver
-
-    command_context.log_manager.enable_all_structured_loggers()
+            append_env = {"MOZ_PROFILE_GENERATE": "1"}
+            status = instr.build(
+                command_context.metrics,
+                what=what,
+                jobs=jobs,
+                job_size=job_size,
+                directory=directory,
+                verbose=verbose,
+                keep_going=keep_going,
+                mach_context=command_context._mach_context,
+                append_env=append_env,
+            )
+            if status != 0:
+                return status
 
-    loader = MozconfigLoader(command_context.topsrcdir)
-    mozconfig = loader.read_mozconfig(loader.AUTODETECT)
-    configure_args = mozconfig["configure_args"]
-    doing_pgo = configure_args and "MOZ_PGO=1" in configure_args
-    # Force verbosity on automation.
-    verbose = verbose or bool(os.environ.get("MOZ_AUTOMATION", False))
-    append_env = None
+            # Packaging the instrumented build is required to get the jarlog
+            # data.
+            status = instr._run_make(
+                directory=".",
+                target="package",
+                silent=not verbose,
+                ensure_exit_code=False,
+                append_env=append_env,
+            )
+            if status != 0:
+                return status
 
-    # By setting the current process's priority, by default our child processes
-    # will also inherit this same priority.
-    if not _set_priority(priority, verbose):
-        print("--priority not supported on this platform.")
+            pgo_env = os.environ.copy()
+            if instr.config_environment.substs.get("CC_TYPE") in ("clang", "clang-cl"):
+                pgo_env["LLVM_PROFDATA"] = instr.config_environment.substs.get(
+                    "LLVM_PROFDATA"
+                )
+            pgo_env["JARLOG_FILE"] = mozpath.join(orig_topobjdir, "jarlog/en-US.log")
+            pgo_cmd = [
+                instr.virtualenv_manager.python_path,
+                mozpath.join(command_context.topsrcdir, "build/pgo/profileserver.py"),
+            ]
+            subprocess.check_call(pgo_cmd, cwd=instr.topobjdir, env=pgo_env)
 
-    if doing_pgo:
-        if what:
-            raise Exception("Cannot specify targets (%s) in MOZ_PGO=1 builds" % what)
-        instr = command_context._spawn(BuildDriver)
-        orig_topobjdir = instr._topobjdir
-        instr._topobjdir = mozpath.join(instr._topobjdir, "instrumented")
+            # Set the default build to MOZ_PROFILE_USE
+            append_env = {"MOZ_PROFILE_USE": "1"}
 
-        append_env = {"MOZ_PROFILE_GENERATE": "1"}
-        status = instr.build(
+        driver = command_context._spawn(BuildDriver)
+        return driver.build(
             command_context.metrics,
             what=what,
             jobs=jobs,
             job_size=job_size,
             directory=directory,
             verbose=verbose,
             keep_going=keep_going,
             mach_context=command_context._mach_context,
             append_env=append_env,
         )
-        if status != 0:
-            return status
 
-        # Packaging the instrumented build is required to get the jarlog
-        # data.
-        status = instr._run_make(
-            directory=".",
-            target="package",
-            silent=not verbose,
-            ensure_exit_code=False,
-            append_env=append_env,
+    @Command(
+        "configure",
+        category="build",
+        description="Configure the tree (run configure and config.status).",
+        metrics_path=MOZBUILD_METRICS_PATH,
+        virtualenv_name="build",
+    )
+    @CommandArgument(
+        "options", default=None, nargs=argparse.REMAINDER, help="Configure options"
+    )
+    def configure(
+        self,
+        command_context,
+        options=None,
+        buildstatus_messages=False,
+        line_handler=None,
+    ):
+        from mozbuild.controller.building import BuildDriver
+
+        command_context.log_manager.enable_all_structured_loggers()
+        driver = command_context._spawn(BuildDriver)
+
+        return driver.configure(
+            command_context.metrics,
+            options=options,
+            buildstatus_messages=buildstatus_messages,
+            line_handler=line_handler,
         )
-        if status != 0:
-            return status
 
-        pgo_env = os.environ.copy()
-        if instr.config_environment.substs.get("CC_TYPE") in ("clang", "clang-cl"):
-            pgo_env["LLVM_PROFDATA"] = instr.config_environment.substs.get(
-                "LLVM_PROFDATA"
-            )
-        pgo_env["JARLOG_FILE"] = mozpath.join(orig_topobjdir, "jarlog/en-US.log")
-        pgo_cmd = [
-            instr.virtualenv_manager.python_path,
-            mozpath.join(command_context.topsrcdir, "build/pgo/profileserver.py"),
-        ]
-        subprocess.check_call(pgo_cmd, cwd=instr.topobjdir, env=pgo_env)
+    @Command(
+        "resource-usage",
+        category="post-build",
+        description="Show information about system resource usage for a build.",
+        virtualenv_name="build",
+    )
+    @CommandArgument(
+        "--address",
+        default="localhost",
+        help="Address the HTTP server should listen on.",
+    )
+    @CommandArgument(
+        "--port",
+        type=int,
+        default=0,
+        help="Port number the HTTP server should listen on.",
+    )
+    @CommandArgument(
+        "--browser",
+        default="firefox",
+        help="Web browser to automatically open. See webbrowser Python module.",
+    )
+    @CommandArgument("--url", help="URL of JSON document to display")
+    def resource_usage(
+        self, command_context, address=None, port=None, browser=None, url=None
+    ):
+        import webbrowser
+        from mozbuild.html_build_viewer import BuildViewerServer
 
-        # Set the default build to MOZ_PROFILE_USE
-        append_env = {"MOZ_PROFILE_USE": "1"}
-
-    driver = command_context._spawn(BuildDriver)
-    return driver.build(
-        command_context.metrics,
-        what=what,
-        jobs=jobs,
-        job_size=job_size,
-        directory=directory,
-        verbose=verbose,
-        keep_going=keep_going,
-        mach_context=command_context._mach_context,
-        append_env=append_env,
-    )
-
+        server = BuildViewerServer(address, port)
 
-@Command(
-    "configure",
-    category="build",
-    description="Configure the tree (run configure and config.status).",
-    metrics_path=MOZBUILD_METRICS_PATH,
-    virtualenv_name="build",
-)
-@CommandArgument(
-    "options", default=None, nargs=argparse.REMAINDER, help="Configure options"
-)
-def configure(
-    command_context,
-    options=None,
-    buildstatus_messages=False,
-    line_handler=None,
-):
-    from mozbuild.controller.building import BuildDriver
+        if url:
+            server.add_resource_json_url("url", url)
+        else:
+            last = command_context._get_state_filename("build_resources.json")
+            if not os.path.exists(last):
+                print(
+                    "Build resources not available. If you have performed a "
+                    "build and receive this message, the psutil Python package "
+                    "likely failed to initialize properly."
+                )
+                return 1
 
-    command_context.log_manager.enable_all_structured_loggers()
-    driver = command_context._spawn(BuildDriver)
+            server.add_resource_json_file("last", last)
+        try:
+            webbrowser.get(browser).open_new_tab(server.url)
+        except Exception:
+            print("Cannot get browser specified, trying the default instead.")
+            try:
+                browser = webbrowser.get().open_new_tab(server.url)
+            except Exception:
+                print("Please open %s in a browser." % server.url)
 
-    return driver.configure(
-        command_context.metrics,
-        options=options,
-        buildstatus_messages=buildstatus_messages,
-        line_handler=line_handler,
-    )
-
+        print("Hit CTRL+c to stop server.")
+        server.run()
 
-@Command(
-    "resource-usage",
-    category="post-build",
-    description="Show information about system resource usage for a build.",
-    virtualenv_name="build",
-)
-@CommandArgument(
-    "--address",
-    default="localhost",
-    help="Address the HTTP server should listen on.",
-)
-@CommandArgument(
-    "--port",
-    type=int,
-    default=0,
-    help="Port number the HTTP server should listen on.",
-)
-@CommandArgument(
-    "--browser",
-    default="firefox",
-    help="Web browser to automatically open. See webbrowser Python module.",
-)
-@CommandArgument("--url", help="URL of JSON document to display")
-def resource_usage(command_context, address=None, port=None, browser=None, url=None):
-    import webbrowser
-    from mozbuild.html_build_viewer import BuildViewerServer
+    @Command(
+        "build-backend",
+        category="build",
+        description="Generate a backend used to build the tree.",
+        virtualenv_name="build",
+    )
+    @CommandArgument(
+        "-d", "--diff", action="store_true", help="Show a diff of changes."
+    )
+    # It would be nice to filter the choices below based on
+    # conditions, but that is for another day.
+    @CommandArgument(
+        "-b",
+        "--backend",
+        nargs="+",
+        choices=sorted(backends),
+        help="Which backend to build.",
+    )
+    @CommandArgument("-v", "--verbose", action="store_true", help="Verbose output.")
+    @CommandArgument(
+        "-n",
+        "--dry-run",
+        action="store_true",
+        help="Do everything except writing files out.",
+    )
+    def build_backend(
+        self, command_context, backend, diff=False, verbose=False, dry_run=False
+    ):
+        python = command_context.virtualenv_manager.python_path
+        config_status = os.path.join(command_context.topobjdir, "config.status")
 
-    server = BuildViewerServer(address, port)
-
-    if url:
-        server.add_resource_json_url("url", url)
-    else:
-        last = command_context._get_state_filename("build_resources.json")
-        if not os.path.exists(last):
+        if not os.path.exists(config_status):
             print(
-                "Build resources not available. If you have performed a "
-                "build and receive this message, the psutil Python package "
-                "likely failed to initialize properly."
+                "config.status not found.  Please run |mach configure| "
+                "or |mach build| prior to building the %s build backend." % backend
             )
             return 1
 
-        server.add_resource_json_file("last", last)
-    try:
-        webbrowser.get(browser).open_new_tab(server.url)
-    except Exception:
-        print("Cannot get browser specified, trying the default instead.")
-        try:
-            browser = webbrowser.get().open_new_tab(server.url)
-        except Exception:
-            print("Please open %s in a browser." % server.url)
-
-    print("Hit CTRL+c to stop server.")
-    server.run()
-
+        args = [python, config_status]
+        if backend:
+            args.append("--backend")
+            args.extend(backend)
+        if diff:
+            args.append("--diff")
+        if verbose:
+            args.append("--verbose")
+        if dry_run:
+            args.append("--dry-run")
 
-@Command(
-    "build-backend",
-    category="build",
-    description="Generate a backend used to build the tree.",
-    virtualenv_name="build",
-)
-@CommandArgument("-d", "--diff", action="store_true", help="Show a diff of changes.")
-# It would be nice to filter the choices below based on
-# conditions, but that is for another day.
-@CommandArgument(
-    "-b",
-    "--backend",
-    nargs="+",
-    choices=sorted(backends),
-    help="Which backend to build.",
-)
-@CommandArgument("-v", "--verbose", action="store_true", help="Verbose output.")
-@CommandArgument(
-    "-n",
-    "--dry-run",
-    action="store_true",
-    help="Do everything except writing files out.",
-)
-def build_backend(command_context, backend, diff=False, verbose=False, dry_run=False):
-    python = command_context.virtualenv_manager.python_path
-    config_status = os.path.join(command_context.topobjdir, "config.status")
-
-    if not os.path.exists(config_status):
-        print(
-            "config.status not found.  Please run |mach configure| "
-            "or |mach build| prior to building the %s build backend." % backend
+        return command_context._run_command_in_objdir(
+            args=args, pass_thru=True, ensure_exit_code=False
         )
-        return 1
-
-    args = [python, config_status]
-    if backend:
-        args.append("--backend")
-        args.extend(backend)
-    if diff:
-        args.append("--diff")
-    if verbose:
-        args.append("--verbose")
-    if dry_run:
-        args.append("--dry-run")
-
-    return command_context._run_command_in_objdir(
-        args=args, pass_thru=True, ensure_exit_code=False
-    )
--- a/python/mozbuild/mozbuild/code_analysis/mach_commands.py
+++ b/python/mozbuild/mozbuild/code_analysis/mach_commands.py
@@ -20,21 +20,23 @@ import tarfile
 import tempfile
 import xml.etree.ElementTree as ET
 import yaml
 from types import SimpleNamespace
 
 import six
 from six.moves import input
 
-from mach.decorators import CommandArgument, Command, SubCommand
+from mach.decorators import CommandArgument, CommandProvider, Command, SubCommand
 
 from mach.main import Mach
 
-from mozbuild import build_commands
+from mozbuild.base import MachCommandBase
+
+from mozbuild.build_commands import Build
 from mozbuild.nodeutil import find_node_executable
 
 import mozpack.path as mozpath
 
 from mozbuild.util import memoize
 
 from mozversioncontrol import get_repository_object
 
@@ -172,2525 +174,2540 @@ class StaticAnalysisMonitor(object):
             elif warning["flag"] == "clang-diagnostic-error":
                 # For a "warning" that is flagged as "clang-diagnostic-error"
                 # set it as "publish"
                 warning["publish"] = True
 
         return (warning, True)
 
 
-# Utilities for running C++ static analysis checks and format.
+@CommandProvider
+class StaticAnalysis(MachCommandBase):
+    """Utilities for running C++ static analysis checks and format."""
 
-# List of file extension to consider (should start with dot)
-_format_include_extensions = (".cpp", ".c", ".cc", ".h", ".m", ".mm")
-# File contaning all paths to exclude from formatting
-_format_ignore_file = ".clang-format-ignore"
+    # List of file extension to consider (should start with dot)
+    _format_include_extensions = (".cpp", ".c", ".cc", ".h", ".m", ".mm")
+    # File contaning all paths to exclude from formatting
+    _format_ignore_file = ".clang-format-ignore"
 
-# List of file extension to consider (should start with dot)
-_check_syntax_include_extensions = (".cpp", ".c", ".cc", ".cxx")
+    # List of file extension to consider (should start with dot)
+    _check_syntax_include_extensions = (".cpp", ".c", ".cc", ".cxx")
 
-_cov_config = None
+    _cov_config = None
 
-# (TOOLS) Function return codes
-TOOLS_SUCCESS = 0
-TOOLS_FAILED_DOWNLOAD = 1
-TOOLS_UNSUPORTED_PLATFORM = 2
-TOOLS_CHECKER_NO_TEST_FILE = 3
-TOOLS_CHECKER_RETURNED_NO_ISSUES = 4
-TOOLS_CHECKER_RESULT_FILE_NOT_FOUND = 5
-TOOLS_CHECKER_DIFF_FAILED = 6
-TOOLS_CHECKER_NOT_FOUND = 7
-TOOLS_CHECKER_FAILED_FILE = 8
-TOOLS_CHECKER_LIST_EMPTY = 9
-TOOLS_GRADLE_FAILED = 10
-
+    # (TOOLS) Function return codes
+    TOOLS_SUCCESS = 0
+    TOOLS_FAILED_DOWNLOAD = 1
+    TOOLS_UNSUPORTED_PLATFORM = 2
+    TOOLS_CHECKER_NO_TEST_FILE = 3
+    TOOLS_CHECKER_RETURNED_NO_ISSUES = 4
+    TOOLS_CHECKER_RESULT_FILE_NOT_FOUND = 5
+    TOOLS_CHECKER_DIFF_FAILED = 6
+    TOOLS_CHECKER_NOT_FOUND = 7
+    TOOLS_CHECKER_FAILED_FILE = 8
+    TOOLS_CHECKER_LIST_EMPTY = 9
+    TOOLS_GRADLE_FAILED = 10
 
-@Command(
-    "static-analysis",
-    category="testing",
-    description="Run C++ static analysis checks",
-)
-def static_analysis(command_context):
-    # If no arguments are provided, just print a help message.
-    """Detailed documentation:
-    https://firefox-source-docs.mozilla.org/code-quality/static-analysis.html
-    """
-    mach = Mach(os.getcwd())
+    @Command(
+        "static-analysis",
+        category="testing",
+        description="Run C++ static analysis checks",
+    )
+    def static_analysis(self, command_context):
+        # If no arguments are provided, just print a help message.
+        """Detailed documentation:
+        https://firefox-source-docs.mozilla.org/code-quality/static-analysis.html
+        """
+        mach = Mach(os.getcwd())
 
-    def populate_context(key=None):
-        if key == "topdir":
-            return command_context.topsrcdir
+        def populate_context(key=None):
+            if key == "topdir":
+                return command_context.topsrcdir
 
-    mach.populate_context_handler = populate_context
-    mach.run(["static-analysis", "--help"])
-
+        mach.populate_context_handler = populate_context
+        mach.run(["static-analysis", "--help"])
 
-@StaticAnalysisSubCommand(
-    "static-analysis", "check", "Run the checks using the helper tool"
-)
-@CommandArgument(
-    "source",
-    nargs="*",
-    default=[".*"],
-    help="Source files to be analyzed (regex on path). "
-    "Can be omitted, in which case the entire code base "
-    "is analyzed.  The source argument is ignored if "
-    "there is anything fed through stdin, in which case "
-    "the analysis is only performed on the files changed "
-    "in the patch streamed through stdin.  This is called "
-    "the diff mode.",
-)
-@CommandArgument(
-    "--checks",
-    "-c",
-    default="-*",
-    metavar="checks",
-    help="Static analysis checks to enable.  By default, this enables only "
-    "checks that are published here: https://mzl.la/2DRHeTh, but can be any "
-    "clang-tidy checks syntax.",
-)
-@CommandArgument(
-    "--jobs",
-    "-j",
-    default="0",
-    metavar="jobs",
-    type=int,
-    help="Number of concurrent jobs to run. Default is the number of CPUs.",
-)
-@CommandArgument(
-    "--strip",
-    "-p",
-    default="1",
-    metavar="NUM",
-    help="Strip NUM leading components from file names in diff mode.",
-)
-@CommandArgument(
-    "--fix",
-    "-f",
-    default=False,
-    action="store_true",
-    help="Try to autofix errors detected by clang-tidy checkers.",
-)
-@CommandArgument(
-    "--header-filter",
-    "-h-f",
-    default="",
-    metavar="header_filter",
-    help="Regular expression matching the names of the headers to "
-    "output diagnostics from. Diagnostics from the main file "
-    "of each translation unit are always displayed",
-)
-@CommandArgument(
-    "--output", "-o", default=None, help="Write clang-tidy output in a file"
-)
-@CommandArgument(
-    "--format",
-    default="text",
-    choices=("text", "json"),
-    help="Output format to write in a file",
-)
-@CommandArgument(
-    "--outgoing",
-    default=False,
-    action="store_true",
-    help="Run static analysis checks on outgoing files from mercurial repository",
-)
-def check(
-    command_context,
-    source=None,
-    jobs=2,
-    strip=1,
-    verbose=False,
-    checks="-*",
-    fix=False,
-    header_filter="",
-    output=None,
-    format="text",
-    outgoing=False,
-):
-    from mozbuild.controller.building import (
-        StaticAnalysisFooter,
-        StaticAnalysisOutputManager,
+    @StaticAnalysisSubCommand(
+        "static-analysis", "check", "Run the checks using the helper tool"
+    )
+    @CommandArgument(
+        "source",
+        nargs="*",
+        default=[".*"],
+        help="Source files to be analyzed (regex on path). "
+        "Can be omitted, in which case the entire code base "
+        "is analyzed.  The source argument is ignored if "
+        "there is anything fed through stdin, in which case "
+        "the analysis is only performed on the files changed "
+        "in the patch streamed through stdin.  This is called "
+        "the diff mode.",
+    )
+    @CommandArgument(
+        "--checks",
+        "-c",
+        default="-*",
+        metavar="checks",
+        help="Static analysis checks to enable.  By default, this enables only "
+        "checks that are published here: https://mzl.la/2DRHeTh, but can be any "
+        "clang-tidy checks syntax.",
+    )
+    @CommandArgument(
+        "--jobs",
+        "-j",
+        default="0",
+        metavar="jobs",
+        type=int,
+        help="Number of concurrent jobs to run. Default is the number of CPUs.",
+    )
+    @CommandArgument(
+        "--strip",
+        "-p",
+        default="1",
+        metavar="NUM",
+        help="Strip NUM leading components from file names in diff mode.",
     )
-
-    command_context._set_log_level(verbose)
-    command_context.activate_virtualenv()
-    command_context.log_manager.enable_unstructured()
-
-    rc, clang_paths = _get_clang_tools(command_context, verbose=verbose)
-    if rc != 0:
-        return rc
-
-    if not _is_version_eligible(command_context, clang_paths):
-        return 1
-
-    rc, _compile_db, compilation_commands_path = _build_compile_db(
-        command_context, verbose=verbose
+    @CommandArgument(
+        "--fix",
+        "-f",
+        default=False,
+        action="store_true",
+        help="Try to autofix errors detected by clang-tidy checkers.",
+    )
+    @CommandArgument(
+        "--header-filter",
+        "-h-f",
+        default="",
+        metavar="header_filter",
+        help="Regular expression matching the names of the headers to "
+        "output diagnostics from. Diagnostics from the main file "
+        "of each translation unit are always displayed",
+    )
+    @CommandArgument(
+        "--output", "-o", default=None, help="Write clang-tidy output in a file"
     )
-    rc = rc or _build_export(command_context, jobs=jobs, verbose=verbose)
-    if rc != 0:
-        return rc
-
-    # Use outgoing files instead of source files
-    if outgoing:
-        repo = get_repository_object(command_context.topsrcdir)
-        files = repo.get_outgoing_files()
-        source = get_abspath_files(command_context, files)
-
-    # Split in several chunks to avoid hitting Python's limit of 100 groups in re
-    compile_db = json.loads(open(_compile_db, "r").read())
-    total = 0
-    import re
-
-    chunk_size = 50
-    for offset in range(0, len(source), chunk_size):
-        source_chunks = [
-            re.escape(f) for f in source[offset : offset + chunk_size].copy()
-        ]
-        name_re = re.compile("(" + ")|(".join(source_chunks) + ")")
-        for f in compile_db:
-            if name_re.search(f["file"]):
-                total = total + 1
-
-    # Filter source to remove excluded files
-    source = _generate_path_list(command_context, source, verbose=verbose)
-
-    if not total or not source:
-        command_context.log(
-            logging.INFO,
-            "static-analysis",
-            {},
-            "There are no files eligible for analysis. Please note that 'header' files "
-            "cannot be used for analysis since they do not consist compilation units.",
-        )
-        return 0
-
-    # Escape the files from source
-    source = [re.escape(f) for f in source]
-
-    cwd = command_context.topobjdir
-
-    monitor = StaticAnalysisMonitor(
-        command_context.topsrcdir,
-        command_context.topobjdir,
-        get_clang_tidy_config(command_context).checks_with_data,
-        total,
+    @CommandArgument(
+        "--format",
+        default="text",
+        choices=("text", "json"),
+        help="Output format to write in a file",
+    )
+    @CommandArgument(
+        "--outgoing",
+        default=False,
+        action="store_true",
+        help="Run static analysis checks on outgoing files from mercurial repository",
     )
-
-    footer = StaticAnalysisFooter(command_context.log_manager.terminal, monitor)
-
-    with StaticAnalysisOutputManager(
-        command_context.log_manager, monitor, footer
-    ) as output_manager:
-        import math
-
-        batch_size = int(math.ceil(float(len(source)) / multiprocessing.cpu_count()))
-        for i in range(0, len(source), batch_size):
-            args = _get_clang_tidy_command(
-                command_context,
-                clang_paths,
-                compilation_commands_path,
-                checks=checks,
-                header_filter=header_filter,
-                sources=source[i : (i + batch_size)],
-                jobs=jobs,
-                fix=fix,
-            )
-            rc = command_context.run_process(
-                args=args,
-                ensure_exit_code=False,
-                line_handler=output_manager.on_line,
-                cwd=cwd,
-            )
-
-        command_context.log(
-            logging.WARNING,
-            "warning_summary",
-            {"count": len(monitor.warnings_db)},
-            "{count} warnings present.",
+    def check(
+        self,
+        command_context,
+        source=None,
+        jobs=2,
+        strip=1,
+        verbose=False,
+        checks="-*",
+        fix=False,
+        header_filter="",
+        output=None,
+        format="text",
+        outgoing=False,
+    ):
+        from mozbuild.controller.building import (
+            StaticAnalysisFooter,
+            StaticAnalysisOutputManager,
         )
 
-        # Write output file
-        if output is not None:
-            output_manager.write(output, format)
-
-    if rc != 0:
-        return rc
-    # if we are building firefox for android it might be nice to
-    # also analyze the java code base
-    if command_context.substs["MOZ_BUILD_APP"] == "mobile/android":
-        rc = check_java(command_context, source, jobs, strip, verbose, skip_export=True)
-    return rc
-
-
-@StaticAnalysisSubCommand(
-    "static-analysis",
-    "check-coverity",
-    "Run coverity static-analysis tool on the given files. "
-    "Can only be run by automation! "
-    "It's result is stored as an json file on the artifacts server.",
-)
-@CommandArgument(
-    "source",
-    nargs="*",
-    default=[],
-    help="Source files to be analyzed by Coverity Static Analysis Tool. "
-    "This is ran only in automation.",
-)
-@CommandArgument(
-    "--output",
-    "-o",
-    default=None,
-    help="Write coverity output translated to json output in a file",
-)
-@CommandArgument(
-    "--coverity_output_path",
-    "-co",
-    default=None,
-    help="Path where to write coverity results as cov-results.json. "
-    "If no path is specified the default path from the coverity working "
-    "directory, ~./mozbuild/coverity is used.",
-)
-@CommandArgument(
-    "--outgoing",
-    default=False,
-    action="store_true",
-    help="Run coverity on outgoing files from mercurial or git repository",
-)
-@CommandArgument(
-    "--full-build",
-    default=False,
-    action="store_true",
-    help="Run a full build for coverity analisys.",
-)
-def check_coverity(
-    command_context,
-    source=[],
-    output=None,
-    coverity_output_path=None,
-    outgoing=False,
-    full_build=False,
-    verbose=False,
-):
-    command_context._set_log_level(verbose)
-    command_context.activate_virtualenv()
-    command_context.log_manager.enable_unstructured()
-
-    if "MOZ_AUTOMATION" not in os.environ:
-        command_context.log(
-            logging.INFO,
-            "static-analysis",
-            {},
-            "Coverity based static-analysis cannot be ran outside automation.",
-        )
-        return
-
-    if full_build and outgoing:
-        command_context.log(
-            logging.INFO,
-            "static-analysis",
-            {},
-            "Coverity full build cannot be associated with outgoing.",
-        )
-        return
-
-    # Use outgoing files instead of source files
-    if outgoing:
-        repo = get_repository_object(command_context.topsrcdir)
-        files = repo.get_outgoing_files()
-        source = get_abspath_files(command_context, files)
+        command_context._set_log_level(verbose)
+        command_context.activate_virtualenv()
+        command_context.log_manager.enable_unstructured()
 
-    # Verify that we have source files or we are dealing with a full-build
-    if len(source) == 0 and not full_build:
-        command_context.log(
-            logging.ERROR,
-            "static-analysis",
-            {},
-            "ERROR: There are no files that coverity can use to scan.",
-        )
-        return 0
-
-    # Load the configuration file for coverity static-analysis
-    # For the moment we store only the reliability index for each checker
-    # as the rest is managed on the https://github.com/mozilla/release-services side.
-    cov_config = _get_cov_config(command_context)
-
-    rc, cov = setup_coverity(command_context)
-    if rc != 0:
-        return rc
-
-    # First run cov-run-desktop --setup in order to setup the analysis env
-    # We need this in both cases, per patch analysis or full tree build
-    cmd = [cov.cov_run_desktop, "--setup"]
-    if run_cov_command(command_context, cmd, cov.cov_path):
-        # Avoiding a bug in Coverity where snapshot is not identified
-        # as beeing built with the current analysis binary.
-        if not full_build:
-            return 1
-
-    # Run cov-configure for clang, javascript and python
-    langs = ["clang", "javascript", "python"]
-    for lang in langs:
-        cmd = [cov.cov_configure, "--{}".format(lang)]
-
-        if run_cov_command(command_context, cmd):
-            return 1
-
-    if full_build:
-        # 1. Build the model file that is going to be used for analysis
-        model_path = mozpath.join("tools", "coverity", "model.cpp")
-        cmd = [cov.cov_make_library, "-sf", cov.cov_lic_path, model_path]
-
-        if run_cov_command(command_context, cmd):
-            return 1
-
-        # 2. Run cov-build
+        rc, clang_paths = self._get_clang_tools(command_context, verbose=verbose)
+        if rc != 0:
+            return rc
 
-        # Add cov_build command
-        cmd = [cov.cov_build, "--dir", "cov-int"]
-        # Add fs capture search paths for languages that are not nuilt
-        cmd += [
-            "--fs-capture-search={}".format(path) for path in cov.cov_capture_search
-        ]
-
-        # Add the exclude criteria for test cases
-        cmd += [
-            "--fs-capture-search-exclude-regex",
-            ".*/test",
-            "./mach",
-            "--log-no-times",
-            "build",
-        ]
-        if run_cov_command(command_context, cmd):
-            return 1
-
-        # 3. Run cov-analyze and exclude disabled checkers
-        cmd = [
-            cov.cov_analyze,
-            "--dir",
-            "cov-int",
-            "--all",
-            "--enable-virtual",
-            "--strip-path={}".format(command_context.topsrcdir),
-            "-sf",
-            cov.cov_lic_path,
-        ]
-
-        cmd += [
-            "--disable={}".format(key)
-            for key, checker in cov_config["coverity_checkers"].items()
-            if checker.get("publish", True) is False
-        ]
-
-        if run_cov_command(command_context, cmd):
-            return 1
-
-        # 4. Run cov-commit-defects
-        protocol = "https" if cov.cov_server_ssl else "http"
-        server_url = "{0}://{1}:{2}".format(protocol, cov.cov_url, cov.cov_port)
-        cmd = [
-            cov.cov_commit_defects,
-            "--auth-key-file",
-            cov.cov_auth_path,
-            "--stream",
-            cov.cov_stream,
-            "--dir",
-            "cov-int",
-            "--url",
-            server_url,
-            "-sf",
-            cov.cov_lic_path,
-        ]
-
-        if run_cov_command(command_context, cmd):
+        if not self._is_version_eligible(command_context, clang_paths):
             return 1
 
-        return 0
-
-    # TEMP Fix for Case# 00847671
-    cmd = [
-        cov.cov_configure,
-        "--delete-compiler-config",
-        "template-clangcc-config-0",
-        "coverity_config.xml",
-    ]
-    if run_cov_command(command_context, cmd):
-        return 1
-
-    cmd = [
-        cov.cov_configure,
-        "--delete-compiler-config",
-        "template-clangcxx-config-0",
-        "coverity_config.xml",
-    ]
-    if run_cov_command(command_context, cmd):
-        return 1
-
-    cmd = [
-        cov.cov_configure,
-        "--clang",
-        "--xml-option",
-        "append_arg:--ppp_translator",
-        "--xml-option",
-        "append_arg:replace/\{([a-zA-Z]+::None\(\))\}/=$1",
-    ]
-    if run_cov_command(command_context, cmd):
-        return 1
-    # End for Case# 00847671
-
-    rc, compile_db, compilation_commands_path = _build_compile_db(
-        command_context, verbose=verbose
-    )
-    rc = rc or _build_export(command_context, jobs=2, verbose=verbose)
-
-    if rc != 0:
-        return rc
-
-    commands_list = get_files_with_commands(command_context, compile_db, source)
-    if len(commands_list) == 0:
-        command_context.log(
-            logging.INFO,
-            "static-analysis",
-            {},
-            "There are no files that need to be analyzed.",
+        rc, _compile_db, compilation_commands_path = self._build_compile_db(
+            command_context, verbose=verbose
         )
-        return 0
-
-    # For each element in commands_list run `cov-translate`
-    for element in commands_list:
-
-        def transform_cmd(cmd):
-            # Coverity Analysis has a problem translating definitions passed as:
-            # '-DSOME_DEF="ValueOfAString"', please see Bug 1588283.
-            return [re.sub(r'\'-D(.*)="(.*)"\'', r'-D\1="\2"', arg) for arg in cmd]
-
-        build_command = element["command"].split(" ")
-
-        cmd = [cov.cov_translate, "--dir", cov.cov_idir_path] + transform_cmd(
-            build_command
-        )
-
-        if run_cov_command(command_context, cmd, element["directory"]):
-            return 1
-
-    if coverity_output_path is None:
-        cov_result = mozpath.join(cov.cov_state_path, "cov-results.json")
-    else:
-        cov_result = mozpath.join(coverity_output_path, "cov-results.json")
-
-    # Once the capture is performed we need to do the actual Coverity Desktop analysis
-    cmd = [
-        cov.cov_run_desktop,
-        "--json-output-v6",
-        cov_result,
-        "--analyze-captured-source",
-    ]
-
-    if run_cov_command(command_context, cmd, cov.cov_state_path):
-        return 1
-
-    if output is not None:
-        dump_cov_artifact(command_context, cov_config, cov_result, source, output)
-
-
-def get_abspath_files(command_context, files):
-    return [mozpath.join(command_context.topsrcdir, f) for f in files]
-
-
-def run_cov_command(command_context, cmd, path=None):
-    if path is None:
-        # We want to run it in topsrcdir
-        path = command_context.topsrcdir
-
-    command_context.log(logging.INFO, "static-analysis", {}, "Running " + " ".join(cmd))
-
-    rc = command_context.run_process(
-        args=cmd, cwd=path, pass_thru=True, ensure_exit_code=False
-    )
-
-    if rc != 0:
-        command_context.log(
-            logging.ERROR,
-            "static-analysis",
-            {},
-            "ERROR: Running " + " ".join(cmd) + " failed!",
-        )
-        return rc
-    return 0
-
+        rc = rc or self._build_export(command_context, jobs=jobs, verbose=verbose)
+        if rc != 0:
+            return rc
 
-def get_reliability_index_for_cov_checker(command_context, cov_config, checker_name):
-    if cov_config is None:
-        command_context.log(
-            logging.INFO,
-            "static-analysis",
-            {},
-            "Coverity config file not found, "
-            "using default-value 'reliablity' = medium. for checker {}".format(
-                checker_name
-            ),
-        )
-        return "medium"
-
-    checkers = cov_config["coverity_checkers"]
-    if checker_name not in checkers:
-        command_context.log(
-            logging.INFO,
-            "static-analysis",
-            {},
-            "Coverity checker {} not found to determine reliability index. "
-            "For the moment we shall use the default 'reliablity' = medium.".format(
-                checker_name
-            ),
-        )
-        return "medium"
-
-    if "reliability" not in checkers[checker_name]:
-        # This checker doesn't have a reliability index
-        command_context.log(
-            logging.INFO,
-            "static-analysis",
-            {},
-            "Coverity checker {} doesn't have a reliability index set, "
-            "field 'reliability is missing', please cosinder adding it. "
-            "For the moment we shall use the default 'reliablity' = medium.".format(
-                checker_name
-            ),
-        )
-        return "medium"
-
-    return checkers[checker_name]["reliability"]
-
-
-def dump_cov_artifact(command_context, cov_config, cov_results, source, output):
-    # Parse Coverity json into structured issues
-
-    with open(cov_results) as f:
-        result = json.load(f)
-
-        # Parse the issues to a standard json format
-        issues_dict = {"files": {}}
-
-        files_list = issues_dict["files"]
-
-        def build_element(issue):
-            # We look only for main event
-            event_path = next(
-                (event for event in issue["events"] if event["main"] is True), None
-            )
-
-            dict_issue = {
-                "line": issue["mainEventLineNumber"],
-                "flag": issue["checkerName"],
-                "message": event_path["eventDescription"],
-                "reliability": get_reliability_index_for_cov_checker(
-                    command_context, cov_config, issue["checkerName"]
-                ),
-                "extra": {
-                    "category": issue["checkerProperties"]["category"],
-                    "stateOnServer": issue["stateOnServer"],
-                    "stack": [],
-                },
-            }
-
-            # Embed all events into extra message
-            for event in issue["events"]:
-                dict_issue["extra"]["stack"].append(
-                    {
-                        "file_path": build_repo_relative_path(
-                            event["strippedFilePathname"], command_context.topsrcdir
-                        ),
-                        "line_number": event["lineNumber"],
-                        "path_type": event["eventTag"],
-                        "description": event["eventDescription"],
-                    }
-                )
-
-            return dict_issue
-
-        for issue in result["issues"]:
-            path = build_repo_relative_path(
-                issue["strippedMainEventFilePathname"], command_context.topsrcdir
-            )
-            # Skip clang diagnostic messages
-            if issue["checkerName"].startswith("RW.CLANG"):
-                continue
-
-            if path is None:
-                # Since we skip a result we should log it
-                command_context.log(
-                    logging.INFO,
-                    "static-analysis",
-                    {},
-                    "Skipping CID: {0} from file: {1} since it's not related "
-                    "with the current patch.".format(
-                        issue["stateOnServer"]["cid"],
-                        issue["strippedMainEventFilePathname"],
-                    ),
-                )
-                continue
-            if path in files_list:
-                files_list[path]["warnings"].append(build_element(issue))
-            else:
-                files_list[path] = {"warnings": [build_element(issue)]}
-
-        with open(output, "w") as f:
-            json.dump(issues_dict, f)
-
-
-def get_coverity_secrets(command_context):
-    from taskgraph.util.taskcluster import get_root_url
+        # Use outgoing files instead of source files
+        if outgoing:
+            repo = get_repository_object(command_context.topsrcdir)
+            files = repo.get_outgoing_files()
+            source = self.get_abspath_files(command_context, files)
 
-    secret_name = "project/relman/coverity"
-    secrets_url = "{}/secrets/v1/secret/{}".format(get_root_url(True), secret_name)
-
-    command_context.log(
-        logging.INFO,
-        "static-analysis",
-        {},
-        'Using symbol upload token from the secrets service: "{}"'.format(secrets_url),
-    )
-
-    import requests
-
-    res = requests.get(secrets_url)
-    res.raise_for_status()
-    secret = res.json()
-    cov_config = secret["secret"] if "secret" in secret else None
-
-    cov = SimpleNamespace()
-
-    if cov_config is None:
-        command_context.log(
-            logging.ERROR,
-            "static-analysis",
-            {},
-            "ERROR: Ill formatted secret for Coverity. Aborting analysis.",
-        )
-        return 1, cov
-
-    cov.cov_analysis_url = cov_config.get("package_url")
-    cov.cov_package_name = cov_config.get("package_name")
-    cov.cov_url = cov_config.get("server_url")
-    cov.cov_server_ssl = cov_config.get("server_ssl", True)
-    # In case we don't have a port in the secret we use the default one,
-    # for a default coverity deployment.
-    cov.cov_port = cov_config.get("server_port", 8443)
-    cov.cov_auth = cov_config.get("auth_key")
-    cov.cov_package_ver = cov_config.get("package_ver")
-    cov.cov_lic_name = cov_config.get("lic_name")
-    cov.cov_capture_search = cov_config.get("fs_capture_search", None)
-    cov.cov_full_stack = cov_config.get("full_stack", False)
-    cov.cov_stream = cov_config.get("stream", False)
-
-    return 0, cov
-
-
-def download_coverity(command_context, cov):
-    if (
-        cov.cov_url is None
-        or cov.cov_port is None
-        or cov.cov_analysis_url is None
-        or cov.cov_auth is None
-    ):
-        command_context.log(
-            logging.ERROR,
-            "static-analysis",
-            {},
-            "ERROR: Missing Coverity secret on try job!",
-        )
-        return 1
-
-    COVERITY_CONFIG = """
-    {
-        "type": "Coverity configuration",
-        "format_version": 1,
-        "settings": {
-        "server": {
-            "host": "%s",
-            "ssl" : true,
-            "port": %s,
-            "on_new_cert" : "trust",
-            "auth_key_file": "%s"
-        },
-        "stream": "Firefox",
-        "cov_run_desktop": {
-            "build_cmd": [],
-            "clean_cmd": []
-        }
-        }
-    }
-    """
-    # Generate the coverity.conf and auth files
-    cov.cov_auth_path = mozpath.join(cov.cov_state_path, "auth")
-    cov_setup_path = mozpath.join(cov.cov_state_path, "coverity.conf")
-    cov_conf = COVERITY_CONFIG % (cov.cov_url, cov.cov_port, cov.cov_auth_path)
-
-    def download(artifact_url, target):
-        import requests
-
-        command_context.log_manager.enable_unstructured()
-        resp = requests.get(artifact_url, verify=False, stream=True)
-        command_context.log_manager.disable_unstructured()
-        resp.raise_for_status()
-
-        # Extract archive into destination
-        with tarfile.open(fileobj=io.BytesIO(resp.content)) as tar:
-            tar.extractall(target)
-
-    download(cov.cov_analysis_url, cov.cov_state_path)
-
-    with open(cov.cov_auth_path, "w") as f:
-        f.write(cov.cov_auth)
-
-    # Modify it's permission to 600
-    os.chmod(cov.cov_auth_path, 0o600)
-
-    with open(cov_setup_path, "a") as f:
-        f.write(cov_conf)
-
-
-def setup_coverity(command_context, force_download=True):
-    rc, config, _ = _get_config_environment(command_context)
-    if rc != 0:
-        return rc, None
-
-    rc, cov = get_coverity_secrets(command_context)
-    if rc != 0:
-        return rc, cov
+        # Split in several chunks to avoid hitting Python's limit of 100 groups in re
+        compile_db = json.loads(open(_compile_db, "r").read())
+        total = 0
+        import re
 
-    # Create a directory in mozbuild where we setup coverity
-    cov.cov_state_path = mozpath.join(
-        command_context._mach_context.state_dir, "coverity"
-    )
-
-    if force_download is True and os.path.exists(cov.cov_state_path):
-        shutil.rmtree(cov.cov_state_path)
-
-    os.mkdir(cov.cov_state_path)
-
-    # Download everything that we need for Coverity from out private instance
-    download_coverity(command_context, cov)
-
-    cov.cov_path = mozpath.join(cov.cov_state_path, cov.cov_package_name)
-    cov.cov_run_desktop = mozpath.join(cov.cov_path, "bin", "cov-run-desktop")
-    cov.cov_configure = mozpath.join(cov.cov_path, "bin", "cov-configure")
-    cov.cov_make_library = mozpath.join(cov.cov_path, "bin", "cov-make-library")
-    cov.cov_build = mozpath.join(cov.cov_path, "bin", "cov-build")
-    cov.cov_analyze = mozpath.join(cov.cov_path, "bin", "cov-analyze")
-    cov.cov_commit_defects = mozpath.join(cov.cov_path, "bin", "cov-commit-defects")
-    cov.cov_translate = mozpath.join(cov.cov_path, "bin", "cov-translate")
-    cov.cov_configure = mozpath.join(cov.cov_path, "bin", "cov-configure")
-    cov.cov_work_path = mozpath.join(cov.cov_state_path, "data-coverity")
-    cov.cov_idir_path = mozpath.join(cov.cov_work_path, cov.cov_package_ver, "idir")
-    cov.cov_lic_path = mozpath.join(
-        cov.cov_work_path, cov.cov_package_ver, "lic", cov.cov_lic_name
-    )
-
-    if not os.path.exists(cov.cov_path):
-        command_context.log(
-            logging.ERROR,
-            "static-analysis",
-            {},
-            "ERROR: Missing Coverity in {}".format(cov.cov_path),
-        )
-        return 1, cov
-
-    return 0, cov
-
-
-def get_files_with_commands(command_context, compile_db, source):
-    """
-    Returns an array of dictionaries having file_path with build command
-    """
-
-    compile_db = json.load(open(compile_db, "r"))
-
-    commands_list = []
-
-    for f in source:
-        # It must be a C/C++ file
-        _, ext = os.path.splitext(f)
+        chunk_size = 50
+        for offset in range(0, len(source), chunk_size):
+            source_chunks = [
+                re.escape(f) for f in source[offset : offset + chunk_size].copy()
+            ]
+            name_re = re.compile("(" + ")|(".join(source_chunks) + ")")
+            for f in compile_db:
+                if name_re.search(f["file"]):
+                    total = total + 1
 
-        if ext.lower() not in _format_include_extensions:
-            command_context.log(
-                logging.INFO, "static-analysis", {}, "Skipping {}".format(f)
-            )
-            continue
-        file_with_abspath = os.path.join(command_context.topsrcdir, f)
-        for f in compile_db:
-            # Found for a file that we are looking
-            if file_with_abspath == f["file"]:
-                commands_list.append(f)
-
-    return commands_list
-
-
-@memoize
-def get_clang_tidy_config(command_context):
-    from mozbuild.code_analysis.utils import ClangTidyConfig
-
-    return ClangTidyConfig(command_context.topsrcdir)
-
+        # Filter source to remove excluded files
+        source = self._generate_path_list(command_context, source, verbose=verbose)
 
-def _get_cov_config(command_context):
-    try:
-        file_handler = open(
-            mozpath.join(command_context.topsrcdir, "tools", "coverity", "config.yaml")
-        )
-        config = yaml.safe_load(file_handler)
-    except Exception:
-        command_context.log(
-            logging.ERROR,
-            "static-analysis",
-            {},
-            "ERROR: Looks like config.yaml is not valid, we are going to use default"
-            " values for the rest of the analysis for coverity.",
-        )
-        return None
-    return config
-
-
-def _get_required_version(command_context):
-    version = get_clang_tidy_config(command_context).version
-    if version is None:
-        command_context.log(
-            logging.ERROR,
-            "static-analysis",
-            {},
-            "ERROR: Unable to find 'package_version' in config.yml",
-        )
-    return version
-
-
-def _get_current_version(command_context, clang_paths):
-    # Because the fact that we ship together clang-tidy and clang-format
-    # we are sure that these two will always share the same version.
-    # Thus in order to determine that the version is compatible we only
-    # need to check one of them, going with clang-format
-    cmd = [clang_paths._clang_format_path, "--version"]
-    version_info = None
-    try:
-        version_info = (
-            subprocess.check_output(cmd, stderr=subprocess.STDOUT)
-            .decode("utf-8")
-            .strip()
-        )
-
-        if "MOZ_AUTOMATION" in os.environ:
-            # Only show it in the CI
+        if not total or not source:
             command_context.log(
                 logging.INFO,
                 "static-analysis",
                 {},
-                "{} Version = {} ".format(clang_paths._clang_format_path, version_info),
+                "There are no files eligible for analysis. Please note that 'header' files "
+                "cannot be used for analysis since they do not consist compilation units.",
+            )
+            return 0
+
+        # Escape the files from source
+        source = [re.escape(f) for f in source]
+
+        cwd = command_context.topobjdir
+
+        monitor = StaticAnalysisMonitor(
+            command_context.topsrcdir,
+            command_context.topobjdir,
+            self.get_clang_tidy_config(command_context).checks_with_data,
+            total,
+        )
+
+        footer = StaticAnalysisFooter(command_context.log_manager.terminal, monitor)
+
+        with StaticAnalysisOutputManager(
+            command_context.log_manager, monitor, footer
+        ) as output_manager:
+            import math
+
+            batch_size = int(
+                math.ceil(float(len(source)) / multiprocessing.cpu_count())
+            )
+            for i in range(0, len(source), batch_size):
+                args = self._get_clang_tidy_command(
+                    command_context,
+                    clang_paths,
+                    compilation_commands_path,
+                    checks=checks,
+                    header_filter=header_filter,
+                    sources=source[i : (i + batch_size)],
+                    jobs=jobs,
+                    fix=fix,
+                )
+                rc = command_context.run_process(
+                    args=args,
+                    ensure_exit_code=False,
+                    line_handler=output_manager.on_line,
+                    cwd=cwd,
+                )
+
+            command_context.log(
+                logging.WARNING,
+                "warning_summary",
+                {"count": len(monitor.warnings_db)},
+                "{count} warnings present.",
             )
 
-    except subprocess.CalledProcessError as e:
-        command_context.log(
-            logging.ERROR,
-            "static-analysis",
-            {},
-            "Error determining the version clang-tidy/format binary, please see the "
-            "attached exception: \n{}".format(e.output),
-        )
-    return version_info
-
-
-def _is_version_eligible(command_context, clang_paths):
-    version = _get_required_version(command_context)
-    if version is None:
-        return False
+            # Write output file
+            if output is not None:
+                output_manager.write(output, format)
 
-    current_version = _get_current_version(command_context, clang_paths)
-    if current_version is None:
-        return False
-    version = "clang-format version " + version
-    if version in current_version:
-        return True
-    command_context.log(
-        logging.ERROR,
-        "static-analysis",
-        {},
-        "ERROR: You're using an old or incorrect version ({}) of clang-format binary. "
-        "Please update to a more recent one (at least > {}) "
-        "by running: './mach bootstrap' ".format(
-            _get_current_version(command_context, clang_paths),
-            _get_required_version(command_context),
-        ),
-    )
-    return False
-
-
-def _get_clang_tidy_command(
-    command_context,
-    clang_paths,
-    compilation_commands_path,
-    checks,
-    header_filter,
-    sources,
-    jobs,
-    fix,
-):
-
-    if checks == "-*":
-        checks = ",".join(get_clang_tidy_config(command_context).checks)
-
-    common_args = [
-        "-clang-tidy-binary",
-        clang_paths._clang_tidy_path,
-        "-clang-apply-replacements-binary",
-        clang_paths._clang_apply_replacements,
-        "-checks=%s" % checks,
-        "-extra-arg=-std=c++17",
-        "-extra-arg=-DMOZ_CLANG_PLUGIN",
-    ]
-
-    # Flag header-filter is passed in order to limit the diagnostic messages only
-    # to the specified header files. When no value is specified the default value
-    # is considered to be the source in order to limit the diagnostic message to
-    # the source files or folders.
-    common_args += [
-        "-header-filter=%s"
-        % (header_filter if len(header_filter) else "|".join(sources))
-    ]
+        if rc != 0:
+            return rc
+        # if we are building firefox for android it might be nice to
+        # also analyze the java code base
+        if command_context.substs["MOZ_BUILD_APP"] == "mobile/android":
+            rc = self.check_java(
+                command_context, source, jobs, strip, verbose, skip_export=True
+            )
+        return rc
 
-    # From our configuration file, config.yaml, we build the configuration list, for
-    # the checkers that are used. These configuration options are used to better fit
-    # the checkers to our code.
-    cfg = get_clang_tidy_config(command_context).checks_config
-    if cfg:
-        common_args += ["-config=%s" % yaml.dump(cfg)]
-
-    if fix:
-        common_args += ["-fix"]
-
-    return (
-        [
-            command_context.virtualenv_manager.python_path,
-            clang_paths._run_clang_tidy_path,
-            "-j",
-            str(jobs),
-            "-p",
-            compilation_commands_path,
-        ]
-        + common_args
-        + sources
+    @StaticAnalysisSubCommand(
+        "static-analysis",
+        "check-coverity",
+        "Run coverity static-analysis tool on the given files. "
+        "Can only be run by automation! "
+        "It's result is stored as an json file on the artifacts server.",
+    )
+    @CommandArgument(
+        "source",
+        nargs="*",
+        default=[],
+        help="Source files to be analyzed by Coverity Static Analysis Tool. "
+        "This is ran only in automation.",
+    )
+    @CommandArgument(
+        "--output",
+        "-o",
+        default=None,
+        help="Write coverity