Backed out 12 changesets (bug 1383880) for decision task bustage
author: Phil Ringnalda <philringnalda@gmail.com>
Wed, 20 Sep 2017 19:57:39 -0700
changeset 382104 d88e5dec2638ae6d34527726ff0a9cb8d9336036
parent 382103 a439e2ac43058989193daa201b764b96821bd8fa
child 382105 9699e1cbb1ab3bdba84d7548dcdcd6efca9d1190
push id: 32546
push user: archaeopteryx@coole-files.de
push date: Thu, 21 Sep 2017 13:14:27 +0000
treeherder: mozilla-central@9caeafcec998 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
bugs: 1383880
milestone: 57.0a1
backs out: fd3615e7e0a3a85ba58616396468229bce30f957
8cceb6a82bfb079e93005d120f540f0877a0eaf3
571a6c9054a540e0bf40baf837da9175f21796b9
ec2b8ba5a949c3b899a647dbad93aa60b653e745
8e5847d9acda6349226b37fbbfc79ef2c01d7614
b354fdf6e233ae06118ac8f1a975d0d4a53db4a9
ebdd6ccbcfca7f7672040f7227da31594d60f737
ebcc9d20981a491ab1cb6b71bff43225a90169e8
97eedc84d6e8f7b1128cd29a90ab4a72963bb851
a3116da52b4e3e58fbdc1da33c47e18a2e371bae
b3eb0c93972084fd856a6d6f1bdadf8035478162
7c07cb7985302e288a56c29c3fac22cacc4096d2
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Backed out 12 changesets (bug 1383880) for decision task bustage CLOSED TREE Backed out changeset fd3615e7e0a3 (bug 1383880) Backed out changeset 8cceb6a82bfb (bug 1383880) Backed out changeset 571a6c9054a5 (bug 1383880) Backed out changeset ec2b8ba5a949 (bug 1383880) Backed out changeset 8e5847d9acda (bug 1383880) Backed out changeset b354fdf6e233 (bug 1383880) Backed out changeset ebdd6ccbcfca (bug 1383880) Backed out changeset ebcc9d20981a (bug 1383880) Backed out changeset 97eedc84d6e8 (bug 1383880) Backed out changeset a3116da52b4e (bug 1383880) Backed out changeset b3eb0c939720 (bug 1383880) Backed out changeset 7c07cb798530 (bug 1383880) MozReview-Commit-ID: EPDuQHr7w2y
browser/themes/moz.build
build/sparse-profiles/taskgraph
mobile/android/moz.build
moz.build
python/mozbuild/mozbuild/frontend/context.py
python/mozbuild/mozbuild/frontend/mach_commands.py
python/mozbuild/mozbuild/mach_commands.py
python/mozbuild/mozbuild/schedules.py
python/mozbuild/mozbuild/test/frontend/data/schedules/moz.build
python/mozbuild/mozbuild/test/frontend/data/schedules/subd/moz.build
python/mozbuild/mozbuild/test/frontend/test_reader.py
python/mozbuild/mozbuild/util.py
taskcluster/ci/build/android-stuff.yml
taskcluster/ci/build/kind.yml
taskcluster/ci/test/kind.yml
taskcluster/ci/upload-generated-sources/kind.yml
taskcluster/ci/upload-symbols/kind.yml
taskcluster/docs/loading.rst
taskcluster/docs/optimization.rst
taskcluster/docs/parameters.rst
taskcluster/taskgraph/decision.py
taskcluster/taskgraph/generator.py
taskcluster/taskgraph/graph.py
taskcluster/taskgraph/morph.py
taskcluster/taskgraph/optimize.py
taskcluster/taskgraph/parameters.py
taskcluster/taskgraph/target_tasks.py
taskcluster/taskgraph/task.py
taskcluster/taskgraph/test/test_decision.py
taskcluster/taskgraph/test/test_generator.py
taskcluster/taskgraph/test/test_graph.py
taskcluster/taskgraph/test/test_optimize.py
taskcluster/taskgraph/test/test_target_tasks.py
taskcluster/taskgraph/test/test_taskgraph.py
taskcluster/taskgraph/test/test_try_option_syntax.py
taskcluster/taskgraph/test/test_util_parameterization.py
taskcluster/taskgraph/transforms/build.py
taskcluster/taskgraph/transforms/build_attrs.py
taskcluster/taskgraph/transforms/docker_image.py
taskcluster/taskgraph/transforms/job/__init__.py
taskcluster/taskgraph/transforms/job/toolchain.py
taskcluster/taskgraph/transforms/task.py
taskcluster/taskgraph/transforms/tests.py
taskcluster/taskgraph/try_option_syntax.py
taskcluster/taskgraph/util/platforms.py
--- a/browser/themes/moz.build
+++ b/browser/themes/moz.build
@@ -10,17 +10,8 @@ with Files("**"):
 toolkit = CONFIG['MOZ_WIDGET_TOOLKIT']
 
 if toolkit == 'cocoa':
     DIRS += ['osx']
 elif toolkit in ('gtk2', 'gtk3'):
     DIRS += ['linux']
 else:
     DIRS += ['windows']
-
-with Files('osx/**'):
-    SCHEDULES.exclusive = ['macosx']
-
-with Files('linux/**'):
-    SCHEDULES.exclusive = ['linux']
-
-with Files('windows/**'):
-    SCHEDULES.exclusive = ['windows']
--- a/build/sparse-profiles/taskgraph
+++ b/build/sparse-profiles/taskgraph
@@ -17,15 +17,11 @@ path:taskcluster/
 # them all in.
 path:testing/config/tooltool-manifests/
 path:testing/mozharness/
 path:tools/lint/
 
 # for new-style try pushes
 path:try_task_config.json
 
-# Moz.build files are read in filesystem mode
-glob:**/moz.build
-glob:**/*.mozbuild
-
 # Tooltool manifests also need to be opened. Assume they
 # are all somewhere in "tooltool-manifests" directories.
 glob:**/tooltool-manifests/**
--- a/mobile/android/moz.build
+++ b/mobile/android/moz.build
@@ -1,17 +1,16 @@
 # -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
 # vim: set filetype=python:
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 with Files('**'):
     BUG_COMPONENT = ('Firefox for Android', 'Build Config & IDE Support')
-    SCHEDULES.exclusive = ['android']
 
 with Files('bouncer/**'):
     BUG_COMPONENT = ('Firefox for Android', 'Distributions')
 
 with Files('branding/**'):
     BUG_COMPONENT = ('Firefox for Android', 'General')
 
 with Files('build/**'):
--- a/moz.build
+++ b/moz.build
@@ -31,17 +31,16 @@ with Files('mach'):
 with Files('*moz*'):
     BUG_COMPONENT = ('Core', 'Build Config')
 
 with Files('GNUmakefile'):
     BUG_COMPONENT = ('Core', 'Build Config')
 
 with Files('*gradle*'):
     BUG_COMPONENT = ('Firefox for Android', 'Build Config & IDE Support')
-    SCHEDULES.exclusive = ['android']
 
 with Files('**/l10n.toml'):
     BUG_COMPONENT = ('Core', 'Localization')
     FINAL = True
 
 with Files('README.txt'):
     BUG_COMPONENT = ('Core', 'General')
 
--- a/python/mozbuild/mozbuild/frontend/context.py
+++ b/python/mozbuild/mozbuild/frontend/context.py
@@ -19,32 +19,29 @@ from __future__ import absolute_import, 
 import os
 
 from collections import (
     Counter,
     OrderedDict,
 )
 from mozbuild.util import (
     HierarchicalStringList,
-    ImmutableStrictOrderingOnAppendList,
     KeyedDefaultDict,
     List,
     ListWithAction,
     memoize,
     memoized_property,
     ReadOnlyKeyedDefaultDict,
     StrictOrderingOnAppendList,
     StrictOrderingOnAppendListWithAction,
     StrictOrderingOnAppendListWithFlagsFactory,
     TypedList,
     TypedNamedTuple,
 )
 
-from .. import schedules
-
 from ..testing import (
     all_test_flavors,
     read_manifestparser_manifest,
     read_reftest_manifest,
     read_wpt_manifest,
 )
 
 import mozpack.path as mozpath
@@ -603,64 +600,16 @@ def ContextDerivedTypedRecord(*fields):
             if name in self._fields and not isinstance(value, self._fields[name]):
                 value = self._fields[name](value)
             object.__setattr__(self, name, value)
 
     _TypedRecord._fields = dict(fields)
     return _TypedRecord
 
 
-class Schedules(object):
-    """Similar to a ContextDerivedTypedRecord, but with different behavior
-    for the properties:
-
-     * VAR.inclusive can only be appended to (+=), and can only contain values
-       from mozbuild.schedules.INCLUSIVE_COMPONENTS
-
-     * VAR.exclusive can only be assigned to (no +=), and can only contain
-       values from mozbuild.schedules.ALL_COMPONENTS
-    """
-    __slots__ = ('_exclusive', '_inclusive')
-
-    def __init__(self):
-        self._inclusive = TypedList(Enum(*schedules.INCLUSIVE_COMPONENTS))()
-        self._exclusive = ImmutableStrictOrderingOnAppendList(schedules.EXCLUSIVE_COMPONENTS)
-
-    # inclusive is mutable cannot be assigned to (+= only)
-    @property
-    def inclusive(self):
-        return self._inclusive
-
-    @inclusive.setter
-    def inclusive(self, value):
-        if value is not self._inclusive:
-            raise AttributeError("Cannot assign to this value - use += instead")
-        unexpected = [v for v in value if v not in schedules.INCLUSIVE_COMPONENTS]
-        if unexpected:
-            raise Exception("unexpected exclusive component(s) " + ', '.join(unexpected))
-
-    # exclusive is immuntable but can be set (= only)
-    @property
-    def exclusive(self):
-        return self._exclusive
-
-    @exclusive.setter
-    def exclusive(self, value):
-        if not isinstance(value, (tuple, list)):
-            raise Exception("expected a tuple or list")
-        unexpected = [v for v in value if v not in schedules.ALL_COMPONENTS]
-        if unexpected:
-            raise Exception("unexpected exclusive component(s) " + ', '.join(unexpected))
-        self._exclusive = ImmutableStrictOrderingOnAppendList(sorted(value))
-
-    # components provides a synthetic summary of all components
-    @property
-    def components(self):
-        return list(sorted(set(self._inclusive) | set(self._exclusive)))
-
 @memoize
 def ContextDerivedTypedHierarchicalStringList(type):
     """Specialized HierarchicalStringList for use with ContextDerivedValue
     types."""
     class _TypedListWithItems(ContextDerivedValue, HierarchicalStringList):
         __slots__ = ('_strings', '_children', '_context')
 
         def __init__(self, context):
@@ -721,19 +670,16 @@ OrderedSourceList = ContextDerivedTypedL
 OrderedTestFlavorList = TypedList(Enum(*all_test_flavors()),
                                   StrictOrderingOnAppendList)
 OrderedStringList = TypedList(unicode, StrictOrderingOnAppendList)
 DependentTestsEntry = ContextDerivedTypedRecord(('files', OrderedSourceList),
                                                 ('tags', OrderedStringList),
                                                 ('flavors', OrderedTestFlavorList))
 BugzillaComponent = TypedNamedTuple('BugzillaComponent',
                         [('product', unicode), ('component', unicode)])
-SchedulingComponents = ContextDerivedTypedRecord(
-        ('inclusive', TypedList(unicode, StrictOrderingOnAppendList)),
-        ('exclusive', TypedList(unicode, StrictOrderingOnAppendList)))
 
 
 class Files(SubContext):
     """Metadata attached to files.
 
     It is common to want to annotate files with metadata, such as which
     Bugzilla component tracks issues with certain files. This sub-context is
     where we stick that metadata.
@@ -842,45 +788,16 @@ class Files(SubContext):
             with Files('dom/base/nsGlobalWindow.cpp'):
                 IMPACTED_TESTS.flavors += [
                     'mochitest',
                 ]
 
             Would suggest that nsGlobalWindow.cpp is potentially relevant to
             any plain mochitest.
             """),
-        'SCHEDULES': (Schedules, list,
-            """Maps source files to the CI tasks that should be scheduled when
-            they change.  The tasks are grouped by named components, and those
-            names appear again in the taskgraph configuration
-            `($topsrcdir/taskgraph/).
-
-            Some components are "inclusive", meaning that changes to most files
-            do not schedule them, aside from those described in a Files
-            subcontext.  For example, py-lint tasks need not be scheduled for
-            most changes, but should be scheduled when any Python file changes.
-            Such components are named by appending to `SCHEDULES.inclusive`:
-
-            with Files('**.py'):
-                SCHEDULES.inclusive += ['py-lint']
-
-            Other components are 'exclusive', meaning that changes to most
-            files schedule them, but some files affect only one or two
-            components. For example, most files schedule builds and tests of
-            Firefox for Android, OS X, Windows, and Linux, but files under
-            `mobile/android/` affect Android builds and tests exclusively, so
-            builds for other operating systems are not needed.  Test suites
-            provide another example: most files schedule reftests, but changes
-            to reftest scripts need only schedule reftests and no other suites.
-
-            Exclusive components are named by setting `SCHEDULES.exclusive`:
-
-            with Files('mobile/android/**'):
-                SCHEDULES.exclusive = ['android']
-            """),
     }
 
     def __init__(self, parent, pattern=None):
         super(Files, self).__init__(parent)
         self.pattern = pattern
         self.finalized = set()
         self.test_files = set()
         self.test_tags = set()
--- a/python/mozbuild/mozbuild/frontend/mach_commands.py
+++ b/python/mozbuild/mozbuild/frontend/mach_commands.py
@@ -12,17 +12,16 @@ from mach.decorators import (
     CommandProvider,
     Command,
     SubCommand,
 )
 
 from mozbuild.base import MachCommandBase
 import mozpack.path as mozpath
 
-TOPSRCDIR = os.path.abspath(os.path.join(__file__, '../../../../../'))
 
 class InvalidPathException(Exception):
     """Represents an error due to an invalid path."""
 
 
 @CommandProvider
 class MozbuildFileCommands(MachCommandBase):
     @Command('mozbuild-reference', category='build-dev',
@@ -188,28 +187,8 @@ class MozbuildFileCommands(MachCommandBa
                 raise InvalidPathException('cannot use wildcard in version control mode')
 
             for path, f in reader.finder.find(p):
                 if path not in all_paths_set:
                     all_paths_set.add(path)
                     allpaths.append(path)
 
         return reader.files_info(allpaths)
-
-
-    @SubCommand('file-info', 'schedules',
-                'Show the combined SCHEDULES for the files listed.')
-    @CommandArgument('paths', nargs='+',
-                     help='Paths whose data to query')
-    def file_info_schedules(self, paths):
-        """Show what is scheduled by the given files.
-
-        Given a requested set of files (which can be specified using
-        wildcards), print the total set of scheduled components.
-        """
-        from mozbuild.frontend.reader import EmptyConfig, BuildReader
-        config = EmptyConfig(TOPSRCDIR)
-        reader = BuildReader(config)
-        schedules = set()
-        for p, m in reader.files_info(paths).items():
-            schedules |= set(m['SCHEDULES'].components)
-
-        print(", ".join(schedules))
--- a/python/mozbuild/mozbuild/mach_commands.py
+++ b/python/mozbuild/mozbuild/mach_commands.py
@@ -1779,16 +1779,17 @@ class PackageFrontend(MachCommandBase):
             unpack_file,
         )
         from requests.adapters import HTTPAdapter
         import redo
         import requests
         import shutil
 
         from taskgraph.generator import Kind
+        from taskgraph.optimize import optimize_task
         from taskgraph.util.taskcluster import (
             get_artifact_url,
             list_artifacts,
         )
         import yaml
 
         self._set_log_level(verbose)
         # Normally, we'd use self.log_manager.enable_unstructured(),
@@ -1877,22 +1878,16 @@ class PackageFrontend(MachCommandBase):
                                         record.digest)
                 records[record.filename] = DownloadRecord(
                     url, record.filename, record.size, record.digest,
                     record.algorithm, unpack=record.unpack,
                     version=record.version, visibility=record.visibility,
                     setup=record.setup)
 
         if from_build:
-            if 'TASK_ID' in os.environ:
-                self.log(logging.ERROR, 'artifact', {},
-                         'Do not use --from-build in automation; all dependencies '
-                         'should be determined in the decision task.')
-                return 1
-            from taskgraph.optimize import IndexSearch
             params = {
                 'message': '',
                 'project': '',
                 'level': os.environ.get('MOZ_SCM_LEVEL', '3'),
                 'base_repository': '',
                 'head_repository': '',
                 'head_rev': '',
                 'moz_build_date': '',
@@ -1928,18 +1923,17 @@ class PackageFrontend(MachCommandBase):
                     b = 'toolchain-{}'.format(b)
 
                 task = toolchains.get(aliases.get(b, b))
                 if not task:
                     self.log(logging.ERROR, 'artifact', {'build': user_value},
                              'Could not find a toolchain build named `{build}`')
                     return 1
 
-                task_id = IndexSearch().should_replace_task(
-                    task, {}, task.optimization.get('index-search', []))
+                task_id = optimize_task(task, {})
                 artifact_name = task.attributes.get('toolchain-artifact')
                 if task_id in (True, False) or not artifact_name:
                     self.log(logging.ERROR, 'artifact', {'build': user_value},
                              'Could not find artifacts for a toolchain build '
                              'named `{build}`')
                     return 1
 
                 record = ArtifactRecord(task_id, artifact_name)
deleted file mode 100644
--- a/python/mozbuild/mozbuild/schedules.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this file,
-# You can obtain one at http://mozilla.org/MPL/2.0/.
-
-"""
-Constants for SCHEDULES configuration in moz.build files and for
-skip-unless-schedules optimizations in task-graph generation.
-"""
-
-from __future__ import absolute_import, unicode_literals, print_function
-
-# TODO: ideally these lists could be specified in moz.build itself
-
-INCLUSIVE_COMPONENTS = [
-    'py-lint',
-    'js-lint',
-    'yaml-lint',
-]
-EXCLUSIVE_COMPONENTS = [
-    # os families
-    'android',
-    'linux',
-    'macosx',
-    'windows',
-]
-ALL_COMPONENTS = INCLUSIVE_COMPONENTS + EXCLUSIVE_COMPONENTS
deleted file mode 100644
--- a/python/mozbuild/mozbuild/test/frontend/data/schedules/moz.build
+++ /dev/null
@@ -1,11 +0,0 @@
-# Any copyright is dedicated to the Public Domain.
-# http://creativecommons.org/publicdomain/zero/1.0/
-
-with Files('*.win'):
-    SCHEDULES.exclusive = ['windows']
-
-with Files('*.osx'):
-    SCHEDULES.exclusive = ['macosx']
-
-with Files('subd/**.py'):
-    SCHEDULES.inclusive += ['py-lint']
deleted file mode 100644
--- a/python/mozbuild/mozbuild/test/frontend/data/schedules/subd/moz.build
+++ /dev/null
@@ -1,2 +0,0 @@
-with Files('yaml.py'):
-    SCHEDULES.inclusive += ['yaml-lint']
--- a/python/mozbuild/mozbuild/test/frontend/test_reader.py
+++ b/python/mozbuild/mozbuild/test/frontend/test_reader.py
@@ -475,32 +475,11 @@ class TestBuildReader(unittest.TestCase)
                              expected_flavors[path])
 
     def test_invalid_flavor(self):
         reader = self.reader('invalid-files-flavor')
 
         with self.assertRaises(BuildReaderError):
             reader.files_info(['foo.js'])
 
-    def test_schedules(self):
-        reader = self.reader('schedules')
-        info = reader.files_info(['somefile', 'foo.win', 'foo.osx', 'subd/aa.py', 'subd/yaml.py'])
-        # default: all exclusive, no inclusive
-        self.assertEqual(info['somefile']['SCHEDULES'].inclusive, [])
-        self.assertEqual(info['somefile']['SCHEDULES'].exclusive, ['android', 'linux', 'macosx', 'windows'])
-        # windows-only
-        self.assertEqual(info['foo.win']['SCHEDULES'].inclusive, [])
-        self.assertEqual(info['foo.win']['SCHEDULES'].exclusive, ['windows'])
-        # osx-only
-        self.assertEqual(info['foo.osx']['SCHEDULES'].inclusive, [])
-        self.assertEqual(info['foo.osx']['SCHEDULES'].exclusive, ['macosx'])
-        # top-level moz.build specifies subd/**.py with an inclusive option
-        self.assertEqual(info['subd/aa.py']['SCHEDULES'].inclusive, ['py-lint'])
-        self.assertEqual(info['subd/aa.py']['SCHEDULES'].exclusive, ['android', 'linux', 'macosx', 'windows'])
-        # Files('yaml.py') in subd/moz.build *overrides* Files('subdir/**.py')
-        self.assertEqual(info['subd/yaml.py']['SCHEDULES'].inclusive, ['yaml-lint'])
-        self.assertEqual(info['subd/yaml.py']['SCHEDULES'].exclusive, ['android', 'linux', 'macosx', 'windows'])
-
-        self.assertEqual(info['subd/yaml.py']['SCHEDULES'].components,
-                ['android', 'linux', 'macosx', 'windows', 'yaml-lint'])
 
 if __name__ == '__main__':
     main()
--- a/python/mozbuild/mozbuild/util.py
+++ b/python/mozbuild/mozbuild/util.py
@@ -480,35 +480,16 @@ class StrictOrderingOnAppendListMixin(ob
 class StrictOrderingOnAppendList(ListMixin, StrictOrderingOnAppendListMixin,
         list):
     """A list specialized for moz.build environments.
 
     We overload the assignment and append operations to require that incoming
     elements be ordered. This enforces cleaner style in moz.build files.
     """
 
-class ImmutableStrictOrderingOnAppendList(StrictOrderingOnAppendList):
-    """Like StrictOrderingOnAppendList, but not allowing mutations of the value.
-    """
-    def append(self, elt):
-        raise Exception("cannot use append on this type")
-
-    def extend(self, iterable):
-        raise Exception("cannot use extend on this type")
-
-    def __setslice__(self, i, j, iterable):
-        raise Exception("cannot assign to slices on this type")
-
-    def __setitem__(self, i, elt):
-        raise Exception("cannot assign to indexes on this type")
-
-    def __iadd__(self, other):
-        raise Exception("cannot use += on this type")
-
-
 class ListWithActionMixin(object):
     """Mixin to create lists with pre-processing. See ListWithAction."""
     def __init__(self, iterable=None, action=None):
         if iterable is None:
             iterable = []
         if not callable(action):
             raise ValueError('A callabe action is required to construct '
                              'a ListWithAction')
--- a/taskcluster/ci/build/android-stuff.yml
+++ b/taskcluster/ci/build/android-stuff.yml
@@ -34,19 +34,19 @@ android-dependencies/opt:
             - builds/releng_base_android_64_builds.py
             - disable_signing.py
             - platform_supports_post_upload_to_latest.py
         script: "mozharness/scripts/fx_desktop_build.py"
         secrets: true
         custom-build-variant-cfg: api-16-gradle-dependencies
         tooltool-downloads: internal
         job-script: taskcluster/scripts/builder/build-android-dependencies.sh
-    optimization:
-        skip-unless-changed:
-          - "mobile/android/config/**"
+    optimizations:
+      - - skip-unless-changed
+        - - "mobile/android/config/**"
           - "testing/mozharness/configs/builds/releng_sub_android_configs/*gradle_dependencies.py"
           - "**/*.gradle"
           - "taskcluster/docker/android-build/**"
 
 android-test/opt:
     description: "Android armv7 unit tests"
     index:
         product: mobile
@@ -77,19 +77,19 @@ android-test/opt:
         config:
             - builds/releng_base_android_64_builds.py
             - disable_signing.py
             - platform_supports_post_upload_to_latest.py
         script: "mozharness/scripts/fx_desktop_build.py"
         secrets: true
         custom-build-variant-cfg: android-test
         tooltool-downloads: internal
-    optimization:
-        skip-unless-changed:
-          - "mobile/android/base/**"
+    optimizations:
+      - - skip-unless-changed
+        - - "mobile/android/base/**"
           - "mobile/android/config/**"
           - "mobile/android/tests/background/junit4/**"
           - "**/*.gradle"
 
 android-lint/opt:
     description: "Android lint"
     index:
         product: mobile
@@ -135,19 +135,19 @@ android-lint/opt:
         config:
             - builds/releng_base_android_64_builds.py
             - disable_signing.py
             - platform_supports_post_upload_to_latest.py
         script: "mozharness/scripts/fx_desktop_build.py"
         secrets: true
         custom-build-variant-cfg: android-lint
         tooltool-downloads: internal
-    optimization:
-        skip-unless-changed:
-          - "mobile/android/**/*.java"
+    optimizations:
+      - - skip-unless-changed
+        - - "mobile/android/**/*.java"
           - "mobile/android/**/*.jpeg"
           - "mobile/android/**/*.jpg"
           - "mobile/android/**/*.png"
           - "mobile/android/**/*.svg"
           - "mobile/android/**/*.xml" # Manifest & android resources
           - "mobile/android/**/Makefile.in"
           - "mobile/android/config/**"
           - "mobile/android/**/moz.build"
@@ -187,19 +187,19 @@ android-checkstyle/opt:
         config:
             - builds/releng_base_android_64_builds.py
             - disable_signing.py
             - platform_supports_post_upload_to_latest.py
         script: "mozharness/scripts/fx_desktop_build.py"
         secrets: true
         custom-build-variant-cfg: android-checkstyle
         tooltool-downloads: internal
-    optimization:
-        skip-unless-changed:
-          - "mobile/android/**/checkstyle.xml"
+    optimizations:
+      - - skip-unless-changed
+        - - "mobile/android/**/checkstyle.xml"
           - "mobile/android/**/*.java"
           - "mobile/android/**/Makefile.in"
           - "mobile/android/config/**"
           - "mobile/android/**/moz.build"
           - "**/*.gradle"
 
 android-findbugs/opt:
     description: "Android findbugs"
@@ -241,15 +241,15 @@ android-findbugs/opt:
         config:
             - builds/releng_base_android_64_builds.py
             - disable_signing.py
             - platform_supports_post_upload_to_latest.py
         script: "mozharness/scripts/fx_desktop_build.py"
         secrets: true
         custom-build-variant-cfg: android-findbugs
         tooltool-downloads: internal
-    optimization:
-        skip-unless-changed:
-          - "mobile/android/**/*.java"
+    optimizations:
+      - - skip-unless-changed
+        - - "mobile/android/**/*.java"
           - "mobile/android/**/Makefile.in"
           - "mobile/android/config/**"
           - "mobile/android/**/moz.build"
           - "**/*.gradle"
--- a/taskcluster/ci/build/kind.yml
+++ b/taskcluster/ci/build/kind.yml
@@ -16,8 +16,10 @@ transforms:
    - taskgraph.transforms.task:transforms
 
 jobs-from:
     - android.yml
     - android-stuff.yml
     - linux.yml
     - macosx.yml
     - windows.yml
+
+parse-commit: taskgraph.try_option_syntax:parse_message
--- a/taskcluster/ci/test/kind.yml
+++ b/taskcluster/ci/test/kind.yml
@@ -4,8 +4,10 @@ kind-dependencies:
     - build
     - build-signing
 
 transforms:
    - taskgraph.transforms.tests:transforms
    - taskgraph.transforms.job:transforms
    - taskgraph.transforms.coalesce:transforms
    - taskgraph.transforms.task:transforms
+
+parse-commit: taskgraph.try_option_syntax:parse_message
--- a/taskcluster/ci/upload-generated-sources/kind.yml
+++ b/taskcluster/ci/upload-generated-sources/kind.yml
@@ -26,12 +26,10 @@ job-template:
   worker:
      docker-image: {in-tree: "lint"}
      max-run-time: 600
   run:
     using: run-task
     command: >
             cd /builds/worker/checkouts/gecko &&
             ./mach python build/upload_generated_sources.py ${ARTIFACT_URL}
-  optimization:
-    only-if-dependencies-run: null
   scopes:
       - secrets:get:project/releng/gecko/build/level-{level}/gecko-generated-sources-upload
--- a/taskcluster/ci/upload-symbols/kind.yml
+++ b/taskcluster/ci/upload-symbols/kind.yml
@@ -38,12 +38,10 @@ job-template:
        os: linux
        max-run-time: 600
        command: ["/bin/bash", "bin/upload.sh"]
        docker-image: taskclusterprivate/upload_symbols:0.0.4
        env:
            GECKO_HEAD_REPOSITORY: # see transforms
            GECKO_HEAD_REV: # see transforms
            ARTIFACT_TASKID: {"task-reference": "<build>"}
-   optimization:
-       only-if-dependencies-run: null
    scopes:
        - docker-worker:image:taskclusterprivate/upload_symbols:0.0.4
--- a/taskcluster/docs/loading.rst
+++ b/taskcluster/docs/loading.rst
@@ -27,8 +27,17 @@ The return value is a list of inputs to 
 ``transforms`` property. The specific format for the input depends on the first
 transform - whatever it expects. The final transform should be
 ``taskgraph.transform.task:transforms``, which produces the output format the
 task-graph generation infrastructure expects.
 
 The ``transforms`` key in ``kind.yml`` is further documented in
 :doc:`transforms`.  For more information on how all of this works, consult the
 docstrings and comments in the source code itself.
+
+Try option syntax
+-----------------
+
+The ``parse-commit`` optional field specified in ``kind.yml`` links to a
+function to parse the command line options in the ``--message`` mach parameter.
+Currently, the only valid value is ``taskgraph.try_option_syntax:parse_message``.
+The parsed arguments are stored in ``config.config['args']``, it corresponds
+to the same object returned by ``parse_args`` from ``argparse`` Python module.
--- a/taskcluster/docs/optimization.rst
+++ b/taskcluster/docs/optimization.rst
@@ -1,119 +1,44 @@
 Optimization
 ============
 
 The objective of optimization to remove as many tasks from the graph as
 possible, as efficiently as possible, thereby delivering useful results as
-quickly as possible. For example, ideally if only a test script is modified in
+quickly as possible.  For example, ideally if only a test script is modified in
 a push, then the resulting graph contains only the corresponding test suite
 task.
 
 A task is said to be "optimized" when it is either replaced with an equivalent,
 already-existing task, or dropped from the graph entirely.
 
-Optimization Strategies
------------------------
+Optimization Functions
+----------------------
 
-Each task has a single named optimization strategy, and can provide an argument
-to that strategy. Each strategy is defined as an ``OptimizationStrategy``
-instance in ``taskcluster/taskgraph/optimization.py``.
+During the optimization phase of task-graph generation, each task is optimized
+in post-order, meaning that each task's dependencies will be optimized before
+the task itself is optimized.
 
-Each task has a ``task.optimization`` property describing the optimization
-strategy that applies, specified as a dictionary mapping strategy to argument. For
+Each task has a ``task.optimizations`` property describing the optimization
+methods that apply.  Each is specified as a list of method and arguments. For
 example::
 
-    task.optimization = {'skip-unless-changed': ['js/**', 'tests/**']}
+    task.optimizations = [
+        ['seta'],
+        ['skip-unless-changed', ['js/**', 'tests/**']],
+    ]
 
-Strategy implementations are shared across all tasks, so they may cache
-commonly-used information as instance variables.
+These methods are defined in ``taskcluster/taskgraph/optimize.py``.  They are
+applied in order, and the first to return a success value causes the task to
+be optimized.
+
+Each method can return either a taskId (indicating that the given task can be
+replaced) or indicate that the task can be optimized away. If a task on which
+others depend is optimized away, task-graph generation will fail.
 
 Optimizing Target Tasks
 -----------------------
 
 In some cases, such as try pushes, tasks in the target task set have been
 explicitly requested and are thus excluded from optimization. In other cases,
 the target task set is almost the entire task graph, so targetted tasks are
-considered for optimization. This behavior is controlled with the
+considered for optimization.  This behavior is controlled with the
 ``optimize_target_tasks`` parameter.
-
-.. note:
-
-    Because it is a mix of "what the push author wanted" and "what should run
-    when necessary", try pushes with the old option syntax (``-b do -p all``,
-    etc.) *do* optimize target tasks.  This can cause unexpected results when
-    requested jobs are optimized away.  If those jobs were actually necessary,
-    then a try push with ``try_task_config.json`` is the solution.
-
-Optimization Process
---------------------
-
-Optimization proceeds in three phases: removing tasks, replacing tasks,
-and finally generating a subgraph containing only the remaining tasks.
-
-Assume the following task graph as context for these examples::
-
-    TC1 <--\     ,- UP1
-          , B1 <--- T1a
-    I1 <-|       `- T1b
-          ` B2 <--- T2a
-    TC2 <--/     |- T2b
-                 `- UP2
-
-Removing Tasks
-::::::::::::::
-
-This phase begins with tasks on which nothing depends and follows the
-dependency graph backward from there -- right to left in the diagram above. If
-a task is not removed, then nothing it depends on will be removed either.
-Thus if T1a and T1b are both removed, B1 may be removed as well. But if T2b is
-not removed, then B2 may not be removed either.
-
-For each task with no remaining dependencies, the decision whether to remove is
-made by calling the optimization strategy's ``should_remove_task`` method. If
-this method returns True, the task is removed.
-
-The optimization process takes a ``do_not_optimize`` argument containing a list
-of tasks that cannot be removed under any circumstances. This is used to
-"force" running specific tasks.
-
-Replacing Tasks
-:::::::::::::::
-
-This phase begins with tasks having no dependencies and follows the reversed
-dependency graph from there -- left to right in the diagram above. If a task is
-not replaced, then anything depending on that task cannot be replaced.
-Replacement is generally done on the basis of some hash of the inputs to the
-task. In the diagram above, if both TC1 and I1 are replaced with existing
-tasks, then B1 is a candidate for replacement. But if TC2 has no replacement,
-then replacement of B2 will not be considered.
-
-It is possible to replace a task with nothing.  This is similar to optimzing
-away, but is useful for utility tasks like UP1. If such a task is considered
-for replacement, then all of its dependencies (here, B1) have already been
-replaced and there is no utility in running the task and no need for a
-replacement task.  It is an error for a task on which others depend to be
-replaced with nothing.
-
-The ``do_not_optimize`` set applies to task replacement, as does an additional
-``existing_tasks`` dictionary which allows the caller to supply as set of
-known, pre-existing tasks. This is used for action tasks, for example, where it
-contains the entire task-graph generated by the original decision task.
-
-Subgraph Generation
-:::::::::::::::::::
-
-The first two phases annotate each task in the existing taskgraph with their
-fate: removed, replaced, or retained. The tasks that are replaced also have a
-replacement taskId.
-
-The last phase constructs a subgraph containing the retained tasks, and
-simultaneously rewrites all dependencies to refer to taskIds instead of labels.
-To do so, it assigns a taskId to each retained task and uses the replacement
-taskId for all replaced tasks.
-
-The result is an optimized taskgraph with tasks named by taskId instead of
-label. At this phase, the edges in the task graph diverge from the
-``task.dependencies`` attributes, as the latter may contain dependencies
-outside of the taskgraph (for replacement tasks).
-
-As a side-effect, this phase also expands all ``{"task-reference": ".."}``
-objects within the task definitions.
--- a/taskcluster/docs/parameters.rst
+++ b/taskcluster/docs/parameters.rst
@@ -74,53 +74,50 @@ Tree Information
    ``cedar``.
 
 ``level``
    The `SCM level
    <https://www.mozilla.org/en-US/about/governance/policies/commit/access-policy/>`_
    associated with this tree.  This dictates the names of resources used in the
    generated tasks, and those tasks will fail if it is incorrect.
 
-Try Configuration
------------------
-
-``try_mode``
-    The mode in which a try push is operating.  This can be one of
-    ``"try_task_config"``, ``"try_option_syntax"``, or ``None`` meaning no try
-    input was provided.
-
-``try_options``
-    The arguments given as try syntax (as a dictionary), or ``None`` if
-    ``try_mode`` is not ``try_option_syntax``.
-
-``try_task_config``
-    The contents of the ``try_task_config.json`` file, or ``None`` if
-    ``try_mode`` is not ``try_task_config``.
-
 Target Set
 ----------
 
 The "target set" is the set of task labels which must be included in a task
 graph.  The task graph generation process will include any tasks required by
 those in the target set, recursively.  In a decision task, this set can be
 specified programmatically using one of a variety of methods (e.g., parsing try
 syntax or reading a project-specific configuration file).
 
 ``filters``
     List of filter functions (from ``taskcluster/taskgraph/filter_tasks.py``) to
     apply. This is usually defined internally, as filters are typically
     global.
 
+``target_task_labels``
+    List of task labels to select. Labels not listed will be filtered out.
+    Enabled on try only.
+
 ``target_tasks_method``
     The method to use to determine the target task set.  This is the suffix of
     one of the functions in ``taskcluster/taskgraph/target_tasks.py``.
 
 ``optimize_target_tasks``
     If true, then target tasks are eligible for optimization.
 
 ``include_nightly``
     If true, then nightly tasks are eligible for optimization.
 
 ``release_history``
    History of recent releases by platform and locale, used when generating
    partial updates for nightly releases.
    Suitable contents can be generated with ``mach release-history``,
    which will print to the console by default.
+
+Morphed Set
+-----------
+
+``morph_templates``
+    Dict of JSON-e templates to apply to each task, keyed by template name.
+    Values are extra context that will be available to the template under the
+    ``input.<template>`` key. Available templates live in
+    ``taskcluster/taskgraph/templates``. Enabled on try only.
--- a/taskcluster/taskgraph/decision.py
+++ b/taskcluster/taskgraph/decision.py
@@ -12,17 +12,16 @@ import re
 
 import time
 import yaml
 
 from .generator import TaskGraphGenerator
 from .create import create_tasks
 from .parameters import Parameters
 from .taskgraph import TaskGraph
-from .try_option_syntax import parse_message
 from .actions import render_actions_json
 from taskgraph.util.partials import populate_release_history
 from . import GECKO
 
 from taskgraph.util.templates import Templates
 from taskgraph.util.time import (
     json_time_from_now,
     current_json_time,
@@ -32,16 +31,20 @@ logger = logging.getLogger(__name__)
 
 ARTIFACTS_DIR = 'artifacts'
 
 # For each project, this gives a set of parameters specific to the project.
 # See `taskcluster/docs/parameters.rst` for information on parameters.
 PER_PROJECT_PARAMETERS = {
     'try': {
         'target_tasks_method': 'try_tasks',
+        # Always perform optimization.  This makes it difficult to use try
+        # pushes to run a task that would otherwise be optimized, but is a
+        # compromise to avoid essentially disabling optimization in try.
+        'optimize_target_tasks': True,
         # By default, the `try_option_syntax` `target_task_method` ignores this
         # parameter, and enables/disables nightlies depending whether
         # `--include-nightly` is specified in the commit message.
         # We're setting the `include_nightly` parameter to True here for when
         # we submit decision tasks against Try that use other
         # `target_task_method`s, like `nightly_fennec` or `mozilla_beta_tasks`,
         # which reference the `include_nightly` parameter.
         'include_nightly': True,
@@ -160,16 +163,18 @@ def get_decision_parameters(options):
     ] if n in options}
 
     # Define default filter list, as most configurations shouldn't need
     # custom filters.
     parameters['filters'] = [
         'check_servo',
         'target_tasks_method',
     ]
+    parameters['target_task_labels'] = []
+    parameters['morph_templates'] = {}
 
     # owner must be an email, but sometimes (e.g., for ffxbld) it is not, in which
     # case, fake it
     if '@' not in parameters['owner']:
         parameters['owner'] += '@noreply.mozilla.org'
 
     # use the pushdate as build_date if given, else use current time
     parameters['build_date'] = parameters['pushdate'] or int(time.time())
@@ -181,63 +186,36 @@ def get_decision_parameters(options):
     try:
         parameters.update(PER_PROJECT_PARAMETERS[project])
     except KeyError:
         logger.warning("using default project parameters; add {} to "
                        "PER_PROJECT_PARAMETERS in {} to customize behavior "
                        "for this project".format(project, __file__))
         parameters.update(PER_PROJECT_PARAMETERS['default'])
 
+    # morph_templates and target_task_labels are only used on try, so don't
+    # bother loading them elsewhere
+    task_config_file = os.path.join(GECKO, 'try_task_config.json')
+    if project == 'try' and os.path.isfile(task_config_file):
+        with open(task_config_file, 'r') as fh:
+            task_config = json.load(fh)
+        parameters['morph_templates'] = task_config.get('templates', {})
+        parameters['target_task_labels'] = task_config.get('tasks')
+
     # `target_tasks_method` has higher precedence than `project` parameters
     if options.get('target_tasks_method'):
         parameters['target_tasks_method'] = options['target_tasks_method']
 
     # If the target method is nightly, we should build partials. This means
     # knowing what has been released previously.
     # An empty release_history is fine, it just means no partials will be built
     parameters.setdefault('release_history', dict())
     if 'nightly' in parameters.get('target_tasks_method', ''):
         parameters['release_history'] = populate_release_history('Firefox', project)
 
-    # if try_task_config.json is present, load it
-    task_config_file = os.path.join(os.getcwd(), 'try_task_config.json')
-
-    # load try settings
-    parameters['try_mode'] = None
-    if os.path.isfile(task_config_file):
-        parameters['try_mode'] = 'try_task_config'
-        with open(task_config_file, 'r') as fh:
-            parameters['try_task_config'] = json.load(fh)
-    else:
-        parameters['try_task_config'] = None
-
-    if 'try:' in parameters['message']:
-        parameters['try_mode'] = 'try_option_syntax'
-        args = parse_message(parameters['message'])
-        parameters['try_options'] = args
-    else:
-        parameters['try_options'] = None
-
-    parameters['optimize_target_tasks'] = {
-        # The user has explicitly requested a set of jobs, so run them all
-        # regardless of optimization.  Their dependencies can be optimized,
-        # though.
-        'try_task_config': False,
-
-        # Always perform optimization.  This makes it difficult to use try
-        # pushes to run a task that would otherwise be optimized, but is a
-        # compromise to avoid essentially disabling optimization in try.
-        # to run tasks that would otherwise be optimized, ues try_task_config.
-        'try_option_syntax': True,
-
-        # since no try jobs have been specified, the standard target task will
-        # be applied, and tasks should be optimized out of that.
-        None: True,
-    }[parameters['try_mode']]
-
     return Parameters(parameters)
 
 
 def write_artifact(filename, data):
     logger.info('writing artifact file `{}`'.format(filename))
     if not os.path.isdir(ARTIFACTS_DIR):
         os.mkdir(ARTIFACTS_DIR)
     path = os.path.join(ARTIFACTS_DIR, filename)
--- a/taskcluster/taskgraph/generator.py
+++ b/taskcluster/taskgraph/generator.py
@@ -37,16 +37,22 @@ class Kind(object):
         except KeyError:
             raise KeyError("{!r} does not define `loader`".format(self.path))
         return find_object(loader)
 
     def load_tasks(self, parameters, loaded_tasks):
         loader = self._get_loader()
         config = copy.deepcopy(self.config)
 
+        if 'parse-commit' in self.config:
+            parse_commit = find_object(config['parse-commit'])
+            config['args'] = parse_commit(parameters['message'])
+        else:
+            config['args'] = None
+
         kind_dependencies = config.get('kind-dependencies', [])
         kind_dependencies_tasks = [task for task in loaded_tasks
                                    if task.kind in kind_dependencies]
 
         inputs = loader(self.name, self.path, config, parameters, loaded_tasks)
 
         transforms = TransformSequence()
         for xform_path in config['transforms']:
@@ -55,17 +61,17 @@ class Kind(object):
 
         # perform the transformations on the loaded inputs
         trans_config = TransformConfig(self.name, self.path, config, parameters,
                                        kind_dependencies_tasks)
         tasks = [Task(self.name,
                       label=task_dict['label'],
                       attributes=task_dict['attributes'],
                       task=task_dict['task'],
-                      optimization=task_dict.get('optimization'),
+                      optimizations=task_dict.get('optimizations'),
                       dependencies=task_dict.get('dependencies'))
                  for task_dict in transforms(trans_config, inputs)]
         return tasks
 
 
 class TaskGraphGenerator(object):
     """
     The central controller for taskgraph.  This handles all phases of graph
--- a/taskcluster/taskgraph/graph.py
+++ b/taskcluster/taskgraph/graph.py
@@ -73,49 +73,39 @@ class Graph(object):
             add_edges = set((left, right, name)
                             for (left, right, name) in self.edges
                             if (right if reverse else left) in nodes)
             add_nodes = set((left if reverse else right) for (left, right, _) in add_edges)
             new_nodes = nodes | add_nodes
             new_edges = edges | add_edges
         return Graph(new_nodes, new_edges)
 
-    def _visit(self, reverse):
+    def visit_postorder(self):
+        """
+        Generate a sequence of nodes in postorder, such that every node is
+        visited *after* any nodes it links to.
+
+        Behavior is undefined (read: it will hang) if the graph contains a
+        cycle.
+        """
         queue = collections.deque(sorted(self.nodes))
-        links_by_node = self.reverse_links_dict() if reverse else self.links_dict()
+        links_by_node = self.links_dict()
         seen = set()
         while queue:
             node = queue.popleft()
             if node in seen:
                 continue
             links = links_by_node[node]
             if all((n in seen) for n in links):
                 seen.add(node)
                 yield node
             else:
                 queue.extend(n for n in links if n not in seen)
                 queue.append(node)
 
-    def visit_postorder(self):
-        """
-        Generate a sequence of nodes in postorder, such that every node is
-        visited *after* any nodes it links to.
-
-        Behavior is undefined (read: it will hang) if the graph contains a
-        cycle.
-        """
-        return self._visit(False)
-
-    def visit_preorder(self):
-        """
-        Like visit_postorder, but in reverse: evrey node is visited *before*
-        any nodes it links to.
-        """
-        return self._visit(True)
-
     def links_dict(self):
         """
         Return a dictionary mapping each node to a set of the nodes it links to
         (omitting edge names)
         """
         links = collections.defaultdict(set)
         for left, right, _ in self.edges:
             links[left].add(right)
--- a/taskcluster/taskgraph/morph.py
+++ b/taskcluster/taskgraph/morph.py
@@ -278,15 +278,13 @@ class apply_jsone_templates(object):
         return taskgraph, label_to_taskid
 
 
 def morph(taskgraph, label_to_taskid, parameters):
     """Apply all morphs"""
     morphs = [
         add_index_tasks,
         add_s3_uploader_task,
+        apply_jsone_templates(parameters.get('morph_templates')),
     ]
-    if parameters['try_mode'] == 'try_task_config':
-        morphs.append(apply_jsone_templates(parameters['try_task_config'].get('templates')))
-
     for m in morphs:
         taskgraph, label_to_taskid = m(taskgraph, label_to_taskid)
     return taskgraph, label_to_taskid
--- a/taskcluster/taskgraph/optimize.py
+++ b/taskcluster/taskgraph/optimize.py
@@ -1,383 +1,219 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
-"""
-The objective of optimization is to remove as many tasks from the graph as
-possible, as efficiently as possible, thereby delivering useful results as
-quickly as possible.  For example, ideally if only a test script is modified in
-a push, then the resulting graph contains only the corresponding test suite
-task.
-
-See ``taskcluster/docs/optimization.rst`` for more information.
-"""
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import logging
 import os
 import requests
-from collections import defaultdict
 
 from .graph import Graph
 from . import files_changed
 from .taskgraph import TaskGraph
 from .util.seta import is_low_value_task
 from .util.taskcluster import find_task_id
 from .util.parameterization import resolve_task_references
-from mozbuild.util import memoize
 from slugid import nice as slugid
-from mozbuild.base import MozbuildObject
 
 logger = logging.getLogger(__name__)
 
-TOPSRCDIR = os.path.abspath(os.path.join(__file__, '../../../'))
+_optimizations = {}
 
 
-def optimize_task_graph(target_task_graph, params, do_not_optimize,
-                        existing_tasks=None, strategies=None):
+def optimize_task_graph(target_task_graph, params, do_not_optimize, existing_tasks=None):
     """
-    Perform task optimization, returning a taskgraph and a map from label to
-    assigned taskId, including replacement tasks.
+    Perform task optimization, without optimizing tasks named in
+    do_not_optimize.
     """
+    named_links_dict = target_task_graph.graph.named_links_dict()
     label_to_taskid = {}
-    if not existing_tasks:
-        existing_tasks = {}
-
-    # instantiate the strategies for this optimization process
-    if not strategies:
-        strategies = _make_default_strategies()
-
-    optimizations = _get_optimizations(target_task_graph, strategies)
 
-    removed_tasks = remove_tasks(
-        target_task_graph=target_task_graph,
-        optimizations=optimizations,
-        params=params,
-        do_not_optimize=do_not_optimize)
-
-    replaced_tasks = replace_tasks(
-        target_task_graph=target_task_graph,
-        optimizations=optimizations,
-        params=params,
-        do_not_optimize=do_not_optimize,
-        label_to_taskid=label_to_taskid,
-        existing_tasks=existing_tasks,
-        removed_tasks=removed_tasks)
-
-    return get_subgraph(
-            target_task_graph, removed_tasks, replaced_tasks,
-            label_to_taskid), label_to_taskid
+    # This proceeds in two phases.  First, mark all optimized tasks (those
+    # which will be removed from the graph) as such, including a replacement
+    # taskId where applicable.  Second, generate a new task graph containing
+    # only the non-optimized tasks, with all task labels resolved to taskIds
+    # and with task['dependencies'] populated.
+    annotate_task_graph(target_task_graph=target_task_graph,
+                        params=params,
+                        do_not_optimize=do_not_optimize,
+                        named_links_dict=named_links_dict,
+                        label_to_taskid=label_to_taskid,
+                        existing_tasks=existing_tasks)
+    return get_subgraph(target_task_graph, named_links_dict, label_to_taskid), label_to_taskid
 
 
-def _make_default_strategies():
-    return {
-        'never': OptimizationStrategy(),  # "never" is the default behavior
-        'index-search': IndexSearch(),
-        'seta': SETA(),
-        'skip-unless-changed': SkipUnlessChanged(),
-        'skip-unless-schedules': SkipUnlessSchedules(),
-        'skip-unless-schedules-or-seta': Either(SkipUnlessSchedules(), SETA()),
-        'only-if-dependencies-run': OnlyIfDependenciesRun(),
-    }
-
+def optimize_task(task, params):
+    """
+    Optimize a single task by running its optimizations in order until one
+    succeeds.
+    """
+    for opt in task.optimizations:
+        opt_type, args = opt[0], opt[1:]
+        opt_fn = _optimizations[opt_type]
+        opt_result = opt_fn(task, params, *args)
+        if opt_result:
+            return opt_result
 
-def _get_optimizations(target_task_graph, strategies):
-    def optimizations(label):
-        task = target_task_graph.tasks[label]
-        if task.optimization:
-            opt_by, arg = task.optimization.items()[0]
-            return (opt_by, strategies[opt_by], arg)
-        else:
-            return ('never', strategies['never'], None)
-    return optimizations
-
-
-def _log_optimization(verb, opt_counts):
-    if opt_counts:
-        logger.info(
-            '{} '.format(verb.title()) +
-            ', '.join(
-                '{} tasks by {}'.format(c, b)
-                for b, c in sorted(opt_counts.iteritems())) +
-            ' during optimization.')
-    else:
-        logger.info('No tasks {} during optimization'.format(verb))
+    return False
 
 
-def remove_tasks(target_task_graph, params, optimizations, do_not_optimize):
+def annotate_task_graph(target_task_graph, params, do_not_optimize,
+                        named_links_dict, label_to_taskid, existing_tasks):
     """
-    Implement the "Removing Tasks" phase, returning a set of task labels of all removed tasks.
+    Annotate each task in the graph with .optimized (boolean) and .task_id
+    (possibly None), following the rules for optimization and calling the task
+    kinds' `optimize_task` method.
+
+    As a side effect, label_to_taskid is updated with labels for all optimized
+    tasks that are replaced with existing tasks.
     """
-    opt_counts = defaultdict(int)
-    removed = set()
-    reverse_links_dict = target_task_graph.graph.reverse_links_dict()
+
+    # set .optimized for all tasks, and .task_id for optimized tasks
+    # with replacements
+    for label in target_task_graph.graph.visit_postorder():
+        task = target_task_graph.tasks[label]
+        named_task_dependencies = named_links_dict.get(label, {})
 
-    for label in target_task_graph.graph.visit_preorder():
-        # if we're not allowed to optimize, that's easy..
-        if label in do_not_optimize:
-            continue
+        # check whether any dependencies have been optimized away
+        dependencies = [target_task_graph.tasks[l] for l in named_task_dependencies.itervalues()]
+        for t in dependencies:
+            if t.optimized and not t.task_id:
+                raise Exception(
+                    "task {} was optimized away, but {} depends on it".format(
+                        t.label, label))
 
-        # if there are remaining tasks depending on this one, do not remove..
-        if any(l not in removed for l in reverse_links_dict[label]):
-            continue
+        # if this task is blacklisted, don't even consider optimizing
+        replacement_task_id = None
+        if label in do_not_optimize:
+            optimized = False
+        # Let's check whether this task has been created before
+        elif existing_tasks is not None and label in existing_tasks:
+            optimized = True
+            replacement_task_id = existing_tasks[label]
+        # otherwise, examine the task itself (which may be an expensive operation)
+        else:
+            opt_result = optimize_task(task, params)
+
+            # use opt_result to determine values for optimized, replacement_task_id
+            optimized = bool(opt_result)
+            replacement_task_id = opt_result if opt_result and opt_result is not True else None
 
-        # call the optimization strategy
-        task = target_task_graph.tasks[label]
-        opt_by, opt, arg = optimizations(label)
-        if opt.should_remove_task(task, params, arg):
-            removed.add(label)
-            opt_counts[opt_by] += 1
-            continue
+        task.optimized = optimized
+        task.task_id = replacement_task_id
+        if replacement_task_id:
+            label_to_taskid[label] = replacement_task_id
 
-    _log_optimization('removed', opt_counts)
-    return removed
+        if optimized:
+            if replacement_task_id:
+                logger.debug("optimizing `{}`, replacing with task `{}`"
+                             .format(label, replacement_task_id))
+            else:
+                logger.debug("optimizing `{}` away".format(label))
+                # note: any dependent tasks will fail when they see this
+        else:
+            if replacement_task_id:
+                raise Exception("{}: optimize_task returned False with a taskId".format(label))
 
 
-def replace_tasks(target_task_graph, params, optimizations, do_not_optimize,
-                  label_to_taskid, removed_tasks, existing_tasks):
-    """
-    Implement the "Replacing Tasks" phase, returning a set of task labels of
-    all replaced tasks. The replacement taskIds are added to label_to_taskid as
-    a side-effect.
+def get_subgraph(annotated_task_graph, named_links_dict, label_to_taskid):
     """
-    opt_counts = defaultdict(int)
-    replaced = set()
-    links_dict = target_task_graph.graph.links_dict()
-
-    for label in target_task_graph.graph.visit_postorder():
-        # if we're not allowed to optimize, that's easy..
-        if label in do_not_optimize:
-            continue
-
-        # if this task depends on un-replaced, un-removed tasks, do not replace
-        if any(l not in replaced and l not in removed_tasks for l in links_dict[label]):
-            continue
-
-        # if the task already exists, that's an easy replacement
-        repl = existing_tasks.get(label)
-        if repl:
-            label_to_taskid[label] = repl
-            replaced.add(label)
-            opt_counts['existing_tasks'] += 1
-            continue
-
-        # call the optimization strategy
-        task = target_task_graph.tasks[label]
-        opt_by, opt, arg = optimizations(label)
-        repl = opt.should_replace_task(task, params, arg)
-        if repl:
-            if repl is True:
-                # True means remove this task; get_subgraph will catch any
-                # problems with removed tasks being depended on
-                removed_tasks.add(label)
-            else:
-                label_to_taskid[label] = repl
-                replaced.add(label)
-            opt_counts[opt_by] += 1
-            continue
-
-    _log_optimization('replaced', opt_counts)
-    return replaced
-
-
-def get_subgraph(target_task_graph, removed_tasks, replaced_tasks, label_to_taskid):
-    """
-    Return the subgraph of target_task_graph consisting only of
+    Return the subgraph of annotated_task_graph consisting only of
     non-optimized tasks and edges between them.
 
     To avoid losing track of taskIds for tasks optimized away, this method
     simultaneously substitutes real taskIds for task labels in the graph, and
     populates each task definition's `dependencies` key with the appropriate
     taskIds.  Task references are resolved in the process.
     """
 
-    # check for any dependency edges from included to removed tasks
-    bad_edges = [(l, r, n) for l, r, n in target_task_graph.graph.edges
-                 if l not in removed_tasks and r in removed_tasks]
-    if bad_edges:
-        probs = ', '.join('{} depends on {} as {} but it has been removed'.format(l, r, n)
-                          for l, r, n in bad_edges)
-        raise Exception("Optimization error: " + probs)
-
-    # fill in label_to_taskid for anything not removed or replaced
-    assert replaced_tasks <= set(label_to_taskid)
-    for label in sorted(target_task_graph.graph.nodes - removed_tasks - set(label_to_taskid)):
-        label_to_taskid[label] = slugid()
-
     # resolve labels to taskIds and populate task['dependencies']
     tasks_by_taskid = {}
-    named_links_dict = target_task_graph.graph.named_links_dict()
-    omit = removed_tasks | replaced_tasks
-    for label, task in target_task_graph.tasks.iteritems():
-        if label in omit:
+    for label in annotated_task_graph.graph.visit_postorder():
+        task = annotated_task_graph.tasks[label]
+        if task.optimized:
             continue
-        task.task_id = label_to_taskid[label]
+        task.task_id = label_to_taskid[label] = slugid()
         named_task_dependencies = {
-            name: label_to_taskid[label]
-            for name, label in named_links_dict.get(label, {}).iteritems()}
+                name: label_to_taskid[label]
+                for name, label in named_links_dict.get(label, {}).iteritems()}
         task.task = resolve_task_references(task.label, task.task, named_task_dependencies)
-        deps = task.task.setdefault('dependencies', [])
-        deps.extend(sorted(named_task_dependencies.itervalues()))
+        task.task.setdefault('dependencies', []).extend(named_task_dependencies.itervalues())
         tasks_by_taskid[task.task_id] = task
 
     # resolve edges to taskIds
     edges_by_taskid = (
         (label_to_taskid.get(left), label_to_taskid.get(right), name)
-        for (left, right, name) in target_task_graph.graph.edges
-    )
-    # ..and drop edges that are no longer entirely in the task graph
-    #   (note that this omits edges to replaced tasks, but they are still in task.dependnecies)
+        for (left, right, name) in annotated_task_graph.graph.edges
+        )
+    # ..and drop edges that are no longer in the task graph
     edges_by_taskid = set(
         (left, right, name)
         for (left, right, name) in edges_by_taskid
         if left in tasks_by_taskid and right in tasks_by_taskid
-    )
+        )
 
     return TaskGraph(
         tasks_by_taskid,
         Graph(set(tasks_by_taskid), edges_by_taskid))
 
 
-class OptimizationStrategy(object):
-    def should_remove_task(self, task, params, arg):
-        """Determine whether to optimize this task by removing it.  Returns
-        True to remove."""
-        return False
+def optimization(name):
+    def wrap(func):
+        if name in _optimizations:
+            raise Exception("multiple optimizations with name {}".format(name))
+        _optimizations[name] = func
+        return func
+    return wrap
+
+
+@optimization('index-search')
+def opt_index_search(task, params, index_path):
+    try:
+        task_id = find_task_id(
+            index_path,
+            use_proxy=bool(os.environ.get('TASK_ID')))
+
+        return task_id or True
+    except requests.exceptions.HTTPError:
+        pass
+
+    return False
 
-    def should_replace_task(self, task, params, arg):
-        """Determine whether to optimize this task by replacing it.  Returns a
-        taskId to replace this task, True to replace with nothing, or False to
-        keep the task."""
+
+@optimization('seta')
+def opt_seta(task, params):
+    bbb_task = False
+
+    # for bbb tasks we need to send in the buildbot buildername
+    if task.task.get('provisionerId', '') == 'buildbot-bridge':
+        label = task.task.get('payload').get('buildername')
+        bbb_task = True
+    else:
+        label = task.label
+
+    # we would like to return 'False, None' while it's high_value_task
+    # and we wouldn't optimize it. Otherwise, it will return 'True, None'
+    if is_low_value_task(label,
+                         params.get('project'),
+                         params.get('pushlog_id'),
+                         params.get('pushdate'),
+                         bbb_task):
+        # Always optimize away low-value tasks
+        return True
+    else:
         return False
 
 
-class Either(OptimizationStrategy):
-    """Given one or more optimization strategies, remove a task if any of them
-    says to, and replace with a task if any finds a replacement (preferring the
-    earliest).  By default, each substrategy gets the same arg, but split_args
-    can return a list of args for each strategy, if desired."""
-    def __init__(self, *substrategies, **kwargs):
-        self.substrategies = substrategies
-        self.split_args = kwargs.pop('split_args', None)
-        if not self.split_args:
-            self.split_args = lambda arg: [arg] * len(substrategies)
-        if kwargs:
-            raise TypeError("unexpected keyword args")
-
-    def _for_substrategies(self, arg, fn):
-        for sub, arg in zip(self.substrategies, self.split_args(arg)):
-            rv = fn(sub, arg)
-            if rv:
-                return rv
-        return False
-
-    def should_remove_task(self, task, params, arg):
-        return self._for_substrategies(
-            arg,
-            lambda sub, arg: sub.should_remove_task(task, params, arg))
-
-    def should_replace_task(self, task, params, arg):
-        return self._for_substrategies(
-            arg,
-            lambda sub, arg: sub.should_replace_task(task, params, arg))
-
-
-class OnlyIfDependenciesRun(OptimizationStrategy):
-    """Run this taks only if its dependencies run."""
-
-    # This takes advantage of the behavior of the second phase of optimization:
-    # a task can only be replaced if it has no un-optimized dependencies. So if
-    # should_replace_task is called, then a task has no un-optimized
-    # dependencies and can be removed (indicated by returning True)
-
-    def should_replace_task(self, task, params, arg):
-        return True
-
-
-class IndexSearch(OptimizationStrategy):
-    def should_remove_task(self, task, params, index_paths):
-        "If this task has no dependencies, don't run it.."
+@optimization('skip-unless-changed')
+def opt_files_changed(task, params, file_patterns):
+    # pushlog_id == -1 - this is the case when run from a cron.yml job
+    if params.get('pushlog_id') == -1:
         return True
 
-    def should_replace_task(self, task, params, index_paths):
-        "Look for a task with one of the given index paths"
-        for index_path in index_paths:
-            try:
-                task_id = find_task_id(
-                    index_path,
-                    use_proxy=bool(os.environ.get('TASK_ID')))
-                return task_id
-            except requests.exceptions.HTTPError:
-                # 404 will end up here and go on to the next index path
-                pass
-
-        return False
-
-
-class SETA(OptimizationStrategy):
-    def should_remove_task(self, task, params, _):
-        bbb_task = False
-
-        # for bbb tasks we need to send in the buildbot buildername
-        if task.task.get('provisionerId', '') == 'buildbot-bridge':
-            label = task.task.get('payload').get('buildername')
-            bbb_task = True
-        else:
-            label = task.label
-
-        # we would like to return 'False, None' while it's high_value_task
-        # and we wouldn't optimize it. Otherwise, it will return 'True, None'
-        if is_low_value_task(label,
-                             params.get('project'),
-                             params.get('pushlog_id'),
-                             params.get('pushdate'),
-                             bbb_task):
-            # Always optimize away low-value tasks
-            return True
-        else:
-            return False
-
-
-class SkipUnlessChanged(OptimizationStrategy):
-    def should_remove_task(self, task, params, file_patterns):
-        # pushlog_id == -1 - this is the case when run from a cron.yml job
-        if params.get('pushlog_id') == -1:
-            return False
-
-        changed = files_changed.check(params, file_patterns)
-        if not changed:
-            logger.debug('no files found matching a pattern in `skip-unless-changed` for ' +
-                         task.label)
-            return True
-        return False
-
-
-class SkipUnlessSchedules(OptimizationStrategy):
-
-    @memoize
-    def scheduled_by_push(self, repository, revision):
-        changed_files = files_changed.get_changed_files(repository, revision)
-
-        mbo = MozbuildObject.from_environment()
-        # the decision task has a sparse checkout, so, mozbuild_reader will use
-        # a MercurialRevisionFinder with revision '.', which should be the same
-        # as `revision`; in other circumstances, it will use a default reader
-        rdr = mbo.mozbuild_reader(config_mode='empty')
-
-        components = set()
-        for p, m in rdr.files_info(changed_files).items():
-            components |= set(m['SCHEDULES'].components)
-
-        return components
-
-    def should_remove_task(self, task, params, conditions):
-        if params.get('pushlog_id') == -1:
-            return False
-
-        scheduled = self.scheduled_by_push(params['head_repository'], params['head_rev'])
-        conditions = set(conditions)
-        # if *any* of the condition components are scheduled, do not optimize
-        if conditions & scheduled:
-            return False
-
+    changed = files_changed.check(params, file_patterns)
+    if not changed:
+        logger.debug('no files found matching a pattern in `skip-unless-changed` for ' +
+                     task.label)
         return True
+    return False
--- a/taskcluster/taskgraph/parameters.py
+++ b/taskcluster/taskgraph/parameters.py
@@ -16,37 +16,41 @@ PARAMETER_NAMES = set([
     'build_date',
     'filters',
     'head_ref',
     'head_repository',
     'head_rev',
     'include_nightly',
     'level',
     'message',
+    'morph_templates',
     'moz_build_date',
     'optimize_target_tasks',
     'owner',
     'project',
     'pushdate',
     'pushlog_id',
     'release_history',
+    'target_task_labels',
     'target_tasks_method',
-    'try_mode',
-    'try_options',
-    'try_task_config',
+])
+
+TRY_ONLY_PARAMETERS = set([
+    'morph_templates',
+    'target_task_labels',
 ])
 
 
 class Parameters(ReadOnlyDict):
     """An immutable dictionary with nicer KeyError messages on failure"""
     def check(self):
         names = set(self)
         msg = []
 
-        missing = PARAMETER_NAMES - names
+        missing = PARAMETER_NAMES - TRY_ONLY_PARAMETERS - names
         if missing:
             msg.append("missing parameters: " + ", ".join(missing))
 
         extra = names - PARAMETER_NAMES
         if extra:
             msg.append("extra parameters: " + ", ".join(extra))
 
         if msg:
--- a/taskcluster/taskgraph/target_tasks.py
+++ b/taskcluster/taskgraph/target_tasks.py
@@ -1,19 +1,22 @@
 # -*- coding: utf-8 -*-
 
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
+import os
+
 from taskgraph import try_option_syntax
 from taskgraph.util.attributes import match_run_on_projects
 
+here = os.path.abspath(os.path.dirname(__file__))
 _target_task_methods = {}
 
 
 def _target_task(name):
     def wrap(func):
         _target_task_methods[name] = func
         return func
     return wrap
@@ -45,24 +48,27 @@ def filter_upload_symbols(task, paramete
 def standard_filter(task, parameters):
     return all(
         filter_func(task, parameters) for filter_func in
         (filter_on_nightly, filter_for_project, filter_upload_symbols)
     )
 
 
 def _try_task_config(full_task_graph, parameters):
-    requested_tasks = parameters['try_task_config']['tasks']
-    return list(set(requested_tasks) & full_task_graph.graph.nodes)
+    if not parameters.get('target_task_labels'):
+        return []
+
+    return [t.label for t in full_task_graph.tasks.itervalues()
+            if t.label in parameters['target_task_labels']]
 
 
 def _try_option_syntax(full_task_graph, parameters):
     """Generate a list of target tasks based on try syntax in
     parameters['message'] and, for context, the full task graph."""
-    options = try_option_syntax.TryOptionSyntax(parameters, full_task_graph)
+    options = try_option_syntax.TryOptionSyntax(parameters['message'], full_task_graph)
     target_tasks_labels = [t.label for t in full_task_graph.tasks.itervalues()
                            if options.task_matches(t)]
 
     attributes = {
         k: getattr(options, k) for k in [
             'env',
             'no_retry',
             'tag',
@@ -99,33 +105,29 @@ def _try_option_syntax(full_task_graph, 
                 routes.append("notify.email.{}.on-failed".format(owner))
                 routes.append("notify.email.{}.on-exception".format(owner))
 
     return target_tasks_labels
 
 
 @_target_task('try_tasks')
 def target_tasks_try(full_task_graph, parameters):
-    try_mode = parameters['try_mode']
-    if try_mode == 'try_task_config':
-        return _try_task_config(full_task_graph, parameters)
-    elif try_mode == 'try_option_syntax':
-        return _try_option_syntax(full_task_graph, parameters)
-    else:
-        # With no try mode, we would like to schedule everything (following
-        # run_on_projects) and let optimization trim it down.  But optimization
-        # isn't yet up to the task, so instead we use try_option_syntax with
-        # an empty message (which basically just schedules `-j`objs)
-        return _try_option_syntax(full_task_graph, parameters)
+    labels = _try_task_config(full_task_graph, parameters)
+
+    if 'try:' in parameters['message'] or not labels:
+        labels.extend(_try_option_syntax(full_task_graph, parameters))
+
+    return labels
 
 
 @_target_task('default')
 def target_tasks_default(full_task_graph, parameters):
     """Target the tasks which have indicated they should be run on this project
     via the `run_on_projects` attributes."""
+
     return [l for l, t in full_task_graph.tasks.iteritems()
             if standard_filter(t, parameters)]
 
 
 @_target_task('ash_tasks')
 def target_tasks_ash(full_task_graph, parameters):
     """Target tasks that only run on the ash branch."""
     def filter(task):
--- a/taskcluster/taskgraph/task.py
+++ b/taskcluster/taskgraph/task.py
@@ -8,62 +8,64 @@ from __future__ import absolute_import, 
 class Task(object):
     """
     Representation of a task in a TaskGraph.  Each Task has, at creation:
 
     - kind: the name of the task kind
     - label; the label for this task
     - attributes: a dictionary of attributes for this task (used for filtering)
     - task: the task definition (JSON-able dictionary)
-    - optimization: optimization to apply to the task (see taskgraph.optimize)
+    - optimizations: optimizations to apply to the task (see taskgraph.optimize)
     - dependencies: tasks this one depends on, in the form {name: label}, for example
       {'build': 'build-linux64/opt', 'docker-image': 'build-docker-image-desktop-test'}
 
     And later, as the task-graph processing proceeds:
 
     - task_id -- TaskCluster taskId under which this task will be created
+    - optimized -- true if this task need not be performed
 
     This class is just a convenience wraper for the data type and managing
     display, comparison, serialization, etc. It has no functionality of its own.
     """
     def __init__(self, kind, label, attributes, task,
-                 optimization=None, dependencies=None):
+                 optimizations=None, dependencies=None):
         self.kind = kind
         self.label = label
         self.attributes = attributes
         self.task = task
 
         self.task_id = None
+        self.optimized = False
 
         self.attributes['kind'] = kind
 
-        self.optimization = optimization
+        self.optimizations = optimizations or []
         self.dependencies = dependencies or {}
 
     def __eq__(self, other):
         return self.kind == other.kind and \
             self.label == other.label and \
             self.attributes == other.attributes and \
             self.task == other.task and \
             self.task_id == other.task_id and \
-            self.optimization == other.optimization and \
+            self.optimizations == other.optimizations and \
             self.dependencies == other.dependencies
 
     def __repr__(self):
         return ('Task({kind!r}, {label!r}, {attributes!r}, {task!r}, '
-                'optimization={optimization!r}, '
+                'optimizations={optimizations!r}, '
                 'dependencies={dependencies!r})'.format(**self.__dict__))
 
     def to_json(self):
         rv = {
             'kind': self.kind,
             'label': self.label,
             'attributes': self.attributes,
             'dependencies': self.dependencies,
-            'optimization': self.optimization,
+            'optimizations': self.optimizations,
             'task': self.task,
         }
         if self.task_id:
             rv['task_id'] = self.task_id
         return rv
 
     @classmethod
     def from_json(cls, task_dict):
@@ -72,13 +74,13 @@ class Task(object):
         the original Task object.  This is used to "resume" the task-graph
         generation process, for example in Action tasks.
         """
         rv = cls(
             kind=task_dict['kind'],
             label=task_dict['label'],
             attributes=task_dict['attributes'],
             task=task_dict['task'],
-            optimization=task_dict['optimization'],
+            optimizations=task_dict['optimizations'],
             dependencies=task_dict.get('dependencies'))
         if 'task_id' in task_dict:
             rv.task_id = task_dict['task_id']
         return rv
--- a/taskcluster/taskgraph/test/test_decision.py
+++ b/taskcluster/taskgraph/test/test_decision.py
@@ -7,17 +7,17 @@ from __future__ import absolute_import, 
 import os
 import json
 import yaml
 import shutil
 import unittest
 import tempfile
 
 from taskgraph import decision
-from mozunit import main, MockedOpen
+from mozunit import main
 
 
 class TestDecision(unittest.TestCase):
 
     def test_write_artifact_json(self):
         data = [{'some': 'data'}]
         tmpdir = tempfile.mkdtemp()
         try:
@@ -39,58 +39,10 @@ class TestDecision(unittest.TestCase):
             with open(os.path.join(decision.ARTIFACTS_DIR, "artifact.yml")) as f:
                 self.assertEqual(yaml.safe_load(f), data)
         finally:
             if os.path.exists(tmpdir):
                 shutil.rmtree(tmpdir)
             decision.ARTIFACTS_DIR = 'artifacts'
 
 
-class TestGetDecisionParameters(unittest.TestCase):
-
-    def setUp(self):
-        self.options = {
-            'base_repository': 'https://hg.mozilla.org/mozilla-unified',
-            'head_repository': 'https://hg.mozilla.org/mozilla-central',
-            'head_rev': 'abcd',
-            'head_ref': 'ef01',
-            'message': '',
-            'project': 'mozilla-central',
-            'pushlog_id': 143,
-            'pushdate': 1503691511,
-            'owner': 'nobody@mozilla.com',
-            'level': 3,
-        }
-
-    def test_simple_options(self):
-        params = decision.get_decision_parameters(self.options)
-        self.assertEqual(params['pushlog_id'], 143)
-        self.assertEqual(params['build_date'], 1503691511)
-        self.assertEqual(params['moz_build_date'], '20170825200511')
-        self.assertEqual(params['try_mode'], None)
-        self.assertEqual(params['try_options'], None)
-        self.assertEqual(params['try_task_config'], None)
-
-    def test_no_email_owner(self):
-        self.options['owner'] = 'ffxbld'
-        params = decision.get_decision_parameters(self.options)
-        self.assertEqual(params['owner'], 'ffxbld@noreply.mozilla.org')
-
-    def test_try_options(self):
-        self.options['message'] = 'try: -b do -t all'
-        params = decision.get_decision_parameters(self.options)
-        self.assertEqual(params['try_mode'], 'try_option_syntax')
-        self.assertEqual(params['try_options']['build_types'], 'do')
-        self.assertEqual(params['try_options']['unittests'], 'all')
-        self.assertEqual(params['try_task_config'], None)
-
-    def test_try_task_config(self):
-        ttc = {'tasks': ['a', 'b'], 'templates': {}}
-        ttc_file = os.path.join(os.getcwd(), 'try_task_config.json')
-        with MockedOpen({ttc_file: json.dumps(ttc)}):
-            params = decision.get_decision_parameters(self.options)
-            self.assertEqual(params['try_mode'], 'try_task_config')
-            self.assertEqual(params['try_options'], None)
-            self.assertEqual(params['try_task_config'], ttc)
-
-
 if __name__ == '__main__':
     main()
--- a/taskcluster/taskgraph/test/test_generator.py
+++ b/taskcluster/taskgraph/test/test_generator.py
@@ -54,17 +54,16 @@ class TestGenerator(unittest.TestCase):
         def target_tasks_method(full_task_graph, parameters):
             return self.target_tasks
 
         target_tasks_mod._target_task_methods['test_method'] = target_tasks_method
 
         parameters = {
             '_kinds': kinds,
             'target_tasks_method': 'test_method',
-            'try_mode': None,
         }
 
         return WithFakeKind('/root', parameters)
 
     def test_kind_ordering(self):
         "When task kinds depend on each other, they are loaded in postorder"
         self.tgg = self.maketgg(kinds=[
             ('_fake3', ['_fake2', '_fake1']),
--- a/taskcluster/taskgraph/test/test_graph.py
+++ b/taskcluster/taskgraph/test/test_graph.py
@@ -124,41 +124,16 @@ class TestGraph(unittest.TestCase):
     def test_visit_postorder_multi_edges(self):
         "postorder visit of a graph with duplicate edges satisfies invariant"
         self.assert_postorder(self.multi_edges.visit_postorder(), self.multi_edges.nodes)
 
     def test_visit_postorder_disjoint(self):
         "postorder visit of a disjoint graph satisfies invariant"
         self.assert_postorder(self.disjoint.visit_postorder(), self.disjoint.nodes)
 
-    def assert_preorder(self, seq, all_nodes):
-        seen = set()
-        for e in seq:
-            for l, r, n in self.tree.edges:
-                if r == e:
-                    self.failUnless(l in seen)
-            seen.add(e)
-        self.assertEqual(seen, all_nodes)
-
-    def test_visit_preorder_tree(self):
-        "preorder visit of a tree satisfies invariant"
-        self.assert_preorder(self.tree.visit_preorder(), self.tree.nodes)
-
-    def test_visit_preorder_diamonds(self):
-        "preorder visit of a graph full of diamonds satisfies invariant"
-        self.assert_preorder(self.diamonds.visit_preorder(), self.diamonds.nodes)
-
-    def test_visit_preorder_multi_edges(self):
-        "preorder visit of a graph with duplicate edges satisfies invariant"
-        self.assert_preorder(self.multi_edges.visit_preorder(), self.multi_edges.nodes)
-
-    def test_visit_preorder_disjoint(self):
-        "preorder visit of a disjoint graph satisfies invariant"
-        self.assert_preorder(self.disjoint.visit_preorder(), self.disjoint.nodes)
-
     def test_links_dict(self):
         "link dict for a graph with multiple edges is correct"
         self.assertEqual(self.multi_edges.links_dict(), {
             '2': set(['1']),
             '3': set(['1', '2']),
             '4': set(['3']),
         })
 
--- a/taskcluster/taskgraph/test/test_optimize.py
+++ b/taskcluster/taskgraph/test/test_optimize.py
@@ -1,231 +1,249 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import unittest
 
-from taskgraph import optimize
+from taskgraph.optimize import optimize_task_graph, resolve_task_references, optimization
+from taskgraph.optimize import annotate_task_graph, get_subgraph
 from taskgraph.taskgraph import TaskGraph
 from taskgraph import graph
 from taskgraph.task import Task
 from mozunit import main
-from slugid import nice as slugid
 
 
-class Remove(optimize.OptimizationStrategy):
+class TestResolveTaskReferences(unittest.TestCase):
+
+    def do(self, input, output):
+        taskid_for_edge_name = {'edge%d' % n: 'tid%d' % n for n in range(1, 4)}
+        self.assertEqual(resolve_task_references('subject', input, taskid_for_edge_name), output)
 
-    def should_remove_task(self, task, params, arg):
-        return True
+    def test_in_list(self):
+        "resolve_task_references resolves task references in a list"
+        self.do({'in-a-list': ['stuff', {'task-reference': '<edge1>'}]},
+                {'in-a-list': ['stuff', 'tid1']})
+
+    def test_in_dict(self):
+        "resolve_task_references resolves task references in a dict"
+        self.do({'in-a-dict': {'stuff': {'task-reference': '<edge2>'}}},
+                {'in-a-dict': {'stuff': 'tid2'}})
 
+    def test_multiple(self):
+        "resolve_task_references resolves multiple references in the same string"
+        self.do({'multiple': {'task-reference': 'stuff <edge1> stuff <edge2> after'}},
+                {'multiple': 'stuff tid1 stuff tid2 after'})
 
-class Replace(optimize.OptimizationStrategy):
+    def test_embedded(self):
+        "resolve_task_references resolves embedded references"
+        self.do({'embedded': {'task-reference': 'stuff before <edge3> stuff after'}},
+                {'embedded': 'stuff before tid3 stuff after'})
 
-    def should_replace_task(self, task, params, taskid):
-        return taskid
+    def test_escaping(self):
+        "resolve_task_references resolves escapes in task references"
+        self.do({'escape': {'task-reference': '<<><edge3>>'}},
+                {'escape': '<tid3>'})
+
+    def test_invalid(self):
+        "resolve_task_references raises a KeyError on reference to an invalid task"
+        self.assertRaisesRegexp(
+            KeyError,
+            "task 'subject' has no dependency named 'no-such'",
+            lambda: resolve_task_references('subject', {'task-reference': '<no-such>'}, {})
+        )
 
 
 class TestOptimize(unittest.TestCase):
 
-    strategies = {
-        'never': optimize.OptimizationStrategy(),
-        'remove': Remove(),
-        'replace': Replace(),
-    }
+    kind = None
 
-    def make_task(self, label, optimization=None, task_def=None, optimized=None,
-                  task_id=None, dependencies=None):
+    @classmethod
+    def setUpClass(cls):
+        # set up some simple optimization functions
+        optimization('no-optimize')(lambda self, params: False)
+        optimization('optimize-away')(lambda self, params: True)
+        optimization('optimize-to-task')(lambda self, params, task: task)
+
+    def make_task(self, label, optimization=None, task_def=None, optimized=None, task_id=None):
         task_def = task_def or {'sample': 'task-def'}
         task = Task(kind='test', label=label, attributes={}, task=task_def)
-        task.optimization = optimization
+        task.optimized = optimized
+        if optimization:
+            task.optimizations = [optimization]
+        else:
+            task.optimizations = []
         task.task_id = task_id
-        if dependencies is not None:
-            task.task['dependencies'] = sorted(dependencies)
         return task
 
     def make_graph(self, *tasks_and_edges):
         tasks = {t.label: t for t in tasks_and_edges if isinstance(t, Task)}
         edges = {e for e in tasks_and_edges if not isinstance(e, Task)}
         return TaskGraph(tasks, graph.Graph(set(tasks), edges))
 
-    def make_opt_graph(self, *tasks_and_edges):
-        tasks = {t.task_id: t for t in tasks_and_edges if isinstance(t, Task)}
-        edges = {e for e in tasks_and_edges if not isinstance(e, Task)}
-        return TaskGraph(tasks, graph.Graph(set(tasks), edges))
-
-    def make_triangle(self, **opts):
-        """
-        Make a "triangle" graph like this:
+    def assert_annotations(self, graph, **annotations):
+        def repl(task_id):
+            return 'SLUGID' if task_id and len(task_id) == 22 else task_id
+        got_annotations = {
+            t.label: repl(t.task_id) or t.optimized for t in graph.tasks.itervalues()
+        }
+        self.assertEqual(got_annotations, annotations)
 
-          t1 <-------- t3
-           `---- t2 --'
-        """
-        return self.make_graph(
-            self.make_task('t1', opts.get('t1')),
-            self.make_task('t2', opts.get('t2')),
-            self.make_task('t3', opts.get('t3')),
-            ('t3', 't2', 'dep'),
-            ('t3', 't1', 'dep2'),
-            ('t2', 't1', 'dep'))
-
-    def assert_remove_tasks(self, graph, exp_removed, do_not_optimize=set()):
-        got_removed = optimize.remove_tasks(
-            target_task_graph=graph,
-            optimizations=optimize._get_optimizations(graph, self.strategies),
-            params={},
-            do_not_optimize=do_not_optimize)
-        self.assertEqual(got_removed, exp_removed)
-
-    def test_remove_tasks_never(self):
-        "A graph full of optimization=never has nothing removed"
-        graph = self.make_triangle()
-        self.assert_remove_tasks(graph, set())
+    def test_annotate_task_graph_no_optimize(self):
+        "annotating marks everything as un-optimized if the kind returns that"
+        graph = self.make_graph(
+            self.make_task('task1', ['no-optimize']),
+            self.make_task('task2', ['no-optimize']),
+            self.make_task('task3', ['no-optimize']),
+            ('task2', 'task1', 'build'),
+            ('task2', 'task3', 'image'),
+        )
+        annotate_task_graph(graph, {}, set(), graph.graph.named_links_dict(), {}, None)
+        self.assert_annotations(
+            graph,
+            task1=False,
+            task2=False,
+            task3=False
+        )
 
-    def test_remove_tasks_all(self):
-        "A graph full of optimization=remove has removes everything"
-        graph = self.make_triangle(
-            t1={'remove': None},
-            t2={'remove': None},
-            t3={'remove': None})
-        self.assert_remove_tasks(graph, {'t1', 't2', 't3'})
-
-    def test_remove_tasks_blocked(self):
-        "Removable tasks that are depended on by non-removable tasks are not removed"
-        graph = self.make_triangle(
-            t1={'remove': None},
-            t3={'remove': None})
-        self.assert_remove_tasks(graph, {'t3'})
-
-    def test_remove_tasks_do_not_optimize(self):
-        "Removable tasks that are marked do_not_optimize are not removed"
-        graph = self.make_triangle(
-            t1={'remove': None},
-            t2={'remove': None},  # but do_not_optimize
-            t3={'remove': None})
-        self.assert_remove_tasks(graph, {'t3'}, do_not_optimize={'t2'})
+    def test_annotate_task_graph_optimize_away_dependency(self):
+        "raises exception if kind optimizes away a task on which another depends"
+        graph = self.make_graph(
+            self.make_task('task1', ['optimize-away']),
+            self.make_task('task2', ['no-optimize']),
+            ('task2', 'task1', 'build'),
+        )
+        self.assertRaises(
+            Exception,
+            lambda: annotate_task_graph(graph, {}, set(), graph.graph.named_links_dict(), {}, None)
+        )
 
-    def assert_replace_tasks(self, graph, exp_replaced, exp_removed=set(), exp_label_to_taskid={},
-                             do_not_optimize=None, label_to_taskid=None, removed_tasks=None,
-                             existing_tasks=None):
-        do_not_optimize = do_not_optimize or set()
-        label_to_taskid = label_to_taskid or {}
-        removed_tasks = removed_tasks or set()
-        existing_tasks = existing_tasks or {}
+    def test_annotate_task_graph_do_not_optimize(self):
+        "annotating marks everything as un-optimized if in do_not_optimize"
+        graph = self.make_graph(
+            self.make_task('task1', ['optimize-away']),
+            self.make_task('task2', ['optimize-away']),
+            ('task2', 'task1', 'build'),
+        )
+        label_to_taskid = {}
+        annotate_task_graph(graph, {}, {'task1', 'task2'},
+                            graph.graph.named_links_dict(), label_to_taskid, None)
+        self.assert_annotations(
+            graph,
+            task1=False,
+            task2=False
+        )
+        self.assertEqual
 
-        got_replaced = optimize.replace_tasks(
-            target_task_graph=graph,
-            optimizations=optimize._get_optimizations(graph, self.strategies),
-            params={},
-            do_not_optimize=do_not_optimize,
-            label_to_taskid=label_to_taskid,
-            removed_tasks=removed_tasks,
-            existing_tasks=existing_tasks)
-        self.assertEqual(got_replaced, exp_replaced)
-        self.assertEqual(removed_tasks, exp_removed)
-        self.assertEqual(label_to_taskid, exp_label_to_taskid)
-
-    def test_replace_tasks_never(self):
-        "No tasks are replaced when strategy is 'never'"
-        graph = self.make_triangle()
-        self.assert_replace_tasks(graph, set())
+    def test_annotate_task_graph_nos_do_not_propagate(self):
+        "a task with a non-optimized dependency can be optimized"
+        graph = self.make_graph(
+            self.make_task('task1', ['no-optimize']),
+            self.make_task('task2', ['optimize-to-task', 'taskid']),
+            self.make_task('task3', ['optimize-to-task', 'taskid']),
+            ('task2', 'task1', 'build'),
+            ('task2', 'task3', 'image'),
+        )
+        annotate_task_graph(graph, {}, set(),
+                            graph.graph.named_links_dict(), {}, None)
+        self.assert_annotations(
+            graph,
+            task1=False,
+            task2='taskid',
+            task3='taskid'
+        )
 
-    def test_replace_tasks_all(self):
-        "All replacable tasks are replaced when strategy is 'replace'"
-        graph = self.make_triangle(
-            t1={'replace': 'e1'},
-            t2={'replace': 'e2'},
-            t3={'replace': 'e3'})
-        self.assert_replace_tasks(
-            graph,
-            exp_replaced={'t1', 't2', 't3'},
-            exp_label_to_taskid={'t1': 'e1', 't2': 'e2', 't3': 'e3'})
-
-    def test_replace_tasks_blocked(self):
-        "A task cannot be replaced if it depends on one that was not replaced"
-        graph = self.make_triangle(
-            t1={'replace': 'e1'},
-            t3={'replace': 'e3'})
-        self.assert_replace_tasks(
-            graph,
-            exp_replaced={'t1'},
-            exp_label_to_taskid={'t1': 'e1'})
+    def test_get_subgraph_single_dep(self):
+        "when a single dependency is optimized, it is omitted from the graph"
+        graph = self.make_graph(
+            self.make_task('task1', optimized=True, task_id='dep1'),
+            self.make_task('task2', optimized=False),
+            self.make_task('task3', optimized=False),
+            ('task2', 'task1', 'build'),
+            ('task2', 'task3', 'image'),
+        )
+        label_to_taskid = {'task1': 'dep1'}
+        sub = get_subgraph(graph, graph.graph.named_links_dict(), label_to_taskid)
+        task2 = label_to_taskid['task2']
+        task3 = label_to_taskid['task3']
+        self.assertEqual(sub.graph.nodes, {task2, task3})
+        self.assertEqual(sub.graph.edges, {(task2, task3, 'image')})
+        self.assertEqual(sub.tasks[task2].task_id, task2)
+        self.assertEqual(sorted(sub.tasks[task2].task['dependencies']),
+                         sorted([task3, 'dep1']))
+        self.assertEqual(sub.tasks[task3].task_id, task3)
+        self.assertEqual(sorted(sub.tasks[task3].task['dependencies']), [])
 
-    def test_replace_tasks_do_not_optimize(self):
-        "A task cannot be replaced if it depends on one that was not replaced"
-        graph = self.make_triangle(
-            t1={'replace': 'e1'},
-            t2={'replace': 'xxx'},  # but do_not_optimize
-            t3={'replace': 'e3'})
-        self.assert_replace_tasks(
-            graph,
-            exp_replaced={'t1'},
-            exp_label_to_taskid={'t1': 'e1'},
-            do_not_optimize={'t2'})
-
-    def test_replace_tasks_removed(self):
-        "A task can be replaced with nothing"
-        graph = self.make_triangle(
-            t1={'replace': 'e1'},
-            t2={'replace': True},
-            t3={'replace': True})
-        self.assert_replace_tasks(
-            graph,
-            exp_replaced={'t1'},
-            exp_removed={'t2', 't3'},
-            exp_label_to_taskid={'t1': 'e1'})
+    def test_get_subgraph_dep_chain(self):
+        "when a dependency chain is optimized, it is omitted from the graph"
+        graph = self.make_graph(
+            self.make_task('task1', optimized=True, task_id='dep1'),
+            self.make_task('task2', optimized=True, task_id='dep2'),
+            self.make_task('task3', optimized=False),
+            ('task2', 'task1', 'build'),
+            ('task3', 'task2', 'image'),
+        )
+        label_to_taskid = {'task1': 'dep1', 'task2': 'dep2'}
+        sub = get_subgraph(graph, graph.graph.named_links_dict(), label_to_taskid)
+        task3 = label_to_taskid['task3']
+        self.assertEqual(sub.graph.nodes, {task3})
+        self.assertEqual(sub.graph.edges, set())
+        self.assertEqual(sub.tasks[task3].task_id, task3)
+        self.assertEqual(sorted(sub.tasks[task3].task['dependencies']), ['dep2'])
 
-    def assert_subgraph(self, graph, removed_tasks, replaced_tasks,
-                        label_to_taskid, exp_subgraph, exp_label_to_taskid):
-        self.maxDiff = None
-        optimize.slugid = ('tid{}'.format(i) for i in xrange(1, 10)).next
-        try:
-            got_subgraph = optimize.get_subgraph(graph, removed_tasks,
-                                                 replaced_tasks, label_to_taskid)
-        finally:
-            optimize.slugid = slugid
-        self.assertEqual(got_subgraph.graph, exp_subgraph.graph)
-        self.assertEqual(got_subgraph.tasks, exp_subgraph.tasks)
-        self.assertEqual(label_to_taskid, exp_label_to_taskid)
+    def test_get_subgraph_opt_away(self):
+        "when a leaf task is optimized away, it is omitted from the graph"
+        graph = self.make_graph(
+            self.make_task('task1', optimized=False),
+            self.make_task('task2', optimized=True),
+            ('task2', 'task1', 'build'),
+        )
+        label_to_taskid = {'task2': 'dep2'}
+        sub = get_subgraph(graph, graph.graph.named_links_dict(), label_to_taskid)
+        task1 = label_to_taskid['task1']
+        self.assertEqual(sub.graph.nodes, {task1})
+        self.assertEqual(sub.graph.edges, set())
+        self.assertEqual(sub.tasks[task1].task_id, task1)
+        self.assertEqual(sorted(sub.tasks[task1].task['dependencies']), [])
 
-    def test_get_subgraph_no_change(self):
-        "get_subgraph returns a similarly-shaped subgraph when nothing is removed"
-        graph = self.make_triangle()
-        self.assert_subgraph(
-            graph, set(), set(), {},
-            self.make_opt_graph(
-                self.make_task('t1', task_id='tid1', dependencies={}),
-                self.make_task('t2', task_id='tid2', dependencies={'tid1'}),
-                self.make_task('t3', task_id='tid3', dependencies={'tid1', 'tid2'}),
-                ('tid3', 'tid2', 'dep'),
-                ('tid3', 'tid1', 'dep2'),
-                ('tid2', 'tid1', 'dep')),
-            {'t1': 'tid1', 't2': 'tid2', 't3': 'tid3'})
+    def test_get_subgraph_refs_resolved(self):
+        "get_subgraph resolves task references"
+        graph = self.make_graph(
+            self.make_task('task1', optimized=True, task_id='dep1'),
+            self.make_task(
+                'task2',
+                optimized=False,
+                task_def={'payload': {'task-reference': 'http://<build>/<test>'}}
+            ),
+            ('task2', 'task1', 'build'),
+            ('task2', 'task3', 'test'),
+            self.make_task('task3', optimized=False),
+        )
+        label_to_taskid = {'task1': 'dep1'}
+        sub = get_subgraph(graph, graph.graph.named_links_dict(), label_to_taskid)
+        task2 = label_to_taskid['task2']
+        task3 = label_to_taskid['task3']
+        self.assertEqual(sub.graph.nodes, {task2, task3})
+        self.assertEqual(sub.graph.edges, {(task2, task3, 'test')})
+        self.assertEqual(sub.tasks[task2].task_id, task2)
+        self.assertEqual(sorted(sub.tasks[task2].task['dependencies']), sorted([task3, 'dep1']))
+        self.assertEqual(sub.tasks[task2].task['payload'], 'http://dep1/' + task3)
+        self.assertEqual(sub.tasks[task3].task_id, task3)
 
-    def test_get_subgraph_removed(self):
-        "get_subgraph returns a smaller subgraph when tasks are removed"
-        graph = self.make_triangle()
-        self.assert_subgraph(
-            graph, {'t2', 't3'}, set(), {},
-            self.make_opt_graph(
-                self.make_task('t1', task_id='tid1', dependencies={})),
-            {'t1': 'tid1'})
-
-    def test_get_subgraph_replaced(self):
-        "get_subgraph returns a smaller subgraph when tasks are replaced"
-        graph = self.make_triangle()
-        self.assert_subgraph(
-            graph, set(), {'t1', 't2'}, {'t1': 'e1', 't2': 'e2'},
-            self.make_opt_graph(
-                self.make_task('t3', task_id='tid1', dependencies={'e1', 'e2'})),
-            {'t1': 'e1', 't2': 'e2', 't3': 'tid1'})
-
-    def test_get_subgraph_removed_dep(self):
-        "get_subgraph raises an Exception when a task depends on a removed task"
-        graph = self.make_triangle()
-        with self.assertRaises(Exception):
-            optimize.get_subgraph(graph, {'t2'}, set(), {})
+    def test_optimize(self):
+        "optimize_task_graph annotates and extracts the subgraph from a simple graph"
+        input = self.make_graph(
+            self.make_task('task1', ['optimize-to-task', 'dep1']),
+            self.make_task('task2', ['no-optimize']),
+            self.make_task('task3', ['no-optimize']),
+            ('task2', 'task1', 'build'),
+            ('task2', 'task3', 'image'),
+        )
+        opt, label_to_taskid = optimize_task_graph(input, {}, set())
+        self.assertEqual(opt.graph, graph.Graph(
+            {label_to_taskid['task2'], label_to_taskid['task3']},
+            {(label_to_taskid['task2'], label_to_taskid['task3'], 'image')}))
 
 
 if __name__ == '__main__':
     main()
--- a/taskcluster/taskgraph/test/test_target_tasks.py
+++ b/taskcluster/taskgraph/test/test_target_tasks.py
@@ -1,15 +1,14 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
-import contextlib
 import unittest
 
 from taskgraph import target_tasks
 from taskgraph import try_option_syntax
 from taskgraph.graph import Graph
 from taskgraph.taskgraph import TaskGraph
 from taskgraph.task import Task
 from mozunit import main
@@ -61,61 +60,48 @@ class TestTargetTasks(unittest.TestCase)
         self.assertFalse(self.default_matches(['release'], 'baobab'))
 
     def test_default_nothing(self):
         """run_on_projects=[] includes nothing"""
         self.assertFalse(self.default_matches([], 'mozilla-central'))
         self.assertFalse(self.default_matches([], 'mozilla-inbound'))
         self.assertFalse(self.default_matches([], 'baobab'))
 
-    def make_task_graph(self):
+    def test_try_tasks(self):
         tasks = {
             'a': Task(kind=None, label='a', attributes={}, task={}),
             'b': Task(kind=None, label='b', attributes={'at-at': 'yep'}, task={}),
-            'c': Task(kind=None, label='c', attributes={'run_on_projects': ['try']}, task={}),
+            'c': Task(kind=None, label='c', attributes={}, task={}),
         }
         graph = Graph(nodes=set('abc'), edges=set())
-        return TaskGraph(tasks, graph)
+        tg = TaskGraph(tasks, graph)
 
-    @contextlib.contextmanager
-    def fake_TryOptionSyntax(self):
+        method = target_tasks.get_method('try_tasks')
+        params = {
+            'message': '',
+            'target_task_labels': [],
+        }
+
         orig_TryOptionSyntax = try_option_syntax.TryOptionSyntax
         try:
             try_option_syntax.TryOptionSyntax = FakeTryOptionSyntax
-            yield
-        finally:
-            try_option_syntax.TryOptionSyntax = orig_TryOptionSyntax
 
-    def test_just_try_it(self):
-        "try_mode = None runs try optoin syntax with no options"
-        tg = self.make_task_graph()
-        method = target_tasks.get_method('try_tasks')
-        with self.fake_TryOptionSyntax():
-            params = {
-                'try_mode': None,
-                'message': '',
-            }
+            # no try specifier
+            self.assertEqual(method(tg, params), ['b'])
+
+            # try syntax only
+            params['message'] = 'try: me'
             self.assertEqual(method(tg, params), ['b'])
 
-    def test_try_option_syntax(self):
-        "try_mode = try_option_syntax uses TryOptionSyntax"
-        tg = self.make_task_graph()
-        method = target_tasks.get_method('try_tasks')
-        with self.fake_TryOptionSyntax():
-            params = {
-                'try_mode': 'try_option_syntax',
-                'message': 'try: -p all',
-            }
-            self.assertEqual(method(tg, params), ['b'])
+            # try task config only
+            params['message'] = ''
+            params['target_task_labels'] = ['c']
+            self.assertEqual(method(tg, params), ['c'])
 
-    def test_try_task_config(self):
-        "try_mode = try_task_config uses the try config"
-        tg = self.make_task_graph()
-        method = target_tasks.get_method('try_tasks')
-        params = {
-            'try_mode': 'try_task_config',
-            'try_task_config': {'tasks': ['a']},
-        }
-        self.assertEqual(method(tg, params), ['a'])
+            # both syntax and config
+            params['message'] = 'try: me'
+            self.assertEqual(set(method(tg, params)), set(['b', 'c']))
+        finally:
+            try_option_syntax.TryOptionSyntax = orig_TryOptionSyntax
 
 
 if __name__ == '__main__':
     main()
--- a/taskcluster/taskgraph/test/test_taskgraph.py
+++ b/taskcluster/taskgraph/test/test_taskgraph.py
@@ -19,60 +19,60 @@ class TestTaskGraph(unittest.TestCase):
     def test_taskgraph_to_json(self):
         tasks = {
             'a': Task(kind='test', label='a',
                       attributes={'attr': 'a-task'},
                       task={'taskdef': True}),
             'b': Task(kind='test', label='b',
                       attributes={},
                       task={'task': 'def'},
-                      optimization={'seta': None},
+                      optimizations=[['seta']],
                       # note that this dep is ignored, superseded by that
                       # from the taskgraph's edges
                       dependencies={'first': 'a'}),
         }
         graph = Graph(nodes=set('ab'), edges={('a', 'b', 'edgelabel')})
         taskgraph = TaskGraph(tasks, graph)
 
         res = taskgraph.to_json()
 
         self.assertEqual(res, {
             'a': {
                 'kind': 'test',
                 'label': 'a',
                 'attributes': {'attr': 'a-task', 'kind': 'test'},
                 'task': {'taskdef': True},
                 'dependencies': {'edgelabel': 'b'},
-                'optimization': None,
+                'optimizations': [],
             },
             'b': {
                 'kind': 'test',
                 'label': 'b',
                 'attributes': {'kind': 'test'},
                 'task': {'task': 'def'},
                 'dependencies': {},
-                'optimization': {'seta': None},
+                'optimizations': [['seta']],
             }
         })
 
     def test_round_trip(self):
         graph = TaskGraph(tasks={
             'a': Task(
                 kind='fancy',
                 label='a',
                 attributes={},
                 dependencies={'prereq': 'b'},  # must match edges, below
-                optimization={'seta': None},
+                optimizations=[['seta']],
                 task={'task': 'def'}),
             'b': Task(
                 kind='pre',
                 label='b',
                 attributes={},
                 dependencies={},
-                optimization={'seta': None},
+                optimizations=[['seta']],
                 task={'task': 'def2'}),
         }, graph=Graph(nodes={'a', 'b'}, edges={('a', 'b', 'prereq')}))
 
         tasks, new_graph = TaskGraph.from_json(graph.to_json())
         self.assertEqual(graph, new_graph)
 
 
 if __name__ == '__main__':
--- a/taskcluster/taskgraph/test/test_try_option_syntax.py
+++ b/taskcluster/taskgraph/test/test_try_option_syntax.py
@@ -1,17 +1,17 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import unittest
 
-from taskgraph.try_option_syntax import TryOptionSyntax, parse_message
+from taskgraph.try_option_syntax import TryOptionSyntax
 from taskgraph.try_option_syntax import RIDEALONG_BUILDS
 from taskgraph.graph import Graph
 from taskgraph.taskgraph import TaskGraph
 from taskgraph.task import Task
 from mozunit import main
 
 
 def unittest_task(n, tp, bt='opt'):
@@ -58,286 +58,274 @@ unittest_tasks = {k: v for k, v in tasks
                   if 'unittest_try_name' in v.attributes}
 talos_tasks = {k: v for k, v in tasks.iteritems()
                if 'talos_try_name' in v.attributes}
 graph_with_jobs = TaskGraph(tasks, Graph(set(tasks), set()))
 
 
 class TestTryOptionSyntax(unittest.TestCase):
 
+    def test_empty_message(self):
+        "Given an empty message, it should return an empty value"
+        tos = TryOptionSyntax('', graph_with_jobs)
+        self.assertEqual(tos.build_types, [])
+        self.assertEqual(tos.jobs, [])
+        self.assertEqual(tos.unittests, [])
+        self.assertEqual(tos.talos, [])
+        self.assertEqual(tos.platforms, [])
+        self.assertEqual(tos.trigger_tests, 0)
+        self.assertEqual(tos.talos_trigger_tests, 0)
+        self.assertEqual(tos.env, [])
+        self.assertFalse(tos.profile)
+        self.assertIsNone(tos.tag)
+        self.assertFalse(tos.no_retry)
+
+    def test_message_without_try(self):
+        "Given a non-try message, it should return an empty value"
+        tos = TryOptionSyntax('Bug 1234: frobnicte the foo', graph_with_jobs)
+        self.assertEqual(tos.build_types, [])
+        self.assertEqual(tos.jobs, [])
+        self.assertEqual(tos.unittests, [])
+        self.assertEqual(tos.talos, [])
+        self.assertEqual(tos.platforms, [])
+        self.assertEqual(tos.trigger_tests, 0)
+        self.assertEqual(tos.talos_trigger_tests, 0)
+        self.assertEqual(tos.env, [])
+        self.assertFalse(tos.profile)
+        self.assertIsNone(tos.tag)
+        self.assertFalse(tos.no_retry)
+
     def test_unknown_args(self):
         "unknown arguments are ignored"
-        parameters = {'try_options': parse_message('try: --doubledash -z extra')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: --doubledash -z extra', graph_with_jobs)
         # equilvant to "try:"..
         self.assertEqual(tos.build_types, [])
         self.assertEqual(tos.jobs, None)
 
     def test_apostrophe_in_message(self):
         "apostrophe does not break parsing"
-        parameters = {'try_options': parse_message('Increase spammy log\'s log level. try: -b do')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('Increase spammy log\'s log level. try: -b do', graph_with_jobs)
         self.assertEqual(sorted(tos.build_types), ['debug', 'opt'])
 
     def test_b_do(self):
         "-b do should produce both build_types"
-        parameters = {'try_options': parse_message('try: -b do')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: -b do', graph_with_jobs)
         self.assertEqual(sorted(tos.build_types), ['debug', 'opt'])
 
     def test_b_d(self):
         "-b d should produce build_types=['debug']"
-        parameters = {'try_options': parse_message('try: -b d')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: -b d', graph_with_jobs)
         self.assertEqual(sorted(tos.build_types), ['debug'])
 
     def test_b_o(self):
         "-b o should produce build_types=['opt']"
-        parameters = {'try_options': parse_message('try: -b o')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: -b o', graph_with_jobs)
         self.assertEqual(sorted(tos.build_types), ['opt'])
 
     def test_build_o(self):
         "--build o should produce build_types=['opt']"
-        parameters = {'try_options': parse_message('try: --build o')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: --build o', graph_with_jobs)
         self.assertEqual(sorted(tos.build_types), ['opt'])
 
     def test_b_dx(self):
         "-b dx should produce build_types=['debug'], silently ignoring the x"
-        parameters = {'try_options': parse_message('try: -b dx')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: -b dx', graph_with_jobs)
         self.assertEqual(sorted(tos.build_types), ['debug'])
 
     def test_j_job(self):
         "-j somejob sets jobs=['somejob']"
-        parameters = {'try_options': parse_message('try: -j somejob')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: -j somejob', graph_with_jobs)
         self.assertEqual(sorted(tos.jobs), ['somejob'])
 
     def test_j_jobs(self):
         "-j job1,job2 sets jobs=['job1', 'job2']"
-        parameters = {'try_options': parse_message('try: -j job1,job2')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: -j job1,job2', graph_with_jobs)
         self.assertEqual(sorted(tos.jobs), ['job1', 'job2'])
 
     def test_j_all(self):
         "-j all sets jobs=None"
-        parameters = {'try_options': parse_message('try: -j all')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: -j all', graph_with_jobs)
         self.assertEqual(tos.jobs, None)
 
     def test_j_twice(self):
         "-j job1 -j job2 sets jobs=job1, job2"
-        parameters = {'try_options': parse_message('try: -j job1 -j job2')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: -j job1 -j job2', graph_with_jobs)
         self.assertEqual(sorted(tos.jobs), sorted(['job1', 'job2']))
 
     def test_p_all(self):
         "-p all sets platforms=None"
-        parameters = {'try_options': parse_message('try: -p all')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: -p all', graph_with_jobs)
         self.assertEqual(tos.platforms, None)
 
     def test_p_linux(self):
         "-p linux sets platforms=['linux', 'linux-l10n']"
-        parameters = {'try_options': parse_message('try: -p linux')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: -p linux', graph_with_jobs)
         self.assertEqual(tos.platforms, ['linux', 'linux-l10n'])
 
     def test_p_linux_win32(self):
         "-p linux,win32 sets platforms=['linux', 'linux-l10n', 'win32']"
-        parameters = {'try_options': parse_message('try: -p linux,win32')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: -p linux,win32', graph_with_jobs)
         self.assertEqual(sorted(tos.platforms), ['linux', 'linux-l10n', 'win32'])
 
     def test_p_expands_ridealongs(self):
         "-p linux,linux64 includes the RIDEALONG_BUILDS"
-        parameters = {'try_options': parse_message('try: -p linux,linux64')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: -p linux,linux64', graph_with_jobs)
         platforms = set(['linux'] + RIDEALONG_BUILDS['linux'])
         platforms |= set(['linux64'] + RIDEALONG_BUILDS['linux64'])
         self.assertEqual(sorted(tos.platforms), sorted(platforms))
 
     def test_u_none(self):
         "-u none sets unittests=[]"
-        parameters = {'try_options': parse_message('try: -u none')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: -u none', graph_with_jobs)
         self.assertEqual(sorted(tos.unittests), [])
 
     def test_u_all(self):
         "-u all sets unittests=[..whole list..]"
-        parameters = {'try_options': parse_message('try: -u all')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: -u all', graph_with_jobs)
         self.assertEqual(sorted(tos.unittests), sorted([{'test': t} for t in unittest_tasks]))
 
     def test_u_single(self):
         "-u mochitest-webgl sets unittests=[mochitest-webgl]"
-        parameters = {'try_options': parse_message('try: -u mochitest-webgl')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: -u mochitest-webgl', graph_with_jobs)
         self.assertEqual(sorted(tos.unittests), sorted([{'test': 'mochitest-webgl'}]))
 
     def test_u_alias(self):
         "-u mochitest-gl sets unittests=[mochitest-webgl]"
-        parameters = {'try_options': parse_message('try: -u mochitest-gl')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: -u mochitest-gl', graph_with_jobs)
         self.assertEqual(sorted(tos.unittests), sorted([{'test': 'mochitest-webgl'}]))
 
     def test_u_multi_alias(self):
         "-u e10s sets unittests=[all e10s unittests]"
-        parameters = {'try_options': parse_message('try: -u e10s')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: -u e10s', graph_with_jobs)
         self.assertEqual(sorted(tos.unittests), sorted([
             {'test': t} for t in unittest_tasks if 'e10s' in t
         ]))
 
     def test_u_commas(self):
         "-u mochitest-webgl,gtest sets unittests=both"
-        parameters = {'try_options': parse_message('try: -u mochitest-webgl,gtest')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: -u mochitest-webgl,gtest', graph_with_jobs)
         self.assertEqual(sorted(tos.unittests), sorted([
             {'test': 'mochitest-webgl'},
             {'test': 'gtest'},
         ]))
 
     def test_u_chunks(self):
         "-u gtest-3,gtest-4 selects the third and fourth chunk of gtest"
-        parameters = {'try_options': parse_message('try: -u gtest-3,gtest-4')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: -u gtest-3,gtest-4', graph_with_jobs)
         self.assertEqual(sorted(tos.unittests), sorted([
             {'test': 'gtest', 'only_chunks': set('34')},
         ]))
 
     def test_u_platform(self):
         "-u gtest[linux] selects the linux platform for gtest"
-        parameters = {'try_options': parse_message('try: -u gtest[linux]')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: -u gtest[linux]', graph_with_jobs)
         self.assertEqual(sorted(tos.unittests), sorted([
             {'test': 'gtest', 'platforms': ['linux']},
         ]))
 
     def test_u_platforms(self):
         "-u gtest[linux,win32] selects the linux and win32 platforms for gtest"
-        parameters = {'try_options': parse_message('try: -u gtest[linux,win32]')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: -u gtest[linux,win32]', graph_with_jobs)
         self.assertEqual(sorted(tos.unittests), sorted([
             {'test': 'gtest', 'platforms': ['linux', 'win32']},
         ]))
 
     def test_u_platforms_pretty(self):
         """-u gtest[Ubuntu] selects the linux, linux64, linux64-asan, linux64-stylo-disabled,
         and linux64-stylo-sequential platforms for gtest"""
-        parameters = {'try_options': parse_message('try: -u gtest[Ubuntu]')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: -u gtest[Ubuntu]', graph_with_jobs)
         self.assertEqual(sorted(tos.unittests), sorted([
             {'test': 'gtest', 'platforms': ['linux32', 'linux64', 'linux64-asan',
                                             'linux64-stylo-disabled', 'linux64-stylo-sequential']},
         ]))
 
     def test_u_platforms_negated(self):
         "-u gtest[-linux] selects all platforms but linux for gtest"
-        parameters = {'try_options': parse_message('try: -u gtest[-linux]')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: -u gtest[-linux]', graph_with_jobs)
         all_platforms = set([x.attributes['test_platform'] for x in unittest_tasks.values()])
         self.assertEqual(sorted(tos.unittests[0]['platforms']), sorted(
             [x for x in all_platforms if x != 'linux']
         ))
 
     def test_u_platforms_negated_pretty(self):
         "-u gtest[Ubuntu,-x64] selects just linux for gtest"
-        parameters = {'try_options': parse_message('try: -u gtest[Ubuntu,-x64]')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: -u gtest[Ubuntu,-x64]', graph_with_jobs)
         self.assertEqual(sorted(tos.unittests), sorted([
             {'test': 'gtest', 'platforms': ['linux32']},
         ]))
 
     def test_u_chunks_platforms(self):
         "-u gtest-1[linux,win32] selects the linux and win32 platforms for chunk 1 of gtest"
-        parameters = {'try_options': parse_message('try: -u gtest-1[linux,win32]')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: -u gtest-1[linux,win32]', graph_with_jobs)
         self.assertEqual(sorted(tos.unittests), sorted([
             {'test': 'gtest', 'platforms': ['linux', 'win32'], 'only_chunks': set('1')},
         ]))
 
     def test_t_none(self):
         "-t none sets talos=[]"
-        parameters = {'try_options': parse_message('try: -t none')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: -t none', graph_with_jobs)
         self.assertEqual(sorted(tos.talos), [])
 
     def test_t_all(self):
         "-t all sets talos=[..whole list..]"
-        parameters = {'try_options': parse_message('try: -t all')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: -t all', graph_with_jobs)
         self.assertEqual(sorted(tos.talos), sorted([{'test': t} for t in talos_tasks]))
 
     def test_t_single(self):
         "-t mochitest-webgl sets talos=[mochitest-webgl]"
-        parameters = {'try_options': parse_message('try: -t mochitest-webgl')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: -t mochitest-webgl', graph_with_jobs)
         self.assertEqual(sorted(tos.talos), sorted([{'test': 'mochitest-webgl'}]))
 
     # -t shares an implementation with -u, so it's not tested heavily
 
     def test_trigger_tests(self):
         "--rebuild 10 sets trigger_tests"
-        parameters = {'try_options': parse_message('try: --rebuild 10')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: --rebuild 10', graph_with_jobs)
         self.assertEqual(tos.trigger_tests, 10)
 
     def test_talos_trigger_tests(self):
         "--rebuild-talos 10 sets talos_trigger_tests"
-        parameters = {'try_options': parse_message('try: --rebuild-talos 10')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: --rebuild-talos 10', graph_with_jobs)
         self.assertEqual(tos.talos_trigger_tests, 10)
 
     def test_interactive(self):
         "--interactive sets interactive"
-        parameters = {'try_options': parse_message('try: --interactive')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: --interactive', graph_with_jobs)
         self.assertEqual(tos.interactive, True)
 
     def test_all_email(self):
         "--all-emails sets notifications"
-        parameters = {'try_options': parse_message('try: --all-emails')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: --all-emails', graph_with_jobs)
         self.assertEqual(tos.notifications, 'all')
 
     def test_fail_email(self):
         "--failure-emails sets notifications"
-        parameters = {'try_options': parse_message('try: --failure-emails')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: --failure-emails', graph_with_jobs)
         self.assertEqual(tos.notifications, 'failure')
 
     def test_no_email(self):
         "no email settings don't set notifications"
-        parameters = {'try_options': parse_message('try:')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try:', graph_with_jobs)
         self.assertEqual(tos.notifications, None)
 
     def test_setenv(self):
         "--setenv VAR=value adds a environment variables setting to env"
-        parameters = {'try_options': parse_message(
-            'try: --setenv VAR1=value1 --setenv VAR2=value2')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: --setenv VAR1=value1 --setenv VAR2=value2', graph_with_jobs)
         self.assertEqual(tos.env, ['VAR1=value1', 'VAR2=value2'])
 
     def test_profile(self):
         "--geckoProfile sets profile to true"
-        parameters = {'try_options': parse_message('try: --geckoProfile')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: --geckoProfile', graph_with_jobs)
         self.assertTrue(tos.profile)
 
     def test_tag(self):
         "--tag TAG sets tag to TAG value"
-        parameters = {'try_options': parse_message('try: --tag tagName')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: --tag tagName', graph_with_jobs)
         self.assertEqual(tos.tag, 'tagName')
 
     def test_no_retry(self):
         "--no-retry sets no_retry to true"
-        parameters = {'try_options': parse_message('try: --no-retry')}
-        tos = TryOptionSyntax(parameters, graph_with_jobs)
+        tos = TryOptionSyntax('try: --no-retry', graph_with_jobs)
         self.assertTrue(tos.no_retry)
 
 
 if __name__ == '__main__':
     main()
--- a/taskcluster/taskgraph/test/test_util_parameterization.py
+++ b/taskcluster/taskgraph/test/test_util_parameterization.py
@@ -35,58 +35,36 @@ class TestTimestamps(unittest.TestCase):
         now = datetime.datetime(2018, 1, 1)
         input = [{'relative-datestamp': '1 day', 'another-key': True}]
         self.assertEqual(resolve_timestamps(now, input),
                          [{'relative-datestamp': '1 day', 'another-key': True}])
 
 
 class TestTaskRefs(unittest.TestCase):
 
-    def do(self, input, output):
-        taskid_for_edge_name = {'edge%d' % n: 'tid%d' % n for n in range(1, 4)}
-        self.assertEqual(resolve_task_references('subject', input, taskid_for_edge_name), output)
-
     def test_no_change(self):
-        "resolve_task_references does nothing when there are no task references"
-        self.do({'in-a-list': ['stuff', {'property': '<edge1>'}]},
-                {'in-a-list': ['stuff', {'property': '<edge1>'}]})
+        input = {"key": "value", "numeric": 10, "list": ["a", True, False, None]}
+        self.assertEqual(resolve_task_references('lable', input, {}), input)
 
-    def test_in_list(self):
-        "resolve_task_references resolves task references in a list"
-        self.do({'in-a-list': ['stuff', {'task-reference': '<edge1>'}]},
-                {'in-a-list': ['stuff', 'tid1']})
-
-    def test_in_dict(self):
-        "resolve_task_references resolves task references in a dict"
-        self.do({'in-a-dict': {'stuff': {'task-reference': '<edge2>'}}},
-                {'in-a-dict': {'stuff': 'tid2'}})
+    def test_buried_replacement(self):
+        input = {"key": [{"key2": [{'task-reference': 'taskid=<toolchain>'}]}]}
+        self.assertEqual(resolve_task_references('lable', input, {'toolchain': 'abcd'}),
+                         {u'key': [{u'key2': [u'taskid=abcd']}]})
 
-    def test_multiple(self):
-        "resolve_task_references resolves multiple references in the same string"
-        self.do({'multiple': {'task-reference': 'stuff <edge1> stuff <edge2> after'}},
-                {'multiple': 'stuff tid1 stuff tid2 after'})
-
-    def test_embedded(self):
-        "resolve_task_references resolves ebmedded references"
-        self.do({'embedded': {'task-reference': 'stuff before <edge3> stuff after'}},
-                {'embedded': 'stuff before tid3 stuff after'})
+    def test_appears_with_other_keys(self):
+        input = [{'task-reference': '<toolchain>', 'another-key': True}]
+        self.assertEqual(resolve_task_references('lable', input, {'toolchain': 'abcd'}),
+                         [{'task-reference': '<toolchain>', 'another-key': True}])
 
-    def test_escaping(self):
-        "resolve_task_references resolves escapes in task references"
-        self.do({'escape': {'task-reference': '<<><edge3>>'}},
-                {'escape': '<tid3>'})
+    def test_multiple_subs(self):
+        input = [{'task-reference': 'toolchain=<toolchain>, build=<build>'}]
+        self.assertEqual(
+            resolve_task_references('lable', input, {'toolchain': 'abcd', 'build': 'def'}),
+            ['toolchain=abcd, build=def'])
 
-    def test_multikey(self):
-        "resolve_task_references is ignored when there is another key in the dict"
-        self.do({'escape': {'task-reference': '<edge3>', 'another-key': True}},
-                {'escape': {'task-reference': '<edge3>', 'another-key': True}})
-
-    def test_invalid(self):
-        "resolve_task_references raises a KeyError on reference to an invalid task"
-        self.assertRaisesRegexp(
-            KeyError,
-            "task 'subject' has no dependency named 'no-such'",
-            lambda: resolve_task_references('subject', {'task-reference': '<no-such>'}, {})
-        )
+    def test_escaped(self):
+        input = [{'task-reference': '<<><toolchain>>'}]
+        self.assertEqual(resolve_task_references('lable', input, {'toolchain': 'abcd'}),
+                         ['<abcd>'])
 
 
 if __name__ == '__main__':
     main()
--- a/taskcluster/taskgraph/transforms/build.py
+++ b/taskcluster/taskgraph/transforms/build.py
@@ -39,18 +39,16 @@ def set_defaults(config, jobs):
             worker.setdefault('env', {})
 
         yield job
 
 
 @transforms.add
 def set_env(config, jobs):
     """Set extra environment variables from try command line."""
-    env = {}
-    if config.params['try_mode'] == 'try_option_syntax':
-        env = config.params['try_options']['env'] or {}
     for job in jobs:
+        env = config.config['args'].env
         if env:
             job_env = {}
             if 'worker' in job:
                 job_env = job['worker']['env']
             job_env.update(dict(x.split('=') for x in env))
         yield job
--- a/taskcluster/taskgraph/transforms/build_attrs.py
+++ b/taskcluster/taskgraph/transforms/build_attrs.py
@@ -1,15 +1,14 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 from __future__ import absolute_import, print_function, unicode_literals
 
 from taskgraph.transforms.base import TransformSequence
-from taskgraph.util.platforms import platform_family
 
 transforms = TransformSequence()
 
 
 @transforms.add
 def set_build_attributes(config, jobs):
     """
     Set the build_platform and build_type attributes based on the job name.
@@ -27,23 +26,8 @@ def set_build_attributes(config, jobs):
 
         attributes = job.setdefault('attributes', {})
         attributes.update({
             'build_platform': build_platform,
             'build_type': build_type,
         })
 
         yield job
-
-
-@transforms.add
-def set_schedules_optimization(config, jobs):
-    """Set the `skip-unless-affected` optimization based on the build platform."""
-    for job in jobs:
-        # don't add skip-unless-schedules if there's already a when defined
-        if 'when' in job:
-            yield job
-            continue
-
-        build_platform = job['attributes']['build_platform']
-        job.setdefault('optimization',
-                       {'skip-unless-schedules': [platform_family(build_platform)]})
-        yield job
--- a/taskcluster/taskgraph/transforms/docker_image.py
+++ b/taskcluster/taskgraph/transforms/docker_image.py
@@ -56,36 +56,36 @@ def fill_template(config, tasks):
                 context_hash=context_hash,
             ))
 
         # As an optimization, if the context hash exists for a high level, that image
         # task ID will be used.  The reasoning behind this is that eventually everything ends
         # up on level 3 at some point if most tasks use this as a common image
         # for a given context hash, a worker within Taskcluster does not need to contain
         # the same image per branch.
-        optimization = {'index-search': ['{}.level-{}.{}.hash.{}'.format(
-            INDEX_PREFIX, level, image_name, context_hash)
-            for level in reversed(range(int(config.params['level']), 4))]}
+        optimizations = [['index-search', '{}.level-{}.{}.hash.{}'.format(
+            INDEX_PREFIX, level, image_name, context_hash)]
+            for level in reversed(range(int(config.params['level']), 4))]
 
         # Adjust the zstandard compression level based on the execution level.
         # We use faster compression for level 1 because we care more about
         # end-to-end times. We use slower/better compression for other levels
         # because images are read more often and it is worth the trade-off to
         # burn more CPU once to reduce image size.
         zstd_level = '3' if int(config.params['level']) == 1 else '10'
 
         # include some information that is useful in reconstructing this task
         # from JSON
         taskdesc = {
             'label': 'build-docker-image-' + image_name,
             'description': description,
             'attributes': {'image_name': image_name},
             'expires-after': '1 year',
             'routes': routes,
-            'optimization': optimization,
+            'optimizations': optimizations,
             'scopes': ['secrets:get:project/taskcluster/gecko/hgfingerprint'],
             'treeherder': {
                 'symbol': job_symbol,
                 'platform': 'taskcluster-images/opt',
                 'kind': 'other',
                 'tier': 1,
             },
             'run-on-projects': [],
--- a/taskcluster/taskgraph/transforms/job/__init__.py
+++ b/taskcluster/taskgraph/transforms/job/__init__.py
@@ -22,17 +22,16 @@ from taskgraph.util.schema import (
 )
 from taskgraph.util.workertypes import worker_type_implementation
 from taskgraph.transforms.task import task_description_schema
 from voluptuous import (
     Any,
     Extra,
     Optional,
     Required,
-    Exclusive,
 )
 
 logger = logging.getLogger(__name__)
 
 # Voluptuous uses marker objects as dictionary *keys*, but they are not
 # comparable, so we cast all of the keys back to regular strings
 task_description_schema = {str(k): v for k, v in task_description_schema.schema.iteritems()}
 
@@ -55,24 +54,23 @@ job_description_schema = Schema({
     Optional('routes'): task_description_schema['routes'],
     Optional('scopes'): task_description_schema['scopes'],
     Optional('tags'): task_description_schema['tags'],
     Optional('extra'): task_description_schema['extra'],
     Optional('treeherder'): task_description_schema['treeherder'],
     Optional('index'): task_description_schema['index'],
     Optional('run-on-projects'): task_description_schema['run-on-projects'],
     Optional('coalesce'): task_description_schema['coalesce'],
-    Exclusive('optimization', 'optimization'): task_description_schema['optimization'],
+    Optional('optimizations'): task_description_schema['optimizations'],
     Optional('needs-sccache'): task_description_schema['needs-sccache'],
 
-    # The "when" section contains descriptions of the circumstances under which
-    # this task should be included in the task graph.  This will be converted
-    # into an optimization, so it cannot be specified in a job description that
-    # also gives 'optimization'.
-    Exclusive('when', 'optimization'): Any({
+    # The "when" section contains descriptions of the circumstances
+    # under which this task should be included in the task graph.  This
+    # will be converted into an element in the `optimizations` list.
+    Optional('when'): Any({
         # This task only needs to be run if a file matching one of the given
         # patterns has changed in the push.  The patterns use the mozpack
         # match function (python/mozbuild/mozpack/path.py).
         Optional('files-changed'): [basestring],
     }),
 
     # A description of how to run this job.
     'run': {
@@ -100,33 +98,32 @@ def validate(config, jobs):
         yield validate_schema(job_description_schema, job,
                               "In job {!r}:".format(job.get('name', job.get('label'))))
 
 
 @transforms.add
 def rewrite_when_to_optimization(config, jobs):
     for job in jobs:
         when = job.pop('when', {})
-        if not when:
+        files_changed = when.get('files-changed')
+        if not files_changed:
             yield job
             continue
 
-        files_changed = when.get('files-changed')
-
         # add some common files
         files_changed.extend([
             '{}/**'.format(config.path),
             'taskcluster/taskgraph/**',
         ])
         if 'in-tree' in job.get('worker', {}).get('docker-image', {}):
             files_changed.append('taskcluster/docker/{}/**'.format(
                 job['worker']['docker-image']['in-tree']))
 
         # "only when files changed" implies "skip if files have not changed"
-        job['optimization'] = {'skip-unless-changed': files_changed}
+        job.setdefault('optimizations', []).append(['skip-unless-changed', files_changed])
 
         assert 'when' not in job
         yield job
 
 
 @transforms.add
 def make_task_description(config, jobs):
     """Given a build description, create a task description"""
--- a/taskcluster/taskgraph/transforms/job/toolchain.py
+++ b/taskcluster/taskgraph/transforms/job/toolchain.py
@@ -48,17 +48,17 @@ toolchain_run_schema = Schema({
     Required('toolchain-artifact'): basestring,
 
     # An alias that can be used instead of the real toolchain job name in
     # the toolchains list for build jobs.
     Optional('toolchain-alias'): basestring,
 })
 
 
-def add_optimization(config, run, taskdesc):
+def add_optimizations(config, run, taskdesc):
     files = list(run.get('resources', []))
     # This file
     files.append('taskcluster/taskgraph/transforms/job/toolchain.py')
     # The script
     files.append('taskcluster/scripts/misc/{}'.format(run['script']))
     # Tooltool manifest if any is defined:
     tooltool_manifest = taskdesc['worker']['env'].get('TOOLTOOL_MANIFEST')
     if tooltool_manifest:
@@ -76,23 +76,23 @@ def add_optimization(config, run, taskde
         digest = hashlib.sha256('\n'.join(data)).hexdigest()
 
     label = taskdesc['label']
     subs = {
         'name': label.replace('%s-' % config.kind, ''),
         'digest': digest,
     }
 
+    optimizations = taskdesc.setdefault('optimizations', [])
+
     # We'll try to find a cached version of the toolchain at levels above
     # and including the current level, starting at the highest level.
-    index_routes = []
     for level in reversed(range(int(config.params['level']), 4)):
         subs['level'] = level
-        index_routes.append(TOOLCHAIN_INDEX.format(**subs))
-    taskdesc['optimization'] = {'index-search': index_routes}
+        optimizations.append(['index-search', TOOLCHAIN_INDEX.format(**subs)])
 
     # ... and cache at the lowest level.
     taskdesc.setdefault('routes', []).append(
         'index.{}'.format(TOOLCHAIN_INDEX.format(**subs)))
 
 
 @run_job_using("docker-worker", "toolchain-script", schema=toolchain_run_schema)
 def docker_worker_toolchain(config, job, taskdesc):
@@ -131,17 +131,17 @@ def docker_worker_toolchain(config, job,
             run['script'])
     ]
 
     attributes = taskdesc.setdefault('attributes', {})
     attributes['toolchain-artifact'] = run['toolchain-artifact']
     if 'toolchain-alias' in run:
         attributes['toolchain-alias'] = run['toolchain-alias']
 
-    add_optimization(config, run, taskdesc)
+    add_optimizations(config, run, taskdesc)
 
 
 @run_job_using("generic-worker", "toolchain-script", schema=toolchain_run_schema)
 def windows_toolchain(config, job, taskdesc):
     run = job['run']
     taskdesc['run-on-projects'] = ['trunk', 'try']
 
     worker = taskdesc['worker']
@@ -178,9 +178,9 @@ def windows_toolchain(config, job, taskd
         r'{} -c ./build/src/taskcluster/scripts/misc/{}'.format(bash, run['script'])
     ]
 
     attributes = taskdesc.setdefault('attributes', {})
     attributes['toolchain-artifact'] = run['toolchain-artifact']
     if 'toolchain-alias' in run:
         attributes['toolchain-alias'] = run['toolchain-alias']
 
-    add_optimization(config, run, taskdesc)
+    add_optimizations(config, run, taskdesc)
--- a/taskcluster/taskgraph/transforms/task.py
+++ b/taskcluster/taskgraph/transforms/task.py
@@ -13,17 +13,16 @@ from __future__ import absolute_import, 
 import hashlib
 import json
 import os
 import re
 import time
 from copy import deepcopy
 
 from mozbuild.util import memoize
-from mozbuild import schedules
 from taskgraph.util.attributes import TRUNK_PROJECTS
 from taskgraph.util.hash import hash_path
 from taskgraph.util.treeherder import split_symbol
 from taskgraph.transforms.base import TransformSequence
 from taskgraph.util.schema import validate_schema, Schema
 from taskgraph.util.scriptworker import get_release_config
 from voluptuous import Any, Required, Optional, Extra
 from taskgraph import GECKO
@@ -163,36 +162,27 @@ task_description_schema = Schema({
         # tasks.
         'age': int,
 
         # The minimum number of backlogged tasks with the same coalescing key,
         # before the coalescing service will return tasks.
         'size': int,
     },
 
-    # Optimization to perform on this task during the optimization phase.
-    # Optimizations are defined in taskcluster/taskgraph/optimize.py.
-    Required('optimization', default=None): Any(
-        # always run this task (default)
-        None,
-        # search the index for the given index namespaces, and replace this task if found
-        # the search occurs in order, with the first match winning
-        {'index-search': [basestring]},
+    # Optimizations to perform on this task during the optimization phase,
+    # specified in order.  These optimizations are defined in
+    # taskcluster/taskgraph/optimize.py.
+    Optional('optimizations'): [Any(
+        # search the index for the given index namespace, and replace this task if found
+        ['index-search', basestring],
         # consult SETA and skip this task if it is low-value
-        {'seta': None},
+        ['seta'],
         # skip this task if none of the given file patterns match
-        {'skip-unless-changed': [basestring]},
-        # skip this task if unless the change files' SCHEDULES contains any of these components
-        {'skip-unless-schedules': list(schedules.ALL_COMPONENTS)},
-        # skip if SETA or skip-unless-schedules says to
-        {'skip-unless-schedules-or-seta': list(schedules.ALL_COMPONENTS)},
-        # only run this task if its dependencies will run (useful for follow-on tasks that
-        # are unnecessary if the parent tasks are not run)
-        {'only-if-dependencies-run': None}
-    ),
+        ['skip-unless-changed', [basestring]],
+    )],
 
     # the provisioner-id/worker-type for the task.  The following parameters will
     # be substituted in this string:
     #  {level} -- the scm level of this push
     'worker-type': basestring,
 
     # Whether the job should use sccache compiler caching.
     Required('needs-sccache', default=False): bool,
@@ -1237,17 +1227,17 @@ def build_task(config, tasks):
                 env = payload.setdefault('env', {})
                 env['MOZ_AUTOMATION'] = '1'
 
         yield {
             'label': task['label'],
             'task': task_def,
             'dependencies': task.get('dependencies', {}),
             'attributes': attributes,
-            'optimization': task.get('optimization', None),
+            'optimizations': task.get('optimizations', []),
         }
 
 
 def check_caches_are_volumes(task):
     """Ensures that all cache paths are defined as volumes.
 
     Caches and volumes are the only filesystem locations whose content
     isn't defined by the Docker image itself. Some caches are optional
--- a/taskcluster/taskgraph/transforms/tests.py
+++ b/taskcluster/taskgraph/transforms/tests.py
@@ -17,17 +17,16 @@ what should run where. this is the wrong
 for example - use `all_tests.py` instead.
 """
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 from taskgraph.transforms.base import TransformSequence
 from taskgraph.util.schema import resolve_keyed_by
 from taskgraph.util.treeherder import split_symbol, join_symbol
-from taskgraph.util.platforms import platform_family
 from taskgraph.util.schema import (
     validate_schema,
     optionally_keyed_by,
     Schema,
 )
 from voluptuous import (
     Any,
     Optional,
@@ -755,32 +754,27 @@ def set_retry_exit_status(config, tests)
     for test in tests:
         test['retry-exit-status'] = 4
         yield test
 
 
 @transforms.add
 def set_profile(config, tests):
     """Set profiling mode for tests."""
-    profile = None
-    if config.params['try_mode'] == 'try_option_syntax':
-        profile = config.params['try_options']['profile']
     for test in tests:
-        if profile and test['suite'] == 'talos':
+        if config.config['args'].profile and test['suite'] == 'talos':
             test['mozharness']['extra-options'].append('--geckoProfile')
         yield test
 
 
 @transforms.add
 def set_tag(config, tests):
     """Set test for a specific tag."""
-    tag = None
-    if config.params['try_mode'] == 'try_option_syntax':
-        tag = config.params['try_options']['tag']
     for test in tests:
+        tag = config.config['args'].tag
         if tag:
             test['mozharness']['extra-options'].extend(['--tag', tag])
         yield test
 
 
 @transforms.add
 def set_test_type(config, tests):
     for test in tests:
@@ -822,32 +816,31 @@ def single_stylo_traversal_tests(config,
 
 @transforms.add
 def set_worker_type(config, tests):
     """Set the worker type based on the test platform."""
     for test in tests:
         # during the taskcluster migration, this is a bit tortured, but it
         # will get simpler eventually!
         test_platform = test['test-platform']
-        try_options = config.params['try_options'] if config.params['try_options'] else {}
         if test.get('worker-type'):
             # This test already has its worker type defined, so just use that (yields below)
             pass
         elif test_platform.startswith('macosx'):
             test['worker-type'] = MACOSX_WORKER_TYPES['macosx64']
         elif test_platform.startswith('win'):
             if test.get('suite', '') == 'talos' and \
                     not any('taskcluster' in cfg for cfg in test['mozharness']['config']):
                 test['worker-type'] = 'buildbot-bridge/buildbot-bridge'
             else:
                 test['worker-type'] = \
                     WINDOWS_WORKER_TYPES[test_platform.split('/')[0]][test['virtualization']]
         elif test_platform.startswith('linux') or test_platform.startswith('android'):
             if test.get('suite', '') == 'talos' and test['build-platform'] != 'linux64-ccov/opt':
-                if try_options.get('taskcluster_worker'):
+                if config.config['args'].taskcluster_worker:
                     test['worker-type'] = 'releng-hardware/gecko-t-linux-talos'
                 else:
                     test['worker-type'] = 'buildbot-bridge/buildbot-bridge'
             else:
                 test['worker-type'] = LINUX_WORKER_TYPES[test['instance-size']]
         else:
             raise Exception("unknown test_platform {}".format(test_platform))
 
@@ -903,16 +896,17 @@ def make_job_description(config, tests):
             attr_try_name: try_name,
         })
 
         jobdesc = {}
         name = '{}-{}'.format(test['test-platform'], test['test-name'])
         jobdesc['name'] = name
         jobdesc['label'] = label
         jobdesc['description'] = test['description']
+        jobdesc['when'] = test.get('when', {})
         jobdesc['attributes'] = attributes
         jobdesc['dependencies'] = {'build': build_label}
 
         if test['mozharness']['requires-signed-builds'] is True:
             jobdesc['dependencies']['build-signing'] = test['build-signing-label']
 
         jobdesc['expires-after'] = test['expires-after']
         jobdesc['routes'] = []
@@ -931,26 +925,20 @@ def make_job_description(config, tests):
         }
         jobdesc['treeherder'] = {
             'symbol': test['treeherder-symbol'],
             'kind': 'test',
             'tier': test['tier'],
             'platform': test.get('treeherder-machine-platform', test['build-platform']),
         }
 
-        if test.get('when'):
-            jobdesc['when'] = test['when']
-        else:
-            schedules = [platform_family(test['build-platform'])]
-            if config.params['project'] != 'try':
-                # for non-try branches, include SETA
-                jobdesc['optimization'] = {'skip-unless-schedules-or-seta': schedules}
-            else:
-                # otherwise just use skip-unless-schedules
-                jobdesc['optimization'] = {'skip-unless-schedules': schedules}
+        # run SETA unless this is a try push
+        jobdesc['optimizations'] = optimizations = []
+        if config.params['project'] != 'try':
+            optimizations.append(['seta'])
 
         run = jobdesc['run'] = {}
         run['using'] = 'mozharness-test'
         run['test'] = test
 
         jobdesc['worker-type'] = test.pop('worker-type')
 
         yield jobdesc
--- a/taskcluster/taskgraph/try_option_syntax.py
+++ b/taskcluster/taskgraph/try_option_syntax.py
@@ -252,24 +252,26 @@ def parse_message(message):
     # machines but not overload machines with every try push. Therefore, we add
     # this temporary option to be able to push jobs to tc-worker.
     parser.add_argument('-w', '--taskcluster-worker',
                         dest='taskcluster_worker', action='store_true', default=False)
 
     # In order to run test jobs multiple times
     parser.add_argument('--rebuild', dest='trigger_tests', type=int, default=1)
     args, _ = parser.parse_known_args(parts)
-    return vars(args)
+    return args
 
 
 class TryOptionSyntax(object):
 
-    def __init__(self, parameters, full_task_graph):
+    def __init__(self, message, full_task_graph):
         """
-        Apply the try options in parameters.
+        Parse a "try syntax" formatted commit message.  This is the old "-b do -p
+        win32 -u all" format.  Aliases are applied to map short names to full
+        names.
 
         The resulting object has attributes:
 
         - build_types: a list containing zero or more of 'opt' and 'debug'
         - platforms: a list of selected platform names, or None for all
         - unittests: a list of tests, of the form given below, or None for all
         - jobs: a list of requested job names, or None for all
         - trigger_tests: the number of times tests should be triggered (--rebuild)
@@ -298,32 +300,38 @@ class TryOptionSyntax(object):
         self.interactive = False
         self.notifications = None
         self.talos_trigger_tests = 0
         self.env = []
         self.profile = False
         self.tag = None
         self.no_retry = False
 
-        options = parameters['try_options']
-        self.jobs = self.parse_jobs(options['jobs'])
-        self.build_types = self.parse_build_types(options['build_types'], full_task_graph)
-        self.platforms = self.parse_platforms(options['platforms'], full_task_graph)
+        parts = split_try_msg(message)
+        if not parts:
+            return None
+
+        args = parse_message(message)
+        assert args is not None
+
+        self.jobs = self.parse_jobs(args.jobs)
+        self.build_types = self.parse_build_types(args.build_types, full_task_graph)
+        self.platforms = self.parse_platforms(args.platforms, full_task_graph)
         self.unittests = self.parse_test_option(
-            "unittest_try_name", options['unittests'], full_task_graph)
-        self.talos = self.parse_test_option("talos_try_name", options['talos'], full_task_graph)
-        self.trigger_tests = options['trigger_tests']
-        self.interactive = options['interactive']
-        self.notifications = options['notifications']
-        self.talos_trigger_tests = options['talos_trigger_tests']
-        self.env = options['env']
-        self.profile = options['profile']
-        self.tag = options['tag']
-        self.no_retry = options['no_retry']
-        self.include_nightly = options['include_nightly']
+            "unittest_try_name", args.unittests, full_task_graph)
+        self.talos = self.parse_test_option("talos_try_name", args.talos, full_task_graph)
+        self.trigger_tests = args.trigger_tests
+        self.interactive = args.interactive
+        self.notifications = args.notifications
+        self.talos_trigger_tests = args.talos_trigger_tests
+        self.env = args.env
+        self.profile = args.profile
+        self.tag = args.tag
+        self.no_retry = args.no_retry
+        self.include_nightly = args.include_nightly
 
     def parse_jobs(self, jobs_arg):
         if not jobs_arg or jobs_arg == ['all']:
             return None
         expanded = []
         for job in jobs_arg:
             expanded.extend(j.strip() for j in job.split(','))
         return expanded
deleted file mode 100644
--- a/taskcluster/taskgraph/util/platforms.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-from __future__ import absolute_import, print_function, unicode_literals
-
-import re
-
-# platform family is extracted from build platform by taking the alphabetic prefix
-# and then translating win -> windows
-_platform_re = re.compile(r'^[a-z]*')
-_renames = {
-    'win': 'windows'
-}
-
-
-def platform_family(build_platform):
-    """Given a build platform, return the platform family (linux, macosx, etc.)"""
-    family = _platform_re.match(build_platform).group(0)
-    return _renames.get(family, family)