Backed out 7 changesets (bug 1641971) for breaking Gecko Decision Task opt. CLOSED TREE
author: Narcis Beleuzu <nbeleuzu@mozilla.com>
Wed, 03 Jun 2020 05:30:51 +0300
changeset 533627 ced2b80350838f6f7236242457dac3854ed690c3
parent 533626 233196e86939e937728f2b8e81277e548cc2844e
child 533628 2931388675f0eb1c426408881f12946f810e19da
push id: 37474
push user: abutkovits@mozilla.com
push date: Wed, 03 Jun 2020 09:29:05 +0000
treeherder: mozilla-central@bf162b065e1f
bugs: 1641971
milestone: 79.0a1
backs out: a039e60a26d201d3dfc7c43ee524d55b99fe7dd0
e7d4c490c31ac8c12af6af87a117362601b43a59
f2322981b294deb461fd109f20a291d9827dcd91
d123b08d2c2095b7a3dd62a33e65d89887803260
57772c0b9b3e1a41891d27020f79b0cb578641d8
11a887f73dbbac601a84d4ee2f45b507a79b6c4f
e9c7a59d4bc5a2bb46d36df9ab6f7b7b23a8f769
Backed out 7 changesets (bug 1641971) for breaking Gecko Decision Task opt. CLOSED TREE

Backed out changeset a039e60a26d2 (bug 1641971)
Backed out changeset e7d4c490c31a (bug 1641971)
Backed out changeset f2322981b294 (bug 1641971)
Backed out changeset d123b08d2c20 (bug 1641971)
Backed out changeset 57772c0b9b3e (bug 1641971)
Backed out changeset 11a887f73dbb (bug 1641971)
Backed out changeset e9c7a59d4bc5 (bug 1641971)
python/mozperftest/mozperftest/mach_commands.py
python/mozperftest/mozperftest/runner.py
taskcluster/ci/perftest/kind.yml
taskcluster/ci/source-test/shadow-scheduler.yml
taskcluster/docs/taskgraph.rst
taskcluster/mach_commands.py
taskcluster/taskgraph/actions/registry.py
taskcluster/taskgraph/actions/util.py
taskcluster/taskgraph/create.py
taskcluster/taskgraph/decision.py
taskcluster/taskgraph/generator.py
taskcluster/taskgraph/optimize/__init__.py
taskcluster/taskgraph/test/test_create.py
taskcluster/taskgraph/test/test_optimize.py
taskcluster/taskgraph/test/test_util_parameterization.py
taskcluster/taskgraph/transforms/docker_image.py
taskcluster/taskgraph/transforms/perftest.py
taskcluster/taskgraph/transforms/release_started.py
taskcluster/taskgraph/transforms/source_test.py
taskcluster/taskgraph/transforms/task.py
taskcluster/taskgraph/util/parameterization.py
taskcluster/taskgraph/util/schema.py
--- a/python/mozperftest/mozperftest/mach_commands.py
+++ b/python/mozperftest/mozperftest/mach_commands.py
@@ -48,23 +48,20 @@ class Perftest(MachCommandBase):
             perftest_parameters = {}
             parser = get_perftest_parser()()
             for name, value in kwargs.items():
                 # ignore values that are set to default
                 if parser.get_default(name) == value:
                     continue
                 perftest_parameters[name] = value
 
-            parameters = {
-                "try_task_config": {
-                    "tasks": [_TRY_PLATFORMS[platform]],
-                    "perftest-options": perftest_parameters,
-                },
-                "try_mode": "try_task_config",
-            }
+            parameters = {"try_options": {"perftest": perftest_parameters}}
+            try_config = {"tasks": [_TRY_PLATFORMS[platform]]}
+            parameters["try_task_config"] = try_config
+            parameters["try_mode"] = "try_task_config"
 
             task_config = {"parameters": parameters, "version": 2}
             push_to_try("perftest", "perftest", try_task_config=task_config)
             return
 
         # run locally
         MachCommandBase._activate_virtualenv(self)
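For reference, the restored push-to-try flow records the perftest options under a top-level "try_options" parameter rather than inside try_task_config. A minimal sketch of the resulting version-2 task config (the platform label and options below are hypothetical):

    task_config = {
        "version": 2,
        "parameters": {
            "try_options": {"perftest": {"flavor": "desktop-browser"}},
            "try_task_config": {"tasks": ["test-linux1804-64-shippable/opt-perftest"]},
            "try_mode": "try_task_config",
        },
    }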
 
--- a/python/mozperftest/mozperftest/runner.py
+++ b/python/mozperftest/mozperftest/runner.py
@@ -18,20 +18,19 @@ populated via a local --push-to-try call
 The --push-to-try flow is:
 
 - a user calls ./mach perftest --push-to-try --option1 --option2
 - a new push to try commit is made and includes all options in its parameters
 - a generic TC job triggers the perftest by calling this module with --on-try
 - run_test() grabs the parameters artifact and converts them into args for
   perftest
 """
-import json
+import sys
 import os
 import shutil
-import sys
 
 
 HERE = os.path.dirname(__file__)
 SRC_ROOT = os.path.join(HERE, "..", "..", "..")
 SEARCH_PATHS = [
     "python/mach",
     "python/mozboot",
     "python/mozbuild",
@@ -73,30 +72,48 @@ def _setup_path():
     for path in SEARCH_PATHS:
         path = os.path.abspath(path)
         path = os.path.join(SRC_ROOT, path)
         if not os.path.exists(path):
             raise IOError("Can't find %s" % path)
         sys.path.insert(0, path)
 
 
+def _get_params():
+    """Fetches the parameters.yml artifact and returns its content.
+    """
+    # XXX - this already exists in taskcluster code
+    # in a more robust way, but for now let's not depend on it.
+    import requests
+    import yaml
+
+    root = os.environ.get(
+        "TASKCLUSTER_ROOT_URL", "https://firefox-ci-tc.services.mozilla.com"
+    )
+    # set by require-decision-task-id
+    tid = os.environ["DECISION_TASK_ID"]
+    url = root + "/api/queue/v1/task/%s/artifacts/public/parameters.yml" % tid
+    response = requests.get(url)
+    return yaml.load(response.text)
+
+
 def run_tests(mach_cmd, **kwargs):
     """This tests runner can be used directly via main or via Mach.
 
-    When the --on-try option is used, the test runner looks at the
-    `PERFTEST_OPTIONS` environment variable that contains all options passed by
-    the user via a ./mach perftest --push-to-try call.
+    When the --on-try option is used, the test runner looks for the
+    `parameters.yml` artifact that contains all options passed by
+    the user via a ./mach perftest --push-to-try call.
     """
     _setup_path()
     on_try = kwargs.pop("on_try", False)
 
     # trying to get the arguments from the task params
     if on_try:
-        try_options = json.loads(os.environ['PERFTEST_OPTIONS'])
-        kwargs.update(try_options)
+        params = _get_params()
+        kwargs.update(params["try_options"]["perftest"])
 
     from mozperftest.utils import build_test_list, install_package
     from mozperftest import MachEnvironment, Metadata
 
     flavor = kwargs["flavor"]
     kwargs["tests"], tmp_dir = build_test_list(
         kwargs["tests"], randomized=flavor != "doc"
     )
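With this backout, the on-try path no longer reads a PERFTEST_OPTIONS environment variable; it downloads the decision task's parameters.yml artifact instead. A sketch of the URL that _get_params builds (the task id here is hypothetical; in a real task it comes from DECISION_TASK_ID):

    import os

    root = os.environ.get(
        "TASKCLUSTER_ROOT_URL", "https://firefox-ci-tc.services.mozilla.com"
    )
    tid = "abc123DEFghiJKLmnop"  # normally os.environ["DECISION_TASK_ID"]
    url = root + "/api/queue/v1/task/%s/artifacts/public/parameters.yml" % tid
    # url -> https://firefox-ci-tc.services.mozilla.com/api/queue/v1/task/
    #        abc123DEFghiJKLmnop/artifacts/public/parameters.yml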
--- a/taskcluster/ci/perftest/kind.yml
+++ b/taskcluster/ci/perftest/kind.yml
@@ -4,26 +4,26 @@
 ---
 loader: taskgraph.loader.transform:loader
 
 kind-dependencies:
     - toolchain
     - build
 
 transforms:
-    - taskgraph.transforms.perftest:transforms
     - taskgraph.transforms.source_test:transforms
     - taskgraph.transforms.job:transforms
     - taskgraph.transforms.task:transforms
 
 job-defaults:
     run-on-projects: []
     treeherder:
         kind: other
         tier: 3
+    require-decision-task-id: true
     worker:
         taskcluster-proxy: true
         max-run-time: 3600
         env:
             USE_ARTIFACT: '1'
         artifacts:
             - type: directory
               name: public/build
--- a/taskcluster/ci/source-test/shadow-scheduler.yml
+++ b/taskcluster/ci/source-test/shadow-scheduler.yml
@@ -7,21 +7,20 @@ job-defaults:
     worker-type: t-linux-xlarge-source
     worker:
         docker-image: {in-tree: "lint"}
         max-run-time: 3600
         artifacts:
             - type: file
               name: public/shadow-scheduler/optimized_tasks.list
               path: /builds/worker/optimized_tasks.list
-        env:
-            DECISION_TASK_ID: {task-reference: <decision>}
     treeherder:
         kind: other
         tier: 3
+    require-decision-task-id: true
     run-on-projects: ['autoland']
     run:
         using: mach
         mach: taskgraph optimized -v -p task-id=$DECISION_TASK_ID --output-file /builds/worker/optimized_tasks.list
         sparse-profile: taskgraph
 
 relevant_tests:
     description: Runs the relevant_tests optimization strategy instead of the default.
--- a/taskcluster/docs/taskgraph.rst
+++ b/taskcluster/docs/taskgraph.rst
@@ -180,20 +180,18 @@ using simple parameterized values, as fo
     just before the ``queue.createTask`` call is made.  For example, an
     artifact expiration might be specified as ``{"relative-datestamp": "1
     year"}``.
 
 ``{"task-reference": "string containing <dep-name>"}``
     The task definition may contain "task references" of this form.  These will
     be replaced during the optimization step, with the appropriate taskId for
     the named dependency substituted for ``<dep-name>`` in the string.
-    Additionally, `decision` and `self` can be used as dependency names to refer
-    to the decision task, and the task itself.  Multiple labels may be
-    substituted in a single string, and ``<<>`` can be used to escape a literal
-    ``<``.
+    Multiple labels may be substituted in a single string, and ``<<>`` can be
+    used to escape a literal ``<``.
 
 ``{"artifact-reference": "..<dep-name/artifact/name>.."}``
     Similar to a ``task-reference``, but this substitutes a URL to the queue's
     ``getLatestArtifact`` API method (for which a GET will redirect to the
     artifact itself).
 
 .. _taskgraph-graph-config:
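After the backout, task references resolve only against a task's named dependencies; the special `decision` and `self` names are gone. An illustrative call against the restored three-argument signature (the label and ids are made up):

    from taskgraph.util.parameterization import resolve_task_references

    task_def = {"payload": {"env": {"PARENT": {"task-reference": "<build>"}}}}
    resolved = resolve_task_references("example-task", task_def, {"build": "abc123"})
    assert resolved == {"payload": {"env": {"PARENT": "abc123"}}}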
 
--- a/taskcluster/mach_commands.py
+++ b/taskcluster/mach_commands.py
@@ -429,22 +429,19 @@ class MachCommands(MachCommandBase):
         import taskgraph.actions
 
         try:
             self.setup_logging(quiet=options['quiet'], verbose=options['verbose'])
             parameters = taskgraph.parameters.parameters_loader(options['parameters'])
 
             tgg = taskgraph.generator.TaskGraphGenerator(
                 root_dir=options.get('root'),
-                parameters=parameters,
-            )
+                parameters=parameters)
 
-            actions = taskgraph.actions.render_actions_json(
-                tgg.parameters, tgg.graph_config, decision_task_id="DECISION-TASK",
-                )
+            actions = taskgraph.actions.render_actions_json(tgg.parameters, tgg.graph_config)
             print(json.dumps(actions, sort_keys=True, indent=2, separators=(',', ': ')))
         except Exception:
             traceback.print_exc()
             sys.exit(1)
 
 
 @CommandProvider
 class TaskClusterImagesProvider(MachCommandBase):
--- a/taskcluster/taskgraph/actions/registry.py
+++ b/taskcluster/taskgraph/actions/registry.py
@@ -2,17 +2,19 @@
 
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import json
+import os
 import re
+from slugid import nice as slugid
 from types import FunctionType
 from collections import namedtuple
 
 from six import text_type
 
 from taskgraph import create
 from taskgraph.config import load_graph_config
 from taskgraph.util import taskcluster, yaml, hash
@@ -146,17 +148,17 @@ def register_callback_action(name, title
             assert 1 <= len(symbol) <= 25, 'symbol must be between 1 and 25 characters'
         assert isinstance(symbol, text_type), 'symbol must be a string'
 
         assert not mem['registered'], 'register_callback_action must be used as decorator'
         if not cb_name:
             cb_name = name
         assert cb_name not in callbacks, 'callback name {} is not unique'.format(cb_name)
 
-        def action_builder(parameters, graph_config, decision_task_id):
+        def action_builder(parameters, graph_config):
             if not available(parameters):
                 return None
 
             actionPerm = 'generic' if generic else cb_name
 
             # gather up the common decision-task-supplied data for this action
             repo_param = '{}head_repository'.format(graph_config['project-repo-param-prefix'])
             repository = {
@@ -167,25 +169,26 @@ def register_callback_action(name, title
 
             revision = parameters['{}head_rev'.format(graph_config['project-repo-param-prefix'])]
             push = {
                 'owner': 'mozilla-taskcluster-maintenance@mozilla.com',
                 'pushlog_id': parameters['pushlog_id'],
                 'revision': revision,
             }
 
+            task_group_id = os.environ.get('TASK_ID', slugid())
             match = re.match(r'https://(hg.mozilla.org)/(.*?)/?$', parameters[repo_param])
             if not match:
                 raise Exception('Unrecognized {}'.format(repo_param))
             action = {
                 'name': name,
                 'title': title,
                 'description': description,
                 # target taskGroupId (the task group this decision task is creating)
-                'taskGroupId': decision_task_id,
+                'taskGroupId': task_group_id,
                 'cb_name': cb_name,
                 'symbol': symbol,
             }
 
             rv = {
                 'name': name,
                 'title': title,
                 'description': description,
@@ -235,34 +238,34 @@ def register_callback_action(name, title
         actions.append(Action(order, cb_name, generic, action_builder))
 
         mem['registered'] = True
         callbacks[cb_name] = cb
         return cb
     return register_callback
 
 
-def render_actions_json(parameters, graph_config, decision_task_id):
+def render_actions_json(parameters, graph_config):
     """
     Render JSON object for the ``public/actions.json`` artifact.
 
     Parameters
     ----------
     parameters : taskgraph.parameters.Parameters
         Decision task parameters.
 
     Returns
     -------
     dict
         JSON object representation of the ``public/actions.json`` artifact.
     """
     assert isinstance(parameters, Parameters), 'requires instance of Parameters'
     actions = []
     for action in sorted(_get_actions(graph_config), key=lambda action: action.order):
-        action = action.action_builder(parameters, graph_config, decision_task_id)
+        action = action.action_builder(parameters, graph_config)
         if action:
             assert is_json(action), 'action must be a JSON compatible object'
             actions.append(action)
     return {
         'version': 1,
         'variables': {},
         'actions': actions,
     }
--- a/taskcluster/taskgraph/actions/util.py
+++ b/taskcluster/taskgraph/actions/util.py
@@ -157,18 +157,17 @@ def create_tasks(graph_config, to_run, f
         {l: modifier(full_task_graph[l]) for l in target_graph.nodes},
         target_graph)
     target_task_graph.for_each_task(update_parent)
     if decision_task_id and decision_task_id != os.environ.get('TASK_ID'):
         target_task_graph.for_each_task(update_dependencies)
     optimized_task_graph, label_to_taskid = optimize_task_graph(target_task_graph,
                                                                 params,
                                                                 to_run,
-                                                                label_to_taskid,
-                                                                decision_task_id)
+                                                                label_to_taskid)
     write_artifact('task-graph{}.json'.format(suffix), optimized_task_graph.to_json())
     write_artifact('label-to-taskid{}.json'.format(suffix), label_to_taskid)
     write_artifact('to-run{}.json'.format(suffix), list(to_run))
     create.create_tasks(
         graph_config,
         optimized_task_graph,
         label_to_taskid,
         params,
--- a/taskcluster/taskgraph/create.py
+++ b/taskcluster/taskgraph/create.py
@@ -1,53 +1,58 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import concurrent.futures as futures
 import json
+import os
 import sys
 import logging
 
 from slugid import nice as slugid
 from taskgraph.util.parameterization import resolve_timestamps
 from taskgraph.util.time import current_json_time
 from taskgraph.util.taskcluster import get_session, CONCURRENCY
 
 logger = logging.getLogger(__name__)
 
 # this is set to true for `mach taskgraph action-callback --test`
 testing = False
 
 
-def create_tasks(graph_config, taskgraph, label_to_taskid, params, decision_task_id):
+def create_tasks(graph_config, taskgraph, label_to_taskid, params, decision_task_id=None):
     taskid_to_label = {t: l for l, t in label_to_taskid.iteritems()}
 
+    decision_task_id = decision_task_id or os.environ.get('TASK_ID')
+
     # when running as an actual decision task, we use the decision task's
     # taskId as the taskGroupId.  The process that created the decision task
     # helpfully placed it in this same taskGroup.  If there is no $TASK_ID,
     # fall back to a slugid
+    task_group_id = decision_task_id or slugid()
     scheduler_id = '{}-level-{}'.format(graph_config['trust-domain'], params['level'])
 
     # Add the taskGroupId, schedulerId and optionally the decision task
     # dependency
     for task_id in taskgraph.graph.nodes:
         task_def = taskgraph.tasks[task_id].task
 
         # if this task has no dependencies *within* this taskgraph, make it
         # depend on this decision task. If it has another dependency within
         # the taskgraph, then it already implicitly depends on the decision
         # task.  The result is that tasks do not start immediately; if this
         # loop fails halfway through, none of the already-created tasks run.
-        if not any(t in taskgraph.tasks for t in task_def.get('dependencies', [])):
-            task_def.setdefault('dependencies', []).append(decision_task_id)
+        if decision_task_id:
+            if not any(t in taskgraph.tasks for t in task_def.get('dependencies', [])):
+                task_def.setdefault('dependencies', []).append(decision_task_id)
 
-        task_def['taskGroupId'] = decision_task_id
+        task_def['taskGroupId'] = task_group_id
         task_def['schedulerId'] = scheduler_id
 
     # If `testing` is True, then run without parallelization
     concurrency = CONCURRENCY if not testing else 1
     session = get_session()
     with futures.ThreadPoolExecutor(concurrency) as e:
         fs = {}
 
--- a/taskcluster/taskgraph/decision.py
+++ b/taskcluster/taskgraph/decision.py
@@ -124,20 +124,16 @@ PER_PROJECT_PARAMETERS = {
 try_task_config_schema = Schema({
     Required('tasks'): [text_type],
     Optional('browsertime'): bool,
     Optional('chemspill-prio'): bool,
     Optional('disable-pgo'): bool,
     Optional('env'): {text_type: text_type},
     Optional('gecko-profile'): bool,
     Optional(
-        "perftest-options",
-        description="Options passed from `mach perftest` to try."
-    ): object,
-    Optional(
         "optimize-strategies",
         description="Alternative optimization strategies to use instead of the default. "
                     "A module path pointing to a dict to be use as the `strategy_override` "
                     "argument in `taskgraph.optimize.optimize_task_graph`."
     ): text_type,
     Optional('rebuild'): int,
     Optional('use-artifact-builds'): bool,
     Optional(
@@ -207,33 +203,26 @@ def taskgraph_decision(options, paramete
      * generating a set of artifacts to memorialize the graph
      * calling TaskCluster APIs to create the graph
     """
 
     parameters = parameters or (
         lambda graph_config: get_decision_parameters(graph_config, options)
     )
 
-    decision_task_id = os.environ['TASK_ID']
-
     # create a TaskGraphGenerator instance
     tgg = TaskGraphGenerator(
         root_dir=options.get('root'),
-        parameters=parameters,
-        decision_task_id=decision_task_id,
-    )
+        parameters=parameters)
 
     # write out the parameters used to generate this graph
     write_artifact('parameters.yml', dict(**tgg.parameters))
 
     # write out the public/actions.json file
-    write_artifact(
-        'actions.json',
-        render_actions_json(tgg.parameters, tgg.graph_config, decision_task_id),
-    )
+    write_artifact('actions.json', render_actions_json(tgg.parameters, tgg.graph_config))
 
     # write out the full graph for reference
     full_task_json = tgg.full_task_graph.to_json()
     write_artifact('full-task-graph.json', full_task_json)
 
     # write out the public/runnable-jobs.json file
     write_artifact('runnable-jobs.json', full_task_graph_to_runnable_jobs(full_task_json))
 
@@ -255,23 +244,17 @@ def taskgraph_decision(options, paramete
     write_artifact('task-graph.json', tgg.morphed_task_graph.to_json())
     write_artifact('label-to-taskid.json', tgg.label_to_taskid)
 
     # write bugbug scheduling information if it was invoked
     if len(push_schedules) > 0:
         write_artifact("bugbug-push-schedules.json", push_schedules.popitem()[1])
 
     # actually create the graph
-    create_tasks(
-        tgg.graph_config,
-        tgg.morphed_task_graph,
-        tgg.label_to_taskid,
-        tgg.parameters,
-        decision_task_id=decision_task_id,
-    )
+    create_tasks(tgg.graph_config, tgg.morphed_task_graph, tgg.label_to_taskid, tgg.parameters)
 
 
 def get_decision_parameters(graph_config, options):
     """
     Load parameters from the command-line options for 'taskgraph decision'.
     This also applies per-project parameters, based on the given project.
 
     """
--- a/taskcluster/taskgraph/generator.py
+++ b/taskcluster/taskgraph/generator.py
@@ -102,31 +102,28 @@ class TaskGraphGenerator(object):
     various phases of generation, is available via properties.  This encourages
     the provision of all generation inputs at instance construction time.
     """
 
     # Task-graph generation is implemented as a Python generator that yields
     # each "phase" of generation.  This allows some mach subcommands to short-
     # circuit generation of the entire graph by never completing the generator.
 
-    def __init__(
-            self, root_dir, parameters, decision_task_id="<decision-task>", target_kind=None,
-    ):
+    def __init__(self, root_dir, parameters, target_kind=None):
         """
         @param root_dir: root directory, with subdirectories for each kind
         @param parameters: parameters for this task-graph generation, or callable
             taking a `GraphConfig` and returning parameters
         @type parameters: Union[Parameters, Callable[[GraphConfig], Parameters]]
         """
         if root_dir is None:
             root_dir = 'taskcluster/ci'
         self.root_dir = ensure_text(root_dir)
         self._parameters = parameters
         self._target_kind = target_kind
-        self._decision_task_id = decision_task_id
 
         # start the generator
         self._run = self._run()
         self._run_results = {}
 
     @property
     def parameters(self):
         """
@@ -339,17 +336,16 @@ class TaskGraphGenerator(object):
                                     parameters['try_task_config'].get('optimize-strategies'))
         if strategies:
             strategies = find_object(strategies)
 
         optimized_task_graph, label_to_taskid = optimize_task_graph(
             target_task_graph,
             parameters,
             do_not_optimize,
-            self._decision_task_id,
             existing_tasks=existing_tasks,
             strategy_override=strategies,
         )
 
         yield verifications('optimized_task_graph', optimized_task_graph, graph_config)
 
         morphed_task_graph, label_to_taskid = morph(
             optimized_task_graph, label_to_taskid, parameters, graph_config)
--- a/taskcluster/taskgraph/optimize/__init__.py
+++ b/taskcluster/taskgraph/optimize/__init__.py
@@ -35,17 +35,17 @@ def register_strategy(name, args=()):
             registry[name] = cls(*args)
             if not hasattr(registry[name], 'description'):
                 registry[name].description = name
         return cls
     return wrap
 
 
 def optimize_task_graph(target_task_graph, params, do_not_optimize,
-                        decision_task_id, existing_tasks=None, strategy_override=None):
+                        existing_tasks=None, strategy_override=None):
     """
     Perform task optimization, returning a taskgraph and a map from label to
     assigned taskId, including replacement tasks.
     """
     label_to_taskid = {}
     if not existing_tasks:
         existing_tasks = {}
 
@@ -68,17 +68,17 @@ def optimize_task_graph(target_task_grap
         params=params,
         do_not_optimize=do_not_optimize,
         label_to_taskid=label_to_taskid,
         existing_tasks=existing_tasks,
         removed_tasks=removed_tasks)
 
     return get_subgraph(
             target_task_graph, removed_tasks, replaced_tasks,
-            label_to_taskid, decision_task_id), label_to_taskid
+            label_to_taskid), label_to_taskid
 
 
 def _get_optimizations(target_task_graph, strategies):
     def optimizations(label):
         task = target_task_graph.tasks[label]
         if task.optimization:
             opt_by, arg = task.optimization.items()[0]
             strategy = strategies[opt_by]
@@ -181,19 +181,17 @@ def replace_tasks(target_task_graph, par
                 replaced.add(label)
             opt_counts[opt_by] += 1
             continue
 
     _log_optimization('replaced', opt_counts)
     return replaced
 
 
-def get_subgraph(
-    target_task_graph, removed_tasks, replaced_tasks, label_to_taskid, decision_task_id,
-):
+def get_subgraph(target_task_graph, removed_tasks, replaced_tasks, label_to_taskid):
     """
     Return the subgraph of target_task_graph consisting only of
     non-optimized tasks and edges between them.
 
     To avoid losing track of taskIds for tasks optimized away, this method
     simultaneously substitutes real taskIds for task labels in the graph, and
     populates each task definition's `dependencies` key with the appropriate
     taskIds.  Task references are resolved in the process.
@@ -227,23 +225,17 @@ def get_subgraph(
         # Add remaining soft dependencies
         if task.soft_dependencies:
             named_task_dependencies.update({
                 label: label_to_taskid[label]
                 for label in task.soft_dependencies
                 if label in label_to_taskid and label not in omit
             })
 
-        task.task = resolve_task_references(
-            task.label,
-            task.task,
-            task_id=task.task_id,
-            decision_task_id=decision_task_id,
-            dependencies=named_task_dependencies,
-        )
+        task.task = resolve_task_references(task.label, task.task, named_task_dependencies)
         deps = task.task.setdefault('dependencies', [])
         deps.extend(sorted(named_task_dependencies.itervalues()))
         tasks_by_taskid[task.task_id] = task
 
     # resolve edges to taskIds
     edges_by_taskid = (
         (label_to_taskid.get(left), label_to_taskid.get(right), name)
         for (left, right, name) in target_task_graph.graph.edges
--- a/taskcluster/taskgraph/test/test_create.py
+++ b/taskcluster/taskgraph/test/test_create.py
@@ -1,16 +1,17 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import sys
 import unittest
+import os
 import mock
 import pytest
 
 from taskgraph import create
 from taskgraph.config import GraphConfig
 from taskgraph.graph import Graph
 from taskgraph.taskgraph import TaskGraph
 from taskgraph.task import Task
@@ -18,101 +19,92 @@ from taskgraph.task import Task
 from mozunit import main
 
 GRAPH_CONFIG = GraphConfig({'trust-domain': 'domain'}, '/var/empty')
 
 
 class TestCreate(unittest.TestCase):
 
     def setUp(self):
+        self.old_task_id = os.environ.get('TASK_ID')
+        if 'TASK_ID' in os.environ:
+            del os.environ['TASK_ID']
         self.created_tasks = {}
         self.old_create_task = create.create_task
         create.create_task = self.fake_create_task
 
     def tearDown(self):
         create.create_task = self.old_create_task
+        if self.old_task_id:
+            os.environ['TASK_ID'] = self.old_task_id
+        elif 'TASK_ID' in os.environ:
+            del os.environ['TASK_ID']
 
     def fake_create_task(self, session, task_id, label, task_def):
         self.created_tasks[task_id] = task_def
 
     @pytest.mark.xfail(
         sys.version_info >= (3, 0), reason="python3 migration is not complete"
     )
     def test_create_tasks(self):
         tasks = {
             'tid-a': Task(kind='test', label='a', attributes={}, task={'payload': 'hello world'}),
             'tid-b': Task(kind='test', label='b', attributes={}, task={'payload': 'hello world'}),
         }
         label_to_taskid = {'a': 'tid-a', 'b': 'tid-b'}
         graph = Graph(nodes={'tid-a', 'tid-b'}, edges={('tid-a', 'tid-b', 'edge')})
         taskgraph = TaskGraph(tasks, graph)
 
-        create.create_tasks(
-            GRAPH_CONFIG,
-            taskgraph,
-            label_to_taskid,
-            {"level": "4"},
-            decision_task_id="decisiontask",
-        )
+        create.create_tasks(GRAPH_CONFIG, taskgraph, label_to_taskid, {'level': '4'})
 
         for tid, task in self.created_tasks.iteritems():
             self.assertEqual(task['payload'], 'hello world')
             self.assertEqual(task['schedulerId'], 'domain-level-4')
             # make sure the dependencies exist, at least
             for depid in task.get('dependencies', []):
                 if depid == 'decisiontask':
                     # Don't look for decisiontask here
                     continue
                 self.assertIn(depid, self.created_tasks)
 
     @pytest.mark.xfail(
         sys.version_info >= (3, 0), reason="python3 migration is not complete"
     )
     def test_create_task_without_dependencies(self):
         "a task with no dependencies depends on the decision task"
+        os.environ['TASK_ID'] = 'decisiontask'
         tasks = {
             'tid-a': Task(kind='test', label='a', attributes={}, task={'payload': 'hello world'}),
         }
         label_to_taskid = {'a': 'tid-a'}
         graph = Graph(nodes={'tid-a'}, edges=set())
         taskgraph = TaskGraph(tasks, graph)
 
-        create.create_tasks(
-            GRAPH_CONFIG,
-            taskgraph,
-            label_to_taskid,
-            {"level": "4"},
-            decision_task_id="decisiontask",
-        )
+        create.create_tasks(GRAPH_CONFIG, taskgraph, label_to_taskid, {'level': '4'})
 
         for tid, task in self.created_tasks.iteritems():
-            self.assertEqual(task.get('dependencies'), ["decisiontask"])
+            self.assertEqual(task.get('dependencies'), [os.environ['TASK_ID']])
 
     @pytest.mark.xfail(
         sys.version_info >= (3, 0), reason="python3 migration is not complete"
     )
     @mock.patch('taskgraph.create.create_task')
     def test_create_tasks_fails_if_create_fails(self, create_task):
         "creat_tasks fails if a single create_task call fails"
+        os.environ['TASK_ID'] = 'decisiontask'
         tasks = {
             'tid-a': Task(kind='test', label='a', attributes={}, task={'payload': 'hello world'}),
         }
         label_to_taskid = {'a': 'tid-a'}
         graph = Graph(nodes={'tid-a'}, edges=set())
         taskgraph = TaskGraph(tasks, graph)
 
         def fail(*args):
             print("UHOH")
             raise RuntimeError('oh noes!')
         create_task.side_effect = fail
 
         with self.assertRaises(RuntimeError):
-            create.create_tasks(
-                GRAPH_CONFIG,
-                taskgraph,
-                label_to_taskid,
-                {"level": "4"},
-                decision_task_id="decisiontask",
-            )
+            create.create_tasks(GRAPH_CONFIG, taskgraph, label_to_taskid, {'level': '4'})
 
 
 if __name__ == '__main__':
     main()
--- a/taskcluster/taskgraph/test/test_optimize.py
+++ b/taskcluster/taskgraph/test/test_optimize.py
@@ -235,18 +235,17 @@ class TestOptimize(unittest.TestCase):
             exp_label_to_taskid={'t1': 'e1'})
 
     def assert_subgraph(self, graph, removed_tasks, replaced_tasks,
                         label_to_taskid, exp_subgraph, exp_label_to_taskid):
         self.maxDiff = None
         optimize.slugid = ('tid{}'.format(i) for i in range(1, 10)).next
         try:
             got_subgraph = optimize.get_subgraph(graph, removed_tasks,
-                                                 replaced_tasks, label_to_taskid,
-                                                 "DECISION-TASK")
+                                                 replaced_tasks, label_to_taskid)
         finally:
             optimize.slugid = slugid
         self.assertEqual(got_subgraph.graph, exp_subgraph.graph)
         self.assertEqual(got_subgraph.tasks, exp_subgraph.tasks)
         self.assertEqual(label_to_taskid, exp_label_to_taskid)
 
     @pytest.mark.xfail(
         sys.version_info >= (3, 0), reason="python3 migration is not complete"
--- a/taskcluster/taskgraph/test/test_util_parameterization.py
+++ b/taskcluster/taskgraph/test/test_util_parameterization.py
@@ -47,22 +47,17 @@ class TestTimestamps(unittest.TestCase):
 
 @pytest.mark.xfail(
     sys.version_info >= (3, 0), reason="python3 migration is not complete"
 )
 class TestTaskRefs(unittest.TestCase):
 
     def do(self, input, output):
         taskid_for_edge_name = {'edge%d' % n: 'tid%d' % n for n in range(1, 4)}
-        self.assertEqual(
-            resolve_task_references(
-                "subject", input, "tid-self", "tid-decision", taskid_for_edge_name,
-            ),
-            output,
-        )
+        self.assertEqual(resolve_task_references('subject', input, taskid_for_edge_name), output)
 
     def test_no_change(self):
         "resolve_task_references does nothing when there are no task references"
         self.do({'in-a-list': ['stuff', {'property': '<edge1>'}]},
                 {'in-a-list': ['stuff', {'property': '<edge1>'}]})
 
     def test_in_list(self):
         "resolve_task_references resolves task references in a list"
@@ -89,51 +84,35 @@ class TestTaskRefs(unittest.TestCase):
         self.do({'escape': {'task-reference': '<<><edge3>>'}},
                 {'escape': '<tid3>'})
 
     def test_multikey(self):
         "resolve_task_references is ignored when there is another key in the dict"
         self.do({'escape': {'task-reference': '<edge3>', 'another-key': True}},
                 {'escape': {'task-reference': '<edge3>', 'another-key': True}})
 
-    def test_self(self):
-        "resolve_task_references resolves `self` to the provided task id"
-        self.do({'escape': {'task-reference': '<self>'}},
-                {'escape': 'tid-self'})
-
-    def test_decision(self):
-        "resolve_task_references resolves `decision` to the provided decision task id"
-        self.do({'escape': {'task-reference': '<decision>'}},
-                {'escape': 'tid-decision'})
-
     def test_invalid(self):
         "resolve_task_references raises a KeyError on reference to an invalid task"
         self.assertRaisesRegexp(
             KeyError,
             "task 'subject' has no dependency named 'no-such'",
-            lambda: resolve_task_references(
-                "subject", {"task-reference": "<no-such>"}, "tid-self", "tid-decision", {}
-            ),
+            lambda: resolve_task_references('subject', {'task-reference': '<no-such>'}, {})
         )
 
 
 @pytest.mark.xfail(
     sys.version_info >= (3, 0), reason="python3 migration is not complete"
 )
 class TestArtifactRefs(unittest.TestCase):
 
     def do(self, input, output):
         taskid_for_edge_name = {'edge%d' % n: 'tid%d' % n for n in range(1, 4)}
         with mock.patch.dict(os.environ, {'TASKCLUSTER_ROOT_URL': 'https://tc-tests.localhost'}):
-            self.assertEqual(
-                resolve_task_references(
-                    "subject", input, "tid-self", "tid-decision", taskid_for_edge_name
-                ),
-                output,
-            )
+            self.assertEqual(resolve_task_references('subject', input, taskid_for_edge_name),
+                             output)
 
     def test_in_list(self):
         "resolve_task_references resolves artifact references in a list"
         self.do(
             {'in-a-list': [
                 'stuff', {'artifact-reference': '<edge1/public/foo/bar>'}]},
             {'in-a-list': [
                 'stuff', 'https://tc-tests.localhost/api/queue/v1'
@@ -151,49 +130,17 @@ class TestArtifactRefs(unittest.TestCase
     def test_in_string(self):
         "resolve_task_references resolves artifact references embedded in a string"
         self.do(
             {'stuff': {'artifact-reference': '<edge1/public/filename> and <edge2/public/bar>'}},
             {'stuff': 'https://tc-tests.localhost/api/queue/v1'
                 '/task/tid1/artifacts/public/filename and '
                 'https://tc-tests.localhost/api/queue/v1/task/tid2/artifacts/public/bar'})
 
-    def test_self(self):
-        "resolve_task_references raises KeyError on artifact references to `self`"
-        self.assertRaisesRegexp(
-            KeyError,
-            "task 'subject' can't reference artifacts of self",
-            lambda: resolve_task_references(
-                "subject", {"artifact-reference": "<self/public/artifact>"},
-                "tid-self", "tid-decision",  {}
-            ),
-        )
-
-    def test_decision(self):
-        "resolve_task_references resolves `decision` to the provided decision task id"
-        self.do(
-            {'stuff': {'artifact-reference': '<decision/public/artifact>'}},
-            {'stuff': 'https://tc-tests.localhost/api/queue/v1/task/tid-decision/'
-             'artifacts/public/artifact'},
-        )
-
     def test_invalid(self):
-        "resolve_task_references raises a KeyError on reference to an invalid task"
-        self.assertRaisesRegexp(
-            KeyError,
-            "task 'subject' has no dependency named 'no-such'",
-            lambda: resolve_task_references(
-                "subject", {"artifact-reference": "<no-such/public/artifact>"},
-                "tid-self", "tid-decision", {}
-            ),
-        )
-
-    def test_badly_formed(self):
         "resolve_task_references ignores badly-formatted artifact references"
         for inv in ['<edge1>', 'edge1/foo>', '<edge1>/foo', '<edge1>foo']:
-            resolved = resolve_task_references(
-                "subject", {"artifact-reference": inv}, "tid-self", "tid-decision", {}
-            )
+            resolved = resolve_task_references('subject', {'artifact-reference': inv}, {})
             self.assertEqual(resolved, inv)
 
 
 if __name__ == '__main__':
     main()
--- a/taskcluster/taskgraph/transforms/docker_image.py
+++ b/taskcluster/taskgraph/transforms/docker_image.py
@@ -236,19 +236,19 @@ def fill_template(config, tasks):
 
         if packages:
             deps = taskdesc.setdefault('dependencies', {})
             for p in sorted(packages):
                 deps[p] = 'packages-{}'.format(p)
 
         if parent:
             deps = taskdesc.setdefault('dependencies', {})
-            deps['parent'] = 'build-docker-image-{}'.format(parent)
+            deps[parent] = 'build-docker-image-{}'.format(parent)
             worker['env']['DOCKER_IMAGE_PARENT_TASK'] = {
-                'task-reference': '<parent>',
+                'task-reference': '<{}>'.format(parent),
             }
         if 'index' in task:
             taskdesc['index'] = task['index']
 
         if task.get('cache', True) and not taskgraph.fast:
             taskdesc['cache'] = {
                 'type': 'docker-images.v2',
                 'name': image_name,
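The parent-image dependency is again keyed by the parent image's own name rather than a fixed `parent` key, so the task reference names it directly. For a hypothetical parent image "base":

    parent = "base"
    deps = {parent: "build-docker-image-{}".format(parent)}
    env = {"DOCKER_IMAGE_PARENT_TASK": {"task-reference": "<{}>".format(parent)}}
    # deps -> {"base": "build-docker-image-base"}
    # env["DOCKER_IMAGE_PARENT_TASK"] -> {"task-reference": "<base>"}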
deleted file mode 100644
--- a/taskcluster/taskgraph/transforms/perftest.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-"""
-This transform passes options from `mach perftest` to the corresponding task.
-"""
-
-from __future__ import absolute_import, print_function, unicode_literals
-
-import json
-
-import six
-
-from taskgraph.transforms.base import TransformSequence
-
-transforms = TransformSequence()
-
-
-@transforms.add
-def pass_perftest_options(config, jobs):
-    for job in jobs:
-        env = job.setdefault('worker', {}).setdefault('env', {})
-        env['PERFTEST_OPTIONS'] = six.ensure_text(
-            json.dumps(config.params["try_task_config"].get('perftest-options'))
-        )
-        yield job
--- a/taskcluster/taskgraph/transforms/release_started.py
+++ b/taskcluster/taskgraph/transforms/release_started.py
@@ -2,16 +2,17 @@
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 """
 Add notifications via taskcluster-notify for release tasks
 """
 
 from __future__ import absolute_import, print_function, unicode_literals
 
+import os
 from pipes import quote as shell_quote
 
 from taskgraph.transforms.base import TransformSequence
 from taskgraph.util.schema import resolve_keyed_by
 
 
 transforms = TransformSequence()
 
@@ -30,21 +31,21 @@ def add_notifications(config, jobs):
             '--version', config.params['version'],
             '--product', job['shipping-product'],
             '--revision', config.params['head_rev'],
             '--build-number', str(config.params['build_number']),
             '--repo', config.params['head_repository'],
         ]
         for address in emails:
             command += ['--address', address]
-        command += [
-            # We wrap this in `{'task-reference': ...}` below
-            '--task-group-id', '<decision>',
-        ]
+        if 'TASK_ID' in os.environ:
+            command += [
+                '--task-group-id', os.environ['TASK_ID'],
+            ]
 
         job['scopes'] = ['notify:email:{}'.format(address) for address in emails]
         job['run'] = {
             'using': 'mach',
             'sparse-profile': 'mach',
-            'mach': {'task-reference': ' '.join(map(shell_quote, command))},
+            'mach': ' '.join(map(shell_quote, command)),
         }
 
         yield job
--- a/taskcluster/taskgraph/transforms/source_test.py
+++ b/taskcluster/taskgraph/transforms/source_test.py
@@ -5,16 +5,17 @@ Source-test jobs can run on multiple pla
 with either `platform` or a list of `platforms`, and set the appropriate
 treeherder configuration and attributes for that platform.
 """
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import copy
 import os
+import six
 from six import text_type
 
 from taskgraph.transforms.base import TransformSequence
 from taskgraph.transforms.job import job_description_schema
 from taskgraph.util.attributes import keymatch
 from taskgraph.util.schema import (
     resolve_keyed_by,
     optionally_keyed_by,
@@ -51,16 +52,18 @@ source_test_description_schema = Schema(
     # job descriptions.
     Required('worker-type'): optionally_keyed_by(
         'platform', job_description_schema['worker-type']),
 
     Required('worker'): optionally_keyed_by(
         'platform', job_description_schema['worker']),
 
     Optional('python-version'): [int],
+    # If true, the DECISION_TASK_ID env will be populated.
+    Optional('require-decision-task-id'): bool,
 
     # A list of artifacts to install from 'fetch' tasks.
     Optional('fetches'): {
         text_type: optionally_keyed_by(
             'platform', job_description_schema['fetches'][text_type]),
     },
 
 })
@@ -213,16 +216,32 @@ def handle_shell(config, jobs):
         for field in fields:
             resolve_keyed_by(job, field, item_name=job['name'])
 
         del job['shell']
         yield job
 
 
 @transforms.add
+def add_decision_task_id_to_env(config, jobs):
+    """
+    Creates the `DECISION_TASK_ID` environment variable in tasks that set the
+    `require-decision-task-id` config.
+    """
+    for job in jobs:
+        if not job.pop('require-decision-task-id', False):
+            yield job
+            continue
+
+        env = job['worker'].setdefault('env', {})
+        env['DECISION_TASK_ID'] = six.ensure_text(os.environ.get('TASK_ID', ''))
+        yield job
+
+
+@transforms.add
 def set_code_review_env(config, jobs):
     """
     Add a CODE_REVIEW environment variable when running in code-review bot mode
     """
     is_code_review = config.params['target_tasks_method'] == 'codereview'
 
     for job in jobs:
         attrs = job.get('attributes', {})
--- a/taskcluster/taskgraph/transforms/task.py
+++ b/taskcluster/taskgraph/transforms/task.py
@@ -36,17 +36,17 @@ from taskgraph.util.schema import (
 )
 from taskgraph.util.partners import get_partners_to_be_published
 from taskgraph.util.scriptworker import (
     BALROG_ACTIONS,
     get_release_config,
 )
 from taskgraph.util.signed_artifacts import get_signed_artifacts
 from taskgraph.util.workertypes import worker_type_implementation
-from voluptuous import Any, Required, Optional, Extra, Match, All, NotIn
+from voluptuous import Any, Required, Optional, Extra, Match
 from taskgraph import GECKO, MAX_DEPENDENCIES
 from ..util import docker as dockerutil
 from ..util.workertypes import get_worker_type
 
 RUN_TASK = os.path.join(GECKO, 'taskcluster', 'scripts', 'run-task')
 
 SCCACHE_GCS_PROJECT = 'sccache-3'
 
@@ -77,22 +77,17 @@ task_description_schema = Schema({
     Optional('attributes'): {text_type: object},
 
     # relative path (from config.path) to the file task was defined in
     Optional('job-from'): text_type,
 
     # dependencies of this task, keyed by name; these are passed through
     # verbatim and subject to the interpretation of the Task's get_dependencies
     # method.
-    Optional('dependencies'): {
-        All(
-            text_type,
-            NotIn(["self", "decision"], "Can't use 'self' or 'decision' as dependency names."),
-        ): object,
-    },
+    Optional('dependencies'): {text_type: object},
 
     # Soft dependencies of this task, as a list of tasks labels
     Optional('soft-dependencies'): [text_type],
 
     Optional('requires'): Any('all-completed', 'all-resolved'),
 
     # expiration and deadline times, relative to task creation, with units
     # (e.g., "14 days").  Defaults are set based on the project.
@@ -1764,17 +1759,17 @@ def build_task(config, tasks):
         task['worker-type'] = '/'.join([provisioner_id, worker_type])
         project = config.params['project']
 
         routes = task.get('routes', [])
         scopes = [s.format(level=level, project=project) for s in task.get('scopes', [])]
 
         # set up extra
         extra = task.get('extra', {})
-        extra['parent'] = {'task-reference': '<decision>'}
+        extra['parent'] = os.environ.get('TASK_ID', '')
         task_th = task.get('treeherder')
         if task_th:
             extra.setdefault('treeherder-platform', task_th['platform'])
             treeherder = extra.setdefault('treeherder', {})
 
             machine_platform, collection = task_th['platform'].split('/', 1)
             treeherder['machine'] = {'platform': machine_platform}
             treeherder['collection'] = {collection: True}
--- a/taskcluster/taskgraph/util/parameterization.py
+++ b/taskcluster/taskgraph/util/parameterization.py
@@ -30,59 +30,46 @@ def _recurse(val, param_fns):
 
 def resolve_timestamps(now, task_def):
     """Resolve all instances of `{'relative-datestamp': '..'}` in the given task definition"""
     return _recurse(task_def, {
         'relative-datestamp': lambda v: json_time_from_now(v, now),
     })
 
 
-def resolve_task_references(label, task_def, task_id, decision_task_id, dependencies):
+def resolve_task_references(label, task_def, dependencies):
     """Resolve all instances of
       {'task-reference': '..<..>..'}
     and
       {'artifact-reference`: '..<dependency/artifact/path>..'}
     in the given task definition, using the given dependencies
 
     """
 
     def task_reference(val):
         def repl(match):
             key = match.group(1)
-            if key == 'self':
-                return task_id
-            elif key == 'decision':
-                return decision_task_id
             try:
                 return dependencies[key]
             except KeyError:
                 # handle escaping '<'
                 if key == '<':
                     return key
                 raise KeyError("task '{}' has no dependency named '{}'".format(label, key))
 
         return TASK_REFERENCE_PATTERN.sub(repl, val)
 
     def artifact_reference(val):
         def repl(match):
             dependency, artifact_name = match.group(1, 2)
 
-            if dependency == 'self':
-                raise KeyError(
-                    "task '{}' can't reference artifacts of self".format(label)
-                )
-            elif dependency == 'decision':
-                task_id = decision_task_id
-            else:
-                try:
-                    task_id = dependencies[dependency]
-                except KeyError:
-                    raise KeyError(
-                        "task '{}' has no dependency named '{}'".format(label, dependency)
-                    )
+            try:
+                task_id = dependencies[dependency]
+            except KeyError:
+                raise KeyError("task '{}' has no dependency named '{}'".format(label, dependency))
 
             assert artifact_name.startswith('public/'), \
                 "artifact-reference only supports public artifacts, not `{}`".format(artifact_name)
             return get_artifact_url(task_id, artifact_name)
 
         return ARTIFACT_REFERENCE_PATTERN.sub(repl, val)
 
     return _recurse(task_def, {
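For completeness, a sketch of the restored artifact-reference path, which likewise accepts only named dependencies (the root URL and ids mirror the unit tests above):

    import os

    from taskgraph.util.parameterization import resolve_task_references

    os.environ["TASKCLUSTER_ROOT_URL"] = "https://tc-tests.localhost"
    resolved = resolve_task_references(
        "example-task",
        {"log": {"artifact-reference": "<build/public/logs/live.log>"}},
        {"build": "tid1"},
    )
    assert resolved["log"].endswith("/task/tid1/artifacts/public/logs/live.log")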
--- a/taskcluster/taskgraph/util/schema.py
+++ b/taskcluster/taskgraph/util/schema.py
@@ -140,26 +140,24 @@ def check_schema(schema):
 
     def whitelisted(path):
         return any(f(path) for f in WHITELISTED_SCHEMA_IDENTIFIERS)
 
     def iter(path, sch):
         def check_identifier(path, k):
             if k in (text_type, text_type, voluptuous.Extra):
                 pass
-            elif isinstance(k, voluptuous.NotIn):
-                pass
             elif isinstance(k, text_type):
                 if not identifier_re.match(k) and not whitelisted(path):
                     raise RuntimeError(
                         'YAML schemas should use dashed lower-case identifiers, '
                         'not {!r} @ {}'.format(k, path))
             elif isinstance(k, (voluptuous.Optional, voluptuous.Required)):
                 check_identifier(path, k.schema)
-            elif isinstance(k, (voluptuous.Any, voluptuous.All)):
+            elif isinstance(k, voluptuous.Any):
                 for v in k.validators:
                     check_identifier(path, v)
             elif not whitelisted(path):
                 raise RuntimeError(
                     'Unexpected type in YAML schema: {} @ {}'.format(
                         type(k).__name__, path))
 
         if isinstance(sch, collections.Mapping):