Bug 1418058: [taskcluster] Update actions to take and use graph configuration; r=dustin
author: Tom Prince <mozilla@hocat.ca>
date: Mon, 09 Apr 2018 21:30:36 -0600
changeset: 782000 c165de96347189065b24211ee8589bf95f6f25d8
parent: 781999 9f9c19355f64734bc7da7a015c21e516dad1fd5f
child: 782001 b173e6c0621fbb71d523d87d9ac8f33575184a1c
push id: 106461
push user: bmo:mratcliffe@mozilla.com
push date: Fri, 13 Apr 2018 23:37:27 +0000
reviewers: dustin
bugs: 1418058
milestone: 61.0a1
Differential Revision: https://phabricator.services.mozilla.com/D916
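In short: every action callback now takes the graph configuration as its second
positional argument, and the helpers in util.py use it to build Taskcluster
index namespaces from the configured trust domain rather than a hard-coded
'gecko' prefix. A minimal sketch of the new calling convention (illustrative
only; example_action is invented here, the real callbacks are updated in the
diff below)::

    def example_action(parameters, graph_config, input, task_group_id, task_id, task):
        # graph_config carries per-project graph settings; the util.py helpers
        # read 'trust-domain' and 'project-repo-param-prefix' from it.
        namespace = '{}.v2.{}.pushlog-id.{}.decision'.format(
            graph_config['trust-domain'],  # previously the literal 'gecko'
            parameters['project'],
            parameters['pushlog_id'])
        print "decision task namespace: " + namespace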
taskcluster/docs/actions.rst
taskcluster/taskgraph/actions/add_new_jobs.py
taskcluster/taskgraph/actions/add_talos.py
taskcluster/taskgraph/actions/backfill.py
taskcluster/taskgraph/actions/mochitest_retrigger.py
taskcluster/taskgraph/actions/registry.py
taskcluster/taskgraph/actions/release_promotion.py
taskcluster/taskgraph/actions/rerun.py
taskcluster/taskgraph/actions/retrigger.py
taskcluster/taskgraph/actions/run_missing_tests.py
taskcluster/taskgraph/actions/util.py
--- a/taskcluster/docs/actions.rst
+++ b/taskcluster/docs/actions.rst
@@ -47,17 +47,17 @@ To create a new callback action you must
 
   @register_callback_action(
       name='hello',
       title='Say Hello',
       symbol='hw',  # Show the callback task in treeherder as 'hw'
       description="Simple **proof-of-concept** callback action",
       order=10000,  # Order in which it should appear relative to other actions
   )
-  def hello_world_action(parameters, input, task_group_id, task_id, task):
+  def hello_world_action(parameters, graph_config, input, task_group_id, task_id, task):
       # parameters is an instance of taskgraph.parameters.Parameters
       # it carries decision task parameters from the original decision task.
       # input, task_id, and task should all be None
       print "Hello was triggered from taskGroupId: " + taskGroupId
 
 Callback actions are configured in-tree to generate 3 artifacts when they run.
 These artifacts are similar to the artifacts generated by decision tasks since
 callback actions are basically mini decision tasks. The artifacts are:
@@ -107,17 +107,17 @@ The example action below will be shown i
   @register_callback_action(
       name='retrigger',
       title='Retrigger',
       symbol='re-c',  # Show the callback task in treeherder as 're-c'
       description="Create a clone of the task",
       order=1,
       context=[{'platform': 'linux'}]
   )
-  def retrigger_action(parameters, input, task_group_id, task_id, task):
+  def retrigger_action(parameters, graph_config, input, task_group_id, task_id, task):
       # input will be None
       print "Retriggering: {}".format(task_id)
       print "task definition: {}".format(task)
 
 When the ``context`` parameter is set, the ``task_id`` and ``task`` parameters
 will be provided to the callback. In this case the ``task_id`` and ``task``
 parameters will be the ``taskId`` and *task definition* of the task from whose
 context-menu the action was triggered.
@@ -170,17 +170,17 @@ both ``input`` and ``context``::
                   'type': 'boolean',
                  'default': False,
               }
           },
           'required': ['priority', 'runTalos'],
           'additionalProperties': False,
       },
   )
-  def retrigger_action(parameters, input, task_group_id, task_id, task):
+  def retrigger_action(parameters, graph_config, input, task_group_id, task_id, task):
       print "Create all pruned tasks with priority: {}".format(input['priority'])
       if input['runTalos']:
           print "Also running talos jobs..."
 
 When the ``schema`` parameter is given the callback will always be called with
 an ``input`` parameter that satisfies the previously given JSON schema.
 It is encouraged to set ``additionalProperties: false`` and to specify all
 properties as ``required`` in the JSON schema. Furthermore, it's good
@@ -206,17 +206,17 @@ The feature is illustrated below::
       name='hello',
       title='Say Hello',
       symbol='hw',  # Show the callback task in treeherder as 'hw'
       description="Simple **proof-of-concept** callback action",
       order=2,
       # Define an action that is only included if this is a push to try
       available=lambda parameters: parameters.get('project', None) == 'try',
   )
-  def try_only_action(parameters, input, task_group_id, task_id, task):
+  def try_only_action(parameters, graph_config, input, task_group_id, task_id, task):
       print "My try-only action"
 
 Properties of ``parameters`` are documented in the
 :doc:`parameters section <parameters>`. You can also examine the
 ``parameters.yml`` artifact created by decision tasks.
 
 
 Creating a Custom Action Task
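To complement the updated docs above, here is a self-contained sketch of a
callback registered under the new signature (the action itself is hypothetical
and not part of this patch)::

    from .registry import register_callback_action

    @register_callback_action(
        name='show-trust-domain',
        title='Show Trust Domain',
        symbol='td',
        description="Print the trust domain this graph was built for",
        order=10000,
    )
    def show_trust_domain_action(parameters, graph_config, input, task_group_id, task_id, task):
        # graph_config is threaded through from trigger_action_callback
        print "trust domain: " + graph_config['trust-domain']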
--- a/taskcluster/taskgraph/actions/add_new_jobs.py
+++ b/taskcluster/taskgraph/actions/add_new_jobs.py
@@ -26,18 +26,19 @@ from .util import (create_tasks, fetch_g
                 'description': 'An array of task labels',
                 'items': {
                     'type': 'string'
                 }
             }
         }
     }
 )
-def add_new_jobs_action(parameters, input, task_group_id, task_id, task):
-    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(parameters)
+def add_new_jobs_action(parameters, graph_config, input, task_group_id, task_id, task):
+    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
+        parameters, graph_config)
 
     to_run = []
     for elem in input['tasks']:
         if elem in full_task_graph.tasks:
             to_run.append(elem)
         else:
             raise Exception('{} was not found in the task-graph'.format(elem))
 
--- a/taskcluster/taskgraph/actions/add_talos.py
+++ b/taskcluster/taskgraph/actions/add_talos.py
@@ -31,18 +31,19 @@ logger = logging.getLogger(__name__)
                 'maximum': 6,
                 'title': 'Times',
                 'description': 'How many times to run each task.',
             }
         },
         'additionalProperties': False
     },
 )
-def add_all_talos(parameters, input, task_group_id, task_id, task):
-    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(parameters)
+def add_all_talos(parameters, graph_config, input, task_group_id, task_id, task):
+    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
+        parameters, graph_config)
 
     times = input.get('times', 1)
     for i in xrange(times):
         to_run = [label
                   for label, entry
                   in full_task_graph.tasks.iteritems() if 'talos_try_name' in entry.attributes]
 
         create_tasks(to_run, full_task_graph, label_to_taskid, parameters, decision_task_id)
--- a/taskcluster/taskgraph/actions/backfill.py
+++ b/taskcluster/taskgraph/actions/backfill.py
@@ -43,17 +43,17 @@ logger = logging.getLogger(__name__)
                 'description': ('The number of previous pushes before the current '
                                 'push to attempt to trigger this task on.')
             }
         },
         'additionalProperties': False
     },
     available=lambda parameters: parameters.get('project', None) != 'try'
 )
-def backfill_action(parameters, input, task_group_id, task_id, task):
+def backfill_action(parameters, graph_config, input, task_group_id, task_id, task):
     label = task['metadata']['name']
     pushes = []
     depth = input.get('depth', 5)
     end_id = int(parameters['pushlog_id']) - 1
 
     while True:
         start_id = max(end_id - depth, 0)
         pushlog_url = PUSHLOG_TMPL.format(parameters['head_repository'], start_id, end_id)
@@ -77,17 +77,17 @@ def backfill_action(parameters, input, t
                     'public/full-task-graph.json')
             _, full_task_graph = TaskGraph.from_json(full_task_graph)
             label_to_taskid = get_artifact_from_index(
                     INDEX_TMPL.format(parameters['project'], push),
                     'public/label-to-taskid.json')
             push_params = get_artifact_from_index(
                     INDEX_TMPL.format(parameters['project'], push),
                     'public/parameters.yml')
-            push_decision_task_id = find_decision_task(push_params)
+            push_decision_task_id = find_decision_task(push_params, graph_config)
         except HTTPError as e:
             logger.info('Skipping {} due to missing index artifacts! Error: {}'.format(push, e))
             continue
 
         if label in full_task_graph.tasks.keys():
             create_tasks(
                     [label], full_task_graph, label_to_taskid,
                     push_params, push_decision_task_id, push)
--- a/taskcluster/taskgraph/actions/mochitest_retrigger.py
+++ b/taskcluster/taskgraph/actions/mochitest_retrigger.py
@@ -74,18 +74,19 @@ logger = logging.getLogger(__name__)
                 'description': 'Extra gecko (about:config) preferences to use for this run',
                 'additionalProperties': {'type': 'string'}
             }
         },
         'additionalProperties': False,
         'required': ['path']
     }
 )
-def mochitest_retrigger_action(parameters, input, task_group_id, task_id, task):
-    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(parameters)
+def mochitest_retrigger_action(parameters, graph_config, input, task_group_id, task_id, task):
+    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
+        parameters, graph_config)
 
     pre_task = full_task_graph.tasks[task['metadata']['name']]
 
     # fix up the task's dependencies, similar to how optimization would
     # have done in the decision
     dependencies = {name: label_to_taskid[label]
                     for name, label in pre_task.dependencies.iteritems()}
     new_task_definition = resolve_task_references(pre_task.label, pre_task.task, dependencies)
--- a/taskcluster/taskgraph/actions/registry.py
+++ b/taskcluster/taskgraph/actions/registry.py
@@ -285,17 +285,17 @@ def trigger_action_callback(task_group_i
     if not cb:
         raise Exception('Unknown callback: {}. Known callbacks: {}'.format(
             callback, callbacks))
 
     if test:
         create.testing = True
         taskcluster.testing = True
 
-    cb(Parameters(**parameters), input, task_group_id, task_id, task)
+    cb(Parameters(**parameters), graph_config, input, task_group_id, task_id, task)
 
 
 def _load(graph_config):
     # Load all modules from this folder, relying on the side-effects of register_
     # functions to populate the action registry.
     actions_dir = os.path.dirname(__file__)
     for f in os.listdir(actions_dir):
         if f.endswith('.py') and f not in ('__init__.py', 'registry.py', 'util.py'):
--- a/taskcluster/taskgraph/actions/release_promotion.py
+++ b/taskcluster/taskgraph/actions/release_promotion.py
@@ -207,17 +207,17 @@ def is_release_promotion_available(param
             'release_eta': {
                 'type': 'string',
                 'default': '',
             },
         },
         "required": ['release_promotion_flavor', 'build_number'],
     }
 )
-def release_promotion_action(parameters, input, task_group_id, task_id, task):
+def release_promotion_action(parameters, graph_config, input, task_group_id, task_id, task):
     release_promotion_flavor = input['release_promotion_flavor']
     promotion_config = RELEASE_PROMOTION_CONFIG[release_promotion_flavor]
     release_history = {}
     product = promotion_config['product']
 
     next_version = str(input.get('next_version') or '')
     if release_promotion_flavor in VERSION_BUMP_FLAVORS:
         # We force str() the input, hence the 'None'
@@ -257,18 +257,18 @@ def release_promotion_action(parameters,
     # make parameters read-write
     parameters = dict(parameters)
     # Build previous_graph_ids from ``previous_graph_ids``, ``pushlog_id``,
     # or ``revision``.
     previous_graph_ids = input.get('previous_graph_ids')
     if not previous_graph_ids:
         revision = input.get('revision')
         parameters['pushlog_id'] = parameters['pushlog_id'] or \
-            find_hg_revision_pushlog_id(parameters, revision)
-        previous_graph_ids = [find_decision_task(parameters)]
+            find_hg_revision_pushlog_id(parameters, graph_config, revision)
+        previous_graph_ids = [find_decision_task(parameters, graph_config)]
 
     # Download parameters from the first decision task
     parameters = get_artifact(previous_graph_ids[0], "public/parameters.yml")
     # Download and combine full task graphs from each of the previous_graph_ids.
     # Sometimes previous relpro action tasks will add tasks, like partials,
     # that didn't exist in the first full_task_graph, so combining them is
     # important. The rightmost graph should take precedence in the case of
     # conflicts.
--- a/taskcluster/taskgraph/actions/rerun.py
+++ b/taskcluster/taskgraph/actions/rerun.py
@@ -32,19 +32,20 @@ RERUN_STATES = ('exception', 'failed')
     ),
     order=1,
     context=[{}],
     schema={
         'type': 'object',
         'properties': {}
     }
 )
-def rerun_action(parameters, input, task_group_id, task_id, task):
+def rerun_action(parameters, graph_config, input, task_group_id, task_id, task):
     parameters = dict(parameters)
-    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(parameters)
+    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
+        parameters, graph_config)
     label = task['metadata']['name']
     if task_id not in label_to_taskid.values():
         logger.error(
             "Refusing to rerun {}: taskId {} not in decision task {} label_to_taskid!".format(
                 label, task_id, decision_task_id
             )
         )
 
--- a/taskcluster/taskgraph/actions/retrigger.py
+++ b/taskcluster/taskgraph/actions/retrigger.py
@@ -43,18 +43,19 @@ logger = logging.getLogger(__name__)
                 'minimum': 1,
                 'maximum': 6,
                 'title': 'Times',
                 'description': 'How many times to run each task.',
             }
         }
     }
 )
-def retrigger_action(parameters, input, task_group_id, task_id, task):
-    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(parameters)
+def retrigger_action(parameters, graph_config, input, task_group_id, task_id, task):
+    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
+        parameters, graph_config)
 
     label = task['metadata']['name']
     with_downstream = ' '
     to_run = [label]
 
     if input.get('downstream'):
         to_run = full_task_graph.graph.transitive_closure(set(to_run), reverse=True).nodes
         to_run = to_run & set(label_to_taskid.keys())
--- a/taskcluster/taskgraph/actions/run_missing_tests.py
+++ b/taskcluster/taskgraph/actions/run_missing_tests.py
@@ -23,18 +23,19 @@ logger = logging.getLogger(__name__)
         "Run tests in the selected push that were optimized away, usually by SETA."
         "\n"
         "This action is for use on pushes that will be merged into another branch,"
         "to check that optimization hasn't hidden any failures."
     ),
     order=100,  # Useful for sheriffs, but not top of the list
     context=[],  # Applies to decision task
 )
-def run_missing_tests(parameters, input, task_group_id, task_id, task):
-    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(parameters)
+def run_missing_tests(parameters, graph_config, input, task_group_id, task_id, task):
+    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
+        parameters, graph_config)
     target_tasks = get_artifact(decision_task_id, "public/target-tasks.json")
 
     # The idea here is to schedule all tasks of the `test` kind that were
     # targeted but did not appear in the final task-graph -- those were the
     # optimized tasks.
     to_run = []
     already_run = 0
     for label in target_tasks:
--- a/taskcluster/taskgraph/actions/util.py
+++ b/taskcluster/taskgraph/actions/util.py
@@ -19,28 +19,31 @@ from taskgraph.taskgraph import TaskGrap
 from taskgraph.optimize import optimize_task_graph
 from taskgraph.util.taskcluster import get_session, find_task_id, get_artifact, list_tasks
 
 logger = logging.getLogger(__name__)
 
 PUSHLOG_TMPL = '{}/json-pushes?version=2&changeset={}&tipsonly=1&full=1'
 
 
-def find_decision_task(parameters):
+def find_decision_task(parameters, graph_config):
     """Given the parameters for this action, find the taskId of the decision
     task"""
-    return find_task_id('gecko.v2.{}.pushlog-id.{}.decision'.format(
+    return find_task_id('{}.v2.{}.pushlog-id.{}.decision'.format(
+        graph_config['trust-domain'],
         parameters['project'],
         parameters['pushlog_id']))
 
 
-def find_hg_revision_pushlog_id(parameters, revision):
+def find_hg_revision_pushlog_id(parameters, graph_config, revision):
     """Given the parameters for this action and a revision, find the
     pushlog_id of the revision."""
-    pushlog_url = PUSHLOG_TMPL.format(parameters['head_repository'], revision)
+
+    repo_param = '{}head_repository'.format(graph_config['project-repo-param-prefix'])
+    pushlog_url = PUSHLOG_TMPL.format(parameters[repo_param], revision)
     r = requests.get(pushlog_url)
     r.raise_for_status()
     pushes = r.json()['pushes'].keys()
     if len(pushes) != 1:
         raise RuntimeError(
             "Unable to find a single pushlog_id for {} revision {}: {}".format(
                 parameters[repo_param], revision, pushes
             )
@@ -58,27 +61,28 @@ def find_existing_tasks_from_previous_ki
         label_to_taskid = get_artifact(previous_graph_id, "public/label-to-taskid.json")
         kind_labels = set(t.label for t in full_task_graph.tasks.itervalues()
                           if t.attributes['kind'] not in rebuild_kinds)
         for label in set(label_to_taskid.keys()).intersection(kind_labels):
             existing_tasks[label] = label_to_taskid[label]
     return existing_tasks
 
 
-def fetch_graph_and_labels(parameters):
-    decision_task_id = find_decision_task(parameters)
+def fetch_graph_and_labels(parameters, graph_config):
+    decision_task_id = find_decision_task(parameters, graph_config)
 
     # First grab the graph and labels generated during the initial decision task
     full_task_graph = get_artifact(decision_task_id, "public/full-task-graph.json")
     _, full_task_graph = TaskGraph.from_json(full_task_graph)
     label_to_taskid = get_artifact(decision_task_id, "public/label-to-taskid.json")
 
     # Now fetch any modifications made by action tasks and swap out new tasks
     # for old ones
-    namespace = 'gecko.v2.{}.pushlog-id.{}.actions'.format(
+    namespace = '{}.v2.{}.pushlog-id.{}.actions'.format(
+        graph_config['trust-domain'],
         parameters['project'],
         parameters['pushlog_id'])
     for action in list_tasks(namespace):
         try:
             run_label_to_id = get_artifact(action, "public/label-to-taskid.json")
             label_to_taskid.update(run_label_to_id)
         except HTTPError as e:
             logger.info('Skipping {} due to missing artifact! Error: {}'.format(action, e))
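Taken together, the helpers in util.py read two graph-configuration keys. A
sketch of plausible values for Gecko, inferred from the strings that were
hard-coded before this change (not quoted from an actual config file)::

    graph_config = {
        'trust-domain': 'gecko',          # index namespaces become gecko.v2.<project>...
        'project-repo-param-prefix': '',  # '' + 'head_repository' -> parameters['head_repository']
    }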