Bug 1638990: [taskgraph] Make `taskgraph.action.util.create_task_from_def` generate the task id; r=ahal
author Tom Prince <mozilla@hocat.ca>
Mon, 20 Jul 2020 19:27:36 +0000
changeset 541334 6330322fb7f8a1cd9e306bc7188fb03d4ba1a0ae
parent 541333 fb44bf643588dc466231bcfa891752849a9ab7c6
child 541335 625d545ca409fb29de49444b601a812120471f7f
push id 122172
push user mozilla@hocat.ca
push date Mon, 20 Jul 2020 20:16:13 +0000
treeherder autoland@7b1128e12c72
reviewers ahal
bugs 1638990
milestone 80.0a1
Bug 1638990: [taskgraph] Make `taskgraph.action.util.create_task_from_def` generate the task id; r=ahal

All the callers generate a fresh task id, so refactor that logic into the function.

Differential Revision: https://phabricator.services.mozilla.com/D84084
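In short, callers no longer mint a task id themselves; `create_task_from_def` now generates one internally via `slugid`. A minimal sketch of the call-site change (names taken from the diff below):

    # Before: every caller imported slugid and passed a fresh id.
    from slugid import nice as slugid
    create_task_from_def(slugid(), task_definition, level)

    # After: the helper generates the id itself.
    create_task_from_def(task_definition, level)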
taskcluster/taskgraph/actions/isolate_test.py
taskcluster/taskgraph/actions/retrigger.py
taskcluster/taskgraph/actions/retrigger_custom.py
taskcluster/taskgraph/actions/util.py
--- a/taskcluster/taskgraph/actions/isolate_test.py
+++ b/taskcluster/taskgraph/actions/isolate_test.py
@@ -9,17 +9,16 @@ from __future__ import absolute_import, 
 import copy
 import json
 import logging
 import os
 import re
 
 import six
 
-from slugid import nice as slugid
 from taskgraph.util.taskcluster import list_artifacts, get_artifact, get_task_definition
 from ..util.parameterization import resolve_task_references
 from .registry import register_callback_action
 from .util import create_task_from_def, fetch_graph_and_labels, add_args_to_command
 
 logger = logging.getLogger(__name__)
 
 
@@ -143,17 +142,17 @@ def create_isolate_failure_tasks(task_de
     # through the failure_group loop.
 
     command = copy.deepcopy(task_definition['payload']['command'])
 
     th_dict['groupSymbol'] = th_dict['groupSymbol'] + '-I'
     th_dict['tier'] = 3
 
     for i in range(times):
-        create_task_from_def(slugid(), task_definition, level)
+        create_task_from_def(task_definition, level)
 
     if repeatable_task:
         task_definition['payload']['maxRunTime'] = 3600 * 3
 
     for failure_group in failures:
         if len(failures[failure_group]) == 0:
             continue
         if failure_group == 'dirs':
@@ -195,17 +194,17 @@ def create_isolate_failure_tasks(task_de
             else:
                 task_definition['payload']['env']['MOZHARNESS_TEST_PATHS'] = six.ensure_text(
                     json.dumps({suite: [failure_path]}, sort_keys=True))
 
             logger.info("Creating task for path {} with command {}".format(
                 failure_path,
                 task_definition['payload']['command']))
             for i in range(times):
-                create_task_from_def(slugid(), task_definition, level)
+                create_task_from_def(task_definition, level)
 
 
 @register_callback_action(
     name='isolate-test-failures',
     title='Isolate test failures in job',
     symbol='it',
     description="Re-run Tests for original manifest, directories and tests for failing tests.",
     order=150,
--- a/taskcluster/taskgraph/actions/retrigger.py
+++ b/taskcluster/taskgraph/actions/retrigger.py
@@ -6,17 +6,16 @@
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import sys
 
 import logging
 import textwrap
 
-from slugid import nice as slugid
 from .util import (
     combine_task_graph_files,
     create_tasks,
     fetch_graph_and_labels,
     relativize_datestamps,
     create_task_from_def,
 )
 from .registry import register_callback_action
@@ -62,17 +61,17 @@ def retrigger_decision_action(parameters
     """For a single task, we try to just run exactly the same task once more.
     It's quite possible that we don't have the scopes to do so (especially for
     an action), but this is best-effort."""
 
     # make all of the timestamps relative; they will then be turned back into
     # absolute timestamps relative to the current time.
     task = taskcluster.get_task_definition(task_id)
     task = relativize_datestamps(task)
-    create_task_from_def(slugid(), task, parameters['level'])
+    create_task_from_def(task, parameters['level'])
 
 
 @register_callback_action(
     title='Retrigger',
     name='retrigger',
     symbol='rt',
     description=(
         'Create a clone of the task.'
--- a/taskcluster/taskgraph/actions/retrigger_custom.py
+++ b/taskcluster/taskgraph/actions/retrigger_custom.py
@@ -4,17 +4,16 @@
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import json
 import logging
 
-from slugid import nice as slugid
 from .util import (
     fetch_graph_and_labels,
     create_task_from_def,
 )
 from ..util.parameterization import resolve_task_references
 from .registry import register_callback_action
 from taskgraph.util import taskcluster
 
@@ -161,11 +160,9 @@ def handle_custom_retrigger(parameters, 
     # update environment
     new_task_definition['payload']['env'].update(input.get('environment', {}))
 
     # tweak the treeherder symbol
     new_task_definition['extra']['treeherder']['symbol'] += '-custom'
 
     logging.info("New task definition: %s", new_task_definition)
 
-    # actually create the new task
-    new_task_id = slugid()
-    create_task_from_def(new_task_id, new_task_definition, parameters['level'])
+    create_task_from_def(new_task_definition, parameters['level'])
--- a/taskcluster/taskgraph/actions/util.py
+++ b/taskcluster/taskgraph/actions/util.py
@@ -12,16 +12,17 @@ import logging
 import os
 import re
 from functools import reduce
 
 import jsone
 import requests
 from requests.exceptions import HTTPError
 from six import text_type, string_types
+from slugid import nice as slugid
 
 from taskgraph import create
 from taskgraph.decision import read_artifact, write_artifact, rename_artifact
 from taskgraph.taskgraph import TaskGraph
 from taskgraph.optimize import optimize_task_graph
 from taskgraph.util.taskcluster import (
     find_task_id,
     get_artifact,
@@ -210,26 +211,27 @@ def fetch_graph_and_labels(parameters, g
         # now wait for each fetch to complete, raising an exception if there
         # were any issues
         for f in futures.as_completed(fetches):
             f.result()
 
     return (decision_task_id, full_task_graph, label_to_taskid)
 
 
-def create_task_from_def(task_id, task_def, level):
+def create_task_from_def(task_def, level):
     """Create a new task from a definition rather than from a label
     that is already in the full-task-graph. The task definition will
     have {relative-datestamp': '..'} rendered just like in a decision task.
     Use this for entirely new tasks or ones that change internals of the task.
     It is useful if you want to "edit" the full_task_graph and then hand
     it to this function. No dependencies will be scheduled. You must handle
     this yourself. Seeing how create_tasks handles it might prove helpful."""
     task_def['schedulerId'] = 'gecko-level-{}'.format(level)
     label = task_def['metadata']['name']
+    task_id = slugid().decode('ascii')
     session = get_session()
     create.create_task(session, task_id, label, task_def)
 
 
 def update_parent(task, graph):
     task.task.setdefault('extra', {})['parent'] = os.environ.get('TASK_ID', '')
     return task