Bug 1474570 - provide support for linux talos tasks on generic-worker without enabling,r=pmoore,r=dustin
backed out by bfd5c4ba4632
author: Dragos Crisan <dcrisan@mozilla.com>
date: Thu, 11 Oct 2018 15:30:00 +0300
changeset: 496602:2c6af6cc1ae8bd7d8227f377fb0d8e255d41b918
parent: 496601:75ed7377db616cdeb922a9ebf28e17aa7f97b492
child: 496603:f3e71a64a33cf74135362b6d278b2acb6600454e
push id: 9984
push user: ffxbld-merge
push date: Mon, 15 Oct 2018 21:07:35 +0000
treeherder: mozilla-beta@183d27ea8570
reviewers: pmoore, dustin
bugs: 1474570
milestone: 64.0a1
taskcluster/scripts/tester/test-linux.sh
taskcluster/taskgraph/transforms/job/mozharness_test.py
taskcluster/taskgraph/transforms/job/run_task.py
taskcluster/taskgraph/transforms/task.py
--- a/taskcluster/scripts/tester/test-linux.sh
+++ b/taskcluster/scripts/tester/test-linux.sh
@@ -23,22 +23,23 @@ fi
 : MOZHARNESS_URL                ${MOZHARNESS_URL}
 : MOZHARNESS_SCRIPT             ${MOZHARNESS_SCRIPT}
 : MOZHARNESS_CONFIG             ${MOZHARNESS_CONFIG}
 : NEED_XVFB                     ${NEED_XVFB:=true}
 : NEED_WINDOW_MANAGER           ${NEED_WINDOW_MANAGER:=false}
 : NEED_PULSEAUDIO               ${NEED_PULSEAUDIO:=false}
 : START_VNC                     ${START_VNC:=false}
 : TASKCLUSTER_INTERACTIVE       ${TASKCLUSTER_INTERACTIVE:=false}
-: WORKSPACE                     ${WORKSPACE:=$HOME/workspace}
+: TASK_DIRECTORY                ${TASK_DIRECTORY:=$(pwd)}
+: WORKSPACE                     ${WORKSPACE:=${TASK_DIRECTORY}/workspace}
 : mozharness args               "${@}"
 
 set -v
-mkdir -p $WORKSPACE
-cd $WORKSPACE
+mkdir -p "$WORKSPACE"
+cd "$WORKSPACE"
 
 fail() {
     echo # make sure error message is on a new line
     echo "[test-linux.sh:error]" "${@}"
     exit 1
 }
 
 maybe_start_pulse() {
@@ -52,25 +53,25 @@ maybe_start_pulse() {
 if [ -z "${MOZHARNESS_PATH}" -a -z "${MOZHARNESS_URL}" ]; then
     fail "MOZHARNESS_PATH or MOZHARNESS_URL must be defined";
 fi
 
 if [[ -z ${MOZHARNESS_SCRIPT} ]]; then fail "MOZHARNESS_SCRIPT is not set"; fi
 if [[ -z ${MOZHARNESS_CONFIG} ]]; then fail "MOZHARNESS_CONFIG is not set"; fi
 
 # make sure artifact directories exist
-mkdir -p $WORKSPACE/build/upload/logs
-mkdir -p ~/artifacts/public
-mkdir -p $WORKSPACE/build/blobber_upload_dir
+mkdir -p "$WORKSPACE/build/upload/logs"
+mkdir -p "$TASK_DIRECTORY/artifacts/public"
+mkdir -p "$WORKSPACE/build/blobber_upload_dir"
 
 cleanup() {
     local rv=$?
     if [[ -s $HOME/.xsession-errors ]]; then
       # To share X issues
-      cp $HOME/.xsession-errors ~/artifacts/public/xsession-errors.log
+      cp "$HOME/.xsession-errors" "$TASK_DIRECTORY/artifacts/public/xsession-errors.log"
     fi
     if $NEED_XVFB; then
         cleanup_xvfb
     fi
     exit $rv
 }
 trap cleanup EXIT INT
 
@@ -119,17 +120,17 @@ fi
 # run Xvfb in the background, if necessary
 if $NEED_XVFB; then
     # note that this file is not available when run under native-worker
     . $HOME/scripts/xvfb.sh
     start_xvfb '1600x1200x24' 0
 fi
 
 if $START_VNC; then
-    x11vnc > ~/artifacts/public/x11vnc.log 2>&1 &
+    x11vnc > "$TASK_DIRECTORY/artifacts/public/x11vnc.log" 2>&1 &
 fi
 
 if $NEED_WINDOW_MANAGER; then
     # This is read by xsession to select the window manager
     echo DESKTOP_SESSION=ubuntu > $HOME/.xsessionrc
 
     # note that doing anything with this display before running Xsession will cause sadness (like,
     # crashes in compiz). Make sure that X has enough time to start
@@ -171,27 +172,27 @@ done
 mozharness_bin="$HOME/bin/run-mozharness"
 mkdir -p $(dirname $mozharness_bin)
 
 # Save the computed mozharness command to a binary which is useful
 # for interactive mode.
 echo -e "#!/usr/bin/env bash
 # Some mozharness scripts assume base_work_dir is in
 # the current working directory, see bug 1279237
-cd $WORKSPACE
+cd "$WORKSPACE"
 cmd=\"python2.7 ${MOZHARNESS_PATH}/scripts/${MOZHARNESS_SCRIPT} ${config_cmds} ${@} \${@}\"
 echo \"Running: \${cmd}\"
 exec \${cmd}" > ${mozharness_bin}
 chmod +x ${mozharness_bin}
 
 # In interactive mode, the user will be prompted with options for what to do.
 if ! $TASKCLUSTER_INTERACTIVE; then
   # run the given mozharness script and configs, but pass the rest of the
   # arguments in from our own invocation
   ${mozharness_bin};
 fi
 
 # Run a custom mach command (this is typically used by action tasks to run
 # harnesses in a particular way)
 if [ "$CUSTOM_MACH_COMMAND" ]; then
-    eval "$HOME/workspace/build/tests/mach ${CUSTOM_MACH_COMMAND}"
+    eval "'$WORKSPACE/build/tests/mach' ${CUSTOM_MACH_COMMAND}"
     exit $?
 fi
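
As context for the change above: under docker-worker the script used to anchor workspace and artifact paths on $HOME, while generic-worker runs each command from a per-task directory, so TASK_DIRECTORY now defaults to $(pwd). A minimal Python sketch of the new defaulting (the docker-worker path below is illustrative):

    import os

    def resolve_dirs(env):
        # Mirrors `: ${TASK_DIRECTORY:=$(pwd)}` and
        # `: ${WORKSPACE:=${TASK_DIRECTORY}/workspace}` in test-linux.sh.
        task_directory = env.get('TASK_DIRECTORY') or os.getcwd()
        workspace = env.get('WORKSPACE') or os.path.join(task_directory, 'workspace')
        return task_directory, workspace

    # generic-worker style: nothing preset, both paths derive from the task's cwd
    print(resolve_dirs({}))
    # docker-worker style: WORKSPACE pinned explicitly
    print(resolve_dirs({'WORKSPACE': '/builds/worker/workspace'}))
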
--- a/taskcluster/taskgraph/transforms/job/mozharness_test.py
+++ b/taskcluster/taskgraph/transforms/job/mozharness_test.py
@@ -178,17 +178,18 @@ def mozharness_test_on_docker(config, jo
 @run_job_using('generic-worker', 'mozharness-test', schema=mozharness_test_run_schema)
 def mozharness_test_on_generic_worker(config, job, taskdesc):
     test = taskdesc['run']['test']
     mozharness = test['mozharness']
     worker = taskdesc['worker']
 
     is_macosx = worker['os'] == 'macosx'
     is_windows = worker['os'] == 'windows'
-    assert is_macosx or is_windows
+    is_linux = worker['os'] == 'linux'
+    assert is_macosx or is_windows or is_linux
 
     artifacts = [
         {
             'name': 'public/logs',
             'path': 'logs',
             'type': 'directory'
         },
     ]
@@ -249,28 +250,29 @@ def mozharness_test_on_generic_worker(co
             'NO_FAIL_ON_TEST_ERRORS': '1',
             'PATH': '/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin',
             'SHELL': '/bin/bash',
             'XPCOM_DEBUG_BREAK': 'warn',
             'XPC_FLAGS': '0x0',
             'XPC_SERVICE_NAME': '0',
         })
 
-    if is_macosx:
+    if is_windows:
+        mh_command = [
+            'c:\\mozilla-build\\python\\python.exe',
+            '-u',
+            'mozharness\\scripts\\' + normpath(mozharness['script'])
+        ]
+    else:
+        # is_linux or is_macosx
         mh_command = [
             'python2.7',
             '-u',
             'mozharness/scripts/' + mozharness['script']
         ]
-    elif is_windows:
-        mh_command = [
-            'c:\\mozilla-build\\python\\python.exe',
-            '-u',
-            'mozharness\\scripts\\' + normpath(mozharness['script'])
-        ]
 
     for mh_config in mozharness['config']:
         cfg_path = 'mozharness/configs/' + mh_config
         if is_windows:
             cfg_path = normpath(cfg_path)
         mh_command.extend(['--cfg', cfg_path])
     mh_command.extend(mozharness.get('extra-options', []))
     mh_command.extend(['--installer-url', installer_url])
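
As a worked example of the branch above, a hypothetical linux talos task (script, config and option names are illustrative) now builds a POSIX-style command instead of tripping the old macosx-or-windows assert:

    # Illustrative task description fragment, not taken from the changeset.
    mozharness = {
        'script': 'talos_script.py',
        'config': ['talos/linux_config.py'],
        'extra-options': ['--suite=g1'],
    }
    mh_command = ['python2.7', '-u', 'mozharness/scripts/' + mozharness['script']]
    for mh_config in mozharness['config']:
        mh_command.extend(['--cfg', 'mozharness/configs/' + mh_config])
    mh_command.extend(mozharness.get('extra-options', []))
    print(mh_command)
    # ['python2.7', '-u', 'mozharness/scripts/talos_script.py',
    #  '--cfg', 'mozharness/configs/talos/linux_config.py', '--suite=g1']
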
--- a/taskcluster/taskgraph/transforms/job/run_task.py
+++ b/taskcluster/taskgraph/transforms/job/run_task.py
@@ -35,84 +35,107 @@ run_task_schema = Schema({
     # it will be included in a single argument to `bash -cx`.
     Required('command'): Any([basestring], basestring),
 
     # Base work directory used to set up the task.
     Required('workdir'): basestring,
 })
 
 
-def common_setup(config, job, taskdesc):
+def common_setup(config, job, taskdesc, command, checkoutdir):
     run = job['run']
     if run['checkout']:
         support_vcs_checkout(config, job, taskdesc,
                              sparse=bool(run['sparse-profile']))
-
-    taskdesc['worker'].setdefault('env', {})['MOZ_SCM_LEVEL'] = config.params['level']
-
-
-def add_checkout_to_command(run, command):
-    if not run['checkout']:
-        return
-
-    command.append('--vcs-checkout={workdir}/checkouts/gecko'.format(**run))
+        command.append('--vcs-checkout={}/gecko'.format(checkoutdir))
 
     if run['sparse-profile']:
         command.append('--sparse-profile=build/sparse-profiles/%s' %
                        run['sparse-profile'])
 
+    taskdesc['worker'].setdefault('env', {})['MOZ_SCM_LEVEL'] = config.params['level']
 
-defaults = {
+
+worker_defaults = {
     'cache-dotcache': False,
     'checkout': True,
     'comm-checkout': False,
     'sparse-profile': None,
 }
 
 
-@run_job_using("docker-worker", "run-task", schema=run_task_schema, defaults=defaults)
+def run_task_url(config):
+    return '{}/raw-file/{}/taskcluster/scripts/run-task'.format(
+                config.params['head_repository'], config.params['head_rev'])
+
+
+@run_job_using("docker-worker", "run-task", schema=run_task_schema, defaults=worker_defaults)
 def docker_worker_run_task(config, job, taskdesc):
     run = job['run']
     worker = taskdesc['worker'] = job['worker']
-    common_setup(config, job, taskdesc)
+    command = ['/builds/worker/bin/run-task']
+    common_setup(config, job, taskdesc, command, checkoutdir='{workdir}/checkouts'.format(**run))
 
     if run.get('cache-dotcache'):
         worker['caches'].append({
             'type': 'persistent',
             'name': 'level-{level}-{project}-dotcache'.format(**config.params),
             'mount-point': '{workdir}/.cache'.format(**run),
             'skip-untrusted': True,
         })
 
     run_command = run['command']
     if isinstance(run_command, basestring):
         run_command = ['bash', '-cx', run_command]
-    command = ['{workdir}/bin/run-task'.format(**run)]
-    add_checkout_to_command(run, command)
     if run['comm-checkout']:
         command.append('--comm-checkout={workdir}/checkouts/gecko/comm'.format(**run))
     command.append('--fetch-hgfingerprint')
     command.append('--')
     command.extend(run_command)
     worker['command'] = command
 
 
-@run_job_using("native-engine", "run-task", schema=run_task_schema, defaults=defaults)
+@run_job_using("native-engine", "run-task", schema=run_task_schema, defaults=worker_defaults)
 def native_engine_run_task(config, job, taskdesc):
     run = job['run']
     worker = taskdesc['worker'] = job['worker']
-    common_setup(config, job, taskdesc)
+    command = ['./run-task']
+    common_setup(config, job, taskdesc, command, checkoutdir='{workdir}/checkouts'.format(**run))
 
-    worker['context'] = '{}/raw-file/{}/taskcluster/scripts/run-task'.format(
-        config.params['head_repository'], config.params['head_rev']
-    )
+    worker['context'] = run_task_url(config)
 
     if run.get('cache-dotcache'):
         raise Exception("No cache support on native-worker; can't use cache-dotcache")
 
     run_command = run['command']
     if isinstance(run_command, basestring):
         run_command = ['bash', '-cx', run_command]
-    command = ['./run-task']
-    add_checkout_to_command(run, command)
     command.append('--')
     command.extend(run_command)
     worker['command'] = command
+
+
+@run_job_using("generic-worker", "run-task", schema=run_task_schema, defaults=worker_defaults)
+def generic_worker_run_task(config, job, taskdesc):
+    run = job['run']
+    worker = taskdesc['worker'] = job['worker']
+    command = ['./run-task']
+    common_setup(config, job, taskdesc, command, checkoutdir='{workdir}/checkouts'.format(**run))
+
+    worker.setdefault('mounts', [])
+    if run.get('cache-dotcache'):
+        worker['mounts'].append({
+            'cache-name': 'level-{level}-{project}-dotcache'.format(**config.params),
+            'directory': '{workdir}/.cache'.format(**run),
+        })
+    worker['mounts'].append({
+        'content': {
+            'url': run_task_url(config),
+        },
+        'file': './run-task',
+    })
+
+    run_command = run['command']
+    if isinstance(run_command, basestring):
+        run_command = ['bash', '-cx', run_command]
+    command.append('--')
+    command.extend(run_command)
+    worker['command'] = [['chmod', '+x', 'run-task'], command]
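
The new generic-worker variant mounts run-task from the source tree and wraps the task in a two-step command: make the mounted script executable, then run it. A sketch of the resulting worker fields, assuming an illustrative workdir, revision and test command:

    # Shape of the payload pieces produced by generic_worker_run_task;
    # <rev>, the workdir and the test command are illustrative stand-ins.
    worker_mounts = [{
        'content': {'url': 'https://hg.mozilla.org/mozilla-central/raw-file/'
                           '<rev>/taskcluster/scripts/run-task'},
        'file': './run-task',
    }]
    worker_command = [
        ['chmod', '+x', 'run-task'],
        ['./run-task',
         '--vcs-checkout=/builds/worker/checkouts/gecko',
         '--',
         'bash', '-cx', 'some-test-command'],
    ]
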
--- a/taskcluster/taskgraph/transforms/task.py
+++ b/taskcluster/taskgraph/transforms/task.py
@@ -303,17 +303,17 @@ task_description_schema = Schema({
         # the exit status code(s) that indicates the caches used by the task
         # should be purged
         Optional('purge-caches-exit-status'): [int],
 
         # Whether any artifacts are assigned to this worker
         Optional('skip-artifacts'): bool,
     }, {
         Required('implementation'): 'generic-worker',
-        Required('os'): Any('windows', 'macosx'),
+        Required('os'): Any('windows', 'macosx', 'linux'),
         # see http://schemas.taskcluster.net/generic-worker/v1/payload.json
         # and https://docs.taskcluster.net/reference/workers/generic-worker/payload
 
         # command is a list of commands to run, sequentially
         # on Windows, each command is a string, on OS X and Linux, each command is
         # a string array
         Required('command'): Any(
             [taskref_or_string],   # Windows
@@ -1017,19 +1017,24 @@ def build_generic_worker_payload(config,
     # Need to copy over mounts, but rename keys to respect naming convention
     #   * 'cache-name' -> 'cacheName'
     #   * 'task-id'    -> 'taskId'
     # All other key names are already suitable, and don't need renaming.
     mounts = deepcopy(worker.get('mounts', []))
     for mount in mounts:
         if 'cache-name' in mount:
             mount['cacheName'] = mount.pop('cache-name')
+            task_def['scopes'].append('generic-worker:cache:{}'.format(mount['cacheName']))
         if 'content' in mount:
             if 'task-id' in mount['content']:
                 mount['content']['taskId'] = mount['content'].pop('task-id')
+            if 'artifact' in mount['content']:
+                if not mount['content']['artifact'].startswith('public/'):
+                    task_def['scopes'].append(
+                        'queue:get-artifact:{}'.format(mount['content']['artifact']))
 
     if mounts:
         task_def['payload']['mounts'] = mounts
 
     if worker.get('os-groups', []):
         task_def['payload']['osGroups'] = worker['os-groups']
 
     features = {}
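
Worked example of the mount handling above: a cache mount both gets its key renamed and grants the matching cache scope, and a non-public artifact mount adds a get-artifact scope. Cache name, task id and artifact path below are illustrative:

    from copy import deepcopy

    task_def = {'scopes': [], 'payload': {}}
    mounts = deepcopy([
        {'cache-name': 'level-3-autoland-dotcache', 'directory': '.cache'},
        {'content': {'task-id': 'abc123DEFghi',
                     'artifact': 'private/build/target.tar.bz2'},
         'file': 'target.tar.bz2'},
    ])
    for mount in mounts:
        if 'cache-name' in mount:
            mount['cacheName'] = mount.pop('cache-name')
            task_def['scopes'].append(
                'generic-worker:cache:{}'.format(mount['cacheName']))
        if 'content' in mount:
            if 'task-id' in mount['content']:
                mount['content']['taskId'] = mount['content'].pop('task-id')
            if 'artifact' in mount['content']:
                if not mount['content']['artifact'].startswith('public/'):
                    task_def['scopes'].append(
                        'queue:get-artifact:{}'.format(mount['content']['artifact']))
    task_def['payload']['mounts'] = mounts
    print(task_def['scopes'])
    # ['generic-worker:cache:level-3-autoland-dotcache',
    #  'queue:get-artifact:private/build/target.tar.bz2']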