Backed out 2 changesets (bug 1474570) due to 'Permission denied for workspace' failures on a CLOSED TREE
author: Gurzau Raul <rgurzau@mozilla.com>
date: Fri, 12 Oct 2018 13:39:17 +0300
changeset 496605 bfd5c4ba4632b62b0b47857c600fefba739593e7
parent 496604 09b16fa61f2811251381b0a3780a5d97c6579e82
child 496606 e009fe76cb87961baf55cb10b68f86cb3a258367
push id: 9984
push user: ffxbld-merge
push date: Mon, 15 Oct 2018 21:07:35 +0000
treeherder: mozilla-beta@183d27ea8570 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
bugs: 1474570
milestone: 64.0a1
backs out: f3e71a64a33cf74135362b6d278b2acb6600454e
2c6af6cc1ae8bd7d8227f377fb0d8e255d41b918
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Backed out 2 changesets (bug 1474570) due to 'Permission denied for workspace' failures on a CLOSED TREE Backed out changeset f3e71a64a33c (bug 1474570) Backed out changeset 2c6af6cc1ae8 (bug 1474570)
taskcluster/ci/source-test/jsshell.yml
taskcluster/scripts/tester/test-linux.sh
taskcluster/taskgraph/transforms/job/mozharness_test.py
taskcluster/taskgraph/transforms/job/run_task.py
taskcluster/taskgraph/transforms/task.py
taskcluster/taskgraph/transforms/tests.py
taskcluster/taskgraph/util/workertypes.py
--- a/taskcluster/ci/source-test/jsshell.yml
+++ b/taskcluster/ci/source-test/jsshell.yml
@@ -1,14 +1,14 @@
 job-defaults:
     platform: linux64/opt
     require-build: true
     worker-type:
         by-platform:
-            linux64.*: releng-hardware/gecko-t-linux-talos
+            linux64.*: releng-hardware/gecko-t-linux-talos-tw
     worker:
         by-platform:
             linux64.*:
                 env:
                     by-shell:
                         sm:
                             SHELL: /bin/bash
                             JSSHELL: /home/cltbld/fetches/js
--- a/taskcluster/scripts/tester/test-linux.sh
+++ b/taskcluster/scripts/tester/test-linux.sh
@@ -23,23 +23,22 @@ fi
 : MOZHARNESS_URL                ${MOZHARNESS_URL}
 : MOZHARNESS_SCRIPT             ${MOZHARNESS_SCRIPT}
 : MOZHARNESS_CONFIG             ${MOZHARNESS_CONFIG}
 : NEED_XVFB                     ${NEED_XVFB:=true}
 : NEED_WINDOW_MANAGER           ${NEED_WINDOW_MANAGER:=false}
 : NEED_PULSEAUDIO               ${NEED_PULSEAUDIO:=false}
 : START_VNC                     ${START_VNC:=false}
 : TASKCLUSTER_INTERACTIVE       ${TASKCLUSTER_INTERACTIVE:=false}
-: TASK_DIRECTORY                ${TASK_DIRECTORY:=$(pwd)}
-: WORKSPACE                     ${WORKSPACE:=${TASK_DIRECTORY}/workspace}
+: WORKSPACE                     ${WORKSPACE:=$HOME/workspace}
 : mozharness args               "${@}"
 
 set -v
-mkdir -p "$WORKSPACE"
-cd "$WORKSPACE"
+mkdir -p $WORKSPACE
+cd $WORKSPACE
 
 fail() {
     echo # make sure error message is on a new line
     echo "[test-linux.sh:error]" "${@}"
     exit 1
 }
 
 maybe_start_pulse() {
@@ -53,25 +52,25 @@ maybe_start_pulse() {
 if [ -z "${MOZHARNESS_PATH}" -a -z "${MOZHARNESS_URL}" ]; then
     fail "MOZHARNESS_PATH or MOZHARNESS_URL must be defined";
 fi
 
 if [[ -z ${MOZHARNESS_SCRIPT} ]]; then fail "MOZHARNESS_SCRIPT is not set"; fi
 if [[ -z ${MOZHARNESS_CONFIG} ]]; then fail "MOZHARNESS_CONFIG is not set"; fi
 
 # make sure artifact directories exist
-mkdir -p "$WORKSPACE/build/upload/logs"
-mkdir -p "$TASK_DIRECTORY/artifacts/public"
-mkdir -p "$WORKSPACE/build/blobber_upload_dir"
+mkdir -p $WORKSPACE/build/upload/logs
+mkdir -p ~/artifacts/public
+mkdir -p $WORKSPACE/build/blobber_upload_dir
 
 cleanup() {
     local rv=$?
     if [[ -s $HOME/.xsession-errors ]]; then
       # To share X issues
-      cp "$HOME/.xsession-errors" "$TASK_DIRECTORY/artifacts/public/xsession-errors.log"
+      cp $HOME/.xsession-errors ~/artifacts/public/xsession-errors.log
     fi
     if $NEED_XVFB; then
         cleanup_xvfb
     fi
     exit $rv
 }
 trap cleanup EXIT INT
 
@@ -120,17 +119,17 @@ fi
 # run XVfb in the background, if necessary
 if $NEED_XVFB; then
     # note that this file is not available when run under native-worker
     . $HOME/scripts/xvfb.sh
     start_xvfb '1600x1200x24' 0
 fi
 
 if $START_VNC; then
-    x11vnc > "$TASK_DIRECTORY/artifacts/public/x11vnc.log" 2>&1 &
+    x11vnc > ~/artifacts/public/x11vnc.log 2>&1 &
 fi
 
 if $NEED_WINDOW_MANAGER; then
     # This is read by xsession to select the window manager
     echo DESKTOP_SESSION=ubuntu > $HOME/.xsessionrc
 
     # note that doing anything with this display before running Xsession will cause sadness (like,
     # crashes in compiz). Make sure that X has enough time to start
@@ -172,27 +171,27 @@ done
 mozharness_bin="$HOME/bin/run-mozharness"
 mkdir -p $(dirname $mozharness_bin)
 
 # Save the computed mozharness command to a binary which is useful
 # for interactive mode.
 echo -e "#!/usr/bin/env bash
 # Some mozharness scripts assume base_work_dir is in
 # the current working directory, see bug 1279237
-cd "$WORKSPACE"
+cd $WORKSPACE
 cmd=\"python2.7 ${MOZHARNESS_PATH}/scripts/${MOZHARNESS_SCRIPT} ${config_cmds} ${@} \${@}\"
 echo \"Running: \${cmd}\"
 exec \${cmd}" > ${mozharness_bin}
 chmod +x ${mozharness_bin}
 
 # In interactive mode, the user will be prompted with options for what to do.
 if ! $TASKCLUSTER_INTERACTIVE; then
   # run the given mozharness script and configs, but pass the rest of the
   # arguments in from our own invocation
   ${mozharness_bin};
 fi
 
 # Run a custom mach command (this is typically used by action tasks to run
 # harnesses in a particular way)
 if [ "$CUSTOM_MACH_COMMAND" ]; then
-    eval "'$WORKSPACE/build/tests/mach' ${CUSTOM_MACH_COMMAND}"
+    eval "$HOME/workspace/build/tests/mach ${CUSTOM_MACH_COMMAND}"
     exit $?
 fi
--- a/taskcluster/taskgraph/transforms/job/mozharness_test.py
+++ b/taskcluster/taskgraph/transforms/job/mozharness_test.py
@@ -178,18 +178,17 @@ def mozharness_test_on_docker(config, jo
 @run_job_using('generic-worker', 'mozharness-test', schema=mozharness_test_run_schema)
 def mozharness_test_on_generic_worker(config, job, taskdesc):
     test = taskdesc['run']['test']
     mozharness = test['mozharness']
     worker = taskdesc['worker']
 
     is_macosx = worker['os'] == 'macosx'
     is_windows = worker['os'] == 'windows'
-    is_linux = worker['os'] == 'linux'
-    assert is_macosx or is_windows or is_linux
+    assert is_macosx or is_windows
 
     artifacts = [
         {
             'name': 'public/logs',
             'path': 'logs',
             'type': 'directory'
         },
     ]
@@ -250,29 +249,28 @@ def mozharness_test_on_generic_worker(co
             'NO_FAIL_ON_TEST_ERRORS': '1',
             'PATH': '/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin',
             'SHELL': '/bin/bash',
             'XPCOM_DEBUG_BREAK': 'warn',
             'XPC_FLAGS': '0x0',
             'XPC_SERVICE_NAME': '0',
         })
 
-    if is_windows:
+    if is_macosx:
+        mh_command = [
+            'python2.7',
+            '-u',
+            'mozharness/scripts/' + mozharness['script']
+        ]
+    elif is_windows:
         mh_command = [
             'c:\\mozilla-build\\python\\python.exe',
             '-u',
             'mozharness\\scripts\\' + normpath(mozharness['script'])
         ]
-    else:
-        # is_linux or is_macosx
-        mh_command = [
-            'python2.7',
-            '-u',
-            'mozharness/scripts/' + mozharness['script']
-        ]
 
     for mh_config in mozharness['config']:
         cfg_path = 'mozharness/configs/' + mh_config
         if is_windows:
             cfg_path = normpath(cfg_path)
         mh_command.extend(['--cfg', cfg_path])
     mh_command.extend(mozharness.get('extra-options', []))
     mh_command.extend(['--installer-url', installer_url])
--- a/taskcluster/taskgraph/transforms/job/run_task.py
+++ b/taskcluster/taskgraph/transforms/job/run_task.py
@@ -35,107 +35,84 @@ run_task_schema = Schema({
     # it will be included in a single argument to `bash -cx`.
     Required('command'): Any([basestring], basestring),
 
     # Base work directory used to set up the task.
     Required('workdir'): basestring,
 })
 
 
-def common_setup(config, job, taskdesc, command, checkoutdir):
+def common_setup(config, job, taskdesc):
     run = job['run']
     if run['checkout']:
         support_vcs_checkout(config, job, taskdesc,
                              sparse=bool(run['sparse-profile']))
-        command.append('--vcs-checkout={}/gecko'.format(checkoutdir))
+
+    taskdesc['worker'].setdefault('env', {})['MOZ_SCM_LEVEL'] = config.params['level']
+
+
+def add_checkout_to_command(run, command):
+    if not run['checkout']:
+        return
+
+    command.append('--vcs-checkout={workdir}/checkouts/gecko'.format(**run))
 
     if run['sparse-profile']:
         command.append('--sparse-profile=build/sparse-profiles/%s' %
                        run['sparse-profile'])
 
-    taskdesc['worker'].setdefault('env', {})['MOZ_SCM_LEVEL'] = config.params['level']
 
-
-worker_defaults = {
+defaults = {
     'cache-dotcache': False,
     'checkout': True,
     'comm-checkout': False,
     'sparse-profile': None,
 }
 
 
-def run_task_url(config):
-    return '{}/raw-file/{}/taskcluster/scripts/run-task'.format(
-                config.params['head_repository'], config.params['head_rev'])
-
-
-@run_job_using("docker-worker", "run-task", schema=run_task_schema, defaults=worker_defaults)
+@run_job_using("docker-worker", "run-task", schema=run_task_schema, defaults=defaults)
 def docker_worker_run_task(config, job, taskdesc):
     run = job['run']
     worker = taskdesc['worker'] = job['worker']
-    command = ['/builds/worker/bin/run-task']
-    common_setup(config, job, taskdesc, command, checkoutdir='{workdir}/checkouts'.format(**run))
+    common_setup(config, job, taskdesc)
 
     if run.get('cache-dotcache'):
         worker['caches'].append({
             'type': 'persistent',
             'name': 'level-{level}-{project}-dotcache'.format(**config.params),
             'mount-point': '{workdir}/.cache'.format(**run),
             'skip-untrusted': True,
         })
 
     run_command = run['command']
     if isinstance(run_command, basestring):
         run_command = ['bash', '-cx', run_command]
+    command = ['{workdir}/bin/run-task'.format(**run)]
+    add_checkout_to_command(run, command)
     if run['comm-checkout']:
         command.append('--comm-checkout={workdir}/checkouts/gecko/comm'.format(**run))
     command.append('--fetch-hgfingerprint')
     command.append('--')
     command.extend(run_command)
     worker['command'] = command
 
 
-@run_job_using("native-engine", "run-task", schema=run_task_schema, defaults=worker_defaults)
+@run_job_using("native-engine", "run-task", schema=run_task_schema, defaults=defaults)
 def native_engine_run_task(config, job, taskdesc):
     run = job['run']
     worker = taskdesc['worker'] = job['worker']
-    command = ['./run-task']
-    common_setup(config, job, taskdesc, command, checkoutdir='{workdir}/checkouts'.format(**run))
+    common_setup(config, job, taskdesc)
 
-    worker['context'] = run_task_url(config)
+    worker['context'] = '{}/raw-file/{}/taskcluster/scripts/run-task'.format(
+        config.params['head_repository'], config.params['head_rev']
+    )
 
     if run.get('cache-dotcache'):
         raise Exception("No cache support on native-worker; can't use cache-dotcache")
 
     run_command = run['command']
     if isinstance(run_command, basestring):
         run_command = ['bash', '-cx', run_command]
+    command = ['./run-task']
+    add_checkout_to_command(run, command)
     command.append('--')
     command.extend(run_command)
     worker['command'] = command
-
-
-@run_job_using("generic-worker", "run-task", schema=run_task_schema, defaults=worker_defaults)
-def generic_worker_run_task(config, job, taskdesc):
-    run = job['run']
-    worker = taskdesc['worker'] = job['worker']
-    command = ['./run-task']
-    common_setup(config, job, taskdesc, command, checkoutdir='{workdir}/checkouts'.format(**run))
-
-    worker.setdefault('mounts', [])
-    if run.get('cache-dotcache'):
-        worker['mounts'].append({
-            'cache-name': 'level-{level}-{project}-dotcache'.format(**config.params),
-            'directory': '{workdir}/.cache'.format(**run),
-        })
-    worker['mounts'].append({
-        'content': {
-            'url': run_task_url(config),
-        },
-        'file': './run-task',
-    })
-
-    run_command = run['command']
-    if isinstance(run_command, basestring):
-        run_command = ['bash', '-cx', run_command]
-    command.append('--')
-    command.extend(run_command)
-    worker['command'] = [['chmod', '+x', 'run-task'], command]
--- a/taskcluster/taskgraph/transforms/task.py
+++ b/taskcluster/taskgraph/transforms/task.py
@@ -303,17 +303,17 @@ task_description_schema = Schema({
         # the exit status code(s) that indicates the caches used by the task
         # should be purged
         Optional('purge-caches-exit-status'): [int],
 
         # Wether any artifacts are assigned to this worker
         Optional('skip-artifacts'): bool,
     }, {
         Required('implementation'): 'generic-worker',
-        Required('os'): Any('windows', 'macosx', 'linux'),
+        Required('os'): Any('windows', 'macosx'),
         # see http://schemas.taskcluster.net/generic-worker/v1/payload.json
         # and https://docs.taskcluster.net/reference/workers/generic-worker/payload
 
         # command is a list of commands to run, sequentially
         # on Windows, each command is a string, on OS X and Linux, each command is
         # a string array
         Required('command'): Any(
             [taskref_or_string],   # Windows
@@ -1017,24 +1017,19 @@ def build_generic_worker_payload(config,
     # Need to copy over mounts, but rename keys to respect naming convention
     #   * 'cache-name' -> 'cacheName'
     #   * 'task-id'    -> 'taskId'
     # All other key names are already suitable, and don't need renaming.
     mounts = deepcopy(worker.get('mounts', []))
     for mount in mounts:
         if 'cache-name' in mount:
             mount['cacheName'] = mount.pop('cache-name')
-            task_def['scopes'].append('generic-worker:cache:{}'.format(mount['cacheName']))
         if 'content' in mount:
             if 'task-id' in mount['content']:
                 mount['content']['taskId'] = mount['content'].pop('task-id')
-            if 'artifact' in mount['content']:
-                if not mount['content']['artifact'].startswith('public/'):
-                    task_def['scopes'].append(
-                        'queue:get-artifact:{}'.format(mount['content']['artifact']))
 
     if mounts:
         task_def['payload']['mounts'] = mounts
 
     if worker.get('os-groups', []):
         task_def['payload']['osGroups'] = worker['os-groups']
 
     features = {}
--- a/taskcluster/taskgraph/transforms/tests.py
+++ b/taskcluster/taskgraph/transforms/tests.py
@@ -1040,17 +1040,17 @@ def set_worker_type(config, tests):
                 test['worker-type'] = 'proj-autophone/gecko-t-ap-perf-p2'
             else:
                 test['worker-type'] = 'proj-autophone/gecko-t-ap-unit-p2'
         elif test_platform.startswith('android-em-7.0-x86'):
             test['worker-type'] = 'terraform-packet/gecko-t-linux'
         elif test_platform.startswith('linux') or test_platform.startswith('android'):
             if test.get('suite', '') in ['talos', 'raptor'] and \
                  not test['build-platform'].startswith('linux64-ccov'):
-                test['worker-type'] = 'releng-hardware/gecko-t-linux-talos'
+                test['worker-type'] = 'releng-hardware/gecko-t-linux-talos-tw'
             else:
                 test['worker-type'] = LINUX_WORKER_TYPES[test['instance-size']]
         else:
             raise Exception("unknown test_platform {}".format(test_platform))
 
         yield test
 
 
--- a/taskcluster/taskgraph/util/workertypes.py
+++ b/taskcluster/taskgraph/util/workertypes.py
@@ -31,17 +31,17 @@ WORKER_TYPES = {
     'aws-provisioner-v1/gecko-t-win10-64-gpu': ('generic-worker', 'windows'),
     'releng-hardware/gecko-t-win10-64-hw': ('generic-worker', 'windows'),
     'aws-provisioner-v1/gecko-t-win7-32': ('generic-worker', 'windows'),
     'aws-provisioner-v1/gecko-t-win7-32-gpu': ('generic-worker', 'windows'),
     'releng-hardware/gecko-t-win7-32-hw': ('generic-worker', 'windows'),
     'aws-provisioner-v1/taskcluster-generic': ('docker-worker', 'linux'),
     'invalid/invalid': ('invalid', None),
     'invalid/always-optimized': ('always-optimized', None),
-    'releng-hardware/gecko-t-linux-talos': ('generic-worker', 'linux'),
+    'releng-hardware/gecko-t-linux-talos-tw': ('native-engine', 'linux'),
     'scriptworker-prov-v1/balrog-dev': ('balrog', None),
     'scriptworker-prov-v1/balrogworker-v1': ('balrog', None),
     'scriptworker-prov-v1/beetmoverworker-v1': ('beetmover', None),
     'scriptworker-prov-v1/pushapk-v1': ('push-apk', None),
     "scriptworker-prov-v1/signing-linux-v1": ('scriptworker-signing', None),
     "scriptworker-prov-v1/shipit": ('shipit', None),
     "scriptworker-prov-v1/shipit-dev": ('shipit', None),
     "scriptworker-prov-v1/treescript-v1": ('treescript', None),