Bug 1508381 - use rootUrl style with taskcluster-proxy r=tomprince
author Dustin J. Mitchell <dustin@mozilla.com>
Tue, 12 Mar 2019 20:38:42 +0000
changeset 521724 e212dcda9cce85ee547998dbd119cd87625822a3
parent 521723 a5d46695e93cdfd60faca65368edd4cf585ccc1d
child 521725 81c183207343ab527f9ce64a6e9637f108e415e6
push id 10867
push user dvarga@mozilla.com
push date Thu, 14 Mar 2019 15:20:45 +0000
treeherder mozilla-beta@abad13547875
reviewers tomprince
bugs 1508381
milestone 67.0a1
Bug 1508381 - use rootUrl style with taskcluster-proxy r=tomprince

Differential Revision: https://phabricator.services.mozilla.com/D18023
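This switches taskcluster-proxy URLs from the old baseUrl style (${TASKCLUSTER_PROXY_URL}/<service>/<version>/...) to the rootUrl style (${TASKCLUSTER_PROXY_URL}/api/<service>/<version>/...), so the proxy URL can be treated like any other Taskcluster root URL. A minimal sketch of the difference, assuming taskcluster_urls (imported as liburls in taskcluster.py) builds generic rootUrl-style API URLs; the secret name and the http://taskcluster address below are illustrative only:

    # Sketch only: the secret name and the http://taskcluster fallback are
    # made up; inside a real task the worker provides TASKCLUSTER_PROXY_URL.
    import os
    import taskcluster_urls

    proxy_url = os.environ.get('TASKCLUSTER_PROXY_URL', 'http://taskcluster')

    # Old baseUrl-style path, previously hard-coded at each call site:
    old_style = proxy_url + '/secrets/v1/secret/project/example/secret'

    # New rootUrl-style path; the proxy URL works as a root URL, so the same
    # helper used for direct URLs applies (expected form:
    # {root_url}/api/{service}/{version}/{path}):
    new_style = taskcluster_urls.api(proxy_url, 'secrets', 'v1',
                                     'secret/project/example/secret')

The shell scripts below hard-code the new /api/... form directly, while the Python helpers build URLs via liburls.api() with get_root_url(use_proxy).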
taskcluster/docker/funsize-update-generator/runme.sh
taskcluster/docker/periodic-updates/runme.sh
taskcluster/docker/pipfile-updates/runme.sh
taskcluster/scripts/misc/fetch-content
taskcluster/taskgraph/actions/create_interactive.py
taskcluster/taskgraph/transforms/docker_image.py
taskcluster/taskgraph/transforms/job/debian_package.py
taskcluster/taskgraph/util/taskcluster.py
--- a/taskcluster/docker/funsize-update-generator/runme.sh
+++ b/taskcluster/docker/funsize-update-generator/runme.sh
@@ -28,18 +28,17 @@ if [ -n "${S3_BUCKET_AND_PATH}" ] && get
 then
   # Does this parse as we expect?
   S3_PATH=${S3_BUCKET_AND_PATH#*/}
   AWS_BUCKET_NAME=${S3_BUCKET_AND_PATH%/${S3_PATH}*}
   test "${S3_PATH}"
   test "${AWS_BUCKET_NAME}"
 
   set +x  # Don't echo these.
-  # Until bug 1460015 is finished, use baseUrl-style proxy URLs
-  secret_url="${TASKCLUSTER_PROXY_URL}/auth/v1/aws/s3/read-write/${AWS_BUCKET_NAME}/${S3_PATH}"
+  secret_url="${TASKCLUSTER_PROXY_URL}/api/auth/v1/aws/s3/read-write/${AWS_BUCKET_NAME}/${S3_PATH}"
   AUTH=$(curl "${secret_url}")
   AWS_ACCESS_KEY_ID=$(echo "${AUTH}" | jq -r '.credentials.accessKeyId')
   AWS_SECRET_ACCESS_KEY=$(echo "${AUTH}" | jq -r '.credentials.secretAccessKey')
   AWS_SESSION_TOKEN=$(echo "${AUTH}" | jq -r '.credentials.sessionToken')
   export AWS_ACCESS_KEY_ID
   export AWS_SECRET_ACCESS_KEY
   export AWS_SESSION_TOKEN
   AUTH=
--- a/taskcluster/docker/periodic-updates/runme.sh
+++ b/taskcluster/docker/periodic-updates/runme.sh
@@ -63,18 +63,17 @@ fi
 if [ -n "${TASK_ID}" ]
 then
   curl --location --retry 10 --retry-delay 10 -o /home/worker/task.json "$queue_base/task/$TASK_ID"
   ARC_SECRET=$(jq -r '.scopes[] | select(contains ("arc-phabricator-token"))' /home/worker/task.json | awk -F: '{print $3}')
 fi
 if [ -n "${ARC_SECRET}" ] && getent hosts taskcluster
 then
   set +x # Don't echo these
-  # Until bug 1460015 is finished, use baseUrl-style proxy URLs
-  secrets_url="${TASKCLUSTER_PROXY_URL}/secrets/v1/secret/${ARC_SECRET}"
+  secrets_url="${TASKCLUSTER_PROXY_URL}/api/secrets/v1/secret/${ARC_SECRET}"
   SECRET=$(curl "${secrets_url}")
   TOKEN=$(echo "${SECRET}" | jq -r '.secret.token')
 elif [ -n "${ARC_TOKEN}" ] # Allow for local testing.
 then
   TOKEN="${ARC_TOKEN}"
 fi
 
 if [ -n "${TOKEN}" ]
--- a/taskcluster/docker/pipfile-updates/runme.sh
+++ b/taskcluster/docker/pipfile-updates/runme.sh
@@ -31,18 +31,17 @@ fi
 if [ -n "${TASK_ID}" ]
 then
   curl --location --retry 10 --retry-delay 10 -o /home/worker/task.json "$queue_base/task/$TASK_ID"
   ARC_SECRET=$(jq -r '.scopes[] | select(contains ("arc-phabricator-token"))' /home/worker/task.json | awk -F: '{print $3}')
 fi
 if [ -n "${ARC_SECRET}" ] && getent hosts taskcluster
 then
   set +x # Don't echo these
-  # Until bug 1460015 is finished, use the old, baseUrl-style proxy URLs
-  secrets_url="${TASKCLUSTER_PROXY_URL}/secrets/v1/secret/${ARC_SECRET}"
+  secrets_url="${TASKCLUSTER_PROXY_URL}/api/secrets/v1/secret/${ARC_SECRET}"
   SECRET=$(curl "${secrets_url}")
   TOKEN=$(echo "${SECRET}" | jq -r '.secret.token')
 elif [ -n "${ARC_TOKEN}" ] # Allow for local testing.
 then
   TOKEN="${ARC_TOKEN}"
 fi
 
 if [ -n "${TOKEN}" ]
--- a/taskcluster/scripts/misc/fetch-content
+++ b/taskcluster/scripts/misc/fetch-content
@@ -391,18 +391,17 @@ def command_task_artifacts(args):
             extdir = extdir.joinpath(fetch['dest'])
         extdir.mkdir(parents=True, exist_ok=True)
         root_url = os.environ['TASKCLUSTER_ROOT_URL']
         if fetch['artifact'].startswith('public/'):
             path = 'task/{task}/artifacts/{artifact}'.format(
                     task=fetch['task'], artifact=fetch['artifact'])
             url = api(root_url, 'queue', 'v1', path)
         else:
-            # Until bug 1460015 is finished, use the old baseUrl style proxy URLs
-            url = ('{proxy_url}/queue/v1/task/{task}/artifacts/{artifact}').format(
+            url = ('{proxy_url}/api/queue/v1/task/{task}/artifacts/{artifact}').format(
                     proxy_url=os.environ['TASKCLUSTER_PROXY_URL'],
                     task=fetch['task'],
                     artifact=fetch['artifact'])
         downloads.append((url, extdir, fetch['extract']))
 
     fetch_urls(downloads)
 
 
--- a/taskcluster/taskgraph/actions/create_interactive.py
+++ b/taskcluster/taskgraph/actions/create_interactive.py
@@ -156,17 +156,17 @@ def create_interactive_action(parameters
 
     if input and 'notify' in input:
         email = input['notify']
         # no point sending to a noreply address!
         if email == 'noreply@noreply.mozilla.org':
             return
 
         info = {
-            'url': taskcluster_urls.ui(get_root_url(), 'tasks/{}/connect'.format(taskId)),
+            'url': taskcluster_urls.ui(get_root_url(False), 'tasks/{}/connect'.format(taskId)),
             'label': label,
             'revision': parameters['head_rev'],
             'repo': parameters['head_repository'],
         }
         send_email(
             email,
             subject=EMAIL_SUBJECT.format(**info),
             content=EMAIL_CONTENT.format(**info),
--- a/taskcluster/taskgraph/transforms/docker_image.py
+++ b/taskcluster/taskgraph/transforms/docker_image.py
@@ -118,17 +118,17 @@ def fill_template(config, tasks):
         # task-reference value, see further below). We add the package routes
         # containing a hash to get the overall docker image hash, so changes
         # to packages will be reflected in the docker image hash.
         args['DOCKER_IMAGE_PACKAGES'] = ' '.join('<{}>'.format(p)
                                                  for p in packages)
         if parent:
             args['DOCKER_IMAGE_PARENT'] = '{}:{}'.format(parent, context_hashes[parent])
 
-        args['TASKCLUSTER_ROOT_URL'] = get_root_url()
+        args['TASKCLUSTER_ROOT_URL'] = get_root_url(False)
 
         if not taskgraph.fast:
             context_path = os.path.join('taskcluster', 'docker', definition)
             context_hash = generate_context_hash(
                 GECKO, context_path, image_name, args)
         else:
             context_hash = '0'*40
         digest_data = [context_hash]
--- a/taskcluster/taskgraph/transforms/job/debian_package.py
+++ b/taskcluster/taskgraph/transforms/job/debian_package.py
@@ -148,17 +148,17 @@ def docker_worker_debian_package(config,
             dist=run['dist'],
         )
 
     worker['command'] = [
         'sh',
         '-x',
         '-c',
         # Add sources for packages coming from other package tasks.
-        '/usr/local/sbin/setup_packages.sh {queue_url} $PACKAGES && '
+        '/usr/local/sbin/setup_packages.sh {root_url} $PACKAGES && '
         'apt-get update && '
         # Upgrade packages that might have new versions in package tasks.
         'apt-get dist-upgrade && '
         'cd /tmp && '
         # Get, validate and extract the package source.
         '(dget -d -u {src_url} || exit 100) && '
         'echo "{src_sha256}  {src_file}" | sha256sum -c && '
         '{unpack} && '
@@ -172,17 +172,17 @@ def docker_worker_debian_package(config,
         # Copy the artifacts
         'mkdir -p {artifacts}/debian && '
         'dcmd cp ../{package}_*.changes {artifacts}/debian/ && '
         'cd {artifacts} && '
         # Make the artifacts directory usable as an APT repository.
         'apt-ftparchive sources debian | gzip -c9 > debian/Sources.gz && '
         'apt-ftparchive packages debian | gzip -c9 > debian/Packages.gz'
         .format(
-            queue_url=get_root_url(),
+            root_url=get_root_url(False),
             package=package,
             snapshot=run['snapshot'],
             dist=run['dist'],
             src_url=src_url,
             src_file=src_file,
             src_sha256=src_sha256,
             unpack=unpack,
             adjust=adjust,
--- a/taskcluster/taskgraph/util/taskcluster.py
+++ b/taskcluster/taskgraph/util/taskcluster.py
@@ -26,20 +26,33 @@ testing = False
 # to the production Taskcluster deployment used for CI.
 PRODUCTION_TASKCLUSTER_ROOT_URL = 'https://taskcluster.net'
 
 # the maximum number of parallel Taskcluster API calls to make
 CONCURRENCY = 50
 
 
 @memoize
-def get_root_url():
+def get_root_url(use_proxy):
     """Get the current TASKCLUSTER_ROOT_URL.  When running in a task, this must
     come from $TASKCLUSTER_ROOT_URL; when run on the command line, we apply a
-    defualt that points to the production deployment of Taskcluster."""
+    default that points to the production deployment of Taskcluster.  If use_proxy
+    is set, this attempts to get TASKCLUSTER_PROXY_URL instead, failing if it
+    is not set."""
+    if use_proxy:
+        try:
+            return os.environ['TASKCLUSTER_PROXY_URL']
+        except KeyError:
+            if 'TASK_ID' not in os.environ:
+                raise RuntimeError(
+                    'taskcluster-proxy is not available when not executing in a task')
+            else:
+                raise RuntimeError(
+                    'taskcluster-proxy is not enabled for this task')
+
     if 'TASKCLUSTER_ROOT_URL' not in os.environ:
         if 'TASK_ID' in os.environ:
             raise RuntimeError('$TASKCLUSTER_ROOT_URL must be set when running in a task')
         else:
             logger.debug('Using default TASKCLUSTER_ROOT_URL (Firefox CI production)')
             return PRODUCTION_TASKCLUSTER_ROOT_URL
     logger.debug('Running in Taskcluster instance {}{}'.format(
         os.environ['TASKCLUSTER_ROOT_URL'],
@@ -87,17 +100,17 @@ def _handle_artifact(path, response):
     if path.endswith('.yml'):
         return yaml.load_stream(response.text)
     response.raw.read = functools.partial(response.raw.read,
                                           decode_content=True)
     return response.raw
 
 
 def get_artifact_url(task_id, path, use_proxy=False):
-    artifact_tmpl = liburls.api(get_root_url(), 'queue', 'v1',
+    artifact_tmpl = liburls.api(get_root_url(False), 'queue', 'v1',
                                 'task/{}/artifacts/{}')
     data = artifact_tmpl.format(task_id, path)
     if use_proxy:
         # Until Bug 1405889 is deployed, we can't download directly
         # from the taskcluster-proxy.  Work around by using the /bewit
         # endpoint instead.
         # The bewit URL is the body of a 303 redirect, which we don't
         # want to follow (which fetches a potentially large resource).
@@ -136,21 +149,17 @@ def get_artifact_prefix(task):
     return prefix or "public/build"
 
 
 def get_artifact_path(task, path):
     return "{}/{}".format(get_artifact_prefix(task), path)
 
 
 def get_index_url(index_path, use_proxy=False, multiple=False):
-    if use_proxy:
-        # Until bug 1460015 is finished, use the old baseUrl style of proxy URL
-        index_tmpl = os.environ['TASKCLUSTER_PROXY_URL'] + '/index/v1/task{}/{}'
-    else:
-        index_tmpl = liburls.api(get_root_url(), 'index', 'v1', 'task{}/{}')
+    index_tmpl = liburls.api(get_root_url(use_proxy), 'index', 'v1', 'task{}/{}')
     return index_tmpl.format('s' if multiple else '', index_path)
 
 
 def find_task_id(index_path, use_proxy=False):
     try:
         response = _do_request(get_index_url(index_path, use_proxy))
     except requests.exceptions.HTTPError as e:
         if e.response.status_code == 404:
@@ -190,21 +199,17 @@ def list_tasks(index_path, use_proxy=Fal
 
 
 def parse_time(timestamp):
     """Turn a "JSON timestamp" as used in TC APIs into a datetime"""
     return datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%fZ')
 
 
 def get_task_url(task_id, use_proxy=False):
-    if use_proxy:
-        # Until bug 1460015 is finished, use the old baseUrl style of proxy URL
-        task_tmpl = os.environ['TASKCLUSTER_PROXY_URL'] + '/queue/v1/task/{}'
-    else:
-        task_tmpl = liburls.api(get_root_url(), 'queue', 'v1', 'task/{}')
+    task_tmpl = liburls.api(get_root_url(use_proxy), 'queue', 'v1', 'task/{}')
     return task_tmpl.format(task_id)
 
 
 def get_task_definition(task_id, use_proxy=False):
     response = _do_request(get_task_url(task_id, use_proxy))
     return response.json()
 
 
@@ -235,61 +240,53 @@ def rerun_task(task_id):
         logger.info('Would have rerun {}.'.format(task_id))
     else:
         _do_request(get_task_url(task_id, use_proxy=True) + '/rerun', json={})
 
 
 def get_current_scopes():
     """Get the current scopes.  This only makes sense in a task with the Taskcluster
     proxy enabled, where it returns the actual scopes accorded to the task."""
-    # Until bug 1460015 is finished, use the old baseUrl style of proxy URL
-    resp = _do_request(os.environ['TASKCLUSTER_PROXY_URL'] + '/auth/v1/scopes/current')
+    auth_url = liburls.api(get_root_url(True), 'auth', 'v1', 'scopes/current')
+    resp = _do_request(auth_url)
     return resp.json().get("scopes", [])
 
 
 def get_purge_cache_url(provisioner_id, worker_type, use_proxy=False):
-    if use_proxy:
-        # Until bug 1460015 is finished, use the old baseUrl style of proxy URL
-        url_tmpl = os.environ['TASKCLUSTER_PROXY_URL'] + '/purge-cache/v1/purge-cache/{}/{}'
-    else:
-        url_tmpl = liburls.api(get_root_url(), 'purge-cache', 'v1', 'purge-cache/{}/{}')
+    url_tmpl = liburls.api(get_root_url(use_proxy), 'purge-cache', 'v1', 'purge-cache/{}/{}')
     return url_tmpl.format(provisioner_id, worker_type)
 
 
 def purge_cache(provisioner_id, worker_type, cache_name, use_proxy=False):
     """Requests a cache purge from the purge-caches service."""
     if testing:
         logger.info('Would have purged {}/{}/{}.'.format(provisioner_id, worker_type, cache_name))
     else:
         logger.info('Purging {}/{}/{}.'.format(provisioner_id, worker_type, cache_name))
         purge_cache_url = get_purge_cache_url(provisioner_id, worker_type, use_proxy)
         _do_request(purge_cache_url, json={'cacheName': cache_name})
 
 
 def send_email(address, subject, content, link, use_proxy=False):
     """Sends an email using the notify service"""
     logger.info('Sending email to {}.'.format(address))
-    if use_proxy:
-        # Until bug 1460015 is finished, use the old baseUrl style of proxy URL
-        url = os.environ['TASKCLUSTER_PROXY_URL'] + '/notify/v1/email'
-    else:
-        url = liburls.api(get_root_url(), 'notify', 'v1', 'email')
+    url = liburls.api(get_root_url(use_proxy), 'notify', 'v1', 'email')
     _do_request(url, json={
         'address': address,
         'subject': subject,
         'content': content,
         'link': link,
     })
 
 
 def list_task_group_incomplete_tasks(task_group_id):
     """Generate the incomplete tasks in a task group"""
     params = {}
     while True:
-        url = liburls.api(get_root_url(), 'queue', 'v1',
+        url = liburls.api(get_root_url(False), 'queue', 'v1',
                           'task-group/{}/list'.format(task_group_id))
         resp = _do_request(url, force_get=True, params=params).json()
         for task in [t['status'] for t in resp['tasks']]:
             if task['state'] in ['running', 'pending', 'unscheduled']:
                 yield task['taskId']
         if resp.get('continuationToken'):
             params = {'continuationToken': resp.get('continuationToken')}
         else:
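As a usage illustration, here is a hedged sketch of how a script running inside a task could fetch its own task definition through the proxy with the new rootUrl-style layout, mirroring the /api/queue/v1/... form used in fetch-content above; TASK_ID and TASKCLUSTER_PROXY_URL exist only inside a real task, and printing metadata.name is just an example:

    # Sketch: fetch the current task's definition through taskcluster-proxy
    # using the rootUrl-style layout. Runs only inside a task, where the
    # worker sets TASK_ID and TASKCLUSTER_PROXY_URL.
    import os
    import requests

    proxy_url = os.environ['TASKCLUSTER_PROXY_URL']
    task_id = os.environ['TASK_ID']

    url = '{proxy}/api/queue/v1/task/{task}'.format(proxy=proxy_url,
                                                    task=task_id)
    task_def = requests.get(url).json()
    print(task_def['metadata']['name'])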