Bug 1274611: implement docker image builds as a distinct kind; r?garndt draft
authorDustin J. Mitchell <dustin@mozilla.com>
Fri, 27 May 2016 22:13:41 +0000
changeset 372292 05a986c0fb592f771d985c821ebf128185075ec2
parent 372291 395a5ee47aeca55d284a2377bf77876b54239299
child 522152 bfbdaf586fd8a37cc28105e08f5cb75db2c85ba8
push id19496
push userdmitchell@mozilla.com
push dateFri, 27 May 2016 22:17:17 +0000
reviewersgarndt
bugs1274611
milestone49.0a1
Bug 1274611: implement docker image builds as a distinct kind; r?garndt MozReview-Commit-ID: 81Ad9LcBdx6
config/mozunit.py
taskcluster/ci/docker-image/image.yml
taskcluster/ci/docker-image/kind.yml
taskcluster/docs/attributes.rst
taskcluster/taskgraph/kind/docker_image.py
taskcluster/taskgraph/kind/legacy.py
taskcluster/taskgraph/test/test_kind_docker_image.py
taskcluster/taskgraph/test/test_util.py
taskcluster/taskgraph/util.py
testing/taskcluster/routes.json
testing/taskcluster/taskcluster_graph/image_builder.py
testing/taskcluster/tasks/builds/android_api_15_gradle_dependencies.yml
testing/taskcluster/tasks/builds/b2g_base.yml
testing/taskcluster/tasks/builds/firefox_base.yml
testing/taskcluster/tasks/builds/mobile_base.yml
testing/taskcluster/tasks/harness_test.yml
testing/taskcluster/tasks/post-builds/mulet_simulator.yml
testing/taskcluster/tasks/test.yml
testing/taskcluster/tasks/tests/eslint-gecko.yml
testing/taskcluster/tasks/tests/fx_test_base.yml
testing/taskcluster/tasks/tests/mozharness-gecko.yml
--- a/config/mozunit.py
+++ b/config/mozunit.py
@@ -68,17 +68,17 @@ class _MozTestResult(_TestResult):
     def addFailure(self, test, err):
         _TestResult.addFailure(self, test, err)
         self.printFail(test,err)
         self.stream.writeln("FAIL: {0}".format(self.getDescription(test)))
         self.stream.writeln(self.failures[-1][1])
 
     def printFail(self, test, err):
         exctype, value, tb = err
-        message = value.message.splitlines()[0]
+        message = value.message.splitlines()[0] if value.message else 'NO MESSAGE'
         # Skip test runner traceback levels
         while tb and self._is_relevant_tb_level(tb):
             tb = tb.tb_next
         if tb:
             _, ln, _ = inspect.getframeinfo(tb)[:3]
             message = 'line {0}: {1}'.format(ln, message)
         self.printStatus("TEST-UNEXPECTED-FAIL", test, message)
 
new file mode 100644
--- /dev/null
+++ b/taskcluster/ci/docker-image/image.yml
@@ -0,0 +1,56 @@
+---
+task:
+  created: '{{now}}'
+  deadline: '{{#from_now}}24 hours{{/from_now}}'
+  metadata:
+    name: 'Docker Image Build: {{image_name}}'
+    description: 'Build the docker image {{image_name}} for use by dependent tasks'
+    source: '{{source}}'
+    owner: mozilla-taskcluster-maintenance@mozilla.com
+  tags:
+    createdForUser: {{owner}}
+
+  workerType: taskcluster-images
+  provisionerId: aws-provisioner-v1
+
+  routes:
+      - index.docker.images.v1.{{project}}.{{image_name}}.latest
+      - index.docker.images.v1.{{project}}.{{image_name}}.pushdate.{{year}}.{{month}}-{{day}}-{{pushtime}}
+      - index.docker.images.v1.{{project}}.{{image_name}}.hash.{{context_hash}}
+      - tc-treeherder.{{project}}.{{revision_hash}}
+      - tc-treeherder-stage.{{project}}.{{revision_hash}}
+
+  payload:
+    env:
+      HASH: '{{context_hash}}'
+      PROJECT: '{{project}}'
+      CONTEXT_URL: '{{context_url}}'
+      CONTEXT_PATH: '{{context_path}}'
+      BASE_REPOSITORY: '{{base_repository}}'
+      HEAD_REPOSITORY: '{{head_repository}}'
+      HEAD_REV: '{{head_rev}}'
+      HEAD_REF: '{{head_ref}}'
+    features:
+      dind: true
+    image: '{{#docker_image}}image_builder{{/docker_image}}'
+    command:
+      - /bin/bash
+      - -c
+      - /home/worker/bin/build_image.sh
+    maxRunTime: 3600
+    artifacts:
+      '{{artifact_path}}':
+        type: 'file'
+        path: '/artifacts/image.tar'
+        expires: '{{#from_now}}1 year{{/from_now}}'
+  extra:
+    treeherderEnv:
+      - staging
+      - production
+    treeherder:
+      revision: {{head_rev}}
+      revision_hash: {{revision_hash}}
+      build:
+        platform: 'taskcluster-images'
+      symbol: 'I'
+
new file mode 100644
--- /dev/null
+++ b/taskcluster/ci/docker-image/kind.yml
@@ -0,0 +1,18 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+implementation: 'taskgraph.kind.docker_image:DockerImageKind'
+images_path: '../../../testing/docker'
+
+# make a task for each docker-image we might want.  For the moment, since we
+# write artifacts for each, these are whitelisted, but ideally that will change
+# (to use subdirectory clones of the proper directory), at which point we can
+# generate tasks for every docker image in the directory, secure in the
+# knowledge that unnecessary images will be omitted from the target task graph
+images:
+  - desktop-test
+  - desktop-build
+  - builder
+  - lint
+  - android-gradle-lint
--- a/taskcluster/docs/attributes.rst
+++ b/taskcluster/docs/attributes.rst
@@ -88,8 +88,12 @@ job
 of a post-build job).  This is valid only for the ``legacy`` kind.
 
 post_build
 ==========
 
 (deprecated) The name of the post-build activity.  This is valid only for the
 ``legacy`` kind.
 
+image_name
+==========
+
+For the ``docker_image`` kind, this attribute contains the docker image name.
new file mode 100644
--- /dev/null
+++ b/taskcluster/taskgraph/kind/docker_image.py
@@ -0,0 +1,163 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import logging
+import json
+import os
+import urllib2
+import hashlib
+import tarfile
+import time
+
+from . import base
+from ..types import Task
+from taskgraph.util import docker_image
+import taskcluster_graph.transform.routes as routes_transform
+import taskcluster_graph.transform.treeherder as treeherder_transform
+from taskcluster_graph.templates import Templates
+from taskcluster_graph.from_now import (
+    json_time_from_now,
+    current_json_time,
+)
+
+logger = logging.getLogger(__name__)
+GECKO = os.path.realpath(os.path.join(__file__, '..', '..', '..', '..'))
+IMAGE_BUILD_TASK = os.path.join(GECKO, 'testing', 'taskcluster', 'tasks', 'image.yml')
+ARTIFACT_URL = 'https://queue.taskcluster.net/v1/task/{}/artifacts/{}'
+INDEX_URL = 'https://index.taskcluster.net/v1/task/{}'
+
+
+class DockerImageKind(base.Kind):
+
+    def load_tasks(self, params):
+        # TODO: make this match the pushdate (get it from a parameter rather than vcs)
+        pushdate = time.strftime('%Y%m%d%H%M%S', time.gmtime())
+
+        parameters = {
+            'pushdate': pushdate,
+            'pushtime': pushdate[8:],
+            'year': pushdate[0:4],
+            'month': pushdate[4:6],
+            'day': pushdate[6:8],
+            'project': params['project'],
+            'docker_image': docker_image,
+            'base_repository': params['base_repository'] or params['head_repository'],
+            'head_repository': params['head_repository'],
+            'head_ref': params['head_ref'] or params['head_rev'],
+            'head_rev': params['head_rev'],
+            'owner': params['owner'],
+            'level': params['level'],
+            'from_now': json_time_from_now,
+            'now': current_json_time(),
+            'revision_hash': params['revision_hash'],
+            'source': '{repo}file/{rev}/taskcluster/ci/docker-image/image.yml'
+                    .format(repo=params['head_repository'], rev=params['head_rev']),
+        }
+
+        tasks = []
+        templates = Templates(self.path)
+        for image_name in self.config['images']:
+            context_path = os.path.join('testing', 'docker', image_name)
+            context_hash = self.generate_context_hash(context_path)
+
+            image_parameters = dict(parameters)
+            image_parameters['context_hash'] = context_hash
+            image_parameters['context_path'] = context_path
+            image_parameters['artifact_path'] = 'public/image.tar'
+            image_parameters['image_name'] = image_name
+
+            # TODO: generate context tarballs using subdirectory clones in the image-building task
+            image_artifact_path = "public/decision_task/image_contexts/{}/context.tar.gz".format(image_name)
+            if os.environ.get('TASK_ID'):
+                destination = os.environ['HOME'] + "/artifacts/decision_task/image_contexts/{}/context.tar.gz".format(image_name)
+                image_parameters['context_url'] = ARTIFACT_URL.format(os.environ['TASK_ID'], image_artifact_path)
+                self.create_context_tar(context_path, destination, image_name)
+            else:
+                logger.info("Not running in a decision task; not writing context tarball for image generation")
+                image_parameters['context_url'] = 'file:///tmp/' + image_artifact_path
+
+            image_task = templates.load('image.yml', image_parameters)
+
+            attributes = {
+                'kind': self.name,
+                'image_name': image_name,
+            }
+
+            # As an optimization, if the context hash exists for mozilla-central, that image
+            # task ID will be used.  The reasoning behind this is that eventually everything ends
+            # up on mozilla-central at some point if most tasks use this as a common image
+            # for a given context hash, a worker within Taskcluster does not need to contain
+            # the same image per branch.
+            index_paths = ['docker.images.v1.{}.{}.hash.{}'.format(project, image_name, context_hash)
+                           for project in ['mozilla-central', params['project']]]
+
+            tasks.append(Task(self, 'build-docker-image-' + image_name,
+                              task=image_task['task'], attributes=attributes,
+                              index_paths=index_paths))
+
+        return tasks
+
+    def get_task_dependencies(self, task, taskgraph):
+        return []
+
+    def optimize_task(self, task, taskgraph):
+        logger.debug("optimizing docker-image task {}".format(task.label))
+        for index_path in task.extra['index_paths']:
+            try:
+                url = INDEX_URL.format(index_path)
+                logger.debug("loading {}".format(url))
+                existing_task = json.load(urllib2.urlopen(url))
+                # Only return the task ID if the artifact exists for the indexed
+                # task.  Otherwise, continue on looking at each of the branches.  Method
+                # continues trying other branches in case mozilla-central has an expired
+                # artifact, but 'project' might not. Only return no task ID if all
+                # branches have been tried
+                request = urllib2.Request(ARTIFACT_URL.format(existing_task['taskId'], 'public/image.tar'))
+                request.get_method = lambda: 'HEAD'
+                urllib2.urlopen(request)  # HEAD success is enough
+                return (True, existing_task['taskId'])
+            except urllib2.HTTPError:
+                pass
+        return (False, None)
+
+    def create_context_tar(self, context_dir, destination, image_name):
+        ''' Creates a tar file of a particular context directory '''
+        destination = os.path.abspath(destination)
+        if not os.path.exists(os.path.dirname(destination)):
+            os.makedirs(os.path.dirname(destination))
+
+        with tarfile.open(destination, 'w:gz') as tar:
+            tar.add(context_dir, arcname=image_name)
+
+    def generate_context_hash(self, image_path):
+        '''
+        Generates a sha256 hash for context directory used to build an image.
+
+        Contents of the directory are sorted alphabetically, contents of each file is hashed,
+        and then a hash is created for both the file hashes as well as their paths.
+
+        This ensures that hashes are consistent and also change based on if file locations
+        within the context directory change.
+        '''
+        context_hash = hashlib.sha256()
+        files = []
+
+        for dirpath, dirnames, filenames in os.walk(os.path.join(GECKO, image_path)):
+            for filename in filenames:
+                files.append(os.path.join(dirpath, filename))
+
+        for filename in sorted(files):
+            relative_filename = filename.replace(GECKO, '')
+            with open(filename, 'rb') as f:
+                file_hash = hashlib.sha256()
+                while True:
+                    data = f.read()
+                    if not data:
+                        break
+                    file_hash.update(data)
+                context_hash.update(file_hash.hexdigest() + '\t' + relative_filename + '\n')
+
+        return context_hash.hexdigest()
--- a/taskcluster/taskgraph/kind/legacy.py
+++ b/taskcluster/taskgraph/kind/legacy.py
@@ -23,27 +23,23 @@ from taskcluster_graph.mach_util import 
     configure_dependent_task,
     set_interactive_task,
     remove_caches_from_task,
     query_vcs_info
 )
 import taskcluster_graph.transform.routes as routes_transform
 import taskcluster_graph.transform.treeherder as treeherder_transform
 from taskcluster_graph.commit_parser import parse_commit
-from taskcluster_graph.image_builder import (
-    docker_image,
-    normalize_image_details,
-    task_id_for_image
-)
 from taskcluster_graph.from_now import (
     json_time_from_now,
     current_json_time,
 )
 from taskcluster_graph.templates import Templates
 import taskcluster_graph.build_task
+from taskgraph.util import docker_image
 
 # TASKID_PLACEHOLDER is the "internal" form of a taskid; it is substituted with
 # actual taskIds at the very last minute, in get_task_definition
 TASKID_PLACEHOLDER = 'TaskLabel=={}'
 
 DEFINE_TASK = 'queue:define-task:aws-provisioner-v1/{}'
 DEFAULT_TRY = 'try: -b do -p all -u all -t all'
 DEFAULT_JOB_PATH = os.path.join(
@@ -71,16 +67,18 @@ def set_expiration(task, timestamp):
     try:
         artifacts = task_def['payload']['artifacts']
     except KeyError:
         return
 
     for artifact in artifacts.values():
         artifact['expires'] = timestamp
 
+
+
 class LegacyKind(base.Kind):
     """
     This kind generates a full task graph from the old YAML files in
     `testing/taskcluster/tasks`.  The tasks already have dependency links.
 
     The existing task-graph generation generates slugids for tasks during task
     generation, so this kind labels tasks using those slugids, with a prefix of
     "TaskLabel==".  These labels are unfortunately not stable from run to run.
@@ -116,23 +114,21 @@ class LegacyKind(base.Kind):
             logger.debug('{} commits influencing task scheduling:'.format(len(vcs_info.changesets)))
             for c in vcs_info.changesets:
                 logger.debug("{cset} {desc}".format(
                     cset=c['node'][0:12],
                     desc=c['desc'].splitlines()[0].encode('ascii', 'ignore')))
                 changed_files |= set(c['files'])
 
         # Template parameters used when expanding the graph
-        seen_images = {}
         parameters = dict(gaia_info().items() + {
             'index': 'index',
             'project': project,
             'pushlog_id': params.get('pushlog_id', 0),
             'docker_image': docker_image,
-            'task_id_for_image': partial(task_id_for_image, seen_images, project),
             'base_repository': params['base_repository'] or
             params['head_repository'],
             'head_repository': params['head_repository'],
             'head_ref': params['head_ref'] or params['head_rev'],
             'head_rev': params['head_rev'],
             'pushdate': pushdate,
             'pushtime': pushdate[8:],
             'year': pushdate[0:4],
@@ -226,21 +222,16 @@ class LegacyKind(base.Kind):
 
             # Copy build_* attributes to expose them to post-build tasks
             # as well as json routes and tests
             task_extra = build_task['task']['extra']
             build_parameters['build_name'] = task_extra['build_name']
             build_parameters['build_type'] = task_extra['build_type']
             build_parameters['build_product'] = task_extra['build_product']
 
-            normalize_image_details(graph,
-                                    build_task,
-                                    seen_images,
-                                    build_parameters,
-                                    os.environ.get('TASK_ID', None))
             set_interactive_task(build_task, interactive)
 
             # try builds don't use cache
             if project == "try":
                 remove_caches_from_task(build_task)
                 set_expiration(build_task, json_time_from_now(TRY_EXPIRATION))
 
             if params['revision_hash']:
@@ -314,21 +305,16 @@ class LegacyKind(base.Kind):
                 # enabled for build tasks
                 post_parameters = merge_dicts(build_parameters,
                                               post_build.get('additional-parameters', {}))
                 post_task = configure_dependent_task(post_build['task'],
                                                      post_parameters,
                                                      mklabel(),
                                                      templates,
                                                      build_treeherder_config)
-                normalize_image_details(graph,
-                                        post_task,
-                                        seen_images,
-                                        build_parameters,
-                                        os.environ.get('TASK_ID', None))
                 set_interactive_task(post_task, interactive)
                 treeherder_transform.add_treeherder_revision_info(post_task['task'],
                                                                   params['head_rev'],
                                                                   params['revision_hash'])
 
                 if project == "try":
                     set_expiration(post_task, json_time_from_now(TRY_EXPIRATION))
 
@@ -368,21 +354,16 @@ class LegacyKind(base.Kind):
 
                     if chunked:
                         test_parameters['chunk'] = chunk
                     test_task = configure_dependent_task(test['task'],
                                                          test_parameters,
                                                          mklabel(),
                                                          templates,
                                                          build_treeherder_config)
-                    normalize_image_details(graph,
-                                            test_task,
-                                            seen_images,
-                                            build_parameters,
-                                            os.environ.get('TASK_ID', None))
                     set_interactive_task(test_task, interactive)
 
                     if params['revision_hash']:
                         treeherder_transform.add_treeherder_revision_info(test_task['task'],
                                                                           params['head_rev'],
                                                                           params['revision_hash'])
                         routes_transform.decorate_task_treeherder_routes(
                             test_task['task'],
@@ -428,14 +409,20 @@ class LegacyKind(base.Kind):
         # taskId for each task, and we use those as the *labels* for the tasks;
         # taskgraph will later assign them new taskIds.
         return [Task(self, t['taskId'], task=t['task'], attributes=t['attributes'])
                 for t in self.graph['tasks']]
 
     def get_task_dependencies(self, task, taskgraph):
         # fetch dependency information from the cached graph
         taskdict = self.tasks_by_label[task.label]
-        return [(label, label) for label in taskdict.get('requires', [])]
+        deps = [(label, label) for label in taskdict.get('requires', [])]
+
+        # add a dependency on an image task, if needed
+        if 'docker-image' in taskdict:
+            deps.append(('build-docker-image-{docker-image}'.format(**taskdict), 'docker-image'))
+
+        return deps
 
     def optimize_task(self, task, taskgraph):
         # no optimization for the moment
         return (False, None)
 
new file mode 100644
--- /dev/null
+++ b/taskcluster/taskgraph/test/test_kind_docker_image.py
@@ -0,0 +1,54 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import unittest
+import tempfile
+import shutil
+import os
+
+from ..kind import docker_image
+from ..types import Task
+from mozunit import main, MockedOpen
+
+
+class TestDockerImageKind(unittest.TestCase):
+
+    def setUp(self):
+        self.kind = docker_image.DockerImageKind(
+                os.path.join(docker_image.GECKO, 'taskcluster', 'ci', 'docker-image'),
+                {})
+
+    def test_get_task_dependencies(self):
+        # this one's easy!
+        self.assertEqual(self.kind.get_task_dependencies(None, None), [])
+
+    # TODO: optimize_task
+
+    def test_create_context_tar(self):
+        image_dir = os.path.join(docker_image.GECKO, 'testing', 'docker', 'image_builder')
+        tarball = tempfile.mkstemp()[1]
+        self.kind.create_context_tar(image_dir, tarball, 'image_builder')
+        self.assertTrue(os.path.exists(tarball))
+        os.unlink(tarball)
+
+    def test_generate_context_hash(self):
+        tmpdir = tempfile.mkdtemp()
+        old_GECKO = docker_image.GECKO
+        docker_image.GECKO = tmpdir
+        try:
+            os.makedirs(os.path.join(tmpdir, 'docker', 'my-image'))
+            with open(os.path.join(tmpdir, 'docker', 'my-image', 'Dockerfile'), "w") as f:
+                f.write("FROM node\nADD a-file\n")
+            with open(os.path.join(tmpdir, 'docker', 'my-image', 'a-file'), "w") as f:
+                f.write("data\n")
+            self.assertEqual(self.kind.generate_context_hash('docker/my-image'),
+                    '781143fcc6cc72c9024b058665265cb6bae3fb8031cad7227dd169ffbfced434')
+        finally:
+            docker_image.GECKO = old_GECKO
+            shutil.rmtree(tmpdir)
+
+if __name__ == '__main__':
+    main()
new file mode 100644
--- /dev/null
+++ b/taskcluster/taskgraph/test/test_util.py
@@ -0,0 +1,27 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import unittest
+
+from ..util import docker_image, DOCKER_ROOT
+from mozunit import main, MockedOpen
+
+
+class TestDockerImage(unittest.TestCase):
+
+    def test_docker_image_explicit_registry(self):
+        files = {}
+        files["{}/myimage/REGISTRY".format(DOCKER_ROOT)] = "cool-images"
+        files["{}/myimage/VERSION".format(DOCKER_ROOT)] = "1.2.3"
+        with MockedOpen(files):
+            self.assertEqual(docker_image('myimage'), "cool-images/myimage:1.2.3")
+
+    def test_docker_image_default_registry(self):
+        files = {}
+        files["{}/REGISTRY".format(DOCKER_ROOT)] = "mozilla"
+        files["{}/myimage/VERSION".format(DOCKER_ROOT)] = "1.2.3"
+        with MockedOpen(files):
+            self.assertEqual(docker_image('myimage'), "mozilla/myimage:1.2.3")
new file mode 100644
--- /dev/null
+++ b/taskcluster/taskgraph/util.py
@@ -0,0 +1,25 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import os
+
+GECKO = os.path.realpath(os.path.join(__file__, '..', '..', '..'))
+DOCKER_ROOT = os.path.join(GECKO, 'testing', 'docker')
+
+def docker_image(name):
+    ''' Determine the docker image name, including repository and tag, from an
+    in-tree docker file'''
+    try:
+        with open(os.path.join(DOCKER_ROOT, name, 'REGISTRY')) as f:
+            registry = f.read().strip()
+    except IOError:
+        with open(os.path.join(DOCKER_ROOT, 'REGISTRY')) as f:
+            registry = f.read().strip()
+
+    with open(os.path.join(DOCKER_ROOT, name, 'VERSION')) as f:
+        version = f.read().strip()
+
+    return '{}/{}:{}'.format(registry, name, version)
--- a/testing/taskcluster/routes.json
+++ b/testing/taskcluster/routes.json
@@ -9,15 +9,10 @@
         "{index}.gecko.v2.{project}.nightly.{year}.{month}.{day}.latest.{build_product}.{build_name}-{build_type}",
         "{index}.gecko.v2.{project}.nightly.revision.{head_rev}.{build_product}.{build_name}-{build_type}",
         "{index}.gecko.v2.{project}.nightly.latest.{build_product}.{build_name}-{build_type}"
     ],
     "l10n": [
         "{index}.gecko.v2.{project}.revision.{head_rev}.{build_product}-l10n.{build_name}-{build_type}.{locale}",
         "{index}.gecko.v2.{project}.pushdate.{year}.{month}.{day}.{pushdate}.{build_product}-l10n.{build_name}-{build_type}.{locale}",
         "{index}.gecko.v2.{project}.latest.{build_product}-l10n.{build_name}-{build_type}.{locale}"
-    ],
-    "docker_images": [
-        "{index}.docker.images.v1.{project}.{image_name}.latest",
-        "{index}.docker.images.v1.{project}.{image_name}.pushdate.{year}.{month}-{day}-{pushtime}",
-        "{index}.docker.images.v1.{project}.{image_name}.hash.{context_hash}"
     ]
 }
--- a/testing/taskcluster/taskcluster_graph/image_builder.py
+++ b/testing/taskcluster/taskcluster_graph/image_builder.py
@@ -23,28 +23,16 @@ def is_docker_registry_image(registry_pa
     return os.path.isfile(registry_path)
 
 # make a task label; in old decision tasks, this is a regular slugid, but when called
 # from the taskgraph generator's legacy kind, this is monkey-patched to return a label
 # (`TaskLabel==..`)
 def mklabel():
     return slugid()
 
-def docker_image(name):
-    ''' Determine the docker tag/revision from an in tree docker file '''
-    repository_path = os.path.join(DOCKER_ROOT, name, 'REGISTRY')
-    repository = REGISTRY
-
-    version = open(os.path.join(DOCKER_ROOT, name, 'VERSION')).read().strip()
-
-    if os.path.isfile(repository_path):
-        repository = open(repository_path).read().strip()
-
-    return '{}/{}:{}'.format(repository, name, version)
-
 def task_id_for_image(seen_images, project, name, create=True):
     if name in seen_images:
         return seen_images[name]['taskId']
 
     context_path = os.path.join('testing', 'docker', name)
     context_hash = generate_context_hash(context_path)
     task_id = get_task_id_for_namespace(project, name, context_hash)
 
@@ -128,128 +116,16 @@ def generate_context_hash(image_path):
                 data = f.read()
                 if not data:
                     break
                 file_hash.update(data)
             context_hash.update(file_hash.hexdigest() + '\t' + relative_filename + '\n')
 
     return context_hash.hexdigest()
 
-def create_context_tar(context_dir, destination, image_name):
-    ''' Creates a tar file of a particular context directory '''
-    if not os.path.exists(os.path.dirname(destination)):
-        os.makedirs(os.path.dirname(destination))
-
-    with tarfile.open(destination, 'w:gz') as tar:
-        tar.add(context_dir, arcname=image_name)
-
-def image_requires_building(details):
-    ''' Returns true if an image task should be created for a particular image '''
-    if 'path' in details and 'hash' in details:
-        return True
-    else:
-        return False
-
-def create_image_task_parameters(params, name, details):
-    image_parameters = dict(params)
-    image_parameters['context_hash'] = details['hash']
-    image_parameters['context_path'] = details['path']
-    image_parameters['artifact_path'] = 'public/image.tar'
-    image_parameters['image_slugid'] =  details['taskId']
-    image_parameters['image_name'] = name
-
-    return image_parameters
-
-def get_image_details(seen_images, task_id):
-    '''
-    Based on a collection of image details, return the details
-    for an image matching the requested task_id.
-
-    Image details can include a path and hash indicating that the image requires
-    building.
-    '''
-    for name, details in seen_images.items():
-        if details['taskId'] == task_id:
-            return [name, details]
-    return None
-
-def get_json_routes():
-    ''' Returns routes that should be included in the image task. '''
-    routes_file = os.path.join(TASKCLUSTER_ROOT, 'routes.json')
-    with open(routes_file) as f:
-        contents = json.load(f)
-        json_routes = contents['docker_images']
-    return json_routes
-
-def normalize_image_details(graph, task, seen_images, params, decision_task_id):
-    '''
-    This takes a task-image payload and creates an image task to build that
-    image.
-
-    task-image payload is then converted to use a specific task ID of that
-    built image.  All tasks within the graph requiring this same image will have their
-    image details normalized and require the same image build task.
-    '''
-    image = task['task']['payload']['image']
-    if isinstance(image, str) or image.get('type', 'docker-image') == 'docker-image':
-        return
-
-    if 'requires' not in task:
-        task['requires'] = []
-
-    name, details = get_image_details(seen_images, image['taskId'])
-
-    if details.get('required', False) is True or image_requires_building(details) is False:
-        if 'required' in details:
-            task['requires'].append(details['taskId'])
-        return
-
-    image_parameters = create_image_task_parameters(params, name, details)
-
-    if decision_task_id:
-        image_artifact_path = "public/decision_task/image_contexts/{}/context.tar.gz".format(name)
-        destination = "/home/worker/artifacts/decision_task/image_contexts/{}/context.tar.gz".format(name)
-        image_parameters['context_url'] = ARTIFACT_URL.format(decision_task_id, image_artifact_path)
-
-        create_context_tar(image_parameters['context_path'], destination, name)
-
-    templates = Templates(TASKCLUSTER_ROOT)
-    image_task = templates.load(IMAGE_BUILD_TASK, image_parameters)
-    if params['revision_hash']:
-        treeherder_transform.add_treeherder_revision_info(
-            image_task['task'],
-            params['head_rev'],
-            params['revision_hash']
-        )
-        routes_transform.decorate_task_treeherder_routes(
-            image_task['task'],
-            "{}.{}".format(params['project'], params['revision_hash'])
-        )
-        routes_transform.decorate_task_json_routes(image_task['task'],
-                                                   get_json_routes(),
-                                                   image_parameters)
-
-    image_task['attributes'] = {
-        'kind': 'legacy',
-    }
-
-    graph['tasks'].append(image_task);
-    task['requires'].append(details['taskId'])
-
-    define_task = DEFINE_TASK.format(
-        image_task['task']['workerType']
-    )
-
-    graph['scopes'].add(define_task)
-    graph['scopes'] |= set(image_task['task'].get('scopes', []))
-    route_scopes = map(lambda route: 'queue:route:' + route, image_task['task'].get('routes', []))
-    graph['scopes'] |= set(route_scopes)
-
-    details['required'] = True
-
 def docker_load_from_url(url):
     """Get a docker image from a `docker save` tarball at the given URL,
     loading it into the running daemon and returning the image name."""
 
     # because we need to read this file twice (and one read is not all the way
     # through), it is difficult to stream it.  So we downlaod to disk and then
     # read it back.
     filename = 'temp-docker-image.tar'
--- a/testing/taskcluster/tasks/builds/android_api_15_gradle_dependencies.yml
+++ b/testing/taskcluster/tasks/builds/android_api_15_gradle_dependencies.yml
@@ -1,16 +1,17 @@
 # A build-like task to fetch Android Gradle dependencies from jcentral and
 # package them for consumption by tooltool users.  Normally invoked manually.
 
 $inherits:
   from: 'tasks/builds/mobile_base.yml'
   variables:
     build_name: 'android-api-15-gradle-dependencies'
     build_type: 'opt'
+docker-image: android-gradle-build
 task:
   metadata:
       name: '[TC] Android armv7 API 15+ gradle dependencies'
       description: 'Android armv7 API 15+ gradle dependencies'
 
   workerType: android-api-15
 
   routes:
@@ -45,17 +46,18 @@ task:
       MH_BUILD_POOL: taskcluster
       GRADLE_USER_HOME: '/home/worker/workspace/build/src/dotgradle-online'
 
     maxRunTime: 36000
 
     image:
       type: 'task-image'
       path: 'public/image.tar'
-      taskId: '{{#task_id_for_image}}android-gradle-build{{/task_id_for_image}}'
+      taskId:
+        task-reference: "<docker-image>"
 
     command:
       - /bin/bash
       - -c
       - >
         /home/worker/bin/before.sh &&
         /home/worker/bin/build.sh &&
         /home/worker/bin/after.sh &&
--- a/testing/taskcluster/tasks/builds/b2g_base.yml
+++ b/testing/taskcluster/tasks/builds/b2g_base.yml
@@ -1,14 +1,16 @@
 $inherits:
   from: 'tasks/build.yml'
   variables:
     build_product: 'b2g'
+docker-image: builder
 task:
   payload:
     image:
       type: 'task-image'
       path: 'public/image.tar'
-      taskId: '{{#task_id_for_image}}builder{{/task_id_for_image}}'
+      taskId:
+        task-reference: "<docker-image>"
   extra:
     locations:
       test_packages: 'public/build/target.test_packages.json'
       mozharness: 'public/build/mozharness.zip'
--- a/testing/taskcluster/tasks/builds/firefox_base.yml
+++ b/testing/taskcluster/tasks/builds/firefox_base.yml
@@ -1,18 +1,20 @@
 $inherits:
   from: 'tasks/build.yml'
   variables:
     build_product: 'firefox'
+docker-image: desktop-build
 task:
   scopes:
     - "secrets:get:project/releng/gecko/build/level-{{level}}/*"
   extra:
     locations:
       mozharness: 'public/build/mozharness.zip'
       test_packages: 'public/build/target.test_packages.json'
   payload:
     image:
       type: 'task-image'
       path: 'public/image.tar'
-      taskId: '{{#task_id_for_image}}desktop-build{{/task_id_for_image}}'
+      taskId:
+        task-reference: "<docker-image>"
     features:
       taskclusterProxy: true
--- a/testing/taskcluster/tasks/builds/mobile_base.yml
+++ b/testing/taskcluster/tasks/builds/mobile_base.yml
@@ -1,11 +1,13 @@
 $inherits:
   from: 'tasks/build.yml'
   variables:
     build_product: 'mobile'
+docker-image: desktop-build
 task:
   payload:
     image:
       type: 'task-image'
       path: 'public/image.tar'
-      taskId: '{{#task_id_for_image}}desktop-build{{/task_id_for_image}}'
+      taskId:
+        task-reference: "<docker-image>"
 
--- a/testing/taskcluster/tasks/harness_test.yml
+++ b/testing/taskcluster/tasks/harness_test.yml
@@ -1,15 +1,14 @@
 # This is the "base" task which contains the common values all harness
 # unit tests must provide.
 # It's inspired by build.yml, since harness unit tests just depend on
 # checking out the source tree.
 ---
-taskId: {{build_slugid}}
-
+docker-image: desktop-build
 task:
   created: '{{now}}'
   deadline: '{{#from_now}}24 hours{{/from_now}}'
   metadata:
     source: '{{source}}'
     owner: mozilla-taskcluster-maintenance@mozilla.com
   tags:
     createdForUser: {{owner}}
@@ -23,17 +22,18 @@ task:
     - 'docker-worker:cache:level-{{level}}-{{project}}-tc-vcs'
     - 'docker-worker:cache:level-{{level}}-{{project}}-test-workspace'
   payload:
     # 30 minutes should be more than enough for harness unit tests
     maxRunTime: 1800
     image:
       type: 'task-image'
       path: 'public/image.tar'
-      taskId: '{{#task_id_for_image}}desktop-build{{/task_id_for_image}}'
+      taskId:
+        task-reference: "<docker-image>"
     cache:
       # The taskcluster-vcs tooling stores the large clone caches in this
       # directory and will reuse them for new requests this saves about 20s~
       # and is the most generic cache possible.
       level-{{level}}-{{project}}-tc-vcs: '/home/worker/.tc-vcs'
       level-{{level}}-{{project}}-test-workspace: '/home/worker/workspace'
     artifacts:
       'public/logs/':
--- a/testing/taskcluster/tasks/post-builds/mulet_simulator.yml
+++ b/testing/taskcluster/tasks/post-builds/mulet_simulator.yml
@@ -1,12 +1,11 @@
 # This tasks takes a mulet build, pull gaia and craft a xpi file for FxOS simulator addon
 ---
-taskId: {{taskId}}
-
+docker-image: builder
 task:
   created: '{{now}}'
   deadline: '{{#from_now}}24 hours{{/from_now}}'
   metadata:
     source: '{{source}}'
     owner: apoirot@mozilla.com
     name: '[TC] FxOS Simulator'
     description: 'Firefox OS Simulator addon'
@@ -22,17 +21,18 @@ task:
 
   scopes:
     - 'docker-worker:cache:level-{{level}}-{{project}}-tc-vcs'
 
   payload:
     image:
       type: 'task-image'
       path: 'public/image.tar'
-      taskId: '{{#task_id_for_image}}builder{{/task_id_for_image}}'
+      taskId:
+        task-reference: "<docker-image>"
 
     maxRunTime: 600
 
     command:
       - /bin/bash
       - -exc
       - task-reference: >
         tc-vcs checkout ./gecko {{base_repository}} {{head_repository}} {{head_rev}} {{head_ref}} &&
--- a/testing/taskcluster/tasks/test.yml
+++ b/testing/taskcluster/tasks/test.yml
@@ -1,26 +1,32 @@
 # This task is the base for most tests in gecko.
+docker-image: tester
 task:
   created: '{{now}}'
   deadline: '{{#from_now}}24 hours{{/from_now}}'
   metadata:
     source: '{{source}}'
     owner: mozilla-taskcluster-maintenance@mozilla.com
   tags:
     createdForUser: {{owner}}
   workerType: b2gtest
   provisionerId: aws-provisioner-v1
   schedulerId: task-graph-scheduler
 
   scopes:
     - 'docker-worker:feature:allowPtrace'
 
   payload:
-    image: '{{#docker_image}}tester{{/docker_image}}'
+    image:
+      type: 'task-image'
+      path: 'public/image.tar'
+      taskId:
+        task-reference: "<docker-image>"
+
     maxRunTime: 3600
     env:
       MOZILLA_BUILD_URL: {"task-reference": "https://queue.taskcluster.net/v1/task/<{{build_slugid}}>/artifacts/{{build_location}}"}
       MOZHARNESS_URL: {"task-reference": "https://queue.taskcluster.net/v1/task/<{{build_slugid}}>/artifacts/{{mozharness_location}}"}
 
     # All builds share a common artifact directory for ease of uploading.
     artifacts:
       'public/logs/':
--- a/testing/taskcluster/tasks/tests/eslint-gecko.yml
+++ b/testing/taskcluster/tasks/tests/eslint-gecko.yml
@@ -1,26 +1,28 @@
 ---
 $inherits:
     from: 'tasks/lint.yml'
     variables:
       build_product: 'lint'
       build_name: 'eslint-gecko'
       build_type: 'opt'
 
+docker-image: lint
 task:
   metadata:
     name: '[TC] - ESLint'
     description: 'ESLint test'
 
   payload:
     image:
       type: 'task-image'
       path: 'public/image.tar'
-      taskId: '{{#task_id_for_image}}lint{{/task_id_for_image}}'
+      taskId:
+        task-reference: "<docker-image>"
 
     command:
       - bash
       - -cx
       - >
           tc-vcs checkout ./gecko {{base_repository}} {{head_repository}} {{head_rev}} {{head_ref}} &&
           cd gecko/testing/eslint &&
           /build/tooltool.py fetch -m manifest.tt &&
--- a/testing/taskcluster/tasks/tests/fx_test_base.yml
+++ b/testing/taskcluster/tasks/tests/fx_test_base.yml
@@ -1,20 +1,23 @@
 ---
 $inherits:
   from: 'tasks/test.yml'
+docker-image: desktop-test
 task:
   workerType: desktop-test
   scopes:
     - docker-worker:cache:level-{{level}}-{{project}}-test-workspace
   payload:
     image:
       type: 'task-image'
       path: 'public/image.tar'
-      taskId: '{{#task_id_for_image}}desktop-test{{/task_id_for_image}}'
+      taskId:
+        task-reference: "<docker-image>"
+
     env:
         NEED_WINDOW_MANAGER: true
         NEED_PULSEAUDIO: true
         GECKO_HEAD_REPOSITORY: '{{{head_repository}}}'
         GECKO_HEAD_REV: '{{{head_rev}}}'
         MOZ_NODE_PATH: '/usr/local/bin/node'
 
     cache:
--- a/testing/taskcluster/tasks/tests/mozharness-gecko.yml
+++ b/testing/taskcluster/tasks/tests/mozharness-gecko.yml
@@ -1,29 +1,31 @@
 ---
 $inherits:
     from: 'tasks/lint.yml'
     variables:
       build_product: 'lint'
       build_name: 'mozharness-tox'
       build_type: 'opt'
 
+docker-image: desktop-test
 task:
   metadata:
     name: '[TC] - Mozharness Tox'
     description: 'Mozharness integration tests'
 
   scopes:
     - 'docker-worker:cache:level-{{level}}-{{project}}-dotcache'
 
   payload:
     image:
       type: 'task-image'
       path: 'public/image.tar'
-      taskId: '{{#task_id_for_image}}desktop-test{{/task_id_for_image}}'
+      taskId:
+        task-reference: "<docker-image>"
 
     cache:
       level-{{level}}-{{project}}-dotcache: '/home/worker/.cache'
 
     command:
       - bash
       - -cx
       - >