Compress docker images with zstd
authorJonas Finnemann Jensen <jopsen@gmail.com>
Mon, 07 Nov 2016 11:26:27 -0800
changeset 887175 5784ddcc6786ab39227c696d30734e3283b2d11b
parent 884163 3b80868f7a8fe0361918a814fbbbfb9308ae0c0a
child 887176 81ce7a60db2389587cb58e55c0e048ff5768a664
push id153456
push userjojensen@mozilla.com
push dateMon, 07 Nov 2016 22:55:43 +0000
treeherdertry@81ce7a60db23 [default view] [failures only]
milestone52.0a1
Compress docker images with zstd
taskcluster/ci/docker-image/image.yml
taskcluster/mach_commands.py
taskcluster/taskgraph/docker.py
taskcluster/taskgraph/task/docker_image.py
taskcluster/taskgraph/transforms/task.py
taskcluster/taskgraph/util/docker.py
testing/docker/README.md
testing/docker/image_builder/Dockerfile
testing/docker/image_builder/REGISTRY
testing/docker/image_builder/VERSION
testing/docker/image_builder/bin/build_image.sh
testing/docker/image_builder/build-image.sh
testing/docker/image_builder/setup.sh
--- a/taskcluster/ci/docker-image/image.yml
+++ b/taskcluster/ci/docker-image/image.yml
@@ -12,49 +12,44 @@ task:
   tags:
     createdForUser: {{owner}}
 
   workerType: taskcluster-images
   provisionerId: aws-provisioner-v1
   schedulerId: task-graph-scheduler
 
   routes:
-      - index.docker.images.v1.{{project}}.{{image_name}}.latest
-      - index.docker.images.v1.{{project}}.{{image_name}}.pushdate.{{year}}.{{month}}-{{day}}-{{pushtime}}
-      - index.docker.images.v1.{{project}}.{{image_name}}.hash.{{context_hash}}
+      - index.{{index_image_prefix}}.{{project}}.{{image_name}}.latest
+      - index.{{index_image_prefix}}.{{project}}.{{image_name}}.pushdate.{{year}}.{{month}}-{{day}}-{{pushtime}}
+      - index.{{index_image_prefix}}.{{project}}.{{image_name}}.hash.{{context_hash}}
       - tc-treeherder.v2.{{project}}.{{head_rev}}.{{pushlog_id}}
       - tc-treeherder-stage.v2.{{project}}.{{head_rev}}.{{pushlog_id}}
 
   payload:
     env:
       HASH: '{{context_hash}}'
       PROJECT: '{{project}}'
       CONTEXT_URL: '{{context_url}}'
-      CONTEXT_PATH: '{{context_path}}'
+      IMAGE_NAME: '{{image_name}}'
       BASE_REPOSITORY: '{{base_repository}}'
       HEAD_REPOSITORY: '{{head_repository}}'
       HEAD_REV: '{{head_rev}}'
       HEAD_REF: '{{head_ref}}'
     features:
       dind: true
       chainOfTrust: true
     image: '{{#docker_image}}image_builder{{/docker_image}}'
-    command:
-      - /bin/bash
-      - -c
-      - /home/worker/bin/build_image.sh
     maxRunTime: 3600
     artifacts:
       '{{artifact_path}}':
         type: 'file'
-        path: '/artifacts/image.tar'
+        path: '/home/worker/workspace/artifacts/image.tar.zstd'
         expires:
           relative-datestamp: "1 year"
   extra:
     treeherderEnv:
       - staging
       - production
     treeherder:
       jobKind: other
       build:
         platform: 'taskcluster-images'
       symbol: 'I'
-
--- a/taskcluster/mach_commands.py
+++ b/taskcluster/mach_commands.py
@@ -268,16 +268,21 @@ class TaskClusterImagesProvider(object):
         except Exception:
             traceback.print_exc()
             sys.exit(1)
 
     @Command('taskcluster-build-image', category='ci',
              description='Build a Docker image')
     @CommandArgument('image_name',
                      help='Name of the image to build')
-    def build_image(self, image_name):
-        from taskgraph.docker import build_image
-
+    @CommandArgument('--context-only',
+                     help='Only build the image context.tar',
+                     metavar='context.tar')
+    def build_image(self, image_name, context_only):
+        from taskgraph.docker import build_image, build_context
         try:
-            build_image(image_name)
+            if context_only is None:
+                build_image(image_name)
+            else:
+                build_context(image_name, context_only)
         except Exception:
             traceback.print_exc()
             sys.exit(1)
--- a/taskcluster/taskgraph/docker.py
+++ b/taskcluster/taskgraph/docker.py
@@ -13,20 +13,19 @@ import tarfile
 import tempfile
 import urllib2
 import which
 
 from taskgraph.util import docker
 
 GECKO = os.path.realpath(os.path.join(__file__, '..', '..', '..'))
 IMAGE_DIR = os.path.join(GECKO, 'testing', 'docker')
-INDEX_URL = 'https://index.taskcluster.net/v1/task/docker.images.v1.{}.{}.hash.{}'
+INDEX_URL = 'https://index.taskcluster.net/v1/task/' + docker.INDEX_PREFIX + '.{}.{}.hash.{}'
 ARTIFACT_URL = 'https://queue.taskcluster.net/v1/task/{}/artifacts/{}'
 
-
 def load_image_by_name(image_name):
     context_path = os.path.join(GECKO, 'testing', 'docker', image_name)
     context_hash = docker.generate_context_hash(GECKO, context_path, image_name)
 
     image_index_url = INDEX_URL.format('mozilla-central', image_name, context_hash)
     print("Fetching", image_index_url)
     task = json.load(urllib2.urlopen(image_index_url))
 
@@ -34,19 +33,24 @@ def load_image_by_name(image_name):
 
 
 def load_image_by_task_id(task_id):
     # because we need to read this file twice (and one read is not all the way
     # through), it is difficult to stream it.  So we download to disk and then
     # read it back.
     filename = 'temp-docker-image.tar'
 
-    artifact_url = ARTIFACT_URL.format(task_id, 'public/image.tar')
+    artifact_url = ARTIFACT_URL.format(task_id, 'public/image.tar.zstd')
     print("Downloading", artifact_url)
-    subprocess.check_call(['curl', '-#', '-L', '-o', filename, artifact_url])
+    tempfilename = 'temp-docker-image.tar.zstd'
+    subprocess.check_call(['curl', '-#', '-L', '-o', tempfilename, artifact_url])
+    print("Decompressing")
+    subprocess.check_call(['zstd', '-d', '-f', tempfilename, '-o', filename])
+    print("Deleting temporary file")
+    os.unlink(tempfilename)
 
     print("Determining image name")
     tf = tarfile.open(filename)
     repositories = json.load(tf.extractfile('repositories'))
     name = repositories.keys()[0]
     tag = repositories[name].keys()[0]
     name = '{}:{}'.format(name, tag)
     print("Image name:", name)
@@ -60,16 +64,29 @@ def load_image_by_task_id(task_id):
         raise
 
     print("Deleting temporary file")
     os.unlink(filename)
 
     print("The requested docker image is now available as", name)
     print("Try: docker run -ti --rm {} bash".format(name))
 
+def build_context(name, outputFile):
+    """Build a context.tar for image with specified name.
+    """
+    if not name:
+        raise ValueError('must provide a Docker image name')
+    if not outputFile:
+        raise ValueError('must provide a outputFile')
+
+    image_dir = os.path.join(IMAGE_DIR, name)
+    if not os.path.isdir(image_dir):
+        raise Exception('image directory does not exist: %s' % image_dir)
+
+    docker.create_context_tar(GECKO, image_dir, outputFile, "")
 
 def build_image(name):
     """Build a Docker image of specified name.
 
     Output from image building process will be printed to stdout.
     """
     if not name:
         raise ValueError('must provide a Docker image name')
--- a/taskcluster/taskgraph/task/docker_image.py
+++ b/taskcluster/taskgraph/task/docker_image.py
@@ -11,25 +11,24 @@ import re
 import urllib2
 import time
 
 from . import base
 from taskgraph.util.docker import (
     create_context_tar,
     docker_image,
     generate_context_hash,
+    INDEX_PREFIX,
 )
 from taskgraph.util.templates import Templates
 
 logger = logging.getLogger(__name__)
 GECKO = os.path.realpath(os.path.join(__file__, '..', '..', '..', '..'))
 ARTIFACT_URL = 'https://queue.taskcluster.net/v1/task/{}/artifacts/{}'
 INDEX_URL = 'https://index.taskcluster.net/v1/task/{}'
-INDEX_REGEX = r'index\.(docker\.images\.v1\.(.+)\.(.+)\.hash\.(.+))'
-
 
 class DockerImageTask(base.Task):
 
     def __init__(self, *args, **kwargs):
         self.index_paths = kwargs.pop('index_paths')
         super(DockerImageTask, self).__init__(*args, **kwargs)
 
     def __eq__(self, other):
@@ -52,26 +51,27 @@ class DockerImageTask(base.Task):
             'base_repository': params['base_repository'] or params['head_repository'],
             'head_repository': params['head_repository'],
             'head_ref': params['head_ref'] or params['head_rev'],
             'head_rev': params['head_rev'],
             'owner': params['owner'],
             'level': params['level'],
             'source': '{repo}file/{rev}/taskcluster/ci/docker-image/image.yml'
                       .format(repo=params['head_repository'], rev=params['head_rev']),
+            'index_image_prefix': INDEX_PREFIX,
+            'artifact_path': 'public/image.tar.zstd',
         }
 
         tasks = []
         templates = Templates(path)
         for image_name in config['images']:
             context_path = os.path.join('testing', 'docker', image_name)
 
             image_parameters = dict(parameters)
             image_parameters['context_path'] = context_path
-            image_parameters['artifact_path'] = 'public/image.tar'
             image_parameters['image_name'] = image_name
 
             image_artifact_path = \
                 "public/docker_image_contexts/{}/context.tar.gz".format(image_name)
             if os.environ.get('TASK_ID'):
                 # We put image context tar balls in a different artifacts folder
                 # on the Gecko decision task in order to have longer expiration
                 # dates for smaller artifacts.
@@ -100,19 +100,19 @@ class DockerImageTask(base.Task):
 
             attributes = {'image_name': image_name}
 
             # As an optimization, if the context hash exists for mozilla-central, that image
             # task ID will be used.  The reasoning behind this is that eventually everything ends
             # up on mozilla-central at some point if most tasks use this as a common image
             # for a given context hash, a worker within Taskcluster does not need to contain
             # the same image per branch.
-            index_paths = ['docker.images.v1.{}.{}.hash.{}'.format(
-                                project, image_name, context_hash)
-                           for project in ['mozilla-central', params['project']]]
+            index_paths = ['{}.{}.{}.hash.{}'.format(
+                                INDEX_PREFIX, project, image_name, context_hash)
+                           for project in set(['mozilla-central', params['project']])]
 
             tasks.append(cls(kind, 'build-docker-image-' + image_name,
                              task=image_task['task'], attributes=attributes,
                              index_paths=index_paths))
 
         return tasks
 
     def get_dependencies(self, taskgraph):
@@ -124,37 +124,41 @@ class DockerImageTask(base.Task):
                 url = INDEX_URL.format(index_path)
                 existing_task = json.load(urllib2.urlopen(url))
                 # Only return the task ID if the artifact exists for the indexed
                 # task.  Otherwise, continue on looking at each of the branches.  Method
                 # continues trying other branches in case mozilla-central has an expired
                 # artifact, but 'project' might not. Only return no task ID if all
                 # branches have been tried
                 request = urllib2.Request(
-                    ARTIFACT_URL.format(existing_task['taskId'], 'public/image.tar'))
+                    ARTIFACT_URL.format(existing_task['taskId'], 'public/image.tar.zstd'))
                 request.get_method = lambda: 'HEAD'
                 urllib2.urlopen(request)
 
                 # HEAD success on the artifact is enough
                 return True, existing_task['taskId']
             except urllib2.HTTPError:
                 pass
 
         return False, None
 
     @classmethod
     def from_json(cls, task_dict):
         # Generating index_paths for optimization
         routes = task_dict['task']['routes']
         index_paths = []
         for route in routes:
-            index_path_regex = re.compile(INDEX_REGEX)
-            result = index_path_regex.search(route)
-            if result is None:
+            if not route.startswith('index.' + INDEX_PREFIX):
+                continue
+            parts = route[len('index.' + INDEX_PREFIX) + 1:].split('.')
+            if len(parts) != 4 or parts[2] != 'hash':
                 continue
-            index_paths.append(result.group(1))
-            index_paths.append(result.group(1).replace(result.group(2), 'mozilla-central'))
+            project, image_name, _, context_hash = parts
+            # Construct index namespaces
+            index_paths += ['{}.{}.{}.hash.{}'.format(
+                    INDEX_PREFIX, project, image_name, context_hash)
+               for project in set(['mozilla-central', project])]
         docker_image_task = cls(kind='docker-image',
                                 label=task_dict['label'],
                                 attributes=task_dict['attributes'],
                                 task=task_dict['task'],
                                 index_paths=index_paths)
         return docker_image_task
--- a/taskcluster/taskgraph/transforms/task.py
+++ b/taskcluster/taskgraph/transforms/task.py
@@ -320,17 +320,17 @@ def payload_builder(name):
 def build_docker_worker_payload(config, task, task_def):
     worker = task['worker']
 
     image = worker['docker-image']
     if isinstance(image, dict):
         docker_image_task = 'build-docker-image-' + image['in-tree']
         task.setdefault('dependencies', {})['docker-image'] = docker_image_task
         image = {
-            "path": "public/image.tar",
+            "path": "public/image.tar.zstd",
             "taskId": {"task-reference": "<docker-image>"},
             "type": "task-image",
         }
 
     features = {}
 
     if worker.get('relengapi-proxy'):
         features['relengAPIProxy'] = True
--- a/taskcluster/taskgraph/util/docker.py
+++ b/taskcluster/taskgraph/util/docker.py
@@ -13,16 +13,18 @@ import tempfile
 
 from mozpack.archive import (
     create_tar_gz_from_files,
 )
 
 
 GECKO = os.path.realpath(os.path.join(__file__, '..', '..', '..', '..'))
 DOCKER_ROOT = os.path.join(GECKO, 'testing', 'docker')
+# Namespace prefix for indexed docker images; v2 artifacts are zstd-compressed
+INDEX_PREFIX = 'docker.images.v2'
 ARTIFACT_URL = 'https://queue.taskcluster.net/v1/task/{}/artifacts/{}'
 
 
 def docker_image(name, default_version=None):
     '''Determine the docker image name, including repository and tag, from an
     in-tree docker file.'''
     try:
         with open(os.path.join(DOCKER_ROOT, name, 'REGISTRY')) as f:
--- a/testing/docker/README.md
+++ b/testing/docker/README.md
@@ -34,17 +34,17 @@ will use that indexed task.  This is to 
 that were built from the same context. In summary, if the image has been built for mozilla-central,
 pushes to any branch will use that already built image.
 
 To use within an in-tree task definition, the format is:
 
 ```yaml
 image:
   type: 'task-image'
-  path: 'public/image.tar'
+  path: 'public/image.tar.zstd'
   taskId: '{{#task_id_for_image}}builder{{/task_id_for_image}}'
 ```
 
 ##### Context Directory Hashing
 
 Decision tasks will calculate the sha256 hash of the contents of the image
 directory and will determine if the image already exists for a given branch and hash
 or if a new image must be built and indexed.
@@ -62,19 +62,19 @@ of the context directory.
 This ensures that the hash is consistently calculated and path changes will result
 in different hashes being generated.
 
 ##### Task Image Index Namespace
 
 Images that are built on push and uploaded as an artifact of a task will be indexed under the
 following namespaces.
 
-* docker.images.v1.{project}.{image_name}.latest
-* docker.images.v1.{project}.{image_name}.pushdate.{year}.{month}-{day}-{pushtime}
-* docker.images.v1.{project}.{image_name}.hash.{context_hash}
+* docker.images.v2.{project}.{image_name}.latest
+* docker.images.v2.{project}.{image_name}.pushdate.{year}.{month}-{day}-{pushtime}
+* docker.images.v2.{project}.{image_name}.hash.{context_hash}
 
 Not only can images be browsed by the pushdate and context hash, but the 'latest' namespace
 is meant to view the latest built image.  This functions similarly to the 'latest' tag
 for docker images that are pushed to a registry.
 
 ### Docker Registry Images (prebuilt)
 
 ***Deprecation Warning: Use of prebuilt images should only be used for base images (those that other images
--- a/testing/docker/image_builder/Dockerfile
+++ b/testing/docker/image_builder/Dockerfile
@@ -1,34 +1,37 @@
-FROM ubuntu:14.04
+FROM ubuntu:16.04
+
+# %include testing/docker/recipes/tooltool.py
+ADD topsrcdir/testing/docker/recipes/tooltool.py /setup/tooltool.py
+
+# %include testing/docker/recipes/common.sh
+ADD topsrcdir/testing/docker/recipes/common.sh /setup/common.sh
 
-WORKDIR /home/worker/bin
+# %include testing/docker/recipes/install-mercurial.sh
+ADD topsrcdir/testing/docker/recipes/install-mercurial.sh /setup/install-mercurial.sh
+
+# %include testing/mozharness/external_tools/robustcheckout.py
+ADD topsrcdir/testing/mozharness/external_tools/robustcheckout.py /usr/local/mercurial/robustcheckout.py
+
+# %include testing/docker/recipes/run-task
+ADD topsrcdir/testing/docker/recipes/run-task /usr/local/bin/run-task
 
-RUN apt-get update && apt-get install -y apt-transport-https
-RUN sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 && \
-    sudo sh -c "echo deb https://get.docker.io/ubuntu docker main\
-    > /etc/apt/sources.list.d/docker.list"
-RUN apt-get update && apt-get install -y \
-    lxc-docker-1.6.1 \
-    curl \
-    wget \
-    git \
-    mercurial \
-    tar \
-    zip \
-    unzip \
-    vim \
-    sudo \
-    ca-certificates \
-    build-essential
+# Add and run setup script
+ADD build-image.sh      /usr/local/bin/build-image.sh
+ADD setup.sh            /setup/setup.sh
+RUN bash /setup/setup.sh
+
+# Setup a workspace that won't use AUFS
+VOLUME /home/worker/workspace
 
-ENV NODE_VERSION v0.12.4
-RUN cd /usr/local/ && \
-    curl https://nodejs.org/dist/$NODE_VERSION/node-$NODE_VERSION-linux-x64.tar.gz | tar -xz --strip-components 1 && \
-    node -v
+# Set variable normally configured at login, by the shells parent process, these
+# are taken from GNU su manual
+ENV           HOME          /home/worker
+ENV           SHELL         /bin/bash
+ENV           USER          worker
+ENV           LOGNAME       worker
+ENV           HOSTNAME      taskcluster-worker
+ENV           LC_ALL        C
 
-RUN npm install -g taskcluster-vcs@2.3.11
-
-ADD bin /home/worker/bin
-RUN chmod +x /home/worker/bin/*
-
-# Set a default command useful for debugging
-CMD ["/bin/bash", "--login"]
+# Set some sane defaults
+WORKDIR /home/worker/
+CMD     build-image.sh
--- a/testing/docker/image_builder/REGISTRY
+++ b/testing/docker/image_builder/REGISTRY
@@ -1,1 +1,1 @@
-taskcluster
+jonasfj
--- a/testing/docker/image_builder/VERSION
+++ b/testing/docker/image_builder/VERSION
@@ -1,1 +1,1 @@
-0.1.5
+0.1.6-alpha6
deleted file mode 100755
--- a/testing/docker/image_builder/bin/build_image.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash -vex
-
-# Set bash options to exit immediately if a pipeline exists non-zero, expand
-# print a trace of commands, and make output verbose (print shell input as it's
-# read)
-# See https://www.gnu.org/software/bash/manual/html_node/The-Set-Builtin.html
-set -x -e -v
-
-# Prefix errors with taskcluster error prefix so that they are parsed by Treeherder
-raise_error() {
-   echo
-   echo "[taskcluster-image-build:error] $1"
-   exit 1
-}
-
-# Ensure that the PROJECT is specified so the image can be indexed
-test -n "$PROJECT" || raise_error "Project must be provided."
-test -n "$HASH" || raise_error "Context Hash must be provided."
-
-mkdir /artifacts
-
-if [ ! -z "$CONTEXT_URL" ]; then
-    mkdir /context
-    if ! curl -L --retry 5 --connect-timeout 30 --fail "$CONTEXT_URL" | tar -xz --strip-components 1 -C /context; then
-        raise_error "Error downloading image context from decision task."
-    fi
-    CONTEXT_PATH=/context
-else
-    tc-vcs checkout /home/worker/workspace/src $BASE_REPOSITORY $HEAD_REPOSITORY $HEAD_REV $HEAD_REF
-    CONTEXT_PATH=/home/worker/workspace/src/$CONTEXT_PATH
-fi
-
-test -d $CONTEXT_PATH || raise_error "Context Path $CONTEXT_PATH does not exist."
-test -f "$CONTEXT_PATH/Dockerfile" || raise_error "Dockerfile must be present in $CONTEXT_PATH."
-
-docker build -t $PROJECT:$HASH $CONTEXT_PATH
-docker save $PROJECT:$HASH > /artifacts/image.tar
new file mode 100755
--- /dev/null
+++ b/testing/docker/image_builder/build-image.sh
@@ -0,0 +1,68 @@
+#!/bin/bash -vex
+
+# Set bash options to exit immediately if a pipeline exists non-zero, expand
+# print a trace of commands, and make output verbose (print shell input as it's
+# read)
+# See https://www.gnu.org/software/bash/manual/html_node/The-Set-Builtin.html
+set -x -e -v -o pipefail
+
+# Prefix errors with taskcluster error prefix so that they are parsed by Treeherder
+raise_error() {
+  echo
+  echo "[taskcluster-image-build:error] $1"
+  exit 1
+}
+
+# Ensure that the PROJECT is specified so the image can be indexed
+test -n "$PROJECT"    || raise_error "PROJECT must be provided."
+test -n "$HASH"       || raise_error "Context HASH must be provided."
+test -n "$IMAGE_NAME" || raise_error "IMAGE_NAME must be provided."
+
+# Create artifact folder
+mkdir -p /home/worker/workspace/artifacts
+
+# Construct a CONTEXT_FILE
+CONTEXT_FILE=/home/worker/workspace/context.tar
+if [ ! -z "$CONTEXT_URL" ]; then
+  if ! curl -L --retry 5 --connect-timeout 30 --fail "$CONTEXT_URL" -o "$CONTEXT_FILE.gz"; then
+    raise_error "Error downloading image context from decision task."
+  fi
+  mkdir /home/worker/workspace/context
+  tar -xzf "$CONTEXT_FILE.gz" -C /home/worker/workspace/context
+  tar -cf "$CONTEXT_FILE" -C "/home/worker/workspace/context/$IMAGE_NAME/" .
+else
+  # TODO: In some future refactor to always use a checkout
+  # Run ./mach taskcluster-build-image with --context-only to build context
+  run-task \
+    --chown-recursive "/home/worker/workspace" \
+    --vcs-checkout "/home/worker/workspace/checkout" \
+    -- /home/worker/workspace/checkout/mach \
+    taskcluster-build-image \
+    --context-only "$CONTEXT_FILE" \
+    "$IMAGE_NAME"
+fi
+test -f "$CONTEXT_FILE" || raise_error "Context file wasn't created"
+
+# Post context tar-ball to docker daemon
+# This interacts directly with the docker remote API, see:
+# https://docs.docker.com/engine/reference/api/docker_remote_api_v1.18/
+curl -s \
+  -X POST \
+  --header 'Content-Type: application/tar' \
+  --data-binary "@$CONTEXT_FILE" \
+  --unix-socket /var/run/docker.sock "http:/build?t=$IMAGE_NAME:$HASH" \
+  | tee /tmp/docker-build.log \
+  | jq -r '.status + .progress, .stream[:-1], .error | select(. != null)'
+
+# Exit non-zero if there is error entries in the log
+if cat /tmp/docker-build.log | jq -se 'add | .error' > /dev/null; then
+  raise_error "Image build failed: `cat /tmp/docker-build.log | jq -rse 'add | .error'`";
+fi
+
+# Get image from docker daemon
+# This interacts directly with the docker remote API, see:
+# https://docs.docker.com/engine/reference/api/docker_remote_api_v1.18/
+curl -s \
+  -X GET \
+  --unix-socket /var/run/docker.sock "http:/images/$IMAGE_NAME:$HASH/get" \
+  | zstd -3 -c -o /home/worker/workspace/artifacts/image.tar.zstd
new file mode 100644
--- /dev/null
+++ b/testing/docker/image_builder/setup.sh
@@ -0,0 +1,42 @@
+#!/bin/bash -vex
+set -v -e -x
+
+export DEBIAN_FRONTEND=noninteractive
+
+# Update apt-get lists
+apt-get update -y
+
+# Install dependencies
+apt-get install -y \
+    curl \
+    tar \
+    jq \
+    python \
+    build-essential
+
+# Install mercurial
+. /setup/common.sh
+. /setup/install-mercurial.sh
+
+# Install build-image.sh script
+chmod +x /usr/local/bin/build-image.sh
+chmod +x /usr/local/bin/run-task
+
+# Create workspace
+mkdir -p /home/worker/workspace
+
+# Install zstd 1.0.0
+curl -L --retry 5 --connect-timeout 30 --fail https://github.com/facebook/zstd/archive/v1.0.0.tar.gz > /tmp/zstd.tar.gz
+echo '197e6ef74da878cbf72844f38461bb18129d144fd5221b3598e973ecda6f5963 /tmp/zstd.tar.gz' | sha256sum -c
+tar -xvf /tmp/zstd.tar.gz -C /tmp
+make -C /tmp/zstd-1.0.0/programs install
+rm -rf /tmp/zstd-1.0.0/ /tmp/zstd.tar.gz
+apt-get purge -y build-essential
+
+# Purge apt-get caches to minimize image size
+apt-get auto-remove -y
+apt-get clean -y
+rm -rf /var/lib/apt/lists/
+
+# Remove this script
+rm -rf /setup/