author | Wes Kocher <wkocher@mozilla.com> |
date | Thu, 20 Oct 2016 15:59:47 -0700 |
changeset 361658 | 861fa708652cac38390be4eac89758ba0286a4a0 |
parent 361657 | 59cfa86c6db4b0384159c4322dc9ceeb152ae856 |
child 361659 | 2ec35e0e63b5b34de6f90dec0f0cbfaed76a7680 |
push id | 6795 |
push user | jlund@mozilla.com |
push date | Mon, 23 Jan 2017 14:19:46 +0000 |
treeherder | mozilla-beta@76101b503191 |
reviewers | backout |
bugs | 1302763 |
milestone | 52.0a1 |
backs out | d15798b73b9e3f8226e7d4ac8adfca3a77b069f0 |
first release with | nightly linux32, nightly linux64, nightly mac, nightly win32, nightly win64 |
last release without | nightly linux32, nightly linux64, nightly mac, nightly win32, nightly win64 |
--- a/AUTHORS
+++ b/AUTHORS
@@ -874,17 +874,16 @@ Roy Yokoyama <yokoyama@netscape.com>
 RSA Security, Inc
 Russell King <rmk@arm.linux.org.uk>
 Rusty Lynch <rusty.lynch@intel.com>
 Ryan Cassin <rcassin@supernova.org>
 Ryan Flint <rflint@dslr.net>
 Ryan Jones <sciguyryan@gmail.com>
 Ryan VanderMeulen <ryanvm@gmail.com>
 Ryoichi Furukawa <oliver@1000cp.com>
-Sanyam Khurana <Sanyam.Khurana01@gmail.com>
 sagdjb@softwareag.com
 Samir Gehani <sgehani@netscape.com>
 Sammy Ford
 Samphan Raruenrom
 Samuel Sieb <samuel@sieb.net>
 Sarlos Tamas
 scole@planetweb.com
 Scooter Morris <scootermorris@comcast.net>
--- a/taskcluster/ci/android-stuff/kind.yml
+++ b/taskcluster/ci/android-stuff/kind.yml
@@ -58,17 +58,17 @@ jobs:
         - "/home/worker/bin/before.sh && /home/worker/bin/build.sh && /home/worker/bin/after.sh && true\n"
       max-run-time: 36000
     scopes:
       - docker-worker:relengapi-proxy:tooltool.download.internal
       - docker-worker:relengapi-proxy:tooltool.download.public
     when:
       files-changed:
         - "mobile/android/config/**"
-        - "taskcluster/docker/android-gradle-build/**"
+        - "testing/docker/android-gradle-build/**"
         - "testing/mozharness/configs/builds/releng_sub_android_configs/*gradle_dependencies.py"
         - "**/*.gradle"
 
   android-test:
     description: "Android armv7 unit tests"
     attributes:
       build_platform: android-test
       build_type: opt
--- a/taskcluster/ci/source-check/mozlint.yml
+++ b/taskcluster/ci/source-check/mozlint.yml
@@ -35,17 +35,17 @@ mozlint-eslint/opt:
             # Run when eslint policies change.
             - '**/.eslintignore'
             - '**/*eslintrc*'
             # The plugin implementing custom checks.
             - 'tools/lint/eslint/eslint-plugin-mozilla/**'
             # Other misc lint related files.
             - 'python/mozlint/**'
             - 'tools/lint/**'
-            - 'taskcluster/docker/lint/**'
+            - 'testing/docker/lint/**'
 
 mozlint-flake8/opt:
     description: flake8 run over the gecko codebase
     treeherder:
         symbol: f8
         kind: test
         tier: 1
         platform: lint/opt
@@ -61,17 +61,17 @@ mozlint-flake8/opt:
         - integration
         - release
     when:
         files-changed:
             - '**/*.py'
             - '**/.flake8'
             - 'python/mozlint/**'
             - 'tools/lint/**'
-            - 'taskcluster/docker/lint/**'
+            - 'testing/docker/lint/**'
 
 wptlint-gecko/opt:
     description: web-platform-tests linter
     treeherder:
         symbol: W
         kind: test
         tier: 1
         platform: lint/opt
@@ -89,9 +89,9 @@ wptlint-gecko/opt:
     when:
         files-changed:
             - 'testing/web-platform/tests/**'
            - 'testing/web-platform/mozilla/tests/**'
            - 'testing/web-platform/meta/MANIFEST.json'
            - 'testing/web-platform/mozilla/meta/MANIFEST.json'
            - 'python/mozlint/**'
            - 'tools/lint/**'
-            - 'taskcluster/docker/lint/**'
+            - 'testing/docker/lint/**'
deleted file mode 100644 --- a/taskcluster/docker/android-gradle-build/Dockerfile +++ /dev/null @@ -1,97 +0,0 @@ -# TODO remove VOLUME below when the base image is updated next. -FROM taskcluster/centos6-build-upd:0.1.6.20160329195300 -MAINTAINER Nick Alexander <nalexander@mozilla.com> - -# BEGIN ../desktop-build/Dockerfile - -# TODO remove when base image is updated -VOLUME /home/worker/workspace -VOLUME /home/worker/tooltool-cache - -# Add build scripts; these are the entry points from the taskcluster worker, and -# operate on environment variables -ADD bin /home/worker/bin -RUN chmod +x /home/worker/bin/* - -# Add wrapper scripts for xvfb allowing tasks to easily retry starting up xvfb -# %include taskcluster/docker/recipes/xvfb.sh -ADD topsrcdir/taskcluster/docker/recipes/xvfb.sh /home/worker/scripts/xvfb.sh - -# Add configuration -COPY dot-config /home/worker/.config - -# Generate machine uuid file -RUN dbus-uuidgen --ensure=/var/lib/dbus/machine-id - -# Stubbed out credentials; mozharness looks for this file an issues a WARNING -# if it's not found, which causes the build to fail. Note that this needs to -# be in the parent of the workspace directory and in the directory where -# mozharness is run (not its --work-dir). See Bug 1169652. -ADD oauth.txt /home/worker/ - -# stubbed out buildprops, which keeps mozharness from choking -# Note that this needs to be in the parent of the workspace directory and in -# the directory where mozharness is run (not its --work-dir) -ADD buildprops.json /home/worker/ - -# install tooltool directly from github where tooltool_wrapper.sh et al. expect -# to find it -RUN wget -O /builds/tooltool.py https://raw.githubusercontent.com/mozilla/build-tooltool/master/tooltool.py -RUN chmod +x /builds/tooltool.py - -# END ../desktop-build/Dockerfile - -# Reset user/workdir from parent image so we can install software. -WORKDIR / -USER root - -# Update base. -RUN yum upgrade -y - -# Install JDK and Sonatype Nexus. Cribbed directly from -# https://github.com/sonatype/docker-nexus/blob/fffd2c61b2368292040910c055cf690c8e76a272/oss/Dockerfile. - -# Install the screen package here to use with xvfb. -# Move installation to base centos6-build image once Bug 1272629 is fixed -RUN yum install -y \ - createrepo \ - curl \ - java-1.7.0-openjdk-devel \ - java-1.7.0-openjdk \ - screen \ - sudo \ - tar \ - unzip \ - wget \ - zip \ - && yum clean all - -ENV NEXUS_VERSION 2.12.0-01 -ENV NEXUS_SHA1SUM 1a9aaad8414baffe0a2fd46eed1f41b85f4049e6 - -RUN mkdir -p /opt/sonatype/nexus - -WORKDIR /tmp -RUN curl --fail --silent --location --retry 3 \ - https://download.sonatype.com/nexus/oss/nexus-${NEXUS_VERSION}-bundle.tar.gz \ - -o /tmp/nexus-${NEXUS_VERSION}-bundle.tar.gz - -# Observe the two spaces below. Seriously. -RUN echo "${NEXUS_SHA1SUM} nexus-${NEXUS_VERSION}-bundle.tar.gz" > nexus-${NEXUS_VERSION}-bundle.tar.gz.sha1 -RUN sha1sum --check nexus-${NEXUS_VERSION}-bundle.tar.gz.sha1 - -RUN tar zxf nexus-${NEXUS_VERSION}-bundle.tar.gz \ - && mv /tmp/nexus-${NEXUS_VERSION}/* /opt/sonatype/nexus/ \ - && rm -rf /tmp/nexus-${NEXUS_VERSION} \ - && rm -rf /tmp/nexus-${NEXUS_VERSION}-bundle.tar.gz - -# Install tooltool directly from github. -RUN mkdir /build -ADD https://raw.githubusercontent.com/mozilla/build-tooltool/master/tooltool.py /build/tooltool.py -RUN chmod +rx /build/tooltool.py - -# Back to the centos6-build workdir, matching desktop-build. -WORKDIR /home/worker - -# Set a default command useful for debugging -CMD ["/bin/bash", "--login"]
deleted file mode 100644
--- a/taskcluster/docker/android-gradle-build/REGISTRY
+++ /dev/null
@@ -1,1 +0,0 @@
-taskcluster
deleted file mode 100644
--- a/taskcluster/docker/android-gradle-build/bin/checkout-script.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#! /bin/bash -vex
-
-set -x -e
-
-# Inputs, with defaults
-
-: GECKO_HEAD_REPOSITORY ${GECKO_HEAD_REPOSITORY:=https://hg.mozilla.org/mozilla-central}
-: GECKO_HEAD_REV ${GECKO_HEAD_REV:=default}
-
-: SCRIPT_DOWNLOAD_PATH ${SCRIPT_DOWNLOAD_PATH:=$PWD}
-: SCRIPT_PATH ${SCRIPT_PATH:?"script path must be set"}
-set -v
-
-# download script from the gecko repository
-url=${GECKO_HEAD_REPOSITORY}/raw-file/${GECKO_HEAD_REV}/${SCRIPT_PATH}
-wget --directory-prefix=${SCRIPT_DOWNLOAD_PATH} $url
-chmod +x `basename ${SCRIPT_PATH}`
deleted file mode 100644
--- a/taskcluster/docker/android-gradle-build/buildprops.json
+++ /dev/null
@@ -1,9 +0,0 @@
-{
-  "properties": {
-    "buildername": ""
-  },
-  "sourcestamp": {
-    "changes": []
-  },
-  "comments": "TaskCluster Job"
-}
deleted file mode 100644
--- a/taskcluster/docker/android-gradle-build/dot-config/pip/pip.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-[global]
-disable-pip-version-check = true
deleted file mode 100644
--- a/taskcluster/docker/android-gradle-build/oauth.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-taskcluster_clientId = None
-taskcluster_accessToken = None
deleted file mode 100644 --- a/taskcluster/docker/b2g-build/Dockerfile +++ /dev/null @@ -1,147 +0,0 @@ -FROM centos:centos6 -MAINTAINER Dustin J. Mitchell <dustin@mozilla.com> - -# Run majority of yum installs here so we cache them! -COPY releng.repo /etc/yum.repos.d/releng.repo -RUN yum install -y epel-release && \ - yum update -y && \ - yum makecache && \ - yum install -y wget - -# Install updated curl libraries -RUN cd /tmp && \ - wget https://s3-us-west-2.amazonaws.com/test-caching/libcurl-7.29.0-19.el6.x86_64.rpm && \ - wget https://s3-us-west-2.amazonaws.com/test-caching/libcurl-devel-7.29.0-19.el6.x86_64.rpm && \ - wget https://s3-us-west-2.amazonaws.com/test-caching/curl-7.29.0-19.el6.x86_64.rpm && \ - yum install -y libcurl-7.29.0-19.el6.x86_64.rpm libcurl-devel-7.29.0-19.el6.x86_64.rpm curl-7.29.0-19.el6.x86_64.rpm && \ - rm -f libcurl-7.29.0-19.el6.x86_64.rpm libcurl-devel-7.29.0-19.el6.x86_64.rpm curl-7.29.0-19.el6.x86_64.rpm && \ - cd - - -RUN yum install -y \ - # From Building B2G docs - # cmake \ - # cmake: is disabled intentionally to work around: bug 1141417 - GConf2-devel \ - alsa-lib-devel \ - autoconf213 \ - bc \ - bison \ - bzip2 \ - ccache \ - dbus-devel \ - dbus-glib-devel \ - dbus-glib-devel \ - dbus-python \ - expat-devel \ - file \ - flex \ - gawk \ - gcc473_0moz1 \ - gettext-devel \ - glibc-devel \ - glibc-devel.i686 \ - glibc-static \ - gstreamer-devel \ - gstreamer-plugins-base-devel \ - gtk2-devel \ - install \ - iw \ - libX11-devel \ - libX11-devel.i686 \ - libXrandr.i686 \ - libXt-devel \ - libnotify-devel \ - libstdc++-static \ - libstdc++-static \ - libstdc++.i686 \ - make \ - mesa-libGL-devel \ - mesa-libGL-devel.i686 \ - mozilla-python27 \ - mpfr-devel.x86_64 \ - ncurses-devel.i686 \ - ncurses:-devel \ - openssh-clients \ - openssl-devel \ - openssl-devel \ - patch \ - perl-DBI \ - perl-Digest-SHA \ - perl-ExtUtils-MakeMaker \ - pulseaudio-libs-devel \ - readline-devel.i686 \ - rsync \ - screen \ - subversion-perl \ - tar \ - tcl \ - tk \ - unzip \ - uuid \ - vim \ - wireless-tools-devel \ - xorg-x11-server-Xvfb \ - xorg-x11-server-utils \ - xz \ - yasm \ - zip \ - zlib-devel \ - zlib-devel.i686 && \ - # Remove cached packages. Cached package takes up a lot of space and - # distributing them to workers is wasteful. - yum clean all - -RUN mkdir -p /home/worker/bin -COPY bin/repository-url.py /home/worker/bin/repository-url.py - -ENV GCC_PATH /home/worker/workspace/gecko/gcc - -# Default to mozilla specific python, etc... -ENV PATH /tools/python27-mercurial/bin:/tools/python27/bin:$PATH -ENV PATH /tools/tools/buildfarm/utils:$PATH:/home/worker/bin/ -ENV PATH $GCC_PATH/bin:/tools/gcc-4.7.3-0moz1/bin:$PATH - -# Use the libstd++ we installed over all others... -env LD_LIBRARY_PATH=$GCC_PATH/lib64:$GCC_PATH/lib:/tools/gcc-4.7.3-0moz1/lib64:/tools/gcc-4.7.3-0moz1/lib - -RUN useradd -d /home/worker -s /bin/bash -m worker - -# Terrible symlink hacks so cc points to the gcc version we intend to use... -RUN ls -lah /tools/gcc-4.7.3-0moz1/bin && ln -s /tools/gcc-4.7.3-0moz1/bin/gcc /tools/gcc-4.7.3-0moz1/bin/cc - -# Terrible Hack to correctly install git-2.4.1 -RUN mkdir -p /tmp/git && cd /tmp/git && \ - curl -L https://s3-us-west-2.amazonaws.com/test-caching/git-2.4.1.tar.gz | tar -xz && \ - cd git* && \ - make prefix=/usr/local/ all -j10 && \ - make prefix=/usr/local/ install && \ - rm -Rf /tmp/git - -# Install node from node's own dist... 
-ENV NODE_VERSION v0.10.36 -RUN cd /usr/local/ && \ - curl https://nodejs.org/dist/$NODE_VERSION/node-$NODE_VERSION-linux-x64.tar.gz | tar -xz --strip-components 1 && \ - node -v - -RUN wget --no-check-certificate https://pypi.python.org/packages/source/s/setuptools/setuptools-1.4.2.tar.gz && \ - tar -xvf setuptools-1.4.2.tar.gz && \ - cd setuptools-1.4.2 && python setup.py install && \ - cd - && rm -rf setuptools-1.4.2* && \ - curl https://bootstrap.pypa.io/get-pip.py | python - && \ - pip install virtualenv mercurial - -# Generate machine uuid file -RUN dbus-uuidgen --ensure=/var/lib/dbus/machine-id - -# Set variable normally configured at login, by the shells parent process, these -# are taken from GNU su manual -ENV HOME /home/worker -ENV SHELL /bin/bash -ENV USER worker -ENV LOGNAME worker - -# Declare default working folder -WORKDIR /home/worker - -# Set a default command useful for debugging -CMD ["/bin/bash", "--login"]
deleted file mode 100644
--- a/taskcluster/docker/b2g-build/VERSION
+++ /dev/null
@@ -1,1 +0,0 @@
-0.2.11
deleted file mode 100755
--- a/taskcluster/docker/b2g-build/bin/repository-url.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#! /usr/bin/env python
-
-import argparse
-
-
-def repo_url(remote, revision, path):
-    '''
-    Construct a url pointing to the _raw_ file in the given remote this function
-    will handle url construction for both hg and github.
-    '''
-
-    # Ensure remote always ends in a slash...
-    if remote[-1] != '/':
-        remote = remote + '/'
-    if 'hg.mozilla.org' in remote:
-        return '{}raw-file/{}/{}'.format(remote, revision, path)
-    else:
-        return '{}raw/{}/{}'.format(remote, revision, path)
-
-
-parser = argparse.ArgumentParser(
-    description='Get url for raw file in remote repository'
-)
-
-parser.add_argument('remote', help='URL for remote repository')
-parser.add_argument('revision', help='Revision in remote repository')
-parser.add_argument('path', help='Path to file in remote repository')
-
-args = parser.parse_args()
-print(repo_url(args.remote, args.revision, args.path))
deleted file mode 100644
--- a/taskcluster/docker/b2g-build/releng.repo
+++ /dev/null
@@ -1,6 +0,0 @@
-[releng]
-name=releng
-descr=releng-specific packages from mockbuild-repos
-baseurl=http://mockbuild-repos.pub.build.mozilla.org/releng/public/CentOS/6/x86_64
-enabled=1
-gpgcheck=0
deleted file mode 100644
--- a/taskcluster/docker/base-test/REGISTRY
+++ /dev/null
@@ -1,1 +0,0 @@
-taskcluster
deleted file mode 100644
--- a/taskcluster/docker/builder/Dockerfile
+++ /dev/null
@@ -1,32 +0,0 @@
-FROM quay.io/mozilla/b2g-build:0.2.11
-MAINTAINER Dustin J. Mitchell <dustin@mozilla.com>
-
-ENV VERSION 1.2
-ENV PYTHONPATH /tools/tools/lib/python:$PYTHONPATH
-ENV TOOLTOOL_CACHE /home/worker/tools/tooltool-cache
-
-ADD https://raw.githubusercontent.com/taskcluster/buildbot-step/19219c470bd95b390ed0b31d4bf455169bf12fca/buildbot_step.js /home/worker/bin/buildbot_step
-
-# Add utilities and configuration
-RUN mkdir -p /home/worker/bin /home/worker/tools
-# Add bin tools last as they are most likely to change
-RUN chown -R worker:worker /home/worker/* /home/worker/.*
-
-# Instal build tools
-RUN hg clone http://hg.mozilla.org/build/tools/ /tools/tools && \
-    cd /tools/tools && \
-    python setup.py install
-
-# Initialize git (makes repo happy)
-RUN git config --global user.email "mozilla@example.com" && \
-    git config --global user.name "mozilla"
-
-# VCS Tools
-RUN npm install -g taskcluster-vcs@2.3.17
-
-# TODO enable worker
-# TODO volume mount permissions will be an issue
-# USER worker
-
-COPY bin /home/worker/bin
-RUN chmod a+x /home/worker/bin/*
deleted file mode 100644
--- a/taskcluster/docker/builder/REGISTRY
+++ /dev/null
@@ -1,1 +0,0 @@
-taskcluster
deleted file mode 100644
--- a/taskcluster/docker/builder/VERSION
+++ /dev/null
@@ -1,1 +0,0 @@
-0.5.13
deleted file mode 100644
--- a/taskcluster/docker/builder/bin/checkout-gecko
+++ /dev/null
@@ -1,16 +0,0 @@
-#! /bin/bash -ex
-
-# Ensure we have at least enough to check gecko out...
-test $GECKO_BASE_REPOSITORY
-
-# Workspace to checkout gecko into...
-WORKSPACE=$1
-mkdir -p $WORKSPACE
-
-# Note that tc-vcs requires only the first two arguments so others are optional.
-# This is intended to make it easier to clone local directories.
-buildbot_step "Clone gecko" tc-vcs checkout $WORKSPACE/gecko \
-  $GECKO_BASE_REPOSITORY \
-  $GECKO_HEAD_REPOSITORY \
-  $GECKO_HEAD_REV \
-  $GECKO_HEAD_REF
deleted file mode 100644
--- a/taskcluster/docker/builder/git.env
+++ /dev/null
@@ -1,6 +0,0 @@
-GECKO_BASE_REPOSITORY=https://github.com/mozilla/gecko-dev
-GECKO_HEAD_REPOSITORY=https://github.com/mozilla/gecko-dev
-GECKO_HEAD_REF=master
-GECKO_HEAD_REV=master
-MOZHARNESS_REPOSITORY=http://hg.mozilla.org/build/mozharness
-MOZHARNESS_REV=tip
deleted file mode 100644
--- a/taskcluster/docker/builder/mulet.env
+++ /dev/null
@@ -1,7 +0,0 @@
-GECKO_BASE_REPOSITORY=https://github.com/mozilla/gecko-dev
-GECKO_HEAD_REPOSITORY=https://github.com/mozilla/gecko-dev
-GECKO_HEAD_REF=master
-GECKO_HEAD_REV=master
-MOZHARNESS_REPOSITORY=http://hg.mozilla.org/build/mozharness
-MOZHARNESS_REV=tip
-MOZCONFIG=b2g/dev/config/mozconfigs/linux64/mulet
deleted file mode 100644
--- a/taskcluster/docker/centos6-build-upd/REGISTRY
+++ /dev/null
@@ -1,1 +0,0 @@
-taskcluster
deleted file mode 100644
--- a/taskcluster/docker/centos6-build/REGISTRY
+++ /dev/null
@@ -1,1 +0,0 @@
-taskcluster
deleted file mode 100644
--- a/taskcluster/docker/centos6-build/VERSION
+++ /dev/null
@@ -1,1 +0,0 @@
-0.1.6
deleted file mode 100644
--- a/taskcluster/docker/decision/Dockerfile
+++ /dev/null
@@ -1,25 +0,0 @@
-FROM ubuntu:16.04
-MAINTAINER Greg Arndt <garndt@mozilla.com>
-
-# Add worker user
-RUN useradd -d /home/worker -s /bin/bash -m worker
-RUN mkdir /home/worker/artifacts && chown worker:worker /home/worker/artifacts
-
-# %include taskcluster/docker/recipes/tooltool.py
-ADD topsrcdir/taskcluster/docker/recipes/tooltool.py /tmp/tooltool.py
-
-# %include taskcluster/docker/recipes/install-mercurial.sh
-ADD topsrcdir/taskcluster/docker/recipes/install-mercurial.sh /tmp/install-mercurial.sh
-
-ADD system-setup.sh /tmp/system-setup.sh
-RUN bash /tmp/system-setup.sh
-
-# %include taskcluster/docker/recipes/run-task
-ADD topsrcdir/taskcluster/docker/recipes/run-task /home/worker/bin/run-task
-
-ENV PATH /home/worker/bin:$PATH
-ENV SHELL /bin/bash
-ENV HOME /home/worker
-
-# Set a default command useful for debugging
-CMD ["/bin/bash", "--login"]
deleted file mode 100644
--- a/taskcluster/docker/decision/REGISTRY
+++ /dev/null
@@ -1,1 +0,0 @@
-taskcluster
deleted file mode 100644
--- a/taskcluster/docker/desktop-build/Dockerfile
+++ /dev/null
@@ -1,46 +0,0 @@
-# TODO remove VOLUME below when the base image is updated next.
-FROM          taskcluster/centos6-build-upd:0.1.6.20160329195300
-MAINTAINER    Dustin J. Mitchell <dustin@mozilla.com>
-
-# TODO remove when base image is updated
-VOLUME /home/worker/workspace
-VOLUME /home/worker/tooltool-cache
-
-# Add build scripts; these are the entry points from the taskcluster worker, and
-# operate on environment variables
-ADD bin /home/worker/bin
-RUN chmod +x /home/worker/bin/*
-
-# Add wrapper scripts for xvfb allowing tasks to easily retry starting up xvfb
-# %include taskcluster/docker/recipes/xvfb.sh
-ADD topsrcdir/taskcluster/docker/recipes/xvfb.sh /home/worker/scripts/xvfb.sh
-
-# Add configuration
-COPY dot-config /home/worker/.config
-
-# Generate machine uuid file
-RUN dbus-uuidgen --ensure=/var/lib/dbus/machine-id
-
-# Stubbed out credentials; mozharness looks for this file an issues a WARNING
-# if it's not found, which causes the build to fail. Note that this needs to
-# be in the parent of the workspace directory and in the directory where
-# mozharness is run (not its --work-dir). See Bug 1169652.
-ADD oauth.txt /home/worker/
-
-# stubbed out buildprops, which keeps mozharness from choking
-# Note that this needs to be in the parent of the workspace directory and in
-# the directory where mozharness is run (not its --work-dir)
-ADD buildprops.json /home/worker/
-
-# install tooltool directly from github where tooltool_wrapper.sh et al. expect
-# to find it
-RUN wget -O /builds/tooltool.py https://raw.githubusercontent.com/mozilla/build-tooltool/master/tooltool.py
-RUN chmod +x /builds/tooltool.py
-
-# Move installation to base centos6-build image once Bug 1272629 is fixed
-# Install the screen package here to use with xvfb.
-# Install bison to build binutils.
-RUN yum install -y bison screen
-
-# Set a default command useful for debugging
-CMD ["/bin/bash", "--login"]
deleted file mode 100644
--- a/taskcluster/docker/desktop-build/dot-config/pip/pip.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-[global]
-disable-pip-version-check = true
deleted file mode 100644 --- a/taskcluster/docker/desktop-test/Dockerfile +++ /dev/null @@ -1,106 +0,0 @@ -FROM ubuntu:12.04 -MAINTAINER Jonas Finnemann Jensen <jopsen@gmail.com> - -RUN useradd -d /home/worker -s /bin/bash -m worker -WORKDIR /home/worker - -# %include taskcluster/docker/recipes/tooltool.py -ADD topsrcdir/taskcluster/docker/recipes/tooltool.py /setup/tooltool.py - -# %include taskcluster/docker/recipes/install-mercurial.sh -ADD topsrcdir/taskcluster/docker/recipes/install-mercurial.sh /tmp/install-mercurial.sh - -# Add wrapper scripts for xvfb allowing tasks to easily retry starting up xvfb -# %include taskcluster/docker/recipes/xvfb.sh -ADD topsrcdir/taskcluster/docker/recipes/xvfb.sh /home/worker/scripts/xvfb.sh - -# %include taskcluster/docker/recipes/ubuntu1204-test-system-setup.sh -ADD topsrcdir/taskcluster/docker/recipes/ubuntu1204-test-system-setup.sh /setup/system-setup.sh -RUN bash /setup/system-setup.sh - -# %include taskcluster/docker/recipes/run-task -ADD topsrcdir/taskcluster/docker/recipes/run-task /home/worker/bin/run-task - -# %include taskcluster/scripts/tester/test-ubuntu.sh -ADD topsrcdir/taskcluster/scripts/tester/test-ubuntu.sh /home/worker/bin/test-linux.sh - -# This will create a host mounted filesystem when the cache is stripped -# on Try. This cancels out some of the performance losses of aufs. See -# bug 1291940. -VOLUME /home/worker/hg-shared -VOLUME /home/worker/checkouts -VOLUME /home/worker/workspace - -# Set variable normally configured at login, by the shells parent process, these -# are taken from GNU su manual -ENV HOME /home/worker -ENV SHELL /bin/bash -ENV USER worker -ENV LOGNAME worker -ENV HOSTNAME taskcluster-worker -ENV LANG en_US.UTF-8 -ENV LC_ALL en_US.UTF-8 - -# Add utilities and configuration -COPY dot-files/config /home/worker/.config -COPY dot-files/pulse /home/worker/.pulse -RUN chmod +x bin/* -# TODO: remove this when buildbot is gone -COPY buildprops.json /home/worker/buildprops.json -COPY tc-vcs-config.yml /etc/taskcluster-vcs.yml - -# TODO: remove -ADD https://raw.githubusercontent.com/taskcluster/buildbot-step/master/buildbot_step /home/worker/bin/buildbot_step -RUN chmod u+x /home/worker/bin/buildbot_step - -# TODO: remove -ADD https://s3-us-west-2.amazonaws.com/test-caching/packages/linux64-stackwalk /usr/local/bin/linux64-minidump_stackwalk -RUN chmod +x /usr/local/bin/linux64-minidump_stackwalk - -# allow the worker user to access video devices -RUN usermod -a -G video worker - -RUN mkdir Documents; mkdir Pictures; mkdir Music; mkdir Videos; mkdir artifacts - -# install tc-vcs and tc-npm-cache -RUN npm install -g taskcluster-vcs@2.3.12 \ - && npm install -g taskcluster-npm-cache@1.1.14 \ - && rm -rf ~/.npm -ENV PATH $PATH:/home/worker/bin - -# TODO Re-enable worker when bug 1093833 lands -#USER worker - -# clean up -RUN rm -Rf .cache && mkdir -p .cache - -# Disable Ubuntu update prompt -# http://askubuntu.com/questions/515161/ubuntu-12-04-disable-release-notification-of-14-04-in-update-manager -ADD release-upgrades /etc/update-manager/release-upgrades - -# Disable tools with on-login popups that interfere with tests; see bug 1240084 and bug 984944. -ADD jockey-gtk.desktop deja-dup-monitor.desktop /etc/xdg/autostart/ - -# In test.sh we accept START_VNC to start a vnc daemon. -# Exposing this port allows it to work. 
-EXPOSE 5900 - -# This helps not forgetting setting DISPLAY=:0 when running -# tests outside of test.sh -ENV DISPLAY :0 - -# Disable apport (Ubuntu app crash reporter) to avoid stealing focus from test runs -ADD apport /etc/default/apport - -# Disable font antialiasing for now to match releng's setup -ADD fonts.conf /home/worker/.fonts.conf - -# Set up first-run experience for interactive mode -ADD motd /etc/taskcluster-motd -ADD taskcluster-interactive-shell /bin/taskcluster-interactive-shell -RUN chmod +x /bin/taskcluster-interactive-shell - -RUN chown -R worker:worker /home/worker - -# Set a default command useful for debugging -CMD ["/bin/bash", "--login"]
deleted file mode 100644
--- a/taskcluster/docker/desktop-test/apport
+++ /dev/null
@@ -1,1 +0,0 @@
-enabled=0
deleted file mode 100644
--- a/taskcluster/docker/desktop-test/buildprops.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-  "properties": {
-    "buildername": ""
-  },
-  "sourcestamp": {
-    "changes": []
-  }
-}
deleted file mode 100644
--- a/taskcluster/docker/desktop-test/deja-dup-monitor.desktop
+++ /dev/null
@@ -1,19 +0,0 @@
-[Desktop Entry]
-Version=1.0
-X-Ubuntu-Gettext-Domain=deja-dup
-
-Name=Backup Monitor
-Comment=Schedules backups at regular intervals
-
-Icon=deja-dup
-TryExec=/usr/lib/deja-dup/deja-dup/deja-dup-monitor
-Exec=/usr/lib/deja-dup/deja-dup/deja-dup-monitor
-
-# Bug 984944/1240084 - It prevents taking screenshots
-X-GNOME-Autostart-Delay=false
-
-StartupNotify=false
-NoDisplay=true
-
-Type=Application
-Categories=System;Utility;Archiving;
deleted file mode 100644
--- a/taskcluster/docker/desktop-test/dot-files/config/pip/pip.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-[global]
-disable-pip-version-check = true
deleted file mode 100644
--- a/taskcluster/docker/desktop-test/dot-files/config/user-dirs.dirs
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is written by xdg-user-dirs-update
-# If you want to change or add directories, just edit the line you're
-# interested in. All local changes will be retained on the next run
-# Format is XDG_xxx_DIR="$HOME/yyy", where yyy is a shell-escaped
-# homedir-relative path, or XDG_xxx_DIR="/yyy", where /yyy is an
-# absolute path. No other format is supported.
-
-XDG_DESKTOP_DIR="$HOME/Desktop"
-XDG_DOWNLOAD_DIR="$HOME/Downloads"
-XDG_TEMPLATES_DIR="$HOME/Templates"
-XDG_PUBLICSHARE_DIR="$HOME/Public"
-XDG_DOCUMENTS_DIR="$HOME/Documents"
-XDG_MUSIC_DIR="$HOME/Music"
-XDG_PICTURES_DIR="$HOME/Pictures"
-XDG_VIDEOS_DIR="$HOME/Videos"
deleted file mode 100644
--- a/taskcluster/docker/desktop-test/dot-files/config/user-dirs.locale
+++ /dev/null
@@ -1,1 +0,0 @@
-en_US
deleted file mode 100644 --- a/taskcluster/docker/desktop-test/dot-files/pulse/default.pa +++ /dev/null @@ -1,164 +0,0 @@ -#!/usr/bin/pulseaudio -nF -# -# This file is part of PulseAudio. -# -# PulseAudio is free software; you can redistribute it and/or modify it -# under the terms of the GNU Lesser General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# PulseAudio is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with PulseAudio; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - -# This startup script is used only if PulseAudio is started per-user -# (i.e. not in system mode) - -.nofail - -### Load something into the sample cache -#load-sample-lazy x11-bell /usr/share/sounds/gtk-events/activate.wav -#load-sample-lazy pulse-hotplug /usr/share/sounds/startup3.wav -#load-sample-lazy pulse-coldplug /usr/share/sounds/startup3.wav -#load-sample-lazy pulse-access /usr/share/sounds/generic.wav - -.fail - -### Automatically restore the volume of streams and devices -load-module module-device-restore -load-module module-stream-restore -load-module module-card-restore - -### Automatically augment property information from .desktop files -### stored in /usr/share/application -load-module module-augment-properties - -### Load audio drivers statically -### (it's probably better to not load these drivers manually, but instead -### use module-udev-detect -- see below -- for doing this automatically) -#load-module module-alsa-sink -#load-module module-alsa-source device=hw:1,0 -#load-module module-oss device="/dev/dsp" sink_name=output source_name=input -#load-module module-oss-mmap device="/dev/dsp" sink_name=output source_name=input -#load-module module-null-sink -#load-module module-pipe-sink - -### Automatically load driver modules depending on the hardware available -.ifexists module-udev-detect.so -load-module module-udev-detect -.else -### Use the static hardware detection module (for systems that lack udev/hal support) -load-module module-detect -.endif - -### Automatically connect sink and source if JACK server is present -.ifexists module-jackdbus-detect.so -.nofail -load-module module-jackdbus-detect -.fail -.endif - -### Automatically load driver modules for Bluetooth hardware -# This module causes a pulseaudio startup failure on "gecko-tester" -#.ifexists module-bluetooth-discover.so -#load-module module-bluetooth-discover -#.endif - -### Load several protocols -.ifexists module-esound-protocol-unix.so -load-module module-esound-protocol-unix -.endif -load-module module-native-protocol-unix - -### Network access (may be configured with paprefs, so leave this commented -### here if you plan to use paprefs) -#load-module module-esound-protocol-tcp -#load-module module-native-protocol-tcp -#load-module module-zeroconf-publish - -### Load the RTP receiver module (also configured via paprefs, see above) -#load-module module-rtp-recv - -### Load the RTP sender module (also configured via paprefs, see above) -#load-module module-null-sink sink_name=rtp format=s16be channels=2 rate=44100 sink_properties="device.description='RTP Multicast Sink'" -#load-module module-rtp-send 
source=rtp.monitor - -### Load additional modules from GConf settings. This can be configured with the paprefs tool. -### Please keep in mind that the modules configured by paprefs might conflict with manually -### loaded modules. -.ifexists module-gconf.so -.nofail -load-module module-gconf -.fail -.endif - -### Automatically restore the default sink/source when changed by the user -### during runtime -### NOTE: This should be loaded as early as possible so that subsequent modules -### that look up the default sink/source get the right value -load-module module-default-device-restore - -### Automatically move streams to the default sink if the sink they are -### connected to dies, similar for sources -load-module module-rescue-streams - -### Make sure we always have a sink around, even if it is a null sink. -load-module module-always-sink - -### Honour intended role device property -load-module module-intended-roles - -### Automatically suspend sinks/sources that become idle for too long -load-module module-suspend-on-idle - -### If autoexit on idle is enabled we want to make sure we only quit -### when no local session needs us anymore. -# This module causes a pulseaudio startup failure on "gecko-tester" -#.ifexists module-console-kit.so -#load-module module-console-kit -#.endif - -### Enable positioned event sounds -load-module module-position-event-sounds - -### Cork music streams when a phone stream is active -#load-module module-cork-music-on-phone - -### Modules to allow autoloading of filters (such as echo cancellation) -### on demand. module-filter-heuristics tries to determine what filters -### make sense, and module-filter-apply does the heavy-lifting of -### loading modules and rerouting streams. -load-module module-filter-heuristics -load-module module-filter-apply - -### Load DBus protocol -#.ifexists module-dbus-protocol.so -#load-module module-dbus-protocol -#.endif - -# X11 modules should not be started from default.pa so that one daemon -# can be shared by multiple sessions. - -### Load X11 bell module -#load-module module-x11-bell sample=bell-windowing-system - -### Register ourselves in the X11 session manager -#load-module module-x11-xsmp - -### Publish connection data in the X11 root window -#.ifexists module-x11-publish.so -#.nofail -#load-module module-x11-publish -#.fail -#.endif - -load-module module-switch-on-port-available - -### Make some devices default -#set-default-sink output -#set-default-source input
deleted file mode 100644
--- a/taskcluster/docker/desktop-test/fonts.conf
+++ /dev/null
@@ -1,5 +0,0 @@
-<match target="font">
-  <edit name="antialias" mode="assign">
-    <bool>false</bool>
-  </edit>
-</match>
deleted file mode 100644
--- a/taskcluster/docker/desktop-test/jockey-gtk.desktop
+++ /dev/null
@@ -1,15 +0,0 @@
-[Desktop Entry]
-Name=Check for new hardware drivers
-Comment=Notify about new hardware drivers available for the system
-Icon=jockey
-Exec=sh -c "test -e /var/cache/jockey/check || exec jockey-gtk --check"
-Terminal=false
-Type=Application
-Categories=System;Settings;GTK;HardwareSettings;
-NotShowIn=KDE;
-X-Ubuntu-Gettext-Domain=jockey
-
-# Bug 984944/1240084 - It prevents taking screenshots
-X-GNOME-Autostart-Delay=false
-
-NoDisplay=true
deleted file mode 100644
--- a/taskcluster/docker/desktop-test/motd
+++ /dev/null
@@ -1,6 +0,0 @@
-Welcome to your taskcluster interactive shell! The regularly scheduled task
-has been paused to give you a chance to set up your debugging environment.
-
-For your convenience, the exact mozharness command needed for this task can
-be invoked using the 'run-mozharness' command.
-
deleted file mode 100644
--- a/taskcluster/docker/desktop-test/release-upgrades
+++ /dev/null
@@ -1,17 +0,0 @@
-# Default behavior for the release upgrader.
-
-[DEFAULT]
-# Default prompting behavior, valid options:
-#
-# never  - Never check for a new release.
-# normal - Check to see if a new release is available. If more than one new
-#          release is found, the release upgrader will attempt to upgrade to
-#          the release that immediately succeeds the currently-running
-#          release.
-# lts    - Check to see if a new LTS release is available. The upgrader
-#          will attempt to upgrade to the first LTS release available after
-#          the currently-running one. Note that this option should not be
-#          used if the currently-running release is not itself an LTS
-#          release, since in that case the upgrader won't be able to
-#          determine if a newer release is available.
-Prompt=never
deleted file mode 100644 --- a/taskcluster/docker/desktop-test/tc-vcs-config.yml +++ /dev/null @@ -1,40 +0,0 @@ -# Default configuration used by the tc-vs tools these can be overridden by -# passing the config you wish to use over the command line... -git: git -hg: hg - -repoCache: - # Repo url to clone when running repo init.. - repoUrl: https://gerrit.googlesource.com/git-repo.git - # Version of repo to utilize... - repoRevision: master - # The root where all downloaded cache files are stored on the local machine... - cacheDir: '{{env.HOME}}/.tc-vcs-repo/' - # Name/prefixed used as part of the base url. - cacheName: sources/{{name}}.tar.gz - # Command used to upload the tarball - uploadTar: "curl --header 'Content-Type: application/x-tar' --header 'Content-Encoding: gzip' -X PUT --data-binary @'{{source}}' '{{url}}'" - # Large http get requests are often slower using nodes built in http layer so - # we utilize a subprocess which is responsible for fetching... - get: curl --connect-timeout 30 --speed-limit 500000 -L -o {{dest}} {{url}} - # Used to create clone tarball - compress: tar -czf {{dest}} {{source}} - # All cache urls use tar + gz this is the command used to extract those files - # downloaded by the "get" command. - extract: tar -x -z -C {{dest}} -f {{source}} - -cloneCache: - # The root where all downloaded cache files are stored on the local machine... - cacheDir: '{{env.HOME}}/.tc-vcs/' - # Command used to upload the tarball - uploadTar: "curl --header 'Content-Type: application/x-tar' --header 'Content-Encoding: gzip' -X PUT --data-binary @'{{source}}' '{{url}}'" - # Large http get requests are often slower using nodes built in http layer so - # we utilize a subprocess which is responsible for fetching... - get: curl --connect-timeout 30 --speed-limit 500000 -L -o {{dest}} {{url}} - # Used to create clone tarball - compress: tar -czf {{dest}} {{source}} - # All cache urls use tar + gz this is the command used to extract those files - # downloaded by the "get" command. - extract: tar -x -z --strip-components 1 -C {{dest}} -f {{source}} - # Name/prefixed used as part of the base url. - cacheName: clones/{{name}}.tar.gz
deleted file mode 100644
--- a/taskcluster/docker/desktop-test/tester.env
+++ /dev/null
@@ -1,4 +0,0 @@
-GAIA_REV=tip
-GAIA_REF=tip
-GAIA_BASE_REPOSITORY=https://hg.mozilla.org/integration/gaia-central
-GAIA_HEAD_REPOSITORY=https://hg.mozilla.org/integration/gaia-central
deleted file mode 100644 --- a/taskcluster/docker/desktop1604-test/Dockerfile +++ /dev/null @@ -1,108 +0,0 @@ -FROM ubuntu:16.04 -MAINTAINER Joel Maher <joel.maher@gmail.com> - -RUN useradd -d /home/worker -s /bin/bash -m worker -WORKDIR /home/worker - -# %include taskcluster/docker/recipes/tooltool.py -ADD topsrcdir/taskcluster/docker/recipes/tooltool.py /setup/tooltool.py - -# %include taskcluster/docker/recipes/install-mercurial.sh -ADD topsrcdir/taskcluster/docker/recipes/install-mercurial.sh /setup/install-mercurial.sh - -# %include taskcluster/docker/recipes/ubuntu1604-test-system-setup.sh -ADD topsrcdir/taskcluster/docker/recipes/ubuntu1604-test-system-setup.sh /setup/system-setup.sh -RUN bash /setup/system-setup.sh - -# Add wrapper scripts for xvfb allowing tasks to easily retry starting up xvfb -# %include taskcluster/docker/recipes/xvfb.sh -ADD topsrcdir/taskcluster/docker/recipes/xvfb.sh /home/worker/scripts/xvfb.sh - -# %include taskcluster/docker/recipes/run-task -ADD topsrcdir/taskcluster/docker/recipes/run-task /home/worker/bin/run-task - -# %include taskcluster/scripts/tester/test-ubuntu.sh -ADD topsrcdir/taskcluster/scripts/tester/test-ubuntu.sh /home/worker/bin/test-linux.sh - -# This will create a host mounted filesystem when the cache is stripped -# on Try. This cancels out some of the performance losses of aufs. See -# bug 1291940. -VOLUME /home/worker/hg-shared -VOLUME /home/worker/checkouts -VOLUME /home/worker/workspace - -# Set variable normally configured at login, by the shells parent process, these -# are taken from GNU su manual -ENV HOME /home/worker -ENV SHELL /bin/bash -ENV USER worker -ENV LOGNAME worker -ENV HOSTNAME taskcluster-worker -ENV LANG en_US.UTF-8 -ENV LC_ALL en_US.UTF-8 - -# Add utilities and configuration -COPY dot-files/config /home/worker/.config -COPY dot-files/pulse /home/worker/.pulse -COPY bin /home/worker/bin -RUN chmod +x bin/* -# TODO: remove this when buildbot is gone -COPY buildprops.json /home/worker/buildprops.json -COPY tc-vcs-config.yml /etc/taskcluster-vcs.yml - -# TODO: remove -ADD https://raw.githubusercontent.com/taskcluster/buildbot-step/master/buildbot_step /home/worker/bin/buildbot_step -RUN chmod u+x /home/worker/bin/buildbot_step - -# TODO: remove -ADD https://s3-us-west-2.amazonaws.com/test-caching/packages/linux64-stackwalk /usr/local/bin/linux64-minidump_stackwalk -RUN chmod +x /usr/local/bin/linux64-minidump_stackwalk - -# allow the worker user to access video devices -RUN usermod -a -G video worker - -RUN mkdir Documents; mkdir Pictures; mkdir Music; mkdir Videos; mkdir artifacts - -# install a new enough npm, plus tc-vcs and tc-npm-cache -RUN npm install -g npm@^2.0.0 \ - && npm install -g taskcluster-vcs@2.3.12 \ - && npm install -g taskcluster-npm-cache@1.1.14 \ - && rm -rf ~/.npm -ENV PATH $PATH:/home/worker/bin - -# TODO Re-enable worker when bug 1093833 lands -#USER worker - -# clean up -RUN rm -Rf .cache && mkdir -p .cache - -# Disable Ubuntu update prompt -# http://askubuntu.com/questions/515161/ubuntu-12-04-disable-release-notification-of-14-04-in-update-manager -ADD release-upgrades /etc/update-manager/release-upgrades - -# Disable tools with on-login popups that interfere with tests; see bug 1240084 and bug 984944. -ADD jockey-gtk.desktop deja-dup-monitor.desktop /etc/xdg/autostart/ - -# In test.sh we accept START_VNC to start a vnc daemon. -# Exposing this port allows it to work. 
-EXPOSE 5900 - -# This helps not forgetting setting DISPLAY=:0 when running -# tests outside of test.sh -ENV DISPLAY :0 - -# Disable apport (Ubuntu app crash reporter) to avoid stealing focus from test runs -ADD apport /etc/default/apport - -# Disable font antialiasing for now to match releng's setup -ADD fonts.conf /home/worker/.fonts.conf - -# Set up first-run experience for interactive mode -ADD motd /etc/taskcluster-motd -ADD taskcluster-interactive-shell /bin/taskcluster-interactive-shell -RUN chmod +x /bin/taskcluster-interactive-shell - -RUN chown -R worker:worker /home/worker - -# Set a default command useful for debugging -CMD ["/bin/bash", "--login"]
deleted file mode 100644
--- a/taskcluster/docker/desktop1604-test/dot-files/config/pip/pip.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-[global]
-disable-pip-version-check = true
deleted file mode 100644
--- a/taskcluster/docker/desktop1604-test/dot-files/config/user-dirs.locale
+++ /dev/null
@@ -1,1 +0,0 @@
-en_US
deleted file mode 100644 --- a/taskcluster/docker/desktop1604-test/dot-files/pulse/default.pa +++ /dev/null @@ -1,164 +0,0 @@ -#!/usr/bin/pulseaudio -nF -# -# This file is part of PulseAudio. -# -# PulseAudio is free software; you can redistribute it and/or modify it -# under the terms of the GNU Lesser General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# PulseAudio is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with PulseAudio; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - -# This startup script is used only if PulseAudio is started per-user -# (i.e. not in system mode) - -.nofail - -### Load something into the sample cache -#load-sample-lazy x11-bell /usr/share/sounds/gtk-events/activate.wav -#load-sample-lazy pulse-hotplug /usr/share/sounds/startup3.wav -#load-sample-lazy pulse-coldplug /usr/share/sounds/startup3.wav -#load-sample-lazy pulse-access /usr/share/sounds/generic.wav - -.fail - -### Automatically restore the volume of streams and devices -load-module module-device-restore -load-module module-stream-restore -load-module module-card-restore - -### Automatically augment property information from .desktop files -### stored in /usr/share/application -load-module module-augment-properties - -### Load audio drivers statically -### (it's probably better to not load these drivers manually, but instead -### use module-udev-detect -- see below -- for doing this automatically) -#load-module module-alsa-sink -#load-module module-alsa-source device=hw:1,0 -#load-module module-oss device="/dev/dsp" sink_name=output source_name=input -#load-module module-oss-mmap device="/dev/dsp" sink_name=output source_name=input -#load-module module-null-sink -#load-module module-pipe-sink - -### Automatically load driver modules depending on the hardware available -.ifexists module-udev-detect.so -load-module module-udev-detect -.else -### Use the static hardware detection module (for systems that lack udev/hal support) -load-module module-detect -.endif - -### Automatically connect sink and source if JACK server is present -.ifexists module-jackdbus-detect.so -.nofail -load-module module-jackdbus-detect -.fail -.endif - -### Automatically load driver modules for Bluetooth hardware -# This module causes a pulseaudio startup failure on "gecko-tester" -#.ifexists module-bluetooth-discover.so -#load-module module-bluetooth-discover -#.endif - -### Load several protocols -.ifexists module-esound-protocol-unix.so -load-module module-esound-protocol-unix -.endif -load-module module-native-protocol-unix - -### Network access (may be configured with paprefs, so leave this commented -### here if you plan to use paprefs) -#load-module module-esound-protocol-tcp -#load-module module-native-protocol-tcp -#load-module module-zeroconf-publish - -### Load the RTP receiver module (also configured via paprefs, see above) -#load-module module-rtp-recv - -### Load the RTP sender module (also configured via paprefs, see above) -#load-module module-null-sink sink_name=rtp format=s16be channels=2 rate=44100 sink_properties="device.description='RTP Multicast Sink'" -#load-module module-rtp-send 
source=rtp.monitor - -### Load additional modules from GConf settings. This can be configured with the paprefs tool. -### Please keep in mind that the modules configured by paprefs might conflict with manually -### loaded modules. -.ifexists module-gconf.so -.nofail -load-module module-gconf -.fail -.endif - -### Automatically restore the default sink/source when changed by the user -### during runtime -### NOTE: This should be loaded as early as possible so that subsequent modules -### that look up the default sink/source get the right value -load-module module-default-device-restore - -### Automatically move streams to the default sink if the sink they are -### connected to dies, similar for sources -load-module module-rescue-streams - -### Make sure we always have a sink around, even if it is a null sink. -load-module module-always-sink - -### Honour intended role device property -load-module module-intended-roles - -### Automatically suspend sinks/sources that become idle for too long -load-module module-suspend-on-idle - -### If autoexit on idle is enabled we want to make sure we only quit -### when no local session needs us anymore. -# This module causes a pulseaudio startup failure on "gecko-tester" -#.ifexists module-console-kit.so -#load-module module-console-kit -#.endif - -### Enable positioned event sounds -load-module module-position-event-sounds - -### Cork music streams when a phone stream is active -#load-module module-cork-music-on-phone - -### Modules to allow autoloading of filters (such as echo cancellation) -### on demand. module-filter-heuristics tries to determine what filters -### make sense, and module-filter-apply does the heavy-lifting of -### loading modules and rerouting streams. -load-module module-filter-heuristics -load-module module-filter-apply - -### Load DBus protocol -#.ifexists module-dbus-protocol.so -#load-module module-dbus-protocol -#.endif - -# X11 modules should not be started from default.pa so that one daemon -# can be shared by multiple sessions. - -### Load X11 bell module -#load-module module-x11-bell sample=bell-windowing-system - -### Register ourselves in the X11 session manager -#load-module module-x11-xsmp - -### Publish connection data in the X11 root window -#.ifexists module-x11-publish.so -#.nofail -#load-module module-x11-publish -#.fail -#.endif - -load-module module-switch-on-port-available - -### Make some devices default -#set-default-sink output -#set-default-source input
deleted file mode 100644 --- a/taskcluster/docker/desktop1604-test/tc-vcs-config.yml +++ /dev/null @@ -1,40 +0,0 @@ -# Default configuration used by the tc-vs tools these can be overridden by -# passing the config you wish to use over the command line... -git: git -hg: hg - -repoCache: - # Repo url to clone when running repo init.. - repoUrl: https://gerrit.googlesource.com/git-repo.git - # Version of repo to utilize... - repoRevision: master - # The root where all downloaded cache files are stored on the local machine... - cacheDir: '{{env.HOME}}/.tc-vcs-repo/' - # Name/prefixed used as part of the base url. - cacheName: sources/{{name}}.tar.gz - # Command used to upload the tarball - uploadTar: "curl --header 'Content-Type: application/x-tar' --header 'Content-Encoding: gzip' -X PUT --data-binary @'{{source}}' '{{url}}'" - # Large http get requests are often slower using nodes built in http layer so - # we utilize a subprocess which is responsible for fetching... - get: curl --connect-timeout 30 --speed-limit 500000 -L -o {{dest}} {{url}} - # Used to create clone tarball - compress: tar -czf {{dest}} {{source}} - # All cache urls use tar + gz this is the command used to extract those files - # downloaded by the "get" command. - extract: tar -x -z -C {{dest}} -f {{source}} - -cloneCache: - # The root where all downloaded cache files are stored on the local machine... - cacheDir: '{{env.HOME}}/.tc-vcs/' - # Command used to upload the tarball - uploadTar: "curl --header 'Content-Type: application/x-tar' --header 'Content-Encoding: gzip' -X PUT --data-binary @'{{source}}' '{{url}}'" - # Large http get requests are often slower using nodes built in http layer so - # we utilize a subprocess which is responsible for fetching... - get: curl --connect-timeout 30 --speed-limit 500000 -L -o {{dest}} {{url}} - # Used to create clone tarball - compress: tar -czf {{dest}} {{source}} - # All cache urls use tar + gz this is the command used to extract those files - # downloaded by the "get" command. - extract: tar -x -z --strip-components 1 -C {{dest}} -f {{source}} - # Name/prefixed used as part of the base url. - cacheName: clones/{{name}}.tar.gz
deleted file mode 100644
--- a/taskcluster/docker/desktop1604-test/tester.env
+++ /dev/null
@@ -1,4 +0,0 @@
-GAIA_REV=tip
-GAIA_REF=tip
-GAIA_BASE_REPOSITORY=https://hg.mozilla.org/integration/gaia-central
-GAIA_HEAD_REPOSITORY=https://hg.mozilla.org/integration/gaia-central
deleted file mode 100644
--- a/taskcluster/docker/image_builder/REGISTRY
+++ /dev/null
@@ -1,1 +0,0 @@
-taskcluster
deleted file mode 100644
--- a/taskcluster/docker/lint/Dockerfile
+++ /dev/null
@@ -1,31 +0,0 @@
-FROM ubuntu:16.04
-MAINTAINER Andrew Halberstadt <ahalberstadt@mozilla.com>
-
-RUN useradd -d /home/worker -s /bin/bash -m worker
-WORKDIR /home/worker
-
-RUN mkdir /build
-# %include taskcluster/docker/recipes/tooltool.py
-ADD topsrcdir/taskcluster/docker/recipes/tooltool.py /build/tooltool.py
-
-# %include taskcluster/docker/recipes/install-mercurial.sh
-ADD topsrcdir/taskcluster/docker/recipes/install-mercurial.sh /build/install-mercurial.sh
-ADD system-setup.sh /tmp/system-setup.sh
-RUN bash /tmp/system-setup.sh
-
-# %include taskcluster/docker/recipes/run-task
-ADD topsrcdir/taskcluster/docker/recipes/run-task /home/worker/bin/run-task
-RUN chown -R worker:worker /home/worker/bin && chmod 755 /home/worker/bin/*
-
-# Set variable normally configured at login, by the shells parent process, these
-# are taken from GNU su manual
-ENV HOME /home/worker
-ENV SHELL /bin/bash
-ENV USER worker
-ENV LOGNAME worker
-ENV HOSTNAME taskcluster-worker
-ENV LANG en_US.UTF-8
-ENV LC_ALL en_US.UTF-8
-
-# Set a default command useful for debugging
-CMD ["/bin/bash", "--login"]
deleted file mode 100644
--- a/taskcluster/docker/phone-builder/Dockerfile
+++ /dev/null
@@ -1,17 +0,0 @@
-FROM taskcluster/builder:0.5.13
-MAINTAINER Wander Lairson Costa <wcosta@mozilla.com>
-
-# Add utilities and configuration
-ADD bin /home/worker/bin
-
-# Builds need the share module enabled
-ADD hgrc /home/worker/.hgrc
-RUN chown -R worker:worker /home/worker/.hgrc
-
-# Make sure we use our own config
-COPY tc-vcs-config.yml /etc/taskcluster-vcs.yml
-
-RUN yum install -y bc lzop java-1.7.0-openjdk
-RUN pip install awscli
-RUN npm install -g bower gulp apm grunt-cli
-
deleted file mode 100755 --- a/taskcluster/docker/phone-builder/bin/validate_task.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env python - -from __future__ import print_function -import os -import os.path -import json -import urllib2 -import sys -import re -import subprocess - -repo_matcher = re.compile(r'[a-z]+://(hg|git)\.mozilla\.org') - - -def get_task(taskid): - return json.load( - urllib2.urlopen('https://queue.taskcluster.net/v1/task/' + taskid)) - - -def check_task(task): - payload = task['payload'] - - if 'GECKO_HEAD_REPOSITORY' not in payload['env']: - print('Task has no head gecko repository', file=sys.stderr) - return -1 - - repo = payload['env']['GECKO_HEAD_REPOSITORY'] - # if it is not a mozilla repository, fail - if not repo_matcher.match(repo): - print('Invalid head repository', repo, file=sys.stderr) - return -1 - - if 'GECKO_BASE_REPOSITORY' not in payload['env']: - print('Task has no base gecko repository', file=sys.stderr) - return -1 - - repo = payload['env']['GECKO_BASE_REPOSITORY'] - if not repo_matcher.match(repo): - print('Invalid base repository', repo, file=sys.stderr) - return -1 - - locations = task["extra"]["locations"] - if "img" in locations: - img = locations["img"] - if img.startswith("public"): - print('Cannot upload images to public', file=sys.stderr) - return -1 - - return 0 - - -def main(): - taskid = os.getenv('TASK_ID') - - # If the task id is None, we assume we are running docker locally - if taskid is not None: - task = get_task(taskid) - ret = check_task(task) - if ret != 0: - return ret - - if len(sys.argv) > 1: - try: - return subprocess.call(sys.argv[1:], shell=True) - except subprocess.CalledProcessError as e: - return e.returncode - - return 0 - - -if __name__ == '__main__': - sys.exit(main())
deleted file mode 100644
--- a/taskcluster/docker/phone-builder/hgrc
+++ /dev/null
@@ -1,4 +0,0 @@
-[extensions]
-share =
-[ui]
-username = TaskCluster <nobody@mozilla.org>
deleted file mode 100644 --- a/taskcluster/docker/phone-builder/tc-vcs-config.yml +++ /dev/null @@ -1,40 +0,0 @@ -# Default configuration used by the tc-vs tools these can be overridden by -# passing the config you wish to use over the command line... -git: git -hg: hg - -repoCache: - # Repo url to clone when running repo init.. - repoUrl: https://gerrit.googlesource.com/git-repo.git - # Version of repo to utilize... - repoRevision: master - # The root where all downloaded cache files are stored on the local machine... - cacheDir: '{{env.HOME}}/.tc-vcs-repo/' - # Name/prefixed used as part of the base url. - cacheName: sources/{{name}}.tar.gz - # Command used to upload the tarball - uploadTar: "curl --retry 5 --fail --header 'Content-Type: application/x-tar' --header 'Content-Encoding: gzip' -X PUT --data-binary @'{{source}}' '{{url}}'" - # Large http get requests are often slower using nodes built in http layer so - # we utilize a subprocess which is responsible for fetching... - get: curl --retry 5 --fail -L -o {{dest}} {{url}} - # Used to create clone tarball - compress: tar -czf {{dest}} {{source}} - # All cache urls use tar + gz this is the command used to extract those files - # downloaded by the "get" command. - extract: tar -x -z -C {{dest}} -f {{source}} - -cloneCache: - # The root where all downloaded cache files are stored on the local machine... - cacheDir: '{{env.HOME}}/.tc-vcs/' - # Command used to upload the tarball - uploadTar: "curl --retry 5 --fail --header 'Content-Type: application/x-tar' --header 'Content-Encoding: gzip' -X PUT --data-binary @'{{source}}' '{{url}}'" - # Large http get requests are often slower using nodes built in http layer so - # we utilize a subprocess which is responsible for fetching... - get: curl --retry 5 --fail -L -o {{dest}} {{url}} - # Used to create clone tarball - compress: tar -czf {{dest}} {{source}} - # All cache urls use tar + gz this is the command used to extract those files - # downloaded by the "get" command. - extract: tar -x -z --strip-components 1 -C {{dest}} -f {{source}} - # Name/prefixed used as part of the base url. - cacheName: clones/{{name}}.tar.gz
deleted file mode 100644 --- a/taskcluster/docker/phone-builder/tests/invalid_base_repo.yml +++ /dev/null @@ -1,50 +0,0 @@ -taskId: 1 -task: - metadata: - source: http://todo.com/soon - owner: user@domain.com - name: B2G Emulator - description: B2G Emulator - - workerType: b2gbuild - provisionerId: aws-provisioner - - scopes: - - 'docker-worker:cache:build-emulator-objects' - - 'docker-worker:image:quay.io/mozilla/phone-builder:0.0.1' - - payload: - cache: - build-emulator-objects: '/home/worker/object-folder-flame-kk-1' - - env: - TARGET: 'flame-kk' - B2G_DEBUG: '1' - # revision/project params defined originally here https://github.com/taskcluster/taskcluster-try/blob/master/try/instantiate.js - REVISION: 'tip' - GECKO_HEAD_REPOSITORY: 'http://hg.mozilla.org/mozilla-central' - GECKO_BASE_REPOSITORY: 'git@github.com:mozilla/gecko-dev.git' - - image: 'quay.io/mozilla/phone-builder:0.0.1' - maxRunTime: 14400 - - command: - - build-phone.sh - - artifacts: - 'private/build': - type: directory - path: '/home/worker/artifacts/' - - extra: - # Rather then enforcing particular conventions we require that all build - # tasks provide the "build" extra field to specify where the build and tests - # files are located. - locations: - build: 'private/build/emulator.tar.gz' - tests: 'private/build/b2g-tests.zip' - symbols: 'private/build/b2g-crashreporter-symbols.zip' - sources: 'private/build/sources.xml' - - treeherder: - symbol: B
deleted file mode 100644 --- a/taskcluster/docker/phone-builder/tests/invalid_head_repo.yml +++ /dev/null @@ -1,50 +0,0 @@ -taskId: 1 -task: - metadata: - source: http://todo.com/soon - owner: user@domain.com - name: B2G Emulator - description: B2G Emulator - - workerType: b2gbuild - provisionerId: aws-provisioner - - scopes: - - 'docker-worker:cache:build-emulator-objects' - - 'docker-worker:image:quay.io/mozilla/phone-builder:0.0.1' - - payload: - cache: - build-emulator-objects: '/home/worker/object-folder-flame-kk-1' - - env: - TARGET: 'flame-kk' - B2G_DEBUG: '1' - # revision/project params defined originally here https://github.com/taskcluster/taskcluster-try/blob/master/try/instantiate.js - REVISION: 'tip' - GECKO_HEAD_REPOSITORY: 'git@github.com:mozilla/gecko-dev.git' - GECKO_BASE_REPOSITORY: 'http://hg.mozilla.org/mozilla-central' - - image: 'quay.io/mozilla/phone-builder:0.0.1' - maxRunTime: 14400 - - command: - - build-phone.sh - - artifacts: - 'private/build': - type: directory - path: '/home/worker/artifacts/' - - extra: - # Rather then enforcing particular conventions we require that all build - # tasks provide the "build" extra field to specify where the build and tests - # files are located. - locations: - build: 'private/build/emulator.tar.gz' - tests: 'private/build/b2g-tests.zip' - symbols: 'private/build/b2g-crashreporter-symbols.zip' - sources: 'private/build/sources.xml' - - treeherder: - symbol: B
deleted file mode 100644 --- a/taskcluster/docker/phone-builder/tests/public.yml +++ /dev/null @@ -1,50 +0,0 @@ -taskId: 1 -task: - metadata: - source: http://todo.com/soon - owner: user@domain.com - name: B2G Emulator - description: B2G Emulator - - workerType: b2gbuild - provisionerId: aws-provisioner - - scopes: - - 'docker-worker:cache:build-emulator-objects' - - 'docker-worker:image:quay.io/mozilla/phone-builder:0.0.1' - - payload: - cache: - build-emulator-objects: '/home/worker/object-folder-flame-kk-1' - - env: - TARGET: 'flame-kk' - B2G_DEBUG: '1' - # revision/project params defined originally here https://github.com/taskcluster/taskcluster-try/blob/master/try/instantiate.js - REVISION: 'tip' - GECKO_HEAD_REPOSITORY: 'http://hg.mozilla.org/mozilla-central' - GECKO_BASE_REPOSITORY: 'http://hg.mozilla.org/mozilla-central' - - image: 'quay.io/mozilla/phone-builder:0.0.1' - maxRunTime: 14400 - - command: - - build-phone.sh - - artifacts: - 'public/build': - type: directory - path: '/home/worker/artifacts/' - - extra: - # Rather then enforcing particular conventions we require that all build - # tasks provide the "build" extra field to specify where the build and tests - # files are located. - locations: - build: 'public/build/emulator.tar.gz' - tests: 'public/build/b2g-tests.zip' - symbols: 'public/build/b2g-crashreporter-symbols.zip' - sources: 'public/build/sources.xml' - - treeherder: - symbol: B
deleted file mode 100755 --- a/taskcluster/docker/phone-builder/tests/test_validation.py +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env python - -import unittest -import sys -import yaml - -from validate_task import check_task -sys.path.append('../bin') - - -def load_task(task_file): - content = open(task_file, 'r') - return yaml.load(content)['task'] - - -class TaskValidationTest(unittest.TestCase): - def test_valid_task(self): - task = load_task('valid.yml') - self.assertEquals(check_task(task), 0) - - def test_invalid_base_repo(self): - task = load_task('invalid_base_repo.yml') - self.assertEquals(check_task(task), -1) - - def test_invalid_head_repo(self): - task = load_task('invalid_head_repo.yml') - self.assertEquals(check_task(task), -1) - - def test_public_artifact(self): - task = load_task('public.yml') - self.assertEquals(check_task(task), -1) - - -if __name__ == '__main__': - unittest.main()
deleted file mode 100644 --- a/taskcluster/docker/phone-builder/tests/valid.yml +++ /dev/null @@ -1,53 +0,0 @@ -taskId: 1 -task: - metadata: - source: http://todo.com/soon - owner: user@domain.com - name: B2G flame-kk opt - description: B2G flame-kk opt - - workerType: b2gbuild - provisionerId: aws-provisioner - - scopes: - - 'docker-worker:cache:build-phone-objects' - - 'docker-worker:image:{{#docker_image}}phone-builder{{/docker_image}}' - - payload: - cache: - build-phone-objects: '/home/worker/object-folder-flame-kk-1' - - env: - TARGET: 'flame-kk' - B2G_DEBUG: '1' - # revision/project params defined originally here https://github.com/taskcluster/taskcluster-try/blob/master/try/instantiate.js - REVISION: 'tip' - GECKO_HEAD_REPOSITORY: 'http://hg.mozilla.org/mozilla-central' - GECKO_BASE_REPOSITORY: 'http://hg.mozilla.org/mozilla-central' - - image: '{{#docker_image}}phone-builder{{/docker_image}}' - maxRunTime: 14400 - - command: - - build-phone.sh - - artifacts: - 'private/build': - type: directory - path: '/home/worker/artifacts/' - expires: - relative-datestamp: '1 year' - - extra: - # Rather then enforcing particular conventions we require that all build - # tasks provide the "build" extra field to specify where the build and tests - # files are located. - locations: - build: 'private/build/b2g-android-arm.tar.gz' - img: 'private/build/flame-kk.zip' - tests: 'private/build/gaia.zip' - symbols: 'private/build/b2g-crashreporter-symbols.zip' - sources: 'private/build/sources.xml' - - treeherder: - symbol: B
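Taken together, the deleted fixtures above document the contract that the phone-builder's validate_task.check_task enforced: ssh-style GECKO_BASE_REPOSITORY / GECKO_HEAD_REPOSITORY values and public artifact paths are rejected, while http(s) hg repositories with private artifacts pass. A minimal sketch of a checker satisfying those fixtures follows; the structure is an assumption for illustration, not the original implementation.

def check_task(task):
    """Return 0 when the task looks acceptable, -1 otherwise (sketch only)."""
    env = task['payload'].get('env', {})
    for key in ('GECKO_BASE_REPOSITORY', 'GECKO_HEAD_REPOSITORY'):
        repo = env.get(key, '')
        # invalid_base_repo.yml / invalid_head_repo.yml use ssh-style git URLs
        if not repo.startswith(('http://', 'https://')):
            return -1
    # public.yml publishes artifacts under public/, which the tests expect to fail
    for path in task['payload'].get('artifacts', {}):
        if path.startswith('public/'):
            return -1
    return 0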
--- a/taskcluster/taskgraph/task/docker_image.py +++ b/taskcluster/taskgraph/task/docker_image.py @@ -57,17 +57,17 @@ class DockerImageTask(base.Task): 'level': params['level'], 'source': '{repo}file/{rev}/taskcluster/ci/docker-image/image.yml' .format(repo=params['head_repository'], rev=params['head_rev']), } tasks = [] templates = Templates(path) for image_name in config['images']: - context_path = os.path.join('taskcluster', 'docker', image_name) + context_path = os.path.join('testing', 'docker', image_name) image_parameters = dict(parameters) image_parameters['context_path'] = context_path image_parameters['artifact_path'] = 'public/image.tar' image_parameters['image_name'] = image_name image_artifact_path = \ "public/docker_image_contexts/{}/context.tar.gz".format(image_name)
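For reference, the loop in this hunk builds one set of template parameters per in-tree image, pointing the build context at the image's directory and naming a per-image context tarball artifact. A rough illustration of the values produced for a single image (reconstructed from the hunk, not from a live run):

import os

image_name = 'desktop-test'  # example entry from config['images']
image_parameters = {
    'context_path': os.path.join('testing', 'docker', image_name),
    'artifact_path': 'public/image.tar',
    'image_name': image_name,
}
image_artifact_path = (
    'public/docker_image_contexts/{}/context.tar.gz'.format(image_name)
)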
--- a/taskcluster/taskgraph/transforms/task.py +++ b/taskcluster/taskgraph/transforms/task.py @@ -146,17 +146,17 @@ task_description_schema = Schema({ # For tasks that will run in docker-worker or docker-engine, this is the # name of the docker image or in-tree docker image to run the task in. If # in-tree, then a dependency will be created automatically. This is # generally `desktop-test`, or an image that acts an awful lot like it. Required('docker-image'): Any( # a raw Docker image path (repo/image:tag) basestring, - # an in-tree generated docker image (from `taskcluster/docker/<name>`) + # an in-tree generated docker image (from `testing/docker/<name>`) {'in-tree': basestring} ), # worker features that should be enabled Required('relengapi-proxy', default=False): bool, Required('chainOfTrust', default=False): bool, Required('taskcluster-proxy', default=False): bool, Required('allow-ptrace', default=False): bool,
--- a/taskcluster/taskgraph/transforms/tests/test_description.py +++ b/taskcluster/taskgraph/transforms/tests/test_description.py @@ -114,17 +114,17 @@ test_description_schema = Schema({ # For tasks that will run in docker-worker or docker-engine, this is the # name of the docker image or in-tree docker image to run the task in. If # in-tree, then a dependency will be created automatically. This is # generally `desktop-test`, or an image that acts an awful lot like it. Required('docker-image', default={'in-tree': 'desktop-test'}): Any( # a raw Docker image path (repo/image:tag) basestring, - # an in-tree generated docker image (from `taskcluster/docker/<name>`) + # an in-tree generated docker image (from `testing/docker/<name>`) {'in-tree': basestring} ), # seconds of runtime after which the task will be killed. Like 'chunks', # this can be keyed by test platform. Required('max-run-time', default=3600): Any( int, {'by-test-platform': {basestring: int}},
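Both task.py and test_description.py accept the same two shapes for docker-image. A small illustration of the two forms (the specific values are examples drawn from elsewhere in this patch):

# a raw Docker image reference, used by the worker as-is
raw_image = {'docker-image': 'quay.io/mozilla/phone-builder:0.0.1'}

# an in-tree image, built from testing/docker/desktop-test; the task graph
# adds the corresponding docker-image task as a dependency automatically
in_tree_image = {'docker-image': {'in-tree': 'desktop-test'}}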
--- a/taskcluster/taskgraph/util/docker.py +++ b/taskcluster/taskgraph/util/docker.py @@ -12,17 +12,17 @@ import tarfile import tempfile from mozpack.archive import ( create_tar_gz_from_files, ) GECKO = os.path.realpath(os.path.join(__file__, '..', '..', '..', '..')) -DOCKER_ROOT = os.path.join(GECKO, 'taskcluster', 'docker') +DOCKER_ROOT = os.path.join(GECKO, 'testing', 'docker') ARTIFACT_URL = 'https://queue.taskcluster.net/v1/task/{}/artifacts/{}' def docker_image(name, default_version=None): '''Determine the docker image name, including repository and tag, from an in-tree docker file.''' try: with open(os.path.join(DOCKER_ROOT, name, 'REGISTRY')) as f:
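The truncated docker_image() helper in this hunk resolves an in-tree image name to a full repository:tag reference using the REGISTRY and VERSION files restored under testing/docker later in this patch. A hedged sketch of that lookup, assuming the conventional fallbacks to a shared top-level REGISTRY file and to default_version:

import os

DOCKER_ROOT = os.path.join('testing', 'docker')  # relative to the gecko root for this sketch

def docker_image(name, default_version=None):
    '''Resolve an in-tree image to registry/name:version (sketch).'''
    try:
        with open(os.path.join(DOCKER_ROOT, name, 'REGISTRY')) as f:
            registry = f.read().strip()
    except IOError:
        # assumed fallback: the shared REGISTRY at the root of DOCKER_ROOT
        with open(os.path.join(DOCKER_ROOT, 'REGISTRY')) as f:
            registry = f.read().strip()
    try:
        with open(os.path.join(DOCKER_ROOT, name, 'VERSION')) as f:
            version = f.read().strip()
    except IOError:
        version = default_version
    return '{}/{}:{}'.format(registry, name, version)

For the centos6-build-upd image restored below (REGISTRY containing "taskcluster"), this would yield something like taskcluster/centos6-build-upd:<version>, matching the FROM line of the android-gradle-build Dockerfile that follows.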
new file mode 100644 --- /dev/null +++ b/testing/docker/android-gradle-build/Dockerfile @@ -0,0 +1,97 @@ +# TODO remove VOLUME below when the base image is updated next. +FROM taskcluster/centos6-build-upd:0.1.6.20160329195300 +MAINTAINER Nick Alexander <nalexander@mozilla.com> + +# BEGIN ../desktop-build/Dockerfile + +# TODO remove when base image is updated +VOLUME /home/worker/workspace +VOLUME /home/worker/tooltool-cache + +# Add build scripts; these are the entry points from the taskcluster worker, and +# operate on environment variables +ADD bin /home/worker/bin +RUN chmod +x /home/worker/bin/* + +# Add wrapper scripts for xvfb allowing tasks to easily retry starting up xvfb +# %include testing/docker/recipes/xvfb.sh +ADD topsrcdir/testing/docker/recipes/xvfb.sh /home/worker/scripts/xvfb.sh + +# Add configuration +COPY dot-config /home/worker/.config + +# Generate machine uuid file +RUN dbus-uuidgen --ensure=/var/lib/dbus/machine-id + +# Stubbed out credentials; mozharness looks for this file an issues a WARNING +# if it's not found, which causes the build to fail. Note that this needs to +# be in the parent of the workspace directory and in the directory where +# mozharness is run (not its --work-dir). See Bug 1169652. +ADD oauth.txt /home/worker/ + +# stubbed out buildprops, which keeps mozharness from choking +# Note that this needs to be in the parent of the workspace directory and in +# the directory where mozharness is run (not its --work-dir) +ADD buildprops.json /home/worker/ + +# install tooltool directly from github where tooltool_wrapper.sh et al. expect +# to find it +RUN wget -O /builds/tooltool.py https://raw.githubusercontent.com/mozilla/build-tooltool/master/tooltool.py +RUN chmod +x /builds/tooltool.py + +# END ../desktop-build/Dockerfile + +# Reset user/workdir from parent image so we can install software. +WORKDIR / +USER root + +# Update base. +RUN yum upgrade -y + +# Install JDK and Sonatype Nexus. Cribbed directly from +# https://github.com/sonatype/docker-nexus/blob/fffd2c61b2368292040910c055cf690c8e76a272/oss/Dockerfile. + +# Install the screen package here to use with xvfb. +# Move installation to base centos6-build image once Bug 1272629 is fixed +RUN yum install -y \ + createrepo \ + curl \ + java-1.7.0-openjdk-devel \ + java-1.7.0-openjdk \ + screen \ + sudo \ + tar \ + unzip \ + wget \ + zip \ + && yum clean all + +ENV NEXUS_VERSION 2.12.0-01 +ENV NEXUS_SHA1SUM 1a9aaad8414baffe0a2fd46eed1f41b85f4049e6 + +RUN mkdir -p /opt/sonatype/nexus + +WORKDIR /tmp +RUN curl --fail --silent --location --retry 3 \ + https://download.sonatype.com/nexus/oss/nexus-${NEXUS_VERSION}-bundle.tar.gz \ + -o /tmp/nexus-${NEXUS_VERSION}-bundle.tar.gz + +# Observe the two spaces below. Seriously. +RUN echo "${NEXUS_SHA1SUM} nexus-${NEXUS_VERSION}-bundle.tar.gz" > nexus-${NEXUS_VERSION}-bundle.tar.gz.sha1 +RUN sha1sum --check nexus-${NEXUS_VERSION}-bundle.tar.gz.sha1 + +RUN tar zxf nexus-${NEXUS_VERSION}-bundle.tar.gz \ + && mv /tmp/nexus-${NEXUS_VERSION}/* /opt/sonatype/nexus/ \ + && rm -rf /tmp/nexus-${NEXUS_VERSION} \ + && rm -rf /tmp/nexus-${NEXUS_VERSION}-bundle.tar.gz + +# Install tooltool directly from github. +RUN mkdir /build +ADD https://raw.githubusercontent.com/mozilla/build-tooltool/master/tooltool.py /build/tooltool.py +RUN chmod +rx /build/tooltool.py + +# Back to the centos6-build workdir, matching desktop-build. +WORKDIR /home/worker + +# Set a default command useful for debugging +CMD ["/bin/bash", "--login"]
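The "Observe the two spaces" comment in the Dockerfile refers to the checksum-file format sha1sum --check parses: the hex digest, a separator space, a text-mode indicator space, then the file name. A small Python sketch that reproduces the same verification step (illustrative only; it assumes the bundle has already been downloaded as in the Dockerfile):

import hashlib
import subprocess

NEXUS_VERSION = '2.12.0-01'
NEXUS_SHA1SUM = '1a9aaad8414baffe0a2fd46eed1f41b85f4049e6'
bundle = 'nexus-{}-bundle.tar.gz'.format(NEXUS_VERSION)

# Write the checksum file exactly as the Dockerfile does: digest, two spaces, name.
with open(bundle + '.sha1', 'w') as f:
    f.write('{}  {}\n'.format(NEXUS_SHA1SUM, bundle))

# Let sha1sum verify it, as the image build does...
subprocess.check_call(['sha1sum', '--check', bundle + '.sha1'])

# ...or compare the digest directly.
with open(bundle, 'rb') as f:
    assert hashlib.sha1(f.read()).hexdigest() == NEXUS_SHA1SUM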
rename from taskcluster/docker/android-gradle-build/README.md rename to testing/docker/android-gradle-build/README.md
new file mode 100644 --- /dev/null +++ b/testing/docker/android-gradle-build/REGISTRY @@ -0,0 +1,1 @@ +taskcluster
rename from taskcluster/docker/android-gradle-build/VERSION rename to testing/docker/android-gradle-build/VERSION
rename from taskcluster/docker/android-gradle-build/bin/after.sh rename to testing/docker/android-gradle-build/bin/after.sh
rename from taskcluster/docker/android-gradle-build/bin/before.sh rename to testing/docker/android-gradle-build/bin/before.sh
rename from taskcluster/docker/android-gradle-build/bin/build.sh rename to testing/docker/android-gradle-build/bin/build.sh
new file mode 100644 --- /dev/null +++ b/testing/docker/android-gradle-build/bin/checkout-script.sh @@ -0,0 +1,17 @@ +#! /bin/bash -vex + +set -x -e + +# Inputs, with defaults + +: GECKO_HEAD_REPOSITORY ${GECKO_HEAD_REPOSITORY:=https://hg.mozilla.org/mozilla-central} +: GECKO_HEAD_REV ${GECKO_HEAD_REV:=default} + +: SCRIPT_DOWNLOAD_PATH ${SCRIPT_DOWNLOAD_PATH:=$PWD} +: SCRIPT_PATH ${SCRIPT_PATH:?"script path must be set"} +set -v + +# download script from the gecko repository +url=${GECKO_HEAD_REPOSITORY}/raw-file/${GECKO_HEAD_REV}/${SCRIPT_PATH} +wget --directory-prefix=${SCRIPT_DOWNLOAD_PATH} $url +chmod +x `basename ${SCRIPT_PATH}`
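checkout-script.sh only stitches an hg raw-file URL together from its three inputs and fetches it. The equivalent URL construction, shown in Python with the script's defaults and a hypothetical SCRIPT_PATH standing in for whatever the task supplies:

GECKO_HEAD_REPOSITORY = 'https://hg.mozilla.org/mozilla-central'  # script default
GECKO_HEAD_REV = 'default'                                        # script default
SCRIPT_PATH = 'path/to/some/script.sh'                            # hypothetical; always set by the task

url = '{}/raw-file/{}/{}'.format(GECKO_HEAD_REPOSITORY, GECKO_HEAD_REV, SCRIPT_PATH)
# -> https://hg.mozilla.org/mozilla-central/raw-file/default/path/to/some/script.sh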
rename from taskcluster/docker/android-gradle-build/bin/checkout-sources.sh rename to testing/docker/android-gradle-build/bin/checkout-sources.sh
new file mode 100644 --- /dev/null +++ b/testing/docker/android-gradle-build/buildprops.json @@ -0,0 +1,9 @@ +{ + "properties": { + "buildername": "" + }, + "sourcestamp": { + "changes": [] + }, + "comments": "TaskCluster Job" +}
new file mode 100644 --- /dev/null +++ b/testing/docker/android-gradle-build/oauth.txt @@ -0,0 +1,2 @@ +taskcluster_clientId = None +taskcluster_accessToken = None
rename from taskcluster/docker/base-build/system-setup.sh rename to testing/docker/base-build/system-setup.sh
new file mode 100644 --- /dev/null +++ b/testing/docker/base-test/REGISTRY @@ -0,0 +1,1 @@ +taskcluster
rename from taskcluster/docker/base-test/sources.list rename to testing/docker/base-test/sources.list
rename from taskcluster/docker/centos6-build-upd/Dockerfile rename to testing/docker/centos6-build-upd/Dockerfile
new file mode 100644 --- /dev/null +++ b/testing/docker/centos6-build-upd/REGISTRY @@ -0,0 +1,1 @@ +taskcluster
rename from taskcluster/docker/centos6-build-upd/VERSION rename to testing/docker/centos6-build-upd/VERSION
rename from taskcluster/docker/centos6-build/Dockerfile rename to testing/docker/centos6-build/Dockerfile
new file mode 100644 --- /dev/null +++ b/testing/docker/centos6-build/REGISTRY @@ -0,0 +1,1 @@ +taskcluster
new file mode 100644 --- /dev/null +++ b/testing/docker/centos6-build/VERSION @@ -0,0 +1,1 @@ +0.1.6
rename from taskcluster/docker/centos6-build/system-setup.sh rename to testing/docker/centos6-build/system-setup.sh
new file mode 100644 --- /dev/null +++ b/testing/docker/decision/REGISTRY @@ -0,0 +1,1 @@ +taskcluster
rename from taskcluster/docker/decision/system-setup.sh rename to testing/docker/decision/system-setup.sh
rename from taskcluster/docker/desktop-build/bin/build.sh rename to testing/docker/desktop-build/bin/build.sh
rename from taskcluster/docker/desktop-build/bin/checkout-script.sh rename to testing/docker/desktop-build/bin/checkout-script.sh
rename from taskcluster/docker/desktop-build/bin/checkout-sources.sh rename to testing/docker/desktop-build/bin/checkout-sources.sh
rename from taskcluster/docker/desktop-build/buildprops.json rename to testing/docker/desktop-build/buildprops.json
rename from taskcluster/docker/desktop-build/oauth.txt rename to testing/docker/desktop-build/oauth.txt
rename from taskcluster/docker/desktop1604-test/buildprops.json rename to testing/docker/desktop-test/buildprops.json
new file mode 100644 --- /dev/null +++ b/testing/docker/desktop-test/deja-dup-monitor.desktop @@ -0,0 +1,19 @@ +[Desktop Entry] +Version=1.0 +X-Ubuntu-Gettext-Domain=deja-dup + +Name=Backup Monitor +Comment=Schedules backups at regular intervals + +Icon=deja-dup +TryExec=/usr/lib/deja-dup/deja-dup/deja-dup-monitor +Exec=/usr/lib/deja-dup/deja-dup/deja-dup-monitor + +# Bug 984944/1240084 - It prevents taking screenshots +X-GNOME-Autostart-Delay=false + +StartupNotify=false +NoDisplay=true + +Type=Application +Categories=System;Utility;Archiving;
rename from taskcluster/docker/tester/dot-config/pip/pip.conf rename to testing/docker/desktop-test/dot-files/config/pip/pip.conf
rename from taskcluster/docker/desktop1604-test/dot-files/config/user-dirs.dirs rename to testing/docker/desktop-test/dot-files/config/user-dirs.dirs
new file mode 100644 --- /dev/null +++ b/testing/docker/desktop-test/dot-files/config/user-dirs.locale @@ -0,0 +1,1 @@ +en_US
new file mode 100644 --- /dev/null +++ b/testing/docker/desktop-test/dot-files/pulse/default.pa @@ -0,0 +1,164 @@ +#!/usr/bin/pulseaudio -nF +# +# This file is part of PulseAudio. +# +# PulseAudio is free software; you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# PulseAudio is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with PulseAudio; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + +# This startup script is used only if PulseAudio is started per-user +# (i.e. not in system mode) + +.nofail + +### Load something into the sample cache +#load-sample-lazy x11-bell /usr/share/sounds/gtk-events/activate.wav +#load-sample-lazy pulse-hotplug /usr/share/sounds/startup3.wav +#load-sample-lazy pulse-coldplug /usr/share/sounds/startup3.wav +#load-sample-lazy pulse-access /usr/share/sounds/generic.wav + +.fail + +### Automatically restore the volume of streams and devices +load-module module-device-restore +load-module module-stream-restore +load-module module-card-restore + +### Automatically augment property information from .desktop files +### stored in /usr/share/application +load-module module-augment-properties + +### Load audio drivers statically +### (it's probably better to not load these drivers manually, but instead +### use module-udev-detect -- see below -- for doing this automatically) +#load-module module-alsa-sink +#load-module module-alsa-source device=hw:1,0 +#load-module module-oss device="/dev/dsp" sink_name=output source_name=input +#load-module module-oss-mmap device="/dev/dsp" sink_name=output source_name=input +#load-module module-null-sink +#load-module module-pipe-sink + +### Automatically load driver modules depending on the hardware available +.ifexists module-udev-detect.so +load-module module-udev-detect +.else +### Use the static hardware detection module (for systems that lack udev/hal support) +load-module module-detect +.endif + +### Automatically connect sink and source if JACK server is present +.ifexists module-jackdbus-detect.so +.nofail +load-module module-jackdbus-detect +.fail +.endif + +### Automatically load driver modules for Bluetooth hardware +# This module causes a pulseaudio startup failure on "gecko-tester" +#.ifexists module-bluetooth-discover.so +#load-module module-bluetooth-discover +#.endif + +### Load several protocols +.ifexists module-esound-protocol-unix.so +load-module module-esound-protocol-unix +.endif +load-module module-native-protocol-unix + +### Network access (may be configured with paprefs, so leave this commented +### here if you plan to use paprefs) +#load-module module-esound-protocol-tcp +#load-module module-native-protocol-tcp +#load-module module-zeroconf-publish + +### Load the RTP receiver module (also configured via paprefs, see above) +#load-module module-rtp-recv + +### Load the RTP sender module (also configured via paprefs, see above) +#load-module module-null-sink sink_name=rtp format=s16be channels=2 rate=44100 sink_properties="device.description='RTP Multicast Sink'" +#load-module module-rtp-send 
source=rtp.monitor + +### Load additional modules from GConf settings. This can be configured with the paprefs tool. +### Please keep in mind that the modules configured by paprefs might conflict with manually +### loaded modules. +.ifexists module-gconf.so +.nofail +load-module module-gconf +.fail +.endif + +### Automatically restore the default sink/source when changed by the user +### during runtime +### NOTE: This should be loaded as early as possible so that subsequent modules +### that look up the default sink/source get the right value +load-module module-default-device-restore + +### Automatically move streams to the default sink if the sink they are +### connected to dies, similar for sources +load-module module-rescue-streams + +### Make sure we always have a sink around, even if it is a null sink. +load-module module-always-sink + +### Honour intended role device property +load-module module-intended-roles + +### Automatically suspend sinks/sources that become idle for too long +load-module module-suspend-on-idle + +### If autoexit on idle is enabled we want to make sure we only quit +### when no local session needs us anymore. +# This module causes a pulseaudio startup failure on "gecko-tester" +#.ifexists module-console-kit.so +#load-module module-console-kit +#.endif + +### Enable positioned event sounds +load-module module-position-event-sounds + +### Cork music streams when a phone stream is active +#load-module module-cork-music-on-phone + +### Modules to allow autoloading of filters (such as echo cancellation) +### on demand. module-filter-heuristics tries to determine what filters +### make sense, and module-filter-apply does the heavy-lifting of +### loading modules and rerouting streams. +load-module module-filter-heuristics +load-module module-filter-apply + +### Load DBus protocol +#.ifexists module-dbus-protocol.so +#load-module module-dbus-protocol +#.endif + +# X11 modules should not be started from default.pa so that one daemon +# can be shared by multiple sessions. + +### Load X11 bell module +#load-module module-x11-bell sample=bell-windowing-system + +### Register ourselves in the X11 session manager +#load-module module-x11-xsmp + +### Publish connection data in the X11 root window +#.ifexists module-x11-publish.so +#.nofail +#load-module module-x11-publish +#.fail +#.endif + +load-module module-switch-on-port-available + +### Make some devices default +#set-default-sink output +#set-default-source input
new file mode 100644 --- /dev/null +++ b/testing/docker/desktop-test/fonts.conf @@ -0,0 +1,5 @@ +<match target="font"> + <edit name="antialias" mode="assign"> + <bool>false</bool> + </edit> +</match>
rename from taskcluster/docker/desktop1604-test/jockey-gtk.desktop rename to testing/docker/desktop-test/jockey-gtk.desktop
rename from taskcluster/docker/desktop1604-test/release-upgrades rename to testing/docker/desktop-test/release-upgrades
rename from taskcluster/docker/desktop-test/taskcluster-interactive-shell rename to testing/docker/desktop-test/taskcluster-interactive-shell
new file mode 100644 --- /dev/null +++ b/testing/docker/desktop-test/tc-vcs-config.yml @@ -0,0 +1,40 @@ +# Default configuration used by the tc-vs tools these can be overridden by +# passing the config you wish to use over the command line... +git: git +hg: hg + +repoCache: + # Repo url to clone when running repo init.. + repoUrl: https://gerrit.googlesource.com/git-repo.git + # Version of repo to utilize... + repoRevision: master + # The root where all downloaded cache files are stored on the local machine... + cacheDir: '{{env.HOME}}/.tc-vcs-repo/' + # Name/prefixed used as part of the base url. + cacheName: sources/{{name}}.tar.gz + # Command used to upload the tarball + uploadTar: "curl --header 'Content-Type: application/x-tar' --header 'Content-Encoding: gzip' -X PUT --data-binary @'{{source}}' '{{url}}'" + # Large http get requests are often slower using nodes built in http layer so + # we utilize a subprocess which is responsible for fetching... + get: curl --connect-timeout 30 --speed-limit 500000 -L -o {{dest}} {{url}} + # Used to create clone tarball + compress: tar -czf {{dest}} {{source}} + # All cache urls use tar + gz this is the command used to extract those files + # downloaded by the "get" command. + extract: tar -x -z -C {{dest}} -f {{source}} + +cloneCache: + # The root where all downloaded cache files are stored on the local machine... + cacheDir: '{{env.HOME}}/.tc-vcs/' + # Command used to upload the tarball + uploadTar: "curl --header 'Content-Type: application/x-tar' --header 'Content-Encoding: gzip' -X PUT --data-binary @'{{source}}' '{{url}}'" + # Large http get requests are often slower using nodes built in http layer so + # we utilize a subprocess which is responsible for fetching... + get: curl --connect-timeout 30 --speed-limit 500000 -L -o {{dest}} {{url}} + # Used to create clone tarball + compress: tar -czf {{dest}} {{source}} + # All cache urls use tar + gz this is the command used to extract those files + # downloaded by the "get" command. + extract: tar -x -z --strip-components 1 -C {{dest}} -f {{source}} + # Name/prefixed used as part of the base url. + cacheName: clones/{{name}}.tar.gz
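The {{...}} placeholders in this config (env.HOME, name, source, dest, url) are filled in by tc-vcs before the get/compress/extract/uploadTar commands run. A rough Python illustration of that substitution for the cloneCache extract command, with example values standing in for what tc-vcs would supply:

import re

extract_tmpl = 'tar -x -z --strip-components 1 -C {{dest}} -f {{source}}'
values = {  # example values only; tc-vcs computes the real ones
    'dest': '/home/worker/workspace/gecko',
    'source': '/home/worker/.tc-vcs/clones/mozilla-central.tar.gz',
}
command = re.sub(r'\{\{(\w+)\}\}', lambda m: values[m.group(1)], extract_tmpl)
# -> tar -x -z --strip-components 1 -C /home/worker/workspace/gecko -f /home/worker/.tc-vcs/clones/mozilla-central.tar.gz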
new file mode 100644 --- /dev/null +++ b/testing/docker/desktop-test/tester.env @@ -0,0 +1,4 @@ +GAIA_REV=tip +GAIA_REF=tip +GAIA_BASE_REPOSITORY=https://hg.mozilla.org/integration/gaia-central +GAIA_HEAD_REPOSITORY=https://hg.mozilla.org/integration/gaia-central
new file mode 100644 --- /dev/null +++ b/testing/docker/desktop1604-test/apport @@ -0,0 +1,1 @@ +enabled=0
rename from taskcluster/docker/desktop1604-test/bin/run-wizard rename to testing/docker/desktop1604-test/bin/run-wizard
new file mode 100644 --- /dev/null +++ b/testing/docker/desktop1604-test/buildprops.json @@ -0,0 +1,8 @@ +{ + "properties": { + "buildername": "" + }, + "sourcestamp": { + "changes": [] + } +}
rename from taskcluster/docker/desktop1604-test/deja-dup-monitor.desktop rename to testing/docker/desktop1604-test/deja-dup-monitor.desktop
new file mode 100644 --- /dev/null +++ b/testing/docker/desktop1604-test/dot-files/config/user-dirs.dirs @@ -0,0 +1,15 @@ +# This file is written by xdg-user-dirs-update +# If you want to change or add directories, just edit the line you're +# interested in. All local changes will be retained on the next run +# Format is XDG_xxx_DIR="$HOME/yyy", where yyy is a shell-escaped +# homedir-relative path, or XDG_xxx_DIR="/yyy", where /yyy is an +# absolute path. No other format is supported. + +XDG_DESKTOP_DIR="$HOME/Desktop" +XDG_DOWNLOAD_DIR="$HOME/Downloads" +XDG_TEMPLATES_DIR="$HOME/Templates" +XDG_PUBLICSHARE_DIR="$HOME/Public" +XDG_DOCUMENTS_DIR="$HOME/Documents" +XDG_MUSIC_DIR="$HOME/Music" +XDG_PICTURES_DIR="$HOME/Pictures" +XDG_VIDEOS_DIR="$HOME/Videos"
new file mode 100644 --- /dev/null +++ b/testing/docker/desktop1604-test/dot-files/config/user-dirs.locale @@ -0,0 +1,1 @@ +en_US
new file mode 100644 --- /dev/null +++ b/testing/docker/desktop1604-test/dot-files/pulse/default.pa @@ -0,0 +1,164 @@ +#!/usr/bin/pulseaudio -nF +# +# This file is part of PulseAudio. +# +# PulseAudio is free software; you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# PulseAudio is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with PulseAudio; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + +# This startup script is used only if PulseAudio is started per-user +# (i.e. not in system mode) + +.nofail + +### Load something into the sample cache +#load-sample-lazy x11-bell /usr/share/sounds/gtk-events/activate.wav +#load-sample-lazy pulse-hotplug /usr/share/sounds/startup3.wav +#load-sample-lazy pulse-coldplug /usr/share/sounds/startup3.wav +#load-sample-lazy pulse-access /usr/share/sounds/generic.wav + +.fail + +### Automatically restore the volume of streams and devices +load-module module-device-restore +load-module module-stream-restore +load-module module-card-restore + +### Automatically augment property information from .desktop files +### stored in /usr/share/application +load-module module-augment-properties + +### Load audio drivers statically +### (it's probably better to not load these drivers manually, but instead +### use module-udev-detect -- see below -- for doing this automatically) +#load-module module-alsa-sink +#load-module module-alsa-source device=hw:1,0 +#load-module module-oss device="/dev/dsp" sink_name=output source_name=input +#load-module module-oss-mmap device="/dev/dsp" sink_name=output source_name=input +#load-module module-null-sink +#load-module module-pipe-sink + +### Automatically load driver modules depending on the hardware available +.ifexists module-udev-detect.so +load-module module-udev-detect +.else +### Use the static hardware detection module (for systems that lack udev/hal support) +load-module module-detect +.endif + +### Automatically connect sink and source if JACK server is present +.ifexists module-jackdbus-detect.so +.nofail +load-module module-jackdbus-detect +.fail +.endif + +### Automatically load driver modules for Bluetooth hardware +# This module causes a pulseaudio startup failure on "gecko-tester" +#.ifexists module-bluetooth-discover.so +#load-module module-bluetooth-discover +#.endif + +### Load several protocols +.ifexists module-esound-protocol-unix.so +load-module module-esound-protocol-unix +.endif +load-module module-native-protocol-unix + +### Network access (may be configured with paprefs, so leave this commented +### here if you plan to use paprefs) +#load-module module-esound-protocol-tcp +#load-module module-native-protocol-tcp +#load-module module-zeroconf-publish + +### Load the RTP receiver module (also configured via paprefs, see above) +#load-module module-rtp-recv + +### Load the RTP sender module (also configured via paprefs, see above) +#load-module module-null-sink sink_name=rtp format=s16be channels=2 rate=44100 sink_properties="device.description='RTP Multicast Sink'" +#load-module module-rtp-send 
source=rtp.monitor + +### Load additional modules from GConf settings. This can be configured with the paprefs tool. +### Please keep in mind that the modules configured by paprefs might conflict with manually +### loaded modules. +.ifexists module-gconf.so +.nofail +load-module module-gconf +.fail +.endif + +### Automatically restore the default sink/source when changed by the user +### during runtime +### NOTE: This should be loaded as early as possible so that subsequent modules +### that look up the default sink/source get the right value +load-module module-default-device-restore + +### Automatically move streams to the default sink if the sink they are +### connected to dies, similar for sources +load-module module-rescue-streams + +### Make sure we always have a sink around, even if it is a null sink. +load-module module-always-sink + +### Honour intended role device property +load-module module-intended-roles + +### Automatically suspend sinks/sources that become idle for too long +load-module module-suspend-on-idle + +### If autoexit on idle is enabled we want to make sure we only quit +### when no local session needs us anymore. +# This module causes a pulseaudio startup failure on "gecko-tester" +#.ifexists module-console-kit.so +#load-module module-console-kit +#.endif + +### Enable positioned event sounds +load-module module-position-event-sounds + +### Cork music streams when a phone stream is active +#load-module module-cork-music-on-phone + +### Modules to allow autoloading of filters (such as echo cancellation) +### on demand. module-filter-heuristics tries to determine what filters +### make sense, and module-filter-apply does the heavy-lifting of +### loading modules and rerouting streams. +load-module module-filter-heuristics +load-module module-filter-apply + +### Load DBus protocol +#.ifexists module-dbus-protocol.so +#load-module module-dbus-protocol +#.endif + +# X11 modules should not be started from default.pa so that one daemon +# can be shared by multiple sessions. + +### Load X11 bell module +#load-module module-x11-bell sample=bell-windowing-system + +### Register ourselves in the X11 session manager +#load-module module-x11-xsmp + +### Publish connection data in the X11 root window +#.ifexists module-x11-publish.so +#.nofail +#load-module module-x11-publish +#.fail +#.endif + +load-module module-switch-on-port-available + +### Make some devices default +#set-default-sink output +#set-default-source input
rename from taskcluster/docker/desktop1604-test/fonts.conf rename to testing/docker/desktop1604-test/fonts.conf
new file mode 100644 --- /dev/null +++ b/testing/docker/desktop1604-test/jockey-gtk.desktop @@ -0,0 +1,15 @@ +[Desktop Entry] +Name=Check for new hardware drivers +Comment=Notify about new hardware drivers available for the system +Icon=jockey +Exec=sh -c "test -e /var/cache/jockey/check || exec jockey-gtk --check" +Terminal=false +Type=Application +Categories=System;Settings;GTK;HardwareSettings; +NotShowIn=KDE; +X-Ubuntu-Gettext-Domain=jockey + +# Bug 984944/1240084 - It prevents taking screenshots +X-GNOME-Autostart-Delay=false + +NoDisplay=true
new file mode 100644 --- /dev/null +++ b/testing/docker/desktop1604-test/motd @@ -0,0 +1,6 @@ +Welcome to your taskcluster interactive shell! The regularly scheduled task +has been paused to give you a chance to set up your debugging environment. + +For your convenience, the exact mozharness command needed for this task can +be invoked using the 'run-mozharness' command. +
new file mode 100644 --- /dev/null +++ b/testing/docker/desktop1604-test/release-upgrades @@ -0,0 +1,17 @@ +# Default behavior for the release upgrader. + +[DEFAULT] +# Default prompting behavior, valid options: +# +# never - Never check for a new release. +# normal - Check to see if a new release is available. If more than one new +# release is found, the release upgrader will attempt to upgrade to +# the release that immediately succeeds the currently-running +# release. +# lts - Check to see if a new LTS release is available. The upgrader +# will attempt to upgrade to the first LTS release available after +# the currently-running one. Note that this option should not be +# used if the currently-running release is not itself an LTS +# release, since in that case the upgrader won't be able to +# determine if a newer release is available. +Prompt=never
rename from taskcluster/docker/desktop1604-test/taskcluster-interactive-shell rename to testing/docker/desktop1604-test/taskcluster-interactive-shell
rename from taskcluster/docker/tester/tc-vcs-config.yml rename to testing/docker/desktop1604-test/tc-vcs-config.yml
new file mode 100644 --- /dev/null +++ b/testing/docker/desktop1604-test/tester.env @@ -0,0 +1,4 @@ +GAIA_REV=tip +GAIA_REF=tip +GAIA_BASE_REPOSITORY=https://hg.mozilla.org/integration/gaia-central +GAIA_HEAD_REPOSITORY=https://hg.mozilla.org/integration/gaia-central
rename from taskcluster/docker/image_builder/Dockerfile rename to testing/docker/image_builder/Dockerfile
rename from taskcluster/docker/image_builder/bin/build_image.sh rename to testing/docker/image_builder/bin/build_image.sh
rename from taskcluster/docker/recipes/centos6-build-system-setup.sh rename to testing/docker/recipes/centos6-build-system-setup.sh
rename from taskcluster/docker/recipes/install-mercurial.sh rename to testing/docker/recipes/install-mercurial.sh
rename from taskcluster/docker/recipes/ubuntu1204-test-system-setup.sh rename to testing/docker/recipes/ubuntu1204-test-system-setup.sh
rename from taskcluster/docker/recipes/ubuntu1604-test-system-setup.sh rename to testing/docker/recipes/ubuntu1604-test-system-setup.sh
rename from taskcluster/docker/rust-build/build_cargo.sh rename to testing/docker/rust-build/build_cargo.sh
rename from taskcluster/docker/rust-build/build_rust.sh rename to testing/docker/rust-build/build_rust.sh
rename from taskcluster/docker/rust-build/build_rust_mac.sh rename to testing/docker/rust-build/build_rust_mac.sh
rename from taskcluster/docker/rust-build/fetch_cargo.sh rename to testing/docker/rust-build/fetch_cargo.sh
rename from taskcluster/docker/rust-build/fetch_rust.sh rename to testing/docker/rust-build/fetch_rust.sh
rename from taskcluster/docker/rust-build/package_rust.sh rename to testing/docker/rust-build/package_rust.sh
rename from taskcluster/docker/rust-build/repack_rust.py rename to testing/docker/rust-build/repack_rust.py --- a/taskcluster/docker/rust-build/repack_rust.py +++ b/testing/docker/rust-build/repack_rust.py @@ -5,186 +5,173 @@ with the necessary tool and target suppo build environment. ''' import os.path import requests import subprocess import toml - def fetch_file(url): - '''Download a file from the given url if it's not already present.''' - filename = os.path.basename(url) - if os.path.exists(filename): - return - r = requests.get(url, stream=True) - r.raise_for_status() - with open(filename, 'wb') as fd: - for chunk in r.iter_content(4096): - fd.write(chunk) - + '''Download a file from the given url if it's not already present.''' + filename = os.path.basename(url) + if os.path.exists(filename): + return + r = requests.get(url, stream=True) + r.raise_for_status() + with open(filename, 'wb') as fd: + for chunk in r.iter_content(4096): + fd.write(chunk) def fetch(url): - '''Download and verify a package url.''' - base = os.path.basename(url) - print('Fetching %s...' % base) - fetch_file(url + '.asc') - fetch_file(url) - fetch_file(url + '.sha256') - fetch_file(url + '.asc.sha256') - print('Verifying %s...' % base) - subprocess.check_call(['shasum', '-c', base + '.sha256']) - subprocess.check_call(['shasum', '-c', base + '.asc.sha256']) - subprocess.check_call(['gpg', '--verify', base + '.asc', base]) - subprocess.check_call(['keybase', 'pgp', 'verify', - '-d', base + '.asc', - '-i', base, - ]) - + '''Download and verify a package url.''' + base = os.path.basename(url) + print('Fetching %s...' % base) + fetch_file(url + '.asc') + fetch_file(url) + fetch_file(url + '.sha256') + fetch_file(url + '.asc.sha256') + print('Verifying %s...' % base) + subprocess.check_call(['shasum', '-c', base + '.sha256']) + subprocess.check_call(['shasum', '-c', base + '.asc.sha256']) + subprocess.check_call(['gpg', '--verify', base + '.asc', base]) + subprocess.check_call(['keybase', 'pgp', 'verify', + '-d', base + '.asc', + '-i', base, + ]) def install(filename, target): - '''Run a package's installer script against the given target directory.''' - print(' Unpacking %s...' % filename) - subprocess.check_call(['tar', 'xf', filename]) - basename = filename.split('.tar')[0] - print(' Installing %s...' % basename) - install_cmd = [os.path.join(basename, 'install.sh')] - install_cmd += ['--prefix=' + os.path.abspath(target)] - install_cmd += ['--disable-ldconfig'] - subprocess.check_call(install_cmd) - print(' Cleaning %s...' % basename) - subprocess.check_call(['rm', '-rf', basename]) - + '''Run a package's installer script against the given target directory.''' + print(' Unpacking %s...' % filename) + subprocess.check_call(['tar', 'xf', filename]) + basename = filename.split('.tar')[0] + print(' Installing %s...' % basename) + install_cmd = [os.path.join(basename, 'install.sh')] + install_cmd += ['--prefix=' + os.path.abspath(target)] + install_cmd += ['--disable-ldconfig'] + subprocess.check_call(install_cmd) + print(' Cleaning %s...' 
% basename) + subprocess.check_call(['rm', '-rf', basename]) def package(manifest, pkg, target): - '''Pull out the package dict for a particular package and target - from the given manifest.''' - version = manifest['pkg'][pkg]['version'] - info = manifest['pkg'][pkg]['target'][target] - return (version, info) - + '''Pull out the package dict for a particular package and target + from the given manifest.''' + version = manifest['pkg'][pkg]['version'] + info = manifest['pkg'][pkg]['target'][target] + return (version, info) def fetch_package(manifest, pkg, host): - version, info = package(manifest, pkg, host) - print('%s %s\n %s\n %s' % (pkg, version, info['url'], info['hash'])) - if not info['available']: - print('%s marked unavailable for %s' % (pkg, host)) - raise AssertionError - fetch(info['url']) - return info - + version, info = package(manifest, pkg, host) + print('%s %s\n %s\n %s' % (pkg, version, info['url'], info['hash'])) + if not info['available']: + print('%s marked unavailable for %s' % (pkg, host)) + raise AssertionError + fetch(info['url']) + return info def fetch_std(manifest, targets): - stds = [] - for target in targets: - info = fetch_package(manifest, 'rust-std', target) - stds.append(info) - return stds - + stds = [] + for target in targets: + info = fetch_package(manifest, 'rust-std', target) + stds.append(info) + return stds def tar_for_host(host): - if 'linux' in host: - tar_options = 'cJf' - tar_ext = '.tar.xz' - else: - tar_options = 'cjf' - tar_ext = '.tar.bz2' - return tar_options, tar_ext - + if 'linux' in host: + tar_options = 'cJf' + tar_ext = '.tar.xz' + else: + tar_options = 'cjf' + tar_ext = '.tar.bz2' + return tar_options, tar_ext def repack(host, targets, channel='stable', suffix=''): - print("Repacking rust for %s..." % host) - url = 'https://static.rust-lang.org/dist/channel-rust-' + channel + '.toml' - req = requests.get(url) - req.raise_for_status() - manifest = toml.loads(req.content) - if manifest['manifest-version'] != '2': - print('ERROR: unrecognized manifest version %s.' % manifest[ - 'manifest-version']) - return - print('Using manifest for rust %s as of %s.' % (channel, manifest['date'])) - print('Fetching packages...') - rustc = fetch_package(manifest, 'rustc', host) - cargo = fetch_package(manifest, 'cargo', host) - stds = fetch_std(manifest, targets) - print('Installing packages...') - tar_basename = 'rustc-' + host - if suffix: - tar_basename += '-' + suffix - tar_basename += '-repack' - install_dir = 'rustc' - subprocess.check_call(['rm', '-rf', install_dir]) - install(os.path.basename(rustc['url']), install_dir) - install(os.path.basename(cargo['url']), install_dir) - for std in stds: - install(os.path.basename(std['url']), install_dir) - pass - print('Tarring %s...' % tar_basename) - tar_options, tar_ext = tar_for_host(host) - subprocess.check_call( - ['tar', tar_options, tar_basename + tar_ext, install_dir]) - subprocess.check_call(['rm', '-rf', install_dir]) - + print("Repacking rust for %s..." % host) + url = 'https://static.rust-lang.org/dist/channel-rust-' + channel + '.toml' + req = requests.get(url) + req.raise_for_status() + manifest = toml.loads(req.content) + if manifest['manifest-version'] != '2': + print('ERROR: unrecognized manifest version %s.' % manifest['manifest-version']) + return + print('Using manifest for rust %s as of %s.' 
% (channel, manifest['date'])) + print('Fetching packages...') + rustc = fetch_package(manifest, 'rustc', host) + cargo = fetch_package(manifest, 'cargo', host) + stds = fetch_std(manifest, targets) + print('Installing packages...') + tar_basename = 'rustc-' + host + if suffix: + tar_basename += '-' + suffix + tar_basename += '-repack' + install_dir = 'rustc' + subprocess.check_call(['rm', '-rf', install_dir]) + install(os.path.basename(rustc['url']), install_dir) + install(os.path.basename(cargo['url']), install_dir) + for std in stds: + install(os.path.basename(std['url']), install_dir) + pass + print('Tarring %s...' % tar_basename) + tar_options, tar_ext = tar_for_host(host) + subprocess.check_call(['tar', tar_options, tar_basename + tar_ext, install_dir]) + subprocess.check_call(['rm', '-rf', install_dir]) def repack_cargo(host, channel='nightly'): - print("Repacking cargo for %s..." % host) - # Cargo doesn't seem to have a .toml manifest. - base_url = 'https://static.rust-lang.org/cargo-dist/' - req = requests.get(os.path.join(base_url, 'channel-cargo-' + channel)) - req.raise_for_status() - file = '' - for line in req.iter_lines(): - if line.find(host) != -1: - file = line.strip() - if not file: - print('No manifest entry for %s!' % host) - return - manifest = { - 'date': req.headers['Last-Modified'], - 'pkg': { - 'cargo': { - 'version': channel, - 'target': { - host: { - 'url': os.path.join(base_url, file), - 'hash': None, - 'available': True, - }, - }, - }, - }, - } - print('Using manifest for cargo %s.' % channel) - print('Fetching packages...') - cargo = fetch_package(manifest, 'cargo', host) - print('Installing packages...') - install_dir = 'cargo' - subprocess.check_call(['rm', '-rf', install_dir]) - install(os.path.basename(cargo['url']), install_dir) - tar_basename = 'cargo-%s-repack' % host - print('Tarring %s...' % tar_basename) - tar_options, tar_ext = tar_for_host(host) - subprocess.check_call( - ['tar', tar_options, tar_basename + tar_ext, install_dir]) - subprocess.check_call(['rm', '-rf', install_dir]) - + print("Repacking cargo for %s..." % host) + # Cargo doesn't seem to have a .toml manifest. + base_url = 'https://static.rust-lang.org/cargo-dist/' + req = requests.get(os.path.join(base_url, 'channel-cargo-' + channel)) + req.raise_for_status() + file = '' + for line in req.iter_lines(): + if line.find(host) != -1: + file = line.strip() + if not file: + print('No manifest entry for %s!' % host) + return + manifest = { + 'date': req.headers['Last-Modified'], + 'pkg': { + 'cargo': { + 'version': channel, + 'target': { + host: { + 'url': os.path.join(base_url, file), + 'hash': None, + 'available': True, + }, + }, + }, + }, + } + print('Using manifest for cargo %s.' % channel) + print('Fetching packages...') + cargo = fetch_package(manifest, 'cargo', host) + print('Installing packages...') + install_dir = 'cargo' + subprocess.check_call(['rm', '-rf', install_dir]) + install(os.path.basename(cargo['url']), install_dir) + tar_basename = 'cargo-%s-repack' % host + print('Tarring %s...' 
% tar_basename) + tar_options, tar_ext = tar_for_host(host) + subprocess.check_call(['tar', tar_options, tar_basename + tar_ext, install_dir]) + subprocess.check_call(['rm', '-rf', install_dir]) # rust platform triples -android = "armv7-linux-androideabi" -linux64 = "x86_64-unknown-linux-gnu" -linux32 = "i686-unknown-linux-gnu" -mac64 = "x86_64-apple-darwin" -mac32 = "i686-apple-darwin" -win64 = "x86_64-pc-windows-msvc" -win32 = "i686-pc-windows-msvc" +android="armv7-linux-androideabi" +linux64="x86_64-unknown-linux-gnu" +linux32="i686-unknown-linux-gnu" +mac64="x86_64-apple-darwin" +mac32="i686-apple-darwin" +win64="x86_64-pc-windows-msvc" +win32="i686-pc-windows-msvc" if __name__ == '__main__': - repack(mac64, [mac64, mac32]) - repack(win32, [win32]) - repack(win64, [win64]) - repack(linux64, [linux64, linux32]) - repack(linux64, [linux64, mac64, mac32], suffix='mac-cross') - repack(linux64, [linux64, android], suffix='android-cross') - repack_cargo(mac64) - repack_cargo(win32) - repack_cargo(win64) - repack_cargo(linux64) + repack(mac64, [mac64, mac32]) + repack(win32, [win32]) + repack(win64, [win64]) + repack(linux64, [linux64, linux32]) + repack(linux64, [linux64, mac64, mac32], suffix='mac-cross') + repack(linux64, [linux64, android], suffix='android-cross') + repack_cargo(mac64) + repack_cargo(win32) + repack_cargo(win64) + repack_cargo(linux64)
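Behaviorally the restored repack_rust.py is the same as the taskcluster copy; only indentation and blank-line style change. For orientation, the archive name each repack() call produces follows directly from tar_for_host() and the tar_basename logic, as this small sketch (derived from the code above, not from an actual run) shows:

def expected_tarball(host, suffix=''):
    '''Name of the archive repack() would produce for host (sketch).'''
    ext = '.tar.xz' if 'linux' in host else '.tar.bz2'
    name = 'rustc-' + host
    if suffix:
        name += '-' + suffix
    return name + '-repack' + ext

assert expected_tarball('x86_64-unknown-linux-gnu') == \
    'rustc-x86_64-unknown-linux-gnu-repack.tar.xz'
assert expected_tarball('i686-pc-windows-msvc') == \
    'rustc-i686-pc-windows-msvc-repack.tar.bz2'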
rename from taskcluster/docker/rust-build/tcbuild.py rename to testing/docker/rust-build/tcbuild.py --- a/taskcluster/docker/rust-build/tcbuild.py +++ b/testing/docker/rust-build/tcbuild.py @@ -3,82 +3,73 @@ This script triggers a taskcluster task, waits for it to finish, fetches the artifacts, uploads them to tooltool, and updates the in-tree tooltool manifests. ''' from __future__ import print_function import requests.packages.urllib3 +requests.packages.urllib3.disable_warnings() import argparse import datetime import json import os import shutil import sys import taskcluster import tempfile import time import tooltool -requests.packages.urllib3.disable_warnings() - - def local_file(filename): ''' Return a path to a file next to this script. ''' return os.path.join(os.path.dirname(__file__), filename) - def read_tc_auth(tc_auth_file): ''' Read taskcluster credentials from tc_auth_file and return them as a dict. ''' return json.load(open(tc_auth_file, 'rb')) - def fill_template_dict(d, keys): for key, val in d.items(): if isinstance(val, basestring) and '{' in val: d[key] = val.format(**keys) elif isinstance(val, dict): fill_template_dict(val, keys) - def fill_template(template_file, keys): ''' Take the file object template_file, parse it as JSON, and interpolate (using str.template) its keys using keys. ''' template = json.load(template_file) fill_template_dict(template, keys) return template - def spawn_task(queue, args): ''' Spawn a Taskcluster task in queue using args. ''' task_id = taskcluster.utils.slugId() with open(local_file('task.json'), 'rb') as template: keys = vars(args) now = datetime.datetime.utcnow() keys['task_created'] = now.isoformat() + 'Z' - keys['task_deadline'] = (now + datetime.timedelta( - hours=2)).isoformat() + 'Z' - keys['artifacts_expires'] = (now + datetime.timedelta( - days=1)).isoformat() + 'Z' + keys['task_deadline'] = (now + datetime.timedelta(hours=2)).isoformat() + 'Z' + keys['artifacts_expires'] = (now + datetime.timedelta(days=1)).isoformat() + 'Z' payload = fill_template(template, keys) queue.createTask(task_id, payload) print('--- %s task %s submitted ---' % (now, task_id)) return task_id - def wait_for_task(queue, task_id, initial_wait=5): ''' Wait until queue reports that task task_id is completed, and return its run id. Sleep for initial_wait seconds before checking status the first time. Then poll periodically and print a running log of the task status. ''' @@ -86,30 +77,29 @@ def wait_for_task(queue, task_id, initia previous_state = None have_ticks = False while True: res = queue.status(task_id) state = res['status']['state'] if state != previous_state: now = datetime.datetime.utcnow() if have_ticks: - sys.stdout.write('\n') - have_ticks = False + sys.stdout.write('\n') + have_ticks = False print('--- %s task %s %s ---' % (now, task_id, state)) previous_state = state if state == 'completed': return len(res['status']['runs']) - 1 if state in ('failed', 'exception'): raise Exception('Task failed') sys.stdout.write('.') sys.stdout.flush() have_ticks = True time.sleep(10) - def fetch_artifact(queue, task_id, run_id, name, dest_dir): ''' Fetch the artifact with name from task_id and run_id in queue, write it to a file in dest_dir, and return the path to the written file. 
''' url = queue.buildUrl('getArtifact', task_id, run_id, name) fn = os.path.join(dest_dir, os.path.basename(name)) @@ -120,23 +110,21 @@ def fetch_artifact(queue, task_id, run_i with open(fn, 'wb') as f: for chunk in r.iter_content(1024): f.write(chunk) except requests.exceptions.HTTPError: print('HTTP Error %d fetching %s' % (r.status_code, name)) return None return fn - def make_artifact_dir(task_id, run_id): prefix = 'tc-artifacts.%s.%d.' % (task_id, run_id) print('making artifact dir %s' % prefix) return tempfile.mkdtemp(prefix=prefix) - def fetch_artifacts(queue, task_id, run_id): ''' Fetch all artifacts from task_id and run_id in queue, write them to temporary files, and yield the path to each. ''' try: tempdir = make_artifact_dir(task_id, run_id) res = queue.listArtifacts(task_id, run_id) @@ -145,21 +133,20 @@ def fetch_artifacts(queue, task_id, run_ if a['name'].startswith('public/logs'): continue # Skip interfaces if a['name'].startswith('private/docker-worker'): continue yield fetch_artifact(queue, task_id, run_id, a['name'], tempdir) finally: if os.path.isdir(tempdir): - # shutil.rmtree(tempdir) + #shutil.rmtree(tempdir) print('Artifacts downloaded to %s' % tempdir) pass - def upload_to_tooltool(tooltool_auth, task_id, artifact): ''' Upload artifact to tooltool using tooltool_auth as the authentication token. Return the path to the generated tooltool manifest. ''' try: oldcwd = os.getcwd() os.chdir(os.path.dirname(artifact)) @@ -177,54 +164,43 @@ def upload_to_tooltool(tooltool_auth, ta '-m', manifest, '--authentication-file', tooltool_auth, '--message', 'Built from taskcluster task {}'.format(task_id), ]) return manifest finally: os.chdir(oldcwd) - def update_manifest(artifact, manifest, local_gecko_clone): - platform = 'linux' + platform = linux manifest_dir = os.path.join(local_gecko_clone, 'testing', 'config', 'tooltool-manifests') platform_dir = [p for p in os.listdir(manifest_dir) if p.startswith(platform)][0] tree_manifest = os.path.join(manifest_dir, platform_dir, 'releng.manifest') print('%s -> %s' % (manifest, tree_manifest)) shutil.copyfile(manifest, tree_manifest) - def main(): parser = argparse.ArgumentParser(description='Build and upload binaries') - parser.add_argument('taskcluster_auth', - help='Path to a file containing Taskcluster client ' - 'ID and authentication token as a JSON file in ' - 'the form {"clientId": "...", "accessToken": "..."}') - parser.add_argument('--tooltool-auth', - help='Path to a file containing a tooltool ' - 'authentication token valid for uploading files') - parser.add_argument('--local-gecko-clone', - help='Path to a local Gecko clone whose tooltool ' - 'manifests will be updated with the newly-built binaries') + parser.add_argument('taskcluster_auth', help='Path to a file containing Taskcluster client ID and authentication token as a JSON file in the form {"clientId": "...", "accessToken": "..."}') + parser.add_argument('--tooltool-auth', help='Path to a file containing a tooltool authentication token valid for uploading files') + parser.add_argument('--local-gecko-clone', help='Path to a local Gecko clone whose tooltool manifests will be updated with the newly-built binaries') parser.add_argument('--rust-branch', default='stable', help='Revision of the rust repository to use') parser.add_argument('--task', help='Use an existing task') args = parser.parse_args() tc_auth = read_tc_auth(args.taskcluster_auth) queue = taskcluster.Queue({'credentials': tc_auth}) if args.task: task_id, initial_wait = args.task, 0 else: task_id, 
initial_wait = spawn_task(queue, args), 25 run_id = wait_for_task(queue, task_id, initial_wait) for artifact in fetch_artifacts(queue, task_id, run_id): if args.tooltool_auth: - manifest = upload_to_tooltool(args.tooltool_auth, task_id, - artifact) + manifest = upload_to_tooltool(args.tooltool_auth, task_id, artifact) if args.local_gecko_clone: update_manifest(artifact, manifest, args.local_gecko_clone) - if __name__ == '__main__': main()
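One detail of the restored tcbuild.py worth flagging: update_manifest() now assigns platform = linux rather than the string 'linux'; unless linux is defined elsewhere in the file (it is not visible in this hunk), that line raises a NameError as soon as a manifest is updated. A minimal sketch of the presumably intended behaviour, assuming the string literal:

import os
import shutil

def update_manifest(artifact, manifest, local_gecko_clone):
    platform = 'linux'  # assumed string literal; the restored line drops the quotes
    manifest_dir = os.path.join(local_gecko_clone,
                                'testing', 'config', 'tooltool-manifests')
    platform_dir = [p for p in os.listdir(manifest_dir)
                    if p.startswith(platform)][0]
    tree_manifest = os.path.join(manifest_dir, platform_dir, 'releng.manifest')
    print('%s -> %s' % (manifest, tree_manifest))
    shutil.copyfile(manifest, tree_manifest)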
rename from taskcluster/docker/rust-build/upload_rust.sh rename to testing/docker/rust-build/upload_rust.sh
new file mode 100644 --- /dev/null +++ b/testing/docker/tester/REGISTRY @@ -0,0 +1,1 @@ +taskcluster
rename from taskcluster/docker/tester/dot-config/user-dirs.dirs rename to testing/docker/tester/dot-config/user-dirs.dirs
rename from taskcluster/docker/tester/dot-config/user-dirs.locale rename to testing/docker/tester/dot-config/user-dirs.locale
rename from taskcluster/docker/tester/dot-pulse/default.pa rename to testing/docker/tester/dot-pulse/default.pa
new file mode 100644 --- /dev/null +++ b/testing/docker/tester/tc-vcs-config.yml @@ -0,0 +1,40 @@ +# Default configuration used by the tc-vs tools these can be overridden by +# passing the config you wish to use over the command line... +git: git +hg: hg + +repoCache: + # Repo url to clone when running repo init.. + repoUrl: https://gerrit.googlesource.com/git-repo.git + # Version of repo to utilize... + repoRevision: master + # The root where all downloaded cache files are stored on the local machine... + cacheDir: '{{env.HOME}}/.tc-vcs-repo/' + # Name/prefixed used as part of the base url. + cacheName: sources/{{name}}.tar.gz + # Command used to upload the tarball + uploadTar: "curl --header 'Content-Type: application/x-tar' --header 'Content-Encoding: gzip' -X PUT --data-binary @'{{source}}' '{{url}}'" + # Large http get requests are often slower using nodes built in http layer so + # we utilize a subprocess which is responsible for fetching... + get: curl --connect-timeout 30 --speed-limit 500000 -L -o {{dest}} {{url}} + # Used to create clone tarball + compress: tar -czf {{dest}} {{source}} + # All cache urls use tar + gz this is the command used to extract those files + # downloaded by the "get" command. + extract: tar -x -z -C {{dest}} -f {{source}} + +cloneCache: + # The root where all downloaded cache files are stored on the local machine... + cacheDir: '{{env.HOME}}/.tc-vcs/' + # Command used to upload the tarball + uploadTar: "curl --header 'Content-Type: application/x-tar' --header 'Content-Encoding: gzip' -X PUT --data-binary @'{{source}}' '{{url}}'" + # Large http get requests are often slower using nodes built in http layer so + # we utilize a subprocess which is responsible for fetching... + get: curl --connect-timeout 30 --speed-limit 500000 -L -o {{dest}} {{url}} + # Used to create clone tarball + compress: tar -czf {{dest}} {{source}} + # All cache urls use tar + gz this is the command used to extract those files + # downloaded by the "get" command. + extract: tar -x -z --strip-components 1 -C {{dest}} -f {{source}} + # Name/prefixed used as part of the base url. + cacheName: clones/{{name}}.tar.gz