Closing the repo default tip
authorRail Aliiev <rail@mozilla.com>
Wed, 17 Dec 2014 15:27:23 -0500
changeset 584 86fbbd2df71da06bbd13f19a48dbd917dcbc9953
parent 583 faaf646fb047963c8c3662d74780c90d2adc8539
push id575
push userraliiev@mozilla.com
push dateWed, 17 Dec 2014 20:27:28 +0000
Closing the repo
.coveragerc
.gitignore
.hgignore
.travis.yml
ami_configs/bld-linux64.json
ami_configs/bld-linux64/additional_packages
ami_configs/bld-linux64/boot/grub/grub.conf
ami_configs/bld-linux64/etc/fstab
ami_configs/bld-linux64/etc/hosts
ami_configs/bld-linux64/etc/init.d
ami_configs/bld-linux64/etc/rc.local
ami_configs/bld-linux64/etc/sysconfig
ami_configs/bld-linux64/etc/yum-local.cfg
ami_configs/bld-linux64/groupinstall
ami_configs/bld-linux64/grub-install.diff
ami_configs/bld-linux64/grub.cmd
ami_configs/centos-6-x86_64-base.json
ami_configs/centos-6-x86_64-base/additional_packages
ami_configs/centos-6-x86_64-base/boot/grub/grub.conf
ami_configs/centos-6-x86_64-base/etc/fstab
ami_configs/centos-6-x86_64-base/etc/hosts
ami_configs/centos-6-x86_64-base/etc/init.d/rc.local
ami_configs/centos-6-x86_64-base/etc/rc.local
ami_configs/centos-6-x86_64-base/etc/sysconfig/network
ami_configs/centos-6-x86_64-base/etc/sysconfig/network-scripts/ifcfg-eth0
ami_configs/centos-6-x86_64-base/etc/yum-local.cfg
ami_configs/centos-6-x86_64-base/groupinstall
ami_configs/centos-6-x86_64-hvm-base.json
ami_configs/centos-6-x86_64-hvm-base/additional_packages
ami_configs/centos-6-x86_64-hvm-base/boot/grub/device.map
ami_configs/centos-6-x86_64-hvm-base/boot/grub/grub.conf
ami_configs/centos-6-x86_64-hvm-base/etc/fstab
ami_configs/centos-6-x86_64-hvm-base/etc/hosts
ami_configs/centos-6-x86_64-hvm-base/etc/init.d
ami_configs/centos-6-x86_64-hvm-base/etc/rc.local
ami_configs/centos-6-x86_64-hvm-base/etc/sysconfig
ami_configs/centos-6-x86_64-hvm-base/etc/yum-local.cfg
ami_configs/centos-6-x86_64-hvm-base/groupinstall
ami_configs/centos-6-x86_64-hvm-base/grub-install.diff
ami_configs/centos-6-x86_64-hvm-try
ami_configs/centos-6-x86_64-hvm-try.json
ami_configs/centos-6-x86_64-server.json
ami_configs/centos-6-x86_64-server/additional_packages
ami_configs/centos-6-x86_64-server/boot
ami_configs/centos-6-x86_64-server/etc
ami_configs/centos-6-x86_64-server/groupinstall
ami_configs/fake_puppet.conf
ami_configs/fake_puppet.sh
ami_configs/kill_chroot.sh
ami_configs/rbt-win64.json
ami_configs/rbt-win64.user_data
ami_configs/releng-public.list
ami_configs/releng-public.repo
ami_configs/s3-get
ami_configs/try-linux64
ami_configs/try-linux64.json
ami_configs/tst-win64.json
ami_configs/tst-win64.user_data
ami_configs/ubuntu-12.04-i386-desktop.json
ami_configs/ubuntu-12.04-i386-desktop/etc
ami_configs/ubuntu-12.04-i386-desktop/host_packages
ami_configs/ubuntu-12.04-i386-desktop/packages
ami_configs/ubuntu-12.04-i386-desktop/usr
ami_configs/ubuntu-12.04-x86_64-desktop-hvm.json
ami_configs/ubuntu-12.04-x86_64-desktop-hvm/boot/grub/device.map
ami_configs/ubuntu-12.04-x86_64-desktop-hvm/etc/default/grub
ami_configs/ubuntu-12.04-x86_64-desktop-hvm/etc/fstab
ami_configs/ubuntu-12.04-x86_64-desktop-hvm/etc/hosts
ami_configs/ubuntu-12.04-x86_64-desktop-hvm/etc/network
ami_configs/ubuntu-12.04-x86_64-desktop-hvm/etc/rc.local
ami_configs/ubuntu-12.04-x86_64-desktop-hvm/host_packages
ami_configs/ubuntu-12.04-x86_64-desktop-hvm/packages
ami_configs/ubuntu-12.04-x86_64-desktop-hvm/usr
ami_configs/ubuntu-12.04-x86_64-desktop.json
ami_configs/ubuntu-12.04-x86_64-desktop/etc/fstab
ami_configs/ubuntu-12.04-x86_64-desktop/etc/hosts
ami_configs/ubuntu-12.04-x86_64-desktop/etc/network/interfaces
ami_configs/ubuntu-12.04-x86_64-desktop/etc/rc.local
ami_configs/ubuntu-12.04-x86_64-desktop/host_packages
ami_configs/ubuntu-12.04-x86_64-desktop/packages
ami_configs/ubuntu-12.04-x86_64-desktop/usr/sbin/policy-rc.d
ami_configs/ubuntu-12.04-x86_64-server-hvm.json
ami_configs/ubuntu-12.04-x86_64-server-hvm/boot
ami_configs/ubuntu-12.04-x86_64-server-hvm/etc/cloud/cloud.cfg.d/99-apt-preserve-sources-list.cfg
ami_configs/ubuntu-12.04-x86_64-server-hvm/etc/default
ami_configs/ubuntu-12.04-x86_64-server-hvm/etc/fstab
ami_configs/ubuntu-12.04-x86_64-server-hvm/etc/hosts
ami_configs/ubuntu-12.04-x86_64-server-hvm/etc/network
ami_configs/ubuntu-12.04-x86_64-server-hvm/etc/rc.local
ami_configs/ubuntu-12.04-x86_64-server-hvm/host_packages
ami_configs/ubuntu-12.04-x86_64-server-hvm/packages
ami_configs/ubuntu-12.04-x86_64-server-hvm/usr
cloudtools/__init__.py
cloudtools/aws/__init__.py
cloudtools/aws/ami.py
cloudtools/aws/instance.py
cloudtools/aws/sanity.py
cloudtools/aws/spot.py
cloudtools/aws/vpc.py
cloudtools/buildbot.py
cloudtools/dns.py
cloudtools/fabric/__init__.py
cloudtools/fileutils.py
cloudtools/graphite.py
cloudtools/jacuzzi.py
cloudtools/slavealloc.py
cloudtools/ssh.py
cloudtools/yaml.py
configs/bld-linux64
configs/bld-linux64-s3
configs/bld-linux64.cloud-init
configs/buildbot-master
configs/buildbot-master.cloud-init
configs/dev-linux64
configs/dev-linux64.cloud-init
configs/instance2ami.json
configs/routingtables.yml
configs/securitygroups.yml
configs/subnets.yml
configs/try-linux64
configs/try-linux64-s3
configs/try-linux64.cloud-init
configs/tst-emulator64
configs/tst-emulator64.cloud-init
configs/tst-linux32
configs/tst-linux32.cloud-init
configs/tst-linux64
configs/tst-linux64-hvm
configs/tst-linux64.cloud-init
configs/tst-win64
configs/tst-win64.user_data
configs/vcssync-linux64
configs/watch_pending.cfg
configs/watch_pending.cfg.example
conftest.py
instance_data/us-east-1.instance_data_dev.json
instance_data/us-east-1.instance_data_master.json
instance_data/us-east-1.instance_data_prod.json
instance_data/us-east-1.instance_data_tests.json
instance_data/us-east-1.instance_data_try.json
instance_data/us-west-2.instance_data_dev.json
instance_data/us-west-2.instance_data_master.json
instance_data/us-west-2.instance_data_prod.json
instance_data/us-west-2.instance_data_tests.json
instance_data/us-west-2.instance_data_try.json
requirements.txt
scripts/aws_clean_log_dir.py
scripts/aws_create_ami.py
scripts/aws_create_instance.py
scripts/aws_create_win_ami.py
scripts/aws_get_cloudtrail_logs.py
scripts/aws_manage_instances.py
scripts/aws_manage_routingtables.py
scripts/aws_manage_securitygroups.py
scripts/aws_manage_subnets.py
scripts/aws_manage_users.py
scripts/aws_process_cloudtrail_logs.py
scripts/aws_publish_amis.py
scripts/aws_sanity_checker.py
scripts/aws_stop_idle.py
scripts/aws_terminate_by_ami_id.py
scripts/aws_watch_pending.py
scripts/check_dns.py
scripts/copy_ami.py
scripts/delete_old_spot_amis.py
scripts/ec22ip.py
scripts/free_ips.py
scripts/get_spot_amis.py
scripts/spot_sanity_check.py
scripts/tag_spot_instances.py
tests/test_cloudtools_aws.py
tests/test_cloudtools_aws_instance.py
tests/test_cloudtools_aws_spot.py
tests/test_cloudtools_aws_vpc.py
tests/test_cloudtools_dns.py
tests/test_cloudtools_fabric.py
tests/test_cloudtools_graphite.py
tests/test_cloudtools_jacuzzi.py
tests/test_cloudtools_slavealloc.py
tests/test_cloudtools_ssh.py
tests/test_cloudtools_yaml.py
tox.ini
deleted file mode 100644
--- a/.coveragerc
+++ /dev/null
@@ -1,7 +0,0 @@
-[run]
-branch = True
-include =
-    cloudtools/*
-    scripts/*
-omit=
-    .tox
deleted file mode 120000
--- a/.gitignore
+++ /dev/null
@@ -1,1 +0,0 @@
-.hgignore
\ No newline at end of file
deleted file mode 100644
--- a/.hgignore
+++ /dev/null
@@ -1,6 +0,0 @@
-\.pyc$
-\..*\.swp
-.*\.log
-\.tox/
-\.ropeproject/
-\.coverage
deleted file mode 100644
--- a/.travis.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-language: python
-python:
-  - "2.7"
-
-before_install:
-    - sudo apt-get update
-    - sudo apt-get install libmysqlclient-dev
-install:
-    - travis_retry pip install tox==1.8
-script:
-    - tox -e py27,py27-coveralls
deleted file mode 100644
--- a/ami_configs/bld-linux64.json
+++ /dev/null
@@ -1,74 +0,0 @@
-{
-    "us-west-1": {
-        "ami": "ami-fe393ebb",
-        "instance_type": "c3.xlarge",
-        "arch": "x86_64",
-        "distro": "centos",
-        "root_device_type": "instance-store",
-        "virtualization_type": "hvm",
-        "bucket": "mozilla-releng-amis-usw1",
-        "bucket_dir": "bundles",
-        "aws_user_id": "314336048151",
-        "target": {
-            "size": 50,
-            "fs_type": "ext4",
-            "e2_label": "root_dev",
-            "aws_dev_name": "/dev/sdh",
-            "int_dev_name": "/dev/xvdh",
-            "mount_point": "/mnt",
-            "tags": {
-                "moz-type": "bld-linux64",
-                "moz-instance-family": "c3",
-                "moz-virtualization-type": "hvm"
-            }
-        }
-    },
-    "us-east-1": {
-        "ami": "ami-b06a98d8",
-        "instance_type": "c3.xlarge",
-        "arch": "x86_64",
-        "distro": "centos",
-        "root_device_type": "instance-store",
-        "virtualization_type": "hvm",
-        "bucket": "mozilla-releng-amis-use1",
-        "bucket_dir": "bundles",
-        "aws_user_id": "314336048151",
-        "target": {
-            "size": 50,
-            "fs_type": "ext4",
-            "e2_label": "root_dev",
-            "aws_dev_name": "/dev/sdh",
-            "int_dev_name": "/dev/xvdh",
-            "mount_point": "/mnt",
-            "tags": {
-                "moz-type": "bld-linux64",
-                "moz-instance-family": "c3",
-                "moz-virtualization-type": "hvm"
-            }
-        }
-    },
-    "us-west-2": {
-        "ami": "ami-7bdaa84b",
-        "instance_type": "c3.xlarge",
-        "arch": "x86_64",
-        "distro": "centos",
-        "root_device_type": "instance-store",
-        "virtualization_type": "hvm",
-        "bucket": "mozilla-releng-amis-usw2",
-        "bucket_dir": "bundles",
-        "aws_user_id": "314336048151",
-        "target": {
-            "size": 50,
-            "fs_type": "ext4",
-            "e2_label": "root_dev",
-            "aws_dev_name": "/dev/sdh",
-            "int_dev_name": "/dev/xvdh",
-            "mount_point": "/mnt",
-            "tags": {
-                "moz-type": "bld-linux64",
-                "moz-instance-family": "c3",
-                "moz-virtualization-type": "hvm"
-            }
-        }
-    }
-}
deleted file mode 120000
--- a/ami_configs/bld-linux64/additional_packages
+++ /dev/null
@@ -1,1 +0,0 @@
-../centos-6-x86_64-hvm-base/additional_packages
\ No newline at end of file
deleted file mode 100644
--- a/ami_configs/bld-linux64/boot/grub/grub.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-default=0
-timeout=0
-hiddenmenu
-title default kernel
-        root (hd0,0)
-        kernel /vmlinuz-@VERSION@ ro root=/dev/mapper/cloud_root-lv_root selinux=0 console=ttyS0 ro
-        initrd /initramfs-@VERSION@.img
deleted file mode 100644
--- a/ami_configs/bld-linux64/etc/fstab
+++ /dev/null
@@ -1,6 +0,0 @@
-/dev/cloud_root/lv_root   /         @FS_TYPE@   defaults,noatime,nodiratime,commit=60        1 1
-/dev/xvda1   /boot         ext2   defaults,noatime        1 1
-none       /proc     proc    defaults        0 0
-none       /sys      sysfs   defaults        0 0
-none       /dev/pts  devpts  gid=5,mode=620  0 0
-none       /dev/shm  tmpfs   defaults        0 0
deleted file mode 120000
--- a/ami_configs/bld-linux64/etc/hosts
+++ /dev/null
@@ -1,1 +0,0 @@
-../../centos-6-x86_64-hvm-base/etc/hosts
\ No newline at end of file
deleted file mode 120000
--- a/ami_configs/bld-linux64/etc/init.d
+++ /dev/null
@@ -1,1 +0,0 @@
-../../centos-6-x86_64-hvm-base/etc/init.d
\ No newline at end of file
deleted file mode 120000
--- a/ami_configs/bld-linux64/etc/rc.local
+++ /dev/null
@@ -1,1 +0,0 @@
-../../centos-6-x86_64-hvm-base/etc/rc.local
\ No newline at end of file
deleted file mode 120000
--- a/ami_configs/bld-linux64/etc/sysconfig
+++ /dev/null
@@ -1,1 +0,0 @@
-../../centos-6-x86_64-hvm-base/etc/sysconfig
\ No newline at end of file
deleted file mode 120000
--- a/ami_configs/bld-linux64/etc/yum-local.cfg
+++ /dev/null
@@ -1,1 +0,0 @@
-../../centos-6-x86_64-hvm-base/etc/yum-local.cfg
\ No newline at end of file
deleted file mode 120000
--- a/ami_configs/bld-linux64/groupinstall
+++ /dev/null
@@ -1,1 +0,0 @@
-../centos-6-x86_64-hvm-base/groupinstall
\ No newline at end of file
deleted file mode 120000
--- a/ami_configs/bld-linux64/grub-install.diff
+++ /dev/null
@@ -1,1 +0,0 @@
-../centos-6-x86_64-hvm-base/grub-install.diff
\ No newline at end of file
deleted file mode 100644
--- a/ami_configs/bld-linux64/grub.cmd
+++ /dev/null
@@ -1,4 +0,0 @@
-device (hd0) /mnt-tmp/@IMG@
-root (hd0,0)
-setup (hd0)
-quit
deleted file mode 100644
--- a/ami_configs/centos-6-x86_64-base.json
+++ /dev/null
@@ -1,53 +0,0 @@
-{
-    "us-east-1": {
-        "ami": "ami-7d0c6314",
-        "instance_type": "c1.xlarge",
-        "arch": "x86_64",
-        "distro": "centos",
-        "target": {
-            "size": 4,
-            "fs_type": "ext4",
-            "e2_label": "root_dev",
-            "aws_dev_name": "/dev/sdh",
-            "int_dev_name": "/dev/xvdl",
-            "mount_point": "/mnt",
-            "tags": {
-                "moz-type": "bld-linux64"
-            }
-        }
-    },
-    "us-west-1": {
-        "ami": "ami-c7fad582",
-        "instance_type": "c1.xlarge",
-        "arch": "x86_64",
-        "distro": "centos",
-        "target": {
-            "size": 4,
-            "fs_type": "ext4",
-            "e2_label": "root_dev",
-            "aws_dev_name": "/dev/sdh",
-            "int_dev_name": "/dev/xvdl",
-            "mount_point": "/mnt",
-            "tags": {
-                "moz-type": "bld-linux64"
-            }
-        }
-    },
-    "us-west-2": {
-        "ami": "ami-5344d263",
-        "instance_type": "c1.xlarge",
-        "arch": "x86_64",
-        "distro": "centos",
-        "target": {
-            "size": 4,
-            "fs_type": "ext4",
-            "e2_label": "root_dev",
-            "aws_dev_name": "/dev/sdh",
-            "int_dev_name": "/dev/xvdl",
-            "mount_point": "/mnt",
-            "tags": {
-                "moz-type": "bld-linux64"
-            }
-        }
-    }
-}
deleted file mode 100644
--- a/ami_configs/centos-6-x86_64-base/additional_packages
+++ /dev/null
@@ -1,3 +0,0 @@
-dhclient
-openssh-server
-kernel
deleted file mode 100644
--- a/ami_configs/centos-6-x86_64-base/boot/grub/grub.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-default=0
-timeout=0
-hiddenmenu
-title default kernel
-        root (hd0)
-        kernel /boot/vmlinuz-@VERSION@ ro root=LABEL=root_dev selinux=0
-        initrd /boot/initramfs-@VERSION@.img
deleted file mode 100644
--- a/ami_configs/centos-6-x86_64-base/etc/fstab
+++ /dev/null
@@ -1,5 +0,0 @@
-LABEL=@ROOT_DEV_LABEL@   /         @FS_TYPE@   defaults,noatime        1 1
-none       /proc     proc    defaults        0 0
-none       /sys      sysfs   defaults        0 0
-none       /dev/pts  devpts  gid=5,mode=620  0 0
-none       /dev/shm  tmpfs   defaults        0 0
deleted file mode 100644
--- a/ami_configs/centos-6-x86_64-base/etc/hosts
+++ /dev/null
@@ -1,2 +0,0 @@
-127.0.0.1 localhost.localdomain localhost
-::1 localhost6.localdomain6 localhost
deleted file mode 100755
--- a/ami_configs/centos-6-x86_64-base/etc/init.d/rc.local
+++ /dev/null
@@ -1,37 +0,0 @@
-#! /bin/sh
-### BEGIN INIT INFO
-# Provides:          rc.local
-# Required-Start:    $remote_fs $syslog $all
-# Required-Stop:
-# Default-Start:     2 3 4 5
-# Default-Stop:
-# Short-Description: Run /etc/rc.local if it exist
-### END INIT INFO
-
-
-PATH=/sbin:/usr/sbin:/bin:/usr/bin
-
-
-do_start() {
-    if [ -x /etc/rc.local ]; then
-        echo "Running local boot scripts (/etc/rc.local)"
-        /etc/rc.local
-        return $?
-    fi
-}
-
-case "$1" in
-    start)
-        do_start
-        ;;
-    restart|reload|force-reload)
-        echo "Error: argument '$1' not supported" >&2
-        exit 3
-        ;;
-    stop)
-        ;;
-    *)
-        echo "Usage: $0 start|stop" >&2
-        exit 3
-        ;;
-esac
deleted file mode 100755
--- a/ami_configs/centos-6-x86_64-base/etc/rc.local
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/sh
-#
-# This script will be executed *after* all the other init scripts.
-# You can put your own initialization stuff in here if you don't
-# want to do the full Sys V style init stuff.
-
-touch /var/lock/subsys/local
-
-if [ ! -d /root/.ssh ] ; then
-    mkdir -p /root/.ssh
-    chmod 0700 /root/.ssh
-fi
-
-# bz 707364
-if [ ! -f /etc/blkid/blkid.tab ] ; then
-    blkid /dev/xvda &>/dev/null
-fi
-
-ATTEMPTS=10
-FAILED=0
-# Fetch public key using HTTP
-if [ -f /root/.ssh/authorized_keys ]; then
-    mv -f /root/.ssh/authorized_keys /root/.ssh/authorized_keys.bak
-fi
-
-while [ ! -f /root/.ssh/authorized_keys ]; do
-    curl -f http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key > /tmp/aws-key 2>/dev/null
-    if [ $? -eq 0 ]; then
-        cat /tmp/aws-key >> /root/.ssh/authorized_keys
-        chmod 0600 /root/.ssh/authorized_keys
-        rm -f /tmp/aws-key
-        echo "Successfully retrieved AWS public key from instance metadata"
-    else
-        FAILED=$(($FAILED + 1))
-        if [ $FAILED -ge $ATTEMPTS ]; then
-            echo "Failed to retrieve AWS public key after $FAILED attempts, quitting"
-            if [ -f /root/.ssh/authorized_keys.bak ]; then
-                echo "Reusing the previous one"
-                mv -f /root/.ssh/authorized_keys.bak /root/.ssh/authorized_keys
-            fi
-            break
-        fi
-        echo "Could not retrieve AWS public key (attempt #$FAILED/$ATTEMPTS), retrying in 5 seconds..."
-        sleep 5
-    fi
-done
deleted file mode 100644
--- a/ami_configs/centos-6-x86_64-base/etc/sysconfig/network
+++ /dev/null
@@ -1,1 +0,0 @@
-NETWORKING=yes
deleted file mode 100644
--- a/ami_configs/centos-6-x86_64-base/etc/sysconfig/network-scripts/ifcfg-eth0
+++ /dev/null
@@ -1,7 +0,0 @@
-DEVICE=eth0
-BOOTPROTO=dhcp
-ONBOOT=yes
-TYPE=Ethernet
-USERCTL=yes
-PEERDNS=yes
-IPV6INIT=no
deleted file mode 100644
--- a/ami_configs/centos-6-x86_64-base/etc/yum-local.cfg
+++ /dev/null
@@ -1,32 +0,0 @@
-[main]
-cachedir=/var/cache/yum
-debuglevel=2
-reposdir=/dev/null
-logfile=/var/log/yum.log
-exclude=*-debuginfo
-retries=20
-obsoletes=1
-gpgcheck=0
-assumeyes=1
-syslog_ident=yum
-syslog_device=
-
-[base]
-name=CentOS-6.2 - Base
-baseurl=http://puppetagain.pub.build.mozilla.org/data/repos/yum/mirrors/centos/6/latest/os/x86_64-for-ks/
-
-[epel]
-name=epel
-baseurl=http://puppetagain.pub.build.mozilla.org/data/repos/yum/mirrors/epel/6/latest/$basearch
-
-[puppetlabs]
-name=puppetlabs
-baseurl=http://puppetagain.pub.build.mozilla.org/data/repos/yum/mirrors/puppetlabs/el/6/products/$basearch
-
-[releng-noarch]
-name=releng-noarch
-baseurl=http://puppetagain.pub.build.mozilla.org/data/repos/yum/releng/public/CentOS/6/noarch
-
-[releng]
-name=releng
-baseurl=http://puppetagain.pub.build.mozilla.org/data/repos/yum/releng/public/CentOS/6/$basearch
deleted file mode 100644
--- a/ami_configs/centos-6-x86_64-base/groupinstall
+++ /dev/null
@@ -1,1 +0,0 @@
-Base
deleted file mode 100644
--- a/ami_configs/centos-6-x86_64-hvm-base.json
+++ /dev/null
@@ -1,42 +0,0 @@
-{
-    "us-east-1": {
-        "ami": "ami-e9a18d80",
-        "instance_type": "c3.xlarge",
-        "arch": "x86_64",
-        "distro": "centos",
-        "virtualization_type": "hvm",
-        "target": {
-            "size": 35,
-            "fs_type": "ext4",
-            "e2_label": "root_dev",
-            "aws_dev_name": "/dev/sdh",
-            "int_dev_name": "/dev/xvdh",
-            "mount_point": "/mnt",
-            "tags": {
-                "moz-type": "bld-linux64",
-                "moz-instance-family": "c3",
-                "moz-virtualization-type": "hvm"
-            }
-        }
-    },
-    "us-west-2": {
-        "ami": "ami-f8f297c8",
-        "instance_type": "c3.xlarge",
-        "arch": "x86_64",
-        "distro": "centos",
-        "virtualization_type": "hvm",
-        "target": {
-            "size": 35,
-            "fs_type": "ext4",
-            "e2_label": "root_dev",
-            "aws_dev_name": "/dev/sdh",
-            "int_dev_name": "/dev/xvdh",
-            "mount_point": "/mnt",
-            "tags": {
-                "moz-type": "bld-linux64",
-                "moz-instance-family": "c3",
-                "moz-virtualization-type": "hvm"
-            }
-        }
-    }
-}
deleted file mode 100644
--- a/ami_configs/centos-6-x86_64-hvm-base/additional_packages
+++ /dev/null
@@ -1,4 +0,0 @@
-dhclient
-openssh-server
-kernel
-grub
deleted file mode 100644
--- a/ami_configs/centos-6-x86_64-hvm-base/boot/grub/device.map
+++ /dev/null
@@ -1,1 +0,0 @@
-(hd0) /dev/xvdh
deleted file mode 100644
--- a/ami_configs/centos-6-x86_64-hvm-base/boot/grub/grub.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-default=0
-timeout=0
-hiddenmenu
-title default kernel
-        root (hd0,0)
-        kernel /vmlinuz-@VERSION@ ro root=LABEL=root_dev selinux=0 console=ttyS0 ro
-        initrd /initramfs-@VERSION@.img
deleted file mode 100644
--- a/ami_configs/centos-6-x86_64-hvm-base/etc/fstab
+++ /dev/null
@@ -1,7 +0,0 @@
-LABEL=@ROOT_DEV_LABEL@   /         @FS_TYPE@   defaults,noatime,nodiratime,commit=60        1 1
-/dev/xvda1 /boot ext2 rw 0 0
-none       /proc     proc    defaults        0 0
-none       /sys      sysfs   defaults        0 0
-none       /dev/pts  devpts  gid=5,mode=620  0 0
-none       /dev/shm  tmpfs   defaults        0 0
-
deleted file mode 120000
--- a/ami_configs/centos-6-x86_64-hvm-base/etc/hosts
+++ /dev/null
@@ -1,1 +0,0 @@
-../../centos-6-x86_64-base/etc/hosts
\ No newline at end of file
deleted file mode 120000
--- a/ami_configs/centos-6-x86_64-hvm-base/etc/init.d
+++ /dev/null
@@ -1,1 +0,0 @@
-../../centos-6-x86_64-base/etc/init.d
\ No newline at end of file
deleted file mode 120000
--- a/ami_configs/centos-6-x86_64-hvm-base/etc/rc.local
+++ /dev/null
@@ -1,1 +0,0 @@
-../../centos-6-x86_64-base/etc/rc.local
\ No newline at end of file
deleted file mode 120000
--- a/ami_configs/centos-6-x86_64-hvm-base/etc/sysconfig
+++ /dev/null
@@ -1,1 +0,0 @@
-../../centos-6-x86_64-base/etc/sysconfig
\ No newline at end of file
deleted file mode 120000
--- a/ami_configs/centos-6-x86_64-hvm-base/etc/yum-local.cfg
+++ /dev/null
@@ -1,1 +0,0 @@
-../../centos-6-x86_64-base/etc/yum-local.cfg
\ No newline at end of file
deleted file mode 120000
--- a/ami_configs/centos-6-x86_64-hvm-base/groupinstall
+++ /dev/null
@@ -1,1 +0,0 @@
-../centos-6-x86_64-base/groupinstall
\ No newline at end of file
deleted file mode 100644
--- a/ami_configs/centos-6-x86_64-hvm-base/grub-install.diff
+++ /dev/null
@@ -1,18 +0,0 @@
---- grub-install.orig	2014-01-17 20:18:08.326272350 +0000
-+++ grub-install	2014-01-17 20:18:59.568094231 +0000
-@@ -104,6 +104,7 @@
- 		grep -v '/mapper/[[:alnum:]]\+-[[:alnum:]]\+$' | uniq |
- 		sed -e 's%\([shv]d[a-z]\)[0-9]*$%\1%' \
- 				  -e 's%\(d[0-9]*\)p[0-9]*$%\1%' \
-+				  -e 's%\(xvd[a-z]\)[0-9]*$%\1%' \
- 				  -e 's%\(fd[0-9]*\)$%\1%' \
- 				  -e 's%/part[0-9]*$%/disc%' \
- 				  -e 's%\(c[0-7]d[0-9]*\).*$%\1%' \
-@@ -114,6 +115,7 @@
- 		grep -v '/mapper/[[:alnum:]]\+-[[:alnum:]]\+$' | uniq |
- 		sed -e 's%.*/[shv]d[a-z]\([0-9]*\)$%\1%' \
- 				  -e 's%.*d[0-9]*p%%' \
-+				  -e 's%.*/xvd[a-z]\([0-9]*\)$%\1%' \
- 				  -e 's%.*/fd[0-9]*$%%' \
- 				  -e 's%.*/floppy/[0-9]*$%%' \
- 				  -e 's%.*/\(disc\|part\([0-9]*\)\)$%\2%' \
deleted file mode 120000
--- a/ami_configs/centos-6-x86_64-hvm-try
+++ /dev/null
@@ -1,1 +0,0 @@
-centos-6-x86_64-hvm-base
\ No newline at end of file
deleted file mode 100644
--- a/ami_configs/centos-6-x86_64-hvm-try.json
+++ /dev/null
@@ -1,44 +0,0 @@
-{
-    "us-east-1": {
-        "ami": "ami-e9a18d80",
-        "instance_type": "c3.xlarge",
-        "arch": "x86_64",
-        "distro": "centos",
-        "virtualization_type": "hvm",
-        "target": {
-            "size": 35,
-            "fs_type": "ext4",
-            "mkfs_args": "-O ^has_journal",
-            "e2_label": "root_dev",
-            "aws_dev_name": "/dev/sdh",
-            "int_dev_name": "/dev/xvdh",
-            "mount_point": "/mnt",
-            "tags": {
-                "moz-type": "try-linux64",
-                "moz-instance-family": "c3",
-                "moz-virtualization-type": "hvm"
-            }
-        }
-    },
-    "us-west-2": {
-        "ami": "ami-f8f297c8",
-        "instance_type": "c3.xlarge",
-        "arch": "x86_64",
-        "distro": "centos",
-        "virtualization_type": "hvm",
-        "target": {
-            "size": 35,
-            "fs_type": "ext4",
-            "mkfs_args": "-O ^has_journal",
-            "e2_label": "root_dev",
-            "aws_dev_name": "/dev/sdh",
-            "int_dev_name": "/dev/xvdh",
-            "mount_point": "/mnt",
-            "tags": {
-                "moz-type": "try-linux64",
-                "moz-instance-family": "c3",
-                "moz-virtualization-type": "hvm"
-            }
-        }
-    }
-}
deleted file mode 120000
--- a/ami_configs/centos-6-x86_64-server.json
+++ /dev/null
@@ -1,1 +0,0 @@
-centos-6-x86_64-base.json
\ No newline at end of file
deleted file mode 100644
--- a/ami_configs/centos-6-x86_64-server/additional_packages
+++ /dev/null
@@ -1,4 +0,0 @@
-dhclient
-openssh-server
-kernel
-postfix
deleted file mode 120000
--- a/ami_configs/centos-6-x86_64-server/boot
+++ /dev/null
@@ -1,1 +0,0 @@
-../centos-6-x86_64-base/boot
\ No newline at end of file
deleted file mode 120000
--- a/ami_configs/centos-6-x86_64-server/etc
+++ /dev/null
@@ -1,1 +0,0 @@
-../centos-6-x86_64-base/etc
\ No newline at end of file
deleted file mode 120000
--- a/ami_configs/centos-6-x86_64-server/groupinstall
+++ /dev/null
@@ -1,1 +0,0 @@
-../centos-6-x86_64-base/groupinstall
\ No newline at end of file
deleted file mode 100644
--- a/ami_configs/fake_puppet.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-description "Fake puppet script to unblock downstream upstart jobs"
-
-# Don't expect a long running process
-task
-
-start on (local-filesystems and net-device-up IFACE!=lo)
-script
-     /usr/sbin/fake_puppet.sh
-end script
-
deleted file mode 100755
--- a/ami_configs/fake_puppet.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#! /bin/bash
-exit 0
deleted file mode 100644
--- a/ami_configs/kill_chroot.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-PREFIX=$1
-
-for ROOT in /proc/*/root; do
-    LINK=$(readlink $ROOT)
-    if [ "x$LINK" != "x" ]; then
-        if [ "x${LINK:0:${#PREFIX}}" = "x$PREFIX" ]; then
-            # this process is in the chroot...
-            PID=$(basename $(dirname "$ROOT"))
-            kill "$PID"
-            kill -9 "$PID"
-        fi
-    fi
-done
deleted file mode 100644
--- a/ami_configs/rbt-win64.json
+++ /dev/null
@@ -1,42 +0,0 @@
-{
-    "hostname": "rbt-win64-ec2-%03d",
-    "us-east-1": {
-        "type": "rbt-win64",
-        "instance_profile_name": "tst-win64",
-        "domain": "test.releng.use1.mozilla.com",
-        "ami": "ami-7527031c",
-        "subnet_ids": ["subnet-8f32cbe5", "subnet-3835cc52", "subnet-ed35cc87", "subnet-ae35ccc4"],
-        "security_group_desc": ["default VPC security group", "windows slaves"],
-        "security_group_ids": ["sg-f3927c9c", "sg-18a07677"],
-        "instance_type": "m1.medium",
-        "distro": "win2012",
-        "user_data_file": "ami_configs/rbt-win64.user_data",
-        "use_public_ip": true,
-        "device_map": {
-            "/dev/sda1": {
-                "size": 30,
-                "instance_dev": "C:"
-            }
-        }
-    },
-    "us-west-2": {
-        "type": "rbt-win64",
-        "instance_profile_name": "tst-win64",
-        "domain": "test.releng.usw2.mozilla.com",
-        "ami_desc": "Windows_Server-2012-RTM-English-64Bit-Base-2014.03.12",
-        "ami": "ami-c61678f6",
-        "subnet_ids": ["subnet-a4cba8cd", "subnet-aecba8c7", "subnet-be89a2ca", "subnet-d6cba8bf"],
-        "security_group_desc": ["default VPC security group", "windows slaves"],
-        "security_group_ids": ["sg-d5617cb9", "sg-84beade6"],
-        "instance_type": "m1.medium",
-        "distro": "win2012",
-        "user_data_file": "ami_configs/rbt-win64.user_data",
-        "use_public_ip": true,
-        "device_map": {
-            "/dev/sda1": {
-                "size": 30,
-                "instance_dev": "C:"
-            }
-        }
-    }
-}
deleted file mode 100644
--- a/ami_configs/rbt-win64.user_data
+++ /dev/null
@@ -1,276 +0,0 @@
-<powershell>
-### TODO: Add error handling
-### TODO: Check Windows defender, indexing service
-### TODO: Add runslave.bat with: msys, unzip, hg (to system-wide path?)
-### TODO: Figure out how to resolve http://repos/
-### TODO: Minimize cmdwindow?
-### TODO: install win32file
-
-Start-Transcript -Path 'c:\userdata-transcript.txt' -Force
-Set-StrictMode -Version Latest
-Set-ExecutionPolicy Unrestricted
-
-Import-Module AWSPowerShell
-
-$MOZBUILD = "C:\mozilla-build"
-$VIRTUALENV = "$MOZBUILD\buildbotve"
-$PYTHONDIR = "C:\Python27"
-$PYTHON = "$PYTHONDIR\python.exe"
-
-$LOG = 'c:\userdata-log.txt'
-Function Log ($str) {{
-    $d = Get-Date
-    Add-Content $LOG -value "$d - $str"
-}}
-
-Log "Userdata started"
-
-
-### Functions
-
-# We need this helper, because PowerShell has a separate
-# notion of directory for all child commands, and directory
-# for the script. Running commands directly use the
-# location set by cd, while things like DownloadFile
-# will use the script directory (set by SetCurrentDirectory)
-#
-# This function makes things a little bit easier to follow
-Function SetDirectory ($dir) {{
-    Set-Location $dir
-    [System.IO.Directory]::SetCurrentDirectory($dir)
-}}
-
-# silent MSI install helper
-Function InstallMSI ($msi) {{
-    Start-Process -Wait -FilePath "msiexec.exe" -ArgumentList "/qb /i $msi"
-}}
-
-# HTTP download helper
-Function GetFromHTTP ($url, $path) {{
-    Log "Downloading $url to $path"
-    $client = new-object System.Net.WebClient
-    $client.DownloadFile($url, $path)
-}}
-
-# Fetch something from our S3 bucket; less verbose version than writing
-# Out the Read-S3Object command. Always puts in current dir.
-Function GetFromS3 ($obj) {{
-    Read-S3Object -BucketName mozilla-releng-tools -Key $obj -File $obj
-}}
-
-
-### Install stuff
-SetDirectory $Env:USERPROFILE
-
-### Install python
-GetFromS3 python-2.7.5.msi
-Log "Installing python"
-Start-Process -Wait -FilePath "python-2.7.5.msi" -ArgumentList "/qn"
-Log "Done"
-
-Log "Installing pywin32 to $PYTHON"
-GetFromS3 ez_setup.py
-GetFromS3 setuptools-3.3.zip
-GetFromS3 pywin32-218.win32-py2.7.exe
-& $PYTHON ez_setup.py setuptools-3.3.zip
-& $PYTHONDIR\Scripts\easy_install.exe pywin32-218.win32-py2.7.exe
-Log "Done"
-
-### Install MozillaBuild
-GetFromS3 MozillaBuildSetup-Latest.exe
-Log "Install MozillaBuild"
-Start-Process -Wait -FilePath "MozillaBuildSetup-Latest.exe" -ArgumentList "/S"
-Log "Done"
-
-### Append to system PATH
-[System.Environment]::SetEnvironmentVariable("PATH", $Env:Path + ";c:\mozilla-build\msys\bin;c:\mozilla-build\hg;c:\mozilla-build\vim\vim72;c:\mozilla-build\python;c:\mozilla-build\python\scripts;c:\mozilla-build\7zip;c:\mozilla-build\info-zip;c:\mozilla-build\wget", "Machine")
-
-# Grab the Debug CRT runtimes and install them
-Log "Installing Debug CRTs"
-GetFromS3 Microsoft_VC100_DebugCRT_x64.msi
-GetFromS3 Microsoft_VC100_DebugCRT_x86.msi
-InstallMSI Microsoft_VC100_DebugCRT_x64.msi
-InstallMSI Microsoft_VC100_DebugCRT_x86.msi
-Log "Done"
-
-### Install SSH
-Log "Installing SSH"
-SetDirectory "C:\Program Files (x86)"
-GetFromS3 KTS.zip
-& $MOZBUILD\7zip\7z x -y KTS.zip
-del KTS.zip
-SetDirectory "C:\Program Files (x86)\KTS"
-& .\install.bat
-
-Log "Adding sshd to firewall"
-netsh advfirewall firewall add rule name="sshd" dir=in action=allow program="C:\Program Files (x86)\KTS\daemon.exe" enable=yes
-Log "Done"
-
-### Install buildbot
-Log "Installing buildbot"
-SetDirectory $Env:USERPROFILE
-# Grab Python and its packages from S3, all in a single tar file.
-GetFromS3 python-packages.tar
-& $MOZBUILD\7zip\7z x -y python-packages.tar
-SetDirectory 'C:\mozilla-build'
-# delete the old buildbotve dir and create a new one
-Remove-Item $VIRTUALENV -Force -Recurse -EA SilentlyContinue
-New-Item -itemtype directory -path $VIRTUALENV -force
-
-# two steps, because 7zip.
-& $MOZBUILD\7zip\7z e -y $Env:USERPROFILE\virtualenv-1.9.1.tar.gz
-& $MOZBUILD\7zip\7z x -y virtualenv-1.9.1.tar
-del virtualenv-1.9.1.tar
-
-# set up the virtualenv
-& $PYTHON virtualenv-1.9.1\virtualenv.py --distribute $VIRTUALENV
-Log "Set up virtualenv"
-Copy-Item virtualenv-1.9.1\virtualenv.py -Destination $VIRTUALENV\virtualenv.py
-
-# install the world
-$python_packages = @("pywin32-218.win32-py2.7.exe",
-"zope.interface-3.6.1-py2.7-win32.egg",
-"Twisted-13.0.0.win32-py2.7.exe",
-"buildbot-slave-0.8.4-pre-moz2.tar.gz",
-"buildbot-0.8.4-pre-moz2.tar.gz",
-"simplejson-3.3.0.tar.gz")
-foreach ($pp in $python_packages) {{
-    Log "Installing $pp.."
-    if (($pp).EndsWith(".exe") -or ($pp).EndsWith(".egg")) {{
-        & $VIRTUALENV\Scripts\easy_install.exe $Env:USERPROFILE\$pp
-    }} else {{
-        & $VIRTUALENV\Scripts\pip.exe install --no-deps $Env:USERPROFILE\$pp
-    }}
-}}
-
-Log "Installed python packages"
-
-# patch twisted
-Log "Patching twisted"
-Clear-Content "$VIRTUALENV\twisted.patch" -EA SilentlyContinue
-Add-Content "$VIRTUALENV\twisted.patch" -value @"
---- twisted/internet/_dumbwin32proc.py.orig  2013-06-08 11:39:31 -0400
-+++ twisted/internet/_dumbwin32proc.py  2013-06-08 11:39:20 -0400
-@@ -252,7 +252,8 @@
-         if self.pid is None:
-             raise error.ProcessExitedAlready()
-         if signalID in ("INT", "TERM", "KILL"):
--            win32process.TerminateProcess(self.hProcess, 1)
-+            # win32process.TerminateProcess(self.hProcess, 1)
-+            os.popen('taskkill /T /F /PID %s' % self.pid)
- 
- 
-     def _getReason(self, status):
-"@
-& $MOZBUILD\msys\bin\patch.exe "$VIRTUALENV\Lib\site-packages\Twisted-13.0.0-py2.7-win32.egg\twisted\internet\_dumbwin32proc.py" "$VIRTUALENV\twisted.patch" 
-
-### Set up users
-net user cltbld {password} /add
-net localgroup "Remote Desktop Users" cltbld /add
-wmic path Win32_UserAccount where Name='cltbld' set PasswordExpires=false
-
-net user cltbld-starter {password} /add
-net localgroup "Remote Desktop Users" cltbld-starter /add
-wmic path Win32_UserAccount where Name='cltbld-starter' set PasswordExpires=false
-
-net user Administrator {password}
-wmic path Win32_UserAccount where Name='Administrator' set PasswordExpires=false
-
-# Get this for later
-SetDirectory $Env:USERPROFILE
-GetFromS3 Autologon.exe
-GetFromS3 userpolicy.inf
-
-### Set up runslave.py
-SetDirectory "C:\"
-GetFromS3 runslave.py
-GetFromS3 runslave.bat
-Log "Grabbed runslave.py"
-
-Log "Enabling cleartype on login"
-schtasks /create /tn cleartype /tr "powershell -noexit C:\cleartype.ps1" /sc ONLOGON /ru cltbld
-Log "Done"
-
-Log "Creating registry entry to run Explorer on login to force the Desktop interface"
-reg add HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion\Run /v desktopmode /d C:\Windows\explorer.exe
-Log "Done"
-
-Log "Importing registry entry to enable the default Windows theme" 
-GetFromS3 set_default_theme.reg 
-reg import set_default_theme.reg 
-Log "Done"
-
-#### Set up runtests
-SetDirectory "C:\"
-GetFromS3 runtests.bat
-GetFromS3 runtests.sh
-
-Log "Creating scheduled task to run tests on login"
-schtasks /create /tn runtests /tr "C:\runtests.bat" /sc ONLOGON /ru cltbld
-Log "Done"
-
-Log "Creating scheduled task to login to localhost via RDP on boot"
-# From http://www.donkz.nl/
-# Does cmdline rdp sessions
-# Run "rdp.exe /encrypt" to generate encrypted forms of passwords
-GetFromS3 rdp.exe
-GetFromS3 start-rdp.bat
-GetFromS3 cleartype.ps1
-
-schtasks /create /tn start-rdp /tr "C:\start-rdp.bat" /sc ONLOGON /ru cltbld-starter
-Log "Done"
-
-### We use uptime.exe from aws_stop_idle.py to find the host's uptime since we
-### don't always have access to run 'net stats srv'
-GetFromS3 uptime.exe
-
-
-### Set up the maintenance service
-Log "Setting up maintenance service"
-Remove-Item "$MOZBUILD\maintenance" -Force -Recurse -EA SilentlyContinue
-New-Item -itemtype directory -path "$MOZBUILD\maintenance"
-SetDirectory "$MOZBUILD\maintenance"
-
-GetFromS3 maintenanceservice.zip
-GetFromS3 MozRoot.cer
-GetFromS3 maintenance-service-keys.reg
-
-# Add MozRoot.cer
-& certutil -addstore CA MozRoot.cer
-Log "Installed MozRoot.cer"
-
-& regedit /s keys.reg
-Log "Installed service registry keys"
-
-& $MOZBUILD\7zip\7z x -y maintenanceservice.zip
-& .\maintenanceservice_installer.exe
-Log "Installed service"
-
-Log "Done"
-
-# Enable EC2Config to run at the next boot
-Log "Enabling EC2Config at next boot"
-$EC2SettingsFile="C:\Program Files\Amazon\Ec2ConfigService\Settings\Config.xml"
-$xml = [xml](get-content $EC2SettingsFile)
-$xmlElement = $xml.get_DocumentElement()
-$xmlElementToModify = $xmlElement.Plugins
-
-foreach ($element in $xmlElementToModify.Plugin)
-{{
-    if ($element.name -eq "Ec2SetPassword")
-    {{
-        $element.State="Enabled"
-    }}
-    elseif ($element.name -eq "Ec2HandleUserData")
-    {{
-        $element.State="Enabled"
-    }}
-}}
-$xml.Save($EC2SettingsFile)
-Log "Done"
-
-### Shutdown to signal we're done. We also need to shutdown/restart to get the hostname changed
-### aws_create_instance will clear our user data.
-Log "Done. Shutting down now!"
-shutdown /t 0 /f /s /c "AMI shutdown"
-</powershell>
deleted file mode 100644
--- a/ami_configs/releng-public.list
+++ /dev/null
@@ -1,4 +0,0 @@
-deb http://puppetagain.pub.build.mozilla.org/data/repos/apt/ubuntu precise main restricted universe
-deb http://puppetagain.pub.build.mozilla.org/data/repos/apt/ubuntu precise-security main restricted universe
-deb http://puppetagain.pub.build.mozilla.org/data/repos/apt/releng precise main
-deb http://puppetagain.pub.build.mozilla.org/data/repos/apt/puppetlabs precise main dependencies
deleted file mode 100644
--- a/ami_configs/releng-public.repo
+++ /dev/null
@@ -1,52 +0,0 @@
-[base]
-name=base
-baseurl=http://puppetagain.pub.build.mozilla.org/data/repos/yum/mirrors/centos/6/latest/os/$basearch
-enabled=1
-gpgcheck=0
-
-[puppet]
-name=puppet
-baseurl=http://puppetagain.pub.build.mozilla.org/data/repos/yum/mirrors/puppetlabs/el/6/products/$basearch
-enabled=1
-gpgcheck=0
-
-[updates]
-name=updates
-baseurl=http://puppetagain.pub.build.mozilla.org/data/repos/yum/mirrors/centos/6/latest/updates/$basearch
-enabled=1
-gpgcheck=0
-
-[puppetlabs-main]
-name=puppetlabs-main
-descr=Puppet
-baseurl=http://puppetagain.pub.build.mozilla.org/data/repos/yum/mirrors/puppetlabs/el/6/products/$basearch
-enabled=1
-gpgcheck=0
-
-[puppetlabs-deps]
-name=puppetlabs-deps
-descr=Puppet
-baseurl=http://puppetagain.pub.build.mozilla.org/data/repos/yum/mirrors/puppetlabs/el/6/dependencies/$basearch
-enabled=1
-gpgcheck=0
-
-[releng]
-name=releng
-descr=Releng
-baseurl=http://puppetagain.pub.build.mozilla.org/data/repos/yum/releng/public/CentOS/6/$basearch
-enabled=1
-gpgcheck=0
-
-[releng-noarch]
-name=releng-noarch
-descr=Releng-noarch
-baseurl=http://puppetagain.pub.build.mozilla.org/data/repos/yum/releng/public/CentOS/6/noarch
-enabled=1
-gpgcheck=0
-
-[cloud-init]
-name=epel
-baseurl=http://puppetagain.pub.build.mozilla.org/data/repos/yum/custom/cloud-init/$basearch
-enabled=1
-gpgcheck=0
-
deleted file mode 100755
--- a/ami_configs/s3-get
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env python
-
-import argparse
-import boto
-import sys
-
-
-def main():
-    parser = argparse.ArgumentParser()
-    parser.add_argument("-b", "--bucket", required=True,
-                        help="Bucket name")
-    parser.add_argument("-k", "--key", required=True,
-                        help="Key name")
-    parser.add_argument("-o", "--out-file", required=True,
-                        help="File name")
-    args = parser.parse_args()
-
-    conn = boto.connect_s3(anon=True)
-    bucket = conn.get_bucket(args.bucket)
-    key = bucket.get_key(args.key)
-    if args.out_file == "-":
-        key.get_contents_to_file(sys.stdout)
-    else:
-        key.get_contents_to_filename(args.out_file)
-
-if __name__ == "__main__":
-    main()
deleted file mode 120000
--- a/ami_configs/try-linux64
+++ /dev/null
@@ -1,1 +0,0 @@
-bld-linux64
\ No newline at end of file
deleted file mode 100644
--- a/ami_configs/try-linux64.json
+++ /dev/null
@@ -1,74 +0,0 @@
-{
-    "us-west-1": {
-        "ami": "ami-fe393ebb",
-        "instance_type": "c3.xlarge",
-        "arch": "x86_64",
-        "distro": "centos",
-        "root_device_type": "instance-store",
-        "virtualization_type": "hvm",
-        "bucket": "mozilla-releng-amis-usw1",
-        "bucket_dir": "bundles",
-        "aws_user_id": "314336048151",
-        "target": {
-            "size": 50,
-            "fs_type": "ext4",
-            "e2_label": "root_dev",
-            "aws_dev_name": "/dev/sdh",
-            "int_dev_name": "/dev/xvdh",
-            "mount_point": "/mnt",
-            "tags": {
-                "moz-type": "try-linux64",
-                "moz-instance-family": "c3",
-                "moz-virtualization-type": "hvm"
-            }
-        }
-    },
-    "us-east-1": {
-        "ami": "ami-b06a98d8",
-        "instance_type": "c3.xlarge",
-        "arch": "x86_64",
-        "distro": "centos",
-        "root_device_type": "instance-store",
-        "virtualization_type": "hvm",
-        "bucket": "mozilla-releng-amis-use1",
-        "bucket_dir": "bundles",
-        "aws_user_id": "314336048151",
-        "target": {
-            "size": 50,
-            "fs_type": "ext4",
-            "e2_label": "root_dev",
-            "aws_dev_name": "/dev/sdh",
-            "int_dev_name": "/dev/xvdh",
-            "mount_point": "/mnt",
-            "tags": {
-                "moz-type": "try-linux64",
-                "moz-instance-family": "c3",
-                "moz-virtualization-type": "hvm"
-            }
-        }
-    },
-    "us-west-2": {
-        "ami": "ami-7bdaa84b",
-        "instance_type": "c3.xlarge",
-        "arch": "x86_64",
-        "distro": "centos",
-        "root_device_type": "instance-store",
-        "virtualization_type": "hvm",
-        "bucket": "mozilla-releng-amis-usw2",
-        "bucket_dir": "bundles",
-        "aws_user_id": "314336048151",
-        "target": {
-            "size": 50,
-            "fs_type": "ext4",
-            "e2_label": "root_dev",
-            "aws_dev_name": "/dev/sdh",
-            "int_dev_name": "/dev/xvdh",
-            "mount_point": "/mnt",
-            "tags": {
-                "moz-type": "try-linux64",
-                "moz-instance-family": "c3",
-                "moz-virtualization-type": "hvm"
-            }
-        }
-    }
-}
deleted file mode 100644
--- a/ami_configs/tst-win64.json
+++ /dev/null
@@ -1,43 +0,0 @@
-{
-    "hostname": "tst-win64-ec2-%03d",
-    "us-east-1": {
-        "type": "tst-win64",
-        "instance_profile_name": "tst-win64",
-        "domain": "test.releng.use1.mozilla.com",
-        "ami_desc": "Windows_Server-2012-RTM-English-64Bit-Base-2014.03.12",
-        "ami": "ami-dfcdc4b6",
-        "subnet_ids": ["subnet-8f32cbe5", "subnet-3835cc52", "subnet-ed35cc87", "subnet-ae35ccc4"],
-        "security_group_desc": ["default VPC security group", "windows slaves"],
-        "security_group_ids": ["sg-f3927c9c", "sg-18a07677"],
-        "instance_type": "m1.medium",
-        "distro": "win2012",
-        "user_data_file": "ami_configs/tst-win64.user_data",
-        "use_public_ip": true,
-        "device_map": {
-            "/dev/sda1": {
-                "size": 30,
-                "instance_dev": "C:"
-            }
-        }
-    },
-    "us-west-2": {
-        "type": "tst-win64",
-        "instance_profile_name": "tst-win64",
-        "domain": "test.releng.usw2.mozilla.com",
-        "ami_desc": "Windows_Server-2012-RTM-English-64Bit-Base-2014.03.12",
-        "ami": "ami-c61678f6",
-        "subnet_ids": ["subnet-a4cba8cd", "subnet-aecba8c7", "subnet-be89a2ca", "subnet-d6cba8bf"],
-        "security_group_desc": ["default VPC security group", "windows slaves"],
-        "security_group_ids": ["sg-d5617cb9", "sg-84beade6"],
-        "instance_type": "m1.medium",
-        "distro": "win2012",
-        "user_data_file": "ami_configs/tst-win64.user_data",
-        "use_public_ip": true,
-        "device_map": {
-            "/dev/sda1": {
-                "size": 30,
-                "instance_dev": "C:"
-            }
-        }
-    }
-}
deleted file mode 100644
--- a/ami_configs/tst-win64.user_data
+++ /dev/null
@@ -1,275 +0,0 @@
-<powershell>
-### TODO: Add error handling
-### TODO: Check Windows defender, indexing service
-### TODO: Add runslave.bat with: msys, unzip, hg (to system-wide path?)
-### TODO: Figure out how to resolve http://repos/
-### TODO: Minimize cmdwindow?
-### TODO: install win32file
-
-Start-Transcript -Path 'c:\userdata-transcript.txt' -Force
-Set-StrictMode -Version Latest
-Set-ExecutionPolicy Unrestricted
-
-Import-Module AWSPowerShell
-
-$MOZBUILD = "C:\mozilla-build"
-$VIRTUALENV = "$MOZBUILD\buildbotve"
-$PYTHONDIR = "C:\Python27"
-$PYTHON = "$PYTHONDIR\python.exe"
-
-$LOG = 'c:\userdata-log.txt'
-Function Log ($str) {{
-    $d = Get-Date
-    Add-Content $LOG -value "$d - $str"
-}}
-
-Log "Userdata started"
-
-
-### Functions
-
-# We need this helper, because PowerShell has a separate
-# notion of directory for all child commands, and directory
-# for the script. Running commands directly use the
-# location set by cd, while things like DownloadFile
-# will use the script directory (set by SetCurrentDirectory)
-#
-# This function makes things a little bit easier to follow
-Function SetDirectory ($dir) {{
-    Set-Location $dir
-    [System.IO.Directory]::SetCurrentDirectory($dir)
-}}
-
-# silent MSI install helper
-Function InstallMSI ($msi) {{
-    Start-Process -Wait -FilePath "msiexec.exe" -ArgumentList "/qb /i $msi"
-}}
-
-# HTTP download helper
-Function GetFromHTTP ($url, $path) {{
-    Log "Downloading $url to $path"
-    $client = new-object System.Net.WebClient
-    $client.DownloadFile($url, $path)
-}}
-
-# Fetch something from our S3 bucket; less verbose version than writing
-# Out the Read-S3Object command. Always puts in current dir.
-Function GetFromS3 ($obj) {{
-    Read-S3Object -BucketName mozilla-releng-tools -Key $obj -File $obj
-}}
-
-
-### Install stuff
-SetDirectory $Env:USERPROFILE
-
-### Install python
-GetFromS3 python-2.7.5.msi
-Log "Installing python"
-Start-Process -Wait -FilePath "python-2.7.5.msi" -ArgumentList "/qn"
-Log "Done"
-
-Log "Installing pywin32 to $PYTHON"
-GetFromS3 ez_setup.py
-GetFromS3 setuptools-3.3.zip
-GetFromS3 pywin32-218.win32-py2.7.exe
-& $PYTHON ez_setup.py setuptools-3.3.zip
-& $PYTHONDIR\Scripts\easy_install.exe pywin32-218.win32-py2.7.exe
-Log "Done"
-
-### Install MozillaBuild
-GetFromS3 MozillaBuildSetup-Latest.exe
-Log "Install MozillaBuild"
-Start-Process -Wait -FilePath "MozillaBuildSetup-Latest.exe" -ArgumentList "/S"
-Log "Done"
-
-### Append to system PATH
-[System.Environment]::SetEnvironmentVariable("PATH", $Env:Path + ";c:\mozilla-build\msys\bin;c:\mozilla-build\hg;c:\mozilla-build\vim\vim72;c:\mozilla-build\python;c:\mozilla-build\python\scripts;c:\mozilla-build\7zip;c:\mozilla-build\info-zip;c:\mozilla-build\wget", "Machine")
-
-# Grab the Debug CRT runtimes and install them
-Log "Installing Debug CRTs"
-GetFromS3 Microsoft_VC100_DebugCRT_x64.msi
-GetFromS3 Microsoft_VC100_DebugCRT_x86.msi
-InstallMSI Microsoft_VC100_DebugCRT_x64.msi
-InstallMSI Microsoft_VC100_DebugCRT_x86.msi
-Log "Done"
-
-### Install SSH
-Log "Installing SSH"
-SetDirectory "C:\Program Files (x86)"
-GetFromS3 KTS.zip
-& $MOZBUILD\7zip\7z x -y KTS.zip
-del KTS.zip
-SetDirectory "C:\Program Files (x86)\KTS"
-& .\install.bat
-
-Log "Adding sshd to firewall"
-netsh advfirewall firewall add rule name="sshd" dir=in action=allow program="C:\Program Files (x86)\KTS\daemon.exe" enable=yes
-Log "Done"
-
-### Install buildbot
-Log "Installing buildbot"
-SetDirectory $Env:USERPROFILE
-# Grab Python and its packages from S3, all in a single tar file.
-GetFromS3 python-packages.tar
-& $MOZBUILD\7zip\7z x -y python-packages.tar
-SetDirectory 'C:\mozilla-build'
-# delete the old buildbotve dir and create a new one
-Remove-Item $VIRTUALENV -Force -Recurse -EA SilentlyContinue
-New-Item -itemtype directory -path $VIRTUALENV -force
-
-# two steps, because 7zip.
-& $MOZBUILD\7zip\7z e -y $Env:USERPROFILE\virtualenv-1.9.1.tar.gz
-& $MOZBUILD\7zip\7z x -y virtualenv-1.9.1.tar
-del virtualenv-1.9.1.tar
-
-# set up the virtualenv
-& $PYTHON virtualenv-1.9.1\virtualenv.py --distribute $VIRTUALENV
-Log "Set up virtualenv"
-Copy-Item virtualenv-1.9.1\virtualenv.py -Destination $VIRTUALENV\virtualenv.py
-
-# install the world
-$python_packages = @("pywin32-218.win32-py2.7.exe",
-"zope.interface-3.6.1-py2.7-win32.egg",
-"Twisted-13.0.0.win32-py2.7.exe",
-"buildbot-slave-0.8.4-pre-moz2.tar.gz",
-"buildbot-0.8.4-pre-moz2.tar.gz",
-"simplejson-3.3.0.tar.gz")
-foreach ($pp in $python_packages) {{
-    Log "Installing $pp.."
-    if (($pp).EndsWith(".exe") -or ($pp).EndsWith(".egg")) {{
-        & $VIRTUALENV\Scripts\easy_install.exe $Env:USERPROFILE\$pp
-    }} else {{
-        & $VIRTUALENV\Scripts\pip.exe install --no-deps $Env:USERPROFILE\$pp
-    }}
-}}
-
-Log "Installed python packages"
-
-# patch twisted
-Log "Patching twisted"
-Clear-Content "$VIRTUALENV\twisted.patch" -EA SilentlyContinue
-Add-Content "$VIRTUALENV\twisted.patch" -value @"
---- twisted/internet/_dumbwin32proc.py.orig  2013-06-08 11:39:31 -0400
-+++ twisted/internet/_dumbwin32proc.py  2013-06-08 11:39:20 -0400
-@@ -252,7 +252,8 @@
-         if self.pid is None:
-             raise error.ProcessExitedAlready()
-         if signalID in ("INT", "TERM", "KILL"):
--            win32process.TerminateProcess(self.hProcess, 1)
-+            # win32process.TerminateProcess(self.hProcess, 1)
-+            os.popen('taskkill /T /F /PID %s' % self.pid)
- 
- 
-     def _getReason(self, status):
-"@
-& $MOZBUILD\msys\bin\patch.exe "$VIRTUALENV\Lib\site-packages\Twisted-13.0.0-py2.7-win32.egg\twisted\internet\_dumbwin32proc.py" "$VIRTUALENV\twisted.patch" 
-
-### Set up users
-net user cltbld {password} /add
-net localgroup "Remote Desktop Users" cltbld /add
-wmic path Win32_UserAccount where Name='cltbld' set PasswordExpires=false
-
-net user cltbld-starter {password} /add
-net localgroup "Remote Desktop Users" cltbld-starter /add
-wmic path Win32_UserAccount where Name='cltbld-starter' set PasswordExpires=false
-
-net user Administrator {password}
-wmic path Win32_UserAccount where Name='Administrator' set PasswordExpires=false
-
-# Get this for later
-SetDirectory $Env:USERPROFILE
-GetFromS3 Autologon.exe
-GetFromS3 userpolicy.inf
-
-### Set up runslave.py
-SetDirectory "C:\"
-GetFromS3 runslave.py
-GetFromS3 runslave.bat
-Log "Grabbed runslave.py"
-
-Log "Enabling cleartype on login"
-schtasks /create /tn cleartype /tr "powershell -noexit C:\cleartype.ps1" /sc ONLOGON /ru cltbld
-Log "Done"
-
-Log "Creating scheduled task to run buildbot on boot"
-schtasks /create /tn runslave /tr "C:\runslave.bat" /sc ONLOGON /ru cltbld
-Log "Done"
-
-Log "Creating registry entry to run Explorer on login to force the Desktop interface"
-reg add HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion\Run /v desktopmode /d C:\Windows\explorer.exe
-Log "Done"
-
-Log "Importing registry entry to enable the default Windows theme" 
-GetFromS3 set_default_theme.reg 
-reg import set_default_theme.reg 
-Log "Done" 
-
-#Log "Creating scheduled task to run tests on login"
-#schtasks /create /tn runtests /tr "C:\runtests.bat" /sc ONLOGON /ru cltbld
-#Log "Done"
-
-Log "Creating scheduled task to login to localhost via RDP on boot"
-# From http://www.donkz.nl/
-# Does cmdline rdp sessions
-# Run "rdp.exe /encrypt" to generate encrypted forms of passwords
-GetFromS3 rdp.exe
-GetFromS3 start-rdp.bat
-GetFromS3 cleartype.ps1
-
-schtasks /create /tn start-rdp /tr "C:\start-rdp.bat" /sc ONLOGON /ru cltbld-starter
-Log "Done"
-
-### We use uptime.exe from aws_stop_idle.py to find the host's uptime since we
-### don't always have access to run 'net stats srv'
-GetFromS3 uptime.exe
-
-
-### Set up the maintenance service
-Log "Setting up maintenance service"
-Remove-Item "$MOZBUILD\maintenance" -Force -Recurse -EA SilentlyContinue
-New-Item -itemtype directory -path "$MOZBUILD\maintenance"
-SetDirectory "$MOZBUILD\maintenance"
-
-GetFromS3 maintenanceservice.zip
-GetFromS3 MozRoot.cer
-GetFromS3 maintenance-service-keys.reg
-
-# Add MozRoot.cer
-& certutil -addstore CA MozRoot.cer
-Log "Installed MozRoot.cer"
-
-& regedit /s keys.reg
-Log "Installed service registry keys"
-
-& $MOZBUILD\7zip\7z x -y maintenanceservice.zip
-& .\maintenanceservice_installer.exe
-Log "Installed service"
-
-Log "Done"
-
-# Enable EC2Config to run at the next boot
-Log "Enabling EC2Config at next boot"
-$EC2SettingsFile="C:\Program Files\Amazon\Ec2ConfigService\Settings\Config.xml"
-$xml = [xml](get-content $EC2SettingsFile)
-$xmlElement = $xml.get_DocumentElement()
-$xmlElementToModify = $xmlElement.Plugins
-
-foreach ($element in $xmlElementToModify.Plugin)
-{{
-    if ($element.name -eq "Ec2SetPassword")
-    {{
-        $element.State="Enabled"
-    }}
-    elseif ($element.name -eq "Ec2HandleUserData")
-    {{
-        $element.State="Enabled"
-    }}
-}}
-$xml.Save($EC2SettingsFile)
-Log "Done"
-
-### Shutdown to signal we're done. We also need to shutdown/restart to get the hostname changed
-### aws_create_instance will clear our user data.
-Log "Done. Shutting down now!"
-shutdown /t 0 /f /s /c "AMI shutdown"
-</powershell>
deleted file mode 100644
--- a/ami_configs/ubuntu-12.04-i386-desktop.json
+++ /dev/null
@@ -1,32 +0,0 @@
-{
-    "us-east-1": {
-        "ami": "ami-2acc7a42",
-        "instance_type": "m1.medium",
-        "arch": "i386",
-        "distro": "ubuntu",
-        "target": {
-            "size": 8,
-            "fs_type": "ext4",
-            "mkfs_args": "-O ^has_journal",
-            "e2_label": "cloudimg-rootfs",
-            "aws_dev_name": "/dev/sdh",
-            "int_dev_name": "/dev/xvdh",
-            "mount_point": "/mnt1"
-        }
-    },
-    "us-west-2": {
-        "ami": "ami-1b47052b",
-        "instance_type": "m1.medium",
-        "arch": "i386",
-        "distro": "ubuntu",
-        "target": {
-            "size": 8,
-            "fs_type": "ext4",
-            "mkfs_args": "-O ^has_journal",
-            "e2_label": "cloudimg-rootfs",
-            "aws_dev_name": "/dev/sdh",
-            "int_dev_name": "/dev/xvdh",
-            "mount_point": "/mnt1"
-        }
-    }
-}
deleted file mode 120000
--- a/ami_configs/ubuntu-12.04-i386-desktop/etc
+++ /dev/null
@@ -1,1 +0,0 @@
-../ubuntu-12.04-x86_64-desktop/etc
\ No newline at end of file
deleted file mode 100644
--- a/ami_configs/ubuntu-12.04-i386-desktop/host_packages
+++ /dev/null
@@ -1,1 +0,0 @@
-debootstrap
deleted file mode 100644
--- a/ami_configs/ubuntu-12.04-i386-desktop/packages
+++ /dev/null
@@ -1,6 +0,0 @@
-ubuntu-desktop
-openssh-server
-makedev
-curl
-grub
-linux-image-generic-pae
deleted file mode 120000
--- a/ami_configs/ubuntu-12.04-i386-desktop/usr
+++ /dev/null
@@ -1,1 +0,0 @@
-../ubuntu-12.04-x86_64-desktop/usr
\ No newline at end of file
deleted file mode 100644
--- a/ami_configs/ubuntu-12.04-x86_64-desktop-hvm.json
+++ /dev/null
@@ -1,34 +0,0 @@
-{
-    "us-east-1": {
-        "ami": "ami-3ccc7a54",
-        "instance_type": "c3.xlarge",
-        "arch": "x86_64",
-        "distro": "ubuntu",
-        "virtualization_type": "hvm",
-        "target": {
-            "size": 20,
-            "fs_type": "ext4",
-            "mkfs_args": "-O ^has_journal",
-            "e2_label": "cloudimg-rootfs",
-            "aws_dev_name": "/dev/sdh",
-            "int_dev_name": "/dev/xvdh",
-            "mount_point": "/mnt1"
-        }
-    },
-    "us-west-2": {
-        "ami": "ami-09470539",
-        "instance_type": "c3.xlarge",
-        "arch": "x86_64",
-        "distro": "ubuntu",
-        "virtualization_type": "hvm",
-        "target": {
-            "size": 20,
-            "fs_type": "ext4",
-            "mkfs_args": "-O ^has_journal",
-            "e2_label": "cloudimg-rootfs",
-            "aws_dev_name": "/dev/sdh",
-            "int_dev_name": "/dev/xvdh",
-            "mount_point": "/mnt1"
-        }
-    }
-}
deleted file mode 100644
--- a/ami_configs/ubuntu-12.04-x86_64-desktop-hvm/boot/grub/device.map
+++ /dev/null
@@ -1,1 +0,0 @@
-(hd0) /dev/xvdh
deleted file mode 100644
--- a/ami_configs/ubuntu-12.04-x86_64-desktop-hvm/etc/default/grub
+++ /dev/null
@@ -1,37 +0,0 @@
-# If you change this file, run 'update-grub' afterwards to update
-# /boot/grub/grub.cfg.
-# For full documentation of the options in this file, see:
-#   info -f grub -n 'Simple configuration'
-
-GRUB_DEFAULT=0
-#GRUB_HIDDEN_TIMEOUT=0
-#GRUB_HIDDEN_TIMEOUT_QUIET=true
-GRUB_TIMEOUT=5
-GRUB_DISTRIBUTOR=`lsb_release -i -s 2> /dev/null || echo Debian`
-GRUB_CMDLINE_LINUX_DEFAULT="console=ttyS0"
-GRUB_CMDLINE_LINUX=""
-
-# Uncomment to enable BadRAM filtering, modify to suit your needs
-# This works with Linux (no patch required) and with any kernel that obtains
-# the memory map information from GRUB (GNU Mach, kernel of FreeBSD ...)
-#GRUB_BADRAM="0x01234567,0xfefefefe,0x89abcdef,0xefefefef"
-
-# Uncomment to disable graphical terminal (grub-pc only)
-GRUB_TERMINAL=console
-
-# The resolution used on graphical terminal
-# note that you can use only modes which your graphic card supports via VBE
-# you can see them in real GRUB with the command `vbeinfo'
-#GRUB_GFXMODE=640x480
-
-# Uncomment if you don't want GRUB to pass "root=UUID=xxx" parameter to Linux
-#GRUB_DISABLE_LINUX_UUID=true
-
-# Uncomment to disable generation of recovery mode menu entries
-GRUB_DISABLE_RECOVERY="true"
-
-# Uncomment to get a beep at grub start
-#GRUB_INIT_TUNE="480 440 1"
-
-#Disable recordfail timeout
-GRUB_RECORDFAIL_TIMEOUT=0
deleted file mode 100644
--- a/ami_configs/ubuntu-12.04-x86_64-desktop-hvm/etc/fstab
+++ /dev/null
@@ -1,6 +0,0 @@
-/dev/xvda1 /boot ext2 defaults 0 2
-LABEL=@ROOT_DEV_LABEL@   /         @FS_TYPE@   defaults,noatime,nodiratime,commit=30        1 1
-none       /proc     proc    defaults        0 0
-none       /sys      sysfs   defaults        0 0
-none       /dev/pts  devpts  gid=5,mode=620  0 0
-none       /dev/shm  tmpfs   defaults        0 0
deleted file mode 120000
--- a/ami_configs/ubuntu-12.04-x86_64-desktop-hvm/etc/hosts
+++ /dev/null
@@ -1,1 +0,0 @@
-../../centos-6-x86_64-base/etc/hosts
\ No newline at end of file
deleted file mode 120000
--- a/ami_configs/ubuntu-12.04-x86_64-desktop-hvm/etc/network
+++ /dev/null
@@ -1,1 +0,0 @@
-../../ubuntu-12.04-x86_64-desktop/etc/network
\ No newline at end of file
deleted file mode 120000
--- a/ami_configs/ubuntu-12.04-x86_64-desktop-hvm/etc/rc.local
+++ /dev/null
@@ -1,1 +0,0 @@
-../../centos-6-x86_64-base/etc/rc.local
\ No newline at end of file
deleted file mode 100644
--- a/ami_configs/ubuntu-12.04-x86_64-desktop-hvm/host_packages
+++ /dev/null
@@ -1,2 +0,0 @@
-lvm2
-debootstrap
deleted file mode 100644
--- a/ami_configs/ubuntu-12.04-x86_64-desktop-hvm/packages
+++ /dev/null
@@ -1,8 +0,0 @@
-ubuntu-desktop
-openssh-server
-makedev
-curl
-grub-pc
-grub-legacy-ec2
-lvm2
-linux-image-generic
deleted file mode 120000
--- a/ami_configs/ubuntu-12.04-x86_64-desktop-hvm/usr
+++ /dev/null
@@ -1,1 +0,0 @@
-../ubuntu-12.04-x86_64-desktop/usr
\ No newline at end of file
deleted file mode 100644
--- a/ami_configs/ubuntu-12.04-x86_64-desktop.json
+++ /dev/null
@@ -1,32 +0,0 @@
-{
-    "us-east-1": {
-        "ami": "ami-2ccc7a44",
-        "instance_type": "c1.xlarge",
-        "arch": "x86_64",
-        "distro": "ubuntu",
-        "target": {
-            "size": 8,
-            "fs_type": "ext4",
-            "mkfs_args": "-O ^has_journal",
-            "e2_label": "cloudimg-rootfs",
-            "aws_dev_name": "/dev/sdh",
-            "int_dev_name": "/dev/xvdh",
-            "mount_point": "/mnt1"
-        }
-    },
-    "us-west-2": {
-        "ami": "ami-1d47052d",
-        "instance_type": "c1.xlarge",
-        "arch": "x86_64",
-        "distro": "ubuntu",
-        "target": {
-            "size": 8,
-            "fs_type": "ext4",
-            "mkfs_args": "-O ^has_journal",
-            "e2_label": "cloudimg-rootfs",
-            "aws_dev_name": "/dev/sdh",
-            "int_dev_name": "/dev/xvdh",
-            "mount_point": "/mnt1"
-        }
-    }
-}
deleted file mode 100644
--- a/ami_configs/ubuntu-12.04-x86_64-desktop/etc/fstab
+++ /dev/null
@@ -1,5 +0,0 @@
-LABEL=@ROOT_DEV_LABEL@   /         @FS_TYPE@   defaults,noatime,nodiratime,commit=30        1 1
-none       /proc     proc    defaults        0 0
-none       /sys      sysfs   defaults        0 0
-none       /dev/pts  devpts  gid=5,mode=620  0 0
-none       /dev/shm  tmpfs   defaults        0 0
deleted file mode 120000
--- a/ami_configs/ubuntu-12.04-x86_64-desktop/etc/hosts
+++ /dev/null
@@ -1,1 +0,0 @@
-../../centos-6-x86_64-base/etc/hosts
\ No newline at end of file
deleted file mode 100644
--- a/ami_configs/ubuntu-12.04-x86_64-desktop/etc/network/interfaces
+++ /dev/null
@@ -1,6 +0,0 @@
-# The loopback network interface
-auto lo
-iface lo inet loopback
-# The primary network interface
-auto eth0
-iface eth0 inet dhcp
deleted file mode 120000
--- a/ami_configs/ubuntu-12.04-x86_64-desktop/etc/rc.local
+++ /dev/null
@@ -1,1 +0,0 @@
-../../centos-6-x86_64-base/etc/rc.local
\ No newline at end of file
deleted file mode 100644
--- a/ami_configs/ubuntu-12.04-x86_64-desktop/host_packages
+++ /dev/null
@@ -1,1 +0,0 @@
-debootstrap
deleted file mode 100644
--- a/ami_configs/ubuntu-12.04-x86_64-desktop/packages
+++ /dev/null
@@ -1,6 +0,0 @@
-ubuntu-desktop
-openssh-server
-makedev
-curl
-grub
-linux-image-generic
deleted file mode 100755
--- a/ami_configs/ubuntu-12.04-x86_64-desktop/usr/sbin/policy-rc.d
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/sh
-# action forbidden by policy
-exit 101
deleted file mode 120000
--- a/ami_configs/ubuntu-12.04-x86_64-server-hvm.json
+++ /dev/null
@@ -1,1 +0,0 @@
-ubuntu-12.04-x86_64-desktop-hvm.json
\ No newline at end of file
deleted file mode 120000
--- a/ami_configs/ubuntu-12.04-x86_64-server-hvm/boot
+++ /dev/null
@@ -1,1 +0,0 @@
-../ubuntu-12.04-x86_64-desktop-hvm/boot
\ No newline at end of file
deleted file mode 100644
--- a/ami_configs/ubuntu-12.04-x86_64-server-hvm/etc/cloud/cloud.cfg.d/99-apt-preserve-sources-list.cfg
+++ /dev/null
@@ -1,1 +0,0 @@
-apt_preserve_sources_list: True
deleted file mode 120000
--- a/ami_configs/ubuntu-12.04-x86_64-server-hvm/etc/default
+++ /dev/null
@@ -1,1 +0,0 @@
-../../ubuntu-12.04-x86_64-desktop-hvm/etc/default
\ No newline at end of file
deleted file mode 120000
--- a/ami_configs/ubuntu-12.04-x86_64-server-hvm/etc/fstab
+++ /dev/null
@@ -1,1 +0,0 @@
-../../ubuntu-12.04-x86_64-desktop-hvm/etc/fstab
\ No newline at end of file
deleted file mode 120000
--- a/ami_configs/ubuntu-12.04-x86_64-server-hvm/etc/hosts
+++ /dev/null
@@ -1,1 +0,0 @@
-../../ubuntu-12.04-x86_64-desktop-hvm/etc/hosts
\ No newline at end of file
deleted file mode 120000
--- a/ami_configs/ubuntu-12.04-x86_64-server-hvm/etc/network
+++ /dev/null
@@ -1,1 +0,0 @@
-../../ubuntu-12.04-x86_64-desktop-hvm/etc/network
\ No newline at end of file
deleted file mode 120000
--- a/ami_configs/ubuntu-12.04-x86_64-server-hvm/etc/rc.local
+++ /dev/null
@@ -1,1 +0,0 @@
-../../ubuntu-12.04-x86_64-desktop-hvm/etc/rc.local
\ No newline at end of file
deleted file mode 120000
--- a/ami_configs/ubuntu-12.04-x86_64-server-hvm/host_packages
+++ /dev/null
@@ -1,1 +0,0 @@
-../ubuntu-12.04-x86_64-desktop-hvm/host_packages
\ No newline at end of file
deleted file mode 100644
--- a/ami_configs/ubuntu-12.04-x86_64-server-hvm/packages
+++ /dev/null
@@ -1,9 +0,0 @@
-ubuntu-minimal
-openssh-server
-makedev
-curl
-grub-pc
-grub-legacy-ec2
-lvm2
-linux-image-generic
-cloud-init
deleted file mode 120000
--- a/ami_configs/ubuntu-12.04-x86_64-server-hvm/usr
+++ /dev/null
@@ -1,1 +0,0 @@
-../ubuntu-12.04-x86_64-desktop-hvm/usr
\ No newline at end of file
deleted file mode 100644
deleted file mode 100644
--- a/cloudtools/aws/__init__.py
+++ /dev/null
@@ -1,278 +0,0 @@
-import os
-import logging
-import time
-import calendar
-import iso8601
-import json
-from boto.ec2 import connect_to_region
-from boto.vpc import VPCConnection
-from boto.s3.connection import S3Connection
-from repoze.lru import lru_cache
-from fabric.api import run
-
-log = logging.getLogger(__name__)
-AMI_CONFIGS_DIR = os.path.join(os.path.dirname(__file__), "../../ami_configs")
-INSTANCE_CONFIGS_DIR = os.path.join(os.path.dirname(__file__), "../../configs")
-DEFAULT_REGIONS = ['us-east-1', 'us-west-2']
-
-# Number of seconds from an instance's launch time for it to be considered
-# 'fresh'
-FRESH_INSTANCE_DELAY = 20 * 60
-FRESH_INSTANCE_DELAY_JACUZZI = 10 * 60
-
-
-@lru_cache(10)
-def get_aws_connection(region):
-    """Connect to an EC2 region. Caches connection objects"""
-    return connect_to_region(region)
-
-
-@lru_cache(10)
-def get_s3_connection():
-    """Connect to S3. Caches connection objects"""
-    return S3Connection()
-
-
-@lru_cache(10)
-def get_vpc(region):
-    conn = get_aws_connection(region)
-    return VPCConnection(region=conn.region)
-
-
-def wait_for_status(obj, attr_name, attr_value, update_method):
-    log.debug("waiting for %s availability", obj)
-    while True:
-        try:
-            getattr(obj, update_method)()
-            if getattr(obj, attr_name) == attr_value:
-                break
-            else:
-                time.sleep(1)
-        except:
-            log.exception('hit error waiting')
-            time.sleep(10)
-
-
-def attach_and_wait_for_volume(volume, aws_dev_name, internal_dev_name,
-                               instance_id):
-    """Attach a volume to an instance and wait until it is available"""
-    wait_for_status(volume, "status", "available", "update")
-    while True:
-        try:
-            volume.attach(instance_id, aws_dev_name)
-            break
-        except:
-            log.debug('hit error waiting for volume to be attached')
-            time.sleep(10)
-    while True:
-        try:
-            volume.update()
-            if volume.status == 'in-use':
-                if run('ls %s' % internal_dev_name).succeeded:
-                    break
-        except:
-            log.debug('hit error waiting for volume to be attached')
-            time.sleep(10)
-
-
-def mount_device(device, mount_point):
-    run('mkdir -p "%s"' % mount_point)
-    run('mount "{device}" "{mount_point}"'.format(device=device,
-                                                  mount_point=mount_point))
-
-
-def name_available(conn, name):
-    res = conn.get_all_instances()
-    instances = reduce(lambda a, b: a + b, [r.instances for r in res])
-    names = [i.tags.get("Name") for i in instances if i.state != "terminated"]
-    if name in names:
-        return False
-    else:
-        return True
-
-
-def parse_aws_time(t):
-    """Parses ISO8601 time format and returns local epoch time"""
-    t = calendar.timegm(time.strptime(t[:19], '%Y-%m-%dT%H:%M:%S'))
-    return t
-
-
-def aws_time_to_datetime(t):
-    return iso8601.parse_date(t)
-
-
-def aws_get_running_instances(instances, moz_instance_type):
-    retval = []
-    for i in instances:
-        if i.state != 'running':
-            continue
-        if i.tags.get('moz-type') != moz_instance_type:
-            continue
-        if i.tags.get('moz-state') != 'ready':
-            continue
-        retval.append(i)
-
-    return retval
-
-
-_aws_instances_cache = {}
-
-
-def aws_get_all_instances(regions):
-    """
-    Returns a list of all instances in the given regions
-    """
-    log.debug("fetching all instances for %s", regions)
-    retval = []
-    for region in regions:
-        if region in _aws_instances_cache:
-            log.debug("aws_get_all_instances - cache hit for %s", region)
-            retval.extend(_aws_instances_cache[region])
-        else:
-            conn = get_aws_connection(region)
-            region_instances = conn.get_only_instances()
-            log.debug("aws_get_running_instances - caching %s", region)
-            _aws_instances_cache[region] = region_instances
-            retval.extend(region_instances)
-    return retval
-
-
-@lru_cache(10)
-def get_user_data_tmpl(moz_instance_type):
-    cloud_init_config = os.path.join(INSTANCE_CONFIGS_DIR,
-                                     "%s.cloud-init" % moz_instance_type)
-    try:
-        with open(cloud_init_config) as f:
-            return f.read()
-    except Exception:
-        return None
-
-
-def aws_filter_instances(instances, state=None, tags=None):
-    retval = []
-    for i in instances:
-        matched = True
-        if state and i.state != state:
-            matched = False
-            continue
-        if tags:
-            for k, v in tags.items():
-                if i.tags.get(k) != v:
-                    matched = False
-                    continue
-        if i.tags.get("moz-loaned-to"):
-            # Skip loaned instances
-            matched = False
-            continue
-        if matched:
-            retval.append(i)
-    return retval
-
-
-def filter_spot_instances(instances):
-    return [i for i in instances if i.spot_instance_request_id]
-
-
-def filter_ondemand_instances(instances):
-    return [i for i in instances if i.spot_instance_request_id is None]
-
-
-def filter_instances_launched_since(instances, launched_since):
-    """Returns a list of instances that were launched since `launched_since` (a
-    timestamp)"""
-    retval = []
-    for i in instances:
-        d = iso8601.parse_date(i.launch_time)
-        t = calendar.timegm(d.utctimetuple())
-        if t > launched_since:
-            retval.append(i)
-    return retval
-
-
-def aws_get_fresh_instances(instances, slaveset):
-    if slaveset:
-        # jaccuzied slaves, use shorter delay
-        since = time.time() - FRESH_INSTANCE_DELAY_JACUZZI
-    else:
-        since = time.time() - FRESH_INSTANCE_DELAY
-    return filter_instances_launched_since(instances, since)
-
-
-def reduce_by_freshness(count, instances, moz_instance_type, slaveset):
-    fresh = aws_get_fresh_instances(instances, slaveset)
-    num_fresh = len(fresh)
-    log.debug("%i running (%i fresh), slaveset %s", len(instances), num_fresh,
-              slaveset)
-    # TODO: This logic is probably too simple
-    # Reduce the number of required slaves by the number of freshly
-    # started instaces, plus 10% of those that have been running a
-    # while
-    reduce_by = num_fresh
-    if not slaveset:
-        # if not in jacuzzi, reduce by 10% of already running instances
-        num_old = len(instances) - num_fresh
-        reduce_by += num_old / 10
-    # log.debug("reducing required count for %s %s %s "
-    log.debug("reducing required count for %s by %i (need: %i, running: %i) "
-              "slaveset: %s", moz_instance_type, reduce_by, count,
-              len(instances), slaveset)
-    return max(0, count - reduce_by)
-
-
-def distribute_in_region(count, regions, region_priorities):
-    """Distributes a number accordong to priorities.
-    Returns a dictionary keyed by region."""
-    rv = {}
-    # filter out not used regions
-    region_priorities = dict((k, v) for k, v in region_priorities.iteritems()
-                             if k in regions)
-    mass = sum(region_priorities.values())
-    for r in regions:
-        if r not in region_priorities:
-            continue
-        rv[r] = count * region_priorities[r] / mass
-    # rounding leftower goes to the region with highest priority
-    total = sum(rv.values())
-    if count - total > 0:
-        best_region = sorted(region_priorities.items(), key=lambda i: i[1],
-                             reverse=True)[0][0]
-        rv[best_region] += count - total
-    return rv
-
-
-@lru_cache(10)
-def load_instance_config(moz_instance_type):
-    return json.load(open(os.path.join(INSTANCE_CONFIGS_DIR,
-                                       moz_instance_type)))
-
-
-def jacuzzi_suffix(slaveset):
-    if slaveset:
-        return "jacuzzied"
-    else:
-        return "not_jacuzzied"
-
-
-def get_buildslave_instances(region, moz_types):
-    # Look for running `moz_types` instances with moz-state=ready
-    conn = get_aws_connection(region)
-    instances = conn.get_only_instances(filters={
-        'tag:moz-state': 'ready',
-        'instance-state-name': 'running',
-    })
-
-    retval = []
-    for i in instances:
-        if i.tags.get("moz-type") in moz_types and \
-                i.tags.get("moz-state") == "ready" and \
-                not i.tags.get("moz-loaned-to"):
-            retval.append(i)
-
-    return retval
-
-
-def get_impaired_instance_ids(region):
-    conn = get_aws_connection(region)
-    impaired = conn.get_all_instance_status(
-        filters={'instance-status.status': 'impaired'})
-    return [i.id for i in impaired]
deleted file mode 100644
--- a/cloudtools/aws/ami.py
+++ /dev/null
@@ -1,171 +0,0 @@
-import time
-import logging
-import xml.dom.minidom
-import os
-from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
-from fabric.api import run, put, cd
-
-from . import AMI_CONFIGS_DIR, wait_for_status, get_aws_connection, \
-    get_s3_connection
-
-log = logging.getLogger(__name__)
-
-
-def ami_cleanup(mount_point, distro, remove_extra=None):
-    remove_extra = remove_extra or []
-    remove = [
-        "root/*.sh",
-        "root/*.log",
-        "root/userdata",
-        "var/lib/puppet",
-        "etc/init.d/puppet"
-    ]
-    with cd(mount_point):
-        for e in remove + remove_extra:
-            run('rm -rf %s' % (e,))
-        run("sed -i -e 's/127.0.0.1.*/127.0.0.1 localhost/g' etc/hosts")
-        put("%s/fake_puppet.sh" % AMI_CONFIGS_DIR,
-            "usr/sbin/fake_puppet.sh", mirror_local_mode=True)
-        # replace puppet init with our script
-        if distro == "ubuntu":
-            put("%s/fake_puppet.conf" % AMI_CONFIGS_DIR,
-                "etc/init/puppet.conf", mirror_local_mode=True)
-            run("echo localhost > etc/hostname")
-        else:
-            run("ln -sf /usr/sbin/fake_puppet.sh etc/init.d/puppet")
-            run('echo "NETWORKING=yes" > etc/sysconfig/network')
-
-
-def volume_to_ami(volume, ami_name, arch, virtualization_type,
-                  root_device_name, tags, kernel_id=None):
-    log.info('Creating a snapshot')
-    snap = volume.create_snapshot(ami_name)
-    wait_for_status(snap, "status", "completed", "update")
-    snap.add_tag("Name", ami_name)
-
-    bdm = BlockDeviceMapping()
-    bdm[root_device_name] = BlockDeviceType(snapshot_id=snap.id)
-
-    log.info('Creating AMI')
-
-    ami_id = volume.connection.register_image(
-        ami_name,
-        ami_name,
-        architecture=arch,
-        kernel_id=kernel_id,
-        root_device_name=root_device_name,
-        block_device_map=bdm,
-        virtualization_type=virtualization_type,
-    )
-    log.info('Waiting...')
-    while True:
-        try:
-            ami = volume.connection.get_image(ami_id)
-            ami.add_tag('Name', ami_name)
-            ami.add_tag('moz-created', int(time.time()))
-            for tag, value in tags.iteritems():
-                ami.add_tag(tag, value)
-            log.info('AMI created')
-            log.info('ID: {id}, name: {name}'.format(id=ami.id, name=ami.name))
-            break
-        except:
-            log.info('Wating for AMI')
-            time.sleep(10)
-    wait_for_status(ami, "state", "available", "update")
-    return ami
-
-
-def copy_ami(source_ami, region_to_copy):
-    log.info("Copying %s to %s", source_ami, region_to_copy)
-    conn = get_aws_connection(region_to_copy)
-    ami_copy = conn.copy_image(source_ami.region.name, source_ami.id,
-                               source_ami.name, source_ami.description)
-    while True:
-        try:
-            new_ami = conn.get_image(ami_copy.image_id)
-            for tag, value in source_ami.tags.iteritems():
-                new_ami.add_tag(tag, value)
-            new_ami.update()
-            log.info('AMI created')
-            log.info('ID: {id}, name: {name}'.format(id=new_ami.id,
-                                                     name=new_ami.name))
-            break
-        except:
-            log.info('Wating for AMI')
-            time.sleep(10)
-    return new_ami
-
-
-def get_spot_amis(region, tags, name_glob="spot-*", root_device_type=None):
-    conn = get_aws_connection(region)
-    filters = {"state": "available"}
-    for tag, value in tags.iteritems():
-        filters["tag:%s" % tag] = value
-    # override Name tag
-    filters["tag:Name"] = name_glob
-    if root_device_type:
-        filters["root-device-type"] = root_device_type
-    avail_amis = conn.get_all_images(owners=["self"], filters=filters)
-    return sorted(avail_amis, key=lambda ami: ami.tags.get("moz-created"))
-
-
-def delete_ebs_ami(ami):
-    snap_id = ami.block_device_mapping[ami.root_device_name].snapshot_id
-    snap = ami.connection.get_all_snapshots(snapshot_ids=[snap_id])[0]
-    log.warn("Deleting EBS-backed AMI %s (%s)", ami, ami.tags.get("Name"))
-    ami.deregister()
-    log.warn("Deleting %s (%s)", snap, snap.description)
-    snap.delete()
-
-
-def delete_instance_store_ami(ami):
-    bucket, location = ami.location.split("/", 1)
-    folder = os.path.dirname(location)
-    conn = get_s3_connection()
-    bucket = conn.get_bucket(bucket)
-    key = bucket.get_key(location)
-    manifest = key.get_contents_as_string()
-    dom = xml.dom.minidom.parseString(manifest)
-    files = [f.firstChild.nodeValue for f in
-             dom.getElementsByTagName("filename")]
-    to_delete = [os.path.join(folder, f) for f in files] + [location]
-    log.warn("Deleting S3-backed %s (%s)", ami, ami.tags.get("Name"))
-    ami.deregister()
-    log.warn("Deleting files from S3: %s", to_delete)
-    bucket.delete_keys(to_delete)
-
-
-def delete_ami(ami, dry_run=False):
-    if dry_run:
-        log.warn("Dry run: would delete %s", ami)
-        return
-    if ami.root_device_type == "ebs":
-        delete_ebs_ami(ami)
-    elif ami.root_device_type == "instance-store":
-        delete_instance_store_ami(ami)
-
-
-def delete_old_amis(region, tags, keep_last, root_device_type="ebs",
-                    dry_run=False):
-    amis = get_spot_amis(region, tags, root_device_type=root_device_type)
-    if len(amis) > keep_last:
-        if keep_last == 0:
-            amis_to_delete = amis
-        else:
-            amis_to_delete = amis[:-keep_last]
-
-        for a in amis_to_delete:
-            delete_ami(a, dry_run)
-    else:
-        log.info("Nothing to delete")
-
-
-def get_ami(region, moz_instance_type, root_device_type=None):
-    """Returns a list of AMIs sorted by creation time, reversed.
-    root_device type can be either "ebs" or "instance-store"
-    virtualization_type can be either "hvm" or "paravirtual"""
-    spot_amis = get_spot_amis(region=region,
-                              tags={"moz-type": moz_instance_type},
-                              root_device_type=root_device_type)
-    last_ami = spot_amis[-1]
-    return last_ami
deleted file mode 100644
--- a/cloudtools/aws/instance.py
+++ /dev/null
@@ -1,329 +0,0 @@
-import os
-import uuid
-import logging
-import time
-import random
-import StringIO
-from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
-from boto.ec2.networkinterface import NetworkInterfaceSpecification, \
-    NetworkInterfaceCollection
-from fabric.api import run, sudo, put
-from fabric.context_managers import cd
-from ..fabric import setup_fabric_env
-from ..dns import get_ip
-from . import wait_for_status, AMI_CONFIGS_DIR, get_aws_connection, \
-    get_user_data_tmpl
-from .vpc import get_subnet_id, ip_available, get_vpc
-from boto.exception import BotoServerError, EC2ResponseError
-
-log = logging.getLogger(__name__)
-
-
-def run_instance(region, hostname, config, key_name, user='root',
-                 key_filename=None, dns_required=False):
-    conn = get_aws_connection(region)
-    bdm = None
-    if 'device_map' in config:
-        bdm = BlockDeviceMapping()
-        for device, device_info in config['device_map'].items():
-            bdm[device] = BlockDeviceType(size=device_info['size'],
-                                          delete_on_termination=True)
-    interfaces = None
-    if dns_required:
-        interfaces = make_instance_interfaces(
-            region=region, hostname=hostname, ignore_subnet_check=True,
-            avail_subnets=None, security_groups=[], use_public_ip=True)
-
-    reservation = conn.run_instances(
-        image_id=config['ami'],
-        key_name=key_name,
-        instance_type=config['instance_type'],
-        block_device_map=bdm,
-        client_token=str(uuid.uuid4())[:16],
-        network_interfaces=interfaces,
-    )
-
-    instance = reservation.instances[0]
-    log.info("instance %s created, waiting to come up", instance)
-    # Wait for the instance to come up
-    wait_for_status(instance, "state", "running", "update")
-    setup_fabric_env(instance=instance, user=user, abort_on_prompts=True,
-                     disable_known_hosts=True, key_filename=key_filename)
-
-    # wait until the instance is responsive
-    while True:
-        try:
-            if run('date').succeeded:
-                break
-        except:
-            log.debug('hit error waiting for instance to come up')
-        time.sleep(10)
-
-    instance.add_tag('Name', hostname.split(".")[0])
-    instance.add_tag('FQDN', hostname)
-    # Overwrite root's limited authorized_keys
-    if user != 'root':
-        sudo("cp -f ~%s/.ssh/authorized_keys "
-             "/root/.ssh/authorized_keys" % user)
-        sudo("sed -i -e '/PermitRootLogin/d' "
-             "-e '$ a PermitRootLogin without-password' /etc/ssh/sshd_config")
-        sudo("service sshd restart || service ssh restart")
-        sudo("sleep 10")
-    return instance
-
-
-def assimilate_instance(instance, config, ssh_key, instance_data, deploypass,
-                        chroot="", reboot=True):
-    """Assimilate hostname into our collective
-
-    What this means is that hostname will be set up with some basic things like
-    a script to grab AWS user data, and get it talking to puppet (which is
-    specified in said config).
-    """
-
-    def run_chroot(cmd, *args, **kwargs):
-        if chroot:
-            run("chroot {} {}".format(chroot, cmd), *args, **kwargs)
-        else:
-            run(cmd, *args, **kwargs)
-
-    distro = config.get('distro', '')
-    if distro.startswith('win'):
-        return assimilate_windows(instance, config, instance_data)
-
-    setup_fabric_env(instance=instance, key_filename=ssh_key)
-
-    # Sanity check
-    run("date")
-
-    # Set our hostname
-    hostname = "{hostname}".format(**instance_data)
-    log.info("Bootstrapping %s...", hostname)
-    run_chroot("hostname %s" % hostname)
-    if distro in ('ubuntu', 'debian'):
-        run("echo {hostname} > {chroot}/etc/hostname".format(hostname=hostname,
-                                                             chroot=chroot))
-
-    # Resize the file systems
-    # We do this because the AMI image usually has a smaller filesystem than
-    # the instance has.
-    if 'device_map' in config:
-        for device, mapping in config['device_map'].items():
-            if not mapping.get("skip_resize"):
-                run('resize2fs {dev}'.format(dev=mapping['instance_dev']))
-
-    # Set up /etc/hosts to talk to 'puppet'
-    hosts = ['127.0.0.1 %s localhost' % hostname,
-             '::1 localhost6.localdomain6 localhost6']
-    hosts = StringIO.StringIO("\n".join(hosts) + "\n")
-    put(hosts, "{}/etc/hosts".format(chroot))
-
-    if distro in ('ubuntu', 'debian'):
-        put('%s/releng-public.list' % AMI_CONFIGS_DIR,
-            '{}/etc/apt/sources.list'.format(chroot))
-        run_chroot("apt-get update")
-        run_chroot("apt-get install -y --allow-unauthenticated "
-                   "puppet cloud-init")
-        run_chroot("apt-get clean")
-    else:
-        # Set up yum repos
-        run('rm -f {}/etc/yum.repos.d/*'.format(chroot))
-        put('%s/releng-public.repo' % AMI_CONFIGS_DIR,
-            '{}/etc/yum.repos.d/releng-public.repo'.format(chroot))
-        run_chroot('yum clean all')
-        run_chroot('yum install -q -y puppet cloud-init')
-
-    run_chroot("wget -O /root/puppetize.sh "
-               "https://hg.mozilla.org/build/puppet/"
-               "raw-file/production/modules/puppet/files/puppetize.sh")
-    run_chroot("chmod 755 /root/puppetize.sh")
-    put(StringIO.StringIO(deploypass), "{}/root/deploypass".format(chroot))
-    put(StringIO.StringIO("exit 0\n"),
-        "{}/root/post-puppetize-hook.sh".format(chroot))
-
-    puppet_master = random.choice(instance_data["puppet_masters"])
-    log.info("Puppetizing %s, it may take a while...", hostname)
-    run_chroot("env PUPPET_SERVER=%s /root/puppetize.sh" % puppet_master)
-
-    if "buildslave_password" in instance_data:
-        # Set up a stub buildbot.tac
-        run_chroot("sudo -u cltbld /tools/buildbot/bin/buildslave create-slave "
-                   "/builds/slave {buildbot_master} {name} "
-                   "{buildslave_password}".format(**instance_data))
-    if instance_data.get("hg_bundles"):
-        unbundle_hg(instance_data['hg_bundles'])
-    if instance_data.get("s3_tarballs"):
-        unpack_tarballs(instance_data["s3_tarballs"])
-    if instance_data.get("hg_repos"):
-        share_repos(instance_data["hg_repos"])
-
-    run("sync")
-    run("sync")
-    if reboot:
-        log.info("Rebooting %s...", hostname)
-        run("reboot")
-
-
-def assimilate_windows(instance, config, instance_data):
-    # Wait for the instance to stop, and then clear its userData and start it
-    # again
-    log.info("waiting for instance to shut down")
-    wait_for_status(instance, 'state', 'stopped', 'update')
-
-    log.info("clearing userData")
-    instance.modify_attribute("userData", None)
-    log.info("starting instance")
-    instance.start()
-    log.info("waiting for instance to start")
-    # Wait for the instance to come up
-    wait_for_status(instance, 'state', 'running', 'update')
-
-
-def unbundle_hg(hg_bundles):
-    log.info("Cloning HG bundles")
-    hg = "/tools/python27-mercurial/bin/hg"
-    for share, bundle in hg_bundles.iteritems():
-        target_dir = '/builds/hg-shared/%s' % share
-        sudo('rm -rf {d} && mkdir -p {d}'.format(d=target_dir),
-             user="cltbld")
-        sudo('{hg} init {d}'.format(hg=hg, d=target_dir), user="cltbld")
-        hgrc = "[paths]\n"
-        hgrc += "default = https://hg.mozilla.org/%s\n" % share
-        put(StringIO.StringIO(hgrc), '%s/.hg/hgrc' % target_dir)
-        run("chown cltbld: %s/.hg/hgrc" % target_dir)
-        sudo('{hg} -R {d} unbundle {b}'.format(hg=hg, d=target_dir,
-                                               b=bundle), user="cltbld")
-    log.info("Unbundling HG repos finished")
-
-
-def unpack_tarballs(tarballs):
-    log.info("Unpacking tarballs")
-    put("%s/s3-get" % AMI_CONFIGS_DIR, "/tmp/s3-get")
-    for dest_dir, info in tarballs.iteritems():
-        bucket, key = info["bucket"], info["key"]
-        sudo("mkdir -p {d}".format(d=dest_dir), user="cltbld")
-        with cd(dest_dir):
-            sudo("python /tmp/s3-get -b {bucket} -k {key} -o - | tar xf -".format(
-                 bucket=bucket, key=key), user="cltbld")
-    run("rm -f /tmp/s3-get")
-    log.info("Unpacking tarballs finished")
-
-
-def share_repos(hg_repos):
-    log.info("Cloning HG repos")
-    hg = "/tools/python27-mercurial/bin/hg"
-    for share, repo in hg_repos.iteritems():
-        target_dir = '/builds/hg-shared/%s' % share
-        parent_dir = os.path.dirname(target_dir.rstrip("/"))
-        sudo('rm -rf {d} && mkdir -p {p}'.format(d=target_dir, p=parent_dir),
-             user="cltbld")
-        sudo('{hg} clone -U {repo} {d}'.format(hg=hg, repo=repo, d=target_dir),
-             user="cltbld")
-    log.info("Cloning HG repos finished")
-
-
-def make_instance_interfaces(region, hostname, ignore_subnet_check,
-                             avail_subnets, security_groups, use_public_ip):
-    vpc = get_vpc(region)
-    ip_address = get_ip(hostname)
-    subnet_id = None
-
-    if ip_address:
-        log.info("Using IP %s", ip_address)
-        s_id = get_subnet_id(vpc, ip_address)
-        log.info("subnet %s", s_id)
-        if ignore_subnet_check:
-            log.info("ignore_subnet_check, usning %s", s_id)
-            subnet_id = s_id
-        elif s_id in avail_subnets:
-            if ip_available(region, ip_address):
-                subnet_id = s_id
-            else:
-                log.warning("%s already assigned" % ip_address)
-
-    if not ip_address or not subnet_id:
-        ip_address = None
-        log.info("Picking random IP")
-        subnet_id = random.choice(avail_subnets)
-    interface = NetworkInterfaceSpecification(
-        subnet_id=subnet_id, private_ip_address=ip_address,
-        delete_on_termination=True,
-        groups=security_groups,
-        associate_public_ip_address=use_public_ip
-    )
-    return NetworkInterfaceCollection(interface)
-
-
-def create_block_device_mapping(ami, device_map):
-    bdm = BlockDeviceMapping()
-    for device, device_info in device_map.items():
-        if ami.root_device_type == "instance-store" and \
-                not device_info.get("ephemeral_name"):
-            # EBS is not supported by S3-backed AMIs at request time
-            # EBS volumes can be attached when an instance is running
-            continue
-        bd = BlockDeviceType()
-        if device_info.get('size'):
-            bd.size = device_info['size']
-        if ami.root_device_name == device:
-            ami_size = ami.block_device_mapping[device].size
-            if ami.virtualization_type == "hvm":
-                # Overwrite root device size for HVM instances, since they
-                # cannot be resized online
-                bd.size = ami_size
-            elif device_info.get('size'):
-                # make sure that size is enough for this AMI
-                assert ami_size <= device_info['size'], \
-                    "Instance root device size cannot be smaller than AMI " \
-                    "root device"
-        if device_info.get("delete_on_termination") is not False:
-            bd.delete_on_termination = True
-        if device_info.get("ephemeral_name"):
-            bd.ephemeral_name = device_info["ephemeral_name"]
-        if device_info.get("volume_type"):
-            bd.volume_type = device_info["volume_type"]
-
-        bdm[device] = bd
-    return bdm
-
-
-def user_data_from_template(moz_instance_type, fqdn):
-    user_data = get_user_data_tmpl(moz_instance_type)
-    if user_data:
-        user_data = user_data.format(fqdn=fqdn,
-                                     moz_instance_type=moz_instance_type)
-
-    return user_data
-
-
-def tag_ondemand_instance(instance, name, fqdn, moz_instance_type):
-    tags = {"Name": name, "FQDN": fqdn, "moz-type": moz_instance_type,
-            "moz-state": "ready"}
-    # Sleep for a little bit to prevent us hitting
-    # InvalidInstanceID.NotFound right away
-    time.sleep(0.5)
-    max_tries = 10
-    sleep_time = 5
-    for i in range(max_tries):
-        try:
-            for tag, value in tags.iteritems():
-                instance.add_tag(tag, value)
-            return instance
-        except EC2ResponseError, e:
-            if e.code == "InvalidInstanceID.NotFound":
-                if i < max_tries - 1:
-                    # Try again
-                    log.debug("waiting for instance")
-                    time.sleep(sleep_time)
-                    sleep_time = min(30, sleep_time * 1.5)
-                    continue
-        except BotoServerError, e:
-            if e.code == "RequestLimitExceeded":
-                if i < max_tries - 1:
-                    # Try again
-                    log.debug("request limit exceeded; sleeping and "
-                              "trying again")
-                    time.sleep(sleep_time)
-                    sleep_time = min(30, sleep_time * 1.5)
-                    continue
-            raise
deleted file mode 100644
--- a/cloudtools/aws/sanity.py
+++ /dev/null
@@ -1,432 +0,0 @@
-"""aws_slave module"""
-
-import os
-import json
-import time
-import logging
-import urllib2
-import socket
-import calendar
-from datetime import timedelta
-from cloudtools.aws import parse_aws_time
-
-log = logging.getLogger(__name__)
-
-BUILDAPI_URL_JSON = "http://buildapi.pvt.build.mozilla.org/buildapi/recent/{slave_name}?format=json"
-BUILDAPI_URL = "http://buildapi.pvt.build.mozilla.org/buildapi/recent/{slave_name}"
-
-SLAVE_TAGS = ('try-linux64', 'tst-linux32', 'tst-linux64', 'tst-emulator64',
-              'bld-linux64')
-
-KNOWN_TYPES = ('puppetmaster', 'buildbot-master', 'dev-linux64', 'infra',
-               'bld-linux64', 'try-linux64', 'tst-linux32', 'tst-linux64',
-               'tst-emulator64', 'tst-win64', 'dev', 'packager',
-               'vcssync', "signing")
-
-EXPECTED_MAX_UPTIME = {
-    "puppetmaster": "meh",
-    "buildbot-master": "meh",
-    "dev": "meh",
-    "infra": "meh",
-    "vcssync": "meh",
-    "dev-linux64": 8,
-    "bld-linux64": 24,
-    "try-linux64": 12,
-    "tst-linux32": 12,
-    "tst-linux64": 12,
-    "tst-emulator64": 12,
-    "default": 4
-}
-
-EXPECTED_MAX_DOWNTIME = {
-    "puppetmaster": 0,
-    "buildbot-master": 0,
-    "dev": 0,
-    "infra": 0,
-    "vcssync": 0,
-    "dev-linux64": 72,
-    "bld-linux64": 72,
-    "try-linux64": 72,
-    "tst-linux32": 72,
-    "tst-linux64": 72,
-    "tst-emulator64": 72,
-    "packager": "meh",
-    "default": 24
-}
-
-
def timedelta_to_time_string(timeout):
    """Render a duration given in seconds as ``[Dd ]Hh:Mm``.

    The sentinel value ``'meh'`` (meaning "no meaningful timeout")
    renders as ``'N/A'``; the day component is omitted when it is zero.
    """
    if timeout == 'meh':
        return 'N/A'
    delta = timedelta(seconds=timeout)
    hours, remainder = divmod(delta.seconds, 3600)
    minutes = remainder // 60
    rendered = "{0}h:{1}m".format(hours, minutes)
    if delta.days:
        rendered = "{0}d {1}".format(delta.days, rendered)
    return rendered
-
-
def launch_time_to_epoch(launch_time):
    """Convert an AWS launch_time string (ISO 8601, e.g.
    '2014-01-01T00:00:00.000Z') into a UTC epoch timestamp.

    Only the first 19 characters (date and time) are parsed; fractional
    seconds and the timezone suffix are ignored.
    """
    parsed = time.strptime(launch_time[:19], '%Y-%m-%dT%H:%M:%S')
    return calendar.timegm(parsed)
-
-
class AWSInstance(object):
    """Wrapper around a boto EC2 instance adding moz-* tag helpers and
    uptime/downtime sanity checks driven by the EXPECTED_MAX_UPTIME and
    EXPECTED_MAX_DOWNTIME tables.

    Fixes a formatting bug in unknown_type_message(): the original
    template repeated index {2} (region) where the moz-type argument
    ({3}) was intended, so the type was never shown.
    """

    def __init__(self, instance, events_dir=None):
        # events_dir: optional directory with per-instance cloudtrail
        # event logs; without it downtime estimates fall back to the
        # launch-time based value.
        self.instance = instance
        self.now = time.time()
        self.timeout = None
        self.last_job_endtime = None
        self.max_downtime = self._get_timeout(EXPECTED_MAX_DOWNTIME)
        self.max_uptime = self._get_timeout(EXPECTED_MAX_UPTIME)
        self.events_dir = events_dir

    def _get_tag(self, tag_name, default=None):
        """Return the *tag_name* tag from the instance tags."""
        instance = self.instance
        return instance.tags.get(tag_name, default)

    def _get_timeout(self, timeouts):
        """Return the timeout in seconds from the *timeouts* table."""
        default_timeout = timeouts['default']
        instance_type = self.get_instance_type()
        timeout = timeouts.get(instance_type, default_timeout)
        if timeout == "meh":
            # "meh" means no meaningful limit: push the timeout into the
            # future so comparisons never trigger.
            # NOTE(review): this branch stores a *timestamp* (now + 1h)
            # while the other stores a duration in seconds — confirm
            # this asymmetry is intended.
            self.timeout = self.now + 3600
            log.debug('{0}: timeout = {1}'.format(self.get_id(), self.timeout))
        else:
            self.timeout = int(timeout) * 3600
            log.debug('{0}: timeout = {1}'.format(self.get_id(), self.timeout))
        return self.timeout

    def _get_bug_string(self):
        """Return the bug string (moz-bug tag)."""
        return self._get_tag('moz-bug', 'an unknown bug')

    def _get_loaned_string(self):
        """Return the loaned-to string (moz-loaned-to tag)."""
        return self._get_tag('moz-loaned-to', 'unknown')

    def _get_state(self):
        """Return the current state from instance.state."""
        instance = self.instance
        return instance.state

    def _get_moz_state(self):
        """Return the moz-state string (moz-state tag)."""
        return self._get_tag("moz-state")

    def _get_moz_type(self):
        """Return the moz-type string (moz-type tag)."""
        return self._get_tag("moz-type")

    def _get_uptime_timestamp(self):
        """Return the uptime in seconds since launch."""
        instance = self.instance
        return time.time() - launch_time_to_epoch(instance.launch_time)

    def get_uptime(self):
        """Return the uptime in human readable format."""
        return timedelta_to_time_string(self._get_uptime_timestamp())

    def get_name(self):
        """Return the Name tag."""
        return self._get_tag('Name')

    def get_instance_type(self):
        """Return the instance type (moz-type tag)."""
        return self._get_tag('moz-type')

    def get_id(self):
        """Return the id of the instance."""
        instance = self.instance
        return instance.id

    def get_region(self):
        """Return the name of the current region."""
        instance = self.instance
        region = instance.region
        return region.name

    def is_long_running(self):
        """Return True if this instance has been running longer than its
        expected max uptime (loaned instances are exempt)."""
        if not self.is_running():
            return False
        if self.is_loaned():
            return False
        my_uptime = self._get_uptime_timestamp()
        return my_uptime > self.max_uptime

    def is_long_stopped(self):
        """Return True if this instance has been stopped longer than its
        expected max downtime (loaned instances are exempt)."""
        if self.is_running():
            return False
        if self.is_loaned():
            return False
        # get the uptime and assume it has been always down...
        my_downtime = self._get_uptime_timestamp()
        if self.events_dir:
            # ... unless we have the local logs
            my_downtime = self.get_stop_time_from_logs()
        return my_downtime > self.max_downtime

    def is_lazy(self):
        """Return True if this instance has been online for a while and
        is not getting any jobs.  Only meaningful for slaves; implemented
        in the Slave subclass, always False here."""
        return False

    def is_running(self):
        """Return True if the instance is running."""
        return self._get_state() == 'running'

    def is_stopped(self):
        """Return True if the instance is stopped."""
        return self._get_state() == 'stopped'

    def is_loaned(self):
        """Return a truthy value (the moz-loaned-to tag) if the instance
        is loaned, None otherwise."""
        return self._get_tag("moz-loaned-to")

    def bad_type(self):
        """Return True if the moz-type is not in KNOWN_TYPES."""
        bad_type = False
        if not self._get_moz_type() in KNOWN_TYPES:
            bad_type = True
        return bad_type

    def bad_state(self):
        """Return True if the moz-state is not 'ready'."""
        bad_state = False
        if self._get_moz_state() != 'ready':
            bad_state = True
        return bad_state

    def loaned_message(self):
        """If the machine is loaned, return the following message:
           Loaned to USER in BUG, STATUS
           where:
           USER is the content of moz-loaned-to tag,
           BUG is the content of moz-bug tag (unknown if N/A),
           STATUS is the uptime if the machine is running, 'stopped'
           otherwise.  Returns an empty string if not loaned."""
        # instance_name (instance id, region) followed by:
        # Loaned to xxxxxxx@mozilla.com in an unknown bug, stopped
        # or Loaned to xxxxxx@mozilla.com in bug xxx, up for x hours
        msg = ""
        if not self.is_loaned():
            return msg
        loaned_to = self._get_loaned_string()
        bug = self._get_bug_string()
        status = 'stopped'
        if not self.is_stopped():
            status = "up for {0}".format(self.get_uptime())
        msg = "{me} Loaned to: {loaned_to}, in {bug}, {status}".format(
              me=self.__repr__(),
              loaned_to=loaned_to,
              bug=bug,
              status=status)
        return msg

    def stopped_message(self):
        """If the instance is stopped, return:
           instance_name (instance id, region) down for X hours
           (empty string otherwise)."""
        if not self.is_stopped():
            return ""
        stop_time = self.get_stop_time_from_logs()
        if not stop_time:
            stop_time = self.get_uptime()
        else:
            stop_time = timedelta_to_time_string(stop_time)
        return "{0} down for {1}".format(self.__repr__(), stop_time)

    def running_message(self):
        """If the instance is running, return:
           instance_name (instance id, region) up for X hours
           (empty string otherwise)."""
        if not self.is_running():
            return ""
        return "{0} up for {1}".format(self.__repr__(), self.get_uptime())

    def unknown_state_message(self):
        """Return "name (id, region) Unknown state: 'REASON'" where
        REASON is the content of the moz-state tag."""
        return "{0} ({1}, {2}) Unknown state: '{3}'".format(
            self.get_name(), self.get_id(), self.get_region(),
            self._get_moz_state())

    def unknown_type_message(self):
        """Return "name (id, region) Unknown type: 'TYPE'" where TYPE is
        the content of the moz-type tag."""
        # Fixed: the original used '{2}' (region) instead of '{3}', so
        # the moz-type argument was never rendered.
        return "{0} ({1}, {2}) Unknown type: '{3}'".format(
            self.get_name(), self.get_id(), self.get_region(),
            self._get_moz_type())

    def longrunning_message(self):
        """Return running_message() with "(no info from buildapi)"
        appended."""
        message = self.running_message()
        return " ".join([message, "(no info from buildapi)"])

    def _event_log_file(self, event):
        """Return the path of this instance's json log for *event* from
        the events directory, or None when unavailable."""
        if not self.events_dir:
            return
        instance_json = os.path.join(self.events_dir, event, self.get_id())
        if os.path.exists(instance_json):
            return instance_json
        return

    def _get_stop_log(self):
        """Return the cloudtrail log file of the last stop event for the
        current instance."""
        return self._event_log_file('StopInstances')

    def _get_start_log(self):
        """Return the cloudtrail log file of the last start event for the
        current instance."""
        # currently start events are not processed, so it always returns None
        return self._event_log_file('StartInstances')

    def _get_terminate_log(self):
        """Return the cloudtrail log file of the last terminate event for
        the current instance."""
        # currently terminate events are not processed, so it always
        # returns None
        return self._event_log_file('TerminateInstances')

    def _get_time_from_json(self, json_file):
        """Read a json event log and return the hours elapsed since its
        eventTime, or None when the file is missing or unparsable."""
        try:
            with open(json_file) as json_f:
                data = json.loads(json_f.read())
                event = parse_aws_time(data['eventTime'])
                now = time.time()
                tdelta = (now - event)/3600
                return tdelta
        except TypeError:
            # json_file is None; aws_sanity_checker has no events-dir set
            pass
        except IOError:
            # file does not exist
            pass
        except ValueError:
            # bad json file
            log.debug('JSON cannot load %s', json_file)

    def get_stop_time_from_logs(self):
        """Return the seconds since the last stop event, or None if no
        stop event is available."""
        stop_time = self._get_time_from_json(self._get_stop_log())
        if stop_time:
            # stop time could be None, when there are no stop events.
            # _get_time_from_json returns hours; convert to seconds so it
            # is comparable with self.max_downtime (also seconds).
            stop_time = stop_time * 3600
        return stop_time

    def __repr__(self):
        # returns:
        # try-linux64-ec2-044 (i-a8ccfb88, us-east-1)
        return "{name} ({instance_id}, {region})".format(
            name=self.get_name(),
            instance_id=self.get_id(),
            region=self.get_region())
-
-
class Slave(AWSInstance):
    """An AWS instance that takes buildbot jobs; extends AWSInstance
    with buildapi queries to detect lazy (idle) slaves.

    Fixes get_last_job_endtime(): the original tested the *bound method*
    ``self.get_name`` against ['tmp', None] instead of calling it, so
    the tmp/unnamed short-circuit never fired.
    """

    def when_last_job_ended(self):
        """Convert get_last_job_endtime() into a human readable delta."""
        last_job = self.get_last_job_endtime()
        if last_job and last_job != 'meh':
            delta = self.now - last_job
            last_job = timedelta_to_time_string(delta)
        return last_job

    def get_last_job_endtime(self, timeout=5):
        """Return the endtime of the most recent job from buildapi.

        Falls back to ``self.now`` (so the slave never looks idle) for
        slaves unknown to buildapi and on network/parse errors.  The
        result is cached on the instance.
        """
        # discard tmp and None instances as they are not on buildapi.
        # Fixed: call get_name() — the original compared the bound
        # method itself, which is never in the list.
        if self.get_name() in ['tmp', None]:
            self.last_job_endtime = self.now
            return self.last_job_endtime
        if self.last_job_endtime:
            return self.last_job_endtime
        url = self.get_buildapi_json_url()
        endtime = self.now
        try:
            json_data = urllib2.urlopen(url, timeout=timeout)
            data = json.load(json_data)
            try:
                endtime = max([job['endtime'] for job in data])
                log.debug("{instance}: max endtime: {endtime}".format(
                    instance=self.get_id(), endtime=endtime))
            except TypeError:
                # somehow endtime is not set
                # ignore and use None
                log.debug("{instance}: endtime is not set".format(
                    instance=self.get_id()))
            except ValueError:
                # no jobs completed, ignore
                log.debug("{instance}: no jobs completed".format(
                    instance=self.get_id()))
        except urllib2.HTTPError as error:
            log.debug('http error {0}, url: {1}'.format(error.code, url))
        except urllib2.URLError as error:
            # in python < 2.7 this exception intercepts timeouts
            log.debug('url: {0} - error {1}'.format(url, error.reason))
        # in python > 2.7, timeout is a socket.timeout exception
        except socket.timeout as error:
            log.debug('connection timed out, url: {0}'.format(url))
        self.last_job_endtime = endtime
        return self.last_job_endtime

    def get_buildapi_url(self):
        """Return buildapi's html url for this slave."""
        return BUILDAPI_URL.format(slave_name=self.get_name())

    def get_buildapi_json_url(self):
        """Return buildapi's json url for this slave."""
        return BUILDAPI_URL_JSON.format(slave_name=self.get_name())

    def is_lazy(self):
        """Return True when this instance has been online for more than
        EXPECTED_MAX_UPTIME and is not taking jobs."""
        if not self.is_running():
            return False

        # get all the machines running for more than expected
        if not super(Slave, self).is_long_running():
            return False

        delta = self.now - self.get_last_job_endtime()
        if delta < self.max_uptime:
            # this instance got a job recently
            return False
        # no recent jobs, this machine is long running
        return True

    def longrunning_message(self):
        """If the slave is long running, return the running message with
        '(<delta> since last build)' appended."""
        message = self.running_message()
        if message:
            message = "{0} ({1} since last build)".format(
                message, self.when_last_job_ended())
        return message
-
-
def aws_instance_factory(instance, events_dir):
    """Wrap *instance* in a Slave when its moz-type is one of the known
    slave tags, otherwise in a plain AWSInstance."""
    wrapped = AWSInstance(instance)
    if wrapped.get_instance_type() in SLAVE_TAGS:
        return Slave(instance, events_dir)
    return wrapped
deleted file mode 100644
--- a/cloudtools/aws/spot.py
+++ /dev/null
@@ -1,371 +0,0 @@
-import logging
-import boto
-from datetime import datetime, timedelta
-from repoze.lru import lru_cache
-from . import get_aws_connection, aws_time_to_datetime
-from ..slavealloc import get_classified_slaves
-from ..jacuzzi import get_allocated_slaves
-
-CANCEL_STATUS_CODES = ["capacity-oversubscribed", "price-too-low",
-                       "capacity-not-available"]
-TERMINATED_BY_AWS_STATUS_CODES = [
-    "instance-terminated-by-price",
-    "instance-terminated-capacity-oversubscribed",
-]
-IGNORABLE_STATUS_CODES = CANCEL_STATUS_CODES + TERMINATED_BY_AWS_STATUS_CODES \
-    + ["bad-parameters", "canceled-before-fulfillment", "fulfilled",
-       "instance-terminated-by-user", "pending-evaluation",
-       "pending-fulfillment"]
-
-log = logging.getLogger(__name__)
-_spot_cache = {}
-_spot_requests = {}
-
-
def populate_spot_requests_cache(region, request_ids=None):
    """Fetch spot instance requests for *region* into the module-level
    _spot_requests cache, keyed by (region, request id).

    When some explicitly requested ids are unknown to EC2, falls back to
    fetching every spot request in the region.
    """
    log.debug("Caching spot requests in %s", region)
    conn = get_aws_connection(region)
    kwargs = {"request_ids": request_ids} if request_ids else {}
    try:
        requests = conn.get_all_spot_instance_requests(**kwargs)
    except boto.exception.EC2ResponseError:
        log.debug("Some of the requests not found, requesting all")
        requests = conn.get_all_spot_instance_requests()
    for request in requests:
        _spot_requests[region, request.id] = request
-
-
def get_spot_request(region, request_id):
    """Return the cached spot request for (region, request_id),
    repopulating the region's cache on a miss; None when unknown."""
    key = (region, request_id)
    if key not in _spot_requests:
        populate_spot_requests_cache(region)
    return _spot_requests.get(key)
-
-
def get_spot_instances(region, state="running"):
    """Return all spot-lifecycle instances in *region* in *state*."""
    log.info("Processing region %s", region)
    connection = get_aws_connection(region)
    return connection.get_only_instances(filters={
        'instance-lifecycle': 'spot',
        'instance-state-name': state,
    })
-
-
def get_instances_to_tag(region):
    """Return running spot instances in *region* that are missing any of
    the identity tags (Name, FQDN, moz-type, moz-state)."""
    log.debug("Getting all spot instances in %s...", region)
    spot_instances = get_spot_instances(region)
    log.debug("Total %s instances found", len(spot_instances))
    required_tags = ('Name', 'FQDN', 'moz-type', 'moz-state')
    queue = []
    for instance in spot_instances:
        # If one of the tags is unset/empty
        if not all(instance.tags.get(t) for t in required_tags):
            log.debug("Adding %s in %s to queue", instance, region)
            queue.append(instance)
    log.debug("Done with %s", region)
    return queue
-
-
def copy_spot_request_tags(i):
    """Copy tags from the originating spot request onto instance *i*
    (without overwriting existing instance tags) and mark it ready."""
    log.debug("Tagging %s", i)
    request = get_spot_request(i.region.name, i.spot_instance_request_id)
    if not request:
        log.debug("Cannot find spot request for %s", i)
        return
    new_tags = {}
    for tag_name, tag_value in sorted(request.tags.items()):
        if tag_name not in i.tags:
            log.info("Adding '%s' tag with '%s' value to %s", tag_name,
                     tag_value, i)
            new_tags[tag_name] = tag_value
    new_tags["moz-state"] = "ready"
    i.connection.create_tags([i.id], new_tags)
-
-
@lru_cache(10)
def get_active_spot_requests(region):
    """Return spot requests in *region* in the 'open' or 'active' state
    (results are cached per region)."""
    log.debug("getting all spot requests for %s", region)
    connection = get_aws_connection(region)
    return connection.get_all_spot_instance_requests(
        filters={'state': ['open', 'active']})
-
-
@lru_cache(100)
def get_spot_requests(region, instance_type, availability_zone):
    """Return the active/open spot requests in *region* restricted to
    one instance type and availability zone (cached)."""
    log.debug("getting filtered spot requests for %s (%s)", availability_zone,
              instance_type)
    requests = get_active_spot_requests(region)
    if not requests:
        return []
    return [r for r in requests
            if r.launch_specification.instance_type == instance_type and
            r.launched_availability_zone == availability_zone]
-
-
def get_spot_requests_for_moztype(region, moz_instance_type):
    """Return all open and active spot requests in *region* whose
    moz-type tag equals *moz_instance_type*."""
    requests = get_active_spot_requests(region)
    return [r for r in requests
            if r.tags.get('moz-type') == moz_instance_type]
-
-
@lru_cache(100)
def usable_spot_choice(choice, minutes=15):
    """Sanity check recent spot requests for *choice*.

    Returns False when the current price already exceeds 80% of our bid,
    or when more than 10% of the matching spot requests failed; True
    otherwise (including when there is no recent history to judge by).

    Fixes the bad-ratio computation: the original wrote
    ``float(total_bad / total)``, which performs Python 2 integer
    division *before* the float() cast and is therefore 0 unless every
    request is bad — the 10% guard was a no-op.
    """
    region = choice.region
    az = choice.availability_zone
    instance_type = choice.instance_type
    bid_price = choice.bid_price
    current_price = choice.current_price
    log.debug("Sanity checking %s in %s", instance_type, az)

    # if price is higher than 80% of the bid price do not use the choice
    if current_price > bid_price * 0.8:
        log.debug("Price is higher than 80%% of ours, %s", choice)
        return False

    spot_requests = get_spot_requests(region, instance_type, az)
    if not spot_requests:
        log.debug("No available spot requests in last %sm", minutes)
        return True
    # filter out requests older than `minutes`
    # first, get the tzinfo of one of the requests
    delta = timedelta(minutes=minutes)
    recent_spot_requests = []
    for r in spot_requests:
        t = aws_time_to_datetime(r.status.update_time)
        tz = t.tzinfo
        now = datetime.now(tz)
        if t > now - delta:
            recent_spot_requests.append(r)

    if not recent_spot_requests:
        log.debug("No recent spot requests in last %sm", minutes)
        return True
    bad_statuses = CANCEL_STATUS_CODES + TERMINATED_BY_AWS_STATUS_CODES
    # NOTE(review): the bad/total ratio is computed over *all* matching
    # requests, not just the recent subset filtered above — recency only
    # gates whether the check runs at all. Confirm this is intended.
    bad_req = [r for r in spot_requests
               if r.status.code in bad_statuses or
               r.tags.get("moz-cancel-reason") in bad_statuses]
    # Do not try if bad ratio is higher than 10%
    total = len(spot_requests)
    total_bad = len(bad_req)
    log.debug("Found %s recent, %s bad", total, total_bad)
    # Cast the numerator so the division is a true division on Python 2.
    if float(total_bad) / total > 0.10:
        log.debug("Skipping %s, too many failures (%s out of %s)", choice,
                  total_bad, total)
        return False
    # All good!
    log.debug("Choice %s passes", choice)
    return True
-
-
-_avail_slave_names = {}
-
-
def get_available_slave_name(region, moz_instance_type, slaveset, is_spot,
                             all_instances):
    """Pop and return a free slave name for the given region/type.

    Maintains a per-(region, moz_instance_type, is_spot) cache of unused
    names.  On a cache miss the cache is built from slavealloc minus
    every name already used by an instance or a pending spot request,
    and the lookup is retried.  Returns None when nothing is available.
    """
    key = (region, moz_instance_type, is_spot)
    if key not in _avail_slave_names:
        # populate cache and call again
        all_slave_names = get_classified_slaves(is_spot)
        all_used_names = set(i.tags.get("Name") for i in all_instances)
        # used_spot_names contains pending too
        used_spot_names = set(r.tags.get("Name") for r in
                              get_active_spot_requests(region))
        used_names = all_used_names | used_spot_names
        _avail_slave_names[key] = (
            all_slave_names[moz_instance_type][region] - used_names)
        return get_available_slave_name(region, moz_instance_type, slaveset,
                                        is_spot, all_instances)

    available = _avail_slave_names[key]
    if not available:
        return None
    if slaveset:
        usable = available.intersection(slaveset)
    else:
        usable = available - set(get_allocated_slaves(None))
    if not usable:
        return None
    name = usable.pop()
    available.discard(name)
    return name
-
-
def get_current_spot_prices(connection, product_description, start_time=None,
                            instance_type=None, ignore_cache=False):
    """
    Get the current spot prices for the region associated with the given
    connection. This may return cached results. Pass ignore_cache=True to
    bypass the cache

    Args:
        connection (boto.ec2.Connection): connection to a region
        product_description (str): which products to restrict the spot prices
            for, e.g. "Linux/UNIX (Amazon VPC)"
        start_time (iso8601 str): get spot prices starting from this time
        instance_type (str): restrict results to this instance type, e.g.
            "m1.medium"
        ignore_cache (bool): ignore cached results

    Returns:
        A dict mapping region to a mapping of instance type to a mapping of
        availability zone to price. (phew!)
        For example:
            {'us-east-1': {'m1.medium': {'us-east-1a': 0.01}}}

    """
    region = connection.region.name
    cache_key = (region, product_description, start_time, instance_type)
    if not ignore_cache and cache_key in _spot_cache:
        log.debug("returning cached results")
        return _spot_cache[cache_key]

    if not start_time:
        # Default to 24 hours
        start_time = (datetime.utcnow() - timedelta(hours=24)).isoformat() + "Z"

    prices_by_type = {}
    token = None
    while True:
        log.debug("getting spot prices for instance_type %s from %s "
                  "(next_token %s)", instance_type, start_time, token)
        page = connection.get_spot_price_history(
            product_description=product_description,
            instance_type=instance_type,
            start_time=start_time,
            next_token=token,
        )
        token = page.next_token
        # sort newest first so the latest price per (type, az) wins and
        # the same entry is never processed twice
        ordered = sorted(page, key=lambda p: p.timestamp, reverse=True)
        log.debug("got %i results", len(ordered))
        for price in ordered:
            by_az = prices_by_type.get(price.instance_type)
            if not by_az:
                by_az = prices_by_type[price.instance_type] = {}
            if not by_az.get(price.availability_zone):
                by_az[price.availability_zone] = price.price
        if not token:
            break

    retval = {region: prices_by_type}
    _spot_cache[cache_key] = retval
    return retval
-
-
class Spot:
    """A candidate spot bid: an instance type in a region/AZ together
    with its current price, our bid price, and a performance constant
    used to weigh the price (see ``value``).

    Adds rich comparison methods: the original relied solely on
    ``__cmp__`` and the ``cmp`` builtin, which Python 3 ignores/removed,
    so ``choices.sort()`` would break there.  ``__cmp__`` is kept for
    Python 2 backward compatibility.
    """

    def __init__(self, instance_type, region, availability_zone, current_price,
                 bid_price, performance_constant):
        self.instance_type = instance_type
        self.region = region
        self.availability_zone = availability_zone
        self.current_price = current_price
        self.bid_price = bid_price
        self.performance_constant = performance_constant

    def __repr__(self):
        return "%s (%s, %s) %g (value: %g) < %g" % (
            self.instance_type, self.region, self.availability_zone,
            self.current_price, self.value, self.bid_price)

    def __str__(self):
        return self.__repr__()

    def __hash__(self):
        return hash(self.__repr__())

    @property
    def value(self):
        """Price normalized by the performance constant; lower is better."""
        return self.current_price / float(self.performance_constant)

    def __cmp__(self, other):
        # Python 2 ordering hook; ignored on Python 3.
        return cmp(self.value, other.value)

    def __eq__(self, other):
        return self.value == other.value

    def __ne__(self, other):
        return self.value != other.value

    def __lt__(self, other):
        return self.value < other.value

    def __le__(self, other):
        return self.value <= other.value

    def __gt__(self, other):
        return self.value > other.value

    def __ge__(self, other):
        return self.value >= other.value
-
-
def get_spot_choices(connections, rules, product_description, start_time=None,
                     instance_type=None):
    """Build the list of affordable Spot choices, best value first.

    Prices are gathered per region from *connections*; each bid rule is
    matched against them, skipping ignored AZs and any price above the
    rule's bid.  The result is sorted by Spot.value (ascending).
    """
    prices = {}
    for connection in connections:
        prices.update(get_current_spot_prices(connection, product_description,
                                              start_time, instance_type))
    choices = []
    for rule in rules:
        rule_type = rule["instance_type"]
        bid_price = rule["bid_price"]
        performance_constant = rule["performance_constant"]
        ignored_azs = rule.get("ignored_azs", [])
        for region, region_prices in prices.items():
            for az, price in region_prices.get(rule_type, {}).items():
                if az in ignored_azs:
                    log.debug("Ignoring AZ %s for %s becuase it is listed in "
                              " ignored_azs: %s", az, rule_type,
                              ignored_azs)
                    continue
                if price > bid_price:
                    log.debug("%s (in %s) too expensive for %s", price, az,
                              rule_type)
                    continue
                choices.append(
                    Spot(instance_type=rule_type, region=region,
                         availability_zone=az, current_price=price,
                         bid_price=bid_price,
                         performance_constant=performance_constant))
    # sort by self.value
    choices.sort()
    return choices
-
if __name__ == "__main__":
    # Ad-hoc driver: print the spot choices for a fixed set of bid rules
    # across us-west-2 and us-east-1.
    logging.basicConfig(level=logging.DEBUG, format="%(message)s")
    logging.getLogger("boto").setLevel(logging.INFO)
    connections = [get_aws_connection(r) for r in ['us-west-2', 'us-east-1']]
    # (instance_type, performance_constant, bid_price)
    bid_table = [
        ("m3.large", 0.5, 0.10),
        ("c3.xlarge", 1, 0.25),
        ("m3.xlarge", 1.1, 0.25),
        ("m3.2xlarge", 1.4, 0.25),
        ("c3.2xlarge", 1.5, 0.25),
    ]
    rules = [{"instance_type": itype,
              "performance_constant": perf,
              "bid_price": bid}
             for itype, perf, bid in bid_table]
    ret = get_spot_choices(connections, rules)
    print("\n".join(map(str, ret)))
deleted file mode 100644
--- a/cloudtools/aws/vpc.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import logging
-from IPy import IP
-from . import get_vpc, get_aws_connection
-
-log = logging.getLogger(__name__)
-
-
def get_subnet_id(vpc, ip):
    """Return the id of the subnet in *vpc* whose CIDR block contains
    *ip*, or None when no subnet matches."""
    for subnet in vpc.get_all_subnets():
        if IP(ip) in IP(subnet.cidr_block):
            return subnet.id
    return None
-
-
def ip_available(region, ip):
    """Return True when *ip* is not assigned to any instance or network
    interface in *region*."""
    conn = get_aws_connection(region)
    used_ips = [i.private_ip_address for i in conn.get_only_instances()]
    used_ips.extend(nic.private_ip_address
                    for nic in conn.get_all_network_interfaces())
    return ip not in used_ips
-
-
def get_avail_subnet(region, subnet_ids, availability_zone=None):
    """Return the id of the subnet among *subnet_ids* with the most free
    IP addresses, optionally restricted to *availability_zone*.

    Returns None (and logs) when every candidate subnet is full.
    """
    vpc = get_vpc(region)
    candidates = [s for s in vpc.get_all_subnets(subnet_ids=subnet_ids)
                  if s.available_ip_address_count > 0]
    if availability_zone:  # pragma: no branch
        candidates = [s for s in candidates
                      if s.availability_zone == availability_zone]
    if not candidates:
        log.debug("No free IP available in %s for subnets %s",
                  availability_zone, subnet_ids)
        return None
    candidates.sort(key=lambda s: s.available_ip_address_count)
    return candidates[-1].id
deleted file mode 100644
--- a/cloudtools/buildbot.py
+++ /dev/null
@@ -1,170 +0,0 @@
-import time
-import sqlalchemy as sa
-import re
-import logging
-import requests
-from sqlalchemy.engine.reflection import Inspector
-from collections import defaultdict
-
-from .jacuzzi import get_allocated_slaves
-
-log = logging.getLogger(__name__)
-ACTIVITY_BOOTING, ACTIVITY_STOPPED = ("booting", "stopped")
-
-
-def find_pending(dburl):
-    db = sa.create_engine(dburl)
-    inspector = Inspector(db)
-    # Newer buildbot has a "buildrequest_claims" table
-    if "buildrequest_claims" in inspector.get_table_names():
-        query = sa.text("""
-        SELECT buildername, id FROM
-               buildrequests WHERE
-               complete=0 AND
-               submitted_at > :yesterday AND
-               submitted_at < :toonew AND
-               (select count(brid) from buildrequest_claims
-                       where brid=id) = 0""")
-    # Older buildbot doesn't
-    else:
-        query = sa.text("""
-        SELECT buildername, id FROM
-               buildrequests WHERE
-               complete=0 AND
-               claimed_at=0 AND
-               submitted_at > :yesterday AND
-               submitted_at < :toonew""")
-
-    result = db.execute(
-        query,
-        yesterday=time.time() - 86400,
-        toonew=time.time() - 10
-    )
-    retval = result.fetchall()
-    return retval
-
-
-def map_builders(pending, builder_map):
-    """Map pending builder names to instance types"""
-    type_map = defaultdict(int)
-    for pending_buildername, _ in pending:
-        for buildername_exp, moz_instance_type in builder_map.items():
-            if re.match(buildername_exp, pending_buildername):
-                slaveset = get_allocated_slaves(pending_buildername)
-                log.debug("%s instance type %s slaveset %s",
-                          pending_buildername, moz_instance_type, slaveset)
-                type_map[moz_instance_type, slaveset] += 1
-                break
-        else:
-            log.debug("%s has pending jobs, but no instance types defined",
-                      pending_buildername)
-    return type_map
-
-
-def get_tacfile(ssh_client):
-    return ssh_client.get_stdout("cat /builds/slave/buildbot.tac")
-
-
-def get_buildbot_master(ssh_client, masters_json):
-    tacfile = get_tacfile(ssh_client)
-    host = re.search("^buildmaster_host = '(.*?)'$", tacfile, re.M)
-    host = host.group(1)
-    port = None
-    for master in masters_json:
-        if master["hostname"] == host:
-            port = master["http_port"]
-            break
-    assert host and port
-    return host, port
-
-
-def graceful_shutdown(ssh_client, masters_json):
-    # Find out which master we're attached to by looking at buildbot.tac
-    log.debug("%s - looking up which master we're attached to",
-              ssh_client.name)
-    host, port = get_buildbot_master(ssh_client, masters_json)
-
-    url = "http://{host}:{port}/buildslaves/{name}/shutdown".format(
-        host=host, port=port, name=ssh_client.name)
-    log.debug("%s - POSTing to %s", ssh_client.name, url)
-    requests.post(url, allow_redirects=False)
-
-
-def get_last_activity(ssh_client):
-    slave_time = ssh_client.get_stdout("date +%Y%m%d%H%M%S").strip()
-    slave_time = time.mktime(time.strptime(slave_time, "%Y%m%d%H%M%S"))
-    uptime = float(ssh_client.get_stdout("cat /proc/uptime").split()[0])
-
-    if uptime < 3 * 60:
-        # Assume we're still booting
-        log.debug("%s - uptime is %.2f; assuming we're still booting up",
-                  ssh_client.name, uptime)
-        return ACTIVITY_BOOTING
-
-    stdout = ssh_client.get_stdout(
-        "tail -n 100 /builds/slave/twistd.log.1 /builds/slave/twistd.log")
-
-    last_activity = None
-    running_command = False
-    t = time.time()
-    line = ""
-    for line in stdout.splitlines():
-        m = re.search(r"^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})", line)
-        if m:
-            t = time.strptime(m.group(1), "%Y-%m-%d %H:%M:%S")
-            t = time.mktime(t)
-        else:
-            # Not sure what to do with this line...
-            continue
-
-        # uncomment to dump out ALL the lines
-        # log.debug("%s - %s", name, line.strip())
-
-        if "RunProcess._startCommand" in line or "using PTY: " in line:
-            log.debug("%s - started command - %s", ssh_client.name,
-                      line.strip())
-            running_command = True
-        elif "commandComplete" in line or "stopCommand" in line:
-            log.debug("%s - done command - %s", ssh_client.name, line.strip())
-            running_command = False
-
-        if "Shut Down" in line:
-            # Check if this happened before we booted, i.e. we're still booting
-            # up
-            if (slave_time - t) > uptime:
-                log.debug(
-                    "%s - shutdown line is older than uptime; assuming we're "
-                    "still booting %s", ssh_client.name, line.strip())
-                last_activity = ACTIVITY_BOOTING
-            else:
-                last_activity = ACTIVITY_STOPPED
-        elif "I have a leftover directory" in line:
-            # Ignore this, it doesn't indicate anything
-            continue
-        elif running_command:
-            # We're in the middle of running something, so say that our last
-            # activity is now (0 seconds ago)
-            last_activity = 0
-        else:
-            last_activity = slave_time - t
-
-    # If the last lines from the log are over 10 minutes ago, and are from
-    # before our reboot, then try rebooting
-    if (slave_time - t) > 10 * 60 and (slave_time - t) > uptime:
-        log.debug(
-            "%s - shut down happened %ss ago, but we've been up for %ss - %s",
-            ssh_client.name, slave_time - t, uptime, line.strip())
-        # If longer than 30 minutes, try rebooting
-        if (slave_time - t) > 30 * 60:
-            log.debug("%s - rebooting", ssh_client.name)
-            ssh_client.reboot()
-
-    # If there's *no* activity (e.g. no twistd.log files), and we've been up a
-    # while, then reboot
-    if last_activity is None and uptime > 15 * 60:
-        log.debug("%s - no activity; rebooting", ssh_client.name)
-        # If longer than 30 minutes, try rebooting
-        ssh_client.reboot()
-
-    log.debug("%s - %s - %s", ssh_client.name, last_activity, line.strip())
-    return last_activity
deleted file mode 100644
--- a/cloudtools/dns.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from socket import gethostbyname, gaierror, gethostbyaddr, herror, \
-    gethostbyname_ex
-
-
-def get_ip(hostname):
-    try:
-        return gethostbyname(hostname)
-    except gaierror:
-        return None
-
-
-def get_ptr(ip):
-    try:
-        return gethostbyaddr(ip)[0]
-    except herror:
-        return None
-
-
-def get_cname(cname):
-    try:
-        return gethostbyname_ex(cname)[0]
-    except:
-        return None
deleted file mode 100644
--- a/cloudtools/fabric/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from fabric.api import env
-import logging
-
-log = logging.getLogger(__name__)
-
-
-def setup_fabric_env(instance, user="root", abort_on_prompts=True,
-                     disable_known_hosts=True, key_filename=None):
-    env.abort_on_prompts = abort_on_prompts
-    env.disable_known_hosts = disable_known_hosts
-    if instance.vpc_id:
-        log.info("Using private IP")
-        env.host_string = instance.private_ip_address
-    else:
-        log.info("Using public DNS")
-        env.host_string = instance.public_dns_name
-    if user:  # pragma: no branch
-        env.user = user
-    if key_filename:  # pragma: no branch
-        env.key_filename = key_filename
deleted file mode 100644
--- a/cloudtools/fileutils.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python
-
-import errno
-import os
-import logging
-import gzip
-import json
-log = logging.getLogger(__name__)
-
-
-def mkdir_p(dst_dir, exist_ok=True):
-    """same as os.makedirs(path, exist_ok=True) in python > 3.2"""
-    try:
-        os.makedirs(dst_dir)
-        log.debug('created %s', dst_dir)
-    except OSError, error:
-        if error.errno == errno.EEXIST and os.path.isdir(dst_dir) and exist_ok:
-            pass
-        else:
-            log.error('cannot create %s, %s', dst_dir, error)
-            raise
-
-
-def get_data_from_gz_file(filename):
-    log.debug(filename)
-    try:
-        with gzip.open(filename, 'rb') as f:
-            return f.read()
-    except IOError:
-        log.debug('%s is not a valid gz file', filename)
-        raise
-
-
-def get_data_from_json_file(filename):
-    """returns a json object from filename"""
-    try:
-        log.debug(filename)
-        with open(filename, 'rb') as f:
-            return json.loads(f.read())
-    except ValueError:
-        # discard log file if it's not a good json file
-        # a log file can be broken because the download has been halted or the file
-        # has been modified by the user
-        log.debug('%s is not valid, deleting it', filename)
-        raise
deleted file mode 100644
--- a/cloudtools/graphite.py
+++ /dev/null
@@ -1,86 +0,0 @@
-import logging
-import socket
-import time
-
-log = logging.getLogger(__name__)
-
-
-class GraphiteLogger(object):
-    # to be used by modules
-
-    def __init__(self):
-        self._data = {}
-        self._servers = []
-
-    def add_destination(self, host, port, prefix):
-        self._servers.append((host, port, prefix))
-
-    @staticmethod
-    def _generate_line(prefix, name, value, timestamp):
-        return "{prefix}.{name} {value} {timestamp}\n".format(
-            prefix=prefix, name=name, value=value, timestamp=timestamp)
-
-    def add(self, name, value, timestamp=None, collect=False):
-        # graphite needs numbers, not strings
-        try:
-            float(value)
-        except ValueError:
-            log.error("Graphite accepts numeric values only, discarding...")
-            return
-
-        if not timestamp:
-            timestamp = int(time.time())
-        if collect and name in self._data:
-            self._data[name] = (self._data[name][0] + value, timestamp)
-        else:
-            self._data[name] = (value, timestamp)
-
-    def generate_data(self, prefix):
-        data = []
-        for name, (value, timestamp) in sorted(self._data.iteritems()):
-            data.append(self._generate_line(prefix, name, value, timestamp))
-        return "".join(data)
-
-    def sendall(self):
-        if not self._data:
-            log.debug("Nothing to submit to graphite")
-            return
-
-        for host, port, prefix in self._servers:
-            data = self.generate_data(prefix)
-            log.debug("Graphite send: \n%s", data)
-            try:
-                log.debug("Connecting to graphite at %s:%s", host, port)
-                sock = socket.create_connection((host, port), timeout=10)
-                sock.sendall(data)
-            except Exception:
-                log.exception("Couldn't send graphite data to %s:%s", host,
-                              port)
-                log.warn("Ignoring all grapite submissions!")
-        self._data = {}
-
-_graphite_logger = GraphiteLogger()
-
-
-def get_graphite_logger():
-    global _graphite_logger
-    return _graphite_logger
-
-
-def generate_instance_stats(instances):
-    l = _graphite_logger
-    for i in instances:
-        if i.state != "running":
-            continue
-        template_values = dict(
-            region=i.region.name,
-            moz_instance_type=i.tags.get("moz-type", "none"),
-            instance_type=i.instance_type.replace(".", "-"),
-            life_cycle_type="spot" if i.spot_instance_request_id else
-            "ondemand",
-            virtualization=i.virtualization_type,
-            root_device_type=i.root_device_type
-        )
-        name = "running.{region}.{moz_instance_type}.{instance_type}" \
-            ".{life_cycle_type}.{virtualization}.{root_device_type}"
-        l.add(name.format(**template_values), 1, collect=True)
deleted file mode 100644
--- a/cloudtools/jacuzzi.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import logging
-import requests
-
-JACUZZI_BASE_URL = "http://jacuzzi-allocator.pub.build.mozilla.org/v1"
-log = logging.getLogger(__name__)
-_jacuzzi_allocated_cache = {}
-
-
-def get_allocated_slaves(buildername):
-    if buildername in _jacuzzi_allocated_cache:  # pragma: no branch
-        return _jacuzzi_allocated_cache[buildername]
-
-    if buildername is None:
-        log.debug("getting set of all allocated slaves")
-        r = requests.get("{0}/allocated/all".format(JACUZZI_BASE_URL))
-        _jacuzzi_allocated_cache[buildername] = frozenset(r.json()['machines'])
-        return _jacuzzi_allocated_cache[buildername]
-
-    log.debug("getting slaves allocated to %s", buildername)
-    r = requests.get("{0}/builders/{1}".format(JACUZZI_BASE_URL, buildername))
-    # Handle 404 specially
-    if r.status_code == 404:
-        _jacuzzi_allocated_cache[buildername] = None
-        return None
-    _jacuzzi_allocated_cache[buildername] = frozenset(r.json()['machines'])
-    return _jacuzzi_allocated_cache[buildername]
-
-
-def filter_instances_by_slaveset(instances, slaveset):
-    retval = []
-    if not slaveset:
-        allocated_slaves = get_allocated_slaves(None)
-
-    for i in instances:
-        if slaveset:
-            if i.tags.get('Name') in slaveset:
-                retval.append(i)
-        elif i.tags.get('Name') not in allocated_slaves:
-            retval.append(i)
-
-    return retval
deleted file mode 100644
--- a/cloudtools/slavealloc.py
+++ /dev/null
@@ -1,126 +0,0 @@
-import os
-import time
-import logging
-import json
-import requests
-import tempfile
-import shutil
-from collections import defaultdict
-from repoze.lru import lru_cache
-
-SLAVES_JSON_URL = "http://slavealloc.pvt.build.mozilla.org/api/slaves"
-CACHE_FILE = "slaves.json"
-CACHE_TTL = 10 * 60
-
-log = logging.getLogger(__name__)
-
-
-@lru_cache(10)
-def get_classified_slaves(is_spot=True):
-    js = get_slaves_json(SLAVES_JSON_URL, CACHE_FILE)
-    slaves = [s for s in js if is_spot_slave(s) is is_spot and is_enabled(s)]
-    # 2D dict: x[moz_type][region] = ["slave1", "slave2"]
-    classified_slaves = defaultdict(lambda: defaultdict(set))
-    for s in slaves:
-        moz_type = slave_moz_type(s)
-        region = slave_region(s)
-        name = s.get("name")
-        if all([moz_type, region, name]):
-            classified_slaves[moz_type][region].add(name)
-    return classified_slaves
-
-
-def slave_region(slave):
-    return slave.get("datacenter")
-
-
-def is_spot_slave(slave):
-    return "-spot-" in slave.get("name", "")
-
-
-def is_enabled(slave):
-    return slave.get("enabled")
-
-
-def slave_moz_type(slave):
-    # Separate golden slaves
-    if slave.get("name") and "golden" in slave.get("name"):
-        return "golden"
-
-    # bld-linux64
-    if slave.get("bitlength") == "64" and \
-       slave.get("environment") == "prod" and \
-       slave.get("distro") == "centos6-mock" and \
-       slave.get("purpose") == "build" and \
-       slave.get("trustlevel") == "core":
-        return "bld-linux64"
-
-    # try-linux64
-    if slave.get("bitlength") == "64" and \
-       slave.get("environment") == "prod" and \
-       slave.get("distro") == "centos6-mock" and \
-       slave.get("purpose") == "build" and \
-       slave.get("trustlevel") == "try":
-        return "try-linux64"
-
-    # tst-linux32
-    if slave.get("bitlength") == "32" and \
-       slave.get("environment") == "prod" and \
-       slave.get("distro") == "ubuntu32" and \
-       slave.get("purpose") == "tests" and \
-       slave.get("trustlevel") == "try":
-        return "tst-linux32"
-
-    # tst-linux64
-    if slave.get("bitlength") == "64" and \
-       slave.get("environment") == "prod" and \
-       slave.get("distro") == "ubuntu64" and \
-       slave.get("purpose") == "tests" and \
-       slave.get("speed") == "m1.medium" and \
-       slave.get("trustlevel") == "try":
-        return "tst-linux64"
-
-    # tst-emulator64
-    if slave.get("bitlength") == "64" and \
-       slave.get("environment") == "prod" and \
-       slave.get("distro") == "ubuntu64" and \
-       slave.get("purpose") == "tests" and \
-       slave.get("speed") == "c3.xlarge" and \
-       slave.get("trustlevel") == "try":
-        return "tst-emulator64"
-
-    return None
-
-
-def get_slaves_json(url, cache):
-    try:
-        mtime = os.stat(cache).st_mtime
-        now = time.time()
-        if now - mtime < CACHE_TTL:
-            log.debug("Using cached slaves.json")
-            return read_slaves_json(cache)
-        else:
-            log.debug("File expired, fetching a new one")
-    except (OSError, IOError, KeyError):
-        log.warn("Error reading cache file, trying to fetch", exc_info=True)
-
-    try:
-        download_file(url, cache)
-    except:
-        log.warn("Cannot fetch slaves.json, reusing the existing file",
-                 exc_info=True)
-    return read_slaves_json(cache)
-
-
-def read_slaves_json(filename):
-    return json.load(open(filename))
-
-
-def download_file(url, dest):
-    req = requests.get(url, timeout=30)
-    req.raise_for_status()
-    _, tmp_fname = tempfile.mkstemp()
-    with open(tmp_fname, "wb") as f:
-        f.write(req.content)
-    log.debug("moving %s to %s", tmp_fname, dest)
-    shutil.move(tmp_fname, dest)
deleted file mode 100644
--- a/cloudtools/ssh.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import logging
-import paramiko
-from .graphite import get_graphite_logger
-
-log = logging.getLogger(__name__)
-gr_log = get_graphite_logger()
-
-
-class SSHClient(paramiko.SSHClient):
-
-    def __init__(self, instance, username, key_filename):
-        super(SSHClient, self).__init__()
-        self.set_missing_host_key_policy(paramiko.MissingHostKeyPolicy())
-        self.instance = instance
-        self.username = username
-        self.key_filename = key_filename
-        self.ip = instance.private_ip_address
-        self.name = instance.tags.get("Name")
-
-    def connect(self, *args, **kwargs):
-        try:
-            super(SSHClient, self).connect(*args, hostname=self.ip,
-                                           username=self.username,
-                                           key_filename=self.key_filename,
-                                           **kwargs)
-            return self
-        except Exception:
-            log.debug("Couldn't log into %s at %s", self.name, self.ip)
-            return None
-
-    def get_stdout(self, command):
-        stdin, stdout, _ = self.exec_command(command)
-        stdin.close()
-        data = stdout.read()
-        return data
-
-    def reboot(self, command=None):
-        if not command:
-            command = "sudo reboot"
-        self.get_stdout(command)
-        gr_log.add(
-            "rebooted.{}".format(self.instance.tags.get("moz-type", "none")),
-            1, collect=True)
deleted file mode 100644
--- a/cloudtools/yaml.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import copy
-
-
-def process_includes(data):
-    """
-    Iterate over a de-YAML'd data structure.  A top-level 'includes'
-    is treated as a dictionary of includable chunks.  Anywhere else,
-    a dictionary containing only {'include': 'somename'} will include the
-    chunk named 'somename' in its place.
-    """
-    if not isinstance(data, dict) or 'includes' not in data:
-        return data
-    includes = data.pop('includes')
-
-    def iter(d):
-        if isinstance(d, dict):
-            if len(d) == 1 and 'include' in d and d['include'] in includes:
-                return includes[d['include']]
-            return {k: iter(v) for (k, v) in d.iteritems()}
-        elif isinstance(d, list):
-            return [iter(v) for v in d]
-        else:
-            return d
-
-    # repeatedly apply until all includes are processed (nothing changes)
-    while 1:
-        last_data = copy.deepcopy(data)
-        data = iter(data)
-        if data == last_data:
-            return data
deleted file mode 100644
--- a/configs/bld-linux64
+++ /dev/null
@@ -1,73 +0,0 @@
-{
-    "hostname": "bld-linux64-ec2-%03d",
-    "us-east-1": {
-        "type": "bld-linux64",
-        "domain": "build.releng.use1.mozilla.com",
-        "ami": "ami-d6cf61be",
-        "subnet_ids": ["subnet-2ba98340", "subnet-2da98346", "subnet-22a98349", "subnet-0822004e", "subnet-2da98346", "subnet-5bc7c62f", "subnet-7091d358"],
-        "security_group_ids": ["sg-e758e982"],
-        "instance_type": "c3.xlarge",
-        "distro": "centos",
-        "ssh_key": "aws-releng",
-        "use_public_ip": true,
-        "instance_profile_name": "bld-linux64",
-        "device_map": {
-            "/dev/xvda": {
-                "delete_on_termination": true,
-                "skip_resize": true,
-                "volume_type": "gp2",
-                "instance_dev": "/dev/xvda1"
-            },
-            "/dev/sdb": {
-                "ephemeral_name": "ephemeral0",
-                "instance_dev": "/dev/xvdb",
-                "skip_resize": true,
-                "delete_on_termination": false
-            },
-            "/dev/sdc": {
-                "ephemeral_name": "ephemeral1",
-                "instance_dev": "/dev/xvdc",
-                "skip_resize": true,
-                "delete_on_termination": false
-            }
-        },
-        "tags": {
-            "moz-type": "bld-linux64"
-        }
-    },
-    "us-west-2": {
-        "type": "bld-linux64",
-        "domain": "build.releng.usw2.mozilla.com",
-        "ami": "ami-13377723",
-        "subnet_ids": ["subnet-d748dabe", "subnet-a848dac1", "subnet-ad48dac4", "subnet-c74f48b3"],
-        "security_group_ids": ["sg-f5ca0690"],
-        "instance_type": "c3.xlarge",
-        "distro": "centos",
-        "ssh_key": "aws-releng",
-        "use_public_ip": true,
-        "instance_profile_name": "bld-linux64",
-        "device_map": {
-            "/dev/xvda": {
-                "delete_on_termination": true,
-                "skip_resize": true,
-                "volume_type": "gp2",
-                "instance_dev": "/dev/xvda1"
-            },
-            "/dev/sdb": {
-                "ephemeral_name": "ephemeral0",
-                "instance_dev": "/dev/xvdb",
-                "skip_resize": true,
-                "delete_on_termination": false
-            },
-            "/dev/sdc": {
-                "ephemeral_name": "ephemeral1",
-                "instance_dev": "/dev/xvdc",
-                "skip_resize": true,
-                "delete_on_termination": false
-            }
-        },
-        "tags": {
-            "moz-type": "bld-linux64"
-        }
-    }
-}
deleted file mode 100644
--- a/configs/bld-linux64-s3
+++ /dev/null
@@ -1,61 +0,0 @@
-{
-    "hostname": "bld-linux64-ec2-%03d",
-    "us-east-1": {
-        "type": "bld-linux64",
-        "domain": "build.releng.use1.mozilla.com",
-        "ami": "ami-c469c0ac",
-        "subnet_ids": ["subnet-2ba98340", "subnet-2da98346", "subnet-22a98349", "subnet-0822004e", "subnet-2da98346", "subnet-5bc7c62f", "subnet-7091d358"],
-        "security_group_ids": ["sg-e758e982"],
-        "instance_type": "c3.xlarge",
-        "distro": "centos",
-        "ssh_key": "aws-releng",
-        "use_public_ip": true,
-        "instance_profile_name": "bld-linux64",
-        "device_map": {
-            "/dev/sdb": {
-                "ephemeral_name": "ephemeral0",
-                "instance_dev": "/dev/xvdb",
-                "skip_resize": true,
-                "delete_on_termination": false
-            },
-            "/dev/sdc": {
-                "ephemeral_name": "ephemeral1",
-                "instance_dev": "/dev/xvdc",
-                "skip_resize": true,
-                "delete_on_termination": false
-            }
-        },
-        "tags": {
-            "moz-type": "bld-linux64"
-        }
-    },
-    "us-west-2": {
-        "type": "bld-linux64",
-        "domain": "build.releng.usw2.mozilla.com",
-        "ami": "ami-058ece35",
-        "subnet_ids": ["subnet-d748dabe", "subnet-a848dac1", "subnet-ad48dac4", "subnet-c74f48b3"],
-        "security_group_ids": ["sg-f5ca0690"],
-        "instance_type": "c3.xlarge",
-        "distro": "centos",
-        "ssh_key": "aws-releng",
-        "use_public_ip": true,
-        "instance_profile_name": "bld-linux64",
-        "device_map": {
-            "/dev/sdb": {
-                "ephemeral_name": "ephemeral0",
-                "instance_dev": "/dev/xvdb",
-                "skip_resize": true,
-                "delete_on_termination": false
-            },
-            "/dev/sdc": {
-                "ephemeral_name": "ephemeral1",
-                "instance_dev": "/dev/xvdc",
-                "skip_resize": true,
-                "delete_on_termination": false
-            }
-        },
-        "tags": {
-            "moz-type": "bld-linux64"
-        }
-    }
-}
deleted file mode 100644
--- a/configs/bld-linux64.cloud-init
+++ /dev/null
@@ -1,11 +0,0 @@
-#cloud-config
-
-fqdn: {fqdn}
-hostname: {fqdn}
-package_update: false
-resize_rootfs: true
-manage_etc_hosts: true
-disable_root: false
-ssh_pwauth: true
-moz_instance_type: {moz_instance_type}
-mounts: false
deleted file mode 100644
--- a/configs/buildbot-master
+++ /dev/null
@@ -1,57 +0,0 @@
-{
-    "hostname": "buildbot-master%02d",
-    "us-east-1": {
-        "type": "buildbot-master",
-        "domain": "bb.releng.use1.mozilla.com",
-        "ami": "ami-43c7b72a",
-        "subnet_ids": ["subnet-5bea1b2c", "subnet-8992a1a1", "subnet-9be0f3dd"],
-        "security_group_ids": ["sg-31e8185e"],
-        "instance_type": "m3.medium",
-        "disable_api_termination": true,
-        "ssh_key": "aws-releng",
-        "use_public_ip": true,
-        "instance_profile_name": "buildbot-master",
-        "device_map": {
-            "/dev/sda1": {
-                "size": 15,
-                "instance_dev": "/dev/xvde1"
-            },
-            "/dev/sdb": {
-                "ephemeral_name": "ephemeral0",
-                "instance_dev": "/dev/xvdf",
-                "delete_on_termination": false,
-                "skip_resize": true
-            }
-        },
-        "tags": {
-            "moz-type": "buildbot-master"
-        }
-    },
-    "us-west-2": {
-        "type": "buildbot-master",
-        "domain": "bb.releng.usw2.mozilla.com",
-        "ami": "ami-516cfc61",
-        "subnet_ids": ["subnet-7c54b20b", "subnet-e77170a1", "subnet-e457b193", "subnet-c69353a3"],
-        "security_group_ids": ["sg-932e33ff"],
-        "instance_type": "m3.medium",
-        "disable_api_termination": true,
-        "ssh_key": "aws-releng",
-        "use_public_ip": true,
-        "instance_profile_name": "buildbot-master",
-        "device_map": {
-            "/dev/sda1": {
-                "size": 15,
-                "instance_dev": "/dev/xvde1"
-            },
-            "/dev/sdb": {
-                "ephemeral_name": "ephemeral0",
-                "instance_dev": "/dev/xvdf",
-                "delete_on_termination": false,
-                "skip_resize": true
-            }
-        },
-        "tags": {
-            "moz-type": "buildbot-master"
-        }
-    }
-}
deleted file mode 100644
--- a/configs/buildbot-master.cloud-init
+++ /dev/null
@@ -1,11 +0,0 @@
-#cloud-config
-
-fqdn: {fqdn}
-hostname: {fqdn}
-package_update: false
-resize_rootfs: true
-manage_etc_hosts: true
-disable_root: false
-ssh_pwauth: true
-moz_instance_type: {moz_instance_type}
-mounts: false
deleted file mode 100644
--- a/configs/dev-linux64
+++ /dev/null
@@ -1,43 +0,0 @@
-{
-    "hostname": "dev-linux64-ec2-%03d",
-    "us-east-1": {
-        "type": "dev-linux64",
-        "domain": "dev.releng.use1.mozilla.com",
-        "ami": "ami-41013328",
-        "subnet_ids": ["subnet-2ba98340", "subnet-2da98346", "subnet-22a98349"],
-        "security_group_ids": [],
-        "instance_type": "c3.xlarge",
-        "distro": "centos",
-        "ssh_key": "aws-releng",
-        "use_public_ip": true,
-        "instance_profile_name": "try-linux64",
-        "device_map": {
-            "/dev/xvda": {
-                "size": 250,
-                "delete_on_termination": true,
-                "skip_resize": true,
-                "instance_dev": "/dev/xvda1"
-            }
-        }
-    },
-    "us-west-2": {
-        "type": "dev-linux64",
-        "domain": "dev.releng.usw2.mozilla.com",
-        "ami": "ami-6eea8b5e",
-        "subnet_ids": ["subnet-d748dabe", "subnet-a848dac1", "subnet-ad48dac4"],
-        "security_group_ids": [],
-        "instance_type": "c3.xlarge",
-        "distro": "centos",
-        "ssh_key": "aws-releng",
-        "use_public_ip": true,
-        "instance_profile_name": "try-linux64",
-        "device_map": {
-            "/dev/xvda": {
-                "size": 250,
-                "delete_on_termination": true,
-                "skip_resize": true,
-                "instance_dev": "/dev/xvda1"
-            }
-        }
-    }
-}
deleted file mode 120000
--- a/configs/dev-linux64.cloud-init
+++ /dev/null
@@ -1,1 +0,0 @@
-bld-linux64.cloud-init
\ No newline at end of file
deleted file mode 100644
--- a/configs/instance2ami.json
+++ /dev/null
@@ -1,27 +0,0 @@
-[
-    {"ami-config": "centos-6-x86_64-hvm-try",
-     "instance-config": "try-linux64",
-     "ssh-key": "aws-releng",
-     "ssh-user": "ec2-user",
-     "regions": ["us-east-1", "us-west-2"]},
-    {"ami-config": "ubuntu-12.04-x86_64-desktop",
-     "instance-config": "tst-linux64",
-     "ssh-key": "aws-releng",
-     "ssh-user": "ubuntu",
-     "regions": ["us-east-1", "us-west-2"]},
-    {"ami-config": "ubuntu-12.04-i386-desktop",
-     "instance-config": "tst-linux32",
-     "ssh-key": "aws-releng",
-     "ssh-user": "ubuntu",
-     "regions": ["us-east-1", "us-west-2"]},
-    {"ami-config": "centos-6-x86_64-hvm-base",
-     "instance-config": "bld-linux64",
-     "ssh-key": "aws-releng",
-     "ssh-user": "ec2-user",
-     "regions": ["us-east-1", "us-west-2"]},
-    {"ami-config": "ubuntu-12.04-x86_64-desktop",
-     "instance-config": "tst-emulator64",
-     "ssh-key": "aws-releng",
-     "ssh-user": "ubuntu",
-     "regions": ["us-east-1", "us-west-2"]}
-]
deleted file mode 100644
--- a/configs/routingtables.yml
+++ /dev/null
@@ -1,249 +0,0 @@
-us-west-1:
-    default:
-        routes:
-            # Amazon routes (check via 'whois')
-            50.16.0.0/14: IGW
-            54.230.0.0/15: IGW
-            54.239.0.0/17: IGW
-            54.240.0.0/12: IGW
-            72.21.192.0/19: IGW
-            176.32.96.0/21: IGW
-            178.236.0.0/21: IGW
-            205.251.192.0/18: IGW
-            207.171.160.0/19: IGW
-
-            hg.mozilla.org: IGW
-            git.mozilla.org: IGW
-            10.130.0.0/16: local
-            0.0.0.0/0: VGW
-
-    nat:
-        routes:
-            10.130.0.0/16: local
-            10.0.0.0/8: VGW
-            0.0.0.0/0: null
-
-    "nat subnet":
-        routes:
-            0.0.0.0/0: IGW
-            10.0.0.0/8: VGW
-            10.130.0.0/16: local
-
-us-west-2:
-    default:
-        routes:
-            # Amazon routes (check via 'whois')
-            50.16.0.0/14: IGW
-            54.230.0.0/15: IGW
-            54.239.0.0/17: IGW
-            54.240.0.0/12: IGW
-            72.21.192.0/19: IGW
-            176.32.96.0/21: IGW
-            178.236.0.0/21: IGW
-            205.251.192.0/18: IGW
-            207.171.160.0/19: IGW
-
-            10.132.0.0/16: local
-            0.0.0.0/0: VGW
-
-    masters:
-        routes:
-            # Amazon routes (check via 'whois')
-            50.16.0.0/14: IGW
-            54.230.0.0/15: IGW
-            54.239.0.0/17: IGW
-            54.240.0.0/12: IGW
-            72.21.192.0/19: IGW
-            176.32.96.0/21: IGW
-            178.236.0.0/21: IGW
-            205.251.192.0/18: IGW
-            207.171.160.0/19: IGW
-
-            10.132.0.0/16: local
-            0.0.0.0/0: VGW
-
-    testers:
-        routes:
-            # Amazon routes (check via 'whois')
-            50.16.0.0/14: IGW
-            54.230.0.0/15: IGW
-            54.239.0.0/17: IGW
-            54.240.0.0/12: IGW
-            72.21.192.0/19: IGW
-            176.32.96.0/21: IGW
-            178.236.0.0/21: IGW
-            205.251.192.0/18: IGW
-            207.171.160.0/19: IGW
-
-            # For graphite
-            carbon.hostedgraphite.com: IGW
-            mozilla.carbon.hostedgraphite.com: IGW
-
-            # Bug 1047550 - gaia try jobs clone gaia from github
-            github.com: IGW
-
-            ftp-ssl.mozilla.org: IGW
-            hg.mozilla.org: IGW
-            git.mozilla.org: IGW
-            10.132.0.0/16: local
-            0.0.0.0/0: VGW
-
-    builders:
-        routes:
-            # Amazon routes (check via 'whois')
-            50.16.0.0/14: IGW
-            54.230.0.0/15: IGW
-            54.239.0.0/17: IGW
-            54.240.0.0/12: IGW
-            72.21.192.0/19: IGW
-            176.32.96.0/21: IGW
-            178.236.0.0/21: IGW
-            205.251.192.0/18: IGW
-            207.171.160.0/19: IGW
-
-            # For graphite
-            carbon.hostedgraphite.com: IGW
-            mozilla.carbon.hostedgraphite.com: IGW
-
-            # Mozilla stuff
-            ftp-ssl.mozilla.org: IGW
-            hg.mozilla.org: IGW
-            git.mozilla.org: IGW
-
-            10.132.0.0/16: local
-            0.0.0.0/0: VGW
-    try:
-        routes:
-            # Amazon routes (check via 'whois')
-            50.16.0.0/14: IGW
-            54.230.0.0/15: IGW
-            54.239.0.0/17: IGW
-            54.240.0.0/12: IGW
-            72.21.192.0/19: IGW
-            176.32.96.0/21: IGW
-            178.236.0.0/21: IGW
-            205.251.192.0/18: IGW
-            207.171.160.0/19: IGW
-
-            # For graphite
-            carbon.hostedgraphite.com: IGW
-            mozilla.carbon.hostedgraphite.com: IGW
-
-            # Mozilla stuff
-            ftp-ssl.mozilla.org: IGW
-            hg.mozilla.org: IGW
-            git.mozilla.org: IGW
-
-            10.132.0.0/16: local
-            0.0.0.0/0: VGW
-
-us-east-1:
-    default:
-        routes:
-            10.134.0.0/16: local
-            0.0.0.0/0: VGW
-
-    masters:
-        routes:
-            # Amazon routes (check via 'whois')
-            50.16.0.0/14: IGW
-            54.230.0.0/15: IGW
-            54.239.0.0/17: IGW
-            54.240.0.0/12: IGW
-            72.21.192.0/19: IGW
-            176.32.96.0/21: IGW
-            178.236.0.0/21: IGW
-            205.251.192.0/18: IGW
-            207.171.160.0/19: IGW
-
-            10.134.0.0/16: local
-            0.0.0.0/0: VGW
-
-    testers:
-        routes:
-            # Amazon routes (check via 'whois')
-            50.16.0.0/14: IGW
-            54.230.0.0/15: IGW
-            54.239.0.0/17: IGW
-            54.240.0.0/12: IGW
-            72.21.192.0/19: IGW
-            176.32.96.0/21: IGW
-            178.236.0.0/21: IGW
-            205.251.192.0/18: IGW
-            207.171.160.0/19: IGW
-
-            # For graphite
-            carbon.hostedgraphite.com: IGW
-            mozilla.carbon.hostedgraphite.com: IGW
-
-            # Bug 1047550 - gaia try jobs clone gaia from github
-            github.com: IGW
-
-            hg.mozilla.org: IGW
-            git.mozilla.org: IGW
-            ftp-ssl.mozilla.org: IGW
-            10.134.0.0/16: local
-            0.0.0.0/0: VGW
-
-    builders:
-        routes:
-            # Amazon routes (check via 'whois')
-            50.16.0.0/14: IGW
-            54.230.0.0/15: IGW
-            54.239.0.0/17: IGW
-            54.240.0.0/12: IGW
-            72.21.192.0/19: IGW
-            176.32.96.0/21: IGW
-            178.236.0.0/21: IGW
-            205.251.192.0/18: IGW
-            207.171.160.0/19: IGW
-
-            # For graphite
-            carbon.hostedgraphite.com: IGW
-            mozilla.carbon.hostedgraphite.com: IGW
-
-            # Mozilla stuff
-            ftp-ssl.mozilla.org: IGW
-            hg.mozilla.org: IGW
-            git.mozilla.org: IGW
-
-            10.134.0.0/16: local
-            0.0.0.0/0: VGW
-
-    try:
-        routes:
-            # Amazon routes (check via 'whois')
-            50.16.0.0/14: IGW
-            54.230.0.0/15: IGW
-            54.239.0.0/17: IGW
-            54.240.0.0/12: IGW
-            72.21.192.0/19: IGW
-            176.32.96.0/21: IGW
-            178.236.0.0/21: IGW
-            205.251.192.0/18: IGW
-            207.171.160.0/19: IGW
-
-            # For graphite
-            carbon.hostedgraphite.com: IGW
-            mozilla.carbon.hostedgraphite.com: IGW
-
-            # Mozilla stuff
-            ftp-ssl.mozilla.org: IGW
-            hg.mozilla.org: IGW
-            git.mozilla.org: IGW
-
-            10.134.0.0/16: local
-            0.0.0.0/0: VGW
-
-
-    "EB: NAT1":
-        routes:
-            10.134.0.0/16: local
-            10.0.0.0/8: VGW
-            0.0.0.0/0: i-0dc6a56a
-
-    "EB: NAT2":
-        routes:
-            10.134.0.0/16: local
-            10.0.0.0/8: VGW
-            0.0.0.0/0: IGW
deleted file mode 100644
--- a/configs/securitygroups.yml
+++ /dev/null
@@ -1,303 +0,0 @@
-# Macros that can be included below; see yaml_includes.py.  Be careful in applying these:
-# the include processor doesn't automatically flatten lists or anything like that.
-includes:
-
-    # ping is allowed from anywhere in just about every SG
-    global-ping:
-        proto: icmp
-        ports: [-1]
-        hosts:
-          - 0.0.0.0/0
-
-    # most outgoing flows are completely open
-    global-any:
-        proto: -1
-        hosts:
-          - 0.0.0.0/0
-
-    # administrative hosts have unrestricted access on all ports
-    admin-access:
-        proto: -1
-        hosts:
-          - 10.22.75.6/31  # admin1a/b
-          - admin1.private.scl3.mozilla.com
-          - scan1.ops.scl3.mozilla.com
-          - openvpn1.corpdmz.scl3.mozilla.com
-          - openvpn1.stage.corpdmz.scl3.mozilla.com
-          - ssh1.corpdmz.scl3.mozilla.com
-          - ssh1.stage.corpdmz.scl3.mozilla.com
-          - vpn1.dmz.releng.scl3.mozilla.com
-          - nagios1.private.releng.scl3.mozilla.com  # note: includes tcp/5666
-          - 10.22.240.0/20  # scl3-vpn-net
-          - 10.22.20.0/25  # admin1.scl3-vpn
-
-    # observium has universal SNMP access
-    observium:
-        proto: udp
-        ports: [161]
-        hosts:
-          - observium2.private.scl3.mozilla.com
-
-    # infra puppetizes hosts by SSHing to them from the master
-    infra-puppetize:
-        proto: tcp
-        ports: [22]
-        hosts:
-          - puppet1.private.scl3.mozilla.com
-
-    # all slave VLANs look the same
-    slave-vlan-inbound:
-        - proto: tcp
-          ports: [22]
-          hosts:
-            - cruncher.build.mozilla.org
-            - {include: slaveapi-servers}
-            - dev-master1.build.mozilla.org
-            - aws-manager1.srv.releng.scl3.mozilla.com
-        - include: admin-access
-        - include: observium
-        - include: infra-puppetize
-        - include: global-ping
-
-    slave-vlan-outbound:
-        - include: global-any
-
-    # host set aliases:
-    slaveapi-servers: 10.26.48.16/31
-
-    # network aliases:
-    build-scl3: 10.26.52.0/22
-    test-scl3: 10.26.56.0/22
-    try-scl3: 10.26.64.0/22
-    winbuild-scl3: 10.26.36.0/22
-    wintest-scl3: 10.26.40.0/22
-    wintry-scl3: 10.26.44.0/22
-    pods-scl3: 10.26.128.0/17
-    build-usw2: 10.132.52.0/22
-    test-usw2: 10.132.56.0/22
-    test2-usw2: 10.132.156.0/22
-    try-usw2: 10.132.64.0/22
-    build-use1: 10.134.52.0/22
-    test-use1: 10.134.56.0/22
-    test2-use1: 10.134.156.0/22
-    try-use1: 10.134.64.0/22
-    slave-vlans:
-      - {include: build-scl3}
-      - {include: test-scl3}
-      - {include: try-scl3}
-      - {include: winbuild-scl3}
-      - {include: wintest-scl3}
-      - {include: wintry-scl3}
-      - {include: pods-scl3}
-      - {include: build-usw2}
-      - {include: test-usw2}
-      - {include: test2-usw2}
-      - {include: try-usw2}
-      - {include: build-use1}
-      - {include: test-use1}
-      - {include: test2-use1}
-      - {include: try-use1}
-
-    # and port aliases
-    buildbot-http-portrange: 8000-8999
-    buildbot-rpc-portrange: 9000-9999
-
-tests:
-    description: security group for test slaves
-    regions:
-        us-west-1: vpc-7a7dd613
-        us-west-2: vpc-cd63f2a4
-        us-east-1: vpc-b42100df
-    apply-to:
-        instances:
-            tags:
-                - [moz-type, tst-linux*]
-                - [Name, tst-linux*-ec2-*]
-        interfaces:
-            tags:
-                - [moz-type, tst-linux*]
-    inbound:
-        include: slave-vlan-inbound
-    outbound:
-        include: slave-vlan-outbound
-
-build:
-    description: security group for build slaves
-    regions:
-        us-west-1: vpc-7a7dd613
-        us-west-2: vpc-cd63f2a4
-        us-east-1: vpc-b42100df
-    apply-to:
-        instances:
-            tags:
-                - [moz-type, bld-linux64]
-                - [Name, bld-linux64-ec2-*]
-        interfaces:
-            tags:
-                - [moz-type, bld-linux64]
-    inbound:
-        include: slave-vlan-inbound
-    outbound:
-        include: slave-vlan-outbound
-
-try:
-    description: security group for try build slaves
-    regions:
-        us-west-1: vpc-7a7dd613
-        us-west-2: vpc-cd63f2a4
-        us-east-1: vpc-b42100df
-    apply-to:
-        instances:
-            tags:
-                - [moz-type, try-linux64]
-                - [Name, try-linux64-ec2-*]
-        interfaces:
-            tags:
-                - [moz-type, try-linux64]
-    inbound:
-        include: slave-vlan-inbound
-    outbound:
-        include: slave-vlan-outbound
-
-buildbot-master:
-    description: security group for buildbot masters
-    regions:
-        us-west-1: vpc-7a7dd613
-        us-west-2: vpc-cd63f2a4
-        us-east-1: vpc-b42100df
-    inbound:
-        # traffic from other masters
-        - proto: tcp
-          ports:
-            - 22  # ssh
-            - {include: buildbot-rpc-portrange}
-            - {include: buildbot-http-portrange}
-          hosts:
-            - 10.26.68.0/24  # bb.releng.scl3
-            - 10.132.68.0/24  # bb.releng.usw2
-            - 10.134.68.0/24  # bb.releng.use1
-            # To keep the SG small, we assume that there are only buildmasters
-            # in the AWS srv VLANs (which is mostly true)
-            - 10.132.48.0/22  # srv.releng.usw2
-            - 10.134.48.0/22  # srv.releng.use1
-            # scl3 individual masters (not in the BB VLANs)
-            - buildbot-master81.srv.releng.scl3.mozilla.com
-            - buildbot-master82.srv.releng.scl3.mozilla.com
-            - buildbot-master83.srv.releng.scl3.mozilla.com
-            - buildbot-master84.srv.releng.scl3.mozilla.com
-            - buildbot-master85.srv.releng.scl3.mozilla.com
-            - buildbot-master86.srv.releng.scl3.mozilla.com
-            - buildbot-master87.srv.releng.scl3.mozilla.com
-            - buildbot-master89.srv.releng.scl3.mozilla.com
-            - buildbot-master100.srv.releng.scl3.mozilla.com
-            - buildbot-master101.srv.releng.scl3.mozilla.com
-            - buildbot-master102.srv.releng.scl3.mozilla.com
-            - buildbot-master103.srv.releng.scl3.mozilla.com
-            - buildbot-master104.srv.releng.scl3.mozilla.com
-            - buildbot-master105.srv.releng.scl3.mozilla.com
-            - buildbot-master106.srv.releng.scl3.mozilla.com
-            - buildbot-master107.srv.releng.scl3.mozilla.com
-            - buildbot-master108.srv.releng.scl3.mozilla.com
-            - buildbot-master109.srv.releng.scl3.mozilla.com
-            - buildbot-master110.srv.releng.scl3.mozilla.com
-            - buildbot-master111.srv.releng.scl3.mozilla.com
-            - buildbot-master112.srv.releng.scl3.mozilla.com
-            # and cruncher, for the utility of it..
-            - cruncher.build.mozilla.org
-
-        # traffic from buildslaves
-        - proto: tcp
-          ports:
-            - {include: buildbot-rpc-portrange}
-          hosts: {include: slave-vlans}
-
-        # ssh access from some automation hosts
-        - proto: tcp
-          ports: [22]
-          hosts:
-            - {include: slaveapi-servers}
-            - dev-master1.build.mozilla.org
-            - aws-manager1.srv.releng.scl3.mozilla.com
-
-        # buildbot-http from aws-manager1
-        - proto: tcp
-          ports:
-            - {include: buildbot-http-portrange}
-          hosts:
-            - aws-manager1.srv.releng.scl3.mozilla.com
-
-        # generic stuff
-        - include: admin-access
-        - include: observium
-        - include: infra-puppetize
-        - include: global-ping
-    outbound:
-        - include: global-any
-
-blobber:
-    description: security group for blobber service
-    regions:
-        us-west-1: vpc-7a7dd613
-        us-west-2: vpc-cd63f2a4
-        us-east-1: vpc-b42100df
-    inbound:
-        - proto: tcp
-          ports: [443]
-          hosts: {include: slave-vlans}
-        - include: admin-access
-        - include: observium
-        - include: infra-puppetize
-        - include: global-ping
-    outbound:
-        - include: global-any
-
-nagios:
-    description: security group for nagios servers
-    regions:
-        us-west-1: vpc-7a7dd613
-        us-west-2: vpc-cd63f2a4
-        us-east-1: vpc-b42100df
-    inbound:
-        - include: admin-access
-        - include: observium
-        - include: infra-puppetize
-        - include: global-ping
-    outbound:
-        - include: global-any
-
-# proxxy is configured per-region; these two stanzas should be kept parallel
-proxxy-vpc-use1:
-    description: security group for proxxy servers in use1
-    regions:
-        us-east-1: vpc-b42100df
-    inbound:
-        - proto: tcp
-          ports: [80]
-          hosts:
-            - {include: build-use1}
-            - {include: test-use1}
-            - {include: test2-use1}
-            - {include: try-use1}
-        - include: admin-access
-        - include: observium
-        - include: global-ping
-    outbound:
-        - include: global-any
-
-proxxy-vpc-usw2:
-    description: security group for proxxy servers in usw2
-    regions:
-        us-west-2: vpc-cd63f2a4
-    inbound:
-        - proto: tcp
-          ports: [80]
-          hosts:
-            - {include: build-usw2}
-            - {include: test-usw2}
-            - {include: test2-usw2}
-            - {include: try-usw2}
-        - include: admin-access
-        - include: observium
-        - include: global-ping
-    outbound:
-        - include: global-any
deleted file mode 100644
--- a/configs/subnets.yml
+++ /dev/null
@@ -1,78 +0,0 @@
-us-west-2:
-    vpc-cd63f2a4:
-        10.132.48.0/22:
-            name: srv
-            routing_table: masters
-
-        10.132.52.0/22:
-            name: build
-            routing_table: builders
-
-        10.132.56.0/22:
-            name: test
-            routing_table: testers
-
-        10.132.64.0/22:
-            name: try
-            routing_table: try
-
-        10.132.68.0/26:
-            name: bb
-            routing_table: masters
-
-        10.132.68.64/26:
-            name: bb
-            routing_table: masters
-
-        10.132.68.128/26:
-            name: bb
-            routing_table: masters
-
-        10.132.68.192/26:
-            name: bb
-            routing_table: masters
-
-        10.132.156.0/22:
-            name: test
-            routing_table: testers
-
-
-us-east-1:
-    vpc-b42100df:
-        10.134.48.0/22:
-            name: srv
-            routing_table: masters
-
-        10.134.52.0/22:
-            name: build
-            routing_table: builders
-
-        10.134.56.0/22:
-            name: test
-            routing_table: testers
-
-        10.134.64.0/22:
-            name: try
-            routing_table: try
-
-        10.134.68.0/26:
-            name: bb
-            routing_table: masters
-
-        10.134.68.64/26:
-            name: bb
-            routing_table: masters
-
-        10.134.68.128/26:
-            name: bb
-            routing_table: masters
-
-        10.134.68.192/26:
-            name: bb
-            routing_table: masters
-
-
-        10.134.156.0/22:
-            name: test
-            routing_table: testers
-            skip_azs: []
deleted file mode 100644
--- a/configs/try-linux64
+++ /dev/null
@@ -1,73 +0,0 @@
-{
-    "hostname": "try-linux64-ec2-%03d",
-    "us-east-1": {
-        "type": "try-linux64",
-        "domain": "try.releng.use1.mozilla.com",
-        "ami": "ami-264ae44e",
-        "subnet_ids": ["subnet-27a9834c", "subnet-39a98352", "subnet-3ea98355", "subnet-93b285e7", "subnet-e5bacacd", "subnet-cd83d28b"],
-        "security_group_ids": ["sg-718b1214"],
-        "instance_type": "c3.xlarge",
-        "distro": "centos",
-        "ssh_key": "aws-releng",
-        "use_public_ip": true,
-        "instance_profile_name": "try-linux64",
-        "device_map": {
-            "/dev/xvda": {
-                "delete_on_termination": true,
-                "skip_resize": true,
-                "volume_type": "gp2",
-                "instance_dev": "/dev/xvda1"
-            },
-            "/dev/sdb": {
-                "ephemeral_name": "ephemeral0",
-                "instance_dev": "/dev/xvdb",
-                "skip_resize": true,
-                "delete_on_termination": false
-            },
-            "/dev/sdc": {
-                "ephemeral_name": "ephemeral1",
-                "instance_dev": "/dev/xvdc",
-                "skip_resize": true,
-                "delete_on_termination": false
-            }
-        },
-        "tags": {
-            "moz-type": "try-linux64"
-        }
-    },
-    "us-west-2": {
-        "type": "try-linux64",
-        "domain": "try.releng.usw2.mozilla.com",
-        "ami": "ami-d94b0be9",
-        "subnet_ids": ["subnet-ae48dac7", "subnet-a348daca", "subnet-a448dacd", "subnet-72b68206"],
-        "security_group_ids": ["sg-3aaa095f"],
-        "instance_type": "c3.xlarge",
-        "distro": "centos",
-        "ssh_key": "aws-releng",
-        "use_public_ip": true,
-        "instance_profile_name": "try-linux64",
-        "device_map": {
-            "/dev/xvda": {
-                "delete_on_termination": true,
-                "skip_resize": true,
-                "volume_type": "gp2",
-                "instance_dev": "/dev/xvda1"
-            },
-            "/dev/sdb": {
-                "ephemeral_name": "ephemeral0",
-                "instance_dev": "/dev/xvdb",
-                "skip_resize": true,
-                "delete_on_termination": false
-            },
-            "/dev/sdc": {
-                "ephemeral_name": "ephemeral1",
-                "instance_dev": "/dev/xvdc",
-                "skip_resize": true,
-                "delete_on_termination": false
-            }
-        },
-        "tags": {
-            "moz-type": "try-linux64"
-        }
-    }
-}
deleted file mode 100644
--- a/configs/try-linux64-s3
+++ /dev/null
@@ -1,61 +0,0 @@
-{
-    "hostname": "try-linux64-ec2-%03d",
-    "us-east-1": {
-        "type": "try-linux64",
-        "domain": "try.releng.use1.mozilla.com",
-        "ami": "ami-c469c0ac",
-        "subnet_ids": ["subnet-27a9834c", "subnet-39a98352", "subnet-3ea98355", "subnet-93b285e7", "subnet-e5bacacd", "subnet-cd83d28b"],
-        "security_group_ids": ["sg-718b1214"],
-        "instance_type": "c3.xlarge",
-        "distro": "centos",
-        "ssh_key": "aws-releng",
-        "use_public_ip": true,
-        "instance_profile_name": "try-linux64",
-        "device_map": {
-            "/dev/sdb": {
-                "ephemeral_name": "ephemeral0",
-                "instance_dev": "/dev/xvdb",
-                "skip_resize": true,
-                "delete_on_termination": false
-            },
-            "/dev/sdc": {
-                "ephemeral_name": "ephemeral1",
-                "instance_dev": "/dev/xvdc",
-                "skip_resize": true,
-                "delete_on_termination": false
-            }
-        },
-        "tags": {
-            "moz-type": "try-linux64"
-        }
-    },
-    "us-west-2": {
-        "type": "try-linux64",
-        "domain": "try.releng.usw2.mozilla.com",
-        "ami": "ami-058ece35",
-        "subnet_ids": ["subnet-ae48dac7", "subnet-a348daca", "subnet-a448dacd", "subnet-72b68206"],
-        "security_group_ids": ["sg-3aaa095f"],
-        "instance_type": "c3.xlarge",
-        "distro": "centos",
-        "ssh_key": "aws-releng",
-        "use_public_ip": true,
-        "instance_profile_name": "try-linux64",
-        "device_map": {
-            "/dev/sdb": {
-                "ephemeral_name": "ephemeral0",
-                "instance_dev": "/dev/xvdb",
-                "skip_resize": true,
-                "delete_on_termination": false
-            },
-            "/dev/sdc": {
-                "ephemeral_name": "ephemeral1",
-                "instance_dev": "/dev/xvdc",
-                "skip_resize": true,
-                "delete_on_termination": false
-            }
-        },
-        "tags": {
-            "moz-type": "try-linux64"
-        }
-    }
-}
deleted file mode 120000
--- a/configs/try-linux64.cloud-init
+++ /dev/null
@@ -1,1 +0,0 @@
-bld-linux64.cloud-init
\ No newline at end of file
deleted file mode 100644
--- a/configs/tst-emulator64
+++ /dev/null
@@ -1,75 +0,0 @@
-{
-    "hostname": "tst-emulator64-ec2-%03d",
-    "us-east-1": {
-        "type": "tst-emulator64",
-        "domain": "test.releng.use1.mozilla.com",
-        "ami": "ami-e48e1e8d",
-        "subnet_ids": ["subnet-ae35ccc4", "subnet-8f32cbe5", "subnet-ff3542d7",
-                       "subnet-b8643190", "subnet-fb97bc8f", "subnet-844b7ec2",
-                       "subnet-ed35cc87", "subnet-5cd0d828", "subnet-7ca5f03a"],
-        "security_group_ids": ["sg-f0f1239f"],
-        "instance_type": "c3.xlarge",
-        "distro": "ubuntu",
-        "ssh_key": "aws-releng",
-        "use_public_ip": true,
-        "instance_profile_name": "tst-emulator64",
-        "device_map": {
-            "/dev/sda1": {
-                "size": 20,
-                "volume_type": "gp2",
-                "instance_dev": "/dev/xvda1"
-            },
-            "/dev/sdb": {
-                "ephemeral_name": "ephemeral0",
-                "instance_dev": "/dev/xvdb",
-                "skip_resize": true,
-                "delete_on_termination": false
-            },
-            "/dev/sdc": {
-                "ephemeral_name": "ephemeral1",
-                "instance_dev": "/dev/xvdc",
-                "skip_resize": true,
-                "delete_on_termination": false
-            }
-        },
-        "tags": {
-            "moz-type": "tst-emulator64"
-        }
-    },
-    "us-west-2": {
-        "type": "tst-emulator64",
-        "domain": "test.releng.usw2.mozilla.com",
-        "ami": "ami-e00a80d0",
-        "subnet_ids": ["subnet-be89a2ca", "subnet-e4464a90", "subnet-d6cba8bf",
-                       "subnet-aecba8c7", "subnet-56082710", "subnet-a4cba8cd",
-                        "subnet-737f9216", "subnet-ec464a98"],
-        "security_group_ids": ["sg-8b9f7ce4"],
-        "instance_type": "c3.xlarge",
-        "distro": "ubuntu",
-        "ssh_key": "aws-releng",
-        "use_public_ip": true,
-        "instance_profile_name": "tst-emulator64",
-        "device_map": {
-            "/dev/sda1": {
-                "size": 20,
-                "volume_type": "gp2",
-                "instance_dev": "/dev/xvda1"
-            },
-            "/dev/sdb": {
-                "ephemeral_name": "ephemeral0",
-                "instance_dev": "/dev/xvdb",
-                "skip_resize": true,
-                "delete_on_termination": false
-            },
-            "/dev/sdc": {
-                "ephemeral_name": "ephemeral1",
-                "instance_dev": "/dev/xvdc",
-                "skip_resize": true,
-                "delete_on_termination": false
-            }
-        },
-        "tags": {
-            "moz-type": "tst-emulator64"
-        }
-    }
-}
deleted file mode 120000
--- a/configs/tst-emulator64.cloud-init
+++ /dev/null
@@ -1,1 +0,0 @@
-tst-linux64.cloud-init
\ No newline at end of file
deleted file mode 100644
--- a/configs/tst-linux32
+++ /dev/null
@@ -1,51 +0,0 @@
-{
-    "hostname": "tst-linux32-ec2-%03d",
-    "us-east-1": {
-        "type": "tst-linux32",
-        "domain": "test.releng.use1.mozilla.com",
-        "ami": "ami-ea8e1e83",
-        "subnet_ids": ["subnet-ae35ccc4", "subnet-8f32cbe5", "subnet-ff3542d7",
-                       "subnet-b8643190", "subnet-fb97bc8f", "subnet-844b7ec2",
-                       "subnet-ed35cc87", "subnet-5cd0d828", "subnet-7ca5f03a"],
-        "security_group_ids": ["sg-f0f1239f"],
-        "instance_type": "m1.medium",
-        "distro": "ubuntu",
-        "ssh_key": "aws-releng",
-        "use_public_ip": true,
-        "instance_profile_name": "tst-linux32",
-        "device_map": {
-            "/dev/sda1": {
-                "size": 15,
-                "volume_type": "gp2",
-                "instance_dev": "/dev/xvda1"
-            }
-        },
-        "tags": {
-            "moz-type": "tst-linux32"
-        }
-    },
-    "us-west-2": {
-        "type": "tst-linux32",
-        "domain": "test.releng.usw2.mozilla.com",
-        "ami": "ami-040c8634",
-        "subnet_ids": ["subnet-be89a2ca", "subnet-e4464a90", "subnet-d6cba8bf",
-                       "subnet-aecba8c7", "subnet-56082710", "subnet-a4cba8cd",
-                        "subnet-737f9216", "subnet-ec464a98"],
-        "security_group_ids": ["sg-8b9f7ce4"],
-        "instance_type": "m1.medium",
-        "distro": "ubuntu",
-        "ssh_key": "aws-releng",
-        "use_public_ip": true,
-        "instance_profile_name": "tst-linux32",
-        "device_map": {
-            "/dev/sda1": {
-                "size": 15,
-                "volume_type": "gp2",
-                "instance_dev": "/dev/xvda1"
-            }
-        },
-        "tags": {
-            "moz-type": "tst-linux32"
-        }
-    }
-}
deleted file mode 120000
--- a/configs/tst-linux32.cloud-init
+++ /dev/null
@@ -1,1 +0,0 @@
-tst-linux64.cloud-init
\ No newline at end of file
deleted file mode 100644
--- a/configs/tst-linux64
+++ /dev/null
@@ -1,51 +0,0 @@
-{
-    "hostname": "tst-linux64-ec2-%03d",
-    "us-east-1": {
-        "type": "tst-linux64",
-        "domain": "test.releng.use1.mozilla.com",
-        "ami": "ami-e48e1e8d",
-        "subnet_ids": ["subnet-ae35ccc4", "subnet-8f32cbe5", "subnet-ff3542d7",
-                       "subnet-b8643190", "subnet-fb97bc8f", "subnet-844b7ec2",
-                       "subnet-ed35cc87", "subnet-5cd0d828", "subnet-7ca5f03a"],
-        "security_group_ids": ["sg-f0f1239f"],
-        "instance_type": "m1.medium",
-        "distro": "ubuntu",
-        "ssh_key": "aws-releng",
-        "use_public_ip": true,
-        "instance_profile_name": "tst-linux64",
-        "device_map": {
-            "/dev/sda1": {
-                "size": 20,
-                "volume_type": "gp2",
-                "instance_dev": "/dev/xvda1"
-            }
-        },
-        "tags": {
-            "moz-type": "tst-linux64"
-        }
-    },
-    "us-west-2": {
-        "type": "tst-linux64",
-        "domain": "test.releng.usw2.mozilla.com",
-        "ami": "ami-e00a80d0",
-        "subnet_ids": ["subnet-be89a2ca", "subnet-e4464a90", "subnet-d6cba8bf",
-                       "subnet-aecba8c7", "subnet-56082710", "subnet-a4cba8cd",
-                        "subnet-737f9216", "subnet-ec464a98"],
-        "security_group_ids": ["sg-8b9f7ce4"],
-        "instance_type": "m1.medium",
-        "distro": "ubuntu",
-        "ssh_key": "aws-releng",
-        "use_public_ip": true,
-        "instance_profile_name": "tst-linux64",
-        "device_map": {
-            "/dev/sda1": {
-                "size": 20,
-                "volume_type": "gp2",
-                "instance_dev": "/dev/xvda1"
-            }
-        },
-        "tags": {
-            "moz-type": "tst-linux64"
-        }
-    }
-}
deleted file mode 100644
--- a/configs/tst-linux64-hvm
+++ /dev/null
@@ -1,53 +0,0 @@
-{
-    "hostname": "tst-linux64-ec2-%03d",
-    "us-east-1": {
-        "type": "tst-linux64",
-        "domain": "test.releng.use1.mozilla.com",
-        "ami": "ami-6262e30a",
-        "subnet_ids": ["subnet-ae35ccc4", "subnet-8f32cbe5", "subnet-ff3542d7",
-                       "subnet-b8643190", "subnet-fb97bc8f", "subnet-844b7ec2",
-                       "subnet-ed35cc87", "subnet-5cd0d828", "subnet-7ca5f03a"],
-        "security_group_ids": ["sg-f0f1239f"],
-        "instance_type": "g2.2xlarge",
-        "distro": "ubuntu",
-        "ssh_key": "aws-releng",
-        "use_public_ip": true,
-        "instance_profile_name": "tst-linux64",
-        "device_map": {
-            "/dev/sda1": {
-                "delete_on_termination": true,
-                "skip_resize": true,
-                "volume_type": "gp2",
-                "instance_dev": "/dev/xvda1"
-            }
-        },
-        "tags": {
-            "moz-type": "tst-linux64"
-        }
-    },
-    "us-west-2": {
-        "type": "tst-linux64",
-        "domain": "test.releng.usw2.mozilla.com",
-        "ami": "ami-a7dd9397",
-        "subnet_ids": ["subnet-be89a2ca", "subnet-e4464a90", "subnet-d6cba8bf",
-                       "subnet-aecba8c7", "subnet-56082710", "subnet-a4cba8cd",
-                        "subnet-737f9216", "subnet-ec464a98"],
-        "security_group_ids": ["sg-8b9f7ce4"],
-        "instance_type": "g2.2xlarge",
-        "distro": "ubuntu",
-        "ssh_key": "aws-releng",
-        "use_public_ip": true,
-        "instance_profile_name": "tst-linux64",
-        "device_map": {
-            "/dev/sda1": {
-                "delete_on_termination": true,
-                "skip_resize": true,
-                "volume_type": "gp2",
-                "instance_dev": "/dev/xvda1"
-            }
-        },
-        "tags": {
-            "moz-type": "tst-linux64"
-        }
-    }
-}
deleted file mode 100644
--- a/configs/tst-linux64.cloud-init
+++ /dev/null
@@ -1,10 +0,0 @@
-#cloud-config
-
-fqdn: {fqdn}
-hostname: {fqdn}
-package_update: false
-resize_rootfs: true
-manage_etc_hosts: true
-disable_root: false
-ssh_pwauth: true
-moz_instance_type: {moz_instance_type}
deleted file mode 100644
--- a/configs/tst-win64
+++ /dev/null
@@ -1,44 +0,0 @@
-{
-    "hostname": "tst-win64-ec2-%03d",
-    "us-east-1": {
-        "type": "tst-win64",
-        "domain": "test.releng.use1.mozilla.com",
-        "dns_search_domain": "srv.releng.use1.mozilla.com",
-        "ami_desc": "Windows_Server-2012-RTM-English-64Bit-Base-2013.11.13",
-        "ami": "ami-7527031c",
-        "subnet_ids": ["subnet-8f32cbe5", "subnet-3835cc52", "subnet-ed35cc87", "subnet-ae35ccc4"],
-        "security_group_desc": ["default VPC security group", "windows slaves"],
-        "security_group_ids": ["sg-18a07677", "sg-f3927c9c"],
-        "instance_type": "m1.medium",
-        "distro": "win2012",
-        "user_data_file": "configs/tst-win64.user_data",
-        "use_public_ip": true,
-        "device_map": {
-            "/dev/sda1": {
-                "size": 30,
-                "instance_dev": "C:"
-            }
-        }
-    },
-    "us-west-2": {
-        "type": "tst-win64",
-        "domain": "test.releng.usw2.mozilla.com",
-        "dns_search_domain": "srv.releng.usw2.mozilla.com",
-        "ami_desc": "tst-win64-2014-03-17-14-26",
-        "ami": "ami-c6c8a6f6",
-        "subnet_ids": ["subnet-a4cba8cd", "subnet-aecba8c7", "subnet-be89a2ca", "subnet-d6cba8bf"],
-        "security_group_desc": ["default VPC security group", "windows slaves"],
-        "security_group_ids": ["sg-d5617cb9", "sg-84beade6"],
-        "instance_type": "m1.medium",
-        "distro": "win2012",
-        "user_data_file": "configs/tst-win64.user_data",
-        "use_public_ip": true,
-        "device_map": {
-            "/dev/sda1": {
-                "size": 30,
-                "instance_dev": "C:"
-            }
-        }
-    }
-
-}
deleted file mode 100644
--- a/configs/tst-win64.user_data
+++ /dev/null
@@ -1,111 +0,0 @@
-<powershell>
-Start-Transcript -Path 'c:\userdata-transcript.txt' -Force
-Set-StrictMode -Version Latest
-Set-ExecutionPolicy Unrestricted
-
-Import-Module AWSPowerShell
-
-$log = 'c:\userdata-log.txt'
-Function Log ($str) {{
-    $d = Get-Date
-    Add-Content $log -value "$d - $str"
-}}
-
-Log "Userdata started"
-
-# We need this helper, because PowerShell has a separate
-# notion of directory for all child commands, and directory
-# for the script. Running commands directly use the
-# location set by cd, while things like DownloadFile
-# will use the script directory (set by SetCurrentDirectory)
-#
-# This function makes things a little bit easier to follow
-Function SetDirectory ($dir) {{
-    Set-Location $dir
-    [System.IO.Directory]::SetCurrentDirectory($dir)
-}}
-
-# silent MSI install helper
-Function InstallMSI ($msi) {{
-    Start-Process -Wait -FilePath "msiexec.exe" -ArgumentList "/qb /i $msi"
-}}
-
-# HTTP download helper
-Function GetFromHTTP ($url, $path) {{
-    Log "Downloading $url to $path"
-    $client = new-object System.Net.WebClient
-    $client.DownloadFile($url, $path)
-}}
-
-# For setting the hostname
-Function SetHostname ($hostname, $domain) {{
-        # http://msdn.microsoft.com/en-us/library/ms724224(v=vs.85).aspx
-        $ComputerNamePhysicalDnsHostname = 5
-        $ComputerNamePhysicalDnsDomain = 6
-
-        Add-Type -TypeDefinition @"
-        using System;
-        using System.Runtime.InteropServices;
-
-        namespace ComputerSystem {{
-            public class Identification {{
-                [DllImport("kernel32.dll", CharSet = CharSet.Auto)]
-                static extern bool SetComputerNameEx(int NameType, string lpBuffer);
-
-                public static bool SetPrimaryDnsSuffix(string suffix) {{
-                    try {{
-                        return SetComputerNameEx($ComputerNamePhysicalDnsDomain, suffix);
-                    }}
-                    catch (Exception) {{
-                        return false;
-                    }}
-                }}
-            }}
-        }}
-"@
-        [ComputerSystem.Identification]::SetPrimaryDnsSuffix($domain)
-        $computerName = Get-WmiObject Win32_ComputerSystem 
-        $computerName.Rename($hostname)
-}}
-
-Log "Setting hostname"
-SetHostname {hostname} {domain}
-Log "Setting dns search list - {dns_search_domain}"
-Set-DnsClientGlobalSetting -SuffixSearchList @("{dns_search_domain}")
-
-SetDirectory $Env:USERPROFILE
-
-Log "Setting up ssh"
-SetDirectory "C:\Program Files (x86)\KTS"
-& .\install.bat
-Log "Done"
-
-### cltbld-starter logs in first, which then logs in locally as cltbld via RDP
-Log "Setting up autologon."
-SetDirectory $Env:USERPROFILE
-Start-Process -Wait -FilePath Autologon.exe -ArgumentList "/accepteula cltbld-starter $env:COMPUTERNAME {password}"
-Log "Done"
-
-Log "Resetting Admininstrator password"
-net user Administrator {password}
-wmic path Win32_UserAccount where Name='Administrator' set PasswordExpires=false
-
-### User policies allow cltbld to reboot the machine
-Log "Setting up cltbld user policies"
-secedit /import /cfg userpolicy.inf /db userpolicy.sdb
-secedit /configure /db userpolicy.sdb
-Remove-Item userpolicy.inf
-Remove-Item userpolicy.sdb
-wmic path Win32_UserAccount where Name='cltbld' set PasswordExpires=false
-Log "Done"
-
-# Enable the Desktop Experience! Ooooh
-Log "Enabling Desktop Experience"
-dism /Online /Enable-Feature /FeatureName:DesktopExperience /All /NoRestart
-Log "Done"
-
-### Shutdown to signal we're done. We also need to shutdown/restart to get the hostname changed
-### aws_create_instance will clear our user data.
-Log "Done. Shutting down now!"
-shutdown /t 0 /f /s
-</powershell>
deleted file mode 100644
--- a/configs/vcssync-linux64
+++ /dev/null
@@ -1,45 +0,0 @@
-{
-    "hostname": "vcssync%d",
-    "us-east-1": {
-        "type": "vcssync",
-        "domain": "srv.releng.use1.mozilla.com",
-        "ami": "ami-43c7b72a",
-        "subnet_ids": ["subnet-33a98358", "subnet-30a9835b", "subnet-35a9835e", "subnet-0aa98361"],
-        "security_group_ids": [],
-        "instance_type": "m3.xlarge",
-        "disable_api_termination": true,
-        "ssh_key": "aws-releng",
-        "device_map": {
-            "/dev/sda1": {
-                "size": 10,
-                "instance_dev": "/dev/xvde1"
-            },
-            "/dev/sdf": {
-                "skip_resize": true,
-                "size": 100,
-                "instance_dev": "/dev/xvdj"
-            }
-        }
-    },
-    "us-west-2": {
-        "type": "vcssync",
-        "domain": "srv.releng.usw2.mozilla.com",
-        "ami": "ami-516cfc61",
-        "subnet_ids": ["subnet-b948dad0", "subnet-ba48dad3", "subnet-bf48dad6"],
-        "security_group_ids": [],
-        "instance_type": "m3.xlarge",
-        "disable_api_termination": true,
-        "ssh_key": "aws-releng",
-        "device_map": {
-            "/dev/sda1": {
-                "size": 10,
-                "instance_dev": "/dev/xvde1"
-            },
-            "/dev/sdf": {
-                "skip_resize": true,
-                "size": 100,
-                "instance_dev": "/dev/xvdj"
-            }
-        }
-    }
-}
deleted file mode 100644
--- a/configs/watch_pending.cfg
+++ /dev/null
@@ -1,161 +0,0 @@
-{
-    "region_priorities": {
-        "us-west-2": 6,
-        "us-east-1": 5,
-        "us-west-1": 0
-    },
-    "buildermap": {
-        "Android.* (?!try|Tegra|Panda|Emulator)\\S+ (build|non-unified)": "bld-linux64",
-        "Android.* (?!Tegra|Panda|Emulator)try build": "try-linux64",
-        "^Linux (?!try)\\S+ (pgo-|leak test )?build": "bld-linux64",
-        "^Linux x86-64 (?!try)\\S+ (pgo-|leak test |asan |debug asan |debug static analysis )?(build|non-unified)": "bld-linux64",
-        "^(Android|Linux).* (nightly|non-profiling)": "bld-linux64",
-        "^Linux( x86-64)? .*(?!try )?valgrind": "bld-linux64",
-        "^Linux( x86-64)? try.*": "try-linux64",
-        "^b2g_(?!try)\\S+_(unagi|panda|otoro|leo|inari|hamachi|emulator|emulator-debug|emulator-jb|emulator-jb-debug|emulator-kk|emulator-kk-debug|helix|leo|nexus-4|wasabi|flame)(_eng)?_(dep|nightly|periodic|nonunified)": "bld-linux64",
-        "^b2g_try_(emulator|emulator-debug|emulator-jb|emulator-jb-debug|emulator-kk|emulator-kk-debug)_(dep|nightly|nonunified)": "try-linux64",
-        "^b2g_(?!try)\\S+_linux(32|64)_gecko(-debug)?": "bld-linux64",
-        "^Firefox (mozilla-central|mozilla-aurora) linux(64)? l10n nightly": "bld-linux64",
-        "^Thunderbird (comm-central|comm-aurora) linux(64)? l10n nightly": "bld-linux64",
-        "^b2g_try_linux(32|64)_gecko(-debug)?": "try-linux64",
-        "^Ubuntu VM 12.04 (?!x64).*": "tst-linux32",
-        "^Ubuntu VM 12.04 x64.*": "tst-linux64",
-        "^Ubuntu Mulet VM 12.04 x64.*": "tst-linux64",
-        "^Ubuntu ASAN VM 12.04 x64.*": "tst-linux64",
-        "^b2g_(emulator|ubuntu64)_vm": "tst-linux64",
-        "^Android 2.3( Armv6)? Emulator(?:(?!plain-reftest|crashtest|jsreftest).)*$": "tst-linux64",
-        "^Android 2.3( Armv6)? Emulator.*": "tst-emulator64",
-        "^Android armv7 API 9.*test (?:(?!plain-reftest|crashtest|jsreftest).)*$": "tst-linux64",
-        "^Android armv7 API 9.*test .*": "tst-emulator64",
-        "^b2g_ubuntu32_vm": "tst-linux32",
-        "^b2g_emulator_vm_large cedar.* (gaia-ui-test|mochitest-media)": "tst-emulator64"
-    },
-    "spot": {
-        "rules": {
-            "tst-linux64": [
-                {"instance_type": "m1.medium",
-                 "ignored_azs": ["us-east-1b", "us-east-1e"],
-                "performance_constant": 1,
-                "bid_price": 0.07}
-            ],
-            "tst-linux32": [
-                {"instance_type": "m1.medium",
-                 "ignored_azs": ["us-east-1b", "us-east-1e"],
-                 "performance_constant": 1,
-                 "bid_price": 0.07}
-            ],
-            "tst-emulator64": [
-               {"instance_type": "c3.xlarge",
-                 "ignored_azs": ["us-east-1b", "us-east-1e"],
-                "performance_constant": 1,
-                "bid_price": 0.18},
-               {"instance_type": "m3.xlarge",
-                "performance_constant": 1.1,
-                 "ignored_azs": ["us-east-1b", "us-east-1e"],
-                "bid_price": 0.18}
-            ],
-            "bld-linux64": [
-                {"instance_type": "c3.xlarge",
-                 "ignored_azs": ["us-east-1b", "us-east-1e"],
-                 "performance_constant": 1,
-                 "bid_price": 0.18},
-                {"instance_type": "m3.xlarge",
-                 "performance_constant": 1.1,
-                 "ignored_azs": ["us-east-1b", "us-east-1e"],
-                 "bid_price": 0.18},
-                {"instance_type": "r3.xlarge",
-                 "ignored_azs": ["us-east-1b", "us-east-1e"],
-                 "performance_constant": 1.2,
-                 "bid_price": 0.18},
-                {"instance_type": "r3.2xlarge",
-                 "ignored_azs": ["us-east-1b", "us-east-1e"],
-                 "performance_constant": 2.2,
-                 "bid_price": 0.18},
-                {"instance_type": "m3.2xlarge",
-                 "ignored_azs": ["us-east-1b", "us-east-1e"],
-                 "performance_constant": 2,
-                 "bid_price": 0.18},
-                {"instance_type": "c3.2xlarge",
-                 "performance_constant": 2,
-                 "ignored_azs": ["us-east-1b", "us-east-1e"],
-                 "bid_price": 0.18}
-            ],
-            "try-linux64": [
-                {"instance_type": "c3.xlarge",
-                 "performance_constant": 1,
-                 "ignored_azs": ["us-east-1b", "us-east-1e"],
-                 "bid_price": 0.18},
-                {"instance_type": "m3.xlarge",
-                 "performance_constant": 1.1,
-                 "ignored_azs": ["us-east-1b", "us-east-1e"],
-                 "bid_price": 0.18},
-                {"instance_type": "r3.xlarge",
-                 "performance_constant": 1.2,
-                 "ignored_azs": ["us-east-1b", "us-east-1e"],
-                 "bid_price": 0.18},
-                {"instance_type": "r3.2xlarge",
-                 "performance_constant": 2.2,
-                 "ignored_azs": ["us-east-1b", "us-east-1e"],
-                 "bid_price": 0.18},
-                {"instance_type": "m3.2xlarge",
-                 "performance_constant": 2,
-                 "ignored_azs": ["us-east-1b", "us-east-1e"],
-                 "bid_price": 0.18},
-                {"instance_type": "c3.2xlarge",
-                 "ignored_azs": ["us-east-1b", "us-east-1e"],
-                 "performance_constant": 2,
-                 "bid_price": 0.18}
-            ]
-        },
-        "limits": {
-            "global": {
-                "tst-linux64": 1300,
-                "tst-linux32": 999,
-                "tst-emulator64": 1300,
-                "bld-linux64": 600
-            },
-            "us-east-1": {
-                "tst-linux64": 1000,
-                "tst-linux32": 666,
-                "tst-emulator64": 1000,
-                "bld-linux64": 400,
-                "try-linux64": 200
-            },
-            "us-west-2": {
-                "tst-linux64": 1000,
-                "tst-linux32": 666,
-                "tst-emulator64": 1000,
-                "bld-linux64": 400,
-                "try-linux64": 200
-            }
-        }
-    },
-    "ondemand": {
-        "limits": {
-            "global": {
-                "tst-linux64": 5,
-                "tst-linux32": 5,
-                "tst-emulator64": 5,
-                "try-linux64": 5,
-                "bld-linux64": 5
-            },
-            "us-east-1": {
-                "tst-linux64": 5,
-                "tst-linux32": 5,
-                "tst-emulator64": 5,
-                "try-linux64": 5,
-                "bld-linux64": 5
-            },
-            "us-west-2": {
-                "tst-linux64": 5,
-                "tst-linux32": 5,
-                "tst-emulator64": 5,
-                "try-linux64": 5,
-                "bld-linux64": 5
-            }
-        }
-    },
-    "graphite_host": "graphite-relay.private.scl3.mozilla.com",
-    "graphite_port": 2003,
-    "graphite_prefix": "releng.aws.aws_watch_pending"
-}
deleted file mode 100644
--- a/configs/watch_pending.cfg.example
+++ /dev/null
@@ -1,79 +0,0 @@
-{
-    "region_priorities": {
-        "us-west-2": 4,
-        "us-east-1": 5,
-        "us-west-1": 0
-    },
-    "buildermap": {
-        "Android.* (?!try)\\S+ build": "bld-linux64",
-        "Android.* try build": "try-linux64",
-        "^Linux (?!try)\\S+ (pgo-|leak test )?build": "bld-linux64",
-        "^Linux x86-64 (?!try)\\S+ (pgo-|leak test |asan |debug asan |debug static analysis )?build": "bld-linux64",
-        "^Linux.* nightly": "bld-linux64",
-        "^Linux.* valgrind": "bld-linux64",
-        "^Linux.* try.*build": "try-linux64",
-        "^b2g_(?!try)\\S+_(unagi|panda|otoro|leo|inari|hamachi|emulator|helix|leo|leo_eng)_(dep|nightly)": "bld-linux64",
-        "^b2g_try_(unagi|panda|otoro|leo|inari|hamachi|emulator)_(dep|nightly)": "try-linux64",
-        "^b2g_(?!try)\\S+_linux(32|64)_gecko(-debug)?": "bld-linux64",
-        "^b2g_try\\S+_linux(32|64)_gecko(-debug)?": "try-linux64",
-        "^Ubuntu VM 12.04 (?!x64).*": "tst-linux32",
-        "^Ubuntu VM 12.04 x64.*": "tst-linux64",
-        "^Ubuntu ASAN VM 12.04 x64.*": "tst-linux64"
-    },
-    "spot": {
-        "rules": {
-            "tst-linux64": [
-                {"instance_type": "m1.medium",
-                "performance_constant": 1,
-                "bid_price": 0.09}
-            ],
-            "tst-linux32": [
-                {"instance_type": "m1.medium",
-                 "performance_constant": 1,
-                 "bid_price": 0.09}
-            ],
-            "bld-linux64": [
-                {"instance_type": "c3.xlarge",
-                 "performance_constant": 1,
-                 "bid_price": 0.25},
-                {"instance_type": "m3.xlarge",
-                 "performance_constant": 1.1,
-                 "bid_price": 0.25},
-                {"instance_type": "m3.2xlarge",
-                 "performance_constant": 1.2,
-                 "bid_price": 0.25},
-                {"instance_type": "c3.2xlarge",
-                 "performance_constant": 1.2,
-                 "bid_price": 0.25}
-            ],
-            "try-linux64": [
-                {"instance_type": "c3.xlarge",
-                 "performance_constant": 1,
-                 "bid_price": 0.25},
-                {"instance_type": "m3.xlarge",
-                 "performance_constant": 1.1,
-                 "bid_price": 0.25},
-                {"instance_type": "m3.2xlarge",
-                 "performance_constant": 1.2,
-                 "bid_price": 0.25},
-                {"instance_type": "c3.2xlarge",
-                 "performance_constant": 1.2,
-                 "bid_price": 0.25}
-            ]
-        },
-        "limits": {
-            "us-east-1": {
-            "tst-linux64": 200,
-            "tst-linux32": 200,
-            "bld-linux64": 100,
-            "try-linux64": 200
-            },
-            "us-west-2": {
-            "tst-linux64": 200,
-            "tst-linux32": 200,
-            "bld-linux64": 100,
-            "try-linux64": 200
-            }
-        }
-    }
-}
deleted file mode 100644
deleted file mode 100644
--- a/instance_data/us-east-1.instance_data_dev.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
-  "buildslave_password": "pass",
-  "buildbot_master": "10.12.48.14:1313",
-  "puppet_masters": ["releng-puppet1.srv.releng.scl3.mozilla.com", "releng-puppet2.srv.releng.scl3.mozilla.com"]
-}
deleted file mode 100644
--- a/instance_data/us-east-1.instance_data_master.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
-  "puppet_masters": ["releng-puppet1.srv.releng.scl3.mozilla.com", "releng-puppet2.srv.releng.scl3.mozilla.com"]
-}
deleted file mode 100644
--- a/instance_data/us-east-1.instance_data_prod.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
-  "puppet_masters": ["releng-puppet1.srv.releng.scl3.mozilla.com", "releng-puppet2.srv.releng.scl3.mozilla.com"],
-  "hg_repos": {
-    "build/mozharness": "https://hg.mozilla.org/build/mozharness",
-    "build/tools": "https://hg.mozilla.org/build/tools"
-  },
-  "s3_tarballs": {
-    "/builds/git-shared/repo": {"bucket": "mozilla-releng-tarballs-use1", "key": "git-shared-repo.tar"},
-    "/builds/hg-shared/integration/gaia-central/.hg": {"bucket": "mozilla-releng-tarballs-use1", "key": "gaia-central.tar"},
-    "/builds/hg-shared/integration/mozilla-inbound/.hg": {"bucket": "mozilla-releng-tarballs-use1", "key": "mozilla-inbound.tar"}
-  }
-}
deleted file mode 100644
--- a/instance_data/us-east-1.instance_data_tests.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-  "puppet_masters": ["releng-puppet1.srv.releng.scl3.mozilla.com", "releng-puppet2.srv.releng.scl3.mozilla.com"],
-  "hg_repos": {
-    "build/mozharness": "https://hg.mozilla.org/build/mozharness",
-    "build/tools": "https://hg.mozilla.org/build/tools"
-  },
-  "s3_tarballs": {
-    "/builds/hg-shared/integration/gaia-central/.hg": {"bucket": "mozilla-releng-tarballs-use1", "key": "gaia-central.tar"}
-  }
-}
deleted file mode 100644
--- a/instance_data/us-east-1.instance_data_try.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
-  "puppet_masters": ["releng-puppet1.srv.releng.scl3.mozilla.com", "releng-puppet2.srv.releng.scl3.mozilla.com"],
-  "hg_repos": {
-    "build/mozharness": "https://hg.mozilla.org/build/mozharness",
-    "build/tools": "https://hg.mozilla.org/build/tools"
-  },
-  "s3_tarballs": {
-    "/builds/git-shared/repo": {"bucket": "mozilla-releng-tarballs-use1", "key": "git-shared-repo.tar"},
-    "/builds/hg-shared/integration/gaia-central/.hg": {"bucket": "mozilla-releng-tarballs-use1", "key": "gaia-central.tar"},
-    "/builds/hg-shared/try/.hg": {"bucket": "mozilla-releng-tarballs-use1", "key": "try.tar"}
-  }
-}
deleted file mode 120000
--- a/instance_data/us-west-2.instance_data_dev.json
+++ /dev/null
@@ -1,1 +0,0 @@
-us-east-1.instance_data_dev.json
\ No newline at end of file
deleted file mode 120000
--- a/instance_data/us-west-2.instance_data_master.json
+++ /dev/null
@@ -1,1 +0,0 @@
-us-east-1.instance_data_master.json
\ No newline at end of file
deleted file mode 120000
--- a/instance_data/us-west-2.instance_data_prod.json
+++ /dev/null
@@ -1,1 +0,0 @@
-us-east-1.instance_data_prod.json
\ No newline at end of file
deleted file mode 120000
--- a/instance_data/us-west-2.instance_data_tests.json
+++ /dev/null
@@ -1,1 +0,0 @@
-us-east-1.instance_data_tests.json
\ No newline at end of file
deleted file mode 120000
--- a/instance_data/us-west-2.instance_data_try.json
+++ /dev/null
@@ -1,1 +0,0 @@
-us-east-1.instance_data_try.json
\ No newline at end of file
deleted file mode 100644
--- a/requirements.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Fabric==1.8.0
-IPy==0.81
-MySQL-python==1.2.5
-PyYAML==3.11
-SQLAlchemy==0.8.3
-argparse>=1.2.1
-boto==2.27.0
-dnspython==1.12.0
-docopt==0.6.1
-ecdsa==0.10
-iso8601==0.1.10
-netaddr==0.7.12
-paramiko==1.12.0
-pycrypto==2.6.1
-repoze.lru==0.6
-requests==2.0.1
-simplejson==3.3.1
-ssh==1.8.0
-wsgiref==0.1.2
deleted file mode 100644
--- a/scripts/aws_clean_log_dir.py
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/env python
-"""Downloads the cloudtrail logs locally"""
-
-import datetime
-import os
-import site
-import shutil
-import json
-
-site.addsitedir(os.path.join(os.path.dirname(__file__), ".."))
-from cloudtools.aws import DEFAULT_REGIONS
-
-import logging
-log = logging.getLogger(__name__)
-
-
-def delete_obsolete_logs(root_dir, reference_dir):
-    """removes cloudtrail directories"""
-    try:
-        # cloudtrails directories are organized by year/month/day
-        #
-        # let's say we run the script with the follows parameters:
-        # root dir      => /builds/aws_cloudtrail
-        # reference_dir => /builds/aws_cloudtrail/2014
-        # all the directories named /builds/aws_cloudtrail/<year>
-        # where year is < 2014 will be deleted
-        # this function is called 3 times with the follwing parametes:
-        # e.g log_dir = /builds/aws_cloudtrail_logs
-        # 1st run:
-        #   root_dir = log_dir
-        #   reference_dir = root_dir/year
-        #   deletes obsolete logs from last year and before
-        # 2nd run:
-        #   root_dir = log_dir/year
-        #   reference_dir = root_dir/year/month
-        #   deletes obsolete logs from last month and before
-        # 3rd run:
-        #   root_dir = log_dir/year/month
-        #   reference_dir = root_dir/year/month/day
-        #   deletes obsolete logs from last day and before
-        # where last day, last month, last year are today - numdays
-        for directory in os.listdir(root_dir):
-            full_path = os.path.join(root_dir, directory)
-            if full_path < reference_dir:
-                # current directory is < than reference dir
-                log.debug("deleting obsolete cloudtrail file: %s",
-                          full_path)
-                shutil.rmtree(full_path)
-    except OSError:
-        # root dir does not exist, nothing to delete here
-        pass
-
-
-def delete_obsolete_json_file(json_file, numdays):
-    """reads a json log and returns the eventTime"""
-    try:
-        with open(json_file) as json_f:
-            data = json.loads(json_f.read())
-            #  event time is stored as: 2014-04-07T18:09:23Z
-            event = datetime.datetime.strptime(data['eventTime'],
-                                               '%Y-%m-%dT%H:%M:%SZ')
-            now = datetime.datetime.now()
-            tdelta = now - event
-            if tdelta.days > numdays:
-                log.debug("deleting: %s (obsolete)" % json_file)
-                os.remove(json_file)
-    except TypeError:
-        log.debug("deleting: %s (not valid)" % json_file)
-        os.remove(json_file)
-    except IOError:
-        # file does not exist
-        pass
-
-if __name__ == '__main__':
-    import argparse
-    parser = argparse.ArgumentParser()
-    parser.add_argument("-v", "--verbose", action="store_true",
-                        help="Increase logging verbosity")
-    parser.add_argument("--cache-dir", metavar="cache_dir", required=True,
-                        help="cache directory. Cloutrail logs are stored here")
-    parser.add_argument("--s3-base-prefix", metavar="s3_base_dir", required=True,
-                        help="root of s3 logs keys")
-    parser.add_argument("--events-dir", metavar="events_dir", required=True,
-                        help="root of the events directory")
-
-    args = parser.parse_args()
-
-    logging.basicConfig(format="%(asctime)s - %(message)s")
-    if args.verbose:
-        log.setLevel(logging.DEBUG)
-    else:
-        log.setLevel(logging.INFO)
-
-    prefixes = []
-    today = datetime.date.today()
-
-    numdays = 60
-    base = datetime.datetime.today()
-    last_day_to_keep = base - datetime.timedelta(days=numdays)
-
-    day = last_day_to_keep.strftime("%d")
-    month = last_day_to_keep.strftime("%m")
-    year = last_day_to_keep.strftime("%Y")
-
-    cache_dir = args.cache_dir
-
-    log.debug("deleting obsolete cloudtrail logs")
-    for region in DEFAULT_REGIONS:
-        aws_cloudtrail_logs = os.path.join(cache_dir, args.s3_base_prefix, region)
-
-        # delete last years
-        root_dir = aws_cloudtrail_logs
-        reference_dir = os.path.join(aws_cloudtrail_logs, year)
-        delete_obsolete_logs(root_dir, reference_dir)
-
-        # delete last months
-        root_dir = reference_dir
-        reference_dir = os.path.join(reference_dir, month)
-        delete_obsolete_logs(root_dir, reference_dir)
-
-        # delete last days
-        root_dir = reference_dir
-        reference_dir = os.path.join(reference_dir, day)
-        delete_obsolete_logs(root_dir, reference_dir)
-
-    log.debug("deleting obsolete event files")
-    for root, dirnames, filenames in os.walk(args.events_dir):
-        for f in filenames:
-            if f.startswith('i-'):
-                # do not delete non instance files
-                instance_event_file = os.path.join(root, f)
-                delete_obsolete_json_file(instance_event_file, numdays)
deleted file mode 100644
--- a/scripts/aws_create_ami.py
+++ /dev/null
@@ -1,516 +0,0 @@
-#!/usr/bin/env python
-
-from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
-import boto
-from fabric.api import run, put, lcd
-from fabric.context_managers import hide
-import json
-import time
-import logging
-import os
-import site
-
-site.addsitedir(os.path.join(os.path.dirname(__file__), ".."))
-from cloudtools.aws import AMI_CONFIGS_DIR, wait_for_status
-from cloudtools.aws.ami import ami_cleanup, copy_ami
-from cloudtools.aws.instance import run_instance, assimilate_instance
-from cloudtools.fabric import setup_fabric_env
-
-log = logging.getLogger(__name__)
-
-
-def manage_service(service, target, state, distro="centos"):
-    assert state in ("on", "off")
-    if distro in ("debian", "ubuntu"):
-        pass
-    else:
-        run('chroot %s chkconfig --level 2345 %s %s' % (target, service,
-                                                        state))
-
-
-def partition_image(mount_dev, int_dev_name, img_file):
-    run("mkdir /mnt-tmp")
-    run("mkfs.ext4 %s" % int_dev_name)
-    run("mount %s /mnt-tmp" % int_dev_name)
-    run("fallocate -l 10G /mnt-tmp/{}".format(img_file))
-    run("losetup /dev/loop0 /mnt-tmp/{}".format(img_file))
-    run('parted -s /dev/loop0 -- mklabel msdos')
-    # /boot uses 64M, reserve 64 sectors for grub
-    run('parted -s -a optimal /dev/loop0 -- mkpart primary ext2 64s 128')
-    # / uses the rest
-    run('parted -s -a optimal /dev/loop0 -- mkpart primary ext2 128 -1s')
-    run('parted -s /dev/loop0 -- set 1 boot on')
-    run('parted -s /dev/loop0 -- set 2 lvm on')
-    run("kpartx -av /dev/loop0")
-    run("mkfs.ext2 /dev/mapper/loop0p1")
-    run("pvcreate /dev/mapper/loop0p2")
-    run("vgcreate cloud_root /dev/mapper/loop0p2")
-    run("lvcreate -n lv_root -l 100%FREE cloud_root")
-
-
-def partition_ebs_volume(int_dev_name):
-    # HVM based instances use EBS disks as raw disks. They are have to be
-    # partitioned first. Additionally ,"1" should the appended to get the
-    # first primary device name.
-    run('parted -s %s -- mklabel msdos' % int_dev_name)
-    # /boot uses 64M, reserve 64 sectors for grub
-    run('parted -s -a optimal %s -- mkpart primary ext2 64s 64' %
-        int_dev_name)
-    # / uses the rest
-    run('parted -s -a optimal %s -- mkpart primary ext2 64 -1s' %
-        int_dev_name)
-    run('parted -s %s -- set 1 boot on' % int_dev_name)
-    run('parted -s %s -- set 2 lvm on' % int_dev_name)
-    run("mkfs.ext2 %s1" % int_dev_name)
-    run("pvcreate %s2" % int_dev_name)
-    run("vgcreate cloud_root %s2" % int_dev_name)
-    run("lvcreate -n lv_root -l 100%FREE cloud_root")
-
-
-def attach_and_wait(host_instance, size, aws_dev_name, int_dev_name):
-    v = host_instance.connection.create_volume(size, host_instance.placement)
-    while True:
-        try:
-            v.attach(host_instance.id, aws_dev_name)
-            break
-        except:
-            log.debug('waiting for volume to be attached')
-            time.sleep(10)
-
-    wait_for_status(v, "status", "in-use", "update")
-    while True:
-        try:
-            if run('ls %s' % int_dev_name).succeeded:
-                break
-        except:
-            log.debug('waiting for volume to appear', exc_info=True)
-            time.sleep(10)
-    return v
-
-
-def read_packages(packages_file):
-    with open(packages_file) as f:
-        packages = " ".join(line.strip() for line in f.readlines())
-
-    return packages
-
-
-def install_packages(packages_file, distro, chroot=None):
-    if distro not in ("debian", "ubuntu"):
-        raise NotImplementedError
-    packages = read_packages(packages_file)
-    if chroot:
-        chroot_prefix = "chroot {} ".format(chroot)
-    else:
-        chroot_prefix = ""
-
-    if distro in ("debian", "ubuntu"):
-        run("{}apt-get update".format(chroot_prefix))
-        run("DEBIAN_FRONTEND=noninteractive {}apt-get install -y "
-            "--force-yes {}".format(chroot_prefix, packages))
-        run("{}apt-get clean".format(chroot_prefix))
-
-
-def sync(src, dst):
-    for local_directory, _, files in os.walk(src, followlinks=True):
-        directory = os.path.relpath(local_directory, src)
-        if directory == '.':
-            directory = ''
-
-        remote_directory = os.path.join(dst, directory)
-        if directory != '':
-            run('mkdir -p %s' % remote_directory)
-
-        for f in files:
-            local_file = os.path.join(local_directory, f)
-            remote_file = os.path.join(remote_directory, f)
-            put(local_file, remote_file, mirror_local_mode=True)
-
-
-def create_ami(host_instance, args, config, instance_config, ssh_key,
-               key_filename, instance_data, deploypass, cert, pkey,
-               ami_name_prefix):
-    connection = host_instance.connection
-    setup_fabric_env(instance=host_instance, abort_on_prompts=True,
-                     disable_known_hosts=True, key_filename=key_filename)
-
-    target_name = args.config
-    virtualization_type = config.get("virtualization_type")
-    config_dir = "%s/%s" % (AMI_CONFIGS_DIR, target_name)
-    if ami_name_prefix:
-        prefix = ami_name_prefix
-    else:
-        prefix = args.config
-    dated_target_name = "{}-{}".format(
-        prefix, time.strftime("%Y-%m-%d-%H-%M", time.gmtime()))
-
-    int_dev_name = config['target']['int_dev_name']
-    mount_dev = int_dev_name
-    grub_dev = int_dev_name
-    mount_point = config['target']['mount_point']
-    boot_mount_dev = None
-    host_packages_file = os.path.join(config_dir, "host_packages")
-    packages_file = os.path.join(config_dir, "packages")
-    if os.path.exists(host_packages_file):
-        install_packages(host_packages_file, config.get('distro'))
-
-    v = attach_and_wait(host_instance, config['target']['size'],
-                        config['target']['aws_dev_name'], int_dev_name)
-
-    # Step 0: install required packages
-    if config.get('distro') == "centos":
-        run('which MAKEDEV >/dev/null || yum install -y MAKEDEV')
-
-    # Step 1: prepare target FS
-    run('mkdir -p %s' % mount_point)
-    if config.get("root_device_type") == "instance-store":
-        # Use file image
-        mount_dev = "/dev/cloud_root/lv_root"
-        grub_dev = "/dev/loop0"
-        boot_mount_dev = "/dev/mapper/loop0p1"
-        img_file = dated_target_name
-        partition_image(mount_dev=mount_dev, int_dev_name=int_dev_name,
-                        img_file=img_file)
-
-    elif virtualization_type == "hvm":
-        # use EBS volume
-        mount_dev = "/dev/cloud_root/lv_root"
-        boot_mount_dev = "%s1" % int_dev_name
-        partition_ebs_volume(int_dev_name=int_dev_name)
-
-    run('/sbin/mkfs.{fs_type} {args} {dev}'.format(
-        fs_type=config['target']['fs_type'],
-        args=config['target'].get("mkfs_args", ""), dev=mount_dev))
-    run('/sbin/e2label {dev} {label}'.format(
-        dev=mount_dev, label=config['target']['e2_label']))
-    run('mount {dev} {mount_point}'.format(dev=mount_dev,
-                                           mount_point=mount_point))
-    run('mkdir {0}/dev {0}/proc {0}/etc {0}/boot {0}/sys'.format(mount_point))
-    run('mount -t sysfs sys %s/sys' % mount_point)
-
-    if config.get('distro') not in ('debian', 'ubuntu'):
-        run('mount -t proc proc %s/proc' % mount_point)
-        run('for i in console null zero random urandom; '
-            'do /sbin/MAKEDEV -d %s/dev -x $i ; done' % mount_point)
-    if boot_mount_dev:
-        run('mount {} {}/boot'.format(boot_mount_dev, mount_point))
-
-    # Step 2: install base system
-    if config.get('distro') in ('debian', 'ubuntu'):
-        run("debootstrap precise %s "
-            "http://puppetagain.pub.build.mozilla.org/data/repos/apt/ubuntu/"
-            % mount_point)
-        run('chroot %s mount -t proc none /proc' % mount_point)
-        run('mount -o bind /dev %s/dev' % mount_point)
-        put('%s/releng-public.list' % AMI_CONFIGS_DIR,
-            '%s/etc/apt/sources.list' % mount_point)
-        with lcd(config_dir):
-            put('usr/sbin/policy-rc.d', '%s/usr/sbin/' % mount_point,
-                mirror_local_mode=True)
-        install_packages(packages_file, config.get('distro'),
-                         chroot=mount_point)
-    else:
-        with lcd(config_dir):
-            put('etc/yum-local.cfg', '%s/etc/yum-local.cfg' % mount_point)
-            put('groupinstall', '/tmp/groupinstall')
-            put('additional_packages', '/tmp/additional_packages')
-        yum = 'yum -c {0}/etc/yum-local.cfg -y --installroot={0} '.format(
-            mount_point)
-        run('%s groupinstall "`cat /tmp/groupinstall`"' % yum)
-        run('%s install `cat /tmp/additional_packages`' % yum)
-        run('%s clean packages' % yum)
-        # Rebuild RPM DB for cases when versions mismatch
-        run('chroot %s rpmdb --rebuilddb || :' % mount_point)
-
-    # Step 3: upload custom configuration files
-    run('chroot %s mkdir -p /boot/grub' % mount_point)
-    for directory in ('boot', 'etc', 'usr'):
-        local_directory = os.path.join(config_dir, directory)
-        remote_directory = os.path.join(mount_point, directory)
-        if not os.path.exists(local_directory):
-            pass
-
-        sync(local_directory, remote_directory)
-
-    # Step 4: tune configs
-    run('sed -i -e s/@ROOT_DEV_LABEL@/{label}/g -e s/@FS_TYPE@/{fs}/g '
-        '{mnt}/etc/fstab'.format(label=config['target']['e2_label'],
-                                 fs=config['target']['fs_type'],
-                                 mnt=mount_point))
-    if config.get('distro') in ('debian', 'ubuntu'):
-        if virtualization_type == "hvm":
-            run("chroot {mnt} grub-install {int_dev_name}".format(
-                mnt=mount_point, int_dev_name=int_dev_name))
-            run("chroot {mnt} update-grub".format(mnt=mount_point))
-        else:
-            run("chroot {mnt} update-grub -y".format(mnt=mount_point))
-            run("sed  -i 's/^# groot.*/# groot=(hd0)/g' "
-                "{mnt}/boot/grub/menu.lst".format(mnt=mount_point))
-            run("chroot {mnt} update-grub".format(mnt=mount_point))
-    else:
-        run('ln -s grub.conf %s/boot/grub/menu.lst' % mount_point)
-        run('ln -s ../boot/grub/grub.conf %s/etc/grub.conf' % mount_point)
-        if config.get('kernel_package') == 'kernel-PAE':
-            run('sed -i s/@VERSION@/`chroot %s rpm -q '
-                '--queryformat "%%{version}-%%{release}.%%{arch}.PAE" '
-                '%s | tail -n1`/g %s/boot/grub/grub.conf' %
-                (mount_point, config.get('kernel_package', 'kernel'),
-                 mount_point))
-        else:
-            run('sed -i s/@VERSION@/`chroot %s rpm -q '
-                '--queryformat "%%{version}-%%{release}.%%{arch}" '
-                '%s | tail -n1`/g %s/boot/grub/grub.conf' %
-                (mount_point, config.get('kernel_package', 'kernel'),
-                 mount_point))
-        if config.get("root_device_type") == "instance-store":
-            # files normally copied by grub-install
-            run("cp -va /usr/share/grub/x86_64-redhat/* /mnt/boot/grub/")
-            put(os.path.join(config_dir, "grub.cmd"), "/tmp/grub.cmd")
-            run("sed -i s/@IMG@/{}/g /tmp/grub.cmd".format(img_file))
-            run("cat /tmp/grub.cmd | grub --device-map=/dev/null")
-        elif virtualization_type == "hvm":
-            # See https://bugs.archlinux.org/task/30241 for the details,
-            # grub-nstall doesn't handle /dev/xvd* devices properly
-            grub_install_patch = os.path.join(config_dir, "grub-install.diff")
-            if os.path.exists(grub_install_patch):
-                put(grub_install_patch, "/tmp/grub-install.diff")
-                run('which patch >/dev/null || yum install -y patch')
-                run('patch -p0 -i /tmp/grub-install.diff /sbin/grub-install')
-            run("grub-install --root-directory=%s --no-floppy %s" %
-                (mount_point, grub_dev))
-
-    run("sed -i -e '/PermitRootLogin/d' -e '/UseDNS/d' "
-        "-e '$ a PermitRootLogin without-password' "
-        "-e '$ a UseDNS no' "
-        "%s/etc/ssh/sshd_config" % mount_point)
-
-    if config.get('distro') in ('debian', 'ubuntu'):
-        pass
-    else:
-        manage_service("network", mount_point, "on")
-        manage_service("rc.local", mount_point, "on")
-
-    if config.get("root_device_type") == "instance-store" and \
-            config.get("distro") == "centos":
-        instance_data = instance_data.copy()
-        instance_data['name'] = host_instance.tags.get("Name")
-        instance_data['hostname'] = host_instance.tags.get("FQDN")
-        run("cp /etc/resolv.conf {}/etc/resolv.conf".format(mount_point))
-        # make puppet happy
-        # disable ipv6
-        run("/sbin/service ip6tables stop")
-        # mount /dev to let sshd start
-        run('mount -o bind /dev %s/dev' % mount_point)
-        assimilate_instance(host_instance, instance_config, ssh_key,
-                            instance_data, deploypass, chroot=mount_point,
-                            reboot=False)
-        ami_cleanup(mount_point=mount_point, distro=config["distro"])
-        # kill chroot processes
-        put('%s/kill_chroot.sh' % AMI_CONFIGS_DIR, '/tmp/kill_chroot.sh')
-        run('bash /tmp/kill_chroot.sh {}'.format(mount_point))
-        run('swapoff -a')
-    run('umount %s/dev || :' % mount_point)
-    if config.get("distro") == "ubuntu":
-        run('rm -f %s/usr/sbin/policy-rc.d' % mount_point)
-        run('chroot %s ln -s /sbin/MAKEDEV /dev/' % mount_point)
-        for dev in ('zero', 'null', 'console', 'generic'):
-            run('chroot %s sh -c "cd /dev && ./MAKEDEV %s"' % (mount_point, dev))
-    run('umount %s/sys || :' % mount_point)
-    run('umount %s/proc || :' % mount_point)
-    run('umount %s/dev  || :' % mount_point)
-    run('umount %s/boot || :' % mount_point)
-    run('umount %s' % mount_point)
-    if config.get("root_device_type") == "instance-store" \
-            and config.get("distro") == "centos":
-        # create bundle
-        run("yum install -y ruby "
-            "http://s3.amazonaws.com/ec2-downloads/ec2-ami-tools.noarch.rpm")
-        bundle_location = "{b}/{d}/{t}/{n}".format(
-            b=config["bucket"], d=config["bucket_dir"],
-            t=config["target"]["tags"]["moz-type"], n=dated_target_name)
-        manifest_location = "{}/{}.manifest.xml".format(bundle_location,
-                                                        dated_target_name)
-        run("mkdir -p /mnt-tmp/out")
-        put(cert, "/mnt-tmp/cert.pem")
-        put(pkey, "/mnt-tmp/pk.pem")
-        run("ec2-bundle-image -c /mnt-tmp/cert.pem -k /mnt-tmp/pk.pem "
-            "-u {uid} -i /mnt-tmp/{img_file} -d /mnt-tmp/out -r x86_64".format(
-                img_file=img_file, uid=config["aws_user_id"]))
-
-        with hide('running', 'stdout', 'stderr'):
-            log.info("uploading bundle")
-            run("ec2-upload-bundle -b {bundle_location}"
-                " --access-key {access_key} --secret-key {secret_key}"
-                " --region {region}"
-                " -m /mnt-tmp/out/{img_file}.manifest.xml  --retry".format(
-                    bundle_location=bundle_location,
-                    access_key=boto.config.get("Credentials",
-                                               "aws_access_key_id"),
-                    secret_key=boto.config.get("Credentials",
-                                               "aws_secret_access_key"),
-                    region=connection.region.name,
-                    img_file=img_file))
-
-    v.detach(force=True)
-    wait_for_status(v, "status", "available", "update")
-    if not config.get("root_device_type") == "instance-store":
-        # Step 5: Create a snapshot
-        log.info('Creating a snapshot')
-        snapshot = v.create_snapshot(dated_target_name)
-        wait_for_status(snapshot, "status", "completed", "update")
-        snapshot.add_tag('Name', dated_target_name)
-        snapshot.add_tag('moz-created', str(int(time.mktime(time.gmtime()))))
-
-    # Step 6: Create an AMI
-    log.info('Creating AMI')
-    if config.get("root_device_type") == "instance-store":
-        ami_id = connection.register_image(
-            dated_target_name,
-            '%s AMI' % dated_target_name,
-            architecture=config['arch'],
-            virtualization_type=virtualization_type,
-            image_location=manifest_location,
-        )
-    else:
-        host_img = connection.get_image(config['ami'])
-        block_map = BlockDeviceMapping()
-        block_map[host_img.root_device_name] = BlockDeviceType(
-            snapshot_id=snapshot.id)
-        root_device_name = host_img.root_device_name
-        if virtualization_type == "hvm":
-            kernel_id = None
-            ramdisk_id = None
-        else:
-            kernel_id = host_img.kernel_id
-            ramdisk_id = host_img.ramdisk_id
-
-        ami_id = connection.register_image(
-            dated_target_name,
-            '%s AMI' % dated_target_name,
-            architecture=config['arch'],
-            kernel_id=kernel_id,
-            ramdisk_id=ramdisk_id,
-            root_device_name=root_device_name,
-            block_device_map=block_map,
-            virtualization_type=virtualization_type,
-        )
-    while True:
-        try:
-            ami = connection.get_image(ami_id)
-            ami.add_tag('Name', dated_target_name)
-            ami.add_tag('moz-created', str(int(time.mktime(time.gmtime()))))
-            if config["target"].get("tags"):
-                for tag, value in config["target"]["tags"].items():
-                    log.info("Tagging %s: %s", tag, value)
-                    ami.add_tag(tag, value)
-            log.info('AMI created')
-            log.info('ID: {id}, name: {name}'.format(id=ami.id, name=ami.name))
-            break
-        except:
-            log.info('Wating for AMI')
-            time.sleep(10)
-
-    # Step 7: Cleanup
-    if not args.keep_volume:
-        log.info('Deleting volume')
-        v.delete()
-    if not args.keep_host_instance:
-        log.info('Terminating host instance')
-        host_instance.terminate()
-
-    return ami
-
-
-if __name__ == '__main__':
-    import argparse
-    parser = argparse.ArgumentParser()
-    parser.set_defaults(
-        region="us-west-1",
-        key_name=None,
-    )
-    parser.add_argument("-c", "--config", required=True,
-                        help="instance configuration to use")
-    parser.add_argument("-r", "--region", help="region to use",
-                        default="us-east-1")
-    parser.add_argument("--ssh-key", help="SSH key file", required=True)
-    parser.add_argument("--key-name", help="SSH key name", required=True)
-    parser.add_argument('--keep-volume', action='store_true',
-                        help="Don't delete target volume")
-    parser.add_argument('--keep-host-instance', action='store_true',
-                        help="Don't delete host instance")
-    parser.add_argument('--user', default='root')
-    parser.add_argument('--puppetize', action="store_true",
-                        help="Puppetize the AMI")
-    parser.add_argument('--instance-config', type=argparse.FileType('r'),
-                        help="Path to instance config file")
-    parser.add_argument('--instance-data', type=argparse.FileType('r'),
-                        help="Path to instance data file")
-    parser.add_argument('--secrets', type=argparse.FileType('r'),
-                        help="Path to secrets file")
-    parser.add_argument('--certificate',
-                        help="Path to AMI encryptiion certificate")
-    parser.add_argument('--pkey',
-                        help="Path to AMI encryptiion privte key")
-    parser.add_argument('--ami-name-prefix', help="AMI name prefix")
-    parser.add_argument("-t", "--copy-to-region", action="append", default=[],
-                        dest="copy_to_regions", help="Regions to copy AMI to")
-    parser.add_argument("-v", "--verbose", action="store_const",
-                        default=logging.INFO, const=logging.DEBUG,
-                        dest="log_level", help="Verbose logging")
-    parser.add_argument("host", metavar="host", nargs=1,
-                        help="Temporary hostname")
-    args = parser.parse_args()
-
-    args = parser.parse_args()
-
-    logging.basicConfig(level=args.log_level)
-    logging.getLogger("boto").setLevel(logging.INFO)
-    logging.getLogger("paramiko").setLevel(logging.INFO)
-    instance_config = None
-    instance_data = None
-    deploypass = None
-    dns_required = False
-
-    try:
-        ami_config = json.load(open("%s/%s.json" % (AMI_CONFIGS_DIR,
-                                                    args.config)))[args.region]
-        if args.instance_config:
-            instance_config = json.load(args.instance_config)[args.region]
-        if args.instance_data:
-            instance_data = json.load(args.instance_data)
-        if args.secrets:
-            deploypass = json.load(args.secrets)["deploy_password"]
-
-    except KeyError:
-        parser.error("unknown configuration")
-        raise
-    except IOError:
-        parser.error("Cannot read")
-        raise
-
-    if args.puppetize:
-        dns_required = True
-        for attr in ("instance_config", "instance_data", "secrets"):
-            if not getattr(args, attr):
-                parser.error("{} is required for S3-backed AMIs".format(attr))
-        if ami_config.get("root_device_type") == "instance-store":
-            for attr in ("certificate", "pkey"):
-                if not getattr(args, attr):
-                    parser.error(
-                        "{} is required for S3-backed AMIs".format(attr))
-
-    host_instance = run_instance(region=args.region, hostname=args.host[0],
-                                 config=ami_config, key_name=args.key_name,
-                                 user=args.user, key_filename=args.ssh_key,
-                                 dns_required=dns_required)
-    ami = create_ami(host_instance=host_instance, args=args, config=ami_config,
-                     instance_config=instance_config, ssh_key=args.key_name,
-                     instance_data=instance_data, deploypass=deploypass,
-                     cert=args.certificate, pkey=args.pkey,
-                     ami_name_prefix=args.ami_name_prefix,
-                     key_filename=args.ssh_key)
-
-    for r in args.copy_to_regions:
-        log.info("Copying %s (%s) to %s", ami.id, ami.tags.get("Name"), r)
-        new_ami = copy_ami(ami, r)
-        log.info("New AMI created. AMI ID: %s", new_ami.id)
deleted file mode 100755
--- a/scripts/aws_create_instance.py
+++ /dev/null
@@ -1,296 +0,0 @@
-#!/usr/bin/env python
-import json
-import uuid
-import time
-import boto
-import site
-import os
-import multiprocessing
-import sys
-import logging
-from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
-
-site.addsitedir(os.path.join(os.path.dirname(__file__), ".."))
-from cloudtools.aws import get_aws_connection, get_vpc, \
-    name_available, wait_for_status, get_user_data_tmpl
-from cloudtools.dns import get_ip, get_ptr
-from cloudtools.aws.instance import assimilate_instance, \
-    make_instance_interfaces
-from cloudtools.aws.vpc import get_subnet_id, ip_available
-from cloudtools.aws.ami import ami_cleanup, volume_to_ami, copy_ami, \
-    get_ami
-
-log = logging.getLogger(__name__)
-
-
-def verify(hosts, config, region, ignore_subnet_check=False):
-    """ Check DNS entries and IP availability for hosts"""
-    passed = True
-    conn = get_aws_connection(region)
-    for host in hosts:
-        fqdn = "%s.%s" % (host, config["domain"])
-        log.info("Checking name conflicts for %s", host)
-        if not name_available(conn, host):
-            log.error("%s has been already taken", host)
-            passed = False
-            continue
-        log.debug("Getting IP for %s", fqdn)
-        ip = get_ip(fqdn)
-        if not ip:
-            log.error("%s has no DNS entry", fqdn)
-            passed = False
-        else:
-            log.debug("Getting PTR for %s", fqdn)
-            ptr = get_ptr(ip)
-            if ptr != fqdn:
-                log.error("Bad PTR for %s", host)
-                passed = False
-            log.debug("Checking %s availablility", ip)
-            if not ip_available(region, ip):
-                log.error("IP %s reserved for %s, but not available", ip, host)
-                passed = False
-            if not ignore_subnet_check:
-                vpc = get_vpc(region)
-                s_id = get_subnet_id(vpc, ip)
-                if s_id not in config['subnet_ids']:
-                    log.error("IP %s does not belong to assigned subnets", ip)
-                    passed = False
-    if not passed:
-        raise RuntimeError("Sanity check failed")
-
-
-def create_instance(name, config, region, key_name, ssh_key, instance_data,
-                    deploypass, loaned_to, loan_bug, create_ami,
-                    ignore_subnet_check, max_attempts):
-    """Creates an AMI instance with the given name and config. The config must
-    specify things like ami id."""
-    conn = get_aws_connection(region)
-    # Make sure we don't request the same things twice
-    token = str(uuid.uuid4())[:16]
-
-    instance_data = instance_data.copy()
-    instance_data['name'] = name
-    instance_data['domain'] = config['domain']
-    instance_data['hostname'] = '{name}.{domain}'.format(
-        name=name, domain=config['domain'])
-
-    ami = conn.get_all_images(image_ids=[config["ami"]])[0]
-    bdm = None
-    if 'device_map' in config:
-        bdm = BlockDeviceMapping()
-        for device, device_info in config['device_map'].items():
-            bd = BlockDeviceType()
-            if device_info.get('size'):
-                bd.size = device_info['size']
-            # Overwrite root device size for HVM instances, since they cannot
-            # be resized online
-            if ami.virtualization_type == "hvm" and \
-                    ami.root_device_name == device:
-                bd.size = ami.block_device_mapping[ami.root_device_name].size
-            if device_info.get("delete_on_termination") is not False:
-                bd.delete_on_termination = True
-            if device_info.get("ephemeral_name"):
-                bd.ephemeral_name = device_info["ephemeral_name"]
-
-            bdm[device] = bd
-
-    interfaces = make_instance_interfaces(
-        region, instance_data['hostname'], ignore_subnet_check,
-        config.get('subnet_ids'), config.get('security_group_ids', []),
-        config.get("use_public_ip"))
-
-    keep_going, attempt = True, 1
-    while keep_going:
-        try:
-            if 'user_data_file' in config:
-                user_data = open(config['user_data_file']).read()
-            else:
-                user_data = get_user_data_tmpl(config['type'])
-            if user_data:
-                user_data = user_data.format(
-                    puppet_server=instance_data.get('default_puppet_server'),
-                    fqdn=instance_data['hostname'],
-                    hostname=instance_data['name'],
-                    domain=instance_data['domain'],
-                    dns_search_domain=config.get('dns_search_domain'),
-                    password=deploypass,
-                    moz_instance_type=config['type'],
-                )
-
-            reservation = conn.run_instances(
-                image_id=config['ami'],
-                key_name=key_name,
-                instance_type=config['instance_type'],
-                block_device_map=bdm,
-                client_token=token,
-                disable_api_termination=config.get('disable_api_termination'),
-                user_data=user_data,
-                instance_profile_name=config.get('instance_profile_name'),
-                network_interfaces=interfaces,
-            )
-            break
-        except boto.exception.BotoServerError:
-            log.exception("Cannot start an instance")
-        time.sleep(10)
-        if max_attempts:
-            attempt += 1
-            keep_going = max_attempts >= attempt
-
-    instance = reservation.instances[0]
-    log.info("instance %s created, waiting to come up", instance)
-    # Wait for the instance to come up
-    wait_for_status(instance, "state", "running", "update")
-    instance.add_tag('Name', name)
-    instance.add_tag('FQDN', instance_data['hostname'])
-    instance.add_tag('created', time.strftime("%Y-%m-%d %H:%M:%S %Z",
-                                              time.gmtime()))
-    instance.add_tag('moz-type', config['type'])
-    if loaned_to:
-        instance.add_tag("moz-loaned-to", loaned_to)
-    if loan_bug:
-        instance.add_tag("moz-bug", loan_bug)
-
-    log.info("assimilating %s", instance)
-    instance.add_tag('moz-state', 'pending')
-
-    keep_going, attempt = True, 1
-    while keep_going:
-        try:
-            # Don't reboot if need to create ami
-            reboot = not create_ami
-            assimilate_instance(instance=instance, config=config,
-                                ssh_key=ssh_key, instance_data=instance_data,
-                                deploypass=deploypass, reboot=reboot)
-            break
-        except:
-            log.warn("problem assimilating %s (%s, %s), retrying in "
-                     "10 sec ...", instance_data['hostname'], instance.id,
-                     instance.private_ip_address, exc_info=True)
-            time.sleep(10)
-        if max_attempts:
-            attempt += 1
-            keep_going = max_attempts >= attempt
-
-    instance.add_tag('moz-state', 'ready')
-    if create_ami:
-        ami_name = "spot-%s-%s" % (
-            config['type'], time.strftime("%Y-%m-%d-%H-%M", time.gmtime()))
-        log.info("Generating AMI %s", ami_name)
-        ami_cleanup(mount_point="/", distro=config["distro"])
-        root_bd = instance.block_device_mapping[instance.root_device_name]
-        volume = instance.connection.get_all_volumes(
-            volume_ids=[root_bd.volume_id])[0]
-        # The instance has to be stopped to flush EBS caches
-        instance.stop()
-        wait_for_status(instance, 'state', 'stopped', 'update')
-        ami = volume_to_ami(volume=volume, ami_name=ami_name,
-                            arch=instance.architecture,
-                            virtualization_type=instance.virtualization_type,
-                            kernel_id=instance.kernel,
-                            root_device_name=instance.root_device_name,
-                            tags=config["tags"])
-        log.info("AMI %s (%s) is ready", ami_name, ami.id)
-        log.warn("Terminating %s", instance)
-        instance.terminate()
-
-
-class LoggingProcess(multiprocessing.Process):
-    def __init__(self, log, *args, **kwargs):
-        self.log = log
-        super(LoggingProcess, self).__init__(*args, **kwargs)
-
-    def run(self):
-        output = open(self.log, 'wb', 0)
-        logging.basicConfig(stream=output)
-        sys.stdout = output
-        sys.stderr = output
-        return super(LoggingProcess, self).run()
-
-
-def make_instances(names, config, region, key_name, ssh_key, instance_data,
-                   deploypass, loaned_to, loan_bug, create_ami,
-                   ignore_subnet_check, max_attempts):
-    """Create instances for each name of names for the given configuration"""
-    procs = []
-    for name in names:
-        p = LoggingProcess(log="{name}.log".format(name=name),
-                           target=create_instance,
-                           args=(name, config, region, key_name, ssh_key,
-                                 instance_data, deploypass, loaned_to,
-                                 loan_bug, create_ami, ignore_subnet_check,
-                                 max_attempts),
-                           )
-        p.start()
-        procs.append(p)
-
-    log.info("waiting for workers")
-    for p in procs:
-        p.join()
-
-
-if __name__ == '__main__':
-    import argparse
-    parser = argparse.ArgumentParser()
-    parser.add_argument("-c", "--config", required=True,
-                        type=argparse.FileType('r'),
-                        help="instance configuration to use")
-    parser.add_argument("-r", "--region", help="region to use",
-                        default="us-east-1")
-    parser.add_argument("-k", "--secrets", type=argparse.FileType('r'),
-                        required=True, help="file where secrets can be found")
-    parser.add_argument("-s", "--key-name", help="SSH key name", required=True)
-    parser.add_argument("--ssh-key", required=True,
-                        help="SSH key to be used by Fabric")
-    parser.add_argument("-i", "--instance-data", help="instance specific data",
-                        type=argparse.FileType('r'), required=True)
-    parser.add_argument("--no-verify", action="store_true",
-                        help="Skip DNS related checks")
-    parser.add_argument("-v", "--verbose", action="store_const",
-                        dest="log_level", const=logging.DEBUG,
-                        default=logging.INFO,
-                        help="Increase logging verbosity")
-    parser.add_argument("-l", "--loaned-to", help="Loaner contact e-mail")
-    parser.add_argument("-b", "--bug", help="Loaner bug number")
-    parser.add_argument("hosts", metavar="host", nargs="+",
-                        help="hosts to be processed")
-    parser.add_argument("--create-ami", action="store_true",
-                        help="Generate AMI and terminate the instance")
-    parser.add_argument("--ignore-subnet-check", action="store_true",
-                        help="Do not check subnet IDs")
-    parser.add_argument("-t", "--copy-to-region", action="append", default=[],
-                        dest="copy_to_regions", help="Regions to copy AMI to")
-    parser.add_argument("--max-attempts",
-                        help="The number of attempts to try after each failure"
-                        )
-
-    args = parser.parse_args()
-
-    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s",
-                        level=args.log_level)
-
-    try:
-        config = json.load(args.config)[args.region]
-    except KeyError:
-        parser.error("unknown configuration")
-    if not os.path.exists(args.ssh_key):
-        parser.error("Cannot read %s" % args.ssh_key)
-
-    secrets = json.load(args.secrets)
-    deploypass = secrets["deploy_password"]
-
-    instance_data = json.load(args.instance_data)
-    if not args.no_verify:
-        log.info("Sanity checking DNS entries...")
-        verify(args.hosts, config, args.region, args.ignore_subnet_check)
-    make_instances(names=args.hosts, config=config, region=args.region,
-                   key_name=args.key_name, ssh_key=args.ssh_key,
-                   instance_data=instance_data, deploypass=deploypass,
-                   loaned_to=args.loaned_to, loan_bug=args.bug,
-                   create_ami=args.create_ami,
-                   ignore_subnet_check=args.ignore_subnet_check,
-                   max_attempts=args.max_attempts)
-    for r in args.copy_to_regions:
-        ami = get_ami(region=args.region, moz_instance_type=config["type"])
-        log.info("Copying %s (%s) to %s", ami.id, ami.tags.get("Name"), r)
-        new_ami = copy_ami(ami, r)
-        log.info("New AMI created. AMI ID: %s", new_ami.id)
deleted file mode 100644
--- a/scripts/aws_create_win_ami.py
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/usr/bin/env python
-"""Usage: aws_create_win_ami.py -c <config> -s <keyname> [-r region] [-k secrets] INSTANCE_NAME
-
--c, --config <config>    instance configuration to use
--r, --region <region>    region to use [default: us-east-1]
--k, --secrets <secrets>  file for AWS secrets
--s, --key-name <keyname> ssh key name
-"""
-import random
-import json
-import uuid
-import time
-import logging
-import site
-import os
-
-import boto
-from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
-from boto.ec2.networkinterface import NetworkInterfaceSpecification, \
-    NetworkInterfaceCollection
-site.addsitedir(os.path.join(os.path.dirname(__file__), ".."))
-from cloudtools.aws import AMI_CONFIGS_DIR, wait_for_status, get_aws_connection
-log = logging.getLogger(__name__)
-
-
-def create_instance(connection, instance_name, config, key_name):
-    bdm = None
-    if 'device_map' in config:
-        bdm = BlockDeviceMapping()
-        for device, device_info in config['device_map'].items():
-            bdm[device] = BlockDeviceType(size=device_info['size'],
-                                          delete_on_termination=True)
-
-    if 'user_data_file' in config:
-        log.debug("reading user_data from '%s'" % config['user_data_file'])
-        user_data = open(config['user_data_file']).read()
-        # assert that there are no values in need of formatting
-        user_data = user_data.format()
-    else:
-        user_data = None
-
-    subnet_id = random.choice(config.get('subnet_ids'))
-
-    interface = NetworkInterfaceSpecification(
-        subnet_id=subnet_id,
-        delete_on_termination=True,
-        groups=config.get('security_group_ids', []),
-        associate_public_ip_address=config.get("use_public_ip")
-    )
-    interfaces = NetworkInterfaceCollection(interface)
-
-    reservation = connection.run_instances(
-        image_id=config['ami'],
-        key_name=key_name,
-        instance_type=config['instance_type'],
-        block_device_map=bdm,
-        client_token=str(uuid.uuid4())[:16],
-        disable_api_termination=bool(config.get('disable_api_termination')),
-        user_data=user_data,
-        instance_profile_name=config.get('instance_profile_name'),
-        network_interfaces=interfaces,
-    )
-
-    instance = reservation.instances[0]
-    instance.add_tag('Name', instance_name)
-
-    log.info("instance %s created, waiting to come up", instance)
-    # Wait for the instance to come up
-    wait_for_status(instance, 'state', 'running', 'update')
-
-    log.info("instance %s is running; waiting for shutdown", instance)
-    wait_for_status(instance, 'state', 'stopped', 'update')
-    log.info("clearing userData")
-    instance.modify_attribute("userData", None)
-    return instance
-
-
-def create_ami(host_instance, config_name, config):
-    connection = host_instance.connection
-    dated_target_name = "%s-%s" % (
-        config_name, time.strftime("%Y-%m-%d-%H-%M", time.gmtime()))
-
-    log.info('Creating AMI')
-
-    ami_id = connection.create_image(host_instance.id, name=dated_target_name,
-                                     description='%s EBS AMI' %
-                                     dated_target_name,)
-    while True:
-        try:
-            ami = connection.get_image(ami_id)
-            ami.add_tag('Name', dated_target_name)
-            log.info('AMI created')
-            log.info('ID: {id}, name: {name}'.format(id=ami.id, name=ami.name))
-            break
-        except boto.exception.EC2ResponseError:
-            log.info('Wating for AMI')
-            time.sleep(10)
-    log.info("Waiting for AMI")
-    while ami.state != 'available':
-        ami.update()
-        time.sleep(10)
-    return ami
-
-
-if __name__ == '__main__':
-    from docopt import docopt
-
-    args = docopt(__doc__)
-
-    logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(message)s")
-
-    try:
-        config = json.load(
-            open("%s/%s.json" % (AMI_CONFIGS_DIR,
-                                 args['--config'])))[args['--region']]
-    except KeyError:
-        log.error("unknown configuration")
-        exit(1)
-
-    connection = get_aws_connection(args['--region'])
-    host_instance = create_instance(connection, args['INSTANCE_NAME'], config,
-                                    args['--key-name'])
-    target_ami = create_ami(host_instance, args['--config'], config)
deleted file mode 100644
--- a/scripts/aws_get_cloudtrail_logs.py
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/usr/bin/env python
-"""Downloads the cloudtrail logs locally"""
-
-import datetime
-import boto
-import os
-import signal
-import site
-from functools import partial
-from multiprocessing import Pool
-
-site.addsitedir(os.path.join(os.path.dirname(__file__), ".."))
-from cloudtools.aws import DEFAULT_REGIONS, get_s3_connection
-from cloudtools.fileutils import mkdir_p
-
-import logging
-log = logging.getLogger(__name__)
-
-LIMIT_MONTHS = 1  # 1 this month and the previous one
-GET_CONTENTS_TO_FILENAME_TIMEOUT = 5  # get_contents_to_filename timeout in seconds
-
-
-def get_keys(bucket, prefix):
-    """gets s3 keys"""
-    for i in bucket.list(prefix=prefix, delimiter="/"):
-        if isinstance(i, boto.s3.prefix.Prefix):
-            for i in get_keys(bucket, i.name):
-                yield i
-        else:
-            yield i
-
-
-def days_to_consider(limit=LIMIT_MONTHS):
-    """limit logs to the current month + last calendar month"""
-    # it outputs, ['2014/01', '2013/12']
-    now = datetime.datetime.now()
-    start_date = datetime.datetime.now() - datetime.timedelta(LIMIT_MONTHS * 30)
-
-    days = []
-    days.append(start_date.strftime("%Y/%m"))
-    days.append(now.strftime("%Y/%m"))
-    return days
-
-
-class TimeoutException(Exception):
-    """Timeout exception used by _timeout()"""
-    pass
-
-
-def _timeout(*args):
-    """callback function for signal.alarm, just raise an exception"""
-    raise TimeoutException
-
-
-def write_to_disk(cache_dir, key):
-    """write key to disk in cache_dir"""
-    dst = os.path.join(cache_dir, key.name)
-    mkdir_p(os.path.dirname(dst))
-    # key.get_contents_to_filename() is a blocking function,
-    # if we try to download non existing files, it will hang here
-    # it works only on unix systems
-    signal.signal(signal.SIGALRM, _timeout)
-    if not os.path.exists(dst):
-        log.debug('downloading: {0}'.format(key.name))
-        signal.alarm(GET_CONTENTS_TO_FILENAME_TIMEOUT)
-        try:
-            key.get_contents_to_filename(dst)
-        except TimeoutException:
-            log.debug('timeout downloading: {0}'.format(key.name))
-    else:
-        # file is already cached locally
-        log.debug('{0} is already cached'.format(key.name))
-
-if __name__ == '__main__':
-    import argparse
-    parser = argparse.ArgumentParser()
-    parser.add_argument("-v", "--verbose", action="store_true",
-                        help="Increase logging verbosity")
-    parser.add_argument("--cache-dir", metavar="cache_dir", required=True,
-                        help="cache directory. Cloutrail logs are stored here")
-    parser.add_argument("--s3-base-prefix", metavar="s3_base_dir", required=True,
-                        help="root of s3 logs keys")
-    parser.add_argument("--s3-bucket", metavar="s3_bucket", required=True,
-                        help="s3 bucket")
-
-    args = parser.parse_args()
-
-    logging.basicConfig(format="%(asctime)s - %(message)s")
-    if args.verbose:
-        log.setLevel(logging.DEBUG)
-    else:
-        log.setLevel(logging.INFO)
-
-    conn = get_s3_connection()
-    bucket = conn.get_bucket(args.s3_bucket)
-
-    prefixes = []
-    log.debug("finding all AWSLog keys")
-    for region in DEFAULT_REGIONS:
-        for day in days_to_consider():
-            prefixes.append("{0}/{1}/{2}".format(args.s3_base_prefix, region,
-                            day))
-
-    write_to_disk_partial = partial(write_to_disk, args.cache_dir)
-
-    for prefix in prefixes:
-        keys = get_keys(bucket, prefix)
-        pool = Pool()
-        pool.map(write_to_disk_partial, keys)
-        pool.close()
-        pool.join()
deleted file mode 100644
--- a/scripts/aws_manage_instances.py
+++ /dev/null
@@ -1,161 +0,0 @@
-#!/usr/bin/env python
-
-import argparse
-import logging
-from time import gmtime, strftime
-import site
-import os
-
-site.addsitedir(os.path.join(os.path.dirname(__file__), ".."))
-from cloudtools.aws import get_aws_connection, DEFAULT_REGIONS
-
-log = logging.getLogger(__name__)
-
-
-def start(i, dry_run):
-    name = i.tags.get('Name', '')
-    log.info("Starting %s..." % name)
-    if dry_run:
-        log.info("Dry run mode, skipping...")
-    else:
-        i.start()
-
-
-def stop(i, dry_run):
-    name = i.tags.get('Name', '')
-    log.info("Stopping %s..." % name)
-    if dry_run:
-        log.info("Dry run mode, skipping...")
-    else:
-        i.stop()
-
-
-def restart(i, dry_run):
-    name = i.tags.get('Name', '')
-    log.info("Restarting %s..." % name)
-    if dry_run:
-        log.info("Dry run mode, skipping...")
-    else:
-        i.reboot()
-
-
-def enable(i, dry_run):
-    name = i.tags.get('Name', '')
-    log.info("Enabling %s..." % name)
-    if dry_run:
-        log.info("Dry run mode, skipping...")
-    else:
-        # .add_tag overwrites existing tag
-        i.add_tag("moz-state", "ready")
-
-
-def disable(i, dry_run, comments=None):
-    name = i.tags.get('Name', '')
-    moz_state = "disabled at %s" % strftime("%Y-%m-%d %H:%M:%S +0000",
-                                            gmtime())
-    if comments:
-        moz_state += ". %s" % comments
-    log.info("Disabling %s, setting moz-state tag to '%s'..." % (name,
-                                                                 moz_state))
-    if dry_run:
-        log.info("Dry run mode, skipping...")
-    else:
-        i.add_tag("moz-state", moz_state)
-
-
-def terminate(i, dry_run, force=None):
-    name = i.tags.get('Name', '')
-    log.info("Terminating %s..." % name)
-
-    if dry_run:
-        log.info("Dry run mode, skipping...")
-        return
-
-    if force:
-        yesno = "y"
-    else:
-        yesno = raw_input("WARNING: you are about to terminate %s! "
-                          "Are you sure? [y/N] " % name)
-    if yesno == "y":
-        i.terminate()
-        log.info("%s terminated" % name)
-    else:
-        log.info("%s NOT terminated" % name)
-
-
-def status(i):
-    instance_id = i.id
-    name = i.tags.get('Name', '')
-    ip = i.private_ip_address
-    state = i.state
-    moz_state = i.tags.get('moz-state', '')
-    enabled = bool(moz_state == "ready")
-
-    print "Name:".rjust(8), name
-    print "ID:".rjust(8), instance_id
-    print "IP:".rjust(8), ip
-    print "Enabled:".rjust(8), enabled
-    print "State:".rjust(8), state
-    print "Tags:".rjust(8), ", ".join(["%s -> %s" % (k, v)
-                                       for k, v in i.tags.iteritems()])
-    print "=" * 72
-
-
-if __name__ == '__main__':
-
-    parser = argparse.ArgumentParser()
-    parser.add_argument("-r", "--region", dest="regions", action="append",
-                        help="optional list of regions")
-    parser.add_argument("action", choices=["stop", "start", "restart",
-                                           "enable", "disable", "terminate",
-                                           "status"],
-                        help="action to be performed")
-    parser.add_argument("-m", "--comments", help="reason to disable")
-    parser.add_argument("-n", "--dry-run", action="store_true",
-                        help="Dry run mode")
-    parser.add_argument("-q", "--quiet", action="store_true",
-                        help="Supress logging messages")
-    parser.add_argument("hosts", metavar="host", nargs="+",
-                        help="hosts to be processed")
-    parser.add_argument("-f", "--force", action="store_true",
-                        help="Force action without prompting")
-
-    args = parser.parse_args()
-
-    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s")
-    if not args.quiet:
-        log.setLevel(logging.INFO)
-    else:
-        log.setLevel(logging.ERROR)
-
-    if not args.regions:
-        args.regions = DEFAULT_REGIONS
-
-    for region in args.regions:
-        conn = get_aws_connection(region)
-        instances = conn.get_only_instances()
-        for i in instances:
-            name = i.tags.get('Name', '')
-            instance_id = i.id
-            if not i.private_ip_address:
-                # Terminated instances have no IP address assigned
-                log.debug("Skipping (terminated?) %s (%s)..." % (name,
-                                                                 instance_id))
-                continue
-            if name in args.hosts or instance_id in args.hosts:
-                log.info("Found %s (%s)..." % (name, instance_id))
-
-                if args.action == "start":
-                    start(i, args.dry_run)
-                elif args.action == "stop":
-                    stop(i, args.dry_run)
-                elif args.action == "restart":
-                    restart(i, args.dry_run)
-                elif args.action == "enable":
-                    enable(i, args.dry_run)
-                elif args.action == "disable":
-                    disable(i, args.dry_run, args.comments)
-                elif args.action == "terminate":
-                    terminate(i, args.dry_run, args.force)
-                elif args.action == "status":
-                    status(i)
deleted file mode 100644
--- a/scripts/aws_manage_routingtables.py
+++ /dev/null
@@ -1,146 +0,0 @@
-#!/usr/bin/env python
-import boto.vpc
-import yaml
-import dns.resolver
-
-import logging
-log = logging.getLogger(__name__)
-_dns_cache = {}
-
-
-def get_connection(region):
-    return boto.vpc.connect_to_region(region)
-
-
-def load_config(filename):
-    return yaml.load(open(filename))
-
-
-def resolve_host(hostname):
-    if hostname in _dns_cache:
-        return _dns_cache[hostname]
-    log.info("resolving host %s", hostname)
-    ips = dns.resolver.query(hostname, "A")
-    ips = [i.to_text() for i in ips]
-    _dns_cache[hostname] = ips
-    return ips
-
-
-def sync_tables(conn, my_tables, remote_tables):
-    # Check that remote tables don't have overlapping names
-    seen_names = set()
-    for t in remote_tables[:]:
-        name = t.tags.get('Name')
-        if not name:
-            log.warn("table %s has no name", t.id)
-            remote_tables.remove(t)
-            continue
-        if name in seen_names:
-            log.warn("table %s has a duplicate name %s; skipping", t.id, name)
-            remote_tables.remove(t)
-        seen_names.add(name)
-
-    for name in my_tables:
-        if name not in seen_names:
-            # TODO: Allow multiple VPCs to exist per region
-            vpc_id = conn.get_all_vpcs()[0].id
-            log.info("remote table %s doesn't exist; creating in vpc %s", name, vpc_id)
-            t = conn.create_route_table(vpc_id)
-            t.add_tag('Name', name)
-            remote_tables.append(t)
-
-    # Sync remote tables
-    for t in remote_tables:
-        name = t.tags['Name']
-        if name not in my_tables:
-            if raw_input("table %s doesn't exist in local config; delete? (y/N)" % t.id) == 'y':
-                log.warn("DELETING %s", t.id)
-                # TODO
-            continue
-
-        my_t = my_tables[name]
-
-        # Now look at routes
-        remote_routes = set()
-        for r in t.routes:
-            remote_routes.add((r.destination_cidr_block, r.gateway_id, r.instance_id))
-
-        my_routes = set()
-        IGW = None
-        VGW = None
-
-        # Resolve hostnames
-        to_delete = set()
-        to_add = set()
-        for cidr, dest in my_t['routes'].iteritems():
-            if "/" not in cidr:
-                for ip in resolve_host(cidr):
-                    log.info("adding %s for %s", ip, cidr)
-                    to_add.add(("%s/32" % ip, dest))
-                to_delete.add(cidr)
-
-        for d in to_delete:
-            del my_t['routes'][d]
-
-        for cidr, dest in to_add:
-            my_t['routes'][cidr] = dest
-
-        for cidr, dest in my_t['routes'].iteritems():
-            assert "/" in cidr
-            instance_id = None
-            gateway_id = None
-            if dest == "IGW":
-                # Use our VPC's IGW
-                if IGW is None:
-                    IGW = conn.get_all_internet_gateways()[0]
-                gateway_id = IGW.id
-            elif dest == "VGW":
-                # Use our VPC's VGW
-                if VGW is None:
-                    VGW = conn.get_all_vpn_gateways()[0]
-                gateway_id = VGW.id
-            elif dest == 'local':
-                gateway_id = 'local'
-            elif dest and dest.startswith("i-"):
-                instance_id = dest
-            my_routes.add((cidr, gateway_id, instance_id))
-
-        # Delete extra routes first, in case we need to change the gateway of
-        # some route
-        extra_routes = remote_routes - my_routes
-        for cidr, gateway_id, instance_id in extra_routes:
-            log.info("%s - deleting route to %s via %s %s", t.id, cidr, gateway_id, instance_id)
-            if raw_input("delete? (y/N) ") == 'y':
-                conn.delete_route(t.id, cidr)
-
-        # Add missing routes
-        missing_routes = my_routes - remote_routes
-        for cidr, gateway_id, instance_id in missing_routes:
-            log.info("%s - adding route to %s via %s %s", t.id, cidr, gateway_id, instance_id)
-            conn.create_route(t.id, cidr, gateway_id=gateway_id, instance_id=instance_id)
-
-        # TODO: Set default, manage subnets
-
-
-def main():
-    import sys
-    log.debug("Parsing file")
-    rt_defs = load_config(sys.argv[1])
-
-    regions = set(rt_defs.keys())
-
-    log.info("Working in regions %s", regions)
-
-    for region in regions:
-        log.info("Working in %s", region)
-        conn = get_connection(region)
-        remote_tables = conn.get_all_route_tables()
-
-        # Compare vs. our configs
-        my_tables = rt_defs[region]
-
-        sync_tables(conn, my_tables, remote_tables)
-
-if __name__ == '__main__':
-    logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.INFO)
-    main()
deleted file mode 100644
--- a/scripts/aws_manage_securitygroups.py
+++ /dev/null
@@ -1,280 +0,0 @@
-#!/usr/bin/env python
-import os
-import re
-import logging
-import yaml
-import boto.ec2
-import dns.resolver
-
-import site
-site.addsitedir(os.path.join(os.path.dirname(__file__), ".."))
-
-from cloudtools.yaml import process_includes
-
-
-# see http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Appendix_Limits.html
-# note that "Rules" in that document actually refers to grants
-MAX_GRANTS_PER_SG = 125
-
-log = logging.getLogger(__name__)
-port_re = re.compile(r'^(\d+)-(\d+)$')
-
-
-def get_connection(region):
-    return boto.ec2.connect_to_region(region)
-
-
-def load_config(filename):
-    return process_includes(yaml.load(open(filename)))
-
-
-def get_remote_sg_by_name(groups, name):
-    log.info("Looking for sg %s", name)
-    for g in groups:
-        if g.name == name:
-            log.info("Found %s", g)
-            return g
-    log.info("Didn't find %s; returning None", name)
-
-
-_dns_cache = {}
-
-
-def resolve_host(hostname):
-    if hostname in _dns_cache:
-        return _dns_cache[hostname]
-    log.info("resolving host %s", hostname)
-    ips = dns.resolver.query(hostname, "A")
-    ips = [i.to_text() for i in ips]
-    _dns_cache[hostname] = ips
-    return ips
-
-
-def make_rules_for_def(rule):
-    """Returns a set of rules for a given config definition. A rule is a
-    (proto, from_port, to_port, hosts) tuple
-    """
-    retval = []
-    proto = str(rule['proto'])
-    if 'ports' in rule:
-        ports = []
-        for p in rule['ports']:
-            p = str(p)
-            mo = port_re.match(p)
-            if mo:
-                ports.append(tuple(mo.groups()))
-            else:
-                ports.append((p, p))
-    else:
-        ports = [(None, None)]
-    hosts = rule['hosts']
-    # Resolve the hostnames
-    log.debug("%s %s %s", proto, ports, hosts)
-    log.debug("Resolving hostnames")
-    for h in hosts[:]:
-        if '/' not in h:
-            ips = resolve_host(h)
-            hosts.remove(h)
-            for ip in ips:
-                hosts.append("%s/32" % ip)
-    log.debug("%s %s %s", proto, ports, hosts)
-
-    for from_port, to_port in ports:
-        retval.append((proto, from_port, to_port, set(hosts)))
-    return retval
-
-
-def make_rules(sg_config):
-    rules = {}
-    for rule_def in sg_config.get('inbound', []):
-        for proto, from_port, to_port, hosts in make_rules_for_def(rule_def):
-            rules.setdefault(('inbound', proto, from_port, to_port),
-                             set()).update(hosts)
-
-    for rule_def in sg_config.get('outbound', []):
-        for proto, from_port, to_port, hosts in make_rules_for_def(rule_def):
-            rules.setdefault(('outbound', proto, from_port, to_port),
-                             set()).update(hosts)
-
-    return rules
-
-
-def rules_from_sg(sg):
-    rules = {}
-    for rule in sg.rules:
-        # ignore non-cidr grants (to other sg's)
-        cidr_grants = set(g.cidr_ip for g in rule.grants if g.cidr_ip)
-        if not cidr_grants:
-            continue
-        rules.setdefault(('inbound', rule.ip_protocol, rule.from_port,
-                          rule.to_port), set()).update(cidr_grants)
-    for rule in sg.rules_egress:
-        # ignore non-cidr grants (to other sg's)
-        cidr_grants = set(g.cidr_ip for g in rule.grants if g.cidr_ip)
-        if not cidr_grants:
-            continue
-        rules.setdefault(
-            ('outbound', rule.ip_protocol, rule.from_port, rule.to_port),
-            set()).update(set(g.cidr_ip for g in rule.grants if g.cidr_ip))
-
-    return rules
-
-
-def add_hosts(sg, rule_key, hosts):
-    if rule_key[0] == 'inbound':
-        auth_func = sg.connection.authorize_security_group
-    else:
-        auth_func = sg.connection.authorize_security_group_egress
-
-    for h in hosts:
-        auth_func(
-            group_id=sg.id,
-            ip_protocol=rule_key[1],
-            from_port=rule_key[2],
-            to_port=rule_key[3],
-            cidr_ip=h,
-        )
-
-
-def remove_hosts(sg, rule_key, hosts):
-    if rule_key[0] == 'inbound':
-        auth_func = sg.connection.revoke_security_group
-    else:
-        auth_func = sg.connection.revoke_security_group_egress
-
-    for h in hosts:
-        auth_func(
-            group_id=sg.id,
-            ip_protocol=rule_key[1],
-            from_port=rule_key[2],
-            to_port=rule_key[3],
-            cidr_ip=h,
-        )
-
-
-def tags_to_filters(tags):
-    f = {}
-    for tag_name, tag_value in tags:
-        f["tag:%s" % tag_name] = tag_value
-    return f
-
-
-def apply_to_object(sg, filters, get_func, set_func, prompt):
-    # TODO: handle more than 1 security groups
-    if not filters:
-        log.warn("No interface filters to apply, skipping.")
-        return
-    elements = get_func(filters=tags_to_filters(filters.get("tags")))
-    for e in elements:
-        if sg.id not in [g.id for g in e.groups]:
-            if prompt and \
-                    raw_input("Add %s (%s) to %s (%s)? (y/N) " %
-                              (sg.name, sg.id, e.tags.get("Name"),
-                               e.id)) != 'y':
-                continue
-            log.info("Adding %s (%s) to %s (%s)" %
-                     (sg.name, sg.id, e.tags.get("Name"), e.id))
-            set_func(e.id, "groupset", [sg.id])
-
-
-def sync_security_group(remote_sg, sg_config, prompt):
-    rules = make_rules(sg_config)
-    remote_rules = rules_from_sg(remote_sg)
-
-    # Check if we need to add any rules
-    for rule_key, hosts in rules.items():
-        new_hosts = hosts - remote_rules.get(rule_key, set())
-        if new_hosts:
-            if prompt and \
-                    raw_input("%s - Add rule for %s to %s? (y/N) " %
-                              (remote_sg.name, rule_key, new_hosts)) != 'y':
-                continue
-            log.info("%s - adding rule for %s to %s", remote_sg.name, rule_key,
-                     new_hosts)
-            add_hosts(remote_sg, rule_key, new_hosts)
-
-    # Now check if we should delete any rules
-    for rule_key, hosts in remote_rules.items():
-        old_hosts = hosts - rules.get(rule_key, set())
-        if old_hosts:
-            if prompt and \
-                    raw_input("%s - Delete rule %s to %s (y/N) " %
-                              (remote_sg.name, rule_key, old_hosts)) != 'y':
-                continue
-            log.info("%s - removing rule for %s to %s", remote_sg.name,
-                     rule_key, old_hosts)
-            remove_hosts(remote_sg, rule_key, old_hosts)
-    apply_to_object(remote_sg, sg_config.get("apply-to", {}).get("instances"),
-                    remote_sg.connection.get_only_instances,
-                    remote_sg.connection.modify_instance_attribute,
-                    prompt)
-    apply_to_object(remote_sg, sg_config.get("apply-to", {}).get("interfaces"),
-                    remote_sg.connection.get_all_network_interfaces,
-                    remote_sg.connection.modify_network_interface_attribute,
-                    prompt)
-
-
-def main():
-    import sys
-    log.debug("Parsing file")
-    sg_defs = load_config(sys.argv[1])
-
-    # Get the security groups for all affected regions
-    regions = set()
-    for sg_name, sg_config in sg_defs.items():
-        regions.update(sg_config['regions'])
-
-    log.info("Working in regions %s", regions)
-
-    security_groups_by_region = {}
-    conns_by_region = {}
-    for region in regions:
-        log.info("Loading groups for %s", region)
-        conn = get_connection(region)
-        all_groups = conn.get_all_security_groups()
-        conns_by_region[region] = conn
-        security_groups_by_region[region] = all_groups
-
-    prompt = True
-
-    # look for too-big security groups
-    ok = True
-    for sg_name, sg_config in sg_defs.iteritems():
-        rules = make_rules(sg_config)
-        total_grants = sum([len(hosts) for hosts in rules.itervalues()])
-        if total_grants > MAX_GRANTS_PER_SG:
-            log.warning("Group %s has %d rules, more than the allowed %d",
-                        sg_name, total_grants, MAX_GRANTS_PER_SG)
-            ok = False
-    if not ok:
-        exit(1)
-
-    # Now compare vs. our configs
-    for sg_name, sg_config in sg_defs.items():
-        for region in sg_config['regions']:
-            log.info("Working in %s", region)
-            remote_sg = get_remote_sg_by_name(
-                security_groups_by_region[region], sg_name)
-            if not remote_sg:
-                if prompt:
-                    if raw_input('Create security group %s in %s? (y/N) ' %
-                                 (sg_name, region)) != 'y':
-                        log.info("Exiting")
-                        exit(0)
-                log.info("Creating group %s", sg_name)
-                remote_sg = conns_by_region[region].create_security_group(
-                    sg_name,
-                    vpc_id=sg_config['regions'][region],
-                    description=sg_config['description'],
-                )
-                # Fetch it again so we get all the rules
-                log.info("Re-loading group %s", sg_name)
-                remote_sg = conn.get_all_security_groups(
-                    group_ids=[remote_sg.id])[0]
-
-            sync_security_group(remote_sg, sg_config, prompt=prompt)
-
-
-if __name__ == '__main__':
-    logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.INFO)
-    main()
deleted file mode 100644
--- a/scripts/aws_manage_subnets.py
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/usr/bin/env python
-# Add cloudtools to our module search path
-import site
-import os
-site.addsitedir(os.path.join(os.path.dirname(__file__), ".."))
-
-import itertools
-
-import yaml
-from netaddr import IPNetwork, IPSet
-import cloudtools.aws
-
-import logging
-log = logging.getLogger(__name__)
-
-
-def load_config(filename):
-    return yaml.load(open(filename))
-
-
-def sync_subnets(conn, config):
-    log.debug("loading routing tables")
-    routing_tables = conn.get_all_route_tables()
-    route_tables_by_name = {r.tags.get('Name'): r for r in routing_tables}
-    route_tables_by_subnet_id = {}
-    for r in routing_tables:
-        for a in r.associations:
-            route_tables_by_subnet_id[a.subnet_id] = r
-
-    # Get list of AZs
-    zones = conn.get_all_zones()
-
-    for vpc_id in config:
-        # Get a list of all the remote subnets
-        remote_subnets = conn.get_all_subnets(filters={'vpcId': vpc_id})
-
-        seen = set()
-
-        # Go through our config, adjusting or any subnets as appropriate
-        for cidr, block_config in config[vpc_id].items():
-            cidr_net = IPNetwork(cidr)
-            table_name = block_config.get('routing_table')
-            if table_name and table_name not in route_tables_by_name:
-                log.warn("couldn't find routing table %s for block %s", table_name, cidr)
-                log.warn("skipping rest of %s", cidr)
-                continue
-            my_rt = route_tables_by_name[table_name]
-
-            ip_set = IPSet(cidr_net)
-
-            for s in remote_subnets:
-                if IPNetwork(s.cidr_block) in cidr_net:
-                    ip_set.remove(s.cidr_block)
-                    if s.tags.get('Name') != block_config['name']:
-                        log.info("Setting Name of %s to %s", s, block_config['name'])
-                        s.add_tag('Name', block_config['name'])
-
-                        if s.id in route_tables_by_subnet_id:
-                            remote_rt = route_tables_by_subnet_id[s.id]
-                        else:
-                            remote_rt = route_tables_by_subnet_id[None]
-                        if remote_rt != my_rt:
-                            log.info(
-                                "Changing routing table for %s (%s) to %s (%s)",
-                                s, s.tags.get('Name'), my_rt,
-                                my_rt.tags.get('Name'))
-                            if raw_input("(y/N) ") == "y":
-                                conn.associate_route_table(my_rt.id, s.id)
-                    seen.add(s)
-
-            # Are we missing any subnets?
-            # If so, create them!
-            # TODO: We want to evenly distribute the ip range over the
-            # configured availability zones, without dividing smaller than a
-            # /25 network (128 ips, at least 2 of which are reserved)
-            # For now we'll just split them as small as /24, and then assign
-            # them into the subnets
-            while ip_set:
-                log.info("%s - %s isn't covered by any subnets", cidr, ip_set)
-                my_zones = [z for z in zones if z.name not in block_config.get('skip_azs', [])]
-
-                remaining_cidrs = list(ip_set.iter_cidrs())
-                remaining_cidrs.sort(key=lambda s: s.size, reverse=True)
-                for s in remaining_cidrs[:]:
-                    if s.prefixlen < 24:
-                        added = list(s.subnet(24))
-                        remaining_cidrs.remove(s)
-                        remaining_cidrs.extend(added)
-                    ip_set.remove(s)
-
-                zg = itertools.cycle(my_zones)
-                while remaining_cidrs:
-                    c = remaining_cidrs.pop()
-                    z = next(zg)
-                    log.info("creating subnet %s in %s/%s", c, z.name, vpc_id)
-                    if raw_input("(y/N) ") == "y":
-                        log.debug("creating subnet")
-                        s = conn.create_subnet(vpc_id, c, z.name)
-                        log.debug("adding tag")
-                        # TODO: sometimes the subnet isn't actually created by
-                        # the time we try and add the tag, so get a 400 error
-                        s.add_tag('Name', block_config['name'])
-                        log.debug("associating routing")
-                        conn.associate_route_table(my_rt.id, s.id)
-
-        local_missing = set(remote_subnets) - seen
-        for m in local_missing:
-            log.info("%s:%s (name: %s) is unmanaged", m.id, m.cidr_block, m.tags.get('Name'))
-
-
-def main():
-    import sys
-    log.debug("parsing file")
-    config = load_config(sys.argv[1])
-
-    for region in config.keys():
-        log.info("working in %s", region)
-        conn = cloudtools.aws.get_vpc(region)
-        sync_subnets(conn, config[region])
-
-
-if __name__ == '__main__':
-    logging.getLogger('boto').setLevel(logging.INFO)
-    logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.DEBUG)
-    main()
deleted file mode 100644
--- a/scripts/aws_manage_users.py
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/usr/bin/env python
-import boto.iam
-import boto.exception
-import os
-import site
-import json
-
-site.addsitedir(os.path.join(os.path.dirname(__file__), ".."))
-
-
-def get_users(conn=None):
-    if not conn:
-        conn = boto.iam.IAMConnection()
-    users = conn.get_all_users("/")
-    users = users['list_users_response']['list_users_result']['users']
-    return users
-
-
-def make_mfa_policy(user_name):
-    return json.dumps({
-        "Statement": [{
-            "Action": ["iam:CreateVirtualMFADevice",
-                       "iam:DeleteVirtualMFADevice",
-                       "iam:ListVirtualMFADevices",
-                       "iam:ResyncMFADevice",
-                       "iam:EnableMFADevice",
-                       "iam:CreateAccessKey",
-                       "iam:UpdateLoginProfile",
-                       "iam:DeactivateMFADevice"
-                       ],
-            "Resource": ["arn:aws:iam::*:mfa/%s" % user_name,
-                         "arn:aws:iam::*:user/%s" % user_name,
-                         ],
-            "Effect": "Allow"
-        }
-        ]
-    }, indent=2)
-
-
-def enable_mfa(users):
-    conn = boto.iam.IAMConnection()
-    all_users = get_users(conn)
-    for u in all_users:
-        if u.user_name in users:
-            user_name = u.user_name
-            policy_json = make_mfa_policy(user_name)
-            conn.put_user_policy(user_name, "manage_own_MFA", policy_json)
-
-
-def main():
-    import argparse
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--mfa", help="enable mfas for users", action="store_const", const="mfa", dest="action")
-    parser.add_argument("--list", help="list users", action="store_const", const="list", dest="action")
-    parser.add_argument("users", help="list of users to manage")
-
-    args = parser.parse_args()
-
-    if args.action == 'list':
-        for u in get_users():
-            print u.user_name
-
-    elif args.action == "mfa":
-        print "enabling MFA for", args.users
-        enable_mfa(args.users)
-
-
-if __name__ == '__main__':
-    main()
deleted file mode 100644
--- a/scripts/aws_process_cloudtrail_logs.py
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/env python
-"""parses local cloudtrail logs and stores the results in the events dir"""
-
-import json
-import os
-import site
-from functools import partial
-from multiprocessing import Pool
-
-site.addsitedir(os.path.join(os.path.dirname(__file__), ".."))
-from cloudtools.fileutils import (mkdir_p, get_data_from_gz_file,
-                                  get_data_from_json_file)
-
-import logging
-log = logging.getLogger(__name__)
-
-
-def move_to_bad_logs(filename):
-    """moves filename into BAD_LOGS dir"""
-    bad_logs_dir = os.path.join(os.path.dirname(filename), '..', 'bad_logs')
-    bad_logs_dir = os.path.abspath(bad_logs_dir)
-    mkdir_p(bad_logs_dir)
-    name = os.path.split(filename)[1]
-    dst_file = os.path.join(bad_logs_dir, name)
-    log.debug("%s => %s", filename, dst_file)
-    os.rename(filename, dst_file)
-
-
-def process_cloudtrail(discard_bad_logs, events_dir, filename):
-    """extracts data from filename"""
-    try:
-        data = get_data_from_gz_file(filename)
-        data = json.loads(data)
-    except (ValueError, IOError):
-        log.debug('cannot decode JSON from %s', filename)
-        try:
-            if discard_bad_logs:
-                log.debug('%s is not valid, deleting it', filename)
-                os.remove(filename)
-            else:
-                move_to_bad_logs(filename)
-        except Exception:
-            pass
-        return
-
-    log.debug('processing: %s', filename)
-    for record in data['Records']:
-        eventName = record['eventName']
-        # just process stop events, skip StartInstances and TerminateInstances
-        if eventName in ('StopInstances',):
-            process_start_stop_record(events_dir, record)
-
-
-def process_start_stop_record(events_dir, record):
-    """process a start/stop/terminate row"""
-    # this metod works with Start/Stop/Terminate events too
-    time_ = record['eventTime']
-    for item in record['requestParameters']['instancesSet']['items']:
-        instanceId = item['instanceId']
-        data = {'instances': instanceId,
-                'eventName': record['eventName'],
-                'eventTime': time_}
-        write_to_json(events_dir, data)
-
-
-def get_time_from_file(filename):
-    """returns the eventTime from filename"""
-    try:
-        data = get_data_from_json_file(filename)
-        return data['eventTime']
-    except (ValueError, KeyError):
-        log.debug('cannot get eventTime from json file: %s', filename)
-        return None
-
-
-def write_to_json(events_dir, data):
-    """writes data to a json file; the file name is:
-       <EVENTS_DIR>/event/instance,
-       event and instance are provided by data itself"""
-    event = data['eventName']
-    instance = data['instances']
-    filename = os.path.join(events_dir, event, instance)
-    mkdir_p(os.path.dirname(filename))
-    if not os.path.exists(filename):
-        with open(filename, 'w') as f_out:
-            json.dump(data, f_out)
-    elif data['eventTime'] > get_time_from_file(filename):
-        # replace old event with current one
-        with open(filename, 'w') as f_out:
-            json.dump(data, f_out)
-
-
-if __name__ == '__main__':
-    import argparse
-    parser = argparse.ArgumentParser()
-    parser.add_argument("-v", "--verbose", action="store_true",
-                        help="Increase logging verbosity")
-    parser.add_argument("--cloudtrail-dir", metavar="cloudtrail_dir",
-                        required=True,
-                        help="Cloutrail logs directory")
-    parser.add_argument("--events-dir", metavar="events_dir", required=True,
-                        help="directory where events logs will be stored")
-    parser.add_argument("--discard-bad-logs", action="store_true",
-                        help="delete bad log files, if not provided, bad log "
-                        "files will be moved into bad_logs_dir (next to "
-                        "--event-dir)")
-    args = parser.parse_args()
-
-    logging.basicConfig(format="%(asctime)s - %(message)s")
-    if args.verbose:
-        log.setLevel(logging.DEBUG)
-    else:
-        log.setLevel(logging.INFO)
-
-    prefixes = []
-    # cloudtrails
-    # get all the available cloudtrail files
-    logging.debug("processing cloudtrail files")
-    cloudtrail_files = []
-    for dirpath, dirnames, filenames in os.walk(args.cloudtrail_dir):
-        for log_file in filenames:
-            cloudtrail_files.append(os.path.join(dirpath, log_file))
-
-    # process_cloud_tails requires 3 arguments: discard_bad_logs,
-    # events_dir and cloudtrail_file, maps() accepts only 2 parameters,
-    # function name and an iterable, let's use partials
-    process_cloudtrail_partial = partial(
-        process_cloudtrail, args.discard_bad_logs, args.events_dir)
-    pool = Pool()
-    pool.map(process_cloudtrail_partial, cloudtrail_files)
-    pool.close()
-    pool.join()
deleted file mode 100644
--- a/scripts/aws_publish_amis.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/usr/bin/env python
-
-import argparse
-import logging
-import site
-import os
-import json
-from collections import defaultdict
-import boto
-
-site.addsitedir(os.path.join(os.path.dirname(__file__), ".."))
-from cloudtools.aws import get_aws_connection, DEFAULT_REGIONS
-
-log = logging.getLogger(__name__)
-BUCKET = "mozilla-releng-amis"
-KEY = "amis.json"
-# A list of attributes with optional function to be used to convert them to
-# thir JSON representation
-AMI_ATTRS = ("architecture", ("block_device_mapping", lambda o: o.keys()),
-             "description", "hypervisor", "id", "is_public", "kernel_id",
-             "location", "name", "owner_alias", "owner_id", "platform",
-             "ramdisk_id", ("region", lambda o: o.name), "root_device_name",
-             "root_device_type", "state", "tags", "type",
-             "virtualization_type")
-
-
-def amis_to_dict(images):
-    """Convert collection of AMIs into their JSON prepresenation.  Uses
-    AMI_ATTRS to get the list of attributes to be converted.  Optionally can
-    use a function to conver objects into their JSON compatible representation.
-    """
-    data = defaultdict(dict)
-    for img in images:
-        for attr in AMI_ATTRS:
-            if isinstance(attr, tuple):
-                name, func = attr
-                data[img.id][name] = func(getattr(img, name))
-            else:
-                data[img.id][attr] = getattr(img, attr)
-    return json.dumps(data)
-
-
-def update_ami_status(data):
-    """Publish JSON to S3. It can be accessed from the following URL:
-       https://s3.amazonaws.com/{BUCKET}/{KEY},
-       https://s3.amazonaws.com/mozilla-releng-amis/amis.json in our case"""
-    conn = boto.connect_s3()
-    bucket = conn.get_bucket(BUCKET)
-    key = bucket.get_key(KEY)
-    key.set_contents_from_string(data)
-    key.set_acl("public-read")
-
-
-if __name__ == '__main__':
-
-    parser = argparse.ArgumentParser()
-    parser.add_argument("-r", "--region", dest="regions", action="append",
-                        help="optional list of regions")
-    parser.add_argument("-q", "--quiet", action="store_true",
-                        help="Supress logging messages")
-
-    args = parser.parse_args()
-
-    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s")
-    if not args.quiet:
-        log.setLevel(logging.INFO)
-    else:
-        log.setLevel(logging.ERROR)
-
-    if not args.regions:
-        args.regions = DEFAULT_REGIONS
-    images = []
-    for region in args.regions:
-        conn = get_aws_connection(region)
-        images.extend(conn.get_all_images(owners=["self"],
-                                          filters={"state": "available"}))
-    update_ami_status(amis_to_dict(images))
deleted file mode 100644
--- a/scripts/aws_sanity_checker.py
+++ /dev/null
@@ -1,264 +0,0 @@
-#!/usr/bin/env python
-"""Generates a report of the AWS instance status"""
-
-import argparse
-import logging
-import collections
-import re
-import site
-import os
-
-site.addsitedir(os.path.join(os.path.dirname(__file__), ".."))
-from cloudtools.aws.sanity import AWSInstance, aws_instance_factory, SLAVE_TAGS
-from cloudtools.aws import get_aws_connection, DEFAULT_REGIONS
-
-log = logging.getLogger(__name__)
-
-
-def is_beanstalk_instance(i):
-    """returns True if this is a beanstalk instance"""
-    return i.tags.get("elasticbeanstalk:environment-name") is not None
-
-
-def get_all_instances(connection):
-    """gets all the instances from a connection"""
-    res = connection.get_all_instances()
-    instances = []
-    if res:
-        instances = reduce(lambda a, b: a + b, [r.instances for r in res])
-    # Skip instances managed by Elastic Beanstalk
-    return [i for i in instances if not is_beanstalk_instance(i)]
-
-
-def report(items, message):
-    """prints out the sanity check message"""
-    if items:
-        print "==== {message} ====".format(message=message)
-        for num, item in enumerate(items):
-            print "{num} {item}".format(num=num, item=item)
-        print
-
-
-def _report_lazy_running_instances(lazy):
-    """reports the lazy long running instances"""
-    message = 'Lazy long running instances'
-    lazy = sorted(lazy, reverse=True, key=lambda x: x.get_uptime())
-    lazy = [i.longrunning_message() for i in lazy]
-    report(lazy, message)
-
-
-def _report_long_running_instances(long_running):
-    """reports the long running instances"""
-    message = 'Long running instances'
-    # remove lazy instances...
-    long_running = [i for i in long_running if not i.is_lazy()]
-    if long_running:
-        items = sorted(long_running, reverse=True,
-                       key=lambda x: x.get_uptime())
-        items = [i.longrunning_message() for i in items]
-        report(items, message)
-    else:
-        print "==== No long running instances ===="
-        print
-
-
-def _report_loaned(loaned):
-    """reports the loaned instances"""
-    if loaned:
-        items = [i.loaned_message() for i in loaned]
-        message = "Loaned"
-        report(items, message)
-    else:
-        print "==== No loaned instances ===="
-        print
-
-
-def _report_bad_type(bad_type):
-    """reports the instances with a bad type"""
-    if bad_type:
-        message = "Instances with unknown type"
-        # sort the instances by region
-        items = sorted(bad_type, key=lambda x: x.get_region())
-        # we need the unknown_type_message...
-        items = [i.unknown_type_message() for i in items]
-        report(items, message)
-    else:
-        print "==== No instances with unknown type ===="
-        print
-
-
-def _report_bad_state(bad_state):
-    """reports the instances with a bad state"""
-    if bad_state:
-        message = "Instances with unknown state"
-        items = sorted(bad_state, key=lambda x: x.get_region())
-        items = [i.unknown_state_message() for i in items]
-        report(items, message)
-    else:
-        print "==== No instances with unknown state ===="
-        print
-
-
-def _report_long_stopped(long_stopped):
-    """reports the instances stopped for a while"""
-    if long_stopped:
-        message = "Instances stopped for a while"
-        items = sorted(long_stopped, reverse=True,
-                       key=lambda x: x.get_uptime())
-        items = [i.stopped_message() for i in items]
-        report(items, message)
-    else:
-        print "==== No long stopped instances ===="
-        print
-
-
-def _report_impaired(impaired):
-    """reports the impaired instances"""
-    if impaired:
-        print "=== Impaired instances ===="
-        for num, instance in enumerate(impaired):
-            print "{0} {1}".format(num, instance)
-        print
-
-
-def get_all_instance_status(connection, filters=None):</