Bug 978327 - Refactor cloud-tools a bit
authorRail Aliiev <rail@mozilla.com>
Mon, 03 Mar 2014 12:28:11 -0500
changeset 332 d20eacad281bbd079cea7823b16c5d5c65edf749
parent 331 b7dc5193b9948fd453f9f2503f352f8e80d3402d
child 333 d0ae264d2f4ab67401505456a15acef751ccbd4f
push id: 332
push user: raliiev@mozilla.com
push date: Mon, 03 Mar 2014 17:28:16 +0000
bugs: 978327
Bug 978327 - Refactor cloud-tools a bit
ami_configs/centos-6-x86_64-base.json
ami_configs/centos-6-x86_64-base/additional_packages
ami_configs/centos-6-x86_64-base/boot/grub/grub.conf
ami_configs/centos-6-x86_64-base/etc/fstab
ami_configs/centos-6-x86_64-base/etc/hosts
ami_configs/centos-6-x86_64-base/etc/init.d/rc.local
ami_configs/centos-6-x86_64-base/etc/rc.local
ami_configs/centos-6-x86_64-base/etc/sysconfig/network
ami_configs/centos-6-x86_64-base/etc/sysconfig/network-scripts/ifcfg-eth0
ami_configs/centos-6-x86_64-base/etc/yum-local.cfg
ami_configs/centos-6-x86_64-base/groupinstall
ami_configs/centos-6-x86_64-hvm-base.json
ami_configs/centos-6-x86_64-hvm-base/additional_packages
ami_configs/centos-6-x86_64-hvm-base/boot/grub/device.map
ami_configs/centos-6-x86_64-hvm-base/boot/grub/grub.conf
ami_configs/centos-6-x86_64-hvm-base/etc/fstab
ami_configs/centos-6-x86_64-hvm-base/etc/hosts
ami_configs/centos-6-x86_64-hvm-base/etc/init.d
ami_configs/centos-6-x86_64-hvm-base/etc/rc.local
ami_configs/centos-6-x86_64-hvm-base/etc/sysconfig
ami_configs/centos-6-x86_64-hvm-base/etc/yum-local.cfg
ami_configs/centos-6-x86_64-hvm-base/groupinstall
ami_configs/centos-6-x86_64-hvm-base/grub-install.diff
ami_configs/centos-6-x86_64-hvm-try
ami_configs/centos-6-x86_64-hvm-try.json
ami_configs/centos-6-x86_64-server.json
ami_configs/centos-6-x86_64-server/additional_packages
ami_configs/centos-6-x86_64-server/boot
ami_configs/centos-6-x86_64-server/etc
ami_configs/centos-6-x86_64-server/groupinstall
ami_configs/releng-public.list
ami_configs/releng-public.repo
ami_configs/spot_setup.conf
ami_configs/spot_setup.sh
ami_configs/ubuntu-12.04-i386-desktop
ami_configs/ubuntu-12.04-i386-desktop.json
ami_configs/ubuntu-12.04-x86_64-desktop.json
ami_configs/ubuntu-12.04-x86_64-desktop/boot/grub/menu.lst
ami_configs/ubuntu-12.04-x86_64-desktop/etc/fstab
ami_configs/ubuntu-12.04-x86_64-desktop/etc/hosts
ami_configs/ubuntu-12.04-x86_64-desktop/etc/network/interfaces
ami_configs/ubuntu-12.04-x86_64-desktop/etc/rc.local
ami_configs/ubuntu-12.04-x86_64-desktop/usr/sbin/policy-rc.d
aws/ami_configs/centos-6-x86_64-base.json
aws/ami_configs/centos-6-x86_64-base/additional_packages
aws/ami_configs/centos-6-x86_64-base/boot/grub/grub.conf
aws/ami_configs/centos-6-x86_64-base/etc/fstab
aws/ami_configs/centos-6-x86_64-base/etc/hosts
aws/ami_configs/centos-6-x86_64-base/etc/init.d/rc.local
aws/ami_configs/centos-6-x86_64-base/etc/rc.local
aws/ami_configs/centos-6-x86_64-base/etc/sysconfig/network
aws/ami_configs/centos-6-x86_64-base/etc/sysconfig/network-scripts/ifcfg-eth0
aws/ami_configs/centos-6-x86_64-base/etc/yum-local.cfg
aws/ami_configs/centos-6-x86_64-base/groupinstall
aws/ami_configs/centos-6-x86_64-hvm-base.json
aws/ami_configs/centos-6-x86_64-hvm-base/additional_packages
aws/ami_configs/centos-6-x86_64-hvm-base/boot/grub/device.map
aws/ami_configs/centos-6-x86_64-hvm-base/boot/grub/grub.conf
aws/ami_configs/centos-6-x86_64-hvm-base/etc/fstab
aws/ami_configs/centos-6-x86_64-hvm-base/etc/hosts
aws/ami_configs/centos-6-x86_64-hvm-base/etc/init.d
aws/ami_configs/centos-6-x86_64-hvm-base/etc/rc.local
aws/ami_configs/centos-6-x86_64-hvm-base/etc/sysconfig
aws/ami_configs/centos-6-x86_64-hvm-base/etc/yum-local.cfg
aws/ami_configs/centos-6-x86_64-hvm-base/groupinstall
aws/ami_configs/centos-6-x86_64-hvm-base/grub-install.diff
aws/ami_configs/centos-6-x86_64-hvm-try
aws/ami_configs/centos-6-x86_64-hvm-try.json
aws/ami_configs/centos-6-x86_64-server.json
aws/ami_configs/centos-6-x86_64-server/additional_packages
aws/ami_configs/centos-6-x86_64-server/boot
aws/ami_configs/centos-6-x86_64-server/etc
aws/ami_configs/centos-6-x86_64-server/groupinstall
aws/ami_configs/releng-public.list
aws/ami_configs/releng-public.repo
aws/ami_configs/spot_setup.conf
aws/ami_configs/spot_setup.sh
aws/ami_configs/ubuntu-12.04-i386-desktop
aws/ami_configs/ubuntu-12.04-i386-desktop.json
aws/ami_configs/ubuntu-12.04-x86_64-desktop.json
aws/ami_configs/ubuntu-12.04-x86_64-desktop/boot/grub/menu.lst
aws/ami_configs/ubuntu-12.04-x86_64-desktop/etc/fstab
aws/ami_configs/ubuntu-12.04-x86_64-desktop/etc/hosts
aws/ami_configs/ubuntu-12.04-x86_64-desktop/etc/network/interfaces
aws/ami_configs/ubuntu-12.04-x86_64-desktop/etc/rc.local
aws/ami_configs/ubuntu-12.04-x86_64-desktop/usr/sbin/policy-rc.d
aws/aws_create_ami.py
aws/aws_create_instance.py
aws/aws_instances.py
aws/aws_manage_instances.py
aws/aws_manage_routingtables.py
aws/aws_manage_securitygroups.py
aws/aws_sanity_checker.py
aws/aws_stop_idle.py
aws/aws_watch_pending.py
aws/check_dns.py
aws/configs/bld-linux64
aws/configs/dev-linux64
aws/configs/master-linux64
aws/configs/routingtables.yml
aws/configs/securitygroups.yml
aws/configs/servo-linux64
aws/configs/try-linux64
aws/configs/tst-linux32
aws/configs/tst-linux64
aws/configs/vcssync-linux64
aws/configs/watch_pending.cfg
aws/configs/watch_pending.cfg.example
aws/configs/watch_pending_servo.cfg
aws/ec22ip.py
aws/free_ips.py
aws/instance2ami.py
aws/instance_data/us-east-1.instance_data_dev.json
aws/instance_data/us-east-1.instance_data_master.json
aws/instance_data/us-east-1.instance_data_prod.json
aws/instance_data/us-east-1.instance_data_servo.json
aws/instance_data/us-east-1.instance_data_tests.json
aws/instance_data/us-east-1.instance_data_try.json
aws/instance_data/us-west-2.instance_data_master.json
aws/instance_data/us-west-2.instance_data_prod.json
aws/instance_data/us-west-2.instance_data_tests.json
aws/instance_data/us-west-2.instance_data_try.json
aws/spot_sanity_check.py
aws/tag_spot_instances.py
cloudtools/__init__.py
cloudtools/aws/__init__.py
cloudtools/aws/instance.py
cloudtools/aws/sanity.py
cloudtools/aws/vpc.py
cloudtools/dns.py
configs/bld-linux64
configs/dev-linux64
configs/master-linux64
configs/routingtables.yml
configs/securitygroups.yml
configs/servo-linux64
configs/try-linux64
configs/tst-linux32
configs/tst-linux64
configs/vcssync-linux64
configs/watch_pending.cfg
configs/watch_pending.cfg.example
configs/watch_pending_servo.cfg
instance_data/us-east-1.instance_data_dev.json
instance_data/us-east-1.instance_data_master.json
instance_data/us-east-1.instance_data_prod.json
instance_data/us-east-1.instance_data_servo.json
instance_data/us-east-1.instance_data_tests.json
instance_data/us-east-1.instance_data_try.json
instance_data/us-west-2.instance_data_master.json
instance_data/us-west-2.instance_data_prod.json
instance_data/us-west-2.instance_data_tests.json
instance_data/us-west-2.instance_data_try.json
requirements.txt
scripts/aws_create_ami.py
scripts/aws_create_instance.py
scripts/aws_manage_instances.py
scripts/aws_manage_routingtables.py
scripts/aws_manage_securitygroups.py
scripts/aws_sanity_checker.py
scripts/aws_stop_idle.py
scripts/aws_watch_pending.py
scripts/check_dns.py
scripts/ec22ip.py
scripts/free_ips.py
scripts/instance2ami.py
scripts/spot_sanity_check.py
scripts/tag_spot_instances.py
rename from aws/ami_configs/centos-6-x86_64-base.json
rename to ami_configs/centos-6-x86_64-base.json
rename from aws/ami_configs/centos-6-x86_64-base/additional_packages
rename to ami_configs/centos-6-x86_64-base/additional_packages
rename from aws/ami_configs/centos-6-x86_64-base/boot/grub/grub.conf
rename to ami_configs/centos-6-x86_64-base/boot/grub/grub.conf
rename from aws/ami_configs/centos-6-x86_64-base/etc/fstab
rename to ami_configs/centos-6-x86_64-base/etc/fstab
rename from aws/ami_configs/centos-6-x86_64-base/etc/hosts
rename to ami_configs/centos-6-x86_64-base/etc/hosts
rename from aws/ami_configs/centos-6-x86_64-base/etc/init.d/rc.local
rename to ami_configs/centos-6-x86_64-base/etc/init.d/rc.local
rename from aws/ami_configs/centos-6-x86_64-base/etc/rc.local
rename to ami_configs/centos-6-x86_64-base/etc/rc.local
rename from aws/ami_configs/centos-6-x86_64-base/etc/sysconfig/network
rename to ami_configs/centos-6-x86_64-base/etc/sysconfig/network
rename from aws/ami_configs/centos-6-x86_64-base/etc/sysconfig/network-scripts/ifcfg-eth0
rename to ami_configs/centos-6-x86_64-base/etc/sysconfig/network-scripts/ifcfg-eth0
rename from aws/ami_configs/centos-6-x86_64-base/etc/yum-local.cfg
rename to ami_configs/centos-6-x86_64-base/etc/yum-local.cfg
rename from aws/ami_configs/centos-6-x86_64-base/groupinstall
rename to ami_configs/centos-6-x86_64-base/groupinstall
rename from aws/ami_configs/centos-6-x86_64-hvm-base.json
rename to ami_configs/centos-6-x86_64-hvm-base.json
rename from aws/ami_configs/centos-6-x86_64-hvm-base/additional_packages
rename to ami_configs/centos-6-x86_64-hvm-base/additional_packages
rename from aws/ami_configs/centos-6-x86_64-hvm-base/boot/grub/device.map
rename to ami_configs/centos-6-x86_64-hvm-base/boot/grub/device.map
rename from aws/ami_configs/centos-6-x86_64-hvm-base/boot/grub/grub.conf
rename to ami_configs/centos-6-x86_64-hvm-base/boot/grub/grub.conf
rename from aws/ami_configs/centos-6-x86_64-hvm-base/etc/fstab
rename to ami_configs/centos-6-x86_64-hvm-base/etc/fstab
rename from aws/ami_configs/centos-6-x86_64-hvm-base/etc/hosts
rename to ami_configs/centos-6-x86_64-hvm-base/etc/hosts
rename from aws/ami_configs/centos-6-x86_64-hvm-base/etc/init.d
rename to ami_configs/centos-6-x86_64-hvm-base/etc/init.d
rename from aws/ami_configs/centos-6-x86_64-hvm-base/etc/rc.local
rename to ami_configs/centos-6-x86_64-hvm-base/etc/rc.local
rename from aws/ami_configs/centos-6-x86_64-hvm-base/etc/sysconfig
rename to ami_configs/centos-6-x86_64-hvm-base/etc/sysconfig
rename from aws/ami_configs/centos-6-x86_64-hvm-base/etc/yum-local.cfg
rename to ami_configs/centos-6-x86_64-hvm-base/etc/yum-local.cfg
rename from aws/ami_configs/centos-6-x86_64-hvm-base/groupinstall
rename to ami_configs/centos-6-x86_64-hvm-base/groupinstall
rename from aws/ami_configs/centos-6-x86_64-hvm-base/grub-install.diff
rename to ami_configs/centos-6-x86_64-hvm-base/grub-install.diff
rename from aws/ami_configs/centos-6-x86_64-hvm-try
rename to ami_configs/centos-6-x86_64-hvm-try
rename from aws/ami_configs/centos-6-x86_64-hvm-try.json
rename to ami_configs/centos-6-x86_64-hvm-try.json
rename from aws/ami_configs/centos-6-x86_64-server.json
rename to ami_configs/centos-6-x86_64-server.json
rename from aws/ami_configs/centos-6-x86_64-server/additional_packages
rename to ami_configs/centos-6-x86_64-server/additional_packages
rename from aws/ami_configs/centos-6-x86_64-server/boot
rename to ami_configs/centos-6-x86_64-server/boot
rename from aws/ami_configs/centos-6-x86_64-server/etc
rename to ami_configs/centos-6-x86_64-server/etc
rename from aws/ami_configs/centos-6-x86_64-server/groupinstall
rename to ami_configs/centos-6-x86_64-server/groupinstall
rename from aws/ami_configs/releng-public.list
rename to ami_configs/releng-public.list
rename from aws/ami_configs/releng-public.repo
rename to ami_configs/releng-public.repo
rename from aws/ami_configs/spot_setup.conf
rename to ami_configs/spot_setup.conf
rename from aws/ami_configs/spot_setup.sh
rename to ami_configs/spot_setup.sh
rename from aws/ami_configs/ubuntu-12.04-i386-desktop
rename to ami_configs/ubuntu-12.04-i386-desktop
rename from aws/ami_configs/ubuntu-12.04-i386-desktop.json
rename to ami_configs/ubuntu-12.04-i386-desktop.json
rename from aws/ami_configs/ubuntu-12.04-x86_64-desktop.json
rename to ami_configs/ubuntu-12.04-x86_64-desktop.json
rename from aws/ami_configs/ubuntu-12.04-x86_64-desktop/boot/grub/menu.lst
rename to ami_configs/ubuntu-12.04-x86_64-desktop/boot/grub/menu.lst
rename from aws/ami_configs/ubuntu-12.04-x86_64-desktop/etc/fstab
rename to ami_configs/ubuntu-12.04-x86_64-desktop/etc/fstab
rename from aws/ami_configs/ubuntu-12.04-x86_64-desktop/etc/hosts
rename to ami_configs/ubuntu-12.04-x86_64-desktop/etc/hosts
rename from aws/ami_configs/ubuntu-12.04-x86_64-desktop/etc/network/interfaces
rename to ami_configs/ubuntu-12.04-x86_64-desktop/etc/network/interfaces
rename from aws/ami_configs/ubuntu-12.04-x86_64-desktop/etc/rc.local
rename to ami_configs/ubuntu-12.04-x86_64-desktop/etc/rc.local
rename from aws/ami_configs/ubuntu-12.04-x86_64-desktop/usr/sbin/policy-rc.d
rename to ami_configs/ubuntu-12.04-x86_64-desktop/usr/sbin/policy-rc.d
new file mode 100644
new file mode 100644
--- /dev/null
+++ b/cloudtools/aws/__init__.py
@@ -0,0 +1,57 @@
+import os
+import logging
+import time
+from boto.ec2 import connect_to_region
+from boto.vpc import VPCConnection
+from repoze.lru import lru_cache
+
+log = logging.getLogger(__name__)
+AMI_CONFIGS_DIR = os.path.join(os.path.dirname(__file__), "../../ami_configs")
+INSTANCE_CONFIGS_DIR = os.path.join(os.path.dirname(__file__), "../../configs")
+
+
@lru_cache(10)
def get_aws_connection(region, aws_access_key_id=None,
                       aws_secret_access_key=None):
    """Return a boto EC2 connection for *region*.

    Results are memoized (per region/credential tuple) via repoze.lru,
    so repeated calls reuse the same connection object.
    """
    return connect_to_region(
        region,
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
    )
+
+
@lru_cache(10)
def get_vpc(region, aws_access_key_id=None, aws_secret_access_key=None):
    """Return a VPCConnection bound to the same region object as the
    cached EC2 connection for *region*.  Memoized via repoze.lru.
    """
    ec2_conn = get_aws_connection(region, aws_access_key_id,
                                  aws_secret_access_key)
    return VPCConnection(
        region=ec2_conn.region,
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
    )
+
+
def wait_for_status(obj, attr_name, attr_value, update_method):
    """Poll *obj* until ``getattr(obj, attr_name) == attr_value``.

    Refreshes state by calling ``obj.<update_method>()``, sleeping 1s
    between polls and 10s after an error.  Blocks indefinitely until the
    target value is observed (no timeout).
    """
    logging.getLogger(__name__).debug("waiting for %s availability", obj)
    while True:
        try:
            getattr(obj, update_method)()
            if getattr(obj, attr_name) == attr_value:
                break
            time.sleep(1)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit can still abort this otherwise-endless loop.
            logging.getLogger(__name__).exception('hit error waiting')
            time.sleep(10)
+
+
def name_available(conn, name):
    """Return True if no non-terminated instance in this region already
    carries *name* as its "Name" tag.

    :param conn: boto EC2 connection (anything with get_all_instances())
    :param name: candidate instance name to check
    """
    taken = set()
    # Flatten reservations -> instances without reduce(): the original
    # reduce(lambda a, b: a + b, [...]) raised TypeError when the region
    # had no reservations at all.
    for reservation in conn.get_all_instances():
        for instance in reservation.instances:
            if instance.state != "terminated":
                taken.add(instance.tags.get("Name"))
    return name not in taken
new file mode 100644
--- /dev/null
+++ b/cloudtools/aws/instance.py
@@ -0,0 +1,59 @@
+import uuid
+import logging
+import time
+from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
+from fabric.api import run, env, sudo
+from . import wait_for_status
+
+log = logging.getLogger(__name__)
+
+
def run_instance(connection, instance_name, config, key_name, user='root',
                 subnet_id=None):
    """Launch an EC2 instance described by *config* and block until it is
    reachable over SSH, then tag it and enable root login.

    :param connection: boto EC2 connection used to launch the instance
    :param instance_name: value for the instance's "Name" tag
    :param config: dict with 'ami', 'instance_type' and optionally
        'device_map' ({device: {'size': GB, ...}})
    :param key_name: EC2 SSH key pair name
    :param user: initial SSH user; if not 'root', root access is restored
    :param subnet_id: launch into this VPC subnet and use the private IP
        for SSH instead of the public DNS name
    :return: the running boto Instance object
    """
    bdm = None
    if 'device_map' in config:
        bdm = BlockDeviceMapping()
        for device, device_info in config['device_map'].items():
            # Ephemeral on termination: these are build hosts, not pets.
            bdm[device] = BlockDeviceType(size=device_info['size'],
                                          delete_on_termination=True)

    reservation = connection.run_instances(
        image_id=config['ami'],
        key_name=key_name,
        instance_type=config['instance_type'],
        block_device_map=bdm,
        # Idempotency token so a retried API call can't double-launch.
        client_token=str(uuid.uuid4())[:16],
        subnet_id=subnet_id,
    )

    instance = reservation.instances[0]
    log.info("instance %s created, waiting to come up", instance)
    # Wait for the instance to come up
    wait_for_status(instance, "state", "running", "update")
    if subnet_id:
        env.host_string = instance.private_ip_address
    else:
        env.host_string = instance.public_dns_name
    env.user = user
    env.abort_on_prompts = True
    env.disable_known_hosts = True

    # wait until the instance is responsive over SSH
    while True:
        try:
            if run('date').succeeded:
                break
        except Exception:
            # Narrowed from a bare except so Ctrl-C can still abort;
            # SSH/network errors are expected while the host boots.
            log.debug('hit error waiting for instance to come up')
        time.sleep(10)

    instance.add_tag('Name', instance_name)
    # Overwrite root's limited authorized_keys
    if user != 'root':
        sudo("cp -f ~%s/.ssh/authorized_keys "
             "/root/.ssh/authorized_keys" % user)
        sudo("sed -i -e '/PermitRootLogin/d' "
             "-e '$ a PermitRootLogin without-password' /etc/ssh/sshd_config")
        sudo("service sshd restart || service ssh restart")
        sudo("sleep 20")
    return instance
rename from aws/aws_instances.py
rename to cloudtools/aws/sanity.py
new file mode 100644
--- /dev/null
+++ b/cloudtools/aws/vpc.py
@@ -0,0 +1,21 @@
+from IPy import IP
+
+
def get_subnet_id(vpc, ip):
    """Return the ID of the VPC subnet whose CIDR block contains *ip*,
    or None when no subnet matches."""
    target = IP(ip)
    for subnet in vpc.get_all_subnets():
        if target in IP(subnet.cidr_block):
            return subnet.id
    return None
+
+
def ip_available(conn, ip):
    """Return True if *ip* is not in use by any instance's primary
    private address nor by any elastic network interface.

    :param conn: boto EC2 connection (needs get_all_instances() and
        get_all_network_interfaces())
    :param ip: private IP address string to check
    """
    used = set()
    # Flatten reservations -> instances without reduce(): the original
    # reduce(lambda a, b: a + b, [...]) raised TypeError when the region
    # had no reservations at all.
    for reservation in conn.get_all_instances():
        for instance in reservation.instances:
            used.add(instance.private_ip_address)
    for interface in conn.get_all_network_interfaces():
        used.add(interface.private_ip_address)
    return ip not in used
new file mode 100644
--- /dev/null
+++ b/cloudtools/dns.py
@@ -0,0 +1,23 @@
+from socket import gethostbyname, gaierror, gethostbyaddr, herror, \
+    gethostbyname_ex
+
+
def get_ip(hostname):
    """Resolve *hostname* to an IPv4 address string; None if resolution
    fails."""
    try:
        address = gethostbyname(hostname)
    except gaierror:
        return None
    return address
+
+
def get_ptr(ip):
    """Reverse-resolve *ip* to its PTR hostname; None if the lookup
    fails."""
    try:
        hostname, _aliases, _addrs = gethostbyaddr(ip)
    except herror:
        return None
    return hostname
+
+
def get_cname(cname):
    """Return the canonical hostname that *cname* resolves to, or None
    when resolution fails.

    gethostbyname_ex() returns (canonical_name, aliases, addresses);
    only the canonical name is of interest here.
    """
    try:
        return gethostbyname_ex(cname)[0]
    except (gaierror, herror, UnicodeError):
        # Narrowed from a bare except: still best-effort for resolver
        # and bad-hostname errors, but no longer swallows
        # KeyboardInterrupt/SystemExit.
        return None
rename from aws/configs/bld-linux64
rename to configs/bld-linux64
rename from aws/configs/dev-linux64
rename to configs/dev-linux64
rename from aws/configs/master-linux64
rename to configs/master-linux64
rename from aws/configs/routingtables.yml
rename to configs/routingtables.yml
rename from aws/configs/securitygroups.yml
rename to configs/securitygroups.yml
rename from aws/configs/servo-linux64
rename to configs/servo-linux64
rename from aws/configs/try-linux64
rename to configs/try-linux64
rename from aws/configs/tst-linux32
rename to configs/tst-linux32
rename from aws/configs/tst-linux64
rename to configs/tst-linux64
rename from aws/configs/vcssync-linux64
rename to configs/vcssync-linux64
rename from aws/configs/watch_pending.cfg
rename to configs/watch_pending.cfg
rename from aws/configs/watch_pending.cfg.example
rename to configs/watch_pending.cfg.example
rename from aws/configs/watch_pending_servo.cfg
rename to configs/watch_pending_servo.cfg
rename from aws/instance_data/us-east-1.instance_data_dev.json
rename to instance_data/us-east-1.instance_data_dev.json
rename from aws/instance_data/us-east-1.instance_data_master.json
rename to instance_data/us-east-1.instance_data_master.json
rename from aws/instance_data/us-east-1.instance_data_prod.json
rename to instance_data/us-east-1.instance_data_prod.json
rename from aws/instance_data/us-east-1.instance_data_servo.json
rename to instance_data/us-east-1.instance_data_servo.json
rename from aws/instance_data/us-east-1.instance_data_tests.json
rename to instance_data/us-east-1.instance_data_tests.json
rename from aws/instance_data/us-east-1.instance_data_try.json
rename to instance_data/us-east-1.instance_data_try.json
rename from aws/instance_data/us-west-2.instance_data_master.json
rename to instance_data/us-west-2.instance_data_master.json
rename from aws/instance_data/us-west-2.instance_data_prod.json
rename to instance_data/us-west-2.instance_data_prod.json
rename from aws/instance_data/us-west-2.instance_data_tests.json
rename to instance_data/us-west-2.instance_data_tests.json
rename from aws/instance_data/us-west-2.instance_data_try.json
rename to instance_data/us-west-2.instance_data_try.json
new file mode 100644
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,16 @@
+Fabric==1.8.0
+IPy==0.81
+MySQL-python==1.2.4
+SQLAlchemy==0.8.3
+argparse==1.2.1
+boto==2.16.0
+docopt==0.6.1
+ecdsa==0.10
+invtool==0.1.0
+paramiko==1.12.0
+pycrypto==2.6.1
+repoze.lru==0.6
+requests==2.0.1
+simplejson==3.3.1
+ssh==1.8.0
+wsgiref==0.1.2
rename from aws/aws_create_ami.py
rename to scripts/aws_create_ami.py
--- a/aws/aws_create_ami.py
+++ b/scripts/aws_create_ami.py
@@ -1,91 +1,32 @@
 #!/usr/bin/env python
 
-import boto
-from boto.ec2 import connect_to_region
 from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
-from fabric.api import run, put, env, lcd, sudo
+from fabric.api import run, put, env, lcd
 import json
-import uuid
 import time
 import logging
 import os
-log = logging.getLogger()
-
-AMI_CONFIGS_DIR = "ami_configs"
-INSTANCE_CONFIGS_DIR = "configs"
-
+import site
 
-def create_connection(options):
-    secrets = json.load(open(options.secrets))
-    connection = connect_to_region(
-        options.region,
-        aws_access_key_id=secrets['aws_access_key_id'],
-        aws_secret_access_key=secrets['aws_secret_access_key'],
-    )
-    return connection
+site.addsitedir(os.path.join(os.path.dirname(__file__), ".."))
+from cloudtools.aws import get_aws_connection, AMI_CONFIGS_DIR, wait_for_status
+from cloudtools.aws.instance import run_instance
+
+log = logging.getLogger()
 
 
 def manage_service(service, target, state, distro="centos"):
     assert state in ("on", "off")
     if distro in ("debian", "ubuntu"):
         pass
     else:
-        run('chroot %s chkconfig --level 2345 %s %s' % (target, service, state))
-
-
-def create_instance(connection, instance_name, config, key_name, user='root',
-                    subnet_id=None):
-
-    bdm = None
-    if 'device_map' in config:
-        bdm = BlockDeviceMapping()
-        for device, device_info in config['device_map'].items():
-            bdm[device] = BlockDeviceType(size=device_info['size'],
-                                          delete_on_termination=True)
-
-    reservation = connection.run_instances(
-        image_id=config['ami'],
-        key_name=key_name,
-        instance_type=config['instance_type'],
-        block_device_map=bdm,
-        client_token=str(uuid.uuid4())[:16],
-        subnet_id=subnet_id,
-    )
-
-    instance = reservation.instances[0]
-    log.info("instance %s created, waiting to come up", instance)
-    # Wait for the instance to come up
-    while True:
-        try:
-            instance.update()
-            if instance.state == 'running':
-                if subnet_id:
-                    env.host_string = instance.private_ip_address
-                else:
-                    env.host_string = instance.public_dns_name
-                env.user = user
-                env.abort_on_prompts = True
-                env.disable_known_hosts = True
-                if run('date').succeeded:
-                    break
-        except:
-            log.debug('hit error waiting for instance to come up')
-        time.sleep(10)
-    instance.add_tag('Name', instance_name)
-    # Overwrite root's limited authorized_keys
-    if user != 'root':
-        sudo("cp -f ~%s/.ssh/authorized_keys "
-             "/root/.ssh/authorized_keys" % user)
-        sudo("sed -i -e '/PermitRootLogin/d' "
-             "-e '$ a PermitRootLogin without-password' /etc/ssh/sshd_config")
-        sudo("service sshd restart || service ssh restart")
-        sudo("sleep 20")
-    return instance
+        run('chroot %s chkconfig --level 2345 %s %s' % (target, service,
+                                                        state))
 
 
 def create_ami(host_instance, options, config):
     connection = host_instance.connection
     env.host_string = host_instance.public_dns_name
     env.user = 'root'
     env.abort_on_prompts = True
     env.disable_known_hosts = True
@@ -104,22 +45,21 @@ def create_ami(host_instance, options, c
     while True:
         try:
             v.attach(host_instance.id, config['target']['aws_dev_name'])
             break
         except:
             log.debug('hit error waiting for volume to be attached')
             time.sleep(10)
 
+    wait_for_status(v, "status", "in-use", "update")
     while True:
         try:
-            v.update()
-            if v.status == 'in-use':
-                if run('ls %s' % int_dev_name).succeeded:
-                    break
+            if run('ls %s' % int_dev_name).succeeded:
+                break
         except:
             log.debug('hit error waiting for volume to be attached')
             time.sleep(10)
 
     # Step 0: install required packages
     if config.get('distro') not in ('debian', 'ubuntu'):
         run('which MAKEDEV >/dev/null || yum install -y MAKEDEV')
     # Step 1: prepare target FS
@@ -235,36 +175,22 @@ def create_ami(host_instance, options, c
     else:
         manage_service("network", mount_point, "on")
         manage_service("rc.local", mount_point, "on")
 
     run('umount %s/proc || :' % mount_point)
     run('umount %s' % mount_point)
 
     v.detach()
-    while True:
-        try:
-            v.update()
-            if v.status == 'available':
-                break
-        except:
-            log.exception('hit error waiting for volume to be detached')
-            time.sleep(10)
+    wait_for_status(v, "status", "available", "update")
 
     # Step 5: Create a snapshot
     log.info('Creating a snapshot')
     snapshot = v.create_snapshot('EBS-backed %s' % dated_target_name)
-    while True:
-        try:
-            snapshot.update()
-            if snapshot.status == 'completed':
-                break
-        except:
-            log.exception('hit error waiting for snapshot to be taken')
-            time.sleep(10)
+    wait_for_status(snapshot, "status", "completed", "update")
     snapshot.add_tag('Name', dated_target_name)
 
     # Step 6: Create an AMI
     log.info('Creating AMI')
     host_img = connection.get_image(config['ami'])
     block_map = BlockDeviceMapping()
     block_map[host_img.root_device_name] = BlockDeviceType(
         snapshot_id=snapshot.id)
@@ -349,15 +275,18 @@ if __name__ == '__main__':
         parser.error("secrets are required")
 
     if not options.key_name:
         parser.error("SSH key name name is required")
 
     try:
         config = json.load(open("%s/%s.json" % (AMI_CONFIGS_DIR,
                                                 options.config)))[options.region]
+        secrets = json.load(open(options.secrets))
     except KeyError:
         parser.error("unknown configuration")
 
-    connection = create_connection(options)
-    host_instance = create_instance(connection, args[0], config,
-                                    options.key_name, options.user)
-    target_ami = create_ami(host_instance, options, config)
+    connection = get_aws_connection(options.region,
+                                    secrets["aws_access_key_id"],
+                                    secrets["aws_secret_access_key"])
+    host_instance = run_instance(connection, args[0], config, options.key_name,
+                                 options.user)
+    create_ami(host_instance, options, config)
rename from aws/aws_create_instance.py
rename to scripts/aws_create_instance.py
--- a/aws/aws_create_instance.py
+++ b/scripts/aws_create_instance.py
@@ -1,87 +1,41 @@
 #!/usr/bin/env python
 import json
 import uuid
 import time
 import boto
 import StringIO
-from socket import gethostbyname, gaierror, gethostbyaddr, herror
 import random
-
+import site
+import os
+import multiprocessing
+import sys
 from random import choice
 from fabric.api import run, put, env, sudo
 from fabric.context_managers import cd
-from boto.ec2 import connect_to_region
 from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
 from boto.ec2.networkinterface import NetworkInterfaceSpecification, \
     NetworkInterfaceCollection
-from boto.vpc import VPCConnection
-from IPy import IP
 
-from aws_create_ami import AMI_CONFIGS_DIR
+site.addsitedir(os.path.join(os.path.dirname(__file__), ".."))
+from cloudtools.aws import AMI_CONFIGS_DIR, get_aws_connection, get_vpc, \
+    name_available, wait_for_status
+from cloudtools.dns import get_ip, get_ptr
+from cloudtools.aws.vpc import get_subnet_id, ip_available
 
 import logging
 log = logging.getLogger(__name__)
-_connections = {}
-_vpcs = {}
-
-
-def get_ip(hostname):
-    try:
-        return gethostbyname(hostname)
-    except gaierror:
-        return None
-
-
-def get_ptr(ip):
-    try:
-        return gethostbyaddr(ip)[0]
-    except herror:
-        return None
-
-
-def get_subnet_id(vpc, ip):
-    subnets = vpc.get_all_subnets()
-    for s in subnets:
-        if IP(ip) in IP(s.cidr_block):
-            return s.id
-    return None
-
-
-def ip_available(conn, ip):
-    res = conn.get_all_instances()
-    instances = reduce(lambda a, b: a + b, [r.instances for r in res])
-    ips = [i.private_ip_address for i in instances]
-    interfaces = conn.get_all_network_interfaces()
-    ips.extend(i.private_ip_address for i in interfaces)
-    if ip in ips:
-        return False
-    else:
-        return True
-
-
-def name_available(conn, name):
-    res = conn.get_all_instances()
-    instances = reduce(lambda a, b: a + b, [r.instances for r in res])
-    names = [i.tags.get("Name") for i in instances if i.state != "terminated"]
-    if name in names:
-        return False
-    else:
-        return True
 
 
 def verify(hosts, config, region, secrets):
     """ Check DNS entries and IP availability for hosts"""
     passed = True
-    conn = get_connection(
-        region,
-        aws_access_key_id=secrets['aws_access_key_id'],
-        aws_secret_access_key=secrets['aws_secret_access_key']
-    )
+    conn = get_aws_connection(region, secrets['aws_access_key_id'],
+                              secrets['aws_secret_access_key'])
     for host in hosts:
         fqdn = "%s.%s" % (host, config["domain"])
         log.info("Checking name conflicts for %s", host)
         if not name_available(conn, host):
             log.error("%s has been already taken", host)
             passed = False
             continue
         log.debug("Getting IP for %s", fqdn)
@@ -94,21 +48,18 @@ def verify(hosts, config, region, secret
             ptr = get_ptr(ip)
             if ptr != fqdn:
                 log.error("Bad PTR for %s", host)
                 passed = False
             log.debug("Checking %s availablility", ip)
             if not ip_available(conn, ip):
                 log.error("IP %s reserved for %s, but not available", ip, host)
                 passed = False
-            vpc = get_vpc(
-                connection=conn,
-                aws_access_key_id=secrets['aws_access_key_id'],
-                aws_secret_access_key=secrets['aws_secret_access_key'])
-
+            vpc = get_vpc(region, secrets['aws_access_key_id'],
+                          secrets['aws_secret_access_key'])
             s_id = get_subnet_id(vpc, ip)
             if s_id not in config['subnet_ids']:
                 log.error("IP %s does not belong to assigned subnets", ip)
                 passed = False
     if not passed:
         raise RuntimeError("Sanity check failed")
 
 
@@ -201,54 +152,24 @@ def assimilate(ip_addr, config, instance
             run("chown cltbld: %s/.hg/hgrc" % target_dir)
             sudo('{hg} -R {d} unbundle {b}'.format(hg=hg, d=target_dir,
                                                    b=bundle), user="cltbld")
 
     log.info("Rebooting %s...", hostname)
     run("reboot")
 
 
-def get_connection(region, aws_access_key_id, aws_secret_access_key):
-    global _connections
-    if _connections.get(region):
-        return _connections[region]
-    _connections[region] = connect_to_region(
-        region,
-        aws_access_key_id=aws_access_key_id,
-        aws_secret_access_key=aws_secret_access_key
-    )
-    return _connections[region]
-
-
-def get_vpc(connection, aws_access_key_id, aws_secret_access_key):
-    global _vpcs
-    if _vpcs.get(connection.region.name):
-        return _vpcs[connection.region.name]
-    _vpcs[connection.region.name] = VPCConnection(
-        region=connection.region,
-        aws_access_key_id=aws_access_key_id,
-        aws_secret_access_key=aws_secret_access_key
-    )
-    return _vpcs[connection.region.name]
-
-
 def create_instance(name, config, region, secrets, key_name, instance_data,
                     deploypass, loaned_to, loan_bug):
     """Creates an AMI instance with the given name and config. The config must
     specify things like ami id."""
-    conn = get_connection(
-        region,
-        aws_access_key_id=secrets['aws_access_key_id'],
-        aws_secret_access_key=secrets['aws_secret_access_key']
-    )
-    vpc = get_vpc(
-        connection=conn,
-        aws_access_key_id=secrets['aws_access_key_id'],
-        aws_secret_access_key=secrets['aws_secret_access_key'])
-
+    conn = get_aws_connection(region, secrets['aws_access_key_id'],
+                              secrets['aws_secret_access_key'])
+    vpc = get_vpc(region, secrets['aws_access_key_id'],
+                  secrets['aws_secret_access_key'])
     # Make sure we don't request the same things twice
     token = str(uuid.uuid4())[:16]
 
     instance_data = instance_data.copy()
     instance_data['name'] = name
     instance_data['hostname'] = '{name}.{domain}'.format(
         name=name, domain=config['domain'])
 
@@ -309,25 +230,17 @@ def create_instance(name, config, region
             break
         except boto.exception.BotoServerError:
             log.exception("Cannot start an instance")
         time.sleep(10)
 
     instance = reservation.instances[0]
     log.info("instance %s created, waiting to come up", instance)
     # Wait for the instance to come up
-    while True:
-        try:
-            instance.update()
-            if instance.state == 'running':
-                break
-        except Exception:
-            log.warn("waiting for instance to come up, retrying in 10 sec...")
-        time.sleep(10)
-
+    wait_for_status(instance, "state", "running", "update")
     instance.add_tag('Name', name)
     instance.add_tag('FQDN', instance_data['hostname'])
     instance.add_tag('created', time.strftime("%Y-%m-%d %H:%M:%S %Z",
                                               time.gmtime()))
     instance.add_tag('moz-type', config['type'])
     if loaned_to:
         instance.add_tag("moz-loaned-to", loaned_to)
     if loan_bug:
@@ -342,72 +255,16 @@ def create_instance(name, config, region
             break
         except:
             log.warn("problem assimilating %s (%s), retrying in 10 sec ...",
                      instance_data['hostname'], instance.id)
             time.sleep(10)
     instance.add_tag('moz-state', 'ready')
 
 
-def ami_from_instance(instance):
-    base_ami = instance.connection.get_image(instance.image_id)
-    target_name = '%s-puppetized' % base_ami.name
-    v = instance.connection.get_all_volumes(
-        filters={'attachment.instance-id': instance.id})[0]
-    instance.stop()
-    log.info('Stopping instance')
-    while True:
-        try:
-            instance.update()
-            if instance.state == 'stopped':
-                break
-        except Exception:
-            log.info('Waiting for instance stop')
-            time.sleep(10)
-    log.info('Creating snapshot')
-    snapshot = v.create_snapshot('EBS-backed %s' % target_name)
-    while True:
-        try:
-            snapshot.update()
-            if snapshot.status == 'completed':
-                break
-        except Exception:
-            log.exception('hit error waiting for snapshot to be taken')
-            time.sleep(10)
-    snapshot.add_tag('Name', target_name)
-
-    log.info('Creating AMI')
-    block_map = BlockDeviceMapping()
-    block_map[base_ami.root_device_name] = BlockDeviceType(
-        snapshot_id=snapshot.id)
-    ami_id = instance.connection.register_image(
-        target_name,
-        '%s EBS AMI' % target_name,
-        architecture=base_ami.architecture,
-        kernel_id=base_ami.kernel_id,
-        ramdisk_id=base_ami.ramdisk_id,
-        root_device_name=base_ami.root_device_name,
-        block_device_map=block_map,
-    )
-    while True:
-        try:
-            ami = instance.connection.get_image(ami_id)
-            ami.add_tag('Name', target_name)
-            log.info('AMI created')
-            log.info('ID: {id}, name: {name}'.format(id=ami.id, name=ami.name))
-            break
-        except boto.exception.EC2ResponseError:
-            log.info('Wating for AMI')
-            time.sleep(10)
-    instance.terminate()
-
-import multiprocessing
-import sys
-
-
 class LoggingProcess(multiprocessing.Process):
     def __init__(self, log, *args, **kwargs):
         self.log = log
         super(LoggingProcess, self).__init__(*args, **kwargs)
 
     def run(self):
         output = open(self.log, 'wb', 0)
         logging.basicConfig(stream=output)
rename from aws/aws_manage_instances.py
rename to scripts/aws_manage_instances.py
--- a/aws/aws_manage_instances.py
+++ b/scripts/aws_manage_instances.py
@@ -1,15 +1,19 @@
 #!/usr/bin/env python
 
 import argparse
 import json
 import logging
 from time import gmtime, strftime
-from boto.ec2 import connect_to_region
+import site
+import os
+
+site.addsitedir(os.path.join(os.path.dirname(__file__), ".."))
+from cloudtools.aws import get_aws_connection
 
 log = logging.getLogger(__name__)
 REGIONS = ['us-east-1', 'us-west-2']
 
 
 def start(i, dry_run):
     name = i.tags.get('Name', '')
     log.info("Starting %s..." % name)
@@ -114,35 +118,29 @@ if __name__ == '__main__':
                         help="Supress logging messages")
     parser.add_argument("hosts", metavar="host", nargs="+",
                         help="hosts to be processed")
 
     args = parser.parse_args()
     if args.secrets:
         secrets = json.load(args.secrets)
     else:
-        secrets = None
+        secrets = {}
 
     logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s")
     if not args.quiet:
         log.setLevel(logging.INFO)
     else:
         log.setLevel(logging.ERROR)
 
     if not args.regions:
         args.regions = REGIONS
     for region in args.regions:
-        if secrets:
-            conn = connect_to_region(
-                region,
-                aws_access_key_id=secrets['aws_access_key_id'],
-                aws_secret_access_key=secrets['aws_secret_access_key']
-            )
-        else:
-            conn = connect_to_region(region)
+        conn = get_aws_connection(region, secrets.get("aws_access_key_id"),
+                                  secrets.get("aws_secret_access_key"))
 
         res = conn.get_all_instances()
         instances = reduce(lambda a, b: a + b, [r.instances for r in res])
         for i in instances:
             name = i.tags.get('Name', '')
             instance_id = i.id
             if not i.private_ip_address:
                 # Terminated instances has no IP address assinged
rename from aws/aws_manage_routingtables.py
rename to scripts/aws_manage_routingtables.py
rename from aws/aws_manage_securitygroups.py
rename to scripts/aws_manage_securitygroups.py
rename from aws/aws_sanity_checker.py
rename to scripts/aws_sanity_checker.py
--- a/aws/aws_sanity_checker.py
+++ b/scripts/aws_sanity_checker.py
@@ -2,18 +2,22 @@
 
 import argparse
 import json
 import logging
 import time
 import calendar
 import collections
 import re
-from boto.ec2 import connect_to_region
-from aws_instances import Slave, AWSInstance
+import site
+import os
+
+site.addsitedir(os.path.join(os.path.dirname(__file__), ".."))
+from cloudtools.aws.sanity import Slave, AWSInstance
+from cloudtools.aws import get_aws_connection
 
 
 log = logging.getLogger(__name__)
 REGIONS = ('us-east-1', 'us-west-2')
 KNOWN_TYPES = ('puppetmaster', 'buildbot-master', 'dev-linux64', 'bld-linux64',
                'try-linux64', 'tst-linux32', 'tst-linux64', 'tst-win64', 'dev',
                'servo-linux64', 'packager', 'vcssync', 'infra')
 
@@ -48,28 +52,16 @@ EXPECTED_MAX_DOWNTIME = {
     "default": 24
 }
 
 
 def is_beanstalk_instance(i):
     return i.tags.get("elasticbeanstalk:environment-name") is not None
 
 
-def get_connection(region, secrets):
-    if secrets:
-        conn = connect_to_region(
-            region,
-            aws_access_key_id=secrets['aws_access_key_id'],
-            aws_secret_access_key=secrets['aws_secret_access_key']
-        )
-    else:
-        conn = connect_to_region(region)
-    return conn
-
-
 def get_all_instances(conn):
     res = conn.get_all_instances()
     instances = []
     if res:
         instances = reduce(lambda a, b: a + b, [r.instances for r in res])
     # Skip instances managed by Elastic Beanstalk
     return [i for i in instances if not is_beanstalk_instance(i)]
 
@@ -105,19 +97,18 @@ def get_loaned(instances):
         bug_string = "an unknown bug"
         if i.tags.get("moz-bug"):
             bug_string = "bug %s" % i.tags.get("moz-bug")
         if i.state == "running":
             uptime = get_uptime(i)
             ret.append((uptime, i, "Loaned to %s in %s, up for %i hours" % (
                 i.tags["moz-loaned-to"], bug_string, uptime)))
         else:
-            ret.append((None, i, "Loaned to %s in %s, %s" % (i.tags["moz-loaned-to"],
-                                                             bug_string,
-                                                             i.state)))
+            ret.append((None, i, "Loaned to %s in %s, %s" %
+                        (i.tags["moz-loaned-to"], bug_string, i.state)))
     if ret:
         # sort by uptime, reconstruct ret
         ret = [(e[1], e[2]) for e in reversed(sorted(ret, key=lambda x: x[0]))]
     return ret
 
 
 def get_uptime(instance):
     return (time.time() - parse_launch_time(instance.launch_time)) / 3600
@@ -340,29 +331,30 @@ if __name__ == '__main__':
                         help="optional list of regions")
     parser.add_argument("-q", "--quiet", action="store_true",
                         help="Supress logging messages")
 
     args = parser.parse_args()
     if args.secrets:
         secrets = json.load(args.secrets)
     else:
-        secrets = None
+        secrets = {}
 
     logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s")
     if not args.quiet:
         log.setLevel(logging.DEBUG)
     else:
         log.setLevel(logging.ERROR)
 
     if not args.regions:
         args.regions = REGIONS
     all_instances = []
     all_volumes = []
     for region in args.regions:
-        conn = get_connection(region, secrets)
+        conn = get_aws_connection(region, secrets.get("aws_access_key_id"),
+                                  secrets.get("aws_secret_access_key"))
         all_instances.extend(get_all_instances(conn))
         all_volumes.extend(conn.get_all_volumes())
 
     generate_report(connection=conn,
                     regions=args.regions,
                     instances=all_instances,
                     volumes=all_volumes)
rename from aws/aws_stop_idle.py
rename to scripts/aws_stop_idle.py
--- a/aws/aws_stop_idle.py
+++ b/scripts/aws_stop_idle.py
@@ -8,23 +8,27 @@ import calendar
 try:
     import simplejson as json
     assert json
 except ImportError:
     import json
 
 import random
 import threading
+import boto.ec2
+import requests
+import logging
+import site
+import os
+from paramiko import SSHClient
 from Queue import Queue, Empty
 
-import boto.ec2
-from paramiko import SSHClient
-import requests
+site.addsitedir(os.path.join(os.path.dirname(__file__), ".."))
+from cloudtools.aws import get_aws_connection
 
-import logging
 log = logging.getLogger()
 
 # Instances runnnig less than STOP_THRESHOLD_MINS minutes within 1 hour
 # boundary won't be stopped.
 STOP_THRESHOLD_MINS = 45
 
 
 def stop(i, ssh_client=None):
@@ -269,17 +273,18 @@ def aws_stop_idle(secrets, credentials, 
 
     min_running_by_type = 0
 
     all_instances = []
     impaired_ids = []
 
     for r in regions:
         log.debug("looking at region %s", r)
-        conn = boto.ec2.connect_to_region(r, **secrets)
+        conn = get_aws_connection(r, secrets["aws_access_key_id"],
+                                  secrets["aws_secret_access_key"])
 
         instances = get_buildbot_instances(conn, moz_types)
         impaired = conn.get_all_instance_status(
             filters={'instance-status.status': 'impaired'})
         impaired_ids.extend(i.id for i in impaired)
         instances_by_type = {}
         for i in instances:
             # TODO: Check if launch_time is too old, and terminate the instance
rename from aws/aws_watch_pending.py
rename to scripts/aws_watch_pending.py
--- a/aws/aws_watch_pending.py
+++ b/scripts/aws_watch_pending.py
@@ -8,28 +8,31 @@ import random
 from collections import defaultdict
 
 try:
     import simplejson as json
     assert json
 except ImportError:
     import json
 
-import boto.ec2
 from boto.exception import BotoServerError
 from boto.ec2.networkinterface import NetworkInterfaceCollection, \
     NetworkInterfaceSpecification
 from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
 import sqlalchemy as sa
 from sqlalchemy.engine.reflection import Inspector
 
 import requests
 import os
 import logging
 from bid import decide as get_spot_choices
+import site
+
+site.addsitedir(os.path.join(os.path.dirname(__file__), ".."))
+from cloudtools.aws import get_aws_connection
 
 log = logging.getLogger()
 
 
 def find_pending(db):
     inspector = Inspector(db)
     # Newer buildbot has a "buildrequest_claims" table
     if "buildrequest_claims" in inspector.get_table_names():
@@ -61,36 +64,21 @@ def find_pending(db):
 
 
 def find_retries(db, brid):
     """Returns the number of previous builds for this build request id"""
     q = sa.text("SELECT count(*) from builds where brid=:brid")
     return db.execute(q, brid=brid).fetchone()[0]
 
 
-# Used by aws_connect_to_region to cache connection objects per region
-_aws_cached_connections = {}
-
-
-def aws_connect_to_region(region, secrets):
-    """Connect to an EC2 region. Caches connection objects"""
-    if region in _aws_cached_connections:
-        return _aws_cached_connections[region]
-    conn = boto.ec2.connect_to_region(
-        region,
-        aws_access_key_id=secrets['aws_access_key_id'],
-        aws_secret_access_key=secrets['aws_secret_access_key']
-    )
-    _aws_cached_connections[region] = conn
-    return conn
-
-
 def aws_get_spot_requests(region, secrets, moz_instance_type):
     """retruns a list of all open and active spot requests"""
-    conn = aws_connect_to_region(region, secrets)
+    conn = get_aws_connection(
+        region, aws_access_key_id=secrets['aws_access_key_id'],
+        aws_secret_access_key=secrets['aws_secret_access_key'])
     filters = {"tag:moz-type": moz_instance_type}
     req = conn.get_all_spot_instance_requests(filters=filters)
     return [r for r in req if r.state in ("open", "active")]
 
 
 _aws_instances_cache = {}
 
 
@@ -100,17 +88,19 @@ def aws_get_all_instances(regions, secre
     """
     log.debug("fetching all instances for %s", regions)
     retval = []
     for region in regions:
         if region in _aws_instances_cache:
             log.debug("aws_get_all_instances - cache hit for %s", region)
             retval.extend(_aws_instances_cache[region])
         else:
-            conn = aws_connect_to_region(region, secrets)
+            conn = get_aws_connection(
+                region, aws_access_key_id=secrets['aws_access_key_id'],
+                aws_secret_access_key=secrets['aws_secret_access_key'])
             reservations = conn.get_all_instances()
             region_instances = []
             for r in reservations:
                 region_instances.extend(r.instances)
             log.debug("aws_get_running_instances - caching %s", region)
             _aws_instances_cache[region] = region_instances
             retval.extend(region_instances)
     return retval
@@ -158,17 +148,19 @@ def aws_get_running_instances(all_instan
 
 def aws_get_reservations(regions, secrets):
     """
     Return a mapping of (availability zone, ec2 instance type) -> count
     """
     log.debug("getting reservations for %s", regions)
     retval = {}
     for region in regions:
-        conn = aws_connect_to_region(region, secrets)
+        conn = get_aws_connection(
+            region, aws_access_key_id=secrets['aws_access_key_id'],
+            aws_secret_access_key=secrets['aws_secret_access_key'])
         reservations = conn.get_all_reserved_instances(filters={
             'state': 'active',
         })
         for r in reservations:
             az = r.availability_zone
             ec2_instance_type = r.instance_type
             if (az, ec2_instance_type) not in retval:
                 retval[az, ec2_instance_type] = 0
@@ -322,17 +314,20 @@ def request_spot_instances(moz_instance_
     spot_rules = spot_config.get("rules", {}).get(moz_instance_type)
     if not spot_rules:
         log.warn("No spot rules found for %s", moz_instance_type)
         return 0
 
     instance_config = json.load(open("configs/%s" % moz_instance_type))
     connections = []
     for region in regions:
-        connections.append(aws_connect_to_region(region, secrets))
+        conn = get_aws_connection(
+            region, aws_access_key_id=secrets['aws_access_key_id'],
+            aws_secret_access_key=secrets['aws_secret_access_key'])
+        connections.append(conn)
     spot_choices = get_spot_choices(connections, spot_rules)
     if not spot_choices:
         log.warn("No spot choices for %s", moz_instance_type)
         return 0
 
     to_start = {}
     for region in regions:
         # Check if spots are enabled in this region for this type
@@ -432,17 +427,19 @@ def do_request_spot_instances(amount, re
         except (RuntimeError):
             log.warn("Cannot start", exc_info=True)
     return started
 
 
 def do_request_spot_instance(region, secrets, moz_instance_type, price, ami,
                              instance_config, cached_cert_dir, instance_type,
                              availability_zone, slaveset, dryrun):
-    conn = aws_connect_to_region(region, secrets)
+    conn = get_aws_connection(
+        region, aws_access_key_id=secrets['aws_access_key_id'],
+        aws_secret_access_key=secrets['aws_secret_access_key'])
     interface = get_available_interface(
         conn=conn, moz_instance_type=moz_instance_type,
         availability_zone=availability_zone,
         slaveset=slaveset)
     if not interface:
         log.warn("No free network interfaces left in %s" % region)
         return False
 
@@ -550,17 +547,19 @@ def get_available_interface(conn, moz_in
                 if i.tags.get("FQDN").split(".")[0] not in allocated_slaves:
                     _cached_interfaces[availability_zone][moz_instance_type].remove(i)
                     log.debug("using %s", i.tags.get("FQDN"))
                     return i
     return None
 
 
 def get_ami(region, secrets, moz_instance_type):
-    conn = aws_connect_to_region(region, secrets)
+    conn = get_aws_connection(
+        region, aws_access_key_id=secrets['aws_access_key_id'],
+        aws_secret_access_key=secrets['aws_secret_access_key'])
     avail_amis = conn.get_all_images(
         owners=["self"],
         filters={"tag:moz-type": moz_instance_type})
     last_ami = sorted(avail_amis,
                       key=lambda ami: ami.tags.get("moz-created"))[-1]
     return last_ami
 
 
rename from aws/check_dns.py
rename to scripts/check_dns.py
--- a/aws/check_dns.py
+++ b/scripts/check_dns.py
@@ -1,27 +1,23 @@
 #!/usr/bin/env python
 import argparse
 import json
 import logging
 from multiprocessing import Pool
-from boto.ec2 import connect_to_region
-from aws_create_instance import get_ip, get_ptr
-from socket import gethostbyname_ex
+import site
+import os
+
+site.addsitedir(os.path.join(os.path.dirname(__file__), ".."))
+from cloudtools.aws import get_aws_connection
+from cloudtools.dns import get_ip, get_ptr, get_cname
 
 log = logging.getLogger(__name__)
 
 
-def get_cname(cname):
-    try:
-        return gethostbyname_ex(cname)[0]
-    except:
-        return None
-
-
 def check_A(args):
     fqdn, ip = args
     log.debug("Checking A %s %s", fqdn, ip)
     dns_ip = get_ip(fqdn)
     if dns_ip != ip:
         log.error("%s A entry %s doesn't match real ip %s", fqdn, dns_ip, ip)
     else:
         log.debug("%s A entry %s matches real ip %s", fqdn, dns_ip, ip)
@@ -56,32 +52,26 @@ def main():
                         help="optional list of regions")
     parser.add_argument("-v", "--verbose", action="store_true",
                         help="Supress logging messages")
 
     args = parser.parse_args()
     if args.secrets:
         secrets = json.load(args.secrets)
     else:
-        secrets = None
+        secrets = {}
 
     logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s")
     if args.verbose:
         log.setLevel(logging.DEBUG)
     else:
         log.setLevel(logging.WARNING)
 
-    if secrets:
-        conn = connect_to_region(
-            args.region,
-            aws_access_key_id=secrets['aws_access_key_id'],
-            aws_secret_access_key=secrets['aws_secret_access_key']
-        )
-    else:
-        conn = connect_to_region(args.region)
+    conn = get_aws_connection(args.region, secrets.get("aws_access_key_id"),
+                              secrets.get("aws_secret_access_key"))
 
     pool = Pool()
     res = conn.get_all_instances()
     instances = reduce(lambda a, b: a + b, [r.instances for r in res])
     a_checks = []
     ptr_checks = []
     cname_checks = []
     for i in instances:
rename from aws/ec22ip.py
rename to scripts/ec22ip.py
--- a/aws/ec22ip.py
+++ b/scripts/ec22ip.py
@@ -1,36 +1,39 @@
 #!/usr/bin/env python
 
 import re
 import json
-from boto.ec2 import connect_to_region
+import site
+import os
+
+site.addsitedir(os.path.join(os.path.dirname(__file__), ".."))
+from cloudtools.aws import get_aws_connection
 
 
 if __name__ == '__main__':
     from optparse import OptionParser
     parser = OptionParser()
     parser.add_option("-r", "--region", dest="region", help="region to use",
                       default="us-east-1")
     parser.add_option("-k", "--secrets", dest="secrets",
                       help="file where secrets can be found")
 
     options, args = parser.parse_args()
     if not args:
         parser.error("at least one instance name is required")
 
     hosts_re = [re.compile(x) for x in args]
 
-    if not options.secrets:
-        conn = connect_to_region(options.region)
+    if options.secrets:
+        secrets = json.load(open(options.secrets))
     else:
-        secrets = json.load(open(options.secrets))
-        conn = connect_to_region(
-            options.region, aws_access_key_id=secrets['aws_access_key_id'],
-            aws_secret_access_key=secrets['aws_secret_access_key'])
+        secrets = {}
+    conn = get_aws_connection(options.region, secrets.get("aws_access_key_id"),
+                              secrets.get("aws_secret_access_key"))
 
     res = conn.get_all_instances()
     if res:
         instances = reduce(lambda a, b: a + b, [r.instances for r in res])
         for i in instances:
             for mask in hosts_re:
                 hostname = i.tags.get('FQDN', i.tags.get('Name', ''))
                 if mask.search(hostname) and i.private_ip_address:
rename from aws/free_ips.py
rename to scripts/free_ips.py
--- a/aws/free_ips.py
+++ b/scripts/free_ips.py
@@ -1,14 +1,17 @@
-from boto.ec2 import connect_to_region
-from boto.vpc import VPCConnection
-from IPy import IP
+import os
+import site
 import random
 import argparse
 import json
+from IPy import IP
+
+site.addsitedir(os.path.join(os.path.dirname(__file__), ".."))
+from cloudtools.aws import get_aws_connection, get_vpc
 
 
 parser = argparse.ArgumentParser()
 parser.add_argument("-c", "--config", required=True,
                     type=argparse.FileType('r'),
                     help="instance configuration to use")
 parser.add_argument("-r", "--region", help="region to use",
                     default="us-east-1")
@@ -20,29 +23,23 @@ args = parser.parse_args()
 
 try:
     config = json.load(args.config)[args.region]
 except KeyError:
     parser.error("unknown configuration")
 
 if args.secrets:
     secrets = json.load(args.secrets)
-    conn = connect_to_region(
-        args.region,
-        aws_access_key_id=secrets['aws_access_key_id'],
-        aws_secret_access_key=secrets['aws_secret_access_key']
-    )
-    vpc = VPCConnection(
-        aws_access_key_id=secrets['aws_access_key_id'],
-        aws_secret_access_key=secrets['aws_secret_access_key'],
-        region=conn.region
-    )
 else:
-    conn = connect_to_region(args.region)
-    vpc = VPCConnection(region=conn.region)
+    secrets = {}
+
+conn = get_aws_connection(args.region, secrets.get("aws_access_key_id"),
+                          secrets.get("aws_secret_access_key"))
+vpc = get_vpc(args.region, secrets.get("aws_access_key_id"),
+              secrets.get("aws_secret_access_key"))
 
 interfaces = vpc.get_all_network_interfaces()
 used_ips = [i.private_ip_address for i in interfaces]
 
 subnets = vpc.get_all_subnets(subnet_ids=config["subnet_ids"])
 blocks = [s.cidr_block for s in subnets]
 
 available_ips = []
rename from aws/instance2ami.py
rename to scripts/instance2ami.py
--- a/aws/instance2ami.py
+++ b/scripts/instance2ami.py
@@ -1,43 +1,33 @@
 #!/usr/bin/env python
 import argparse
 import json
 import logging
 import time
 import random
-from boto.ec2 import connect_to_region
 from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
 from fabric.api import run, env, put, cd
-
-from aws_create_ami import create_instance, AMI_CONFIGS_DIR, \
-    INSTANCE_CONFIGS_DIR
-log = logging.getLogger(__name__)
-
+import os
+import site
 
-def wait_for_status(obj, attr_name, attr_value, update_method):
-    log.debug("waiting for %s availability", obj)
-    while True:
-        try:
-            getattr(obj, update_method)()
-            if getattr(obj, attr_name) == attr_value:
-                break
-            else:
-                time.sleep(1)
-        except:
-            log.exception('hit error waiting for snapshot to be taken')
-            time.sleep(10)
+site.addsitedir(os.path.join(os.path.dirname(__file__), ".."))
+from cloudtools.aws import get_aws_connection, wait_for_status, \
+    AMI_CONFIGS_DIR, INSTANCE_CONFIGS_DIR
+from cloudtools.aws.instance import run_instance
+
+log = logging.getLogger(__name__)
 
 
 def main():
     parser = argparse.ArgumentParser()
     parser.add_argument("-k", "--secrets", type=argparse.FileType('r'),
                         help="optional file where secrets can be found")
     parser.add_argument("-r", "--region", dest="region", required=True,
-                        help="optional list of regions")
+                        help="Region")
     parser.add_argument("-q", "--quiet", action="store_true",
                         help="Supress logging messages")
     parser.add_argument("-c", "--ami-config", required=True, help="AMI config")
     parser.add_argument("-i", "--instance-config", required=True,
                         help="Instance config")
     parser.add_argument("--ssh-key", required=True, help="SSH key name")
     parser.add_argument("--user", help="Login name")
     parser.add_argument("--public", action="store_true", default=False,
@@ -59,24 +49,18 @@ def main():
         parser.error("unknown configuration")
 
     logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s")
     if not args.quiet:
         log.setLevel(logging.DEBUG)
     else:
         log.setLevel(logging.ERROR)
 
-    if secrets:
-        conn = connect_to_region(
-            args.region,
-            aws_access_key_id=secrets['aws_access_key_id'],
-            aws_secret_access_key=secrets['aws_secret_access_key']
-        )
-    else:
-        conn = connect_to_region(args.region)
+    conn = get_aws_connection(args.region, secrets.get("aws_access_key_id"),
+                              secrets.get("aws_secret_access_key"))
 
     dated_target_name = "spot-%s-%s" % (
         args.ami_config, time.strftime("%Y-%m-%d-%H-%M", time.gmtime()))
     filters = {
         "tag:moz-state": "ready",
         "instance-state-name": "stopped"
     }
     for tag, value in moz_type_config["tags"].iteritems():
@@ -92,17 +76,17 @@ def main():
     instances = [i for i in instances if not i.tags.get("moz-loaned-to")]
     i = sorted(instances, key=lambda i: i.launch_time)[-1]
     log.debug("Selected instance to clone: %s", i)
     v_id = i.block_device_mapping[i.root_device_name].volume_id
     v = conn.get_all_volumes(volume_ids=[v_id])[0]
     snap1 = v.create_snapshot("temporary snapshot of %s" % v_id)
 
     wait_for_status(snap1, "status", "completed", "update")
-    host_instance = create_instance(
+    host_instance = run_instance(
         connection=conn, instance_name="tmp", config=ami_config,
         key_name=args.ssh_key, user=args.user,
         subnet_id=random.choice(moz_type_config["subnet_ids"]))
 
     env.host_string = host_instance.private_ip_address
     env.user = 'root'
     env.abort_on_prompts = True
     env.disable_known_hosts = True
rename from aws/spot_sanity_check.py
rename to scripts/spot_sanity_check.py
--- a/aws/spot_sanity_check.py
+++ b/scripts/spot_sanity_check.py
@@ -1,16 +1,20 @@
 #!/usr/bin/env python
 
 import argparse
 import json
 import logging
 import datetime
+import site
+import os
 
-from boto.ec2 import connect_to_region
+site.addsitedir(os.path.join(os.path.dirname(__file__), ".."))
+from cloudtools.aws import get_aws_connection
+
 from sqlalchemy.ext.declarative import declarative_base
 from sqlalchemy import Column, String, DateTime, Float, Integer, \
     create_engine, ForeignKey
 from sqlalchemy.orm import validates, relationship, sessionmaker
 
 log = logging.getLogger(__name__)
 REGIONS = ['us-east-1', 'us-west-2']
 Base = declarative_base()
@@ -166,34 +170,28 @@ if __name__ == '__main__':
     parser.add_argument("-q", "--quiet", action="store_true",
                         help="Supress logging messages")
     parser.add_argument("-d", "--db", default="spots.db")
 
     args = parser.parse_args()
     if args.secrets:
         secrets = json.load(args.secrets)
     else:
-        secrets = None
+        secrets = {}
 
     logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s")
     if not args.quiet:
         log.setLevel(logging.DEBUG)
     else:
         log.setLevel(logging.WARNING)
 
     engine = create_engine('sqlite:///%s' % args.db)
     Base.metadata.create_all(bind=engine)
     Session = sessionmaker(bind=engine)
     session = Session()
 
     if not args.regions:
         args.regions = REGIONS
     for region in args.regions:
-        if secrets:
-            conn = connect_to_region(
-                region,
-                aws_access_key_id=secrets['aws_access_key_id'],
-                aws_secret_access_key=secrets['aws_secret_access_key']
-            )
-        else:
-            conn = connect_to_region(region)
+        conn = get_aws_connection(region, secrets.get("aws_access_key_id"),
+                                  secrets.get("aws_secret_access_key"))
         update_spot_stats(conn, session)
         cancel_low_price(conn)
rename from aws/tag_spot_instances.py
rename to scripts/tag_spot_instances.py
--- a/aws/tag_spot_instances.py
+++ b/scripts/tag_spot_instances.py
@@ -1,15 +1,18 @@
 #!/usr/bin/env python
 
 import argparse
 import json
 import logging
-from boto.ec2 import connect_to_region
-from boto.vpc import VPCConnection
+import site
+import os
+
+site.addsitedir(os.path.join(os.path.dirname(__file__), ".."))
+from cloudtools.aws import get_aws_connection, get_vpc
 
 log = logging.getLogger(__name__)
 REGIONS = ['us-east-1', 'us-west-2']
 
 
 def tag_it(i, vpc):
     netif = i.interfaces[0]
     # network interface needs to be reloaded using VPC to get the tags
@@ -31,41 +34,31 @@ if __name__ == '__main__':
                         help="optional list of regions")
     parser.add_argument("-q", "--quiet", action="store_true",
                         help="Supress logging messages")
 
     args = parser.parse_args()
     if args.secrets:
         secrets = json.load(args.secrets)
     else:
-        secrets = None
+        secrets = {}
 
     logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s")
     if not args.quiet:
         log.setLevel(logging.DEBUG)
     else:
         log.setLevel(logging.ERROR)
 
     if not args.regions:
         args.regions = REGIONS
     for region in args.regions:
-        if secrets:
-            conn = connect_to_region(
-                region,
-                aws_access_key_id=secrets['aws_access_key_id'],
-                aws_secret_access_key=secrets['aws_secret_access_key']
-            )
-            vpc = VPCConnection(
-                aws_access_key_id=secrets['aws_access_key_id'],
-                aws_secret_access_key=secrets['aws_secret_access_key'],
-                region=conn.region
-            )
-        else:
-            conn = connect_to_region(region)
-            vpc = VPCConnection(region=conn.region)
+        conn = get_aws_connection(region, secrets.get("aws_access_key_id"),
+                                  secrets.get("aws_secret_access_key"))
+        vpc = get_vpc(region, secrets.get("aws_access_key_id"),
+                      secrets.get("aws_secret_access_key"))
 
         spot_requests = conn.get_all_spot_instance_requests() or []
         for req in spot_requests:
             if req.tags.get("moz-tagged"):
                 log.debug("Skipping already processed spot request %s", req)
                 continue
             i_id = req.instance_id
             if not i_id: