nobug: remove a bunch of code that is most likely dead. r=tomprince
author Ben Hearsum <bhearsum@mozilla.com>
Mon, 24 Sep 2018 13:26:42 -0400
changeset 8456 d5289f1ceaaac0d01769548bd88ffe0ae4a8f200
parent 8455 d8cd2c0aba0918256efe9fe10d37b9d53c051f8d
child 8457 400ef0a557ea5b043b5f96a74b8665c12cc4a9c8
push id 6179
push user bhearsum@mozilla.com
push date Mon, 24 Sep 2018 17:26:59 +0000
reviewers tomprince
nobug: remove a bunch of code that is most likely dead. r=tomprince
MANIFEST.in
README.md
buildbot-helpers/check_queuedir.py
buildbot-helpers/command_runner.py
buildbot-helpers/force_build.py
buildbot-helpers/force_release_l10n.py
buildbot-helpers/mozconfig_whitelist
buildbot-helpers/pulse_publisher.py
buildbot-helpers/release_sanity.py
buildfarm/breakpad/cleanup-breakpad-symbols.py
buildfarm/breakpad/post-symbol-upload.py
buildfarm/maintenance/add-master.py
buildfarm/maintenance/archive-debug-builds.sh
buildfarm/maintenance/buildbot-wrangler.py
buildfarm/maintenance/check-master-json.py
buildfarm/maintenance/cleanup_db.py
buildfarm/maintenance/count_and_reboot.py
buildfarm/maintenance/end_to_end_reconfig.sh
buildfarm/maintenance/get_instance_metadata.py
buildfarm/maintenance/manage_masters.py
buildfarm/maintenance/master_cleanup.py
buildfarm/maintenance/maybe_reconfig.sh
buildfarm/maintenance/normalize-masters.py
buildfarm/maintenance/print_waits.py
buildfarm/maintenance/process_commit_comments.py
buildfarm/maintenance/production-branches.json
buildfarm/maintenance/production-masters.json
buildfarm/maintenance/purge_builds.py
buildfarm/maintenance/purge_events.py
buildfarm/maintenance/reboot-idle-slaves.py
buildfarm/maintenance/reconfig-logrotate.conf
buildfarm/maintenance/restart_masters.py
buildfarm/maintenance/slaverebooter.ini.sample
buildfarm/maintenance/test_purge_builds.py
buildfarm/maintenance/try_sendchange.sh
buildfarm/maintenance/update_irc.sh
buildfarm/maintenance/update_maintenance_wiki.sh
buildfarm/maintenance/update_remote_creds.pl
buildfarm/maintenance/watch_twistd_log.py
buildfarm/release/release-runner.py
buildfarm/release/release-runner.sh
buildfarm/release/releasetasks_graph_gen.py
buildfarm/utils/Makefile
buildfarm/utils/archiver_client.py
buildfarm/utils/count_ctors.py
buildfarm/utils/firefoxInstallConfig.ini
buildfarm/utils/generate-tpcomponent.py
buildfarm/utils/generator/places/__init__.py
buildfarm/utils/generator/places/builddb/__init__.py
buildfarm/utils/generator/places/builddb/increment_dates.py
buildfarm/utils/generator/places/builddb/models.py
buildfarm/utils/generator/places/builddb/views.py
buildfarm/utils/generator/places/manage.py
buildfarm/utils/generator/places/settings.py
buildfarm/utils/generator/places/urls.py
buildfarm/utils/generator/places/uuid.py
buildfarm/utils/generator/places/virgin_places.sqlite
buildfarm/utils/graph_server_post.py
buildfarm/utils/hgtool.py
buildfarm/utils/installdmg.sh
buildfarm/utils/jsontool.py
buildfarm/utils/mar.py
buildfarm/utils/pack_scp.sh
buildfarm/utils/package-script.py
buildfarm/utils/printbuildrev.py
buildfarm/utils/repository_manifest.py
buildfarm/utils/retry.py
buildfarm/utils/trigger.cgi
buildfarm/utils/unix_util.py
buildfarm/utils/updateGenerated.py
buildfarm/utils/wget_unpack.sh
buildfarm/utils/win32_util.py
buildfarm/utils/winrm/Makefile
buildfarm/utils/winrm/rm.cpp
clobberer/cleanup.php
clobberer/cleanup_clobberer.cron
clobberer/clobberer.py
clobberer/clobberer_creds.php.sample
clobberer/index.php
clobberer/setup.py
clobberer/test_clobberer.py
lib/perl/Bootstrap/Util.pm
lib/perl/Config/._Changelog
lib/perl/Config/Changelog
lib/perl/Config/General.pm
lib/perl/Config/General/Extended.pm
lib/perl/Config/General/Interpolated.pm
lib/perl/Config/MANIFEST
lib/perl/Config/META.yml
lib/perl/Config/Makefile.PL
lib/perl/Config/README
lib/perl/Config/example.cfg
lib/perl/Config/t/Tie/IxHash.pm
lib/perl/Config/t/Tie/README
lib/perl/Config/t/apache-include.conf
lib/perl/Config/t/cfg.16
lib/perl/Config/t/cfg.16a
lib/perl/Config/t/cfg.17
lib/perl/Config/t/cfg.19
lib/perl/Config/t/cfg.2
lib/perl/Config/t/cfg.20.a
lib/perl/Config/t/cfg.20.b
lib/perl/Config/t/cfg.20.c
lib/perl/Config/t/cfg.3
lib/perl/Config/t/cfg.34
lib/perl/Config/t/cfg.39
lib/perl/Config/t/cfg.4
lib/perl/Config/t/cfg.40
lib/perl/Config/t/cfg.41
lib/perl/Config/t/cfg.42
lib/perl/Config/t/cfg.43
lib/perl/Config/t/cfg.45
lib/perl/Config/t/cfg.46
lib/perl/Config/t/cfg.5
lib/perl/Config/t/cfg.6
lib/perl/Config/t/cfg.7
lib/perl/Config/t/cfg.8
lib/perl/Config/t/complex.cfg
lib/perl/Config/t/complex/n1.cfg
lib/perl/Config/t/complex/n2.cfg
lib/perl/Config/t/dual-include.conf
lib/perl/Config/t/included.conf
lib/perl/Config/t/run.t
lib/perl/Config/t/sub1/cfg.sub1
lib/perl/Config/t/sub1/cfg.sub1b
lib/perl/Config/t/sub1/cfg.sub1c
lib/perl/Config/t/sub1/cfg.sub1d
lib/perl/Config/t/sub1/cfg.sub1e
lib/perl/Config/t/sub1/sub2/cfg.sub2
lib/perl/Config/t/sub1/sub2/cfg.sub2b
lib/perl/Config/t/sub1/sub2/sub3/cfg.sub3
lib/perl/Config/t/test.rc
lib/perl/MozBuild/Util.pm
lib/perl/Release/Patcher/Config.pm
lib/perl/Release/Versions.pm
lib/python/build/checksums.py
lib/python/build/misc.py
lib/python/build/paths.py
lib/python/build/upload.py
lib/python/mozilla_buildtools/queuedir.py
lib/python/mozilla_buildtools/test/bad-patcher-config.cfg
lib/python/mozilla_buildtools/test/sample-patcher-config.cfg
lib/python/mozilla_buildtools/test/test_build_checksums.py
lib/python/mozilla_buildtools/test/test_build_paths.py
lib/python/mozilla_buildtools/test/test_json_validity.py
lib/python/mozilla_buildtools/test/test_partials.py
lib/python/mozilla_buildtools/test/test_release_config.py
lib/python/mozilla_buildtools/test/test_release_l10n.py
lib/python/mozilla_buildtools/test/test_release_updates_patcher.py
lib/python/mozilla_buildtools/test/test_release_versions.py
lib/python/release/config.py
lib/python/release/download.py
lib/python/release/partials.py
lib/python/release/sanity.py
lib/python/release/signing.py
lib/python/release/updates/patcher.py
lib/python/slavealloc/__init__.py
lib/python/slavealloc/client.py
lib/python/slavealloc/contrib/make-slavealloc-virtualenv.sh
lib/python/slavealloc/contrib/prod-db-to-staging.sh
lib/python/slavealloc/contrib/slavealloc-allocator.tac
lib/python/slavealloc/contrib/slavealloc-combined.tac
lib/python/slavealloc/contrib/slavealloc-nginx.conf
lib/python/slavealloc/contrib/slavealloc-staging-nginx.conf
lib/python/slavealloc/contrib/slavealloc-staging.initd
lib/python/slavealloc/contrib/slavealloc-ui.tac
lib/python/slavealloc/contrib/slavealloc.initd
lib/python/slavealloc/daemon/__init__.py
lib/python/slavealloc/daemon/application.py
lib/python/slavealloc/daemon/http/__init__.py
lib/python/slavealloc/daemon/http/api.py
lib/python/slavealloc/daemon/http/gettac.py
lib/python/slavealloc/daemon/http/site.py
lib/python/slavealloc/daemon/http/ui.py
lib/python/slavealloc/daemon/service.py
lib/python/slavealloc/daemon/ui/__init__.py
lib/python/slavealloc/data/__init__.py
lib/python/slavealloc/data/model.py
lib/python/slavealloc/data/queries.py
lib/python/slavealloc/data/setup.py
lib/python/slavealloc/exceptions.py
lib/python/slavealloc/logic/__init__.py
lib/python/slavealloc/logic/allocate.py
lib/python/slavealloc/logic/buildbottac.py
lib/python/slavealloc/scripts/__init__.py
lib/python/slavealloc/scripts/dbdump.py
lib/python/slavealloc/scripts/dbimport.py
lib/python/slavealloc/scripts/dbinit.py
lib/python/slavealloc/scripts/disable.py
lib/python/slavealloc/scripts/enable.py
lib/python/slavealloc/scripts/gettac.py
lib/python/slavealloc/scripts/lock.py
lib/python/slavealloc/scripts/main.py
lib/python/slavealloc/scripts/notes.py
lib/python/slavealloc/www/css/demo_table_jui.css
lib/python/slavealloc/www/css/responsive-bundle.352a81c95337.css
lib/python/slavealloc/www/css/slavealloc.css
lib/python/slavealloc/www/css/smoothness/images/ui-bg_flat_0_aaaaaa_40x100.png
lib/python/slavealloc/www/css/smoothness/images/ui-bg_flat_75_ffffff_40x100.png
lib/python/slavealloc/www/css/smoothness/images/ui-bg_glass_55_fbf9ee_1x400.png
lib/python/slavealloc/www/css/smoothness/images/ui-bg_glass_65_ffffff_1x400.png
lib/python/slavealloc/www/css/smoothness/images/ui-bg_glass_75_dadada_1x400.png
lib/python/slavealloc/www/css/smoothness/images/ui-bg_glass_75_e6e6e6_1x400.png
lib/python/slavealloc/www/css/smoothness/images/ui-bg_glass_95_fef1ec_1x400.png
lib/python/slavealloc/www/css/smoothness/images/ui-bg_highlight-soft_75_cccccc_1x100.png
lib/python/slavealloc/www/css/smoothness/images/ui-icons_222222_256x240.png
lib/python/slavealloc/www/css/smoothness/images/ui-icons_2e83ff_256x240.png
lib/python/slavealloc/www/css/smoothness/images/ui-icons_454545_256x240.png
lib/python/slavealloc/www/css/smoothness/images/ui-icons_888888_256x240.png
lib/python/slavealloc/www/css/smoothness/images/ui-icons_cd0a0a_256x240.png
lib/python/slavealloc/www/css/smoothness/jquery-ui-1.8.9.custom.css
lib/python/slavealloc/www/dashboard.html
lib/python/slavealloc/www/icons/bug.png
lib/python/slavealloc/www/icons/bug_add.png
lib/python/slavealloc/www/icons/bug_delete.png
lib/python/slavealloc/www/icons/bug_edit.png
lib/python/slavealloc/www/icons/bug_error.png
lib/python/slavealloc/www/icons/bug_go.png
lib/python/slavealloc/www/icons/bug_link.png
lib/python/slavealloc/www/icons/help.png
lib/python/slavealloc/www/icons/loading.png
lib/python/slavealloc/www/index.html
lib/python/slavealloc/www/js/bugzilla.js
lib/python/slavealloc/www/js/controller.js
lib/python/slavealloc/www/js/deps/backbone-0.9.10.min.js
lib/python/slavealloc/www/js/deps/backbone-min.js
lib/python/slavealloc/www/js/deps/jquery-1.4.4.min.js
lib/python/slavealloc/www/js/deps/jquery-1.9.1.min.js
lib/python/slavealloc/www/js/deps/jquery-ui-1.10.0.js
lib/python/slavealloc/www/js/deps/jquery-ui-1.8.9.custom.min.js
lib/python/slavealloc/www/js/deps/jquery.dataTables-1.9.4.min.js
lib/python/slavealloc/www/js/deps/jquery.dataTables.min.js
lib/python/slavealloc/www/js/deps/load-min.js
lib/python/slavealloc/www/js/deps/underscore-1.4.4.min.js
lib/python/slavealloc/www/js/deps/underscore-min.js
lib/python/slavealloc/www/js/models.js
lib/python/slavealloc/www/js/slavealloc.js
lib/python/slavealloc/www/js/views.js
lib/python/util/fabric/__init__.py
lib/python/util/fabric/actions.py
lib/python/util/fabric/common.py
lib/python/util/post_file.py
lib/python/util/transfer.py
lib/python/util/tuxedo.py
release/add-promptWaitTime.py
release/firefox-tuxedo.ini
release/get-update-xml.sh
release/patcher-config-bump.pl
release/patcher-configs/mozBeta-branch-patcher2.cfg
release/patcher-configs/mozBeta-thunderbird-branch-patcher2.cfg
release/patcher-configs/mozDevedition-branch-patcher2.cfg
release/patcher-configs/mozEsr45-branch-patcher2.cfg
release/patcher-configs/mozEsr45-thunderbird-branch-patcher2.cfg
release/patcher-configs/mozEsr52-branch-patcher2.cfg
release/patcher-configs/mozEsr52-thunderbird-branch-patcher2.cfg
release/patcher-configs/mozRelease-branch-patcher2.cfg
release/test-mar-url.sh
release/tuxedo-add.py
release/updates/aurora-devedition-linux.cfg
release/updates/aurora-devedition-linux64.cfg
release/updates/aurora-devedition-macosx64.cfg
release/updates/aurora-devedition-win32.cfg
release/updates/aurora-devedition-win64.cfg
release/updates/beta-firefox-linux.cfg
release/updates/beta-firefox-linux64.cfg
release/updates/beta-firefox-macosx64.cfg
release/updates/beta-firefox-win32.cfg
release/updates/beta-firefox-win64.cfg
release/updates/comm-esr52-thunderbird-linux.cfg
release/updates/comm-esr52-thunderbird-linux64.cfg
release/updates/comm-esr52-thunderbird-mac64.cfg
release/updates/comm-esr52-thunderbird-win32.cfg
release/updates/comm-esr52-thunderbird-win64.cfg
release/updates/date-firefox-linux.cfg
release/updates/date-firefox-linux64.cfg
release/updates/date-firefox-macosx64.cfg
release/updates/date-firefox-win32.cfg
release/updates/date-firefox-win64.cfg
release/updates/esr-firefox-linux.cfg
release/updates/esr-firefox-linux64.cfg
release/updates/esr-firefox-macosx64.cfg
release/updates/esr-firefox-win32.cfg
release/updates/esr-firefox-win64.cfg
release/updates/mozBeta-thunderbird-linux.cfg
release/updates/mozBeta-thunderbird-linux64.cfg
release/updates/mozBeta-thunderbird-mac64.cfg
release/updates/mozBeta-thunderbird-win32.cfg
release/updates/mozEsr45-thunderbird-linux.cfg
release/updates/mozEsr45-thunderbird-linux64.cfg
release/updates/mozEsr45-thunderbird-mac64.cfg
release/updates/mozEsr45-thunderbird-win32.cfg
release/updates/mozilla-esr45-firefox-linux.cfg
release/updates/mozilla-esr45-firefox-linux64.cfg
release/updates/mozilla-esr45-firefox-mac64.cfg
release/updates/mozilla-esr45-firefox-win32.cfg
release/updates/mozilla-esr45-firefox-win64.cfg
release/updates/mozilla-esr52-firefox-linux.cfg
release/updates/mozilla-esr52-firefox-linux64.cfg
release/updates/mozilla-esr52-firefox-mac64.cfg
release/updates/mozilla-esr52-firefox-win32.cfg
release/updates/mozilla-esr52-firefox-win64.cfg
release/updates/release-firefox-linux.cfg
release/updates/release-firefox-linux64.cfg
release/updates/release-firefox-macosx64.cfg
release/updates/release-firefox-win32.cfg
release/updates/release-firefox-win64.cfg
scripts/blocklist/sync-hg-blocklist.sh
scripts/build-promotion/balrog-release-pusher.py
scripts/build-promotion/balrog-release-shipper.py
scripts/build-promotion/create-update-verify-config.py
scripts/fuzzing/fuzzer.sh
scripts/hsts/update_hsts_preload_list.sh
scripts/l10n/create-release-repacks.py
scripts/l10n/mbsdiff_hook.sh
scripts/l10n/release_repacks.sh
scripts/nanojit/nanojit.sh
scripts/periodic_file_updates/periodic_file_updates.manifest
scripts/periodic_file_updates/periodic_file_updates.sh
scripts/release/FAKE_KEY
scripts/release/KEY
scripts/release/Makefile
scripts/release/compare-mozconfigs.py
scripts/release/post-release.py
scripts/release/post-release.sh
scripts/release/tag-release.py
scripts/release/tagging.sh
scripts/spidermonkey_builds/arm-sim
scripts/spidermonkey_builds/dtrace
scripts/spidermonkey_builds/exactrooting
scripts/spidermonkey_builds/generational
scripts/spidermonkey_builds/rootanalysis
scripts/spidermonkey_builds/spidermonkey.sh
scripts/spidermonkey_builds/vanilla
scripts/spidermonkey_builds/warnaserr
scripts/spidermonkey_builds/warnaserrdebug
scripts/support/mouse_and_screen_resolution.py
scripts/tooltool/fetch_and_unpack.sh
scripts/updates/balrog-nightly-locker.py
scripts/updates/balrog-release-pusher.py
scripts/updates/balrog-release-shipper.py
scripts/updates/balrog-submitter.py
scripts/updates/balrog-tweaker.py
scripts/updates/create-update-verify-configs.py
setup.py
stage/extract_and_run_command.py
stage/organize.py
stage/post_upload.py
trychooser/index.html
deleted file mode 100644
--- a/MANIFEST.in
+++ /dev/null
@@ -1,3 +0,0 @@
-recursive-include lib/python/slavealloc/contrib *
-recursive-include lib/python/slavealloc/www *
-
--- a/README.md
+++ b/README.md
@@ -1,20 +1,15 @@
 # Tools [![Build Status](https://travis-ci.org/mozilla/build-tools.png)](https://travis-ci.org/mozilla/build-tools)
 
-This repository contains tools used by Mozilla Release Engineering. This repository
-is checked out on Buildbot masters and Buildbot slaves.
+This repository contains tools used by Mozilla Release Engineering.
 
 This repository is a downstream read-only copy of:
 https://hg.mozilla.org/build/tools/
 
 To submit a patch, please create a bug on http://bugzilla.mozilla.org/ under
-Product: Release Engineering, Component: Tools.
+Product: Release Engineering
 
 To run tests:
 ```
 pip install tox
 tox
 ```
-
-Please also see:
-* https://github.com/mozilla/build-buildbotcustom
-* https://github.com/mozilla/build-buildbot-configs
deleted file mode 100755
--- a/buildbot-helpers/check_queuedir.py
+++ /dev/null
@@ -1,127 +0,0 @@
-#!/usr/bin/python
-"""%prog -w <warn_new> -c <crit_new> -t <max_age> queuedir [queuedir...]
-
-nagios plugin to monitor a queuedir"""
-import os
-import sys
-import traceback
-import time
-
-OK, WARNING, CRITICAL, UNKNOWN = range(4)
-
-
-def oldest_mtime(files):
-    retval = None
-    for f in files:
-        try:
-            mtime = os.path.getmtime(f)
-            if retval is None:
-                retval = mtime
-            else:
-                retval = min(retval, mtime)
-        except OSError:
-            # The file probably went away
-            pass
-    return retval
-
-
-def check_queuedir(d, options):
-    status = OK
-    msgs = []
-
-    # Check 'dead'
-    num_dead = len([f for f in os.listdir(
-        os.path.join(d, 'dead')) if not f.endswith(".log")])
-    if num_dead > 0:
-        status = CRITICAL
-        if num_dead == 1:
-            msgs.append("%i dead item" % num_dead)
-        else:
-            msgs.append("%i dead items" % num_dead)
-
-    # Check 'new'
-    new_files = os.listdir(os.path.join(d, 'new'))
-    num_new = len(new_files)
-    if num_new > 0:
-        oldest_new = oldest_mtime([(os.path.join(d, 'new', f)) for f in new_files])
-        if num_new >= options.crit_new:
-            status = CRITICAL
-            msgs.append("%i new items" % num_new)
-        elif num_new >= options.warn_new:
-            status = max(status, WARNING)
-            msgs.append("%i new items" % num_new)
-
-        age = int(time.time() - oldest_new)
-        if age > options.max_age:
-            status = max(status, WARNING)
-            msgs.append("oldest item is %is old" % age)
-
-    # Check 'cur'
-    cur_files = os.listdir(os.path.join(d, 'cur'))
-    num_cur = len(cur_files)
-    if num_cur > 0:
-        oldest_cur = oldest_mtime([(os.path.join(d, 'cur', f)) for f in cur_files])
-        if num_cur >= options.crit_cur:
-            status = CRITICAL
-            msgs.append("%i cur items" % num_cur)
-        elif num_cur >= options.warn_cur:
-            status = max(status, WARNING)
-            msgs.append("%i cur items" % num_cur)
-
-        age = int(time.time() - oldest_cur)
-        if age > options.max_age:
-            status = max(status, WARNING)
-            msgs.append("oldest item is %is old" % age)
-
-    return status, msgs
-
-
-def main():
-    from optparse import OptionParser
-    parser = OptionParser(__doc__)
-    parser.set_defaults(
-        warn_new=50,
-        crit_new=100,
-        warn_cur=50,
-        crit_cur=100,
-        max_age=900,
-    )
-    parser.add_option("-w", dest="warn_new", type="int",
-                      help="warn when there are more than this number of items in new")
-    parser.add_option("-c", dest="crit_new", type="int",
-                      help="critical when there are more than this number of items in new")
-    parser.add_option("--warn-cur", dest="warn_cur", type="int",
-                      help="warn when there are more than this number of items in cur")
-    parser.add_option("--crit-cur", dest="crit_cur", type="int",
-                      help="critical when there are more than this number of items in cur")
-    parser.add_option("-t", dest="max_age", type="int",
-                      help="warn when oldest item in new/cur is more than this many seconds old")
-
-    options, args = parser.parse_args()
-
-    if len(args) == 0:
-        print "You must specify at least one queuedir"
-        sys.exit(UNKNOWN)
-
-    try:
-        status = OK
-        msgs = []
-        for d in args:
-            d_status, d_msgs = check_queuedir(d, options)
-            status = max(status, d_status)
-            msgs.extend(d_msgs)
-
-        if not msgs:
-            print "Ok"
-        else:
-            print ";".join(msgs)
-        sys.exit(status)
-    except SystemExit:
-        raise
-    except:
-        print "Unhandled exception"
-        traceback.print_exc()
-        sys.exit(UNKNOWN)
-
-if __name__ == '__main__':
-    main()
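
For context on the deleted plugin above: it follows the conventional Nagios plugin contract, where the process exit status (0-3, as in its `OK, WARNING, CRITICAL, UNKNOWN = range(4)`) conveys the check result and stdout carries the human-readable message. A minimal Python 3 sketch of that contract, using the deleted script's default thresholds (the `report` helper and its `num_new` input are hypothetical):

```python
import sys

# Standard Nagios plugin exit codes, matching the deleted script above.
OK, WARNING, CRITICAL, UNKNOWN = range(4)

def report(num_new, warn=50, crit=100):
    """Map a queuedir item count to a Nagios status; thresholds are the
    deleted script's defaults for the 'new' directory."""
    if num_new >= crit:
        print("%i new items" % num_new)
        sys.exit(CRITICAL)
    elif num_new >= warn:
        print("%i new items" % num_new)
        sys.exit(WARNING)
    print("Ok")
    sys.exit(OK)
```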
deleted file mode 100755
--- a/buildbot-helpers/command_runner.py
+++ /dev/null
@@ -1,199 +0,0 @@
-#!/usr/bin/env python
-"""
-Runs commands from a queue!
-"""
-import subprocess
-import os
-import signal
-import time
-from mozilla_buildtools.queuedir import QueueDir
-from buildbot.util import json
-import logging
-log = logging.getLogger(__name__)
-
-
-class Job(object):
-    def __init__(self, cmd, item_id, log_fp):
-        self.cmd = cmd
-        self.log = log_fp
-        self.item_id = item_id
-        self.started = None
-        self.last_signal_time = 0
-        self.last_signal = None
-
-        self.proc = None
-
-    def start(self):
-        devnull = open(os.devnull, 'r')
-        self.log.write("Running %s\n" % self.cmd)
-        self.log.flush()
-        self.proc = subprocess.Popen(self.cmd, close_fds=True, stdin=devnull,
-                                     stdout=self.log, stderr=self.log)
-        self.started = time.time()
-
-    def check(self):
-        now = time.time()
-        if now - self.started > self.max_time:
-            # Kill stuff off
-            if now - self.last_signal_time > 60:
-                s = {None: signal.SIGINT, signal.SIGINT:
-                     signal.SIGTERM}.get(self.last_signal, signal.SIGKILL)
-                log.info("Killing %i with %i", self.proc.pid, s)
-                try:
-                    self.log.write("Killing with %s\n" % s)
-                    os.kill(self.proc.pid, s)
-                    self.last_signal = s
-                    self.last_signal_time = now
-                except OSError:
-                    # Ok, process must have exited already
-                    log.exception("Failed to kill")
-                    pass
-
-        result = self.proc.poll()
-        if result is not None:
-            self.log.write("\nResult: %s, Elapsed: %1.1f seconds\n" % (result, time.time() - self.started))
-            self.log.close()
-        return result
-
-
-class CommandRunner(object):
-    def __init__(self, options):
-        self.queuedir = options.queuedir
-        self.q = QueueDir('commands', self.queuedir)
-        self.concurrency = options.concurrency
-        self.retry_time = options.retry_time
-        self.max_retries = options.max_retries
-        self.max_time = options.max_time
-
-        self.active = []
-
-        # List of (signal_time, level, proc)
-        self.to_kill = []
-
-    def run(self, job):
-        """
-        Runs the given job
-        """
-        log.info("Running %s", job.cmd)
-        try:
-            job.start()
-            self.active.append(job)
-        except OSError:
-            job.log.write("\nFailed with OSError; requeuing in %i seconds\n" %
-                          self.retry_time)
-            # Wait to requeue it
-            # If we die, then it's still in cur, and will be moved back into
-            # 'new' eventually
-            self.q.requeue(job.item_id, self.retry_time, self.max_retries)
-
-    def monitor(self):
-        """
-        Monitor running jobs
-        """
-        for job in self.active[:]:
-            self.q.touch(job.item_id)
-            result = job.check()
-
-            if result is not None:
-                self.active.remove(job)
-                if result == 0:
-                    self.q.remove(job.item_id)
-                else:
-                    log.warn("%s failed; requeuing", job.item_id)
-                    # Requeue it!
-                    self.q.requeue(
-                        job.item_id, self.retry_time, self.max_retries)
-
-    def loop(self):
-        """
-        Main processing loop. Read new items from the queue and run them!
-        """
-        while True:
-            self.monitor()
-            if len(self.active) >= self.concurrency:
-                # Wait!
-                time.sleep(1)
-                continue
-
-            while len(self.active) < self.concurrency:
-                item = self.q.pop()
-                if not item:
-                    # Don't wait for very long, since we have to check up on
-                    # our children
-                    if self.active:
-                        self.q.wait(1)
-                    else:
-                        self.q.wait()
-                    break
-
-                item_id, fp = item
-                try:
-                    command = json.load(fp)
-                    job = Job(command, item_id, self.q.getlog(item_id))
-                    job.max_time = self.max_time
-                    self.run(job)
-                except ValueError:
-                    # Couldn't parse it as json
-                    # There's no hope!
-                    self.q.log(item_id, "Couldn't load json; murdering")
-                    self.q.murder(item_id)
-                finally:
-                    fp.close()
-
-
-def main():
-    from optparse import OptionParser
-    import logging.handlers
-    parser = OptionParser()
-    parser.set_defaults(
-        concurrency=1,
-        max_retries=1,
-        retry_time=0,
-        verbosity=0,
-        logfile=None,
-        max_time=60,
-    )
-    parser.add_option("-q", "--queuedir", dest="queuedir")
-    parser.add_option("-j", "--jobs", dest="concurrency", type="int",
-                      help="number of commands to run at once")
-    parser.add_option("-r", "--max_retries", dest="max_retries",
-                      type="int", help="number of times to retry commands")
-    parser.add_option("-t", "--retry_time", dest="retry_time",
-                      type="int", help="seconds to wait between retries")
-    parser.add_option("-v", "--verbose", dest="verbosity",
-                      action="count", help="increase verbosity")
-    parser.add_option(
-        "-l", "--logfile", dest="logfile", help="where to send logs")
-    parser.add_option("-m", "--max_time", dest="max_time", type="int",
-                      help="maximum time for a command to run")
-
-    options, args = parser.parse_args()
-
-    # Set up logging
-    if options.verbosity == 0:
-        log_level = logging.WARNING
-    elif options.verbosity == 1:
-        log_level = logging.INFO
-    else:
-        log_level = logging.DEBUG
-
-    if not options.logfile:
-        logging.basicConfig(
-            level=log_level, format="%(asctime)s - %(message)s")
-    else:
-        logger = logging.getLogger()
-        logger.setLevel(log_level)
-        handler = logging.handlers.RotatingFileHandler(
-            options.logfile, maxBytes=1024 ** 2, backupCount=5)
-        formatter = logging.Formatter("%(asctime)s - %(message)s")
-        handler.setFormatter(formatter)
-        logger.addHandler(handler)
-
-    if not options.queuedir:
-        parser.error("-q/--queuedir is required")
-
-    runner = CommandRunner(options)
-    runner.loop()
-
-if __name__ == '__main__':
-    main()
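
The deleted `Job.check()` above implements an escalating-kill pattern: once a command exceeds `max_time`, it is sent SIGINT, then SIGTERM, then SIGKILL, waiting at least 60 seconds between escalations. A standalone Python 3 sketch of the same idea (the function name, `state` tuple, and `grace` parameter are illustrative, not from the original):

```python
import signal
import time

def kill_if_overdue(proc, started, max_time, state, grace=60):
    """Escalate SIGINT -> SIGTERM -> SIGKILL on a subprocess.Popen `proc`
    that has run longer than `max_time` seconds. `state` is a
    (last_signal, last_signal_time) tuple, initially (None, 0)."""
    now = time.time()
    last_sig, last_time = state
    if now - started > max_time and now - last_time > grace:
        # Same escalation table as the deleted Job.check()
        sig = {None: signal.SIGINT,
               signal.SIGINT: signal.SIGTERM}.get(last_sig, signal.SIGKILL)
        try:
            proc.send_signal(sig)
            state = (sig, now)
        except OSError:
            pass  # process already exited
    return state
```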
deleted file mode 100755
--- a/buildbot-helpers/force_build.py
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/python
-
-from urllib import urlencode
-from urllib2 import urlopen
-from urlparse import urljoin
-
-
-class Forcer:
-    maxProperties = 3
-    """A Forcer knows how to force a build through the Buildbot web interface
-       given the base URL to the master (masterUrl) and the builder name.
-       You can pass name, comments, branch, revision, or properties to
-       forceBuild() and they will be included in the POST
-    """
-    def __init__(self, masterUrl, builder, loud=True):
-        self.masterUrl = masterUrl
-        self.builder = builder
-        self.forceUrl = urljoin(masterUrl, 'builders/%s/force' % builder)
-        self.loud = loud
-
-    def getArgs(self, name, comments, branch, revision, properties):
-        if len(properties) > self.maxProperties:
-            raise Exception("*** ERROR: Cannot pass more than 3 properties")
-
-        args = {'username': name, 'comments': comments}
-        if branch:
-            args['branch'] = branch
-        if revision:
-            args['revision'] = revision
-        i = 1
-        for key, value in properties.iteritems():
-            p = "property%d" % i
-            args['%sname' % p] = key
-            args['%svalue' % p] = value
-            i += 1
-        return args
-
-    def forceBuild(self, name="Unknown", comments="Unknown", branch=None,
-                   revision=None, properties={}):
-        args = self.getArgs(name, comments, branch, revision, properties)
-        params = urlencode(args)
-        request = None
-        try:
-            if self.loud:
-                print "Forcing %s with params: %s" % (self.forceUrl, str(args))
-            request = urlopen(self.forceUrl, params)
-        except:
-            if self.loud:
-                print "*** ERROR ***"
-            raise
-        # If we get past the try block, we assume success
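
Were the `Forcer` class above still present, a hypothetical use of it, mirroring how `force_release_l10n.py` (below) drove it, might look like this; the master URL, builder name, and property values are placeholders:

```python
# Placeholders throughout: master URL, builder, and values are illustrative.
forcer = Forcer("http://localhost:8010", "linux_repack")
forcer.forceBuild(name="bhearsum", comments="retrigger l10n repack",
                  branch="de",  # one branch per locale, to avoid merged builds
                  properties={"en_revision": "FIREFOX_3_5_4_RELEASE",
                              "l10n_revision": "FIREFOX_3_5_4_RELEASE",
                              "locale": "de"})
```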
deleted file mode 100755
--- a/buildbot-helpers/force_release_l10n.py
+++ /dev/null
@@ -1,200 +0,0 @@
-#!/usr/bin/python
-
-from optparse import OptionParser
-from urllib2 import urlopen
-from urlparse import urljoin
-from time import sleep
-
-from buildbotcustom.l10n import ParseLocalesFile
-from buildbotcustom.common import getSupportedPlatforms
-
-from force_build import Forcer
-
-ALL_PLATFORMS = getSupportedPlatforms()
-
-
-class L10nForcer:
-    ignoredLocales = ('en-US',)  # locales that are _always_ to be skipped
-
-    def __init__(self, masterUrl, toForce, releaseTag, name="Unknown",
-                 comments="Unknown", delay=2, loud=True):
-        """toForce is a dict whose keys are platform names and whose values
-           are a tuple of locales to force for that platform
-        """
-        self.forcers = {}
-        assert isinstance(toForce, dict), "toForce must be a dict!"
-        for value in toForce.itervalues():
-            assert isinstance(value, (tuple, list)), \
-                "toForce values must be a list or tuple"
-        for platform in toForce.iterkeys():
-            forcer = Forcer(masterUrl, '%s_repack' % platform, loud)
-            self.forcers[platform] = forcer
-        self.toForce = toForce
-        self.name = name
-        self.comments = comments
-        self.delay = delay
-        self.loud = loud
-        self.releaseTag = releaseTag
-
-    def getProperties(self, locale):
-        return {'en_revision': self.releaseTag,
-                'l10n_revision': self.releaseTag,
-                'locale': locale}
-
-    def forceBuilds(self):
-        errors = []
-        for platform, locales in self.toForce.iteritems():
-            forcer = self.forcers[platform]
-            for locale in sorted(locales):
-                if locale in self.ignoredLocales:
-                    continue
-                properties = self.getProperties(locale)
-                try:
-                    # HACK HACK HACK HACK ALERT
-                    # The Force Build form has no way to prevent builds from
-                    # getting merged together. By putting each of them on a
-                    # different branch, we can prevent this. It doesn't affect
-                    # the builds in any way, but it could be confusing.
-                    # Once we have a Scheduler we can use 'sendchange' with
-                    # we should be able to stop doing this.
-                    forcer.forceBuild(name=self.name, comments=self.comments,
-                                      branch=locale, properties=properties)
-                    sleep(self.delay)
-                except:
-                    errors.append((platform, locale))
-                    if self.loud:
-                        "*** Error when forcing %s %s" % (platform, locale)
-                    # Don't raise the exception, keep going
-        if self.loud:
-            if len(errors) > 0:
-                print "ERROR SUMMARY:"
-            for platform, locale in map(lambda x: (x[0], x[1]), errors):
-                print "  %s - %s" % (platform, locale)
-
-
-def getShippedLocales(shippedLocales):
-    parsed = ParseLocalesFile(shippedLocales)
-    # Now we have to reverse it, because we want it a different way
-    properlyParsed = {}
-    for p in ALL_PLATFORMS:
-        properlyParsed[p] = []
-    for locale, platforms in parsed.iteritems():
-        if platforms:
-            for p in platforms:
-                # map the bouncer/shipped-locales platforms to automation ones
-                if p == 'osx':
-                    p = 'macosx'
-                # skip platforms we don't know about
-                if p not in properlyParsed.keys():
-                    continue
-                properlyParsed[p].append(locale)
-        else:
-            for p in properlyParsed.iterkeys():
-                properlyParsed[p].append(locale)
-    return properlyParsed
-
-
-def filterPlatforms(shippedLocales, includedPlatforms):
-    # includedPlatforms is a list of platforms that we want to do builds for,
-    # therefore, we remove any platform NOT in that list from shippedLocales
-    for platform in shippedLocales.keys():
-        if platform not in includedPlatforms:
-            del shippedLocales[platform]
-    return shippedLocales
-
-
-if __name__ == '__main__':
-    usage = """usage: %prog [options]
-You must have buildbotcustom and buildbot in your PYTHONPATH to use this script.
-
-Examples:
-To retrigger all l10n builds for all platforms for Firefox 3.5.4:
-%prog -m http://production-master02.build.mozilla.org:8010 -t FIREFOX_3_5_4_RELEASE -v -b releases/mozilla-1.9.1 -n [your name]
-
-To retrigger all l10n builds for linux for Firefox 3.5.4:
-%prog -m http://production-master02.build.mozilla.org:8010 -t FIREFOX_3_5_4_RELEASE -v -b releases/mozilla-1.9.1 -p linux -n [your name]
-
-To retrigger all l10n builds for linux and macosx for Firefox 3.5.4:
-%prog -m http://production-master02.build.mozilla.org:8010 -t FIREFOX_3_5_4_RELEASE -v -b releases/mozilla-1.9.1 -p linux -p macosx -n [your name]
-
-To retrigger 'de' builds for every platform for Firefox 3.6b1:
-echo "de" > de-only
-%prog -m http://production-master.build.mozilla.org:8010 -t FIREFOX_3_6b1_RELEASE -v -s de-only -n [your name]
-
-To retrigger 'af' on linux and 'zh-TW' on win32 and mac for Firefox 3.6b1:
-echo "af linux" > my-locales
-echo "zh-TW win32 osx" >> my-locales
-%prog -m http://production-master.build.mozilla.org:8010 -t FIREFOX_3_6b1_RELEASE -v -s my-locales -n [your name]
-
-"""
-    parser = OptionParser(usage=usage)
-    parser.add_option("-n", "--name", action="store", dest="name",
-                      default="Unknown",
-                      help="Name to use when submitting.")
-    parser.add_option("-m", "--master", action="store", dest="master",
-                      help="The base url of the master to submit to. " +
-                           "Eg, http://localhost:8010")
-    parser.add_option("-t", "--release-tag", action="store", dest="releaseTag",
-                      help="The tag to build with. Eg, FIREFOX_3_5_3_RELEASE")
-    parser.add_option("-c", "--comments", action="store", dest="comments",
-                      default="Unknown")
-    parser.add_option("-d", "--delay", action="store", dest="delay",
-                      default=2,
-                      help="Amount of time (in seconds) to wait between each" +
-                           "POST. Defaults to 2 seconds")
-    parser.add_option("-v", "--verbose", action="store_true", dest="loud",
-                      default=False)
-    parser.add_option("-g", "--hg", action="store", dest="hg",
-                      default="https://hg.mozilla.org",
-                      help="Root of the HG server. Defaults to " +
-                           "https://hg.mozilla.org. Only used when -s " +
-                           "isn't specified.")
-    parser.add_option("-b", "--branch", action="store", dest="branch",
-                      help="The branch, relative to the HG server, to " +
-                           "locate shipped locales on. " +
-                           "Eg, releases/mozilla-1.9.1. Only used when " +
-                           "-s isn't specified.")
-    parser.add_option("-s", "--shipped-locales-file", action="store",
-                      dest="shippedLocales", default=None,
-                      help="When specified, this script will read in the "
-                           "contents of the given file and force the "
-                           "appropriate builds based on the contents. It is "
-                           "assumed that the file is in shipped-locales format."
-                           " If en-US is listed as a locale it will be ignored."
-                           " If this option is absent shipped-locales will be "
-                           "retrieved based on the -t, -g, and -b arguments "
-                           "given. This will cause every locale for every "
-                           "platform to be rebuilt. Use with caution.")
-    parser.add_option("-p", "--platform", action="append",
-                      dest="platforms", default=None,
-                      help="When specified, only platforms passed with -p will "
-                           "have builds forced. By default, builds will be "
-                           "forced for all platforms %s for " %
-                      str(ALL_PLATFORMS) +
-                      "locales without exceptions. Platform exceptions "
-                      "will be obeyed regardless of -p options used. See "
-                      "example usage for further details.")
-
-    (options, args) = parser.parse_args()
-
-    shippedLocales = None
-    if options.shippedLocales:
-        f = open(options.shippedLocales)
-        shippedLocales = f.read()
-        f.close()
-    else:
-        file = '%s/raw-file/%s/browser/locales/shipped-locales' % \
-            (options.branch, options.releaseTag)
-        url = urljoin(options.hg, file)
-        shippedLocales = urlopen(url).read()
-
-    platforms = options.platforms or ALL_PLATFORMS
-    for p in platforms:
-        assert p in ALL_PLATFORMS
-
-    toForce = filterPlatforms(getShippedLocales(shippedLocales), platforms)
-    forcer = L10nForcer(masterUrl=options.master, toForce=toForce,
-                        releaseTag=options.releaseTag, name=options.name,
-                        comments=options.comments, delay=options.delay,
-                        loud=options.loud)
-    forcer.forceBuilds()
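
The shipped-locales format consumed above (see the docstring's `echo "zh-TW win32 osx" >> my-locales` example) is one locale per line, optionally followed by the platforms that ship it; no platforms means the locale ships everywhere. A minimal sketch of a parser for that format, assuming only what the docstring shows — it is not the actual `buildbotcustom.l10n.ParseLocalesFile`:

```python
def parse_shipped_locales(text):
    """Parse lines like 'zh-TW win32 osx' into {'zh-TW': ['win32', 'osx']};
    a locale with no platforms (e.g. 'de') maps to an empty list,
    meaning it ships on every platform."""
    parsed = {}
    for line in text.splitlines():
        parts = line.split()
        if parts:
            parsed[parts[0]] = parts[1:]
    return parsed
```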
deleted file mode 100644
--- a/buildbot-helpers/mozconfig_whitelist
+++ /dev/null
@@ -1,191 +0,0 @@
-# 'nightly' contains things that are in nightly mozconfigs and allowed to be missing from release builds.
-# Other keys in whitelist contain things that are in that branch's mozconfigs and allowed to be missing from nightly builds.
-whitelist = {
-    'mozilla-release': {},
-    'mozilla-beta': {},
-    'comm-beta': {},
-    'comm-esr52': {},
-    'nightly': {},
-    }
-
-all_desktop_platforms = ['win32', 'linux', 'linux64', 'macosx64', 'win64']
-all_mobile_platforms = ['android', 'android-api-9', 'android-api-11', 'android-api-15', 'android-x86']
-all_platforms = all_desktop_platforms + all_mobile_platforms
-
-for platform in all_platforms:
-    whitelist['nightly'][platform] = [
-        'ac_add_options --enable-update-channel=nightly',
-        'ac_add_options --enable-profiling',
-        'mk_add_options CLIENT_PY_ARGS="--hg-options=\'--verbose --time\' --hgtool=../tools/buildfarm/utils/hgtool.py --skip-chatzilla --skip-comm --skip-inspector --skip-venkman --tinderbox-print"'
-    ]
-
-for platform in ['linux', 'linux64']:
-    whitelist['nightly'][platform] += [
-        'ac_add_options --disable-elf-hack # --enable-elf-hack conflicts with --enable-profiling',
-        '. "$topsrcdir/build/mozconfig.cache"',
-    ]
-
-for platform in ['linux', 'linux64', 'macosx64']:
-    whitelist['nightly'][platform] += [
-        'ac_add_options --enable-codesighs',
-        'mk_add_options MOZ_MAKE_FLAGS="-j4"',
-    ]
-
-for platform in ['linux', 'linux64', 'macosx64', 'win32', 'win64']:
-    whitelist['nightly'][platform] += ['ac_add_options --enable-signmar']
-    whitelist['nightly'][platform] += ['ac_add_options --enable-js-diagnostics']
-    whitelist['nightly'][platform] += ['ac_add_options --with-branding=browser/branding/nightly']
-    whitelist['nightly'][platform] += ['. "$commtopsrcdir/build/mozconfig.sccache"']
-
-whitelist['nightly']['linux'] += [
-    'CXX=$REAL_CXX',
-    'CXX="ccache $REAL_CXX"',
-    'CC="ccache $REAL_CC"',
-    'mk_add_options PROFILE_GEN_SCRIPT=@TOPSRCDIR@/build/profile_pageloader.pl',
-    'ac_add_options --with-ccache=/usr/bin/ccache',
-    'export MOZILLA_OFFICIAL=1',
-    'export MOZ_TELEMETRY_REPORTING=1',
-    "mk_add_options PROFILE_GEN_SCRIPT='$(PYTHON) @MOZ_OBJDIR@/_profile/pgo/profileserver.py 10'",
-    'STRIP_FLAGS="--strip-debug"',
-]
-
-whitelist['nightly']['linux64'] += [
-    'export MOZILLA_OFFICIAL=1',
-    'export MOZ_TELEMETRY_REPORTING=1',
-    "mk_add_options PROFILE_GEN_SCRIPT='$(PYTHON) @MOZ_OBJDIR@/_profile/pgo/profileserver.py 10'",
-    'STRIP_FLAGS="--strip-debug"',
-    'ac_add_options --with-ccache=/usr/bin/ccache',
-]
-
-whitelist['nightly']['macosx64'] += [
-    'if test "${MOZ_UPDATE_CHANNEL}" = "nightly"; then',
-    'ac_add_options --with-macbundlename-prefix=Firefox',
-    'fi',
-    'mk_add_options MOZ_MAKE_FLAGS="-j12"',
-    'ac_add_options --with-ccache',
-    'ac_add_options --disable-install-strip',
-    'ac_add_options --enable-instruments',
-    'ac_add_options --enable-dtrace',
-]
-
-whitelist['nightly']['win32'] += [
-    'mk_add_options MOZ_MAKE_FLAGS=-j1',
-    'if test "$IS_NIGHTLY" != ""; then',
-    'ac_add_options --disable-auto-deps',
-    'fi',
-    'ac_add_options --enable-metro',
-    '. "$topsrcdir/build/mozconfig.cache"',
-]
-
-whitelist['nightly']['win64'] += [
-    'mk_add_options MOZ_MAKE_FLAGS=-j1',
-    'if test "$IS_NIGHTLY" != ""; then',
-    'ac_add_options --disable-auto-deps',
-    'fi',
-    'ac_add_options --enable-metro',
-    '. "$topsrcdir/build/mozconfig.cache"',
-]
-
-# TODO - remove 'android-api-11' once gecko 46 is on aurora
-for p in ('android', 'android-api-9', 'android-api-11', 'android-api-15'):
-    whitelist['nightly'][p] += [
-        'ac_add_options --enable-update-channel=${MOZ_UPDATE_CHANNEL}',
-        'ac_add_options --enable-warnings-as-errors',
-        'ac_add_options --with-ccache=/usr/bin/ccache',
-        'ac_add_options --enable-profiling',
-        'ac_add_options --disable-elf-hack # --enable-elf-hack conflicts with --enable-profiling',
-        'STRIP_FLAGS="--strip-debug"',
-        'ac_add_options --with-branding=mobile/android/branding/nightly',
-    ]
-
-# TODO - remove 'android-api-11' once gecko 46 is on aurora
-whitelist['nightly']['android-api-11'] += ['MOZ_ANDROID_GECKOLIBS_AAR=1']
-whitelist['nightly']['android-api-15'] += ['MOZ_ANDROID_GECKOLIBS_AAR=1']
-
-whitelist['nightly']['android-x86'] += [
-    'ac_add_options --enable-update-channel=${MOZ_UPDATE_CHANNEL}',
-    'ac_add_options --enable-warnings-as-errors',
-    'ac_add_options --with-ccache=/usr/bin/ccache',
-    'ac_add_options --enable-profiling',
-    'ac_add_options --disable-elf-hack # --enable-elf-hack conflicts with --enable-profiling',
-    'STRIP_FLAGS="--strip-debug"',
-]
-
-for branch in ('mozilla-beta', 'comm-beta'):
-    for platform in all_platforms:
-        whitelist[branch][platform] = [
-            'ac_add_options --enable-update-channel=beta',
-            'ac_add_options --enable-official-branding',
-            'mk_add_options MOZ_MAKE_FLAGS="-j4"',
-            'if [ -n "$ENABLE_RELEASE_PROMOTION" ]; then',
-            'MOZ_AUTOMATION_UPLOAD_SYMBOLS=1',
-            'MOZ_AUTOMATION_UPDATE_PACKAGING=1',
-            'fi',
-        ]
-    for platform in all_desktop_platforms:
-        whitelist[branch][platform] += ['MOZ_AUTOMATION_SDK=${MOZ_AUTOMATION_SDK-1}']
-    whitelist[branch]['linux'] += [
-        'export MOZILLA_OFFICIAL=1',
-        'export MOZ_TELEMETRY_REPORTING=1',
-        'mk_add_options MOZ_PGO=1',
-        "mk_add_options PROFILE_GEN_SCRIPT='$(PYTHON) @MOZ_OBJDIR@/_profile/pgo/profileserver.py 10'",
-    ]
-    whitelist[branch]['linux64'] += [
-        'export MOZILLA_OFFICIAL=1',
-        'export MOZ_TELEMETRY_REPORTING=1',
-        'mk_add_options MOZ_PGO=1',
-        "mk_add_options PROFILE_GEN_SCRIPT='$(PYTHON) @MOZ_OBJDIR@/_profile/pgo/profileserver.py 10'",
-    ]
-    whitelist[branch]['win32'] += ['mk_add_options MOZ_PGO=1']
-    whitelist[branch]['win64'] += ['mk_add_options MOZ_PGO=1']
-    whitelist[branch]['android'] += ['ac_add_options --enable-updater']
-    whitelist[branch]['android-api-9'] += ['ac_add_options --enable-updater']
-    # TODO - remove api-11 once gecko 46 is on beta
-    whitelist[branch]['android-api-11'] += ['ac_add_options --enable-updater']
-    whitelist[branch]['android-api-15'] += ['ac_add_options --enable-updater']
-    whitelist[branch]['android-x86'] += ['ac_add_options --enable-updater']
-    whitelist[branch]['android-api-9'] += ['ac_add_options --with-branding=mobile/android/branding/beta']
-    # TODO - remove api-11 once gecko 46 is on beta
-    whitelist[branch]['android-api-11'] += ['ac_add_options --with-branding=mobile/android/branding/beta']
-    whitelist[branch]['android-api-15'] += ['ac_add_options --with-branding=mobile/android/branding/beta']
-
-
-for branch in ('mozilla-release', 'comm-esr52'):
-    for platform in all_platforms:
-        whitelist[branch][platform] = [
-            'ac_add_options --enable-update-channel=release',
-            'ac_add_options --enable-official-branding',
-            'mk_add_options MOZ_MAKE_FLAGS="-j4"',
-            'if [ -n "$ENABLE_RELEASE_PROMOTION" ]; then',
-            'MOZ_AUTOMATION_UPLOAD_SYMBOLS=1',
-            'MOZ_AUTOMATION_UPDATE_PACKAGING=1',
-            'fi',
-            'export BUILDING_RELEASE=1',
-        ]
-    for platform in all_desktop_platforms:
-        whitelist[branch][platform] += ['MOZ_AUTOMATION_SDK=${MOZ_AUTOMATION_SDK-1}']
-    whitelist[branch]['win32'] += ['mk_add_options MOZ_PGO=1',]
-    whitelist[branch]['win64'] += ['mk_add_options MOZ_PGO=1',]
-    whitelist[branch]['linux'] += [
-        'export MOZILLA_OFFICIAL=1',
-        'export MOZ_TELEMETRY_REPORTING=1',
-        'mk_add_options MOZ_PGO=1',
-        "mk_add_options PROFILE_GEN_SCRIPT='$(PYTHON) @MOZ_OBJDIR@/_profile/pgo/profileserver.py 10'",
-    ]
-    whitelist[branch]['linux64'] += [
-        'export MOZILLA_OFFICIAL=1',
-        'export MOZ_TELEMETRY_REPORTING=1',
-        'mk_add_options MOZ_PGO=1',
-        "mk_add_options PROFILE_GEN_SCRIPT='$(PYTHON) @MOZ_OBJDIR@/_profile/pgo/profileserver.py 10'",
-    ]
-    whitelist[branch]['android'] += ['ac_add_options --enable-updater']
-    whitelist[branch]['android-api-9'] += ['ac_add_options --enable-updater']
-    # TODO - remove 'android-api-11' once gecko 46 is on release
-    whitelist[branch]['android-api-11'] += ['ac_add_options --enable-updater']
-    whitelist[branch]['android-api-15'] += ['ac_add_options --enable-updater']
-    whitelist[branch]['android-x86'] += ['ac_add_options --enable-updater']
-
-
-if __name__ == '__main__':
-    import pprint
-    pprint.pprint(whitelist)
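
The whitelist above was consumed by release sanity checking (`release_sanity.py` below imports `verify_mozconfigs`): lines that differ between the nightly and release mozconfigs for a platform are acceptable only if whitelisted. A rough sketch of that comparison under those assumptions — the real `verify_mozconfigs` lives in `lib/python/release/sanity.py` and may differ:

```python
def unexplained_differences(nightly_lines, release_lines, allowed):
    """Return mozconfig lines present in one config but not the other
    and not covered by the whitelist. Inputs are iterables of stripped
    mozconfig lines; `allowed` is e.g. whitelist['nightly']['linux'] +
    whitelist['mozilla-release']['linux']."""
    diff = set(nightly_lines).symmetric_difference(release_lines)
    return sorted(line for line in diff if line not in set(allowed))
```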
deleted file mode 100755
--- a/buildbot-helpers/pulse_publisher.py
+++ /dev/null
@@ -1,288 +0,0 @@
-#!/usr/bin/env python
-"""
-Publisher for Pulse events.
-
-Consumes new events being written into a queue directory by the PulseStatus
-plugin
-
-see https://hg.mozilla.org/users/clegnitto_mozilla.com/mozillapulse/ for pulse
-code
-"""
-import time
-import re
-from datetime import tzinfo, timedelta, datetime
-
-from mozillapulse.messages.build import BuildMessage
-from mozilla_buildtools.queuedir import QueueDir
-from buildbot.util import json
-
-import logging
-log = logging.getLogger(__name__)
-
-ZERO = timedelta(0)
-HOUR = timedelta(hours=1)
-
-skip_exps = [
-    # Skip step events, they cause too much load
-    re.compile("^build\.\S+\.\d+\.step\."),
-]
-
-# A UTC class.
-
-
-class UTC(tzinfo):
-    """UTC"""
-
-    def utcoffset(self, dt):
-        return ZERO
-
-    def tzname(self, dt):
-        return "UTC"
-
-    def dst(self, dt):
-        return ZERO
-
-
-def transform_time(t):
-    """Transform an epoch time to a string representation of the form
-    YYYY-mm-ddTHH:MM:SS+0000"""
-    if t is None:
-        return None
-    elif isinstance(t, basestring):
-        return t
-
-    dt = datetime.fromtimestamp(t, UTC())
-    return dt.strftime('%Y-%m-%dT%H:%M:%S%z')
-
-
-def transform_times(event):
-    """Replace epoch times in event with string representations of the time"""
-    if isinstance(event, dict):
-        retval = {}
-        for key, value in event.items():
-            if key == 'times' and len(value) == 2:
-                retval[key] = [transform_time(t) for t in value]
-            else:
-                retval[key] = transform_times(value)
-    else:
-        retval = event
-    return retval
-
-
-class PulsePusher(object):
-    """
-    Publish buildbot events via pulse.
-
-    `queuedir`         - a directory to look for incoming events being written
-                         by a buildbot master
-
-    `publisher`        - an instance of mozillapulse.GenericPublisher indicating where
-                         these messages should be sent
-
-    `max_idle_time`    - number of seconds since last activity after which we'll
-                         disconnect. Set to None/0 to disable
-
-    `max_connect_time` - number of seconds since we last connected after which
-                         we'll disconnect. Set to None/0 to disable
-
-    `retry_time`       - time in seconds to wait between retries
-
-    `max_retries`      - how many times to retry
-    """
-    def __init__(self, queuedir, publisher, max_idle_time=300,
-                 max_connect_time=600, retry_time=60, max_retries=5):
-        self.queuedir = QueueDir('pulse', queuedir)
-        self.publisher = publisher
-        self.max_idle_time = max_idle_time
-        self.max_connect_time = max_connect_time
-        self.retry_time = retry_time
-        self.max_retries = max_retries
-
-        # When should we next disconnect
-        self._disconnect_timer = None
-        # When did we last have activity
-        self._last_activity = None
-        # When did we last connect
-        self._last_connection = None
-
-    def send(self, events):
-        """
-        Send events to pulse
-
-        `events` - a list of buildbot event dicts
-        """
-        if not self._last_connection and self.max_connect_time:
-            self._last_connection = time.time()
-        log.debug("Sending %i messages", len(events))
-        start = time.time()
-        skipped = 0
-        sent = 0
-        for e in events:
-            routing_key = e['event']
-            if any(exp.search(routing_key) for exp in skip_exps):
-                skipped += 1
-                log.debug("Skipping event %s", routing_key)
-                continue
-            else:
-                log.debug("Sending event %s", routing_key)
-            msg = BuildMessage(transform_times(e))
-            self.publisher.publish(msg)
-            sent += 1
-        end = time.time()
-        log.info("Sent %i messages in %.2fs (skipped %i)", sent,
-                 end - start, skipped)
-        self._last_activity = time.time()
-
-        # Update our timers
-        t = 0
-        if self.max_connect_time:
-            t = self._last_connection + self.max_connect_time
-        if self.max_idle_time:
-            if t:
-                t = min(t, self._last_activity + self.max_idle_time)
-            else:
-                t = self._last_activity + self.max_idle_time
-        if t:
-            self._disconnect_timer = t
-
-    def maybe_disconnect(self):
-        "Disconnect from pulse if our timer has expired"
-        now = time.time()
-        if self._disconnect_timer and now > self._disconnect_timer:
-            log.info("Disconnecting")
-            self.publisher.disconnect()
-            self._disconnect_timer = None
-            self._last_connection = None
-            self._last_activity = None
-
-    def loop(self):
-        """
-        Main processing loop. Read new items from the queue, push them to
-        pulse, remove processed items, and then wait for more.
-        """
-        while True:
-            self.maybe_disconnect()
-
-            # Grab any new events
-            item_ids = []
-            events = []
-            come_back_soon = False
-            try:
-                while True:
-                    item = self.queuedir.pop()
-                    if not item:
-                        break
-                    if len(events) > 50:
-                        come_back_soon = True
-                        break
-
-                    try:
-                        item_id, fp = item
-                        item_ids.append(item_id)
-                        log.debug("Loading %s", item)
-                        events.extend(json.load(fp))
-                    except:
-                        log.exception("Error loading %s", item_id)
-                        raise
-                    finally:
-                        fp.close()
-                log.info("Loaded %i events", len(events))
-                self.send(events)
-                for item_id in item_ids:
-                    log.info("Removing %s", item_id)
-                    try:
-                        self.queuedir.remove(item_id)
-                    except OSError:
-                        # Somebody (re-)moved it already, that's ok!
-                        pass
-            except:
-                log.exception("Error processing messages")
-                # Don't try again soon, something has gone horribly wrong!
-                come_back_soon = False
-                for item_id in item_ids:
-                    self.queuedir.requeue(
-                        item_id, self.retry_time, self.max_retries)
-
-            if come_back_soon:
-                # Let's do more right now!
-                log.info("Doing more!")
-                continue
-
-            # Wait for more
-            # don't wait more than our max_idle/max_connect_time
-            now = time.time()
-            to_wait = None
-            if self._disconnect_timer:
-                to_wait = self._disconnect_timer - now
-                if to_wait < 0:
-                    to_wait = None
-            log.info("Waiting for %s", to_wait)
-            self.queuedir.wait(to_wait)
-
-
-def main():
-    from optparse import OptionParser
-    from mozillapulse.publishers import GenericPublisher
-    from mozillapulse.config import PulseConfiguration
-    import logging.handlers
-    parser = OptionParser()
-    parser.set_defaults(
-        verbosity=0,
-        logfile=None,
-        max_retries=5,
-        retry_time=60,
-    )
-    parser.add_option("--passwords", dest="passwords")
-    parser.add_option("-q", "--queuedir", dest="queuedir")
-    parser.add_option("-v", "--verbose", dest="verbosity", action="count",
-                      help="increase verbosity")
-    parser.add_option("-l", "--logfile", dest="logfile",
-                      help="where to send logs")
-    parser.add_option("-r", "--max_retries", dest="max_retries", type="int",
-                      help="number of times to retry")
-    parser.add_option("-t", "--retry_time", dest="retry_time", type="int",
-                      help="seconds to wait between retries")
-
-    options, args = parser.parse_args()
-
-    # Set up logging
-    if options.verbosity == 0:
-        log_level = logging.WARNING
-    elif options.verbosity == 1:
-        log_level = logging.INFO
-    else:
-        log_level = logging.DEBUG
-
-    if not options.logfile:
-        logging.basicConfig(
-            level=log_level, format="%(asctime)s - %(message)s")
-    else:
-        logger = logging.getLogger()
-        logger.setLevel(log_level)
-        handler = logging.handlers.RotatingFileHandler(
-            options.logfile, maxBytes=1024 ** 2, backupCount=5)
-        formatter = logging.Formatter("%(asctime)s - %(message)s")
-        handler.setFormatter(formatter)
-        logger.addHandler(handler)
-
-    if not options.passwords:
-        parser.error("--passwords is required")
-    if not options.queuedir:
-        parser.error("-q/--queuedir is required")
-
-    passwords = {}
-    execfile(options.passwords, passwords, passwords)
-
-    publisher = GenericPublisher(
-        PulseConfiguration(
-            user=passwords['PULSE_USERNAME'],
-            password=passwords['PULSE_PASSWORD'],
-        ),
-        exchange=passwords['PULSE_EXCHANGE'])
-
-    pusher = PulsePusher(options.queuedir, publisher,
-                         max_retries=options.max_retries, retry_time=options.retry_time)
-    pusher.loop()
-
-if __name__ == '__main__':
-    main()
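
The hand-rolled `UTC` tzinfo class above predates `datetime.timezone`; in Python 3 the same epoch-to-`YYYY-mm-ddTHH:MM:SS+0000` conversion done by the deleted `transform_time()` collapses to a one-liner. A sketch for comparison:

```python
from datetime import datetime, timezone

def transform_time(t):
    """Python 3 equivalent of the deleted transform_time():
    epoch seconds -> 'YYYY-mm-ddTHH:MM:SS+0000'."""
    if t is None:
        return None
    if isinstance(t, str):
        return t
    return datetime.fromtimestamp(t, timezone.utc).strftime('%Y-%m-%dT%H:%M:%S%z')
```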
deleted file mode 100644
--- a/buildbot-helpers/release_sanity.py
+++ /dev/null
@@ -1,503 +0,0 @@
-#!/usr/bin/env python
-"""%prog [-d|--dryrun] [-u|--username `username`] [-b|--bypass-check]
-        [-l| --bypass-l10n-check] [-m|--bypass-mozconfig-check]
-        [-V| --version `version`] [-B --branch `branchname`]
-        [-N|--build-number `buildnumber`]
-        [-c| --release-config `releaseConfigFile`]
-        [-w| --whitelist `mozconfig_whitelist`]
-        [--l10n-dashboard-version version]
-        master:port
-
-    Wrapper script to sanity-check a release. Default behaviour is to check
-    the branch and revision specified in the release_configs, check that the
-    milestone and version number in the source repo match the expected
-    values in the release_configs, check the l10n repos & dashboard, and
-    compare the nightly and release mozconfigs for a release branch against
-    a whitelist of known differences between the two. If all tests pass,
-    the master is reconfigured and a sendchange is generated to kick off
-    the release automation.
-"""
-try:
-    import simplejson as json
-except ImportError:
-    import json
-
-import logging
-import site
-import urllib2
-
-from optparse import OptionParser
-from os import path
-from tempfile import mkdtemp
-from shutil import rmtree
-
-site.addsitedir(path.join(path.dirname(__file__), "../lib/python"))
-
-from util.file import compare
-from util.hg import make_hg_url, mercurial, update
-from release.info import readReleaseConfig, getRepoMatchingBranch
-from release.versions import getL10nDashboardVersion
-from release.l10n import getShippedLocales
-from release.platforms import getLocaleListFromShippedLocales
-from release.sanity import check_buildbot, locale_diff, \
-    sendchange, verify_mozconfigs
-from release.partials import Partial
-from util.retry import retry
-
-log = logging.getLogger(__name__)
-error_tally = set()
-HG = 'hg.mozilla.org'
-
-
-def verify_repo(branch, revision, hghost):
-    """Poll the hgweb interface for a given branch and revision to
-       make sure it exists"""
-    repo_url = make_hg_url(hghost, branch, revision=revision)
-    log.info("Checking for existence of %s..." % repo_url)
-    success = True
-    try:
-        repo_page = urllib2.urlopen(repo_url)
-        log.info("Got: %s !" % repo_page.geturl())
-    except urllib2.HTTPError:
-        log.error("Repo does not exist with required revision."
-                  " Check again, or use -b to bypass")
-        success = False
-        error_tally.add('verify_repo')
-    return success
-
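-# For illustration (branch and revision are hypothetical):
-#   verify_repo("releases/mozilla-release", "0123456789ab", HG)
-# polls the corresponding hgweb URL and returns False on a 404.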
-
-def verify_configs(configs_dir, revision, hghost, configs_repo, changesets,
-                   filename):
-    """Check the release_configs and l10n-changesets against tagged
-    revisions"""
-
-    release_config_file = path.join(configs_dir, 'mozilla', filename)
-    l10n_changesets_file = path.join(configs_dir, 'mozilla', changesets)
-    configs_url = make_hg_url(hghost, configs_repo, revision=revision,
-                              filename=path.join('mozilla', filename))
-    l10n_url = make_hg_url(hghost, configs_repo, revision=revision,
-                           filename=path.join('mozilla', changesets))
-
-    success = True
-    try:
-        official_configs = urllib2.urlopen(configs_url, timeout=10)
-        log.info("Comparing tagged revision %s to on-disk %s ..." % (
-            configs_url, filename))
-        if not compare(official_configs, release_config_file):
-            log.error("local configs do not match tagged revisions in repo")
-            success = False
-            error_tally.add('verify_configs')
-        l10n_changesets = urllib2.urlopen(l10n_url, timeout=10)
-        log.info("Comparing tagged revision %s to on-disk %s ..." % (
-            l10n_url, changesets))
-        if not compare(l10n_changesets, l10n_changesets_file):
-            log.error("local l10n-changesets do not match tagged revisions"
-                      " in repo")
-            success = False
-            error_tally.add('verify_configs')
-    except (urllib2.HTTPError, urllib2.URLError):
-        log.error("cannot find configs in repo %s" % configs_url)
-        log.error("cannot find configs in repo %s" % l10n_url)
-        success = False
-        error_tally.add('verify_configs')
-    return success
-
-
-def query_locale_revisions(l10n_changesets):
-    locales = {}
-    if l10n_changesets.endswith('.json'):
-        fh = open(l10n_changesets, 'r')
-        locales_json = json.load(fh)
-        fh.close()
-        for locale in locales_json:
-            locales[locale] = locales_json[locale]["revision"]
-    else:
-        for line in open(l10n_changesets, 'r'):
-            locale, revision = line.split()
-            locales[locale] = revision
-    return locales
-
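-# The l10n changesets file comes in two formats (values hypothetical):
-#   JSON: {"de": {"revision": "0123456789ab"}, ...}
-#   text: one "locale revision" pair per line, e.g. "de 0123456789ab"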
-
-def get_l10n_changesets(locale_url):
-    try:
-        urllib2.urlopen(locale_url, timeout=10)
-        return True
-    except urllib2.HTTPError, e:
-        reason = ""
-        if hasattr(e, 'reason'):
-            # HTTPError may lack a reason attribute on older Pythons
-            reason = e.reason
-        log.error("error checking l10n changeset %s: %d %s" % (locale_url, e.code, reason))
-        raise
-    except urllib2.URLError:
-        log.error("timeout checking l10n changeset %s" % locale_url)
-        raise
-
-
-def verify_l10n_changesets(hgHost, l10n_changesets):
-    """Checks for the existance of all l10n changesets"""
-    success = True
-    locales = query_locale_revisions(l10n_changesets)
-    for locale in sorted(locales.keys()):
-        revision = locales[locale]
-        localePath = '%(repoPath)s/%(locale)s/file/%(revision)s' % {
-            'repoPath': releaseConfig['l10nRepoPath'].strip('/'),
-            'locale': locale,
-            'revision': revision,
-        }
-        locale_url = make_hg_url(hgHost, localePath, protocol='https')
-        log.info("Checking for existence l10n changeset %s %s in repo %s ..."
-                 % (locale, revision, locale_url))
-
-        try:
-            retry(get_l10n_changesets,
-                  kwargs=dict(locale_url=locale_url), attempts=3,
-                  sleeptime=1,
-                  retry_exceptions=(urllib2.HTTPError, urllib2.URLError))
-        except (urllib2.HTTPError, urllib2.URLError):
-            # a single bad locale should fail the check, not abort the run
-            success = False
-            error_tally.add('verify_l10n')
-    return success
-
-
-def verify_l10n_shipped_locales(l10n_changesets, shipped_locales):
-    """Ensure that our l10n-changesets on the master match the repo's shipped
-    locales list"""
-    success = True
-    locales = query_locale_revisions(l10n_changesets)
-    log.info("Comparing l10n changesets to shipped locales ...")
-    diff_list = locale_diff(locales, shipped_locales)
-    if len(diff_list) > 0:
-        log.error("l10n_changesets and shipped_locales differ on locales:"
-                  " %s" % diff_list)
-        success = False
-        error_tally.add('verify_l10n_shipped_locales')
-    return success
-
-
-def verify_options(cmd_options, config):
-    """Check release_configs against command-line opts"""
-    success = True
-    if cmd_options.version and cmd_options.version != config['version']:
-        log.error("version passed in does not match release_configs")
-        success = False
-        error_tally.add('verify_options')
-    if cmd_options.buildNumber and \
-            int(cmd_options.buildNumber) != int(config['buildNumber']):
-        log.error("buildNumber passed in does not match release_configs")
-        success = False
-        error_tally.add('verify_options')
-    if not getRepoMatchingBranch(cmd_options.branch,
-                                 config['sourceRepositories']):
-        log.error("branch passed in does not exist in release config")
-        success = False
-        error_tally.add('verify_options')
-    if not cmd_options.skip_reconfig:
-        if not cmd_options.masters_json_file:
-            log.error("masters json file is required when not skipping reconfig")
-            success = False
-            error_tally.add('masters_json_file')
-    return success
-
-
-def verify_partial(platforms, product, version, build_number,
-                   protocol='http', server='archive.mozilla.org'):
-
-    partial = Partial(product, version, build_number, protocol, server)
-    log.info("Checking for existence of %s complete mar file..." % partial)
-    complete_mar_name = partial.complete_mar_name()
-    for platform in platforms:
-        log.info("Platform: %s" % platform)
-        complete_mar_url = partial.complete_mar_url(platform=platform)
-        if partial.exists(platform=platform):
-            log.info("complete mar: %s exists, url: %s" % (complete_mar_name,
-                                                           complete_mar_url))
-        else:
-            log.error("Requested file, %s, does not exist on %s"
-                      " Check again, or use -b to bypass" % (complete_mar_name,
-                                                             complete_mar_url))
-            error_tally.add('verify_partial')
-            return False
-
-    return True
-
-
-if __name__ == '__main__':
-    parser = OptionParser(__doc__)
-    parser.set_defaults(
-        check=True,
-        checkL10n=True,
-        checkL10nDashboard=True,
-        checkMozconfigs=True,
-        dryrun=False,
-        username="cltbld",
-        loglevel=logging.INFO,
-        version=None,
-        buildNumber=None,
-        branch=None,
-        whitelist=path.abspath(path.join(path.dirname(__file__),
-                                         "mozconfig_whitelist")),
-        skip_reconfig=False,
-        configs_repo_url='build/buildbot-configs',
-        configs_branch='production',
-        concurrency=8,
-        skip_verify_configs=False,
-        checkMultiLocale=True,
-    )
-    parser.add_option(
-        "-b", "--bypass-check", dest="check", action="store_false",
-        help="don't bother verifying release repo's on this master")
-    parser.add_option(
-        "-l", "--bypass-l10n-check", dest="checkL10n", action="store_false",
-        help="don't bother verifying l10n milestones")
-    parser.add_option(
-        "-m", "--bypass-mozconfig-check", dest="checkMozconfigs",
-        action="store_false", help="don't verify mozconfigs")
-    parser.add_option(
-        "-d", "--dryrun", "--dry-run", dest="dryrun", action="store_true",
-        help="just do the reconfig/checks, without starting anything")
-    parser.add_option(
-        "-u", "--username", dest="username",
-        help="specify a specific username to attach to the sendchange")
-    parser.add_option(
-        "-V", "--version", dest="version",
-        help="version string for release in format: x.x.x")
-    parser.add_option("-N", "--build-number", dest="buildNumber", type="int",
-                      help="build number for this release, "
-                      "uses release_config otherwise")
-    parser.add_option(
-        "-B", "--branch", dest="branch",
-        help="branch name for this release, uses release_config otherwise")
-    parser.add_option(
-        "-c", "--release-config", dest="releaseConfigFiles", action="append",
-        help="specify the release-config files (the first is primary)")
-    parser.add_option("-w", "--whitelist", dest="whitelist",
-                      help="whitelist for known mozconfig differences")
-    parser.add_option(
-        "--l10n-dashboard-version", dest="l10n_dashboard_version",
-        help="Override L10N dashboard version")
-    parser.add_option("--skip-reconfig", dest="skip_reconfig",
-                      action="store_true", help="Do not run reconfig")
-    parser.add_option("--configs-dir", dest="configs_dir",
-                      help="buildbot-configs directory")
-    parser.add_option("--configs-repo-url", dest="configs_repo_url",
-                      help="buildbot-configs repo URL")
-    parser.add_option("--configs-branch", dest="configs_branch",
-                      help="buildbot-configs branch")
-    parser.add_option("--masters-json-file", dest="masters_json_file",
-                      help="Path to production-masters.json file.")
-    parser.add_option('-j', dest='concurrency', type='int',
-                      help='Fabric concurrency level')
-    parser.add_option("--skip-verify-configs", dest="skip_verify_configs",
-                      action="store_true",
-                      help="Do not verify configs agains remote repos")
-    parser.add_option("--bypass-multilocale-check", dest="checkMultiLocale",
-                      action="store_false",
-                      help="Do not verify that multilocale is enabled for Fennec")
-
-    options, args = parser.parse_args()
-    if not options.dryrun and not args:
-        parser.error("Need to provide a master to sendchange to,"
-                     " or -d for a dryrun")
-    elif not options.branch:
-        parser.error("Need to provide a branch to release")
-    elif not options.releaseConfigFiles:
-        parser.error("Need to provide a release config file")
-
-    logging.basicConfig(level=options.loglevel,
-                        format="%(asctime)s : %(levelname)s : %(message)s")
-
-    releaseConfig = None
-    test_success = True
-    buildNumber = options.buildNumber
-    products = []
-
-    check_buildbot()
-    if not options.dryrun and not options.skip_reconfig:
-        from util.fabric.common import check_fabric, FabricHelper
-        check_fabric()
-
-    if options.configs_dir:
-        configs_dir = options.configs_dir
-        cleanup_configs = False
-    else:
-        cleanup_configs = True
-        configs_dir = mkdtemp()
-        remote = make_hg_url(HG, options.configs_repo_url)
-        retry(mercurial, args=(remote, configs_dir),
-              kwargs={'branch': options.configs_branch})
-        update(configs_dir, options.configs_branch)
-
-    # https://bugzilla.mozilla.org/show_bug.cgi?id=678103#c5
-    # This goes through the list of config files in reverse order, which is a
-    # hacky way of making sure that the config file that's listed first is the
-    # one that's loaded in releaseConfig for the sendchange.
-    for releaseConfigFile in list(reversed(options.releaseConfigFiles)):
-        abs_release_config_file = path.join(configs_dir, 'mozilla',
-                                            releaseConfigFile)
-        releaseConfig = readReleaseConfig(abs_release_config_file)
-        products.append(releaseConfig['productName'])
-
-        if not options.buildNumber:
-            log.warn("No buildNumber specified, using buildNumber in"
-                     " release_config, which may be out of date!")
-            options.buildNumber = releaseConfig['buildNumber']
-
-        if options.check:
-            site.addsitedir(path.join(configs_dir, 'mozilla'))
-            from config import BRANCHES
-            source_repo = 'mozilla'
-            try:
-                branchConfig = BRANCHES[options.branch]
-            except KeyError:
-                from thunderbird_config import BRANCHES
-                branchConfig = BRANCHES[options.branch]
-                source_repo = 'comm'
-
-            # Match command line options to defaults in release_configs
-            if not verify_options(options, releaseConfig):
-                test_success = False
-                log.error("Error verifying command-line options,"
-                          " attempting checking repo")
-
-            # verify that mozconfigs for this release pass diff with nightly,
-            # compared to a whitelist
-            try:
-                repo_path = \
-                    releaseConfig['sourceRepositories'][source_repo]['path']
-                revision = \
-                    releaseConfig[
-                        'sourceRepositories'][source_repo]['revision']
-            except KeyError:
-                try:
-                    repo_path = \
-                        releaseConfig['sourceRepositories']['mobile']['path']
-                    revision = \
-                        releaseConfig[
-                            'sourceRepositories']['mobile']['revision']
-                except KeyError:
-                    log.error("Can't determine sourceRepo for mozconfigs")
-            nightly_mozconfigs = {}
-            for p in releaseConfig['mozconfigs']:
-                nightly_mozconfigs[p] = branchConfig['platforms'][p]['src_mozconfig']
-            if options.checkMozconfigs and \
-                    not verify_mozconfigs(
-                        repo_path,
-                        revision,
-                        branchConfig['hghost'],
-                        releaseConfig['productName'],
-                        releaseConfig['mozconfigs'],
-                        nightly_mozconfigs,
-                        options.whitelist):
-                test_success = False
-                error_tally.add('verify_mozconfig')
-                log.error("Error verifying mozconfigs")
-
-            # verify that the release_configs on-disk match the tagged
-            # revisions in hg
-            l10nRevisionFile = path.join(configs_dir, 'mozilla',
-                                         releaseConfig['l10nRevisionFile'])
-            if not options.skip_verify_configs and \
-                    not verify_configs(
-                        configs_dir,
-                        "%s_BUILD%s" % (releaseConfig['baseTag'], buildNumber),
-                        branchConfig['hghost'],
-                        options.configs_repo_url,
-                        releaseConfig['l10nRevisionFile'],
-                        releaseConfigFile):
-                test_success = False
-                log.error("Error verifying configs")
-
-            if options.checkL10n:
-                # verify that l10n changesets exist
-                if not verify_l10n_changesets(branchConfig['hghost'],
-                                              l10nRevisionFile):
-                    test_success = False
-                    log.error("Error verifying l10n changesets")
-
-                if options.checkMultiLocale:
-                    if releaseConfig.get('enableMultiLocale'):
-                        f = open(l10nRevisionFile)
-                        if 'multilocale' not in f.read():
-                            test_success = False
-                            log.error("MultiLocale enabled but not present in l10n changesets")
-                        f.close()
-
-                # verify that l10n changesets match the shipped locales
-                if releaseConfig.get('shippedLocalesPath'):
-                    sr = releaseConfig['sourceRepositories'][source_repo]
-                    sourceRepoPath = sr['path']
-                    shippedLocales = getLocaleListFromShippedLocales(
-                        getShippedLocales(
-                            releaseConfig['productName'],
-                            releaseConfig['appName'],
-                            releaseConfig['version'],
-                            releaseConfig['buildNumber'],
-                            sourceRepoPath,
-                            'https://hg.mozilla.org',
-                            sr['revision'],
-                        ))
-                    # l10n_changesets do not have an entry for en-US
-                    if 'en-US' in shippedLocales:
-                        shippedLocales.remove('en-US')
-                    if not verify_l10n_shipped_locales(l10nRevisionFile,
-                                                       shippedLocales):
-                        test_success = False
-                        log.error("Error verifying l10n_changesets matches"
-                                  " shipped_locales")
-
-            # verify that the relBranch + revision in the release_configs
-            # exists in hg
-            for sr in releaseConfig['sourceRepositories'].values():
-                sourceRepoPath = sr['path']
-                if not verify_repo(sourceRepoPath, sr['revision'],
-                                   branchConfig['hghost']):
-                    test_success = False
-                    log.error("Error verifying repos")
-
-            # check partial updates
-            partials = releaseConfig.get('partialUpdates') or {}
-            if 'extraUpdates' in releaseConfig:
-                # dicts have no extend(); merge the extra partials in
-                partials.update(releaseConfig['extraUpdates'])
-            product = releaseConfig['productName']
-            platforms = releaseConfig['enUSPlatforms']
-            if partials:
-                for partial in partials:
-                    build_number = partials[partial]['buildNumber']
-                    # when bug 839926 lands, buildNumber must be None for
-                    # releases, but it might have a value for betas (betas
-                    # *might* use unreleased builds; see bug 1091694 c2)
-                    if not verify_partial(platforms, product, partial,
-                                          build_number,
-                                          server=releaseConfig['ftpServer']):
-                        test_success = False
-                        log.error("Error verifying partials")
-
-    if test_success:
-        if not options.dryrun:
-            if not options.skip_reconfig:
-                fabric_helper = FabricHelper(
-                    masters_json_file=options.masters_json_file,
-                    concurrency=options.concurrency,
-                    roles=['build', 'scheduler'])
-                fabric_helper.update_and_reconfig()
-            sourceRepoPath = getRepoMatchingBranch(
-                options.branch, releaseConfig['sourceRepositories'])['path']
-            sendchange(
-                sourceRepoPath,
-                "%s_RELEASE" % releaseConfig['baseTag'],
-                options.username,
-                args[0],
-                products,
-            )
-        else:
-            log.info("Tests Passed! Did not run reconfig/sendchange."
-                     " Rerun without `-d`")
-            if cleanup_configs:
-                log.info("Removing temporary directory: %s" % configs_dir)
-                rmtree(configs_dir)
-    else:
-        log.fatal("Tests Failed! Not running sendchange!")
-        log.fatal("Failed tests (run with -b to skip) :")
-        for error in error_tally:
-            log.fatal(error)
-        if cleanup_configs:
-            log.info("Not removing temporary directory: %s" % configs_dir)
-        exit(1)
deleted file mode 100644
--- a/buildfarm/breakpad/cleanup-breakpad-symbols.py
+++ /dev/null
@@ -1,204 +0,0 @@
-#!/bin/env python
-#
-# Breakpad symbol cleanup
-#
-# When the Breakpad symbol store gets built as part of the
-# Mozilla build process, it creates a file listing the symbols
-# for that particular build.  The files are named:
-# ${product}-${version}-${OS_ARCH}-${BUILD_ID}[-${BUILDID_EXTRA}]-symbols.txt, i.e.:
-# firefox-3.0a5pre-WINNT-2007042804-symbols.txt
-# $BUILDID_EXTRA is an optional identifier for feature branches, etc.
-#
-# This script, given a path to a symbol store, removes symbols
-# for the oldest builds there.
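-#
-# For illustration, the sample name above parses as:
-#   product=firefox, version=3.0a5pre, OS_ARCH=WINNT, BUILD_ID=2007042804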
-
-import os
-import os.path
-import sys
-import re
-from datetime import datetime, timedelta
-from optparse import OptionParser
-
-# options, tweak as desired
-# maximum number of nightlies to keep per branch
-nightliesPerBin = 30
-# maximum age permitted for a set of symbols, in days.
-# used to clean up old feature branches, for example
-maxNightlyAge = timedelta(45)
-# end options
-
-# RE to get the version number without alpha/beta designation
-versionRE = re.compile(r"^(\d+\.\d+)")
-
-parser = OptionParser(usage="usage: %prog [options] <symbol path> [symbol indexes to remove]")
-parser.add_option("-d", "--dry-run",
-                  action="store_true", dest="dry_run", default=False,
-                  help="Don't delete anything, just print a list of actions")
-parser.add_option("-r", "--remove-these-symbols",
-                  action="store_true", dest="remove_symbols",
-                  help="Remove specified symbol indexes and their contained symbols")
-(options, args) = parser.parse_args()
-
-if not args:
-    print >>sys.stderr, "Must specify a symbol path!"
-    sys.exit(1)
-symbolPath = args[0]
-if (options.remove_symbols and len(args) < 2) or (not options.remove_symbols and len(args) > 1):
-    print >>sys.stderr, "Bad commandline"
-    sys.exit(1)
-
-symbols_to_remove = set()
-if options.remove_symbols:
-    symbols_to_remove = set(os.path.basename(a) for a in args[1:])
-
-# Cheesy atom implementation, so we don't have to store symbol filenames
-# multiple times.
-atoms = []
-atomdict = {}
-
-
-def atomize(s):
-    if s in atomdict:
-        return atomdict[s]
-    a = len(atoms)
-    atoms.append(s)
-    atomdict[s] = a
-    return a
-
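-# For illustration: atomize("xul.sym") returns the same small integer on
-# every call (filename hypothetical), so symbol lists can be stored as
-# lists of ints rather than repeated strings.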
-
-def sortByBuildID(x, y):
-    "Sort two symbol index filenames by the Build IDs contained within"
-    (a, b) = (os.path.basename(x).split('-')[3],
-              os.path.basename(y).split('-')[3])
-    return cmp(a, b)
-
-buildidRE = re.compile(r"(\d\d\d\d)(\d\d)(\d\d)(\d\d)")
-
-
-def datetimefrombuildid(f):
-    """Given a symbol index filename, return a datetime representing the
-    Build ID contained within it."""
-    m = buildidRE.match(os.path.basename(f).split('-')[3])
-    if m:
-        return datetime(*[int(x) for x in m.groups()])
-    # punt
-    return datetime.now()
-
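-# For illustration, with the sample index name from the header comment,
-# datetimefrombuildid("firefox-3.0a5pre-WINNT-2007042804-symbols.txt")
-# returns datetime(2007, 4, 28, 4).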
-
-def adddefault(d, key, default):
-    "If d[key] does not exist, set d[key] = default."
-    if key not in d:
-        d[key] = default
-
-
-def addFiles(symbolindex, filesDict):
-    """Return a list of atoms representing the symbols in this index file.
-    Also add 1 to filesDict[atom] for each symbol."""
-    l = []
-    try:
-        sf = open(symbolindex, "r")
-        for line in sf:
-            a = atomize(line.rstrip())
-            l.append(a)
-            adddefault(filesDict, a, 0)
-            filesDict[a] += 1
-        sf.close()
-    except IOError:
-        pass
-    return l
-
-
-def markDeleteSymbols(symbols, filesDict):
-    "Decrement reference count by one for each symbol in this symbol index."
-    for a in symbols:
-        filesDict[a] -= 1
-
-
-def deletefile(f):
-    if options.dry_run:
-        print "rm ", f
-    else:
-        if os.path.isfile(f):
-            try:
-                os.unlink(f)
-            except OSError:
-                print >>sys.stderr, "Error removing file: ", f
-
-builds = {}
-allfiles = {}
-buildfiles = {}
-print "[1/4] Reading symbol index files..."
-# get symbol index files, there's one per build
-for f in os.listdir(symbolPath):
-    if not (os.path.isfile(os.path.join(symbolPath, f)) and
-            f.endswith("-symbols.txt")):
-        continue
-    # increment reference count of all symbol files listed in the index
-    # and also keep a list of files from that index
-    buildfiles[f] = addFiles(os.path.join(symbolPath, f), allfiles)
-    # drop -symbols.txt
-    parts = f.split("-")[:-1]
-    (product, version, osName, buildId) = parts[:4]
-    # extract branch
-    # nightly build versions end with "pre" (older branches)
-    # or "a1" (new mozilla-central) or "a2" (aurora)
-    if version.endswith("a1"):
-        branch = "nightly"
-    elif version.endswith("a2"):
-        branch = "aurora"
-    elif version.endswith("pre"):
-        m = versionRE.match(version)
-        if m:
-            branch = m.group(0)
-        else:
-            branch = version
-    else:
-        branch = "release"
-    # group into bins by branch-product-os[-featurebranch]
-    identifier = "%s-%s-%s" % (branch, product, osName)
-    if len(parts) > 4:  # extra buildid, probably
-        identifier += "-" + "-".join(parts[4:])
-    adddefault(builds, identifier, [])
-    builds[identifier].append(f)
-    if f in symbols_to_remove:
-        markDeleteSymbols(buildfiles[f], allfiles)
-        deletefile(os.path.join(symbolPath, f))
-
-print "[2/4] Looking for symbols to delete..."
-if not symbols_to_remove:
-    oldestdate = datetime.now() - maxNightlyAge
-    for bin in builds:
-        if bin.startswith("release"):
-            # Skip release builds for now
-            continue
-        builds[bin].sort(sortByBuildID)
-        if len(builds[bin]) > nightliesPerBin:
-            # delete the oldest builds if there are too many
-            for f in builds[bin][:-nightliesPerBin]:
-                markDeleteSymbols(buildfiles[f], allfiles)
-                deletefile(os.path.join(symbolPath, f))
-            builds[bin] = builds[bin][-nightliesPerBin:]
-        # now look for really old symbol files
-        for f in builds[bin]:
-            if datetimefrombuildid(f) < oldestdate:
-                markDeleteSymbols(buildfiles[f], allfiles)
-                deletefile(os.path.join(symbolPath, f))
-
-print "[3/4] Deleting symbols..."
-# now delete all files marked for deletion
-for a, refcnt in allfiles.iteritems():
-    if refcnt == 0:
-        deletefile(os.path.join(symbolPath, atoms[a]))
-
-print "[4/4] Pruning empty directories..."
-# NOTE: the early exit here makes the directory pruning below unreachable;
-# it appears to have been deliberately disabled.
-sys.exit(0)
-# now delete empty directories.
-for root, dirs, files in os.walk(symbolPath, topdown=False):
-    for d in dirs:
-        fullpath = os.path.join(root, d)
-        if len(os.listdir(fullpath)) == 0:
-            if options.dry_run:
-                print "rm -rf ", fullpath
-            else:
-                os.rmdir(fullpath)
-print "Done!"
deleted file mode 100755
--- a/buildfarm/breakpad/post-symbol-upload.py
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/env python
-#
-# Post-symbol upload script.
-#
-# This script is run on dm-symbolpush01 after symbols are uploaded
-# from build slaves. Currently it's a no-op, but it will be modified
-# to convert symbols to a different format in the very near future.
-
-if __name__ == '__main__':
-    pass
deleted file mode 100755
--- a/buildfarm/maintenance/add-master.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/python
-try:
-    import simplejson
-except ImportError:
-    import json as simplejson
-import sys
-import os
-import re
-
-
-def new_master(hostname, type_, instance=1):
-    assert type_ in ('build', 'tests', 'try')
-    # instance arrives as a string when passed on the command line
-    instance = int(instance)
-    localname = "%s%i" % (type_, instance)
-    hostname_data = re.match(
-        "buildbot-master(\d{2})\.build\.(\w+)\.mozilla\.com", hostname)
-    assert hostname_data
-    host_abbrev = "bm%s" % hostname_data.group(1)
-    datacentre = hostname_data.group(2)
-
-    if type_ == 'build':
-        port_base = 0 + instance
-    elif type_ == 'try':
-        port_base = 100 + instance
-    elif type_ == 'tests':
-        port_base = 200 + instance
-
-    return {
-        "hostname": hostname,
-        "enabled": False,
-        "environment": "production",
-        "master_dir": "/builds/buildbot/%s/master" % localname,
-        "name": "%s-%s" % (host_abbrev, localname),
-        "role": type_,
-        "basedir": "/builds/buildbot/%s" % localname,
-        "buildbot_version": "0.8.2",
-        "buildbot_bin": "/builds/buildbot/%s/bin/buildbot" % localname,
-        "buildbot_branch": "production-0.8",
-        "buildbot_python": "/builds/buildbot/%s/bin/python" % localname,
-        "buildbot_setup": "/builds/buildbot/%s/buildbot/master/setup.py" % localname,
-        "bbconfigs_dir": "/builds/buildbot/%s/buildbot-configs" % localname,
-        "db_name": "%s:/builds/buildbot/%s/master" % (hostname, localname),
-        "bbcustom_dir": "/builds/buildbot/%s/buildbotcustom" % localname,
-        "bbcustom_branch": "production-0.8",
-        "bbconfigs_branch": "production",
-        "tools_dir": "/builds/buildbot/%s/tools" % localname,
-        "tools_branch": "default",
-        "ssh_port": 7000 + port_base,
-        "http_port": 8000 + port_base,
-        "pb_port": 9000 + port_base,
-        "datacentre": datacentre,
-    }
-
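-# For illustration (hypothetical host):
-#   new_master("buildbot-master07.build.scl3.mozilla.com", "tests", 2)
-# yields name "bm07-tests2" with ssh/http/pb ports 7202/8202/9202.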
-fn = sys.argv[1]
-masters = simplejson.load(open(fn))
-masters.append(new_master(*sys.argv[2:]))
-
-os.rename(fn, '%s.bak' % fn)
-
-open(fn, 'w').write(simplejson.dumps(masters, indent=2, sort_keys=True))
deleted file mode 100755
--- a/buildfarm/maintenance/archive-debug-builds.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-
-BASE_PATH=/home/ftp/pub/mozilla.org/firefox/tinderbox-builds
-ARCHIVE_PATH=/home/ftp/pub/mozilla.org/firefox/nightly
-DATE_BASE=$(date +%Y/%m)
-
-DIRS=$(ls $BASE_PATH | grep 'mozilla.*debug$')
-for dir in $DIRS
-do
-  branch=$(echo $dir | cut -d '-' -f1,2)
-  builddir="$BASE_PATH/$dir"
-  DATE_DIR=$(date +%Y-%m-%d-$branch-debug)
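-  # e.g. DATE_DIR=2018-09-24-mozilla-central-debug for a hypothetical
-  # "mozilla-central-...-debug" build dir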
-  cd $builddir
-  archivedir="$(find . -maxdepth 1 -type d -mtime -1 -name '1?????????' | sort -n | tail -1 | cut -c3-)"
-  if [[ -n $archivedir && -d "$builddir/$archivedir" ]]; then
-    files="$(find $builddir/$archivedir/ -name jsshell\* -o -name \*.dmg -o -name \*.txt -o -name \*.bz2 -o -name \*.exe)"
-    if [ -n "$files" ]; then
-      echo "Creating archive directory: $ARCHIVE_PATH/$DATE_BASE/$DATE_DIR"
-      mkdir -p "$ARCHIVE_PATH/$DATE_BASE/$DATE_DIR"
-      if [ ! -e $ARCHIVE_PATH/$DATE_DIR ]; then
-        ln -s $DATE_BASE/$DATE_DIR $ARCHIVE_PATH/$DATE_DIR
-      fi
-      for file in $files
-      do
-        echo "Found recent nightly: $file"
-        backup=$(basename $file | sed s/en-US\./en-US.debug-/)
-        if [ -e $file ]; then
-          echo "Copying $file to $ARCHIVE_PATH/$DATE_BASE/$DATE_DIR/$backup"
-          cp -a $file "$ARCHIVE_PATH/$DATE_BASE/$DATE_DIR/$backup"
-        fi
-      done
-    fi
-  else
-    echo "skipping invalid dir"
-  fi
-done
deleted file mode 100755
--- a/buildfarm/maintenance/buildbot-wrangler.py
+++ /dev/null
@@ -1,217 +0,0 @@
-#!/usr/bin/python
-"""%prog [options] master_dir
-
-Reconfigures the buildbot master in master_dir, waiting for it to finish.
-
-Any errors generated will be printed to stderr.
-"""
-import os
-import sys
-import time
-import signal
-import subprocess
-import urllib
-
-
-def graceful_stop(port):
-    url = "http://localhost:%s/shutdown" % port
-    data = urllib.urlencode(dict(submit='Clean Shutdown'))
-    try:
-        urllib.urlopen(url, data)
-    except IOError:
-        pass
-
-
-class Watcher:
-    def __init__(self, fname):
-        self.fname = fname
-
-        self.started = False
-
-        if os.path.exists(fname):
-            os.utime(fname, None)
-        else:
-            try:
-                open(fname, 'w').close()
-            except (IOError, OSError):
-                print "Could not create %s." % fname
-                sys.exit(1)
-        self.fp = open(fname)
-        self.fp.seek(0, 2)  # SEEK_END
-        self.current_inode = os.fstat(self.fp.fileno()).st_ino
-
-
-class StopWatcher(Watcher):
-    def watch_logfile(self):
-        in_error = False
-        while True:
-            time.sleep(0.5)
-            if not self.fp:
-                try:
-                    self.fp = open(self.fname)
-                    self.current_inode = os.fstat(self.fp.fileno()).st_ino
-                except IOError:
-                    continue
-
-            for line in self.fp.readlines():
-                if not self.started:
-                    if "Received SIGTERM" in line:
-                        self.started = True
-                    elif "Initiating clean shutdown" in line:
-                        self.started = True
-                    else:
-                        # Don't do anything else until we've actually started
-                        # the shutdown
-                        continue
-
-                # Print out "Waiting for n build(s) to finish"
-                if "Waiting for" in line:
-                    print >> sys.stderr, line.rstrip()
-
-                # Print out everything until the next blank line
-                if in_error:
-                    if not line.strip():
-                        in_error = False
-                        continue
-                    print >> sys.stderr, line.rstrip()
-
-                if "Server Shut Down" in line:
-                    return
-
-                if "Unhandled Error" in line:
-                    in_error = True
-                    print >> sys.stderr, line.rstrip()
-
-            inode = os.stat(self.fname).st_ino
-            if inode != self.current_inode:
-                # Log has been replaced.  Try and open it again later
-                self.fp = None
-                self.current_inode = None
-
-
-class ReconfigWatcher(Watcher):
-    def watch_logfile(self):
-        """Watches the logfile `fname` for reconfigure messages and errors.
-
-        If "configuration update complete" is seen, this function returns.
-
-        If any unhandled errors are seen, they are printed out to stderr.
-        """
-        in_error = False
-        while True:
-            time.sleep(0.5)
-            if not self.fp:
-                try:
-                    self.fp = open(self.fname)
-                    self.current_inode = os.fstat(self.fp.fileno()).st_ino
-                except IOError:
-                    continue
-
-            for line in self.fp.readlines():
-                if not self.started:
-                    if "loading configuration from" in line:
-                        self.started = True
-                    else:
-                        # Don't do anything else until we've actually started
-                        # the reconfig
-                        continue
-
-                # Print out everything until the next blank line
-                if in_error:
-                    if not line.strip():
-                        in_error = False
-                        continue
-                    print >> sys.stderr, line.rstrip()
-
-                if "configuration update complete" in line:
-                    return
-
-                if "configuration update failed" in line:
-                    raise Exception("reconfig failed")
-
-                if "Unhandled Error" in line:
-                    in_error = True
-                    print >> sys.stderr, line.rstrip()
-
-            inode = os.stat(self.fname).st_ino
-            if inode != self.current_inode:
-                # Log has been replaced.  Try and open it again later
-                self.fp = None
-                self.current_inode = None
-
-if __name__ == '__main__':
-    args = sys.argv[1:]
-    if len(args) <= 1 or len(args) >= 4:
-        print "Usage: buildbot-reconfig.py restart|reconfig|stop master_dir [http_port]"
-        sys.exit(1)
-
-    action = args[0]
-    if action not in ("restart", "reconfig", "stop", "start", "graceful_stop", "graceful_restart"):
-        print "Unknown action", action
-        sys.exit(1)
-
-    master_dir = args[1]
-    twistd_log = os.path.join(master_dir, "twistd.log")
-    pidfile = os.path.join(master_dir, "twistd.pid")
-
-    pid = None
-    if os.path.exists(pidfile):
-        pid = int(open(pidfile).read())
-    elif action == "reconfig":
-        print "Master doesn't appear to be running"
-        sys.exit(1)
-
-    if action == "reconfig":
-        w = ReconfigWatcher(twistd_log)
-        os.kill(pid, signal.SIGHUP)
-        w.watch_logfile()
-
-    elif action == "restart":
-        if pid:
-            w = StopWatcher(twistd_log)
-            os.kill(pid, signal.SIGTERM)
-            w.watch_logfile()
-
-        w = ReconfigWatcher(twistd_log)
-        null = open(os.devnull, 'w')
-        p = subprocess.Popen(
-            ['make', 'start', master_dir], stdout=null, stderr=null)
-        w.watch_logfile()
-
-    elif action == "stop":
-        if pid:
-            w = StopWatcher(twistd_log)
-            os.kill(pid, signal.SIGTERM)
-            w.watch_logfile()
-        else:
-            print "master already stopped"
-
-    elif action == "graceful_stop":
-        if pid:
-            w = StopWatcher(twistd_log)
-            graceful_stop(args[2])
-            w.watch_logfile()
-        else:
-            print "master already stopped"
-
-    elif action == "graceful_restart":
-        if pid:
-            w = StopWatcher(twistd_log)
-            graceful_stop(args[2])
-            w.watch_logfile()
-
-        w = ReconfigWatcher(twistd_log)
-        null = open(os.devnull, 'w')
-        p = subprocess.Popen(
-            ['make', 'start', master_dir], stdout=null, stderr=null)
-        w.watch_logfile()
-
-    elif action == "start":
-        if pid:
-            print "Master is already running"
-        else:
-            w = ReconfigWatcher(twistd_log)
-            null = open(os.devnull, 'w')
-            p = subprocess.Popen(
-                ['make', 'start', master_dir], stdout=null, stderr=null)
-            w.watch_logfile()
deleted file mode 100755
--- a/buildfarm/maintenance/check-master-json.py
+++ /dev/null
@@ -1,144 +0,0 @@
-#!/usr/bin/python
-try:
-    import simplejson as json
-except ImportError:
-    import json
-import urllib
-import logging
-import re
-
-log = logging.getLogger(__name__)
-
-
-def check_masters(masters):
-    retval = True
-    for master in masters:
-        if not check_master(master):
-            retval = False
-    return retval
-
-
-def load_masters(url):
-    if 'http' in url:
-        fp = urllib.urlopen(url)
-    else:
-        fp = open(url)
-    return json.load(fp)
-
-
-def check_master(master):
-    # Check required keys
-    name = master['name']
-    required_keys = ('hostname', 'enabled', 'master_dir', 'name', 'role',
-                     'basedir', 'bbconfigs_dir', 'db_name', 'bbcustom_dir',
-                     'bbcustom_branch', 'bbconfigs_branch', 'tools_dir',
-                     'tools_branch', 'datacentre', 'buildbot_bin', 'buildbot_branch',
-                     'buildbot_python', 'buildbot_setup', 'environment')
-    opt_keys = ('http_port', 'ssh_port', 'pb_port', 'buildbot_version',
-                'limit_fx_platforms', 'limit_tb_platforms',
-                'release_branches', 'thunderbird_release_branches', 'mobile_release_branches')
-    int_keys = ('http_port', 'ssh_port', 'pb_port')
-
-    for k in required_keys:
-        if k not in master:
-            log.error("%s - missing key %s", name, k)
-            return False
-
-    for k in master.keys():
-        if k not in required_keys + opt_keys:
-            log.error("%s - unknown key %s", name, k)
-            return False
-
-    for k in int_keys:
-        if k in master:
-            if type(master[k]) not in (int, long):
-                log.error("%s - non-integer key %s", name, k)
-                return False
-
-    hostname, domain = master['hostname'].split(".", 1)
-
-    # Check domain
-    if not re.match(r"(build|srv\.releng)\.(\w+)\.mozilla\.com", domain):
-        log.error("%s - bad domain %s", name, domain)
-        return False
-
-    # Check hostname
-    host_num = re.match(r"buildbot-master(\d{2})", hostname)
-    if not host_num:
-        log.error("%s - bad hostname %s", name, hostname)
-        return False
-
-    # Check short name
-    role = master['role']
-    abbrev = "bm%s" % host_num.group(1)
-    exp = r"%s-%s(\d)" % (abbrev, role)
-    instance_num = re.match(exp, name)
-    if not instance_num:
-        # TODO: schedulers don't follow this logic
-        log.error("%s - bad name (doesn't match %s)", name, exp)
-        return False
-
-    # Check port numbers
-    instance_num = int(instance_num.group(1))
-    instance = "%s%i" % (role, instance_num)
-    if role == 'build':
-        required_ports = ['http', 'ssh', 'pb']
-        role_offset = 0
-    elif role == 'try':
-        required_ports = ['http', 'ssh', 'pb']
-        role_offset = 100
-    elif role == 'tests':
-        required_ports = ['http', 'ssh', 'pb']
-        role_offset = 200
-    elif role == 'scheduler':
-        required_ports = ['ssh', 'pb']
-        role_offset = 300
-    else:
-        log.error("%s - unknown role %s", name, role)
-        return False
-
-    ports = {
-        "ssh": 7000 + role_offset + instance_num,
-        "http": 8000 + role_offset + instance_num,
-        "pb": 9000 + role_offset + instance_num,
-    }
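-    # e.g. a hypothetical "bm01-try3" must use ssh=7103, http=8103, pb=9103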
-    for proto in required_ports:
-        master_port = master.get("%s_port" % proto)
-        if master_port != ports[proto]:
-            log.error("%s - bad %s port (got %i, expected %i)",
-                      name, proto, master_port, ports[proto])
-            return False
-
-    # Check master_dir
-    if master['master_dir'] != "/builds/buildbot/%s/master" % instance:
-        # TODO this needs tweaking for tests
-        log.error("%s - bad master_dir %s" % (name, master.get(
-            'master_dir', 'None')))
-        return False
-
-    # Check basedir
-    if master['basedir'] != "/builds/buildbot/%s" % instance:
-        log.error("%s - bad basedir", name)
-        return False
-
-    # Check db_name
-    db_name = '%s:%s' % (master['hostname'], master['master_dir'])
-    if master['db_name'] != db_name:
-        log.error("%s - bad db_name (should be %s)", name, db_name)
-        return False
-
-    # Check datacentre
-    if master['datacentre'] not in ('scl3',):
-        log.error("%s - bad datacentre", name)
-        return False
-
-    if master['datacentre'] not in master['hostname']:
-        log.error("%s - datacentre/hostname mismatch", name)
-        return False
-
-    return True
-
-if __name__ == '__main__':
-    logging.basicConfig()
-    masters = load_masters('production-masters.json')
-    check_masters(masters)
deleted file mode 100755
--- a/buildfarm/maintenance/cleanup_db.py
+++ /dev/null
@@ -1,304 +0,0 @@
-#!/usr/bin/env python
-import sqlalchemy as sa
-import time
-import json
-
-import logging
-log = logging.getLogger(__name__)
-
-
-# From
-# http://stackoverflow.com/questions/5631078/sqlalchemy-print-the-actual-query
-def query_to_str(statement, bind=None):
-    """
-    print a query, with values filled in
-    for debugging purposes *only*
-    for security, you should always separate queries from their values
-    please also note that this function is quite slow
-    """
-    import sqlalchemy.orm
-    if isinstance(statement, sqlalchemy.orm.Query):
-        if bind is None:
-            bind = statement.session.get_bind(
-                statement._mapper_zero_or_none())
-        statement = statement.statement
-    elif bind is None:
-        bind = statement.bind
-
-    dialect = bind.dialect
-    compiler = statement._compiler(dialect)
-
-    class LiteralCompiler(compiler.__class__):
-        def visit_bindparam(
-                self, bindparam, within_columns_clause=False,
-                literal_binds=False, **kwargs
-        ):
-            return super(LiteralCompiler, self).render_literal_bindparam(
-                bindparam, within_columns_clause=within_columns_clause,
-                literal_binds=literal_binds, **kwargs)
-
-    compiler = LiteralCompiler(dialect, statement)
-    return compiler.process(statement)
-
-
-def cleaner_upper(select_query, delete_queries):
-    """
-    Cleans stuff up.
-
-    Runs select query, which should return a list of ids to delete.
-    Chunks of ids are passed to each query of delete_queries.
-
-    delete queries should be functions that accept a list of ids to delete.
-
-    Returns once select_query no longer returns any results
-
-    Sleeps between rounds
-    """
-    while True:
-        t = time.time()
-        log.debug("finding rows: %s", query_to_str(select_query))
-        rows = select_query.execute()
-        log.info("found %i rows in %.2fs", rows.rowcount, time.time() - t)
-        if rows.rowcount == 0:
-            break
-
-        # Delete 100 at a time
-        ids = [row[0] for row in rows]
-        chunk_size = 100
-        for i in range(0, len(ids), chunk_size):
-            for q in delete_queries:
-                t = time.time()
-                q(ids[i:i + chunk_size])
-                sleep_time = max(0.1, (time.time() - t) * 4.0)
-                log.debug("sleeping for %.2fs", sleep_time)
-                time.sleep(sleep_time)
-
-
-def deleter(column):
-    """
-    Returns a function that accepts a list of ids, and deletes
-    rows that match.
-    """
-    def delete_func(ids):
-        t = time.time()
-        q = column.table.delete().where(column.in_(ids))
-        log.debug(query_to_str(q))
-        n = q.execute().rowcount
-        log.info("deleted %i rows from %s in %.2fs", n, column.table.name,
-                 time.time() - t)
-    return delete_func
-
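-# A minimal usage sketch of cleaner_upper/deleter (table and column names
-# here are hypothetical):
-#   q = sa.select([t_old.c.id]).where(t_old.c.ts < cutoff).limit(1000)
-#   cleaner_upper(q, [deleter(t_child.c.old_id), deleter(t_old.c.id)])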
-
-def cleanup_statusdb_builds(meta, cutoff):
-    log.info("Cleaning up builds before %s", cutoff)
-    t_builds = sa.Table('builds', meta, autoload=True)
-    t_steps = sa.Table('steps', meta, autoload=True)
-    t_properties = sa.Table('build_properties', meta, autoload=True)
-    t_schedulerdb_requests = sa.Table('schedulerdb_requests', meta,
-                                      autoload=True)
-
-    builds_q = sa.select([t_builds.c.id]).\
-        where(t_builds.c.starttime < cutoff).\
-        order_by(t_builds.c.starttime.asc()).\
-        limit(10000)
-
-    cleaner_upper(builds_q, [
-        deleter(t_steps.c.build_id),
-        deleter(t_properties.c.build_id),
-        deleter(t_schedulerdb_requests.c.status_build_id),
-        deleter(t_builds.c.id),
-    ])
-    log.info("Finished cleaning up builds")
-
-
-def cleanup_statusdb_orphaned_steps(meta):
-    log.info("Cleaning up orphaned steps")
-    t_builds = sa.Table('builds', meta, autoload=True)
-    t_steps = sa.Table('steps', meta, autoload=True)
-
-    q = sa.select(
-        [t_steps.c.build_id],
-        # nopep8 - we really do need to use == None
-        t_builds.c.id == None,
-        from_obj=[t_steps.outerjoin(t_builds, t_steps.c.build_id ==
-                                    t_builds.c.id)],
-        distinct=True,
-    ).limit(10000)
-
-    cleaner_upper(q, [
-        deleter(t_steps.c.build_id),
-    ])
-    log.info("Finished cleaning up orphaned steps")
-
-
-def cleanup_statusdb_orphaned_properties(meta):
-    log.info("Cleaning up orphaned build properties")
-    t_builds = sa.Table('builds', meta, autoload=True)
-    t_properties = sa.Table('build_properties', meta, autoload=True)
-
-    q = sa.select(
-        [t_properties.c.build_id],
-        # nopep8 - we really do need to use == None
-        t_builds.c.id == None,
-        from_obj=[t_properties.outerjoin(t_builds, t_properties.c.build_id ==
-                                         t_builds.c.id)],
-        distinct=True,
-    ).limit(10000)
-
-    cleaner_upper(q, [
-        deleter(t_properties.c.build_id),
-    ])
-    log.info("Finished cleaning up orphaned build properties")
-
-
-IGNORABLE_CLASSES = (
-    "buildbotcustom.l10n.TriggerableL10n",
-    "buildbotcustom.scheduler.PersistentScheduler",
-    "buildbot.schedulers.basic.Dependent",
-    "buildbot.schedulers.triggerable.Triggerable",
-    "buildbotcustom.scheduler.TriggerBouncerCheck",
-    "buildbotcustom.scheduler.Dependent-props",
-    "buildbotcustom.buildbotcustom.l10n.TriggerableL10n",
-    "buildbot.schedulers.timed.Nightly",
-    "buildbotcustom.scheduler.SpecificNightly-props",
-    "buildbotcustom.scheduler.Nightly-props",
-    "buildbotcustom.scheduler.AggregatingScheduler",
-)
-
-PERBUILD_CLASSES = (
-    "buildbot.schedulers.basic.Scheduler",
-    "buildbotcustom.scheduler.MultiScheduler",
-    "buildbotcustom.scheduler.BuilderChooserScheduler",
-    "buildbotcustom.scheduler.Scheduler-props",
-    "buildbotcustom.scheduler.BuilderChooserScheduler-props",
-    "buildbotcustom.scheduler.EveryNthScheduler")
-
-_change_cache = {}
-
-
-def get_change_date(db, changeid):
-    if changeid in _change_cache:
-        return _change_cache[changeid]
-
-    q = sa.text("SELECT when_timestamp FROM changes WHERE changeid=:changeid")
-    results = db.execute(q, changeid=changeid)
-    row = results.fetchone()
-    if row:
-        _change_cache[changeid] = row.when_timestamp
-        return row.when_timestamp
-    else:
-        _change_cache[changeid] = None
-        return None
-
-
-def should_delete(db, s, cutoff):
-    state = json.loads(s.state)
-    if s.class_name in IGNORABLE_CLASSES:
-        # Not sure if we have enough data here to make a decision...
-        return False
-    elif s.class_name in PERBUILD_CLASSES:
-        assert "last_processed" in state
-        change_time = get_change_date(db, state['last_processed'])
-        if change_time < cutoff:
-            return True
-        return False
-    else:
-        log.warning("unhandled scheduler class for scheduler %s: %s %s",
-                    s.schedulerid, s.class_name, s.name)
-        return False
-
-
-def cleanup_schedulerdb_schedulers(db):
-    now = time.time()
-    regular_cutoff = now - 7 * 86400  # 1 week
-    release_cutoff = now - 60 * 86400  # 2 months
-
-    # Get all the schedulers from the db
-    q = sa.text("SELECT * FROM schedulers")
-    schedulers = db.execute(q)
-    to_delete = []
-    for s in schedulers:
-        if s.name.startswith("release-"):
-            cutoff = release_cutoff
-        else:
-            cutoff = regular_cutoff
-
-        try:
-            if should_delete(db, s, cutoff):
-                to_delete.append(s)
-        except Exception:
-            log.exception("couldn't process scheduler %s: %s", s.schedulerid,
-                          s.name)
-
-    for s in to_delete:
-        log.info("deleting scheduler %s: %s %s %s", s.schedulerid,
-                 s.class_name, s.name, s.state)
-        q = sa.text("DELETE FROM schedulers WHERE schedulerid=:schedulerid")
-        db.execute(q, schedulerid=s.schedulerid)
-
-
-if __name__ == '__main__':
-    from optparse import OptionParser
-    from ConfigParser import RawConfigParser
-    parser = OptionParser()
-    parser.set_defaults(
-        filename=None,
-        loglevel=logging.INFO,
-        logfile=None,
-        skip_orphans=False,
-    )
-    parser.add_option("-l", "--logfile", dest="logfile")
-    parser.add_option("--status-db", dest="status_db")
-    parser.add_option("--scheduler-db", dest="scheduler_db")
-    parser.add_option("--config", dest="config_file", help="config file")
-    parser.add_option("--cutoff", dest="cutoff",
-                      help="cutoff date, prior to which we'll delete data. "
-                      "format is YYYY-MM-DD")
-    parser.add_option("--skip-orphans", dest="skip_orphans",
-                      action="store_true")
-    parser.add_option("-v", "--verbose", dest="loglevel", action="store_const",
-                      const=logging.DEBUG, help="run verbosely")
-    parser.add_option("-q", "--quiet", dest="loglevel", action="store_const",
-                      const=logging.WARNING, help="run quietly")
-    options, args = parser.parse_args()
-
-    # Configure logging
-    root_log = logging.getLogger()
-    log_formatter = logging.Formatter("%(asctime)s - %(message)s")
-
-    if options.logfile:
-        import logging.handlers
-        # Keep one log file per week for 4 weeks
-        handler = logging.handlers.TimedRotatingFileHandler(options.logfile,
-                                                            when='W6',
-                                                            backupCount=4)
-    else:
-        handler = logging.StreamHandler()
-    handler.setFormatter(log_formatter)
-    root_log.setLevel(options.loglevel)
-    root_log.addHandler(handler)
-
-    if not options.cutoff:
-        parser.error("cutoff date is required")
-
-    # Load options from config if it's set
-    if options.config_file:
-        config_parser = RawConfigParser()
-        config_parser.read([options.config_file])
-        if config_parser.has_option("db", "status_db"):
-            options.status_db = config_parser.get("db", "status_db")
-        if config_parser.has_option("db", "scheduler_db"):
-            options.scheduler_db = config_parser.get("db", "scheduler_db")
-
-    # Clean up statusdb
-    if options.status_db:
-        status_db = sa.create_engine(options.status_db, pool_recycle=300)
-        meta = sa.MetaData(bind=status_db)
-        cleanup_statusdb_builds(meta, options.cutoff)
-        if not options.skip_orphans:
-            cleanup_statusdb_orphaned_steps(meta)
-            cleanup_statusdb_orphaned_properties(meta)
-
-    if options.scheduler_db:
-        scheduler_db = sa.create_engine(options.scheduler_db, pool_recycle=300)
-
-        cleanup_schedulerdb_schedulers(scheduler_db)
deleted file mode 100644
--- a/buildfarm/maintenance/count_and_reboot.py
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env python
-# encoding: utf-8
-# Created by Chris AtLee on 2008-11-04
-"""count_and_reboot.py [-n maxcount] -f countfile
-
-Increments the value in countfile, and reboots the machine once the count
-reaches or exceeds maxcount."""
-
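-# A typical invocation (count file path is hypothetical), run at the end of
-# each build job:
-#   python count_and_reboot.py -n 10 -z -f /builds/reboot_count.txt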
-import os
-import sys
-import time
-
-if sys.platform in ('darwin', 'linux2'):
-    def reboot():
-        # -S means to accept password from stdin, which we then redirect from
-        # /dev/null
-        # This results in sudo not waiting forever for a password.  If sudoers
-        # isn't set up properly, this will fail immediately
-        os.system("sudo -S reboot < /dev/null")
-        # After starting the shutdown, we go to sleep since the system can
-        # take a few minutes to shut everything down and reboot
-        time.sleep(600)
-
-elif sys.platform == "win32":
-    # Windows
-    def reboot():
-        os.system("shutdown -f -r -t 0")
-        # After starting the shutdown, we go to sleep since the system can
-        # take a few minutes to shut everything down and reboot
-        time.sleep(600)
-
-
-def increment_count(fname):
-    try:
-        current_count = int(open(fname).read())
-    except (IOError, ValueError):
-        current_count = 0
-    current_count += 1
-    open(fname, "w").write("%i\n" % current_count)
-    return current_count
-
-if __name__ == '__main__':
-    from optparse import OptionParser
-
-    parser = OptionParser(__doc__)
-    parser.add_option("-n", "--max-count", dest="maxcount", default=10,
-                      help="reboot after <maxcount> runs", type="int")
-    parser.add_option("-f", "--count-file", dest="countfile", default=None,
-                      help="file to record count in")
-    parser.add_option("-z", "--zero-count", dest="zero", default=False,
-                      action="store_true", help="zero out the counter before rebooting")
-
-    options, args = parser.parse_args()
-
-    if not options.countfile:
-        parser.error("countfile is required")
-
-    if increment_count(options.countfile) >= options.maxcount:
-        if options.zero:
-            open(options.countfile, "w").write("0\n")
-        print "************************************************************************************************"
-        print "*********** END OF RUN - NOW DOING SCHEDULED REBOOT; FOLLOWING ERROR MESSAGE EXPECTED **********"
-        print "************************************************************************************************"
-        sys.stdout.flush()
-        reboot()
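[Editor's note] The script above is just a persisted counter plus a platform-specific reboot. A sketch of how a hypothetical wrapper might drive it (COUNT_FILE is made up; importing the module assumes it is on sys.path):

    # Hypothetical wrapper around the helpers defined in count_and_reboot.py
    from count_and_reboot import increment_count, reboot

    COUNT_FILE = "/builds/reboot_count"   # hypothetical location
    MAX_COUNT = 10                        # the script's default for -n

    if increment_count(COUNT_FILE) >= MAX_COUNT:
        open(COUNT_FILE, "w").write("0\n")   # same reset that -z performs
        reboot()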
deleted file mode 100755
--- a/buildfarm/maintenance/end_to_end_reconfig.sh
+++ /dev/null
@@ -1,556 +0,0 @@
-#!/bin/bash -eu
-
-################### TO DO ###################
-#
-# * Check hg is installed
-# * Check hg clone is working (error code 255)
-# * Check virtualenv is installed
-# * Add an option not to refresh tools repo
-# * Check connectivity to an arbitrary master and report nicely
-# * Summarize failures from manage_masters.py at end of log
-# * Option to checkconfig only without update
-# * Only update wiki if there really are changes
-# * Only reconfig if there really are changes, or if forced
-# * Send console output also to a log file
-#
-#############################################
-
-# Include our shared function library
-if [ -e ../../lib/shell/functions ]; then
-    . ../../lib/shell/functions
-else
-    echo "Shared functions missing: ../../lib/shell/functions" >&2
-    exit 1
-fi
-
-START_TIME="$(date +%s)"
-
-# Explicitly unset any pre-existing environment variables to avoid variable collision
-unset PREPARE_ONLY FORCE_RECONFIG MERGE_TO_PRODUCTION UPDATE_BUGZILLA UPDATE_WIKI RECONFIG_DIR RECONFIG_CREDENTIALS_FILE WIKI_USERNAME WIKI_PASSWORD
-
-# Force python into unbuffered mode, so we get full output, as it happens from the fabric scripts
-export PYTHONUNBUFFERED=1
-
-function usage {
-    echo
-    echo "This script can be used to reconfig interactively, or non-interactively. It will merge"
-    echo "buildbotcustom, buildbot-configs from default to production(-0.8)."
-    echo "It will then reconfig the buildbot masters. Afterwards, if all was successful, the script will"
-    echo "update the maintenance wiki (https://wiki.mozilla.org/ReleaseEngineering/Maintenance) and"
-    echo "will additionally post comments to bugzilla for bugs it finds in the commit messages of"
-    echo "merged changes, providing a link to the hg web interface of the commits that have been"
-    echo "deployed to production."
-    echo
-    echo "USAGE"
-    echo "     1) $(basename "${0}") -h"
-    echo "     2) $(basename "${0}") [-b] [-f] [-m] [-n] [-p] [-r RECONFIG_DIR] [-t] [-w RECONFIG_CREDENTIALS_FILE]"
-    echo
-    echo "OPTIONS"
-    echo "    -b:                            No posting of comments to Bugzilla."
-    echo "    -f:                            Force reconfig, even if no changes merged."
-    echo "    -h:                            Display help."
-    echo "    -m:                            No merging of default -> production(-0.8) of hg branches."
-    echo "    -n:                            No wiki update."
-    echo "    -p:                            Prepare only; does not push changes to hg, nor perform"
-    echo "                                   reconfig, nor update wiki, nor post to Bugzilla. Useful"
-    echo "                                   for validating setup, or resolving merge conflicts in"
-    echo "                                   advance early rather than waiting until real reconfig to"
-    echo "                                   resolve conflicts."
-    echo "    -r RECONFIG_DIR:               Use directory RECONFIG_DIR for storing temporary files"
-    echo "                                   (default is /tmp/reconfig). This directory, and any"
-    echo "                                   necessary parent directories will be created if required."
-    echo "    -w RECONFIG_CREDENTIALS_FILE:  Source environment variables: BUGZILLA_USERNAME,"
-    echo "                                   BUGZILLA_PASSWORD, WIKI_USERNAME, WIKI_PASSWORD from file"
-    echo "                                   RECONFIG_CREDENTIALS_FILE (default is ~/.reconfig/config)."
-    echo
-    echo "EXIT CODES"
-    echo "     0        Success"
-    echo "     1        Error loading shared functions"
-    echo "     2        Bad command line options specified"
-    echo "    64        Could not create directory to store results (RECONFIG_DIR)"
-    echo "    65        Wiki credentials file not found"
-    echo "    66        Python 2.7 not found in PATH"
-    echo "    67        Reconfig aborted by user after incomplete reconfig found"
-    echo "    68        Aborted due to incomplete reconfig found but non-interactive session, so"
-    echo "              could not ask user how to proceed"
-    echo "    69        Error during hg merge (most likely due to merge conflict(s))"
-    echo "    70        Theoretically not possible - update_maintenance_wiki.sh tries and fails"
-    echo "              to create the RECONFIG_DIR when it detects it does not exist (but should"
-    echo "              have already been created by end_to_end_reconfig.sh)"
-    echo "    71        Theoretically not possible - no wiki markdown file passed to"
-    echo "              update_maintenance_wiki.sh"
-    echo "    72        Theoretically not possible - non-existing markdown file passed to"
-    echo "              update_maintenance_wiki.sh"
-    echo "    73        WIKI_USERNAME not specified in reconfig credentials file"
-    echo "    74        WIKI_PASSWORD not specified in reconfig credentials file"
-    echo "    75        Could not retreieve login token from wiki - probably a connectivity issue"
-    echo "    76        Wiki user/password not authorized to update wiki page"
-    echo "              https://wiki.mozilla.org/ReleaseEngineering/Maintenance"
-    echo "    77        Wiki API provided a reason not to supply an edit token for the wiki page,"
-    echo "              which is something other than user/password not authorized (which would"
-    echo "              result in exit code 76)"
-    echo "    78        Wiki API response to request to provide an edit token produced a"
-    echo "              non-parsable response"
-    echo "    79        BUGZILLA_USERNAME not specified in reconfig credentials file"
-    echo "    80        BUGZILLA_PASSWORD not specified in reconfig credentials file"
-    echo "    81        Bugzilla user/password not authorized to update Bugzilla"
-    echo "    82        Could not install python packages into a virtualenv"
-    echo
-}
-
-# Simple function to output the name of this script and the options that were passed to it
-function command_called {
-    echo -n "Command called:"
-    for ((INDEX=0; INDEX<=$#; INDEX+=1))
-    do
-        echo -n " '${!INDEX}'"
-    done
-    echo ''
-    echo "From directory: '$(pwd)'"
-}
-
-# writes hg commands to the hg log, and just summarizes command called in main log
-function hg_wrapper {
-    if [ "${1}" == 'clone' ]; then
-        command=('hg' "${@}" "${RECONFIG_DIR}/${repo}")
-    else
-        command=('hg' -R "${RECONFIG_DIR}/${repo}" "${@}")
-    fi
-    command_full="Running:"
-    for ((INDEX=0; INDEX<${#command[@]}; INDEX+=1)); do
-        command_full="${command_full} '${command[$INDEX]}'"
-    done
-    echo "  * ${command_full}"
-    {
-        echo
-        echo "$(date): ${command_full}"
-        echo
-        HG_START="$(date +%s)"
-        set +e
-        "${command[@]}"
-        HG_RETURN_CODE=$?
-        set -e
-        HG_STOP="$(date +%s)"
-        echo
-        echo "$(date): Completed ($((HG_STOP - HG_START))s)"
-    } >>"${RECONFIG_DIR}/hg-${START_TIME}.log" 2>&1
-    return "${HG_RETURN_CODE}"
-}
-
-set +u
-command_called "${@}" | sed '1s/^/  * /;2s/^/    /'
-set -u
-
-echo "  * Start timestamp: ${START_TIME}"
-echo "  * Parsing parameters of $(basename "${0}")..."
-# Parse parameters passed to this script
-while getopts ":bfhmnpr:w:" opt; do
-    case "${opt}" in
-        b)  UPDATE_BUGZILLA=0
-            ;;
-        f)  FORCE_RECONFIG=1
-            ;;
-        h)  echo "  * Help option requested"
-            usage
-            exit 0
-            ;;
-        m)  MERGE_TO_PRODUCTION=0
-            ;;
-        n)  UPDATE_WIKI=0
-            ;;
-        p)  PREPARE_ONLY=1
-            ;;
-        r)  RECONFIG_DIR="${OPTARG}"
-            ;;
-        w)  RECONFIG_CREDENTIALS_FILE="${OPTARG}"
-            ;;
-        ?)  usage >&2
-            exit 2
-            ;;
-    esac
-done
-
-echo "  * Setting defaults for parameters not provided in command line options..."
-
-PREPARE_ONLY="${PREPARE_ONLY:-0}"
-FORCE_RECONFIG="${FORCE_RECONFIG:-0}"
-MERGE_TO_PRODUCTION="${MERGE_TO_PRODUCTION:-1}"
-UPDATE_BUGZILLA="${UPDATE_BUGZILLA:-1}"
-UPDATE_WIKI="${UPDATE_WIKI:-1}"
-RECONFIG_DIR="${RECONFIG_DIR:-/tmp/reconfig}"
-RECONFIG_CREDENTIALS_FILE="${RECONFIG_CREDENTIALS_FILE:-${HOME}/.reconfig/config}"
-
-RECONFIG_UPDATE_FILE="reconfig_update_for_maintenance.wiki"
-
-##### Now check parsed parameters are valid...
-
-echo "  * Validating parameters..."
-
-if [ "${PREPARE_ONLY}" == 0 ]; then
-    echo "  * Will be preparing *and performing* reconfig, all being well"
-else
-    echo "  * Preparing reconfig only; will not enact changes"
-fi
-
-if [ ! -d "${RECONFIG_DIR}" ]; then
-    echo "  * Storing reconfig output under '${RECONFIG_DIR}'..."
-    if ! mkdir -p "${RECONFIG_DIR}"; then
-        echo "ERROR: Directory '${RECONFIG_DIR}' could not be created from directory '$(pwd)'" >&2
-        exit 64
-    fi
-else
-    echo "  * Reconfig directory '${RECONFIG_DIR}' exists - OK"
-fi
-
-# Convert ${RECONFIG_DIR} to an absolute directory, in case it is relative, by stepping into it...
-pushd "${RECONFIG_DIR}" >/dev/null
-if [ "${RECONFIG_DIR}" != "$(pwd)" ]; then
-    echo "  * Reconfig directory absolute path: '$(pwd)'"
-fi
-RECONFIG_DIR="$(pwd)"
-popd >/dev/null
-
-# To avoid user getting confused about parent directory, tell user the
-# absolute path of the credentials file...
-echo "  * Checking we have a reconfig credentials file..."
-RECONFIG_CREDS_PARENT_DIR="$(dirname "${RECONFIG_CREDENTIALS_FILE}")"
-if [ ! -e "${RECONFIG_CREDS_PARENT_DIR}" ]; then
-echo "  * Reconfig credentials file parent directory '${RECONFIG_CREDS_PARENT_DIR}' not found; creating..."
-    mkdir -p "${RECONFIG_CREDS_PARENT_DIR}"
-fi
-pushd "${RECONFIG_CREDS_PARENT_DIR}" >/dev/null
-ABS_RECONFIG_CREDENTIALS_FILE="$(pwd)/$(basename "${RECONFIG_CREDENTIALS_FILE}")"
-popd >/dev/null
-if [ "${RECONFIG_CREDENTIALS_FILE}" == "${ABS_RECONFIG_CREDENTIALS_FILE}" ]; then
-    echo "  * Reconfig credentials file location: '${RECONFIG_CREDENTIALS_FILE}'"
-else
-    echo "  * Reconfig credentials file location: '${RECONFIG_CREDENTIALS_FILE}' (absolute path: '${ABS_RECONFIG_CREDENTIALS_FILE}')"
-fi
-if [ ! -e "${ABS_RECONFIG_CREDENTIALS_FILE}" ]; then
-    echo "  * Reconfig credentials file '${ABS_RECONFIG_CREDENTIALS_FILE}' not found; creating..."
-    {
-        echo "# Needed if updating wiki - note the wiki does *not* use your LDAP credentials..."
-        echo "export WIKI_USERNAME='naughtymonkey'"
-        echo "export WIKI_PASSWORD='nobananas'"
-        echo
-        echo "# Needed if updating Bugzilla bugs to mark them as in production - *no* Persona integration - must be a native Bugzilla account..."
-        echo "# Details for the 'Release Engineering SlaveAPI Service' <slaveapi@mozilla.releng.tld> Bugzilla user can be found in the RelEng"
-        echo "# private repo, in file passwords/slaveapi-bugzilla.txt.gpg (needs decrypting with an approved gpg key)."
-        echo "export BUGZILLA_USERNAME='naughtymonkey'"
-        echo "export BUGZILLA_PASSWORD='nobananas'"
-    } > "${RECONFIG_CREDENTIALS_FILE}"
-    echo "  * Created credentials file '${ABS_RECONFIG_CREDENTIALS_FILE}'. Please edit this file, setting appropriate values, then rerun."
-    exit 65
-else
-    echo "  * Loading config from '${ABS_RECONFIG_CREDENTIALS_FILE}'..."
-    source "${RECONFIG_CREDENTIALS_FILE}"
-fi
-
-# Now step into directory this script is in...
-cd "$(dirname "${0}")"
-
-# Test python version, and availability of fabric...
-echo "  * Checking python version is 2.7..."
-if ! python --version 2>&1 | grep -q '^Python 2\.7'; then
-    echo "ERROR: Python version 2.7 not found - please make sure python 2.7 is in your PATH" >&2
-    exit 66
-fi
-
-run_virtualenv() {
-    if hash virtualenv2 2>/dev/null; then
-        virtualenv2 "$@"
-    else
-        virtualenv -p python2 "$@"
-    fi
-}
-
-for package in fabric requests; do
-    installed_package=false
-    echo "  * Checking ${package} package is available in python environment..."
-    if ! python -c "import ${package}" >/dev/null 2>&1; then
-        echo "  * Package ${package} not found"
-        if [ ! -e "${RECONFIG_DIR}/reconfig-virtual-env" ]; then
-            echo "  * Creating virtualenv directory '${RECONFIG_DIR}/reconfig-virtual-env' for reconfig tool..."
-            echo "  * Logging to: '${RECONFIG_DIR}/virtualenv-installation.log'..."
-            run_virtualenv "${RECONFIG_DIR}/reconfig-virtual-env" >"${RECONFIG_DIR}/virtualenv-installation.log" 2>&1
-            set +u
-            source "${RECONFIG_DIR}/reconfig-virtual-env/bin/activate"
-            set -u
-            echo "  * Installing ${package} under '${RECONFIG_DIR}/reconfig-virtual-env'..."
-            pip install "${package}" >>"${RECONFIG_DIR}/virtualenv-installation.log" 2>&1
-            installed_package=true
-        else
-            echo "  * Attempting to use existing virtualenv found in '${RECONFIG_DIR}/reconfig-virtual-env'..."
-            set +u
-            source "${RECONFIG_DIR}/reconfig-virtual-env/bin/activate"
-            set -u
-            if ! python -c "import ${package}" >/dev/null 2>&1; then
-                echo "  * Package ${package} not found under virtualenv '${RECONFIG_DIR}/reconfig-virtual-env', installing..."
-                pip install "${package}" >"${RECONFIG_DIR}/virtualenv-installation.log" 2>&1
-                installed_package=true
-            else
-                echo "  * Using existing package ${package} found in '${RECONFIG_DIR}/reconfig-virtual-env'"
-            fi
-        fi
-    else
-        echo "  * Package ${package} found"
-    fi
-    if "${installed_package}"; then
-        echo "  * Re-checking if package ${package} is now available in python environment..."
-        if ! python -c "import ${package}" >/dev/null 2>&1; then
-            echo "ERROR: Could not successfully install package ${package} into python virtualenv '${RECONFIG_DIR}/reconfig-virtual-env'" >&2
-            echo "Exiting..." >&2
-            exit 82
-        else
-            echo "  * Package ${package} installed successfully into python environment"
-        fi
-    fi
-done
-
-if [ "${UPDATE_BUGZILLA}" == '1' ]; then
-    echo "  * Updates to Bugzilla enabled"
-    echo "  * Testing existence of Bugzilla login credentials..."
-    if [ -z "${BUGZILLA_USERNAME}" ]; then
-        echo "ERROR: Environment variable BUGZILLA_USERNAME must be set for posting comments to https://bugzilla.mozilla.org/" >&2
-        echo "Exiting..." >&2
-        exit 79
-    else
-        echo "  * Environment variable BUGZILLA_USERNAME defined ('${BUGZILLA_USERNAME}')"
-    fi
-    if [ -z "${BUGZILLA_PASSWORD}" ]; then
-        echo "ERROR: Environment variable BUGZILLA_PASSWORD must be set for posting comments to https://bugzilla.mozilla.org/" >&2
-        echo "Exiting..." >&2
-        exit 80
-    else
-        echo "  * Environment variable BUGZILLA_PASSWORD defined"
-    fi
-fi
-
-if [ "${UPDATE_WIKI}" == '1' ]; then
-    echo "  * Wiki update enabled"
-    # Now validate wiki credentials by performing a dry run...
-    echo "  * Testing login credentials for wiki..."
-    ./update_maintenance_wiki.sh -d
-else
-    echo "  * Not updating wiki"
-fi
-
-if [ "${UPDATE_BUGZILLA}" == '1' ]; then
-    echo "  * Testing Bugzilla login as user '${BUGZILLA_USERNAME}'..."
-    BUGZILLA_LOGIN_RESPONSE="$(mktemp -t bugzilla_login.XXXXXXXXXX)"
-    curl -s -G --data-urlencode "login=${BUGZILLA_USERNAME}" --data-urlencode "password=${BUGZILLA_PASSWORD}" 'https://bugzilla.mozilla.org/rest/login' > "${BUGZILLA_LOGIN_RESPONSE}"
-    if grep -q '"token":' "${BUGZILLA_LOGIN_RESPONSE}"; then
-        echo "  * Login to Bugzilla successful"
-    else
-        echo "ERROR: Login to Bugzilla failed - response given:" >&2
-        cat "${BUGZILLA_LOGIN_RESPONSE}" | sed 's/^/    /' >&2
-        rm "${BUGZILLA_LOGIN_RESPONSE}"
-        echo >&2
-        echo "Please check values of BUGZILLA_USERNAME and BUGZILLA_PASSWORD set in this file: '${RECONFIG_CREDENTIALS_FILE}'" >&2
-        echo "Exiting..." >&2
-        exit 81
-    fi
-    rm "${BUGZILLA_LOGIN_RESPONSE}"
-else
-    echo "  * Not updating Bugzilla"
-fi
-
-# Check if a previous reconfig did not complete
-if [ -f "${RECONFIG_DIR}/pending_changes" ]; then
-    echo "  * It looks like a previous reconfig did not complete"
-    echo "  * Checking if 'standard in' is connected to a terminal..."
-    if [ -t 0 ]; then
-        # 'standard in' is connected to a terminal, can ask user a question!
-        echo "  * Please select one of the following options:"
-        echo "        1) Continue with existing reconfig (e.g. if you have resolved a merge conflict)"
-        echo "        2) Delete saved state for existing reconfig, and start from fresh"
-        echo "        3) Abort and exit reconfig process"
-        choice=''
-        while [ "${choice}" != 1 ] && [ "${choice}" != 2 ] && [ "${choice}" != 3 ]; do
-            echo -n "    Your choice: "
-            read choice
-        done
-        case "${choice}" in
-            1) echo "  * Continuing with stalled reconfig..."
-               ;;
-            2) echo "  * Cleaning out previous reconfig from '${RECONFIG_DIR}'..."
-               rm -rf "${RECONFIG_DIR}"/{buildbot-configs,buildbotcustom,pending_changes,${RECONFIG_UPDATE_FILE}}
-               ;;
-            3) echo "  * Aborting reconfig..."
-               exit 67
-               ;;
-        esac
-    else
-        # 'standard in' not connected to a terminal, assume no user connected...
-        echo "  * Non-interactive shell detected, cannot ask whether to continue or not, therefore aborting..."
-        exit 68
-    fi
-fi
-
-### If we get this far, all our preflight checks have passed, so now on to business...
-echo "  * All preflight checks passed in '$(basename "${0}")'"
-
-if [ "${PREPARE_ONLY}" == '0' ]; then
-    # Failing to update IRC is non-fatal.
-    IRC_MSG="Merge to production has started for buildbot repos."
-    if [ "${FORCE_RECONFIG}" == '1' ]; then
-        IRC_MSG="Reconfig has started."
-    fi
-    ./update_irc.sh "${IRC_MSG}" || true
-fi
-
-# Merges buildbot-configs from default -> production.
-# Merges buildbotcustom from default -> production-0.8.
-# Returns 0 if something got merged, otherwise returns 1.
-function merge_to_production {
-    [ "${MERGE_TO_PRODUCTION}" == 0 ] && return 0
-    echo "  * hg log for this session: '${RECONFIG_DIR}/hg-${START_TIME}.log'"
-    # Create an empty merge file to compare against so we don't claim merge success when nothing was merged.
-    {
-        echo "Merging from default"
-        echo
-    } > "${RECONFIG_DIR}/empty_merge"
-    for repo in buildbot-configs buildbotcustom; do
-        if [ -d "${RECONFIG_DIR}/${repo}" ]; then
-            echo "  * Existing hg clone of ${repo} found: '${RECONFIG_DIR}/${repo}' - pulling for updates..."
-            hg_wrapper pull
-        else
-            echo "  * Cloning ssh://hg.mozilla.org/build/${repo} into '${RECONFIG_DIR}/${repo}'..."
-            hg_wrapper clone "ssh://hg.mozilla.org/build/${repo}"
-        fi
-        if [ "${repo}" == 'buildbotcustom' ]; then
-            branch='production-0.8'
-        else
-            branch='production'
-        fi
-        hg_wrapper up -r "${branch}"
-        echo "  * Finding ${repo} changesets that would get merged from default to ${branch}..."
-        {
-            echo "Merging from default"
-            echo
-            hg -R "${RECONFIG_DIR}/${repo}" merge -P default
-        } >> "${RECONFIG_DIR}/${repo}_preview_changes.txt" 2>/dev/null
-        # Merging can fail if there are no changes between default and "${branch}"
-        set +e
-        hg_wrapper merge default
-        RETVAL="${?}"
-        set -e
-        if [ "${RETVAL}" == '255' ]; then
-            echo "  * No new changes found in ${repo}"
-        elif [ "${RETVAL}" != '0' ]; then
-            echo "ERROR: An error occurred during hg merge (exit code was ${RETVAL}). Please resolve conflicts/issues in '${RECONFIG_DIR}/${repo}',"
-            echo "       push to ${branch} branch, and run this script again." >&2
-            exit 69
-        else
-            echo "  * ${repo} merge resulted in change"
-        fi
-        # Still commit and push, even if no new changes, since a merge conflict might have been resolved
-        hg_wrapper commit -l "${RECONFIG_DIR}/${repo}_preview_changes.txt"
-        if [ "${PREPARE_ONLY}" == '0' ]; then
-            echo "  * Pushing '${RECONFIG_DIR}/${repo}' ${branch} branch to ssh://hg.mozilla.org/build/${repo}..."
-            hg_wrapper push
-        fi
-        if ! diff "${RECONFIG_DIR}/empty_merge" "${RECONFIG_DIR}/${repo}_preview_changes.txt" > /dev/null; then
-            echo "${repo}" >> "${RECONFIG_DIR}/pending_changes"
-        fi
-    done
-    [ -f "${RECONFIG_DIR}/pending_changes" ] && return 0 || return 1
-}
-
-# Now we process the commit messages from all the changes we landed. This is handled by the python script
-# process_commit_comments.py. We pass options to this script based on what steps are enabled (e.g. wiki
-# updates, bugzilla updates). Create an array for this, and if it is empty at the end, we know we don't
-# have to do anything (arrays are better than strings since they can contain spaces).
-commit_processing_options=()
-
-if [ "${UPDATE_WIKI}" == '1' ]; then
-    commit_processing_options+=('--wiki-markup-file' "${RECONFIG_DIR}/${RECONFIG_UPDATE_FILE}")
-fi
-if [ "${UPDATE_BUGZILLA}" == '1' ] && [ "${PREPARE_ONLY}" == '0' ]; then
-    commit_processing_options+=('--update-bugzilla')
-fi
-
-# Display the commands in advance in case the reconfig fails part way through.
-if [ "${#commit_processing_options[@]}" -gt 0 ]; then
-    echo "  * Providing post-processing commands in case reconfig aborts."
-    echo -n "  * Will run '$(pwd)/process_commit_comments.py' --logdir '${RECONFIG_DIR}'"
-    for ((i=0; i<${#commit_processing_options[@]}; i+=1)); do
-        echo -n " '${commit_processing_options[${i}]}'"
-    done
-    echo
-    echo "  * Will run: './update_maintenance_wiki.sh -r \"${RECONFIG_DIR}\" -w \"${RECONFIG_DIR}/${RECONFIG_UPDATE_FILE}\"'"
-fi
-
-# Return code of merge_to_production is 0 if merge performed successfully and changes made
-if merge_to_production; then
-    echo "  * All changes merged to production."
-fi
-
-if [ "${FORCE_RECONFIG}" == '1' ]; then
-    production_masters_url='https://hg.mozilla.org/build/tools/raw-file/default/buildfarm/maintenance/production-masters.json'
-    if [ "${PREPARE_ONLY}" != '0' ]; then
-        echo "  * Preparing reconfig only; not running: '$(pwd)/manage_masters.py' -f '${production_masters_url}' -j16 -R scheduler -R build -R try -R tests show_revisions update"
-        echo "  * Preparing reconfig only; not running: '$(pwd)/manage_masters.py' -f '${production_masters_url}' -j16 -R scheduler -R build -R try -R tests update_master_config"
-        echo "  * Preparing reconfig only; not running: '$(pwd)/manage_masters.py' -f '${production_masters_url}' -j32 -R scheduler -R build -R try -R tests checkconfig reconfig"
-        echo "  * Preparing reconfig only; not running: '$(pwd)/manage_masters.py' -f '${production_masters_url}' -j16 -R scheduler -R build -R try -R tests show_revisions"
-    else
-        echo "  * Fabric log for buildbot masters: '${RECONFIG_DIR}/manage_masters-${START_TIME}.log'"
-        # Split into two steps so -j option can be varied between them
-        echo "  * Running: '$(pwd)/manage_masters.py' -f '${production_masters_url}' -j16 -R scheduler -R build -R try -R tests show_revisions update"
-        ./manage_masters.py -f "${production_masters_url}" -j16 -R scheduler -R build -R try -R tests show_revisions update >>"${RECONFIG_DIR}/manage_masters-${START_TIME}.log" 2>&1
-        echo "  * Running: '$(pwd)/manage_masters.py' -f '${production_masters_url}' -j16 -R scheduler -R build -R try -R tests update_master_config"
-        ./manage_masters.py -f "${production_masters_url}" -j16 -R scheduler -R build -R try -R tests update_master_config >>"${RECONFIG_DIR}/manage_masters-${START_TIME}.log" 2>&1
-        echo "  * Running: '$(pwd)/manage_masters.py' -f '${production_masters_url}' -j32 -R scheduler -R build -R try -R tests checkconfig reconfig"
-        ./manage_masters.py -f "${production_masters_url}" -j32 -R scheduler -R build -R try -R tests checkconfig reconfig >>"${RECONFIG_DIR}/manage_masters-${START_TIME}.log" 2>&1
-        # delete this now, since changes have been deployed
-        [ -f "${RECONFIG_DIR}/pending_changes" ] && mv "${RECONFIG_DIR}/pending_changes" "${RECONFIG_DIR}/pending_changes_${START_TIME}"
-        echo "  * Running: '$(pwd)/manage_masters.py' -f '${production_masters_url}' -j16 -R scheduler -R build -R try -R tests show_revisions"
-        ./manage_masters.py -f "${production_masters_url}" -j16 -R scheduler -R build -R try -R tests show_revisions >>"${RECONFIG_DIR}/manage_masters-${START_TIME}.log" 2>&1
-        echo "  * Reconfig of masters completed"
-    fi
-fi
-
-if [ "${#commit_processing_options[@]}" -gt 0 ]; then
-    echo -n "  * Running '$(pwd)/process_commit_comments.py' --logdir '${RECONFIG_DIR}'"
-    for ((i=0; i<${#commit_processing_options[@]}; i+=1)); do echo -n " '${commit_processing_options[${i}]}'"; done
-    echo
-    ./process_commit_comments.py --logdir "${RECONFIG_DIR}" "${commit_processing_options[@]}"
-else
-    echo "  * Skipping commit messages processing step, since no wiki update nor Bugzilla update enabled, so not required"
-fi
-
-if [ -f "${RECONFIG_DIR}/${RECONFIG_UPDATE_FILE}" ]; then
-    if [ "${UPDATE_WIKI}" == "1" ]; then
-        if [ "${PREPARE_ONLY}" != '0' ]; then
-            echo "  * Running: './update_maintenance_wiki.sh -d -r \"${RECONFIG_DIR}\" -w \"${RECONFIG_DIR}/${RECONFIG_UPDATE_FILE}\"'"
-            ./update_maintenance_wiki.sh -d -r "${RECONFIG_DIR}" -w "${RECONFIG_DIR}/${RECONFIG_UPDATE_FILE}"
-        else
-            echo "  * Running: './update_maintenance_wiki.sh -r \"${RECONFIG_DIR}\" -w \"${RECONFIG_DIR}/${RECONFIG_UPDATE_FILE}\"'"
-            ./update_maintenance_wiki.sh -r "${RECONFIG_DIR}" -w "${RECONFIG_DIR}/${RECONFIG_UPDATE_FILE}"
-            for file in "${RECONFIG_DIR}"/*_preview_changes.txt
-            do
-                mv "${file}" "$(echo "${file}" | sed "s/\\.txt\$/_${START_TIME}&/")"
-            done 2>/dev/null || true
-            RECONFIG_STOP_TIME="$(date +%s)"
-            RECONFIG_ELAPSED=$((RECONFIG_STOP_TIME - START_TIME))
-            RECONFIG_ELAPSED_DISPLAY="$(show_time "${RECONFIG_ELAPSED}")"
-            IRC_MSG="Merge has finished in ${RECONFIG_ELAPSED_DISPLAY}. See http://bit.ly/reconfigs for details. Masters will reconfig themselves automatically on the hour."
-            if [ "${FORCE_RECONFIG}" == '1' ]; then
-                IRC_MSG="Reconfig has finished in ${RECONFIG_ELAPSED_DISPLAY}. See http://bit.ly/reconfigs for details."
-            fi
-            # Failing to update IRC is non-fatal.
-            ./update_irc.sh "${IRC_MSG}" || true
-        fi
-    fi
-
-    echo "  * Summary of changes:"
-    cat "${RECONFIG_DIR}/${RECONFIG_UPDATE_FILE}" | sed 's/^/        /'
-fi
-
-echo "  * Directory '${RECONFIG_DIR}' contains artifacts from reconfig process"
-
-STOP_TIME="$(date +%s)"
-ELAPSED_TIME=$((STOP_TIME - START_TIME))
-ELAPSED_DISPLAY="$(show_time "${ELAPSED_TIME}")"
-echo "  * Finish timestamp: ${STOP_TIME}"
-echo "  * Time taken: ${ELAPSED_DISPLAY}"
deleted file mode 100755
--- a/buildfarm/maintenance/get_instance_metadata.py
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env python
-"teeny helper script to find and output instance metadata"
-import sys
-import os
-
-if sys.platform not in ('win32', 'cygwin'):
-    path = "/etc/instance_metadata.json"
-else:
-    path = r"C:\instance_metadata.json"
-
-if os.path.exists(path) and os.access(path, os.R_OK):
-    print open(path).read()
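[Editor's note] A consumer of this helper would typically parse the JSON rather than echo it; a minimal sketch (the key looked up is hypothetical):

    import json

    # Path taken from the script above (non-Windows case)
    with open("/etc/instance_metadata.json") as f:
        metadata = json.load(f)
    print(metadata.get("moz_instance_type"))   # hypothetical key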
deleted file mode 100755
--- a/buildfarm/maintenance/manage_masters.py
+++ /dev/null
@@ -1,216 +0,0 @@
-#!/usr/bin/env python
-
-import site
-from os import path
-import time
-from fabric.api import env
-from fabric.context_managers import settings
-from Crypto.Random import atfork
-
-site.addsitedir(
-    path.join(path.dirname(path.realpath(__file__)), "../../lib/python"))
-
-import util.fabric.actions
-
-
-def print_status(remaining, failed_masters):
-    if remaining:
-        print "=" * 30, "Remaining masters", "=" * 30
-        for m in remaining:
-            print m
-    if failed_masters:
-        print "=" * 30, "failed masters", "=" * 30
-        for m in failed_masters:
-            print m
-    print "=" * 80
-
-
-def run_action_on_master(action, master):
-    atfork()
-    try:
-        action_func = getattr(util.fabric.actions, "action_%s" % action)
-        with settings(host_string=master.get('ip_address', master['hostname'])):
-            action_func(master)
-            return True
-    except AttributeError:
-        print "[%s] %s action is not defined." % (master['hostname'], action)
-        return False
-    except:
-        import traceback
-        print "Failed to run", action, "on", master['name']
-        print traceback.format_exc()
-        return False
-
-if __name__ == '__main__':
-    import sys
-    import urllib
-    from optparse import OptionParser
-    import textwrap
-    try:
-        import simplejson as json
-    except ImportError:
-        import json
-
-    actions = []
-    action_module = util.fabric.actions
-    for a in action_module.get_actions():
-        actions.append(a)
-    actions.sort()
-    actions_with_help = '\n'.join(["  %s\n    %s" % (a,
-                                                     action_module.__dict__['action_%s' % a].__doc__)
-                                   for a in actions if
-                                   action_module.__dict__['action_%s' % a].__doc__])
-    action_without_help = ", ".join([a for a in actions if not
-                                     action_module.__dict__['action_%s' %
-                                                            a].__doc__])
-    action_help = actions_with_help
-    if action_without_help:
-        action_help += "\nOther supported actions:\n" + \
-            textwrap.TextWrapper(initial_indent='  ', subsequent_indent='    ').fill(action_without_help)
-    parser = OptionParser("""%%prog [options] action [action ...]
-
-Supported actions:
-%s""" % action_help)
-
-    parser.set_defaults(
-        hosts=[],
-        roles=[],
-        datacentre=[],
-        concurrency=1,
-        show_list=False,
-        all_masters=False,
-        ignored_roles=[],
-    )
-    parser.add_option("-f", "--master-file", dest="master_file",
-                      help="list/url of masters")
-    parser.add_option("-H", "--host", dest="hosts", action="append")
-    parser.add_option("-R", "--role", dest="roles", action="append")
-    parser.add_option("-M", "--match", dest="match", action="append",
-                      help="masters that match the term")
-    parser.add_option("-D", "--datacentre", dest="datacentre", action="append")
-    parser.add_option("-j", dest="concurrency", type="int")
-    parser.add_option("-l", dest="show_list", action="store_true",
-                      help="list hosts")
-    parser.add_option("--all", dest="all_masters", action="store_true",
-                      help="work on all masters, not just enabled ones")
-    parser.add_option("-i", dest="status_interval", type="int", default="60",
-                      help="Interval between statuses")
-    parser.add_option("-u", "--username", dest="username", default="cltbld",
-                      help="Username passed to Fabric")
-    parser.add_option("-k", "--ssh-key", dest="ssh_key",
-                      help="SSH key passed to Fabric")
-    parser.add_option("--ignore-role", dest="ignored_roles", action="append",
-                      help="Ignore masters with this role. May be passed multiple times.")
-
-    options, actions = parser.parse_args()
-
-    if options.concurrency > 1:
-        import multiprocessing
-
-    if not options.master_file:
-        parser.error("master-file is required")
-
-    if not actions and not options.show_list:
-        parser.error("at least one action is required")
-
-    # Load master data
-    all_masters = json.load(urllib.urlopen(options.master_file))
-
-    masters = []
-
-    for m in all_masters:
-        if not m['enabled'] and not options.all_masters:
-            continue
-        if options.ignored_roles and m['role'] in options.ignored_roles:
-            continue
-        if options.datacentre and m['datacentre'] not in options.datacentre:
-            continue
-        if m['name'] in options.hosts:
-            masters.append(m)
-        elif m['role'] in options.roles:
-            masters.append(m)
-        elif options.match:
-            for match in options.match:
-                if match in m["name"]:
-                    masters.append(m)
-        elif 'all' in options.hosts or 'all' in options.roles:
-            masters.append(m)
-
-    if options.show_list:
-        if len(masters) == 0:
-            masters = [m for m in all_masters if m['enabled'] or
-                       options.all_masters]
-
-        fmt = "%(role)-9s %(name)-14s %(hostname)s:%(basedir)s"
-        print fmt % dict(role='role', name='name', hostname='hostname',
-                         basedir='basedir')
-        for m in masters:
-            print fmt % m
-        sys.exit(0)
-
-    if len(masters) == 0:
-        parser.error("No masters matched, check your options -H, -R, -M, -D")
-
-    env.user = options.username
-    if options.ssh_key:
-        env.key_filename = options.ssh_key
-    selected_masters = masters
-    for action in actions:
-        if hasattr(getattr(action_module, "action_%s" % action, None), 'per_host'):
-            hosts = set(m['hostname'] for m in masters)
-            masters = [dict(hostname=h) for h in hosts]
-        else:
-            masters = selected_masters
-
-        try:
-            action_header = getattr(action_module, action + "_header")
-            action_header()
-        except AttributeError:
-            # we don't care if there's no header to print
-            pass
-
-        if options.concurrency == 1:
-            for master in masters:
-                run_action_on_master(action, master)
-        else:
-            # Don't prompt for passwords when forking
-            env.abort_on_prompts = True
-
-            p = multiprocessing.Pool(processes=options.concurrency)
-            results = []
-            for master in masters:
-                result = p.apply_async(run_action_on_master, (action, master))
-                results.append((master, result))
-            p.close()
-            failed = False
-            failed_masters = []
-            tries = 0
-            while True:
-                for master, result in list(results):
-                    if result.ready():
-                        results.remove((master, result))
-                        if not result.get():
-                            failed_masters.append(master['name'])
-                            print master['name'], "FAILED"
-                            failed = True
-                tries += 1
-                if not results:
-                    break
-                if tries % options.status_interval == 0:
-                    print_status([m['name'] for (m, r) in results],
-                                 failed_masters)
-                time.sleep(1)
-
-            p.join()
-            # One final print before we exit, to be sure that results are not
-            # missed
-            print_status([m['name'] for (m, r) in results],
-                         failed_masters)
-            if failed:
-                # failure info may have scrolled, so repeat here
-                print "Action '%s' failed on %d masters:\n%s\n" \
-                    % (action, len(failed_masters),
-                       textwrap.TextWrapper(initial_indent='  ',
-                                            subsequent_indent='  ').fill(", ".join(
-                                                                         failed_masters)))
-                sys.exit(1)
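[Editor's note] The concurrency branch above is a generic fan-out/poll loop: one apply_async() per master, then poll the AsyncResult objects and collect failures. The same shape in miniature, with a stubbed action and hypothetical master names:

    import multiprocessing
    import time

    def run_action(name):
        # Stand-in for run_action_on_master(); return False to signal failure.
        time.sleep(1)
        return name != "bm02"   # pretend one master fails

    if __name__ == "__main__":
        pool = multiprocessing.Pool(processes=4)
        results = [(n, pool.apply_async(run_action, (n,)))
                   for n in ("bm01", "bm02", "bm03")]   # hypothetical masters
        pool.close()
        failed = []
        while results:
            for name, result in list(results):
                if result.ready():
                    results.remove((name, result))
                    if not result.get():
                        failed.append(name)
            time.sleep(1)
        pool.join()
        print("failed: %s" % ", ".join(failed) if failed else "all masters passed")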
deleted file mode 100644
--- a/buildfarm/maintenance/master_cleanup.py
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/usr/bin/python
-"""Script to clean up buildbot master directories"""
-import os
-from cPickle import load
-
-
-def maybe_delete(filename, timestamp):
-    """Delete filename if it's older than timestamp"""
-    try:
-        if os.path.getmtime(filename) < timestamp:
-            os.unlink(filename)
-    except OSError:
-        # Ignore this error.  The file may have already been removed.
-        # We'll get it next time!
-        pass
-
-
-def clean_dir(dirname, timestamp):
-    """Delete old twisted log files, and old builder files from dirname"""
-    # Clean up files older than timestamp
-    files = os.listdir(dirname)
-    # Walk builder directories and delete their old build pickles
-    for f in files:
-        p = os.path.join(dirname, f)
-        if os.path.isdir(p):
-            builder_file = os.path.join(p, "builder")
-            # Don't clean out non-builder directories
-            if not os.path.exists(builder_file):
-                continue
-
-            try:
-                builder = load(open(builder_file))
-            except:
-                continue
-
-            # Don't clean out release builders
-            if builder and builder.category and \
-                    builder.category.find('release') > -1:
-                continue
-
-            for build in os.listdir(p):
-                # Don't delete the 'builder' file
-                if build == "builder":
-                    continue
-                build = os.path.join(p, build)
-                if os.path.isfile(build):
-                    maybe_delete(build, timestamp)
-
-if __name__ == "__main__":
-    import time
-    from optparse import OptionParser
-    parser = OptionParser()
-    parser.add_option("-t", "--time", dest="time", type="int",
-                      help="time, in days, for how old files have to be before being deleted",
-                      default=4)
-
-    options, args = parser.parse_args()
-
-    if len(args) == 0:
-        parser.error("Must specify at least one directory to clean up")
-
-    for d in args:
-        clean_dir(d, time.time() - options.time * 24 * 3600)
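[Editor's note] The deletion policy above reduces to "unlink regular files older than a cutoff mtime, tolerating races". That core in isolation (the path in the usage comment is hypothetical):

    import os
    import time

    def delete_older_than(dirname, days):
        cutoff = time.time() - days * 24 * 3600
        for name in os.listdir(dirname):
            path = os.path.join(dirname, name)
            try:
                if os.path.isfile(path) and os.path.getmtime(path) < cutoff:
                    os.unlink(path)
            except OSError:
                pass   # file may have been removed already, as in maybe_delete()

    # delete_older_than("/builds/master/some-builder", days=4)   # hypothetical path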
deleted file mode 100755
--- a/buildfarm/maintenance/maybe_reconfig.sh
+++ /dev/null
@@ -1,149 +0,0 @@
-#!/bin/bash
-
-function usage () {
-    echo
-    echo "This script is typically used via cron on a buildbot master."
-    echo "It updates the buildbot-configs and buildbotcustom repos on disk,"
-    echo "and then kicks off a reconfig if the production(-0.8) tag for"
-    echo "either repo has moved. It also updates the tools repo on disk."
-    echo
-    echo "USAGE"
-    echo "    $(basename "${0}") /path/to/buildbot-master"
-    echo
-}
-
-function reconfig_needed () {
-    is_reconfig_needed=1
-    for d in buildbotcustom buildbot-configs tools; do
-        tag="production"
-        original_production_rev=""
-        current_production_rev=""
-        if [ "${d}" == "buildbotcustom" ]; then
-            tag="production-0.8"
-        elif [ "${d}" == "tools" ]; then
-            tag="default"
-        fi
-        # Find the current production rev
-        original_production_rev=$(${HG} -R "${d}" heads ${tag} -T '{node}\n')
-        timeout 5m ${HG} -R "${d}" pull > /dev/null
-        if [ "$?" != "0" ]; then
-            error_msg="hg pull failed for ${d}"
-            log_error "${error_msg}"
-            exit 6
-        fi
-        current_production_rev=$(${HG} -R "${d}" heads ${tag} -T '{node}\n')
-        if [ "${original_production_rev}" != "${current_production_rev}" ]; then
-            log_info "${d}: ${tag} tag has moved - old rev: ${original_production_rev}; new rev: ${current_production_rev}"
-            timeout 5m ${HG} -R "${d}" update -r ${tag} >> ${LOGFILE}
-            if [ "$?" != "0" ]; then
-                error_msg="hg update failed for ${d} with tag ${tag}"
-                log_error "${error_msg}"
-                exit 7
-            fi
-            # Changes to the tools repo don't trigger a reconfig.
-            if [ "${d}" != "tools" ]; then
-                is_reconfig_needed=0
-            fi
-        else
-            log_info "${d}: ${tag} tag is unchanged - rev: ${original_production_rev}"
-        fi
-    done
-    return ${is_reconfig_needed}
-}
-
-function finish () {
-    STOP_TIME="$(date +%s)"
-    ELAPSED_TIME=$((STOP_TIME - START_TIME))
-    ELAPSED_DISPLAY="$(show_time "${ELAPSED_TIME}")"
-    log_info "Elapsed: ${ELAPSED_DISPLAY}"
-    log_info "=================================================="
-    rm -f ${LOCKFILE}
-}
-
-MASTER_DIR=$1
-START_TIME="$(date +%s)"
-HG=/usr/local/bin/hg
-
-if [ "${MASTER_DIR}" == "" ]; then
-    usage >&2
-    exit 1
-fi
-
-if [ ! -d ${MASTER_DIR} ]; then
-    echo "ERROR: Master dir ${MASTER_DIR} not found." >&2
-    exit 2
-fi
-
-# Include our shared function library
-. ${MASTER_DIR}/tools/lib/shell/functions
-
-# Check to see if a reconfig is already in progress. Bail if one is.
-LOCKFILE=${MASTER_DIR}/reconfig.lock
-LOGFILE=${MASTER_DIR}/reconfig.log
-
-# Check whether we should rotate our log file.
-if [ -e ${LOGFILE} ]; then
-    /usr/sbin/logrotate -s /tmp/reconfig-logrotate.status ${MASTER_DIR}/tools/buildfarm/maintenance/reconfig-logrotate.conf
-fi
-
-log_info "Checking whether we need to reconfig..."
-
-OUTPUT=`lockfile -5 -r 1 ${LOCKFILE} 2>&1`
-LOCKFILE_MAX_AGE=120 # Reconfigs should not take more than 2 hours (120 min)
-if [ "${OUTPUT}" != "" ]; then
-    if [ -e ${LOCKFILE} ]; then
-        # Check the age of the lockfile.
-        if [ "$(find ${LOCKFILE} -mmin +${LOCKFILE_MAX_AGE})" ]; then
-            # If the lockfile is older than is acceptable, log the error, but also print
-            # the error to STDOUT so cronmail will trigger.
-            error_msg="Reconfig lockfile is older than ${LOCKFILE_MAX_AGE} minutes."
-            log_error "${error_msg}"
-        else
-            # This is an acceptable state.
-            log_info "Lockfile ${LOCKFILE} exists. Assuming reconfig is still in process. Exiting."
-            exit 0
-        fi
-    else
-        log_error "lockfile output: $OUTPUT"
-    fi
-    exit 3
-fi
-trap finish EXIT
-
-# Main
-pushd ${MASTER_DIR} > /dev/null
-
-# Activate our venv
-if [ ! -e bin/activate ]; then
-    log_error "activate script not found: are you sure this is a buildbot venv?"
-    exit 4
-fi
-source bin/activate
-
-if reconfig_needed; then
-    # We append the START_TIME to the reconfig milestone messages to make it easier to match up milestones
-    # (start/finish) from a single reconfig event.
-    log_info "--------------------------------------------------------------"
-    log_info " Begin updating master_config.json"
-    python ./buildbot-configs/update-master-json.py "${MASTER_DIR}/tools/buildfarm/maintenance/production-masters.json" ${MASTER_DIR}/master/master_config.json >> ${LOGFILE} 2>&1
-    log_info "Starting reconfig. - ${START_TIME}"
-    make checkconfig >> ${LOGFILE} 2>&1
-    RC=$?
-    if [ "${RC}" == "0" ]; then
-        python ./tools/buildfarm/maintenance/buildbot-wrangler.py reconfig ${MASTER_DIR}/master >> ${LOGFILE}
-        RC=$?
-    else
-        log_error "Checkconfig failed."
-    fi
-    if [ "${RC}" == "0" ]; then
-        log_info "Reconfig completed successfuly. - ${START_TIME}"
-        echo "Reconfig completed successfuly. - ${START_TIME}"
-    else
-        log_error "Reconfig failed. - ${START_TIME}"
-        exit 5
-    fi
-else
-    log_info "No reconfig required."
-fi
-
-exit 0
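[Editor's note] The trigger condition above is "did the tag's head move across a pull". A rough Python equivalent of reconfig_needed()'s per-repo check, assuming hg is on PATH (the clone path and trigger_reconfig() are hypothetical):

    import subprocess

    def head_rev(repo, tag):
        # Equivalent of: hg -R <repo> heads <tag> -T '{node}\n'
        return subprocess.check_output(
            ["hg", "-R", repo, "heads", tag, "-T", "{node}\n"]).strip()

    def tag_moved(repo, tag="production"):
        before = head_rev(repo, tag)
        subprocess.check_call(["hg", "-R", repo, "pull"])
        return head_rev(repo, tag) != before

    # if tag_moved("/builds/buildbot-configs"):   # hypothetical clone
    #     trigger_reconfig()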
deleted file mode 100755
--- a/buildfarm/maintenance/normalize-masters.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/python
-import simplejson
-import sys
-import os
-
-fn = sys.argv[1]
-data = simplejson.load(open(fn))
-
-data.sort(key=lambda master: master['name'])
-
-os.rename(fn, '%s.bak' % fn)
-
-open(fn, 'w').write(simplejson.dumps(data, indent=2, sort_keys=True))
deleted file mode 100644
--- a/buildfarm/maintenance/print_waits.py
+++ /dev/null
@@ -1,154 +0,0 @@
-import cPickle
-import os
-import re
-import math
-import time
-
-
-def format_hist(h, units=1):
-    retval = []
-    if not h:
-        h[0] = 0
-    total = sum(h.values())
-    keys = sorted(h.keys())
-    min_key = min(keys)
-    max_key = max(keys)
-
-    for i in range(min_key, max_key + 1):
-        n = h.get(i, 0)
-        if total > 0:
-            percentage = " %8.2f%%" % (n * 100. / total)
-        else:
-            percentage = ''
-
-        retval.append("%3i: %8i%s"
-                      % (i * units, n, percentage))
-    return "\n".join(retval)
-
-
-def scan_builder(builder, starttime, endtime, minutes_per_block, times, change_as_submittime=True):
-    """Scans the build pickle files in the builder directory, and updates the dictionary `times`."""
-    if not os.path.exists(builder):
-        return
-    for f in os.listdir(builder):
-        if re.match("^\d+$", f):
-            try:
-                b = cPickle.load(open('%s/%s' % (builder, f)))
-            except:
-                continue
-
-            if 'rebuild' in b.reason:
-                # Skip rebuilds, they mess up the wait times
-                continue
-
-            if change_as_submittime:
-                if len(b.changes) == 0:
-                    continue
-                submittime = b.changes[0].when
-            else:
-                submittime = b.requests[0].submittedAt
-
-            if starttime < submittime < endtime:
-                w = int(math.floor(
-                    (b.started - submittime) / (minutes_per_block * 60.0)))
-                times[w] = times.get(w, 0) + 1
-
-if __name__ == "__main__":
-    from optparse import OptionParser
-    parser = OptionParser()
-    parser.set_defaults(
-        minutes_per_block=15,
-        change_as_submittime=True,
-        name=os.uname()[1],
-        builders={},
-        directory=None,
-        starttime=time.time() - 24 * 3600,
-        endtime=time.time(),
-    )
-
-    def add_builder(option, opt_str, value, parser, *args, **kwargs):
-        if ":" in value:
-            platform, builders = value.split(":", 1)
-            builders = [b.strip() for b in builders.split(",")]
-        else:
-            platform = value
-            builders = [platform]
-
-        if platform not in parser.values.builders:
-            parser.values.builders[platform] = []
-
-        parser.values.builders[platform].extend(builders)
-
-    parser.add_option("-m", "--minutes-per-block", type="int", help="How many minutes per block", dest="minutes_per_block")
-    parser.add_option("-r", "--request-as-submittime",
-                      action="store_false", dest="change_as_submittime")
-    parser.add_option("-n", "--name", dest="name")
-    parser.add_option("-b", "--builders", dest="builders", action="callback", nargs=1, type="string", callback=add_builder, help="platform:builder1,builder2,...")
-    parser.add_option("-d", "--directory", dest="directory")
-    parser.add_option("-s", "--start-time", dest="starttime", type="int")
-    parser.add_option("-e", "--end-time", dest="endtime", type="int")
-    parser.add_option("-a", "--address", dest="addresses", action="append")
-    parser.add_option("-S", "--smtp", dest="smtp")
-    parser.add_option("-f", "--from", dest="sender")
-
-    options, args = parser.parse_args()
-
-    if not options.builders:
-        parser.error("Must specify some builders")
-
-    if options.directory:
-        os.chdir(options.directory)
-
-    text = []
-    text.append("Wait time report for %s for jobs submitted since %s\n"
-                % (options.name, time.ctime(options.starttime)))
-
-    hist = {}
-    for platform, builders in options.builders.items():
-        hist[platform] = {}
-        for builder in builders:
-            scan_builder(builder, options.starttime, options.endtime,
-                         options.minutes_per_block, hist[platform],
-                         options.change_as_submittime)
-
-    allhist = {}
-    for i in set([x for y in hist.keys() for x in hist[y]]):
-        allhist[i] = sum([p.get(i, 0) for p in hist.values()])
-    total = sum(allhist.values())
-    text.append("Total Jobs: %i\n" % total)
-    text.append("Wait Times")
-    text.append(format_hist(allhist, options.minutes_per_block))
-    text.append("\nPlatform break down\n")
-    for platform, builders in options.builders.items():
-        text.append("%s: %i" % (platform, sum(hist[platform].values())))
-        text.append(format_hist(hist[platform], options.minutes_per_block))
-        text.append("\n")
-
-    text.append("The number on the left is how many minutes a build waited to start, rounded down")
-
-    if not options.addresses:
-        print "\n".join(text)
-    else:
-        import smtplib
-        import sys
-
-        zero_wait = 0
-        if total > 0:
-            zero_wait = allhist.get(0, 0) * 100. / total
-
-        subject = "Wait: %i/%.2f%% (%s)" % (total, zero_wait, options.name)
-        server = options.smtp or 'localhost'
-        sender = options.sender or 'cltbld@build.mozilla.com'
-
-        headers = []
-        headers.append("Subject: %s" % subject)
-        headers.append("From: %s" % sender)
-        headers.append("To: %s" % (", ".join(options.addresses)))
-        message = "\n".join(headers) + "\n" + "\n".join(text)
-
-        try:
-            smtp = smtplib.SMTP(server)
-            smtp.sendmail(sender, options.addresses, message)
-        except smtplib.SMTPException:
-            print "Error: unable to send email"
-            sys.exit(1)
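[Editor's note] The histogram above buckets each job by flooring its wait into fixed-size blocks of minutes. The bucketing step on its own, with made-up sample times:

    import math

    def bucket(submit_time, start_time, minutes_per_block=15):
        # Index of the block this wait falls into, rounded down.
        return int(math.floor(
            (start_time - submit_time) / (minutes_per_block * 60.0)))

    waits = {}
    for submitted, started in [(0, 300), (0, 600), (0, 5400)]:   # hypothetical samples
        b = bucket(submitted, started)
        waits[b] = waits.get(b, 0) + 1
    print(waits)   # {0: 2, 6: 1}: two jobs started within 15 minutes, one waited ~90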
deleted file mode 100755
--- a/buildfarm/maintenance/process_commit_comments.py
+++ /dev/null
@@ -1,152 +0,0 @@
-#!/usr/bin/env python
-"""%prog
-
-Iterates over a series of commit logs generated from hg merge operations,
-collating bug numbers, and generating links to all commits. Formats the
-output for insertion into the Maintenance wiki:
-
-https://wiki.mozilla.org/ReleaseEngineering/Maintenance
-"""
-
-import glob
-import os
-import re
-import requests
-
-found_commits = {}
-unique_bugs = {}
-
-def get_revision_url_for_tag(repo_basedir, repo_name, tag='production'):
-    """Find the corresponding revision for a given tag in a local repo,
-    and return a web URL for that revision.
-    """
-    base_hg_url = "https://hg.mozilla.org/build/%s/rev/" % repo_name
-    try:
-        from mercurial import hg, ui
-        repo = hg.repository(ui.ui(), os.path.join(repo_basedir, repo_name))
-        return base_hg_url + str(repo[tag])
-    except:
-        return base_hg_url + tag
-
-def clean_commit_entry(entry):
-    """Remove common preceding/trailing characters from commit messages.
-    """
-    return entry.strip(' .,-:;')
-
-def collate_merge_previews(logdir):
-    """Iterate over commit logs to create a data structure containing
-    the commit information.
-    """
-    repo_p = re.compile('^(.*)_preview_changes\.txt$')
-    changeset_p = re.compile('changeset:\s+\d+:(.*)$')
-    summary_p = re.compile('summary:\s+(.*)$')
-    bug_p = re.compile('[Bb]ug\s*(\d+)[\s\-\.\,]*(.*)$')
-    review_p = re.compile('(.*)[\s\-\.\,]*(r=.*)$')
-    for preview_file in glob.glob(os.path.join(logdir, '*_preview_changes.txt')):
-        repo = ""
-        m = repo_p.match(os.path.basename(preview_file))
-        if m:
-            repo = m.group(1)
-        else:
-            print "Error determining repo"
-
-        revision = ""
-        with open(preview_file) as f:
-            for line in f:
-                m = changeset_p.match(line)
-                if m:
-                    revision = m.group(1)
-                    continue
-                m = summary_p.match(line)
-                bug_number = 'None'
-                summary = 'None'
-                review = 'None'
-                if m:
-                    commit_message = m.group(1)
-                    bug_m = bug_p.match(commit_message)
-                    if bug_m:
-                        bug_number = bug_m.group(1)
-                        if not unique_bugs.has_key(bug_number):
-                            unique_bugs[bug_number] = 1
-                        summary = bug_m.group(2)
-                    else:
-                        summary = commit_message
-                    review_m = review_p.match(summary)
-                    if review_m:
-                        summary = review_m.group(1)
-                        review = review_m.group(2)
-                    if not found_commits.has_key(repo):
-                        found_commits[repo] = {}
-                    if not found_commits[repo].has_key(bug_number):
-                        found_commits[repo][bug_number] = []
-                    found_commits[repo][bug_number].append({'revision': revision,
-                                                            'summary': clean_commit_entry(summary),
-                                                            'review': clean_commit_entry(review)})
-
-def process_results(update_bugzilla, wiki_markup_file, logdir):
-    """Write our formatted commit data out to a file.
-    """
-    if not found_commits:
-        print "  * No commits found; nothing to do"
-        return
-
-    if wiki_markup_file:
-        f = open(wiki_markup_file,"w")
-        f.write('<div style="border: thin grey solid; background-color: lightgrey; float: right; text-align: right; text-size: 80%; padding-left: 5px; padding-right: 5px;">\n')
-        f.write('[https://bugzilla.mozilla.org/buglist.cgi?bug_id=' + \
-                ','.join(sorted(unique_bugs)) + \
-                '&query_format=advanced&order=bug_status%2Cbug_id&tweak=1 ' +\
-                'View list in Bugzilla]\n')
-        f.write('</div>\n')
-    for repo in sorted(found_commits):
-        if wiki_markup_file:
-            f.write('[https://hg.mozilla.org/build/%s %s]\n' % (repo, repo))
-        for bug_number in sorted(found_commits[repo]):
-            bug_link = '{{bug|%s}}' % bug_number
-            if bug_number == 'None':
-                bug_link = 'No bug'
-            for commit in found_commits[repo][bug_number]:
-                hg_changeset_url = 'https://hg.mozilla.org/build/%s/rev/%s' % (repo,
-                                                                               commit['revision'])
-                if wiki_markup_file:
-                    review_display = ' - %s' % commit['review']
-                    if commit['review'] == 'None':
-                        review_display = ''
-                    f.write('* %s - %s%s ([%s %s])\n' % (bug_link,
-                                                        commit['summary'],
-                                                        review_display,
-                                                        hg_changeset_url,
-                                                        commit['revision']))
-                if update_bugzilla:
-                    comment = 'In production: %s' % hg_changeset_url
-                    if repo == "mozharness":
-                        comment = 'mozharness production tag moved to: %s' % get_revision_url_for_tag(logdir, 'mozharness', tag='production')
-                    payload = {'comment': comment,
-                               'login': os.environ['BUGZILLA_USERNAME'],
-                               'password': os.environ['BUGZILLA_PASSWORD']}
-                    requests.post('https://bugzilla.mozilla.org/rest/bug/%s/comment' % bug_number, data=payload)
-                    print "  * curl --data-urlencode 'comment=In production: %s' --data-urlencode 'login=%s' --data-urlencode 'password=XXXXX' 'https://bugzilla.mozilla.org/rest/bug/%s/comment'" % (hg_changeset_url, os.environ['BUGZILLA_USERNAME'], bug_number)
-
-    if wiki_markup_file:
-        f.close()
-
-if __name__ == '__main__':
-    import argparse
-    import sys
-
-    parser = argparse.ArgumentParser(description='Collate and format commit messages for Maintenance wiki.')
-    parser.add_argument("-l", "--logdir", dest="logdir", default='.',
-                        help="directory containing merge preview output")
-    parser.add_argument("-b", "--update-bugzilla", action="store_true", dest="update_bugzilla", help="post comments on landed bugs in Bugzilla")
-    parser.add_argument("-w", "--wiki-markup-file", action="store", dest="wiki_markup_file", help="file to write wiki markup to", default=None)
-
-    args = parser.parse_args()
-
-    if not os.path.isdir(args.logdir):
-        print "ERROR: Log directory specified ('%s') does not exist. Exiting..." % args.logdir
-        sys.exit(1)
-
-    collate_merge_previews(args.logdir)
-    process_results(update_bugzilla=args.update_bugzilla,
-                    wiki_markup_file=args.wiki_markup_file,
-                    logdir=args.logdir)
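
The Bugzilla integration above was the script's only external side effect: a plain POST of a comment to the REST API, with credentials pulled from the environment. For reference, a minimal standalone sketch of that call (the helper name and bug number are hypothetical; the endpoint and payload shape match the code above):

    import os
    import requests

    def post_production_comment(bug_number, changeset_url):
        # Same endpoint and payload shape as the removed script; credentials
        # come from the environment, exactly as above.
        payload = {
            'comment': 'In production: %s' % changeset_url,
            'login': os.environ['BUGZILLA_USERNAME'],
            'password': os.environ['BUGZILLA_PASSWORD'],
        }
        resp = requests.post(
            'https://bugzilla.mozilla.org/rest/bug/%s/comment' % bug_number,
            data=payload)
        resp.raise_for_status()  # unlike the original, fail loudly on HTTP errors
        return resp.json()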
deleted file mode 100644
--- a/buildfarm/maintenance/production-branches.json
+++ /dev/null
@@ -1,367 +0,0 @@
-{
-  "ash": {
-    "repo": "https://hg.mozilla.org/projects/ash",
-    "repo_type": "hg",
-    "access": "scm_level_2",
-    "trust_domain": "gecko",
-    "features": {
-        "buildbot": true,
-        "taskcluster-docker-routes-v1": true,
-        "taskcluster-docker-routes-v2": true,
-        "taskcluster-cron": true,
-        "taskcluster-push": true
-    }
-  },
-  "mozilla-release": {
-    "repo": "https://hg.mozilla.org/releases/mozilla-release",
-    "repo_type": "hg",
-    "access": "scm_level_3",
-    "trust_domain": "gecko",
-    "features": {
-        "buildbot": true,
-        "taskcluster-docker-routes-v1": true,
-        "taskcluster-docker-routes-v2": true,
-        "taskcluster-cron": true,
-        "taskcluster-push": true
-    }
-  },
-  "mozilla-esr52": {
-    "repo": "https://hg.mozilla.org/releases/mozilla-esr52",
-    "repo_type": "hg",
-    "access": "scm_level_3",
-    "trust_domain": "gecko",
-    "features": {
-        "buildbot": true,
-        "taskcluster-docker-routes-v1": true,
-        "taskcluster-docker-routes-v2": true,
-        "taskcluster-cron": true,
-        "taskcluster-push": true
-    }
-  },
-  "comm-esr52": {
-    "repo": "https://hg.mozilla.org/releases/comm-esr52",
-    "repo_type": "hg",
-    "access": "scm_level_3",
-    "trust_domain": "comm",
-    "features": {
-        "taskcluster-cron": false,
-        "taskcluster-push": false
-    }
-  },
-  "oak": {
-    "repo": "https://hg.mozilla.org/projects/oak",
-    "repo_type": "hg",
-    "access": "scm_level_3",
-    "trust_domain": "gecko",
-    "features": {
-        "buildbot": true,
-        "taskcluster-docker-routes-v1": true,
-        "taskcluster-docker-routes-v2": true,
-        "taskcluster-cron": true,
-        "taskcluster-push": true
-    }
-  },
-  "pine": {
-    "repo": "https://hg.mozilla.org/projects/pine",
-    "repo_type": "hg",
-    "access": "scm_level_2",
-    "trust_domain": "gecko",
-    "features": {
-        "buildbot": true,
-        "taskcluster-docker-routes-v1": true,
-        "taskcluster-docker-routes-v2": true,
-        "taskcluster-cron": true,
-        "taskcluster-push": true
-    }
-  },
-  "mozilla-beta": {
-    "repo": "https://hg.mozilla.org/releases/mozilla-beta",
-    "repo_type": "hg",
-    "access": "scm_level_3",
-    "trust_domain": "gecko",
-    "features": {
-        "buildbot": true,
-        "taskcluster-docker-routes-v1": true,
-        "taskcluster-docker-routes-v2": true,
-        "taskcluster-cron": true,
-        "taskcluster-push": true
-    }
-  },
-  "comm-beta": {
-    "repo": "https://hg.mozilla.org/releases/comm-beta",
-    "repo_type": "hg",
-    "access": "scm_level_3",
-    "trust_domain": "comm",
-    "features": {
-        "taskcluster-cron": false,
-        "taskcluster-push": false
-    }
-  },
-  "mozilla-inbound": {
-    "repo": "https://hg.mozilla.org/integration/mozilla-inbound",
-    "repo_type": "hg",
-    "access": "scm_level_3",
-    "trust_domain": "gecko",
-    "features": {
-        "buildbot": true,
-        "taskcluster-docker-routes-v1": true,
-        "taskcluster-docker-routes-v2": true,
-        "is-trunk": true,
-        "taskcluster-cron": true,
-        "taskcluster-push": true
-    }
-  },
-  "mozilla-central": {
-    "repo": "https://hg.mozilla.org/mozilla-central",
-    "repo_type": "hg",
-    "access": "scm_level_3",
-    "trust_domain": "gecko",
-    "features": {
-        "buildbot": true,
-        "taskcluster-docker-routes-v1": true,
-        "taskcluster-docker-routes-v2": true,
-        "is-trunk": true,
-        "taskcluster-cron": true,
-        "taskcluster-push": true
-    }
-  },
-  "comm-central": {
-    "repo": "https://hg.mozilla.org/comm-central",
-    "repo_type": "hg",
-    "access": "scm_level_3",
-    "trust_domain": "comm",
-    "features": {
-        "taskcluster-cron": false,
-        "taskcluster-push": true
-    }
-  },
-  "alder": {
-    "repo": "https://hg.mozilla.org/projects/alder",
-    "repo_type": "hg",
-    "access": "scm_level_2",
-    "trust_domain": "gecko",
-    "features": {
-        "buildbot": true,
-        "taskcluster-docker-routes-v1": true,
-        "taskcluster-docker-routes-v2": true,
-        "taskcluster-cron": true,
-        "taskcluster-push": true
-    }
-  },
-  "cedar": {
-    "repo": "https://hg.mozilla.org/projects/cedar",
-    "repo_type": "hg",
-    "access": "scm_level_2",
-    "trust_domain": "gecko",
-    "features": {
-        "buildbot": true,
-        "taskcluster-docker-routes-v1": true,
-        "taskcluster-docker-routes-v2": true,
-        "taskcluster-cron": true,
-        "taskcluster-push": true
-    }
-  },
-  "holly": {
-    "repo": "https://hg.mozilla.org/projects/holly",
-    "repo_type": "hg",
-    "access": "scm_level_2",
-    "trust_domain": "gecko",
-    "features": {
-        "buildbot": true,
-        "taskcluster-docker-routes-v1": true,
-        "taskcluster-docker-routes-v2": true,
-        "taskcluster-cron": true,
-        "taskcluster-push": true
-    }
-  },
-  "birch": {
-    "repo": "https://hg.mozilla.org/projects/birch",
-    "repo_type": "hg",
-    "access": "scm_level_3",
-    "trust_domain": "gecko",
-    "features": {
-        "buildbot": true,
-        "taskcluster-docker-routes-v1": true,
-        "taskcluster-docker-routes-v2": true,
-        "taskcluster-cron": true,
-        "taskcluster-push": true
-    }
-  },
-  "cypress": {
-    "repo": "https://hg.mozilla.org/projects/cypress",
-    "repo_type": "hg",
-    "access": "scm_level_2",
-    "trust_domain": "comm",
-    "features": {
-        "taskcluster-cron": true,
-        "taskcluster-push": true
-    }
-  },
-  "ux": {
-    "repo": "https://hg.mozilla.org/projects/ux",
-    "repo_type": "hg",
-    "access": "scm_level_3",
-    "trust_domain": "gecko",
-    "features": {
-        "buildbot": true,
-        "taskcluster-docker-routes-v1": true,
-        "taskcluster-docker-routes-v2": true,
-        "taskcluster-cron": false,
-        "taskcluster-push": false
-    }
-  },
-  "elm": {
-    "repo": "https://hg.mozilla.org/projects/elm",
-    "repo_type": "hg",
-    "access": "scm_level_2",
-    "trust_domain": "gecko",
-    "features": {
-        "buildbot": true,
-        "taskcluster-docker-routes-v1": true,
-        "taskcluster-docker-routes-v2": true,
-        "taskcluster-cron": true,
-        "taskcluster-push": true
-    }
-  },
-  "graphics": {
-    "repo": "https://hg.mozilla.org/projects/graphics",
-    "repo_type": "hg",
-    "access": "scm_level_2",
-    "trust_domain": "gecko",
-    "features": {
-        "buildbot": true,
-        "taskcluster-docker-routes-v1": true,
-        "taskcluster-docker-routes-v2": true,
-        "taskcluster-cron": true,
-        "taskcluster-push": true
-    }
-  },
-  "try": {
-    "repo": "https://hg.mozilla.org/try",
-    "repo_type": "hg",
-    "access": "scm_level_1",
-    "trust_domain": "gecko",
-    "features": {
-        "buildbot": true,
-        "taskcluster-docker-routes-v1": true,
-        "taskcluster-docker-routes-v2": true,
-        "taskcluster-cron": false,
-        "taskcluster-push": true
-    }
-  },
-  "stylo-try": {
-    "repo": "https://hg.mozilla.org/incubator/stylo-try",
-    "repo_type": "hg",
-    "access": "scm_level_1",
-    "trust_domain": "gecko",
-    "features": {
-        "buildbot": true,
-        "taskcluster-docker-routes-v1": true,
-        "taskcluster-docker-routes-v2": true,
-        "taskcluster-cron": false,
-        "taskcluster-push": true
-    }
-  },
-  "try-comm-central": {
-    "repo": "https://hg.mozilla.org/try-comm-central",
-    "repo_type": "hg",
-    "access": "scm_level_1",
-    "trust_domain": "comm",
-    "features": {
-        "taskcluster-cron": false,
-        "taskcluster-push": true
-    }
-  },
-  "larch": {
-    "repo": "https://hg.mozilla.org/projects/larch",
-    "repo_type": "hg",
-    "access": "scm_level_2",
-    "trust_domain": "gecko",
-    "features": {
-        "buildbot": true,
-        "taskcluster-docker-routes-v1": true,
-        "taskcluster-docker-routes-v2": true,
-        "taskcluster-cron": true,
-        "taskcluster-push": true
-    }
-  },
-  "maple": {
-    "repo": "https://hg.mozilla.org/projects/maple",
-    "repo_type": "hg",
-    "access": "scm_level_3",
-    "trust_domain": "gecko",
-    "features": {
-        "buildbot": true,
-        "taskcluster-docker-routes-v1": true,
-        "taskcluster-docker-routes-v2": true,
-        "taskcluster-cron": true,
-        "taskcluster-push": true
-    }
-  },
-  "fig": {
-    "repo": "https://hg.mozilla.org/projects/fig",
-    "repo_type": "hg",
-    "access": "scm_level_2",
-    "trust_domain": "gecko",
-    "features": {
-        "buildbot": true,
-        "taskcluster-docker-routes-v1": true,
-        "taskcluster-docker-routes-v2": true,
-        "taskcluster-cron": true,
-        "taskcluster-push": true
-    }
-  },
-  "gum": {
-    "repo": "https://hg.mozilla.org/projects/gum",
-    "repo_type": "hg",
-    "access": "scm_level_2",
-    "trust_domain": "gecko",
-    "features": {
-        "buildbot": true,
-        "taskcluster-docker-routes-v1": true,
-        "taskcluster-docker-routes-v2": true,
-        "taskcluster-cron": true,
-        "taskcluster-push": true
-    }
-  },
-  "jamun": {
-    "repo": "https://hg.mozilla.org/projects/jamun",
-    "repo_type": "hg",
-    "access": "scm_level_3",
-    "trust_domain": "gecko",
-    "features": {
-        "buildbot": true,
-        "taskcluster-docker-routes-v1": true,
-        "taskcluster-docker-routes-v2": true,
-        "taskcluster-cron": true,
-        "taskcluster-push": true
-    }
-  },
-   "autoland": {
-    "repo": "https://hg.mozilla.org/integration/autoland",
-    "repo_type": "hg",
-    "access": "scm_level_3",
-    "trust_domain": "gecko",
-    "features": {
-        "buildbot": true,
-        "taskcluster-docker-routes-v1": true,
-        "taskcluster-docker-routes-v2": true,
-        "is-trunk": true,
-        "taskcluster-cron": true,
-        "taskcluster-push": true
-    }
-  },
-  "stylo": {
-    "repo": "https://hg.mozilla.org/incubator/stylo",
-    "repo_type": "hg",
-    "access": "scm_level_2",
-    "trust_domain": "gecko",
-    "features": {
-        "buildbot": true,
-        "taskcluster-docker-routes-v1": true,
-        "taskcluster-docker-routes-v2": true,
-        "taskcluster-cron": false,
-        "taskcluster-push": true
-    }
-  }
-}
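
Each top-level key in the file above names a branch and maps it to its repo URL, repo type, SCM access level, trust domain, and a set of boolean feature flags. A minimal sketch of how a consumer might have queried it (the function name is hypothetical; it operates on the data exactly as shown):

    import json

    def branches_with_feature(path, feature):
        # Return the branch names whose given feature flag is true,
        # e.g. 'is-trunk' or 'taskcluster-cron'.
        with open(path) as f:
            branches = json.load(f)
        return sorted(name for name, cfg in branches.items()
                      if cfg.get('features', {}).get(feature))

    # branches_with_feature('production-branches.json', 'is-trunk')
    # -> ['autoland', 'mozilla-central', 'mozilla-inbound']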
deleted file mode 100644
--- a/buildfarm/maintenance/production-masters.json
+++ /dev/null
@@ -1,1239 +0,0 @@
-[
-  {
-    "basedir": "/builds/buildbot/tests1-linux32",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/tests1-linux32/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/tests1-linux32/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/tests1-linux32/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/tests1-linux32/bin/python",
-    "buildbot_setup": "/builds/buildbot/tests1-linux32/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "aws-us-east-1",
-    "db_name": "buildbot-master01.bb.releng.use1.mozilla.com:/builds/buildbot/tests1-linux32/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master01.bb.releng.use1.mozilla.com",
-    "http_port": 8201,
-    "limit_fx_platforms": [
-      "linux"
-    ],
-    "limit_fx_slave_platforms": {
-      "linux": [
-        "ubuntu32_vm"
-      ]
-    },
-    "limit_tb_platforms": [
-      "linux"
-    ],
-    "master_dir": "/builds/buildbot/tests1-linux32/master",
-    "name": "bm01-tests1-linux32",
-    "pb_port": 9201,
-    "role": "tests",
-    "ssh_port": 7201,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/tests1-linux32/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/tests1-linux32",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/tests1-linux32/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/tests1-linux32/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/tests1-linux32/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/tests1-linux32/bin/python",
-    "buildbot_setup": "/builds/buildbot/tests1-linux32/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "aws-us-east-1",
-    "db_name": "buildbot-master02.bb.releng.use1.mozilla.com:/builds/buildbot/tests1-linux32/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master02.bb.releng.use1.mozilla.com",
-    "http_port": 8201,
-    "limit_fx_platforms": [
-      "linux"
-    ],
-    "limit_fx_slave_platforms": {
-      "linux": [
-        "ubuntu32_vm"
-      ]
-    },
-    "limit_tb_platforms": [
-      "linux"
-    ],
-    "master_dir": "/builds/buildbot/tests1-linux32/master",
-    "name": "bm02-tests1-linux32",
-    "pb_port": 9201,
-    "role": "tests",
-    "ssh_port": 7201,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/tests1-linux32/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/tests1-linux32",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/tests1-linux32/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/tests1-linux32/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/tests1-linux32/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/tests1-linux32/bin/python",
-    "buildbot_setup": "/builds/buildbot/tests1-linux32/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "aws-us-west-2",
-    "db_name": "buildbot-master04.bb.releng.usw2.mozilla.com:/builds/buildbot/tests1-linux32/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master04.bb.releng.usw2.mozilla.com",
-    "http_port": 8201,
-    "limit_fx_platforms": [
-      "linux"
-    ],
-    "limit_fx_slave_platforms": {
-      "linux": [
-        "ubuntu32_vm"
-      ]
-    },
-    "limit_tb_platforms": [
-      "linux"
-    ],
-    "master_dir": "/builds/buildbot/tests1-linux32/master",
-    "name": "bm04-tests1-linux32",
-    "pb_port": 9201,
-    "role": "tests",
-    "ssh_port": 7201,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/tests1-linux32/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/tests1-linux32",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/tests1-linux32/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/tests1-linux32/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/tests1-linux32/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/tests1-linux32/bin/python",
-    "buildbot_setup": "/builds/buildbot/tests1-linux32/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "aws-us-west-2",
-    "db_name": "buildbot-master05.bb.releng.usw2.mozilla.com:/builds/buildbot/tests1-linux32/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master05.bb.releng.usw2.mozilla.com",
-    "http_port": 8201,
-    "limit_fx_platforms": [
-      "linux"
-    ],
-    "limit_fx_slave_platforms": {
-      "linux": [
-        "ubuntu32_vm"
-      ]
-    },
-    "limit_tb_platforms": [
-      "linux"
-    ],
-    "master_dir": "/builds/buildbot/tests1-linux32/master",
-    "name": "bm05-tests1-linux32",
-    "pb_port": 9201,
-    "role": "tests",
-    "ssh_port": 7201,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/tests1-linux32/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/tests1-linux64",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/tests1-linux64/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/tests1-linux64/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/tests1-linux64/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/tests1-linux64/bin/python",
-    "buildbot_setup": "/builds/buildbot/tests1-linux64/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "aws-us-east-1",
-    "db_name": "buildbot-master51.bb.releng.use1.mozilla.com:/builds/buildbot/tests1-linux64/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master51.bb.releng.use1.mozilla.com",
-    "http_port": 8201,
-    "limit_fx_platforms": [
-      "linux64",
-      "linux64-asan"
-    ],
-    "limit_fx_slave_platforms": {
-      "linux64": [
-        "ubuntu64_vm",
-        "ubuntu64_vm_lnx_large"
-      ],
-      "linux64-asan": [
-        "ubuntu64-asan_vm",
-        "ubuntu64-asan_vm_lnx_large"
-      ]
-    },
-    "limit_tb_platforms": [
-      "linux64"
-    ],
-    "master_dir": "/builds/buildbot/tests1-linux64/master",
-    "name": "bm51-tests1-linux64",
-    "pb_port": 9201,
-    "role": "tests",
-    "ssh_port": 7201,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/tests1-linux64/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/tests1-linux64",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/tests1-linux64/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/tests1-linux64/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/tests1-linux64/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/tests1-linux64/bin/python",
-    "buildbot_setup": "/builds/buildbot/tests1-linux64/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "aws-us-east-1",
-    "db_name": "buildbot-master52.bb.releng.use1.mozilla.com:/builds/buildbot/tests1-linux64/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master52.bb.releng.use1.mozilla.com",
-    "http_port": 8201,
-    "limit_fx_platforms": [
-      "linux64",
-      "linux64-asan"
-    ],
-    "limit_fx_slave_platforms": {
-      "linux64": [
-        "ubuntu64_vm",
-        "ubuntu64_vm_lnx_large"
-      ],
-      "linux64-asan": [
-        "ubuntu64-asan_vm",
-        "ubuntu64-asan_vm_lnx_large"
-      ]
-    },
-    "limit_tb_platforms": [
-      "linux64"
-    ],
-    "master_dir": "/builds/buildbot/tests1-linux64/master",
-    "name": "bm52-tests1-linux64",
-    "pb_port": 9201,
-    "role": "tests",
-    "ssh_port": 7201,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/tests1-linux64/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/tests1-linux64",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/tests1-linux64/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/tests1-linux64/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/tests1-linux64/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/tests1-linux64/bin/python",
-    "buildbot_setup": "/builds/buildbot/tests1-linux64/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "aws-us-west-2",
-    "db_name": "buildbot-master53.bb.releng.usw2.mozilla.com:/builds/buildbot/tests1-linux64/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master53.bb.releng.usw2.mozilla.com",
-    "http_port": 8201,
-    "limit_fx_platforms": [
-      "linux64",
-      "linux64-asan"
-    ],
-    "limit_fx_slave_platforms": {
-      "linux64": [
-        "ubuntu64_vm",
-        "ubuntu64_vm_lnx_large"        
-      ],
-      "linux64-asan": [
-        "ubuntu64-asan_vm",
-        "ubuntu64-asan_vm_lnx_large"
-      ]
-    },
-    "limit_tb_platforms": [
-      "linux64"
-    ],
-    "master_dir": "/builds/buildbot/tests1-linux64/master",
-    "name": "bm53-tests1-linux64",
-    "pb_port": 9201,
-    "role": "tests",
-    "ssh_port": 7201,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/tests1-linux64/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/tests1-linux64",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/tests1-linux64/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/tests1-linux64/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/tests1-linux64/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/tests1-linux64/bin/python",
-    "buildbot_setup": "/builds/buildbot/tests1-linux64/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "aws-us-west-2",
-    "db_name": "buildbot-master54.bb.releng.usw2.mozilla.com:/builds/buildbot/tests1-linux64/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master54.bb.releng.usw2.mozilla.com",
-    "http_port": 8201,
-    "limit_fx_platforms": [
-      "linux64",
-      "linux64-asan"
-    ],
-    "limit_fx_slave_platforms": {
-      "linux64": [
-        "ubuntu64_vm",
-        "ubuntu64_vm_lnx_large"
-      ],
-      "linux64-asan": [
-        "ubuntu64-asan_vm",
-        "ubuntu64-asan_vm_lnx_large"
-      ]
-    },
-    "limit_tb_platforms": [
-      "linux64"
-    ],
-    "master_dir": "/builds/buildbot/tests1-linux64/master",
-    "name": "bm54-tests1-linux64",
-    "pb_port": 9201,
-    "role": "tests",
-    "ssh_port": 7201,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/tests1-linux64/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/tests1-windows",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/tests1-windows/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/tests1-windows/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/tests1-windows/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/tests1-windows/bin/python",
-    "buildbot_setup": "/builds/buildbot/tests1-windows/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "aws-us-east-1",
-    "db_name": "buildbot-master69.bb.releng.use1.mozilla.com:/builds/buildbot/tests1-windows/master",
-    "enabled": false,
-    "environment": "production",
-    "hostname": "buildbot-master69.bb.releng.use1.mozilla.com",
-    "http_port": 8201,
-    "limit_fx_platforms": [
-      "win32",
-      "win32-devedition",
-      "win32-stylo-disabled",
-      "win64",
-      "win64-devedition",
-      "win64-stylo-disabled"
-    ],
-    "limit_tb_platforms": [
-      "win32"
-    ],
-    "master_dir": "/builds/buildbot/tests1-windows/master",
-    "name": "bm69-tests1-windows",
-    "pb_port": 9201,
-    "role": "tests",
-    "ssh_port": 7201,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/tests1-windows/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/build1",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/build1/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/build1/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/build1/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/build1/bin/python",
-    "buildbot_setup": "/builds/buildbot/build1/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "aws-us-east-1",
-    "db_name": "buildbot-master71.bb.releng.use1.mozilla.com:/builds/buildbot/build1/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master71.bb.releng.use1.mozilla.com",
-    "http_port": 8001,
-    "master_dir": "/builds/buildbot/build1/master",
-    "name": "bm71-build1",
-    "pb_port": 9001,
-    "role": "build",
-    "release_branches": ["mozilla-esr52"],
-    "mobile_release_branches": [],
-    "thunderbird_release_branches": ["comm-esr52"],
-    "ssh_port": 7001,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/build1/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/build1",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/build1/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/build1/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/build1/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/build1/bin/python",
-    "buildbot_setup": "/builds/buildbot/build1/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "aws-us-west-2",
-    "db_name": "buildbot-master72.bb.releng.usw2.mozilla.com:/builds/buildbot/build1/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master72.bb.releng.usw2.mozilla.com",
-    "http_port": 8001,
-    "master_dir": "/builds/buildbot/build1/master",
-    "name": "bm72-build1",
-    "pb_port": 9001,
-    "role": "build",
-    "release_branches": ["mozilla-esr52"],
-    "mobile_release_branches": [],
-    "thunderbird_release_branches": ["comm-esr52"],
-    "ssh_port": 7001,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/build1/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/build1",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/build1/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/build1/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/build1/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/build1/bin/python",
-    "buildbot_setup": "/builds/buildbot/build1/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "aws-us-west-2",
-    "db_name": "buildbot-master73.bb.releng.usw2.mozilla.com:/builds/buildbot/build1/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master73.bb.releng.usw2.mozilla.com",
-    "http_port": 8001,
-    "master_dir": "/builds/buildbot/build1/master",
-    "name": "bm73-build1",
-    "pb_port": 9001,
-    "role": "build",
-    "release_branches": ["mozilla-esr52"],
-    "mobile_release_branches": [],
-    "thunderbird_release_branches": ["comm-esr52"],
-    "ssh_port": 7001,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/build1/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/try1",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/try1/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/try1/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/try1/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/try1/bin/python",
-    "buildbot_setup": "/builds/buildbot/try1/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "aws-us-east-1",
-    "db_name": "buildbot-master75.bb.releng.use1.mozilla.com:/builds/buildbot/try1/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master75.bb.releng.use1.mozilla.com",
-    "http_port": 8101,
-    "master_dir": "/builds/buildbot/try1/master",
-    "name": "bm75-try1",
-    "pb_port": 9101,
-    "role": "try",
-    "ssh_port": 7101,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/try1/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/build1",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/build1/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/build1/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/build1/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/build1/bin/python",
-    "buildbot_setup": "/builds/buildbot/build1/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "aws-us-east-1",
-    "db_name": "buildbot-master77.bb.releng.use1.mozilla.com:/builds/buildbot/build1/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master77.bb.releng.use1.mozilla.com",
-    "http_port": 8001,
-    "master_dir": "/builds/buildbot/build1/master",
-    "name": "bm77-build1",
-    "pb_port": 9001,
-    "role": "build",
-    "release_branches": ["mozilla-esr52"],
-    "mobile_release_branches": [],
-    "thunderbird_release_branches": ["comm-esr52"],
-    "ssh_port": 7001,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/build1/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/try1",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/try1/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/try1/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/try1/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/try1/bin/python",
-    "buildbot_setup": "/builds/buildbot/try1/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "aws-us-west-2",
-    "db_name": "buildbot-master78.bb.releng.usw2.mozilla.com:/builds/buildbot/try1/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master78.bb.releng.usw2.mozilla.com",
-    "http_port": 8101,
-    "master_dir": "/builds/buildbot/try1/master",
-    "name": "bm78-try1",
-    "pb_port": 9101,
-    "role": "try",
-    "ssh_port": 7101,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/try1/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/build_scheduler",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/build_scheduler/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/build_scheduler/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/build_scheduler/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/build_scheduler/bin/python",
-    "buildbot_setup": "/builds/buildbot/build_scheduler/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "scl3",
-    "db_name": "buildbot-master81.bb.releng.scl3.mozilla.com:/builds/buildbot/build_scheduler/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master81.bb.releng.scl3.mozilla.com",
-    "master_dir": "/builds/buildbot/build_scheduler/master",
-    "name": "bm81-build_scheduler",
-    "pb_port": 9301,
-    "role": "scheduler",
-    "ssh_port": 7301,
-    "release_branches": ["mozilla-esr52"],
-    "mobile_release_branches": [],
-    "thunderbird_release_branches": ["comm-esr52"],
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/build_scheduler/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/tests_scheduler",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/tests_scheduler/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/tests_scheduler/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/tests_scheduler/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/tests_scheduler/bin/python",
-    "buildbot_setup": "/builds/buildbot/tests_scheduler/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "scl3",
-    "db_name": "buildbot-master81.bb.releng.scl3.mozilla.com:/builds/buildbot/tests_scheduler/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master81.bb.releng.scl3.mozilla.com",
-    "master_dir": "/builds/buildbot/tests_scheduler/master",
-    "name": "bm81-tests_scheduler",
-    "pb_port": 9302,
-    "role": "scheduler",
-    "ssh_port": 7302,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/tests_scheduler/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/build1",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/build1/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/build1/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/build1/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/build1/bin/python",
-    "buildbot_setup": "/builds/buildbot/build1/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "scl3",
-    "db_name": "buildbot-master82.bb.releng.scl3.mozilla.com:/builds/buildbot/build1/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master82.bb.releng.scl3.mozilla.com",
-    "http_port": 8001,
-    "master_dir": "/builds/buildbot/build1/master",
-    "name": "bm82-build1",
-    "pb_port": 9001,
-    "role": "build",
-    "release_branches": ["mozilla-esr52"],
-    "mobile_release_branches": [],
-    "thunderbird_release_branches": ["comm-esr52"],
-    "ssh_port": 7001,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/build1/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/try1",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/try1/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/try1/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/try1/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/try1/bin/python",
-    "buildbot_setup": "/builds/buildbot/try1/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "scl3",
-    "db_name": "buildbot-master83.bb.releng.scl3.mozilla.com:/builds/buildbot/try1/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master83.bb.releng.scl3.mozilla.com",
-    "http_port": 8101,
-    "master_dir": "/builds/buildbot/try1/master",
-    "name": "bm83-try1",
-    "pb_port": 9101,
-    "role": "try",
-    "ssh_port": 7101,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/try1/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/build1",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/build1/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/build1/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/build1/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/build1/bin/python",
-    "buildbot_setup": "/builds/buildbot/build1/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "scl3",
-    "db_name": "buildbot-master84.bb.releng.scl3.mozilla.com:/builds/buildbot/build1/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master84.bb.releng.scl3.mozilla.com",
-    "http_port": 8001,
-    "master_dir": "/builds/buildbot/build1/master",
-    "name": "bm84-build1",
-    "pb_port": 9001,
-    "role": "build",
-    "release_branches": ["mozilla-esr52"],
-    "mobile_release_branches": [],
-    "thunderbird_release_branches": ["comm-esr52"],
-    "ssh_port": 7001,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/build1/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/build1",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/build1/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/build1/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/build1/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/build1/bin/python",
-    "buildbot_setup": "/builds/buildbot/build1/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "scl3",
-    "db_name": "buildbot-master85.bb.releng.scl3.mozilla.com:/builds/buildbot/build1/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master85.bb.releng.scl3.mozilla.com",
-    "http_port": 8001,
-    "master_dir": "/builds/buildbot/build1/master",
-    "name": "bm85-build1",
-    "pb_port": 9001,
-    "role": "build",
-    "release_branches": ["mozilla-esr52"],
-    "mobile_release_branches": [],
-    "thunderbird_release_branches": ["comm-esr52"],
-    "ssh_port": 7001,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/build1/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/build1",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/build1/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/build1/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/build1/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/build1/bin/python",
-    "buildbot_setup": "/builds/buildbot/build1/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "scl3",
-    "db_name": "buildbot-master86.bb.releng.scl3.mozilla.com:/builds/buildbot/build1/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master86.bb.releng.scl3.mozilla.com",
-    "http_port": 8001,
-    "master_dir": "/builds/buildbot/build1/master",
-    "name": "bm86-build1",
-    "pb_port": 9001,
-    "role": "build",
-    "release_branches": ["mozilla-esr52"],
-    "mobile_release_branches": [],
-    "thunderbird_release_branches": ["comm-esr52"],
-    "ssh_port": 7001,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/build1/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/try1",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/try1/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/try1/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/try1/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/try1/bin/python",
-    "buildbot_setup": "/builds/buildbot/try1/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "scl3",
-    "db_name": "buildbot-master87.bb.releng.scl3.mozilla.com:/builds/buildbot/try1/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master87.bb.releng.scl3.mozilla.com",
-    "http_port": 8101,
-    "master_dir": "/builds/buildbot/try1/master",
-    "name": "bm87-try1",
-    "pb_port": 9101,
-    "role": "try",
-    "ssh_port": 7101,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/try1/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/tests1-linux",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/tests1-linux/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/tests1-linux/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/tests1-linux/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/tests1-linux/bin/python",
-    "buildbot_setup": "/builds/buildbot/tests1-linux/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "scl3",
-    "db_name": "buildbot-master103.bb.releng.scl3.mozilla.com:/builds/buildbot/tests1-linux/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master103.bb.releng.scl3.mozilla.com",
-    "http_port": 8201,
-    "limit_fx_platforms": [
-      "linux",
-      "linux64",
-      "linux64-stylo",
-      "linux64-stylosequential",
-      "linux64-devedition",
-      "linux64-qr"
-    ],
-    "limit_fx_slave_platforms": {
-      "linux64": [
-        "ubuntu64_hw"
-      ],
-      "linux64-stylo": [
-        "ubuntu64_hw_stylo"
-      ],
-      "linux64-stylo-seq": [
-        "ubuntu64_hw_styloseq"
-      ],
-      "linux64-devedition": [
-        "ubuntu64_hw_devedition"
-      ],
-      "linux64-qr": [
-        "ubuntu64_hw_qr"
-      ]
-    },
-    "limit_tb_platforms": [
-      "linux",
-      "linux64"
-    ],
-    "master_dir": "/builds/buildbot/tests1-linux/master",
-    "name": "bm103-tests1-linux",
-    "pb_port": 9201,
-    "role": "tests",
-    "ssh_port": 7201,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/tests1-linux/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/tests1-linux",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/tests1-linux/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/tests1-linux/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/tests1-linux/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/tests1-linux/bin/python",
-    "buildbot_setup": "/builds/buildbot/tests1-linux/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "scl3",
-    "db_name": "buildbot-master104.bb.releng.scl3.mozilla.com:/builds/buildbot/tests1-linux/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master104.bb.releng.scl3.mozilla.com",
-    "http_port": 8201,
-    "limit_fx_platforms": [
-      "linux",
-      "linux64",
-      "linux64-stylo",
-      "linux64-stylosequential",
-      "linux64-devedition",
-      "linux64-qr"
-    ],
-    "limit_fx_slave_platforms": {
-      "linux64": [
-        "ubuntu64_hw"
-      ],
-      "linux64-stylo": [
-        "ubuntu64_hw_stylo"
-      ],
-      "linux64-stylo-seq": [
-        "ubuntu64_hw_styloseq"
-      ],
-      "linux64-devedition": [
-        "ubuntu64_hw_devedition"
-      ],
-      "linux64-qr": [
-        "ubuntu64_hw_qr"
-      ]
-    },
-    "limit_tb_platforms": [
-      "linux",
-      "linux64"
-    ],
-    "master_dir": "/builds/buildbot/tests1-linux/master",
-    "name": "bm104-tests1-linux",
-    "pb_port": 9201,
-    "role": "tests",
-    "ssh_port": 7201,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/tests1-linux/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/tests1-linux",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/tests1-linux/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/tests1-linux/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/tests1-linux/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/tests1-linux/bin/python",
-    "buildbot_setup": "/builds/buildbot/tests1-linux/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "scl3",
-    "db_name": "buildbot-master105.bb.releng.scl3.mozilla.com:/builds/buildbot/tests1-linux/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master105.bb.releng.scl3.mozilla.com",
-    "http_port": 8201,
-    "limit_fx_platforms": [
-      "linux",
-      "linux64",
-      "linux64-stylo",
-      "linux64-stylosequential",
-      "linux64-devedition",
-      "linux64-qr"
-    ],
-    "limit_fx_slave_platforms": {
-      "linux64": [
-        "ubuntu64_hw"
-      ],
-      "linux64-stylo": [
-        "ubuntu64_hw_stylo"
-      ],
-      "linux64-stylo-seq": [
-        "ubuntu64_hw_styloseq"
-      ],
-      "linux64-devedition": [
-        "ubuntu64_hw_devedition"
-      ],
-      "linux64-qr": [
-        "ubuntu64_hw_qr"
-      ]
-    },
-    "limit_tb_platforms": [
-      "linux",
-      "linux64"
-    ],
-    "master_dir": "/builds/buildbot/tests1-linux/master",
-    "name": "bm105-tests1-linux",
-    "pb_port": 9201,
-    "role": "tests",
-    "ssh_port": 7201,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/tests1-linux/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/tests1-macosx",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/tests1-macosx/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/tests1-macosx/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/tests1-macosx/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/tests1-macosx/bin/python",
-    "buildbot_setup": "/builds/buildbot/tests1-macosx/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "scl3",
-    "db_name": "buildbot-master106.bb.releng.scl3.mozilla.com:/builds/buildbot/tests1-macosx/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master106.bb.releng.scl3.mozilla.com",
-    "http_port": 8201,
-    "limit_fx_platforms": [
-      "macosx64",
-      "macosx64-devedition"
-    ],
-    "limit_tb_platforms": [
-      "macosx64"
-    ],
-    "master_dir": "/builds/buildbot/tests1-macosx/master",
-    "name": "bm106-tests1-macosx",
-    "pb_port": 9201,
-    "role": "tests",
-    "ssh_port": 7201,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/tests1-macosx/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/tests1-macosx",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/tests1-macosx/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/tests1-macosx/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/tests1-macosx/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/tests1-macosx/bin/python",
-    "buildbot_setup": "/builds/buildbot/tests1-macosx/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "scl3",
-    "db_name": "buildbot-master107.bb.releng.scl3.mozilla.com:/builds/buildbot/tests1-macosx/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master107.bb.releng.scl3.mozilla.com",
-    "http_port": 8201,
-    "limit_fx_platforms": [
-      "macosx64",
-      "macosx64-devedition"
-    ],
-    "limit_tb_platforms": [
-      "macosx64"
-    ],
-    "master_dir": "/builds/buildbot/tests1-macosx/master",
-    "name": "bm107-tests1-macosx",
-    "pb_port": 9201,
-    "role": "tests",
-    "ssh_port": 7201,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/tests1-macosx/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/tests1-windows",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/tests1-windows/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/tests1-windows/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/tests1-windows/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/tests1-windows/bin/python",
-    "buildbot_setup": "/builds/buildbot/tests1-windows/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "scl3",
-    "db_name": "buildbot-master109.bb.releng.scl3.mozilla.com:/builds/buildbot/tests1-windows/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master109.bb.releng.scl3.mozilla.com",
-    "http_port": 8201,
-    "limit_fx_platforms": [
-      "win32",
-      "win32-devedition",
-      "win32-stylo-disabled",
-      "win64",
-      "win64-devedition",
-      "win64-stylo-disabled"
-    ],
-    "limit_tb_platforms": [
-      "win32"
-    ],
-    "master_dir": "/builds/buildbot/tests1-windows/master",
-    "name": "bm109-tests1-windows",
-    "pb_port": 9201,
-    "role": "tests",
-    "ssh_port": 7201,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/tests1-windows/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/tests1-windows",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/tests1-windows/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/tests1-windows/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/tests1-windows/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/tests1-windows/bin/python",
-    "buildbot_setup": "/builds/buildbot/tests1-windows/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "scl3",
-    "db_name": "buildbot-master110.bb.releng.scl3.mozilla.com:/builds/buildbot/tests1-windows/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master110.bb.releng.scl3.mozilla.com",
-    "http_port": 8201,
-    "limit_fx_platforms": [
-      "win32",
-      "win32-devedition",
-      "win32-stylo-disabled",
-      "win64",
-      "win64-devedition",
-      "win64-stylo-disabled"
-    ],
-    "limit_tb_platforms": [
-      "win32"
-    ],
-    "master_dir": "/builds/buildbot/tests1-windows/master",
-    "name": "bm110-tests1-windows",
-    "pb_port": 9201,
-    "role": "tests",
-    "ssh_port": 7201,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/tests1-windows/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/tests1-windows",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/tests1-windows/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/tests1-windows/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/tests1-windows/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/tests1-windows/bin/python",
-    "buildbot_setup": "/builds/buildbot/tests1-windows/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "scl3",
-    "db_name": "buildbot-master111.bb.releng.scl3.mozilla.com:/builds/buildbot/tests1-windows/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master111.bb.releng.scl3.mozilla.com",
-    "http_port": 8201,
-    "limit_fx_platforms": [
-      "win32",
-      "win32-devedition",
-      "win32-stylo-disabled",
-      "win64",
-      "win64-devedition",
-      "win64-stylo-disabled"
-    ],
-    "limit_tb_platforms": [
-      "win32"
-    ],
-    "master_dir": "/builds/buildbot/tests1-windows/master",
-    "name": "bm111-tests1-windows",
-    "pb_port": 9201,
-    "role": "tests",
-    "ssh_port": 7201,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/tests1-windows/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/tests1-windows",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/tests1-windows/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/tests1-windows/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/tests1-windows/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/tests1-windows/bin/python",
-    "buildbot_setup": "/builds/buildbot/tests1-windows/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "aws-us-east-1",
-    "db_name": "buildbot-master128.bb.releng.use1.mozilla.com:/builds/buildbot/tests1-windows/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master128.bb.releng.use1.mozilla.com",
-    "http_port": 8201,
-    "limit_fx_platforms": [
-      "win32",
-      "win32-devedition",
-      "win32-stylo-disabled",
-      "win64",
-      "win64-devedition",
-      "win64-stylo-disabled"
-    ],
-    "limit_tb_platforms": [
-      "win32"
-    ],
-    "master_dir": "/builds/buildbot/tests1-windows/master",
-    "name": "bm128-tests1-windows",
-    "pb_port": 9201,
-    "role": "tests",
-    "ssh_port": 7201,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/tests1-windows/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/tests1-windows",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/tests1-windows/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/tests1-windows/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/tests1-windows/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/tests1-windows/bin/python",
-    "buildbot_setup": "/builds/buildbot/tests1-windows/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "aws-us-east-1",
-    "db_name": "buildbot-master137.bb.releng.use1.mozilla.com:/builds/buildbot/tests1-windows/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master137.bb.releng.use1.mozilla.com",
-    "http_port": 8201,
-    "limit_fx_platforms": [
-      "win32",
-      "win32-devedition",
-      "win32-stylo-disabled",
-      "win64",
-      "win64-devedition",
-      "win64-stylo-disabled"
-    ],
-    "limit_tb_platforms": [
-      "win32"
-    ],
-    "master_dir": "/builds/buildbot/tests1-windows/master",
-    "name": "bm137-tests1-windows",
-    "pb_port": 9201,
-    "role": "tests",
-    "ssh_port": 7201,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/tests1-windows/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/tests1-windows",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/tests1-windows/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/tests1-windows/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/tests1-windows/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/tests1-windows/bin/python",
-    "buildbot_setup": "/builds/buildbot/tests1-windows/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "aws-us-east-1",
-    "db_name": "buildbot-master138.bb.releng.use1.mozilla.com:/builds/buildbot/tests1-windows/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master138.bb.releng.use1.mozilla.com",
-    "http_port": 8201,
-    "limit_fx_platforms": [
-      "win32",
-      "win32-devedition",
-      "win32-stylo-disabled",
-      "win64",
-      "win64-devedition",
-      "win64-stylo-disabled"
-    ],
-    "limit_tb_platforms": [
-      "win32"
-    ],
-    "master_dir": "/builds/buildbot/tests1-windows/master",
-    "name": "bm138-tests1-windows",
-    "pb_port": 9201,
-    "role": "tests",
-    "ssh_port": 7201,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/tests1-windows/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/tests1-windows",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/tests1-windows/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/tests1-windows/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/tests1-windows/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/tests1-windows/bin/python",
-    "buildbot_setup": "/builds/buildbot/tests1-windows/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "aws-us-west-2",
-    "db_name": "buildbot-master139.bb.releng.usw2.mozilla.com:/builds/buildbot/tests1-windows/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master139.bb.releng.usw2.mozilla.com",
-    "http_port": 8201,
-    "limit_fx_platforms": [
-      "win32",
-      "win32-devedition",
-      "win32-stylo-disabled",
-      "win64",
-      "win64-devedition",
-      "win64-stylo-disabled"
-    ],
-    "limit_tb_platforms": [
-      "win32"
-    ],
-    "master_dir": "/builds/buildbot/tests1-windows/master",
-    "name": "bm139-tests1-windows",
-    "pb_port": 9201,
-    "role": "tests",
-    "ssh_port": 7201,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/tests1-windows/tools"
-  },
-  {
-    "basedir": "/builds/buildbot/tests1-windows",
-    "bbconfigs_branch": "production",
-    "bbconfigs_dir": "/builds/buildbot/tests1-windows/buildbot-configs",
-    "bbcustom_branch": "production-0.8",
-    "bbcustom_dir": "/builds/buildbot/tests1-windows/buildbotcustom",
-    "buildbot_bin": "/builds/buildbot/tests1-windows/bin/buildbot",
-    "buildbot_branch": "production-0.8",
-    "buildbot_python": "/builds/buildbot/tests1-windows/bin/python",
-    "buildbot_setup": "/builds/buildbot/tests1-windows/buildbot/master/setup.py",
-    "buildbot_version": "0.8.2",
-    "datacentre": "aws-us-west-2",
-    "db_name": "buildbot-master140.bb.releng.usw2.mozilla.com:/builds/buildbot/tests1-windows/master",
-    "enabled": true,
-    "environment": "production",
-    "hostname": "buildbot-master140.bb.releng.usw2.mozilla.com",
-    "http_port": 8201,
-    "limit_fx_platforms": [
-      "win32",
-      "win32-devedition",
-      "win32-stylo-disabled",
-      "win64",
-      "win64-devedition",
-      "win64-stylo-disabled"
-    ],
-    "limit_tb_platforms": [
-      "win32"
-    ],
-    "master_dir": "/builds/buildbot/tests1-windows/master",
-    "name": "bm140-tests1-windows",
-    "pb_port": 9201,
-    "role": "tests",
-    "ssh_port": 7201,
-    "tools_branch": "default",
-    "tools_dir": "/builds/buildbot/tests1-windows/tools"
-  }
-]
deleted file mode 100644
--- a/buildfarm/maintenance/purge_builds.py
+++ /dev/null
@@ -1,312 +0,0 @@
-#!/usr/bin/env python
-# Written for Mozilla by Chris AtLee <catlee@mozilla.com> 2008
-"""Delete old buildbot builds to make room for the current build.
-
-%prog [options] base_dir1 [base_dir2 ...]
-
-base_dir1 is the root of the directory tree you want to delete builds
-from.
-
-Sub-directories of base_dir1 will be deleted, in order from oldest to newest,
-until the specified amount of space is free.
-
-base_dir1 will always be used for space calculations, but if other base_dir#
-are provided, subdirectories within those dirs will also be purged. This will
-obviously only increase the available space if the other base_dirs are on the
-same mountpoint, but this can be useful for, e.g., cleaning up scratchbox.
-
-example:
-    python %prog -s 6 /builds/moz2_slave /scratchbox/users/cltbld/home/cltbld/build
-"""
-
-import os
-import shutil
-import sys
-import time
-from fnmatch import fnmatch
-import re
-
-DEFAULT_BASE_DIRS = [".."]
-
-clobber_suffix = '.deleteme'
-
-if sys.platform == 'win32':
-    # os.statvfs doesn't work on Windows
-    from win32file import RemoveDirectory, DeleteFile, \
-        GetFileAttributesW, SetFileAttributesW, GetDiskFreeSpace, \
-        FILE_ATTRIBUTE_NORMAL, FILE_ATTRIBUTE_DIRECTORY
-    from win32api import FindFiles
-
-    def freespace(p):
-        secsPerClus, bytesPerSec, nFreeClus, totClus = GetDiskFreeSpace(p)
-        return secsPerClus * bytesPerSec * nFreeClus
-else:
-    def freespace(p):
-        "Returns the number of bytes free under directory `p`"
-        r = os.statvfs(p)
-        return r.f_frsize * r.f_bavail
-
-
-def mtime_sort(p1, p2):
-    "sorting function for sorting a list of paths by mtime"
-    return cmp(os.path.getmtime(p1), os.path.getmtime(p2))
-
-
-def rmdirRecursiveWindows(dir):
-    """Windows-specific version of rmdirRecursive that handles
-    path lengths longer than MAX_PATH.
-    """
-
-    dir = os.path.realpath(dir)
-    # Make sure directory is writable
-    SetFileAttributesW('\\\\?\\' + dir, FILE_ATTRIBUTE_NORMAL)
-
-    for ffrec in FindFiles('\\\\?\\' + dir + '\*.*'):
-        file_attr = ffrec[0]
-        name = ffrec[8]
-        if name == '.' or name == '..':
-            continue
-        full_name = os.path.join(dir, name)
-
-        if file_attr & FILE_ATTRIBUTE_DIRECTORY:
-            rmdirRecursiveWindows(full_name)
-        else:
-            SetFileAttributesW('\\\\?\\' + full_name, FILE_ATTRIBUTE_NORMAL)
-            DeleteFile('\\\\?\\' + full_name)
-    RemoveDirectory('\\\\?\\' + dir)
-
-
-def rmdirRecursive(dir):
-    """This is a replacement for shutil.rmtree that works better under
-    windows. Thanks to Bear at the OSAF for the code.
-    (Borrowed from buildbot.slave.commands)"""
-    if os.name == 'nt':
-        rmdirRecursiveWindows(dir)
-        return
-
-    if not os.path.exists(dir):
-        # This handles broken links
-        if os.path.islink(dir):
-            os.remove(dir)
-        return
-
-    if os.path.islink(dir):
-        os.remove(dir)
-        return
-
-    # Verify the directory is read/write/execute for the current user
-    os.chmod(dir, 0700)
-
-    for name in os.listdir(dir):
-        full_name = os.path.join(dir, name)
-        # on Windows, if we don't have write permission we can't remove
-        # the file/directory either, so turn that on
-        if os.name == 'nt':
-            if not os.access(full_name, os.W_OK):
-                # I think this is now redundant, but I don't have an NT
-                # machine to test on, so I'm going to leave it in place
-                # -warner
-                os.chmod(full_name, 0600)
-
-        if os.path.isdir(full_name):
-            rmdirRecursive(full_name)
-        else:
-            # Don't try to chmod links
-            if not os.path.islink(full_name):
-                os.chmod(full_name, 0700)
-            os.remove(full_name)
-    os.rmdir(dir)
-
-
-def str2seconds(s):
-    """ Accepts time intervals resembling:
-         30d  (30 days)
-         10h  (10 hours)
-        Returns the specified interval as a positive integer in seconds.
-    """
-    m = re.match(r'^(\d+)([dh])$', s)
-    if (m):
-        mul = {'d': 24*60*60, 'h': 60*60}
-        n = int(m.group(1))
-        unit = m.group(2)
-        return n * mul[unit]
-    else:
-        raise ValueError("Unhandled time format '%s'" % s)
-
-
-def purge(base_dirs, gigs, ignore, max_age, dry_run=False):
-    """Delete directories under `base_dirs` until `gigs` GB are free.
-
-    Delete any directories older than max_age.
-
-    Will not delete directories listed in the ignore list except
-    those tagged with an expiry threshold.  Example:
-
-      rel-*:40d
-
-    Will not delete rel-* directories until they are over 40 days old.
-    """
-    gigs *= 1024 * 1024 * 1024
-
-    # convert 'ignore' to a dict resembling { directory: cutoff_time }
-    # where a cutoff time of -1 means 'never expire'.
-    ignore = dict(map(lambda x: x.split(':')[0:2] if len(x.split(':')) > 1 else [x, -1], ignore))
-    ignore = dict(map(lambda key: [key, time.time() - str2seconds(ignore[key])] if ignore[key] != -1 else [key, ignore[key]], ignore))
-
-    dirs = []
-    for base_dir in base_dirs:
-        if os.path.exists(base_dir):
-            for d in os.listdir(base_dir):
-                p = os.path.join(base_dir, d)
-                if not os.path.isdir(p):
-                    continue
-                mtime = os.path.getmtime(p)
-                skip = False
-                for pattern, cutoff_time in ignore.iteritems():
-                    if (fnmatch(d, pattern)):
-                        if cutoff_time == -1 or mtime > cutoff_time:
-                            skip = True
-                            break
-                        else:
-                            print("Ignored directory '%s' exceeds cutoff time" % d)
-                if skip:
-                    continue
-                dirs.append((mtime, p))
-
-    dirs.sort()
-
-    while dirs:
-        mtime, d = dirs.pop(0)
-
-        # If we're newer than max_age, and don't need any more free space,
-        # we're all done here
-        if (not max_age) or (mtime > max_age):
-            if freespace(base_dirs[0]) >= gigs:
-                break
-
-        print "Deleting", d
-        if not dry_run:
-            try:
-                clobber_path = d + clobber_suffix
-                if os.path.exists(clobber_path):
-                    rmdirRecursive(clobber_path)
-                # Prevent repeated moving.
-                if d.endswith(clobber_suffix):
-                    rmdirRecursive(d)
-                else:
-                    shutil.move(d, clobber_path)
-                    rmdirRecursive(clobber_path)
-            except:
-                print >>sys.stderr, "Couldn't purge %s properly. Skipping." % d
-
-
-def purge_hg_shares(share_dir, gigs, max_age, dry_run=False):
-    """Deletes old hg directories under share_dir"""
-    # Find hg directories
-    hg_dirs = []
-    for root, dirs, files in os.walk(share_dir):
-        for d in dirs[:]:
-            path = os.path.join(root, d, '.hg')
-            if os.path.exists(path) or os.path.exists(path + clobber_suffix):
-                hg_dirs.append(os.path.join(root, d))
-                # Remove d from the list so we don't go traversing down into it
-                dirs.remove(d)
-
-    # Now we have a list of hg directories, call purge on them
-    purge(hg_dirs, gigs, [], max_age, dry_run)
-
-    # Clean up empty directories
-    for d in hg_dirs:
-        if not os.path.exists(os.path.join(d, '.hg')):
-            print "Cleaning up", d
-            if not dry_run:
-                rmdirRecursive(d)
-
-if __name__ == '__main__':
-    import time
-    from optparse import OptionParser
-    from ConfigParser import ConfigParser, NoOptionError
-
-    max_age = 14
-    config = ConfigParser()
-    config.read(os.path.expanduser('~/.purge_builds.cfg'))
-    try:
-        max_age = config.getint('DEFAULT', 'max_age')
-    except (NoOptionError, ValueError):
-        pass
-
-    cwd = os.path.basename(os.getcwd())
-    parser = OptionParser(usage=__doc__)
-    parser.set_defaults(size=5, share_size=1, skip=[cwd], dry_run=False, max_age=max_age)
-
-    parser.add_option('-s', '--size',
-                      help='free space required (in GB, default 5)', dest='size',
-                      type='float')
-
-    parser.add_option('--share-size',
-                      help='free space required for vcs shares (in GB, default 1)', dest='share_size',
-                      type='float')
-
-    parser.add_option('-n', '--not', help='do not delete this directory. Append :30d to skip for up to 30 days, or :30h to skip for up to 30 hours',
-                      action='append', dest='skip')
-
-    parser.add_option('', '--dry-run', action='store_true',
-                      dest='dry_run',
-                      help='''do not delete anything, just print out what would be
-deleted.  note that since no directories are deleted, if the amount of free
-disk space in base_dir(s) is less than the required size, then ALL directories
-will be listed in the order in which they would be deleted.''')
-
-    parser.add_option('', '--max-age', dest='max_age', type='int',
-                      help='''maximum age (in days) for directories.  If any directory
-            has an mtime older than this, it will be deleted, regardless of how
-            much free space is required.  Set to 0 to disable.''')
-
-    options, base_dirs = parser.parse_args()
-
-    if len(base_dirs) < 1:
-        for d in DEFAULT_BASE_DIRS:
-            if os.path.exists(d):
-                base_dirs.append(d)
-    if len(base_dirs) < 1:
-        parser.error("Must specify one or more base_dirs")
-        sys.exit(1)
-
-    # Figure out the mtime before which we'll start deleting old directories
-    if options.max_age:
-        cutoff_time = time.time() - 24 * 3600 * options.max_age
-    else:
-        cutoff_time = None
-
-    purge(base_dirs, options.size, options.skip, cutoff_time, options.dry_run)
-
-    # Try to cleanup shared hg repos. We run here even if we've freed enough
-    # space so we can be sure and delete repositories older than max_age
-    if 'HG_SHARE_BASE_DIR' in os.environ:
-        purge_hg_shares(os.environ['HG_SHARE_BASE_DIR'],
-                        options.share_size, cutoff_time, options.dry_run)
-
-    # tooltool cache cleanup
-    if 'TOOLTOOL_HOME' in os.environ and 'TOOLTOOL_CACHE' in os.environ:
-        import imp
-        try:
-            tooltool = imp.load_source('tooltool', os.path.join(os.environ['TOOLTOOL_HOME'], "tooltool.py"))
-            tooltool.purge(os.environ['TOOLTOOL_CACHE'], options.size)
-        except:
-            print "Warning: impossible to cleanup tooltool cache"
-
-    after = freespace(base_dirs[0]) / (1024 * 1024 * 1024.0)
-
-    # Try to cleanup the current dir if we still need space and it will
-    # actually help.
-    if after < options.size:
-        # We skip the tools dir here because we've usually just cloned it.
-        purge(['.'], options.size, ['tools'], cutoff_time, options.dry_run)
-        after = freespace(base_dirs[0]) / (1024 * 1024 * 1024.0)
-
-    if after < options.size:
-        print "Error: unable to free %1.2f GB of space. " % options.size + \
-              "Free space only %1.2f GB" % after
-        sys.exit(1)
-    else:
-        print "%1.2f GB of space available" % after
-        sys.exit(0)
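
For reference, the ignore handling above is dense; here is a minimal sketch of the same semantics, assuming (as purge() documents) that a bare pattern never expires while a :30d or :30h suffix becomes a cutoff after which matching directories may be purged. This mirrors the deleted script; it is not part of it.

    # Sketch only: mirrors purge_builds.py's ignore-spec handling.
    import re
    import time
    from fnmatch import fnmatch

    def str2seconds(s):
        m = re.match(r'^(\d+)([dh])$', s)
        if not m:
            raise ValueError("Unhandled time format '%s'" % s)
        return int(m.group(1)) * {'d': 24 * 60 * 60, 'h': 60 * 60}[m.group(2)]

    def is_protected(dirname, mtime, ignore_specs):
        """True if `dirname` should be skipped by the purge."""
        for spec in ignore_specs:
            if ':' in spec:
                pattern, age = spec.split(':', 1)
                cutoff = time.time() - str2seconds(age)
            else:
                pattern, cutoff = spec, None   # never expires
            if fnmatch(dirname, pattern):
                return cutoff is None or mtime > cutoff
        return False

    # is_protected('rel-2.0', mtime, ['rel-*:40d']) stays True until the
    # directory's mtime is more than 40 days old.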
deleted file mode 100644
--- a/buildfarm/maintenance/purge_events.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/python
-"""Run in a buildbot master directory once the master is shut down to
-purge old events from the builder files.
-
-NB: The master must be shut down for this to work!"""
-import cPickle
-import os
-import shutil
-
-for f in os.listdir("."):
-    builder_file = os.path.join(f, "builder")
-    if os.path.isdir(f) and os.path.exists(builder_file):
-        builder = cPickle.load(open(builder_file))
-        if builder.category == 'release':
-            print "Skipping", builder_file
-            continue
-        print "Backing up", builder_file
-        shutil.copyfile(builder_file, builder_file + ".bak")
-
-        # Set some dummy attributes that get deleted by __getstate__
-        builder.currentBigState = None
-        builder.basedir = None
-        builder.status = None
-        builder.nextBuildNumber = None
-
-        # Truncate to 500 events
-        builder.events = builder.events[-500:]
-        print "Writing", builder_file
-        cPickle.dump(builder, open(builder_file, "w"))
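
The script relies on buildbot being importable so cPickle can reconstruct the pickled Builder objects. The underlying back-up, truncate, rewrite pattern, shown here on a generic pickled object with an illustrative attribute name, is roughly:

    # Hedged sketch of the pattern used above; `events` stands in for
    # whatever list-like attribute the pickled object carries
    # (Builder.events in the real script).
    import cPickle
    import shutil

    def truncate_pickled_events(path, keep=500):
        shutil.copyfile(path, path + ".bak")      # back up before rewriting
        obj = cPickle.load(open(path))
        obj.events = obj.events[-keep:]           # keep only the newest entries
        cPickle.dump(obj, open(path, "w"))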
deleted file mode 100644
--- a/buildfarm/maintenance/reboot-idle-slaves.py
+++ /dev/null
@@ -1,307 +0,0 @@
-#!/builds/slaverebooter/bin/python2.7
-"""Idle Slave Rebooter
-
-Usage: reboot-idle-slaves.py [-h] [--dryrun] (<config_file>)
-
--h --help         Show this help message.
---dryrun          Don't do any reboots, just print what would've been done.
-"""
-
-from datetime import datetime
-from furl import furl
-from os import path
-import requests
-import site
-from threading import Thread
-import time
-import Queue
-import pprint
-
-import logging
-from logging.handlers import RotatingFileHandler
-log = logging.getLogger(__name__)
-handler = RotatingFileHandler("reboot-idle-slaves.log",
-                              maxBytes=52428800,
-                              backupCount=50)
-formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
-handler.setFormatter(formatter)
-log.addHandler(handler)
-
-site.addsitedir(path.join(path.dirname(path.realpath(__file__)), "../../lib/python"))
-
-from util.retry import retry
-
-MAX_WORKERS = 16
-IDLE_THRESHOLD = 5*60*60
-PENDING, RUNNING, SUCCESS, FAILURE = range(4)
-WORKER_WAIT_THRESHOLD = 30*60
-STARTING_WAIT_INCREMENT = 1
-MAX_WAIT_INTERVAL = 2*60
-
-SLAVE_QUEUE = Queue.Queue()
-
-def get_production_slaves(slaveapi):
-    url = furl(slaveapi)
-    url.path.add("slaves")
-    url.args["environment"] = "prod"
-    url.args["enabled"] = 1
-    r = retry(requests.get, args=(str(url),))
-    return r.json()["slaves"]
-
-
-def get_slave(slaveapi, slave):
-    url = furl(slaveapi)
-    url.path.add("slaves").add(slave)
-    return retry(requests.get, args=(str(url),)).json()
-
-
-def get_formatted_time(dt):
-    return dt.strftime("%A, %B %d, %H:%M")
-
-
-def get_latest_timestamp_from_result(result):
-    if not result:
-        return 0
-    times = []
-    for key in ("finish_timestamp", "request_timestamp", "start_timestamp"):
-        if result[key]:
-            times.append(result[key])
-    return max(times)
-
-
-def get_latest_result(results):
-    res = None
-    for result in results:
-        if not res:
-            res = result
-            continue
-        res_ts = get_latest_timestamp_from_result(res)
-        if res_ts < get_latest_timestamp_from_result(result):
-            res = result
-    return res
-
-
-def get_recent_action(slaveapi, slave, action):
-    url = furl(slaveapi)
-    url.path.add("slaves").add(slave).add("actions").add(action)
-    history = retry(requests.get, args=(str(url),)).json()
-    results = []
-    for key in history.keys():
-        if not key == action:
-            continue
-        for item in history[action]:
-            results.append(history[action][item])
-    return get_latest_result(results)
-
-
-def get_recent_graceful(slaveapi, slave):
-    return get_recent_action(slaveapi, slave, "shutdown_buildslave")
-
-
-def get_recent_reboot(slaveapi, slave):
-    return get_recent_action(slaveapi, slave, "reboot")
-
-
-def get_recent_job(slaveapi, slave):
-    info = get_slave(slaveapi, slave)
-    if not info["recent_jobs"]:
-        return None
-
-    return info["recent_jobs"][0]["endtime"]
-
-
-def do_graceful(slaveapi, slave):
-    # We need to set a graceful shutdown for the slave on the off chance
-    # that it picks up a job between our decision to reboot it and the
-    # reboot actually happening. In most cases the graceful shutdown
-    # completes nearly instantly.
-    log.debug("%s - Setting graceful shutdown", slave)
-    url = furl(slaveapi)
-    url.path.add("slaves").add(slave).add("actions").add("shutdown_buildslave")
-    url.args["waittime"] = 30
-    r = retry(requests.post, args=(str(url),)).json()
-    url.args["requestid"] = r["requestid"]
-
-    time.sleep(30)  # Sleep to give a graceful some leeway to complete
-    log.info("%s - issued graceful, re-adding to queue", slave)
-    SLAVE_QUEUE.put_nowait(slave)
-    return
-
-
-def do_reboot(slaveapi, slave):
-    url = furl(slaveapi)
-    url.path.add("slaves").add(slave).add("actions").add("reboot")
-    retry(requests.post, args=(str(url),))
-    # Because SlaveAPI fully escalates reboots (all the way to IT bug filing),
-    # there's no reason for us to watch for it to complete.
-    log.info("%s - Reboot queued", slave)
-    return
-
-
-def process_slave(slaveapi, dryrun=False):
-    slave = None  # No slave name yet
-    try:
-        try:
-            slave = SLAVE_QUEUE.get_nowait()
-            log.debug("%s - got slave from SLAVE_QUEUE", slave)
-        except Queue.Empty:
-            return  # Unlikely due to our thread creation logic, but possible
-
-        last_job_ts = get_recent_job(slaveapi, slave)
-
-        # Ignore slaves without recent job information
-        if not last_job_ts:
-            log.info("%s - Skipping reboot because no job history found", slave)
-            return
-
-        last_job_dt = datetime.fromtimestamp(last_job_ts)
-        # And also slaves that haven't been idle for more than the threshold
-        if not (datetime.now() - last_job_dt).total_seconds() > IDLE_THRESHOLD:
-            log.info("%s - Skipping reboot because last job ended recently at %s",
-                     slave, get_formatted_time(last_job_dt))
-            return
-
-        recent_graceful = get_recent_graceful(slaveapi, slave)
-        recent_graceful_ts = get_latest_timestamp_from_result(recent_graceful)
-        recent_reboot = get_recent_reboot(slaveapi, slave)
-        recent_reboot_ts = get_latest_timestamp_from_result(recent_reboot)
-        # Determine the timestamp we care about for doing work
-        idle_timestamp = max(last_job_ts, recent_reboot_ts)
-        idle_dt = datetime.fromtimestamp(idle_timestamp)
-
-        # If an action is in-flight, let's assume there is no work to do this run.
-        if (recent_graceful and "state" in recent_graceful and
-                recent_graceful["state"] in (PENDING, RUNNING)):
-            log.info("%s - waiting on graceful shutdown, will recheck next run",
-                     slave)
-            return
-        if (recent_reboot and "state" in recent_reboot and
-                recent_reboot["state"] in (PENDING, RUNNING)):
-            log.info("%s - waiting on a reboot request, assume success",
-                     slave)
-            return
-
-        # No work to do if we recently performed an action that should recover it
-        if not (datetime.now() - idle_dt).total_seconds() > IDLE_THRESHOLD:
-            log.info("%s - Skipping reboot because we recently attempted recovery %s",
-                     slave, get_formatted_time(idle_dt))
-            return
-
-        if recent_graceful_ts <= idle_timestamp:
-            # we've passed IDLE_THRESHOLD since last reboot/job
-            # ---> initiate graceful
-            if dryrun:
-                log.info("%s - Last job ended at %s, would've gracefulled",
-                         slave, get_formatted_time(last_job_dt))
-                return
-            return do_graceful(slaveapi, slave)
-        else:  # (recent_graceful_ts > idle_timestamp)
-            # has recently graceful'd but needs a reboot
-            # ---> initiate reboot
-            if dryrun:
-                log.info("%s - Last job ended at %s, would've rebooted",
-                         slave, get_formatted_time(last_job_dt))
-                return
-            if recent_graceful["state"] in (FAILURE,):
-                log.info("%s - Graceful shutdown failed, rebooting anyway", slave)
-            else:
-                log.info("%s - Graceful shutdown passed, rebooting", slave)
-            return do_reboot(slaveapi, slave)
-    except:
-        log.exception("%s - Caught exception while processing", slave)
-
-
-if __name__ == "__main__":
-    from ConfigParser import RawConfigParser
-    from docopt import docopt, DocoptExit
-    args = docopt(__doc__)
-
-    dryrun = args["--dryrun"]
-    config_file = args["<config_file>"]
-
-    cfg = RawConfigParser()
-    cfg.read(config_file)
-
-    slaveapi = cfg.get("main", "slaveapi_server")
-    n_workers = cfg.getint("main", "workers")
-    verbose = cfg.getboolean("main", "verbose")
-    excludes = cfg.options("exclude")
-
-    logging.basicConfig(level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(message)s")
-    if verbose:
-        logging.getLogger("requests").setLevel(logging.DEBUG)
-        logging.getLogger("util.retry").setLevel(logging.DEBUG)
-    else:
-        logging.getLogger("requests").setLevel(logging.WARN)
-        logging.getLogger("util.retry").setLevel(logging.WARN)
-
-    if n_workers > MAX_WORKERS:
-        raise DocoptExit("Number of workers requested (%d) exceeds maximum (%d)" % (n_workers, MAX_WORKERS))
-
-    def is_excluded(name):
-        for pattern in excludes:
-            if pattern in name:
-                return True
-        return False
-
-    workers = {}
-
-    try:
-        log.info("Populating List of Slaves to Check...")
-        for slave in get_production_slaves(slaveapi):
-            name = slave["name"]
-            if is_excluded(name):
-                log.debug("%s - Excluding because it matches an excluded pattern.", name)
-                continue
-            log.debug("%s - Adding item to queue", name)
-            SLAVE_QUEUE.put_nowait(name)
-
-        # Run while there are any workers or any queued work
-        elapsed = 0
-        worker_wait_increment = STARTING_WAIT_INCREMENT
-        while len(workers) or SLAVE_QUEUE.qsize():
-            # Block until a worker frees
-            while len(workers) and len(workers) >= min(n_workers, SLAVE_QUEUE.qsize()) and \
-                  elapsed < WORKER_WAIT_THRESHOLD:
-                log.debug("Waiting for a free worker...")
-                if elapsed + worker_wait_increment >= WORKER_WAIT_THRESHOLD:
-                    worker_wait_increment = WORKER_WAIT_THRESHOLD - elapsed
-                log.debug("Sleeping %d seconds..." % worker_wait_increment)
-                time.sleep(worker_wait_increment)
-                elapsed += worker_wait_increment
-                worker_wait_increment = worker_wait_increment * 2
-                if worker_wait_increment > MAX_WAIT_INTERVAL:
-                    worker_wait_increment = MAX_WAIT_INTERVAL
-
-                for wname, w in workers.items():
-                    log.debug("worker: %s" % wname)
-                    if not w.is_alive():
-                        del workers[wname]
-            if elapsed >= WORKER_WAIT_THRESHOLD:
-                log.warning("Gave up waiting for a free worker after %d seconds" % elapsed)
-                break
-
-            # Start a new worker if there is more work
-            if len(workers) < n_workers and SLAVE_QUEUE.qsize():
-                log.debug("Starting a new worker...")
-                t = Thread(target=process_slave, args=(slaveapi, dryrun))
-                t.start()
-                workers[t.ident] = t
-                worker_wait_increment = STARTING_WAIT_INCREMENT
-                log.debug("Started worker %s", t.ident)
-
-        # Wait for any remaining workers to finish before exiting.
-        for w in workers.values():
-            while w.is_alive():
-                log.debug("Found a running worker. Attempting to join...")
-                worker_wait_increment = STARTING_WAIT_INCREMENT
-                w.join(1)
-
-        if SLAVE_QUEUE.qsize():
-            # This should not be possible, but report anyway.
-            log.info("%s items remained in queue at exit",
-                     SLAVE_QUEUE.qsize())
-    except KeyboardInterrupt:
-        raise
-
-    log.info("All done. Exiting...")
deleted file mode 100644
--- a/buildfarm/maintenance/reconfig-logrotate.conf
+++ /dev/null
@@ -1,5 +0,0 @@
-/builds/buildbot/*/reconfig.log {
-    rotate 5
-    size 10M
-    compress
-}
deleted file mode 100755
--- a/buildfarm/maintenance/restart_masters.py
+++ /dev/null
@@ -1,606 +0,0 @@
-#!/usr/bin/env python
-
-# restart_masters.py
-#
-# Buildbot-masters gradually consume more resources the longer they stay up.
-# To combat this, we've begun manually restarting masters once a month. This
-# script attempts to automate that manual process.
-#
-# The basic workflow is this:
-#  * lookup the list of all enabled masters in production-masters.json
-#  * prune that list based on an optional list of masters to target
-#  * put the masters into buckets by type and datacenter:
-#  ** We only process one master per bucket at a given time. This is to
-#     minimize our impact on capacity.
-#  * disable each master in slavealloc
-#  * execute a graceful shutdown of that master via its web interface
-#  * wait for the master process to disappear, then restart buildbot
-#  * on each iteration, check for masters that have finished, pick a new master
-#    from that same bucket, and begin the restart process for that master
-
-import getpass
-import operator
-import os
-import requests
-import simplejson as json
-import socket
-import time
-import signal
-
-from furl import furl
-from paramiko import AuthenticationException
-from slaveapi.clients import ssh
-from datetime import datetime
-
-import logging
-log = logging.getLogger(__name__)
-
-buckets = {}
-running_buckets = {}
-completed_masters = {}
-problem_masters = {}
-master_ids = {}
-progress_elapsed = 0
-start_time = time.time()
-SLEEP_INTERVAL = 60
-PROGRESS_INTERVAL = 60*60
-slavealloc_api_url = "https://secure.pub.build.mozilla.org/slavealloc/api/masters"
-credentials = {
-    "ldap_username": "",
-    "ldap_password": "",
-    "cltbld_password": "",
-    "root_password": "",
-}
-MAX_DURATION_DEFAULT = 60*60*3 # 3 hours
-bucket_config = {
-    "build_scheduler": {"max_duration": 60*5, "concurrent": 1},
-    "tests_scheduler": {"max_duration": 60*5, "concurrent": 1},
-    "build1": {"max_duration": MAX_DURATION_DEFAULT, "concurrent": 1},
-    "build1-aws-us-east-1": {"max_duration": MAX_DURATION_DEFAULT, "concurrent": 1},
-    "build1-aws-us-west-2": {"max_duration": MAX_DURATION_DEFAULT, "concurrent": 1},
-    "try1": {"max_duration": MAX_DURATION_DEFAULT, "concurrent": 1},
-    "try1-aws-us-east-1": {"max_duration": MAX_DURATION_DEFAULT, "concurrent": 1},
-    "try1-aws-us-west-2": {"max_duration": MAX_DURATION_DEFAULT, "concurrent": 1},
-    "tests1-linux": {"max_duration": MAX_DURATION_DEFAULT, "concurrent": 1},
-    "tests1-macosx": {"max_duration": MAX_DURATION_DEFAULT, "concurrent": 1},
-    "tests1-windows": {"max_duration": MAX_DURATION_DEFAULT, "concurrent": 2},
-    "tests1-linux32-aws-us-east-1": {"max_duration": MAX_DURATION_DEFAULT, "concurrent": 1},
-    "tests1-linux32-aws-us-west-2": {"max_duration": MAX_DURATION_DEFAULT, "concurrent": 1},
-    "tests1-linux64-aws-us-east-1": {"max_duration": MAX_DURATION_DEFAULT, "concurrent": 1},
-    "tests1-linux64-aws-us-west-2": {"max_duration": MAX_DURATION_DEFAULT, "concurrent": 1},
-    "tests1-windows-aws-us-east-1": {"max_duration": MAX_DURATION_DEFAULT, "concurrent": 1},
-    "tests1-windows-aws-us-west-2": {"max_duration": MAX_DURATION_DEFAULT, "concurrent": 1},
-}
-
-class IgnorePolicy(object):
-    # Paramiko-style missing-host-key handler that silently accepts
-    # unknown host keys.
-    def missing_host_key(self, *args):
-        pass
-
-def put_masters_in_buckets(masters_json, master_list=None):
-    for master in masters_json:
-        if not master['enabled']:
-            continue
-        if master_list:
-            if master['hostname'].split('.')[0] not in master_list:
-                continue
-
-        bucket_key = master['name'].split('-',1)[1]
-        # We can parallelize restarts more in AWS because we're in different regions,
-        # so make separate buckets for cloud pools.
-        if "aws" in master['datacentre']:
-            bucket_key += "-" + master['datacentre']
-        if bucket_key not in buckets:
-            buckets[bucket_key] = []
-        buckets[bucket_key].append(master)
-    return
-
-def check_available_slots(key_to_match):
-    if key_to_match in running_buckets:
-        if not key_to_match in bucket_config:
-            # We don't recognize this bucket, so limit to 1 running at a time.
-            return False
-        if len(running_buckets[key_to_match]) < bucket_config[key_to_match]['concurrent']:
-            return True
-        else:
-            return False
-    # We can run 1 of anything.
-    return True
-
-def check_credentials(master):
-    cmd='whoami'
-    if master:
-        log.debug("Checking connection to master: %s" % master['hostname'])
-        console = get_console(master['hostname'])
-        if console:
-            try:
-                rc, output = console.run_cmd(cmd)
-                if rc == 0:
-                    log.debug("Successfully connected to master: %s" % master['hostname'])
-                    return True
-                else:
-                    log.warning("Error running remote command '%s' on master: %s" % (cmd, master['hostname']))
-            except ssh.RemoteCommandError:
-                log.warning("Caught exception while attempting remote command '%s' on master: %s" % (cmd, master['hostname']))
-        else:
-            log.error("Couldn't get console to %s" % master['hostname'])
-    return False
-
-def masters_remain():
-    for key in running_buckets.iterkeys():
-        for master_name in running_buckets[key]:
-            if running_buckets[key][master_name]:
-                return True
-    for key in buckets:
-        if buckets[key]:
-            return True
-    return False
-
-class MasterConsole(ssh.SSHConsole):
-    def connect(self, timeout=30, as_root=False):
-        if as_root:
-            username = "root"
-            password = credentials["root_password"]
-        else:
-            username = "cltbld"
-            password = credentials["cltbld_password"]
-
-        try:
-            log.debug("Attempting to connect to %s as %s" % (self.fqdn, username))
-            self.client.load_system_host_keys()
-            if password != "":
-                self.client.connect(hostname=self.fqdn, username=username, password=password, allow_agent=True)
-            else:
-                self.client.connect(hostname=self.fqdn, username=username, allow_agent=True)
-            log.debug("Connection as %s succeeded!", username)
-            self.connected = True
-        except AuthenticationException, e:
-            log.debug("Authentication failure.")
-            raise e
-        except socket.error, e:
-            # Exit out early if there is a socket error, such as:
-            # ECONNREFUSED (Connection Refused). These errors are
-            # typically raised at the OS level.
-            from errno import errorcode
-            log.debug("Socket Error (%s) - %s", errorcode[e[0]], e[1])
-            raise e
-        if not self.connected:
-            log.warning("Couldn't connect with any credentials.")
-            raise Exception
-
-def get_console(hostname, as_root=False):
-    console = MasterConsole(hostname, None)
-    try:
-        console.connect(as_root=as_root)  # Make sure we can connect properly
-        return console
-    except (socket.error, ssh.SSHException), e:
-        log.error(e)
-        console.disconnect() # Don't hold a connection
-        return None  # No valid console
-    return None  # How did we get here?
-
-def exceeded_max_shutdown_duration(bucket_name, current_master):
-    if bucket_name in bucket_config:
-        max_duration = bucket_config[bucket_name]['max_duration']
-    else:
-        max_duration = MAX_DURATION_DEFAULT
-    now = time.time()
-    if now - current_master['start_time'] > max_duration:
-        return True
-    else:
-        return False
-
-def stop_master(master):
-    # For scheduler masters, we just stop them.
-    log.debug("Stopping %s" % master['hostname'])
-    cmd = "cd %s; source bin/activate; touch reconfig.lock; make stop" % master['basedir']
-    console = get_console(master['hostname'])
-    if console:
-        try:
-            rc, output = console.run_cmd(cmd)
-            if rc == 0:
-                log.debug("%s stopped successfully." % master['hostname'])
-                return True
-            log.warning("Failed to stop %s, or never saw stop finish." % master['hostname'])
-        except ssh.RemoteCommandError:
-            log.warning("Caught exception while attempting stop_master.")
-    else:
-        log.error("Couldn't get console to %s" % master['hostname'])
-    return False
-
-def parse_bash_env_var_from_string(match_string, line):
-    line = line.strip()
-    key, value = line.split("=", 1)
-    return value.strip(' \'\"')
-
-def get_credentials_from_config_file(config_file):
-    # LDAP username and LDAP password are required. If ssh keys are available
-    # (either locally or via agent), we can use them for cltbld access.
-    if config_file and os.path.exists(config_file):
-        with open(config_file, 'r') as f:
-            for line in f:
-                if 'LDAP_USERNAME' in line:
-                    credentials["ldap_username"] = parse_bash_env_var_from_string('LDAP_USERNAME', line)
-                elif 'LDAP_PASSWORD' in line:
-                    credentials["ldap_password"] = parse_bash_env_var_from_string('LDAP_PASSWORD', line)
-                elif 'CLTBLD_PASSWORD' in line:
-                    credentials["cltbld_password"] = parse_bash_env_var_from_string('CLTBLD_PASSWORD', line)
-                elif 'ROOT_PASSWORD' in line:
-                    credentials["root_password"] = parse_bash_env_var_from_string('ROOT_PASSWORD', line)
-        if not credentials["ldap_username"] or not credentials["ldap_password"]:
-            log.error("Unable to parse LDAP credentials from config file: %s" % config_file)
-            return None
-    return True
-
-def get_credentials_from_user():
-    credentials["ldap_username"] = getpass.getpass("Enter LDAP username: ")
-    credentials["ldap_password"] = getpass.getpass(prompt='Enter LDAP password: ')
-    credentials["cltbld_password"] = getpass.getpass(prompt='Enter cltbld password: ')
-    credentials["root_password"] = getpass.getpass(prompt='Enter root password: ')
-
-def get_credentials(config_file=None):
-    if config_file:
-        get_credentials_from_config_file(config_file)
-    if credentials["ldap_username"] == '' or credentials["ldap_password"] == '':
-        get_credentials_from_user()
-    if credentials["ldap_username"] == '' or credentials["ldap_password"] == '':
-        return None
-    return True
-
-def get_master_ids():
-    r = requests.get(slavealloc_api_url, auth=(credentials["ldap_username"], credentials["ldap_password"]))
-    if r.status_code != 200:
-        log.error("Unable to retrieve masters from slavealloc. Check LDAP credentials.")
-        return False
-    for master in r.json():
-        master_ids[str(master['nickname'])] = master['masterid']
-    if not master_ids:
-        return False
-    return True
-
-def http_post(post_url, error_msg):
-    try:
-        requests.post(str(post_url), allow_redirects=False)
-    except requests.RequestException:
-        log.error(error_msg)
-        return False
-    return True
-
-def http_put(put_url, put_data, error_msg):
-    try:
-        r = requests.put(put_url, data=json.dumps(put_data), allow_redirects=False, auth=(credentials["ldap_username"], credentials["ldap_password"]))
-        if r.status_code == 200:
-            return True
-    except requests.RequestException:
-        pass
-    log.error(error_msg)
-    return False
-
-def disable_master(master):
-    # Disable the master in slavealloc while we're restarting.
-    # This should prevent new slaves from connecting during shutdown
-    # and possibly getting hung.
-    log.debug("Disabling %s in slavealloc." % master['hostname'])
-    disable_url = furl(slavealloc_api_url + "/" + str(master_ids[master['name']]))
-    put_data = {"enabled": 0, "notes": "[restart_masters] Master disabled at %s " % datetime.now().strftime("%Y-%m-%d %H:%M")}
-    error_msg = "Failed to disable %s" % master['hostname']
-    return http_put(str(disable_url), put_data, error_msg)
-
-def enable_master(master):
-    # Re-enable the master in slavealloc after it has been restarted.
-    log.debug("Re-enabling %s in slavealloc." % master['hostname'])
-    enable_url = furl(slavealloc_api_url + "/" + str(master_ids[master['name']]))
-    put_data = {"enabled": 1, "notes": None}
-    error_msg = "Failed to re-enable %s" % master['hostname']
-    return http_put(str(enable_url), put_data, error_msg)
-
-def graceful_shutdown(master):
-    # We do graceful shutdowns through the master's web interface
-    log.debug("Initiating graceful shutdown for %s" % master['hostname'])
-    shutdown_url = furl("http://" + master['hostname'])
-    shutdown_url.port = master['http_port']
-    shutdown_url.path = "shutdown"
-    error_msg = "Failed to initiate graceful shutdown for %s" % master['hostname']
-    if http_post(str(shutdown_url), error_msg):
-        log.debug("Creating reconfig lockfile for master: %s" % master['hostname'])
-        cmd = "cd %s; touch reconfig.lock" % master['basedir']
-        console = get_console(master['hostname'])
-        if console:
-            try:
-                rc, output = console.run_cmd(cmd)
-                if rc == 0:
-                    log.debug("Created lockfile on master: %s." % master['hostname'])
-                    return True
-                log.warning("Error creating lockfile on master: %s" % master['hostname'])
-            except ssh.RemoteCommandError:
-                log.warning("Caught exception while attempting graceful_shutdown.")
-        else:
-            log.error("Couldn't get console to %s" % master['hostname'])
-
-    return False
-
-def check_shutdown_status(master):
-    # Returns true when there is no matching master process.
-    # Example process:
-    # /builds/buildbot/coop/tests-master/bin/python /builds/buildbot/coop/tests-master/bin/buildbot start /builds/buildbot/coop/tests-master/master
-    log.debug("Checking shutdown status of master: %s" % master['hostname'])
-    cmd="ps auxww | grep python | grep start | grep %s" % master['master_dir']
-    console = get_console(master['hostname'])
-    if console:
-        try:
-            rc, output = console.run_cmd(cmd)
-            if rc != 0:
-                log.debug("No master process found on %s." % master['hostname'])
-                return True
-            log.debug("Master process still exists on %s." % master['hostname'])
-        except ssh.RemoteCommandError:
-            log.warning("Caught exception while checking shutdown status. Will retry on next pass.")
-    else:
-        log.error("Couldn't get console to %s" % master['hostname'])
-    return False
-
-def restart_master(master):
-    # Restarts buildbot on the remote master
-    log.debug("Attempting to restart master: %s" % master['hostname'])
-    cmd = "cd %s; source bin/activate; make start; rm -f reconfig.lock" % master['basedir']
-    console = get_console(master['hostname'])
-    if console:
-        try:
-            rc, output = console.run_cmd(cmd)
-            if rc == 0:
-                log.debug("Master %s restarted successfully." % master['hostname'])
-                return True
-            log.warning("Restart of master %s failed, or never saw restart finish." % master['hostname'])
-        except ssh.RemoteCommandError:
-            log.warning("Caught exception while attempting to restart_master.")
-    else:
-        log.error("Couldn't get console to %s" % master['hostname'])
-    return False
-
-def reboot_master(master):
-    # Reboots the remote master. Buildbot is configured to start automatically,
-    # so we just need to re-enable the master in slavealloc.
-    log.debug("Attempting to reboot master: %s" % master['hostname'])
-    cmd = "cd %s; rm -f reconfig.lock; reboot" % master['basedir']
-    console = get_console(master['hostname'], as_root=True)
-    if console:
-        try:
-            rc, output = console.run_cmd(cmd)
-            if rc == 0:
-                log.debug("Master %s rebooted successfully." % master['hostname'])
-                return True
-            log.warning("Reboot of master %s failed." % master['hostname'])
-        except ssh.RemoteCommandError:
-            log.warning("Caught exception while attempting to reboot_master.")
-    else:
-        log.error("Couldn't get console to %s" % master['hostname'])
-    return False
-
-def mark_master_as_problem(key, master):
-    if key not in problem_masters:
-        problem_masters[key] = []
-    problem_masters[key].append(master.copy())
-    del running_buckets[key][master['name']]
-
-def display_remaining():
-    if not buckets or len(buckets) == 0:
-        return
-    log.info("")
-    log.info("Masters not processed yet")
-    log.info("{:<30} {}".format("bucket","master URL"))
-    log.info("{:<30} {}".format("======","=========="))
-    for bucket in sorted(buckets.iterkeys()):
-        for master in sorted(buckets[bucket], key=operator.itemgetter('hostname')):
-            if master['role'] == 'scheduler':
-                log.info("{:<30} {}".format(bucket, master['hostname']))
-            else:
-                log.info("{:<30} http://{}:{}".format(bucket, master['hostname'], master['http_port']))
-
-def display_running():
-    if not running_buckets or len(running_buckets) == 0:
-        return
-    log.info("")
-    log.info("Masters still being processed")
-    log.info("{:<30} {}".format("bucket","master URL"))
-    log.info("{:<30} {}".format("======","=========="))
-    for bucket in sorted(running_buckets.iterkeys()):
-        for master_name in sorted(running_buckets[bucket].iterkeys()):
-            master = running_buckets[bucket][master_name]
-            if master['role'] == 'scheduler':
-                log.info("{:<30} {}".format(bucket, master['hostname']))
-            else:
-                log.info("{:<30} http://{}:{}".format(bucket, master['hostname'], master['http_port']))
-
-def display_completed():
-    if not completed_masters or len(completed_masters) == 0:
-        return
-    log.info("")
-    log.info("Masters restarted (or at least attempted)")
-    log.info("{:<30} {}".format("bucket","master URL"))
-    log.info("{:<30} {}".format("======","=========="))
-    for bucket in sorted(completed_masters.iterkeys()):
-        for master in sorted(completed_masters[bucket], key=operator.itemgetter('hostname')):
-            if master['role'] == 'scheduler':
-                log.info("{:<30} {}".format(bucket, master['hostname']))
-            else:
-                log.info("{:<30} http://{}:{}".format(bucket, master['hostname'], master['http_port']))
-
-def display_problems():
-    if not problem_masters or len(problem_masters) == 0:
-        return
-    log.warning("")
-    log.warning("Masters that hit problems")
-    log.warning("{:<30} {} {}".format("bucket","master URL","issue"))
-    log.warning("{:<30} {} {}".format("======","==========","====="))
-    for bucket in sorted(problem_masters.iterkeys()):
-        for master in sorted(problem_masters[bucket], key=operator.itemgetter('hostname')):
-            if master['role'] == 'scheduler':
-                log.warning("{:<30} {} {}".format(bucket, master['hostname'], master['issue']))
-            else:
-                log.warning("{:<30} http://{}:{} {}".format(bucket, master['hostname'], master['http_port'], master['issue']))
-
-def display_progress(signal_number=None, signal_context=None):
-    display_completed()
-    display_problems()
-    display_running()
-    display_remaining()
-
-def pprint_buckets():
-    import pprint
-    pp = pprint.PrettyPrinter(indent=4)
-    pp.pprint(buckets)
-
-signal.signal(signal.SIGUSR1, display_progress)
-
-
-if __name__ == '__main__':
-    import argparse
-    import sys
-
-    parser = argparse.ArgumentParser(description='Gracefully restart a list of buildbot masters')
-    parser.add_argument("-v", "--verbose", dest="verbose", action="store_true",
-                        help="Enable extra debug output")
-    parser.add_argument("-vv", "--very-verbose", dest="very_verbose", action="store_true",
-                        help="Enable extra debug output for ssh connections")
-    parser.add_argument("-m", "--masters-json", action="store", dest="masters_json", help="JSON file containing complete list of masters", required=True)
-    parser.add_argument("-l", "--limit-to-masters", action="store", dest="limit_to_masters", help="Test file containing list of masters to restart, one per line", default=None)
-    parser.add_argument("-c", "--config", action="store", dest="config_file", help="Text file containing config variables in bash format", required=False)
-    parser.add_argument("-r", "--reboot", dest="reboot", action="store_true",
-                        help="Reboot machine once master is stopped.")
-    args = parser.parse_args()
-
-    # Setup logging
-    #
-    # Some of the modules we use here are very chatty (e.g. paramiko). We set the default log level
-    # low with an option to increase if we're debugging a submodule issue.
-    if args.very_verbose:
-        logging.basicConfig(level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(name)s - %(message)s")
-    else:
-        logging.basicConfig(level=logging.WARNING, format="%(asctime)s - %(levelname)s - %(name)s - %(message)s")
-
-    # The default log level for this script is slightly higher (INFO) because it's presumed we care
-    # about the output.
-    if args.verbose:
-        log.setLevel(logging.DEBUG)
-    else:
-        log.setLevel(logging.INFO)
-
-    if not os.path.isfile(args.masters_json):
-        log.error("Masters JSON file ('%s') does not exist. Exiting..." % args.masters_json)
-        sys.exit(1)
-
-    if not get_credentials(config_file=args.config_file):
-        sys.exit(2)
-
-    # Getting the master IDs allows us to validate the LDAP credentials while also
-    # getting a list of master IDs we can use when disabling masters in slavealloc.
-    if not get_master_ids():
-        sys.exit(3)
-
-    master_list = []
-    if args.limit_to_masters:
-        if not os.path.isfile(args.limit_to_masters):
-            log.warning("Masters limit file ('%s') does not exist. Skipping..." % args.limit_to_masters)
-        else:
-            master_list = [line.strip() for line in open(args.limit_to_masters)]
-
-    json_data = open(args.masters_json)
-    masters_json = json.load(json_data)
-
-    put_masters_in_buckets(masters_json, master_list)
-
-    # Connect to a single master, just to make sure our supplied credentials are valid.
-    log.debug("Verifying SSH credentials by connecting to a single master...")
-    master = buckets.itervalues().next()[0]
-    if master and not check_credentials(master):
-        sys.exit(4)
-
-    interval_start_time = time.time()
-    while masters_remain():
-        # Refill our running buckets.
-        # If we add a new master, we need to kick off the graceful shutdown too.
-        keys_processed = []
-        for key in buckets:
-            while check_available_slots(key):
-                if not buckets[key]:
-                    break
-                else:
-                    current_master = buckets[key].pop()
-                    master_name = current_master['name']
-                    if key not in running_buckets:
-                        running_buckets[key] = {}
-                    running_buckets[key][master_name] = current_master
-                    running_buckets[key][master_name]['start_time'] = time.time()
-                    # Make sure we can connect to this master before initiating shutdown.
-                    if current_master and not check_credentials(current_master):
-                        # Record an issue so display_problems has one to report.
-                        current_master['issue'] = "Unable to connect via ssh"
-                        mark_master_as_problem(key, current_master)
-                        continue
-                    if current_master['role'] == "scheduler":
-                        stop_master(current_master)
-                    else:
-                        if disable_master(current_master):
-                            log.debug("Disabled %s in slavealloc." % current_master['hostname'])
-                        else:
-                            current_master['issue'] = "Unable to disable in slavealloc"
-                            mark_master_as_problem(key, current_master)
-                            # This is not fatal, so we can continue to graceful shutdown.
-                        if graceful_shutdown(current_master):
-                            log.debug("Initiated graceful_shutdown of %s." % current_master['hostname'])
-                        else:
-                            current_master['issue'] = "Unable to initiate graceful_shutdown"
-                            mark_master_as_problem(key, current_master)
-
-        for key in running_buckets.iterkeys():
-            for master_name in running_buckets[key]:
-                current_master = running_buckets[key][master_name]
-                if not current_master:
-                    continue
-                if exceeded_max_shutdown_duration(key, current_master):
-                    log.debug("%s has exceeded the max shutdown duration for bucket type %s. Stopping master more forcibly." % (current_master['hostname'], key))
-                    if not stop_master(current_master):
-                        log.debug("Failed to stop master (%s). Please investigate by hand." % current_master['hostname'])
-                        current_master['issue'] = "Failed to stop master. May also need to be re-enabled in slavealloc"
-                if check_shutdown_status(current_master):
-                    if args.reboot and current_master['role'] != "scheduler":
-                        if not reboot_master(current_master):
-                            log.debug("Failed to reboot master (%s). Please investigate by hand." % current_master['hostname'])
-                            current_master['issue'] = "Failed to reboot master. May also need to be re-enabled in slavealloc"
-                    else:
-                        if not restart_master(current_master):
-                            log.debug("Failed to restart master (%s). Please investigate by hand." % current_master['hostname'])
-                            current_master['issue'] = "Failed to restart master. May also need to be re-enabled in slavealloc"
-                    # Either way, we re-enable and remove this master so we can proceed.
-                    if current_master['role'] != "scheduler":
-                        if 'issue' not in current_master:
-                            if enable_master(current_master):
-                                log.debug("Re-enabled %s in slavealloc" % current_master['hostname'])
-                            else:
-                                log.debug("Unable to re-enable master (%s) in slavealloc." % current_master['hostname'])
-                                current_master['issue'] = "Unable to re-enable in slavealloc"
-                    if 'issue' in current_master:
-                        # Record the problem and retire this master from its
-                        # running bucket so we don't loop on it forever.
-                        if master_name not in problem_masters:
-                            problem_masters[master_name] = []
-                        problem_masters[master_name].append(current_master.copy())
-                        keys_processed.append(master_name)
-                        continue
-                    if master_name not in completed_masters:
-                        completed_masters[master_name] = []
-                    completed_masters[master_name].append(current_master.copy())
-                    keys_processed.append(master_name)
-
-            for master_name in keys_processed:
-                del running_buckets[key][master_name]
-            keys_processed = []
-
-        if masters_remain():
-            now = time.time()
-            current_interval = now - interval_start_time
-            if current_interval >= PROGRESS_INTERVAL:
-                display_progress()
-                interval_start_time = now
-            log.debug("Sleeping for %ds" % SLEEP_INTERVAL)
-            time.sleep(SLEEP_INTERVAL)
-
-    log.info("All masters processed. Exiting")
-    display_completed()
-    display_problems()
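
The bucketing rule at the heart of put_masters_in_buckets() is compact enough to isolate. A sketch of the key derivation, with sample values drawn from the production-masters.json entries deleted earlier in this changeset:

    # Sketch of the bucket key derivation used by put_masters_in_buckets().
    def bucket_key(master):
        key = master['name'].split('-', 1)[1]        # strip the "bmNN-" prefix
        if 'aws' in master['datacentre']:
            key += '-' + master['datacentre']        # separate buckets per region
        return key

    # {'name': 'bm128-tests1-windows', 'datacentre': 'aws-us-east-1'}
    # -> 'tests1-windows-aws-us-east-1', which bucket_config caps at one
    # concurrent restart; the in-house 'tests1-windows' bucket allows two.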
deleted file mode 100644
--- a/buildfarm/maintenance/slaverebooter.ini.sample
+++ /dev/null
@@ -1,11 +0,0 @@
-[main]
-verbose = false
-slaveapi_server = http://slaveapi.blah
-workers = 4
-
-[exclude]
-# Hosts which match any key in this section will be skipped when processing
-# slaves. Any values listed here will be ignored.
-ec2 =
-spot =
-# etc.
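
For context, this file was consumed by reboot-idle-slaves.py (deleted above) via RawConfigParser; a minimal sketch of the reading side, with an illustrative file name:

    # Sketch of how reboot-idle-slaves.py consumed this file (Python 2).
    from ConfigParser import RawConfigParser

    cfg = RawConfigParser()
    cfg.read("slaverebooter.ini")
    slaveapi = cfg.get("main", "slaveapi_server")
    n_workers = cfg.getint("main", "workers")
    verbose = cfg.getboolean("main", "verbose")
    excludes = cfg.options("exclude")   # section keys only; values are ignored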
deleted file mode 100644
--- a/buildfarm/maintenance/test_purge_builds.py
+++ /dev/null
@@ -1,30 +0,0 @@
-from unittest import TestCase
-from purge_builds import isImportant
-
-
-class TestPurge(TestCase):
-
-    importantDirsPattern = ['release-*', '*-nightly', 'info']
-    importantDirs = ['mozilla-central-linux64-nightly',
-                     'mozilla-central-linux64-l10n-nightly',
-                     'release-mozilla-1.9.2-linux64_build',
-                     'release-mozilla-central-win32-opt-unittest-crashtest',
-                     'release-mozilla-1.9.1-linux-opt-unittest-mochitests-3',
-                     'release-mozilla-central-macosx64_repack_3',
-                     'release-mozilla-central-android_update_verify',
-                     'info',
-                     ]
-    notImportantDirs = ['mozilla-1.9.2-linux-l10n-dep',
-                        'mozilla-central-win32-xulrunner',
-                        'nanojit-macosx64',
-                        'fuzzer-linux',
-                        'mozilla-central-linux64-debug',
-                        ]
-
-    def testImportantDirs(self):
-        for d in self.importantDirs:
-            self.assertTrue(isImportant(d, self.importantDirsPattern))
-
-    def testNotImportantDirs(self):
-        for d in self.notImportantDirs:
-            self.assertFalse(isImportant(d, self.importantDirsPattern))
deleted file mode 100755
--- a/buildfarm/maintenance/try_sendchange.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/sh
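-# Thin wrapper: clear PYTHONHOME so the buildbot-0.8.0 python does not
-# inherit a conflicting environment, then pass all arguments through to
-# try_sendchange.py.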
-unset PYTHONHOME
-/tools/buildbot-0.8.0/bin/python ~/tools/buildfarm/maintenance/try_sendchange.py "$@"
\ No newline at end of file
deleted file mode 100755
--- a/buildfarm/maintenance/update_irc.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash -eu
-
-# I use ii here as a quick-and-dirty command-line interface to IRC.
-# You could quite easily replace this with a call out to an existing
-# bot or external script.
-MSG=$1
-if [ "${MSG}" == "" ]; then
-    exit 1
-fi
-
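-# "|| true" stops "set -e" (from the shebang's -eu) aborting here before
-# the friendlier error below can run when ii is not installed.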
-II=$(which ii || true)
-if [ "${II}" == "" ]; then
-    echo "  * IRC client not found. Skipping IRC update."
-    exit 2
-fi
-
-IRCSERVER='irc.mozilla.org'
-IRCNICK='reconfig-bot'
-IRCCHANNEL='#releng'
-${II} -s ${IRCSERVER} -n ${IRCNICK} -f "Reconfig Bot" &
-iipid="$!"
-sleep 5
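-# ii exposes each server as a directory tree under ~/irc; writing IRC
-# commands such as "/j <channel>" to the server's "in" FIFO joins the
-# channel, which then appears as its own subdirectory.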
-printf "/j %s\n" "${IRCCHANNEL}" > ~/irc/${IRCSERVER}/in
-while [ ! -d ~/irc/${IRCSERVER}/${IRCCHANNEL} ]; do
-    echo "  * ~/irc/${IRCSERVER}/${IRCCHANNEL} doesn't exist. Waiting."
-    sleep 5
-done
-echo "  * Updating IRC with: ${MSG}"
-echo "${MSG}" > ~/irc/${IRCSERVER}/${IRCCHANNEL}/in
-sleep 5
-kill ${iipid}
deleted file mode 100755
--- a/buildfarm/maintenance/update_maintenance_wiki.sh
+++ /dev/null
@@ -1,135 +0,0 @@
-#!/bin/bash -e
-
-# Explicitly unset any pre-existing environment variables to avoid variable collision
-unset DRY_RUN DEBUG_DIR
-WIKI_TEXT_ADDITIONS_FILE=""
-
-. "$(dirname "${0}")/wiki_functions.sh"
-
-WIKI_TITLE="ReleaseEngineering/Maintenance"
-
-function usage {
-    echo "Usage: $0 -h"
-    echo "Usage: $0 [-r DEBUG_DIR] -d [-w WIKI_TEXT_ADDITIONS_FILE]"
-    echo "Usage: $0 [-r DEBUG_DIR] -w WIKI_TEXT_ADDITIONS_FILE"
-    echo
-    echo "    -d:                           Dry run; will not make changes, only validates login."
-    echo "    -h:                           Display help."
-    echo "    -r DEBUG_DIR:                 Copy temporary generated files into directory DEBUG_DIR"
-    echo "    -w WIKI_TEXT_ADDITIONS_FILE:  File containing wiki markdown to insert into wiki page."
-    echo "You need to set WIKI_USERNAME and WIKI_PASSWORD in env before running."
-}
-
-# Simple function to output the name of this script and the options that were passed to it
-function command_called {
-    echo -n "Command called:"
-    for ((INDEX=0; INDEX<=$#; INDEX+=1))
-    do
-        echo -n " '${!INDEX}'"
-    done
-    echo ''
-    echo "From directory: '$(pwd)'"
-}
-
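-# Indent command_called's two output lines to match this script's "  * "
-# logging style.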
-command_called "${@}" | sed '1s/^/  * /;2s/^/    /'
-
-echo "  * Parsing parameters of $(basename "${0}")..."
-# Parse parameters passed to this script
-while getopts ":dhr:w:" opt; do
-    case "${opt}" in
-        d)  DRY_RUN=1
-            ;;
-        h)  usage
-            exit 0
-            ;;
-        r)  DEBUG_DIR="${OPTARG}"
-            ;;
-        w)  WIKI_TEXT_ADDITIONS_FILE="${OPTARG}"
-            ;;
-        ?)  usage >&2
-            exit 1
-            ;;
-    esac
-done
-
-DRY_RUN="${DRY_RUN:-0}"
-DEBUG_DIR="${DEBUG_DIR:-}"
-
-if [ -n "${DEBUG_DIR}" ]; then
-    if [ ! -d "${DEBUG_DIR}" ]; then
-        echo "  * Creating directory '${DEBUG_DIR}'..."
-        if ! mkdir -p "${DEBUG_DIR}"; then
-            echo "ERROR: Directory '${DEBUG_DIR}' could not be created from directory '$(pwd)'." >&2
-            exit 70
-        fi
-    else
-        echo "  * Debug directory '${DEBUG_DIR}' exists - OK"
-    fi
-else
-    echo "  * Not storing temporary files (DEBUG_DIR has not been specified)"
-fi
-
-# A wiki text file is only required for a real run; for a dry run it is optional, but if one is supplied it is still validated and used.
-if [ "${DRY_RUN}" == 0 ] || [ -n "${WIKI_TEXT_ADDITIONS_FILE}" ]; then
-    if [ -z "${WIKI_TEXT_ADDITIONS_FILE}" ]; then
-        echo "ERROR: Must provide a file containing additional wiki text to embed, e.g. '${0}' -w 'reconfig-bugs.wikitext'" >&2
-        echo "Exiting..." >&2
-        exit 71
-    fi
-    if [ ! -f "${WIKI_TEXT_ADDITIONS_FILE}" ]; then
-        echo "ERROR: File '${WIKI_TEXT_ADDITIONS_FILE}' not found. Working directory is '$(pwd)'." >&2
-        echo "This file should contain additional wiki content to be inserted in https://wiki.mozilla.org/ReleaseEngineering/Maintenance." >&2
-        echo "Exiting..." >&2
-        exit 72
-    fi
-fi
-
-check_wiki_login_env "${WIKI_TITLE}"
-
-# create some temporary files
-current_content="$(mktemp -t current-content.XXXXXXXXXX)"
-new_content="$(mktemp -t new-content.XXXXXXXXXX)"
-
-echo "  * Retrieving current wiki text of https://wiki.mozilla.org/${WIKI_TITLE}..."
-curl -s "https://wiki.mozilla.org/${WIKI_TITLE}?action=raw" >> "${current_content}"
-
-# find first "| in production" line in the current content, and grab line number
-old_line="$(sed -n '/^| in production$/=' "${current_content}" | head -1)"
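-# The page body is a wiki table; new reconfig entries are spliced in as a
-# fresh row just above that first "| in production" row, so the offsets
-# below are all computed relative to old_line.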
-
-echo "  * Preparing wiki page to include new content..."
-# create new version of whole page, and put in "${new_content}" file...
-{
-    # old content, up to 2 lines before the first "| in production" line
-    sed -n 1,$((old_line-2))p "${current_content}"
-    # the new content to add
-    if [ -r "${WIKI_TEXT_ADDITIONS_FILE}" ]; then
-        echo '|-'
-        echo '| in production'
-        echo "| `TZ=America/Los_Angeles date +"%Y-%m-%d %H:%M PT"`"
-        echo '|'
-        cat "${WIKI_TEXT_ADDITIONS_FILE}"
-    fi
-    # the rest of the page (starting from the line before "| in production")
-    sed -n $((old_line-1)),\$p "${current_content}"
-} > "${new_content}"
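-# Illustrative shape of the spliced-in region (placeholders, not real
-# values):
-#   |-
-#   | in production
-#   | YYYY-MM-DD HH:MM PT
-#   |
-#   ...contents of WIKI_TEXT_ADDITIONS_FILE...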
-
-WIKI_COMMENT="reconfig"
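-# wiki_login, wiki_edit_login, wiki_post and wiki_logout come from
-# wiki_functions.sh (sourced at the top); they are also presumed to set
-# the cookie_jar and EXIT_CODE variables used below.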
-wiki_login
-wiki_edit_login
-wiki_post
-wiki_logout
-
-if [ -n "${DEBUG_DIR}" ]; then
-    echo "  * Backing up temporary files generated during wiki publish before they get deleted..."
-    cp "${cookie_jar}"      "${DEBUG_DIR}/cookie_jar.txt"
-    cp "${new_content}"     "${DEBUG_DIR}/new_wiki_content.txt"
-    cp "${current_content}" "${DEBUG_DIR}/old_wiki_content.log"
-fi
-
-# remove temporary files
-echo "  * Deleting temporary files..."
-rm "${cookie_jar}"
-rm "${new_content}"
-rm "${current_content}"
-
-exit "${EXIT_CODE}"
deleted file mode 100644
--- a/buildfarm/maintenance/update_remote_creds.pl
+++ /dev/null
@@ -1,785 +0,0 @@
-#!/usr/bin/perl -w
-# -*- cperl -*-
-#
-# ***** BEGIN LICENSE BLOCK *****
-# Version: MPL 1.1
-#
-# The contents of this file are subject to the Mozilla Public License Version
-# 1.1 (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-# http://www.mozilla.org/MPL/
-#
-# Software distributed under the License is distributed on an "AS IS" basis,
-# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
-# for the specific language governing rights and limitations under the
-# License.
-#
-# The Original Code is Remote Credential Updater.
-#
-# The Initial Developer of the Original Code is
-# Mozilla Corporation.
-# Portions created by the Initial Developer are Copyright (C) 2008
-# the Initial Developer. All Rights Reserved.
-#
-# Contributor(s):  Reed Loden <reed@mozilla.com>
-#                  Chris Cooper <ccooper@deadsquid.com>
-#                  Nick Thomas <nthomas@mozilla.com>
-#
-use strict;
-$|++;
-
-$ENV{PATH} = "/bin:/usr/bin:/usr/local/bin";
-
-use Data::Dumper;
-use Date::Manip;
-use diagnostics;
-use warnings;
-use Getopt::Long;
-use LWP;
-use Net::SSH::Expect;
-
-use vars qw(
-	    $verbose
-	    $machine_list
-	    $host_errors
-	    $default_ssh_timeout
-	    $start_timestamp
-           );
-
-$default_ssh_timeout = 5;
-$start_timestamp = UnixDate("now","%q");
-
-#########################################################################
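-# Load the machine list from disk; the open below is guarded by explicit
-# existence and readability checks on the file.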
-sub loadExistingMachineList($) {
-  my $machine_list_file = shift;
-
-  print "Loading existing machine list: $machine_list_file..." if ($verbose);
-
-  if (-e $machine_list_file and
-      -r $machine_list_file) {
-    open(MACHINELIST, $machine_list_file) or die "Unable to open machine list file: $machine_list_file";
-