bug 763929: tracking bug for initial implementation + deployment of release kickoff and release runner - initial implementation of release runner and dependent changes. r=catlee
authorRail Aliev <rail@mozilla.com>
Thu, 10 Jan 2013 10:02:08 -0500
changeset 3351 be9eb45e4ec3ad692e771b1359ca8088dad1d7ce
parent 3350 d91927131ad908220d687dda136e9ed045f38a67
child 3352 1d55e73906ce5cbe0ba028c93b38f7436e03fc16
push id2393
push userbhearsum@mozilla.com
push dateThu, 10 Jan 2013 15:03:00 +0000
reviewerscatlee
bugs763929
bug 763929: tracking bug for initial implementation + deployment of release kickoff and release runner - initial implementation of release runner and dependent changes. r=catlee
.hgignore
buildbot-helpers/release_sanity.py
buildfarm/maintenance/manage_masters.py
buildfarm/maintenance/master_fabric.py
buildfarm/release/release-runner.ini.example
buildfarm/release/release-runner.py
buildfarm/release/release-runner.sh
buildfarm/release/release-runner.supervisor
lib/python/build/versions.py
lib/python/kickoff/__init__.py
lib/python/kickoff/api.py
lib/python/mozilla_buildtools/test/test_release_config.py
lib/python/mozilla_buildtools/test/test_release_info.py
lib/python/mozilla_buildtools/test/test_release_versions.py
lib/python/mozilla_buildtools/test/test_util_commands.py
lib/python/mozilla_buildtools/test/test_util_hg.py
lib/python/release/config.py
lib/python/release/info.py
lib/python/release/sanity.py
lib/python/release/versions.py
lib/python/util/commands.py
lib/python/util/fabric/__init__.py
lib/python/util/fabric/actions.py
lib/python/util/fabric/common.py
lib/python/util/hg.py
lib/python/util/retry.py
lib/python/util/sendmail.py
--- a/.hgignore
+++ b/.hgignore
@@ -6,10 +6,11 @@ RX-44
 RX-51
 build/
 slavealloc\.db
 \..*\.swp
 twistd\.pid
 .*\.egg-info
 dist/
 slavealloc.log
+release-runner.ini
 lib/python/slavealloc/www/icons
 lib/python/slavealloc/www/js/bugzilla.js
--- a/buildbot-helpers/release_sanity.py
+++ b/buildbot-helpers/release_sanity.py
@@ -1,517 +1,583 @@
 #!/usr/bin/env python
 """%prog [-d|--dryrun] [-u|--username `username`] [-b|--bypass-check]
         [-l| --bypass-l10n-check] [-m|--bypass-mozconfig-check]
         [-V| --version `version`] [-B --branch `branchname`]
         [-N|--build-number `buildnumber`]
         [-c| --release-config `releaseConfigFile`]
         [-w| --whitelist `mozconfig_whitelist`]
         [--l10n-dashboard-version version]
-        -p|--products firefox,fennec master:port
+        master:port
 
     Wrapper script to sanity-check a release. Default behaviour is to check
     the branch and revision specific in the release_configs, check if the
     milestone and version# in the source repo match the
     expected values in the release_configs, check the l10n repos & dashboard,
     compare the nightly and release mozconfigs for a release branch against
     a whitelist of known differences between the two. If all tests pass then
     the master is reconfiged and then a senchange is generated to kick off
     the release automation.
 """
-import re, urllib2
-import os, difflib
 try:
     import simplejson as json
 except ImportError:
     import json
+
+import difflib
+import logging
+import os
+import site
+import urllib2
+
 from optparse import OptionParser
-from util.commands import run_cmd
+from os import path
+from tempfile import mkdtemp
+from shutil import rmtree
+
+site.addsitedir(path.join(path.dirname(__file__), "../lib/python"))
+
 from util.file import compare
-from util.hg import make_hg_url, get_repo_name
+from util.hg import make_hg_url, get_repo_name, mercurial, update
 from release.info import readReleaseConfig, getRepoMatchingBranch, readConfig
 from release.versions import getL10nDashboardVersion
 from release.l10n import getShippedLocales
 from release.platforms import getLocaleListFromShippedLocales
-import logging
-from subprocess import CalledProcessError
-log = logging.getLogger(__name__)
-
-RECONFIG_SCRIPT = os.path.join(os.path.dirname(__file__),
-                               "../buildfarm/maintenance/buildbot-wrangler.py")
-error_tally = set()
-
-def findVersion(contents, versionNumber):
-    """Given an open readable file-handle look for the occurrence
-       of the version # in the file"""
-    ret = re.search(re.compile(re.escape(versionNumber), re.DOTALL), contents)
-    return ret
-
-def reconfig():
-    """reconfig the master in the cwd"""
-    run_cmd(['python', RECONFIG_SCRIPT, 'reconfig', os.getcwd()])
-
-def check_buildbot():
-    """check if buildbot command works"""
-    try:
-        run_cmd(['buildbot', '--version'])
-    except CalledProcessError:
-        print "FAIL: buildbot command doesn't work"
-        raise
-
-def locale_diff(locales1,locales2):
-    """ accepts two lists and diffs them both ways, returns any differences found """
-    diff_list = [locale for locale in locales1 if not locale in locales2]
-    diff_list.extend(locale for locale in locales2 if not locale in locales1)
-    return diff_list
+from release.sanity import check_buildbot, find_version, locale_diff, \
+    sendchange
+from util.fabric.common import check_fabric, FabricHelper
+from util.retry import retry
 
-def sendchange(branch, revision, username, master, products):
-    """Send the change to buildbot to kick off the release automation"""
-    cmd = [
-       'buildbot',
-       'sendchange',
-       '--username',
-       username,
-       '--master',
-       master,
-       '--branch',
-       branch,
-       '-p',
-       'products:%s' % products,
-       '-p',
-       'script_repo_revision:%s' % revision,
-       'release_build'
-       ]
-    log.info("Executing: %s" % cmd)
-    run_cmd(cmd)
+log = logging.getLogger(__name__)
+error_tally = set()
+HG = 'hg.mozilla.org'
 
-def verify_branch(branch, productName):
-    masterConfig = json.load(open('master_config.json'))
-    release_branches_key = 'release_branches'
-    if productName == 'fennec':
-        release_branches_key = 'mobile_release_branches'
-    elif productName == 'thunderbird':
-        release_branches_key = 'thunderbird_release_branches'
-    if branch not in masterConfig[release_branches_key]:
-        success = False
-        log.error("Branch %s isn't enabled for %s", branch, productName)
-        error_tally.add('verify_branch')
-    else:
-        success = True
-        log.info("Branch %s is enabled on master for %s", branch, productName)
-    return success
 
 def verify_repo(branch, revision, hghost):
     """Poll the hgweb interface for a given branch and revision to
        make sure it exists"""
     repo_url = make_hg_url(hghost, branch, revision=revision)
     log.info("Checking for existence of %s..." % repo_url)
     success = True
     try:
         repo_page = urllib2.urlopen(repo_url)
         log.info("Got: %s !" % repo_page.geturl())
     except urllib2.HTTPError:
-        log.error("Repo does not exist with required revision. Check again, or use -b to bypass")
+        log.error("Repo does not exist with required revision."
+                  " Check again, or use -b to bypass")
         success = False
         error_tally.add('verify_repo')
     return success
 
-def verify_mozconfigs(branch, revision, hghost, product, mozconfigs, appName, whitelist=None):
-    """Compare nightly mozconfigs for branch to release mozconfigs and compare to whitelist of known differences"""
+
+def verify_mozconfigs(branch, revision, hghost, product, mozconfigs,
+                      whitelist=None):
+    """Compare nightly mozconfigs for branch to release mozconfigs and
+    compare to whitelist of known differences"""
     branch_name = get_repo_name(branch)
     if whitelist:
         mozconfigWhitelist = readConfig(whitelist, ['whitelist'])
     else:
         mozconfigWhitelist = {}
     log.info("Comparing %s mozconfigs to nightly mozconfigs..." % product)
     success = True
-    types = {'+': 'release', '-': 'nightly'}
-    for platform,mozconfig in mozconfigs.items():
+    for platform, mozconfig in mozconfigs.items():
         urls = []
         mozconfigs = []
         mozconfig_paths = [mozconfig, mozconfig.rstrip('release') + 'nightly']
         # Create links to the two mozconfigs.
-        releaseConfig = make_hg_url(hghost, branch, 'http', revision, mozconfig)
+        releaseConfig = make_hg_url(hghost, branch, 'http', revision,
+                                    mozconfig)
         urls.append(releaseConfig)
         # The nightly one is the exact same URL, with the file part changed.
         urls.append(releaseConfig.rstrip('release') + 'nightly')
         for url in urls:
             try:
                 mozconfigs.append(urllib2.urlopen(url).readlines())
             except urllib2.HTTPError as e:
                 log.error("MISSING: %s - ERROR: %s" % (url, e.msg))
         diffInstance = difflib.Differ()
         if len(mozconfigs) == 2:
-            diffList = list(diffInstance.compare(mozconfigs[0],mozconfigs[1]))
+            diffList = list(diffInstance.compare(mozconfigs[0], mozconfigs[1]))
             for line in diffList:
                 clean_line = line[1:].strip()
-                if (line[0] == '-'  or line[0] == '+') and len(clean_line) > 1:
+                if (line[0] == '-' or line[0] == '+') and len(clean_line) > 1:
                     # skip comment lines
                     if clean_line.startswith('#'):
                         continue
                     # compare to whitelist
                     message = ""
                     if line[0] == '-':
-                        if mozconfigWhitelist.get(branch_name, {}).has_key(platform):
-                            if clean_line in mozconfigWhitelist[branch_name][platform]:
+                        if platform in mozconfigWhitelist.get(branch_name, {}):
+                            if clean_line in \
+                                mozconfigWhitelist[branch_name][platform]:
                                 continue
                     elif line[0] == '+':
-                        if mozconfigWhitelist.get('nightly', {}).has_key(platform):
-                            if clean_line in mozconfigWhitelist['nightly'][platform]:
+                        if platform in mozconfigWhitelist.get('nightly', {}):
+                            if clean_line in \
+                                mozconfigWhitelist['nightly'][platform]:
                                 continue
                             else:
-                                log.warning("%s not in %s %s!" % (clean_line, platform, mozconfigWhitelist['nightly'][platform]))
+                                log.warning("%s not in %s %s!" % (
+                                    clean_line, platform,
+                                    mozconfigWhitelist['nightly'][platform]))
                     else:
                         log.error("Skipping line %s!" % line)
                         continue
                     message = "found in %s but not in %s: %s"
                     if line[0] == '-':
-                        log.error(message % (mozconfig_paths[0], mozconfig_paths[1], clean_line))
+                        log.error(message % (mozconfig_paths[0],
+                                             mozconfig_paths[1], clean_line))
                     else:
-                        log.error(message % (mozconfig_paths[1], mozconfig_paths[0], clean_line))
+                        log.error(message % (mozconfig_paths[1],
+                                             mozconfig_paths[0], clean_line))
                     success = False
                     error_tally.add('verify_mozconfig')
         else:
             log.info("Missing mozconfigs to compare for %s" % platform)
-            error_tally.add("verify_mozconfigs: Confirm that %s does not have release/nightly mozconfigs to compare" % platform)
+            error_tally.add("verify_mozconfigs: Confirm that %s does not have "
+                            "release/nightly mozconfigs to compare" % platform)
     return success
 
+
 def verify_build(sourceRepo, hghost):
-    """ Ensure that the bumpFiles match what the release config wants them to be"""
+    """ Ensure that the bumpFiles match what the release config wants them to
+    be"""
     success = True
     for filename, versions in sourceRepo['bumpFiles'].iteritems():
         try:
             url = make_hg_url(hghost, sourceRepo['path'],
                               revision=sourceRepo['revision'],
                               filename=filename)
             found_version = urllib2.urlopen(url).read()
-            if not findVersion(found_version, versions['version']):
-                log.error("%s has incorrect version '%s' (expected '%s')" % \
-                  (filename, found_version, versions['version']))
+            if not find_version(found_version, versions['version']):
+                log.error("%s has incorrect version '%s' (expected '%s')" %
+                          (filename, found_version, versions['version']))
                 success = False
                 error_tally.add('verify_build')
         except urllib2.HTTPError, inst:
-            log.error("cannot find %s. Check again, or -b to bypass" % inst.geturl())
+            log.error("cannot find %s. Check again, or -b to bypass" %
+                      inst.geturl())
             success = False
             error_tally.add('verify_build')
 
     return success
 
-def verify_configs(revision, hghost, configs_repo, changesets, filename):
-    """Check the release_configs and l10n-changesets against tagged revisions"""
-    configs_url = make_hg_url(hghost, configs_repo, revision=revision, filename="mozilla/%s" % filename)
-    l10n_url = make_hg_url(hghost, configs_repo, revision=revision, filename="mozilla/%s" % changesets)
+
+def verify_configs(configs_dir, revision, hghost, configs_repo, changesets,
+                   filename):
+    """Check the release_configs and l10n-changesets against tagged
+    revisions"""
+
+    release_config_file = path.join(configs_dir, 'mozilla', filename)
+    l10n_changesets_file = path.join(configs_dir, 'mozilla', changesets)
+    configs_url = make_hg_url(hghost, configs_repo, revision=revision,
+                              filename=path.join('mozilla', filename))
+    l10n_url = make_hg_url(hghost, configs_repo, revision=revision,
+                           filename=path.join('mozilla', changesets))
 
     success = True
     try:
         official_configs = urllib2.urlopen(configs_url)
-        log.info("Comparing tagged revision %s to on-disk %s ..." % (configs_url, filename))
-        if not compare(official_configs, filename):
+        log.info("Comparing tagged revision %s to on-disk %s ..." % (
+            configs_url, filename))
+        if not compare(official_configs, release_config_file):
             log.error("local configs do not match tagged revisions in repo")
             success = False
             error_tally.add('verify_configs')
         l10n_changesets = urllib2.urlopen(l10n_url)
-        log.info("Comparing tagged revision %s to on-disk %s ..." % (l10n_url, changesets))
-        if not compare(l10n_changesets, changesets):
-            log.error("local l10n-changesets do not match tagged revisions in repo")
+        log.info("Comparing tagged revision %s to on-disk %s ..." % (
+            l10n_url, changesets))
+        if not compare(l10n_changesets, l10n_changesets_file):
+            log.error("local l10n-changesets do not match tagged revisions"
+                      " in repo")
             success = False
             error_tally.add('verify_configs')
     except urllib2.HTTPError:
         log.error("cannot find configs in repo %s" % configs_url)
         log.error("cannot find configs in repo %s" % l10n_url)
         success = False
         error_tally.add('verify_configs')
     return success
 
+
 def query_locale_revisions(l10n_changesets):
     locales = {}
     if l10n_changesets.endswith('.json'):
         fh = open(l10n_changesets, 'r')
         locales_json = json.load(fh)
         fh.close()
-        for locale in locales_json.keys():
+        for locale in locales_json:
             locales[locale] = locales_json[locale]["revision"]
     else:
         for line in open(l10n_changesets, 'r'):
             locale, revision = line.split()
             locales[locale] = revision
     return locales
 
+
 def verify_l10n_changesets(hgHost, l10n_changesets):
     """Checks for the existance of all l10n changesets"""
     success = True
     locales = query_locale_revisions(l10n_changesets)
     for locale in locales.keys():
         revision = locales[locale]
         localePath = '%(repoPath)s/%(locale)s/file/%(revision)s' % {
             'repoPath': releaseConfig['l10nRepoPath'].strip('/'),
             'locale': locale,
             'revision': revision,
         }
         locale_url = make_hg_url(hgHost, localePath, protocol='https')
         log.info("Checking for existence l10n changeset %s %s in repo %s ..."
-            % (locale, revision, locale_url))
+                 % (locale, revision, locale_url))
         try:
             urllib2.urlopen(locale_url)
         except urllib2.HTTPError:
-            log.error("cannot find l10n locale %s in repo %s" % (locale, locale_url))
+            log.error("cannot find l10n locale %s in repo %s" % (locale,
+                                                                 locale_url))
             success = False
             error_tally.add('verify_l10n')
     return success
 
+
 def verify_l10n_dashboard(l10n_changesets, l10n_dashboard_version=None):
     """Checks the l10n-changesets against the l10n dashboard"""
     success = True
     locales = query_locale_revisions(l10n_changesets)
     if l10n_dashboard_version:
         l10n_dashboard_version = getL10nDashboardVersion(
             l10n_dashboard_version, releaseConfig['productName'],
             parse_version=False)
     else:
         l10n_dashboard_version = getL10nDashboardVersion(
             releaseConfig['version'], releaseConfig['productName'])
-    dash_url = 'https://l10n.mozilla.org/shipping/l10n-changesets?ms=%s' % l10n_dashboard_version
+    dash_url = 'https://l10n.mozilla.org/shipping/l10n-changesets?ms=%s' % \
+        l10n_dashboard_version
     log.info("Comparing l10n changesets on dashboard %s to on-disk %s ..."
-        % (dash_url, l10n_changesets))
+             % (dash_url, l10n_changesets))
     try:
         dash_changesets = {}
         for line in urllib2.urlopen(dash_url):
             locale, revision = line.split()
             dash_changesets[locale] = revision
         for locale in locales:
             revision = locales[locale]
             dash_revision = dash_changesets.pop(locale, None)
             if not dash_revision:
                 log.error("\tlocale %s missing on dashboard" % locale)
                 success = False
                 error_tally.add('verify_l10n_dashboard')
             elif revision != dash_revision:
-                log.error("\tlocale %s revisions not matching: %s (config) vs. %s (dashboard)"
+                log.error("\tlocale %s revisions not matching: %s (config)"
+                          " vs. %s (dashboard)"
                     % (locale, revision, dash_revision))
                 success = False
                 error_tally.add('verify_l10n_dashboard')
         for locale in dash_changesets:
             log.error("\tlocale %s missing in config" % locale)
             success = False
             error_tally.add('verify_l10n_dashboard')
     except urllib2.HTTPError:
         log.error("cannot find l10n dashboard at %s" % dash_url)
         success = False
         error_tally.add('verify_l10n_dashboard')
     return success
 
+
 def verify_l10n_shipped_locales(l10n_changesets, shipped_locales):
-    """Ensure that our l10n-changesets on the master match the repo's shipped locales list"""
+    """Ensure that our l10n-changesets on the master match the repo's shipped
+    locales list"""
     success = True
     locales = query_locale_revisions(l10n_changesets)
     log.info("Comparing l10n changesets to shipped locales ...")
     diff_list = locale_diff(locales, shipped_locales)
     if len(diff_list) > 0:
-        log.error("l10n_changesets and shipped_locales differ on locales: %s" % diff_list)
+        log.error("l10n_changesets and shipped_locales differ on locales:"
+                  " %s" % diff_list)
         success = False
         error_tally.add('verify_l10n_shipped_locales')
     return success
 
+
 def verify_options(cmd_options, config):
     """Check release_configs against command-line opts"""
     success = True
     if cmd_options.version and cmd_options.version != config['version']:
         log.error("version passed in does not match release_configs")
         success = False
         error_tally.add('verify_options')
-    if cmd_options.buildNumber and int(cmd_options.buildNumber) != int(config['buildNumber']):
+    if cmd_options.buildNumber and \
+        int(cmd_options.buildNumber) != int(config['buildNumber']):
         log.error("buildNumber passed in does not match release_configs")
         success = False
         error_tally.add('verify_options')
-    if not getRepoMatchingBranch(cmd_options.branch, config['sourceRepositories']):
+    if not getRepoMatchingBranch(cmd_options.branch,
+                                 config['sourceRepositories']):
         log.error("branch passed in does not exist in release config")
         success = False
         error_tally.add('verify_options')
     return success
 
+
 if __name__ == '__main__':
-    from localconfig import GLOBAL_VARS
     parser = OptionParser(__doc__)
     parser.set_defaults(
-            check=True,
-            checkL10n=True,
-            checkMozconfigs=True,
-            dryrun=False,
-            username="cltbld",
-            loglevel=logging.INFO,
-            version=None,
-            buildNumber=None,
-            branch=None,
-            products=None,
-            whitelist='../tools/buildbot-helpers/mozconfig_whitelist',
-            )
-    parser.add_option("-b", "--bypass-check", dest="check", action="store_false",
-            help="don't bother verifying release repo's on this master")
-    parser.add_option("-l", "--bypass-l10n-check", dest="checkL10n", action="store_false",
-            help="don't bother verifying l10n milestones")
-    parser.add_option("-m", "--bypass-mozconfig-check", dest="checkMozconfigs", action="store_false",
-            help="don't bother verifying mozconfigs")
-    parser.add_option("-d", "--dryrun", dest="dryrun", action="store_true",
-            help="just do the reconfig/checks, without starting anything")
-    parser.add_option("-u", "--username", dest="username",
-            help="specify a specific username to attach to the sendchange (cltbld)")
-    parser.add_option("-V", "--version", dest="version",
-            help="firefox version string for release in format: x.x.x")
+        check=True,
+        checkL10n=True,
+        checkL10nDashboard=True,
+        checkMozconfigs=True,
+        dryrun=False,
+        username="cltbld",
+        loglevel=logging.INFO,
+        version=None,
+        buildNumber=None,
+        branch=None,
+        whitelist=path.abspath(path.join(path.dirname(__file__),
+                                         "mozconfig_whitelist")),
+        skip_reconfig=False,
+        configs_repo_url='build/buildbot-configs',
+        configs_branch='production',
+        masters_json_file=path.abspath(path.join(
+            path.dirname(__file__),
+            "../buildfarm/maintenance/production-masters.json")),
+        concurrency=8,
+        skip_verify_configs=False,
+    )
+    parser.add_option(
+        "-b", "--bypass-check", dest="check", action="store_false",
+        help="don't bother verifying release repo's on this master")
+    parser.add_option(
+        "-l", "--bypass-l10n-check", dest="checkL10n", action="store_false",
+        help="don't bother verifying l10n milestones")
+    parser.add_option(
+        "--bypass-l10n-dashboard-check", dest="checkL10nDashboard",
+        action="store_false", help="don't verify l10n changesets against the dashboard (implied when --bypass-l10n-check is passed)")
+    parser.add_option(
+        "-m", "--bypass-mozconfig-check",  dest="checkMozconfigs",
+        action="store_false", help="don't verify mozconfigs")
+    parser.add_option(
+        "-d", "--dryrun", "--dry-run", dest="dryrun", action="store_true",
+        help="just do the reconfig/checks, without starting anything")
+    parser.add_option(
+        "-u", "--username", dest="username",
+        help="specify a specific username to attach to the sendchange")
+    parser.add_option(
+        "-V", "--version", dest="version",
+        help="version string for release in format: x.x.x")
     parser.add_option("-N", "--build-number", dest="buildNumber", type="int",
-            help="buildNumber for this release, uses release_config otherwise")
-    parser.add_option("-B", "--branch", dest="branch",
-            help="branch name for this release, uses release_config otherwise")
-    parser.add_option("-c", "--release-config", dest="releaseConfigFiles",
-            action="append",
-            help="specify the release-config files (the first is primary)")
-    parser.add_option("-p", "--products", dest="products",
-            help="coma separated list of products")
+                      help="build number for this release, "
+                      "uses release_config otherwise")
+    parser.add_option(
+        "-B", "--branch", dest="branch",
+        help="branch name for this release, uses release_config otherwise")
+    parser.add_option(
+        "-c", "--release-config", dest="releaseConfigFiles", action="append",
+        help="specify the release-config files (the first is primary)")
     parser.add_option("-w", "--whitelist", dest="whitelist",
-            help="whitelist for known mozconfig differences")
-    parser.add_option("--l10n-dashboard-version", dest="l10n_dashboard_version",
-            help="Override L10N dashboard version")
+                      help="whitelist for known mozconfig differences")
+    parser.add_option(
+        "--l10n-dashboard-version", dest="l10n_dashboard_version",
+        help="Override L10N dashboard version")
+    parser.add_option("--skip-reconfig", dest="skip_reconfig",
+                      action="store_true", help="Do not run reconfig")
+    parser.add_option("--configs-dir", dest="configs_dir",
+                      help="buildbot-configs directory")
+    parser.add_option("--configs-repo-url", dest="configs_repo_url",
+                      help="buildbot-configs repo URL")
+    parser.add_option("--configs-branch", dest="configs_branch",
+                      help="buildbot-configs branch")
+    parser.add_option("--masters-json-file", dest="masters_json_file",
+                      help="Path to production-masters.json file.")
+    parser.add_option('-j', dest='concurrency', type='int',
+                      help='Fabric concurrency level')
+    parser.add_option("--skip-verify-configs", dest="skip_verify_configs",
+                      action="store_true",
+                      help="Do not verify configs agains remote repos")
 
     options, args = parser.parse_args()
-    if not options.products:
-        parser.error("Need to provide a list of products, e.g. -p firefox,fennec")
     if not options.dryrun and not args:
-        parser.error("Need to provide a master to sendchange to, or -d for a dryrun")
+        parser.error("Need to provide a master to sendchange to,"
+                     " or -d for a dryrun")
     elif not options.branch:
         parser.error("Need to provide a branch to release")
     elif not options.releaseConfigFiles:
         parser.error("Need to provide a release config file")
 
     logging.basicConfig(level=options.loglevel,
-            format="%(asctime)s : %(levelname)s : %(message)s")
+                        format="%(asctime)s : %(levelname)s : %(message)s")
 
     releaseConfig = None
     test_success = True
     buildNumber = options.buildNumber
+    products = []
+
+    check_buildbot()
+    check_fabric()
+
+    if options.configs_dir:
+        configs_dir = options.configs_dir
+        cleanup_configs = False
+    else:
+        cleanup_configs = True
+        configs_dir = mkdtemp()
+        remote = make_hg_url(HG, options.configs_repo_url)
+        retry(mercurial, args=(remote, configs_dir),
+              kwargs={'branch': options.configs_branch})
+        update(configs_dir, options.configs_branch)
+
+    # https://bugzilla.mozilla.org/show_bug.cgi?id=678103#c5
+    # This goes through the list of config files in reverse order, which is a
+    # hacky way of making sure that the config file that's listed first is the
+    # one that's loaded in releaseConfig for the sendchange.
     for releaseConfigFile in list(reversed(options.releaseConfigFiles)):
-        releaseConfig = readReleaseConfig(releaseConfigFile)
+        abs_release_config_file = path.join(configs_dir, 'mozilla',
+                                            releaseConfigFile)
+        releaseConfig = readReleaseConfig(abs_release_config_file)
+        products.append(releaseConfig['productName'])
 
         if not options.buildNumber:
-            log.warn("No buildNumber specified, using buildNumber in release_config, which may be out of date!")
+            log.warn("No buildNumber specified, using buildNumber in"
+                     " release_config, which may be out of date!")
             options.buildNumber = releaseConfig['buildNumber']
 
         if options.check:
-            if not verify_branch(options.branch, releaseConfig['productName']):
-                test_success = False
-                log.error('Error verifying branch is enabled on master')
-
+            site.addsitedir(path.join(configs_dir, 'mozilla'))
             from config import BRANCHES
             source_repo = 'mozilla'
             try:
                 branchConfig = BRANCHES[options.branch]
             except KeyError:
                 from thunderbird_config import BRANCHES
                 branchConfig = BRANCHES[options.branch]
                 source_repo = 'comm'
 
             #Match command line options to defaults in release_configs
             if not verify_options(options, releaseConfig):
                 test_success = False
-                log.error("Error verifying command-line options, attempting checking repo")
+                log.error("Error verifying command-line options,"
+                          " attempting checking repo")
 
-            # verify that mozconfigs for this release pass diff with nightly, compared to a whitelist
+            # verify that mozconfigs for this release pass diff with nightly,
+            # compared to a whitelist
             try:
-                path = releaseConfig['sourceRepositories'][source_repo]['path']
-                revision = releaseConfig['sourceRepositories'][source_repo]['revision']
+                repo_path = \
+                    releaseConfig['sourceRepositories'][source_repo]['path']
+                revision = \
+                    releaseConfig['sourceRepositories'][source_repo]['revision']
             except KeyError:
                 try:
-                    path = releaseConfig['sourceRepositories']['mobile']['path']
-                    revision = releaseConfig['sourceRepositories']['mobile']['revision']
-                except:
+                    repo_path = \
+                        releaseConfig['sourceRepositories']['mobile']['path']
+                    revision = \
+                        releaseConfig['sourceRepositories']['mobile']['revision']
+                except KeyError:
                     log.error("Can't determine sourceRepo for mozconfigs")
-            if options.checkMozconfigs:
-                if not verify_mozconfigs(
-                        path,
+            if options.checkMozconfigs and \
+                    not verify_mozconfigs(
+                        repo_path,
                         revision,
                         branchConfig['hghost'],
                         releaseConfig['productName'],
                         releaseConfig['mozconfigs'],
-                        releaseConfig['appName'],
-                        options.whitelist
-                    ):
-                    test_success = False
-                    log.error("Error verifying mozconfigs")
+                        options.whitelist):
+                test_success = False
+                log.error("Error verifying mozconfigs")
 
-            #verify that the release_configs on-disk match the tagged revisions in hg
-            if not verify_configs(
-                    "%s_BUILD%s" % (releaseConfig['baseTag'], buildNumber),
-                    branchConfig['hghost'],
-                    GLOBAL_VARS['config_repo_path'],
-                    releaseConfig['l10nRevisionFile'],
-                    releaseConfigFile,
-                    ):
+            #verify that the release_configs on-disk match the tagged
+            #revisions in hg
+            l10nRevisionFile = path.join(configs_dir, 'mozilla',
+                                         releaseConfig['l10nRevisionFile'])
+            if not options.skip_verify_configs and \
+                    not verify_configs(
+                        configs_dir,
+                        "%s_BUILD%s" % (releaseConfig['baseTag'], buildNumber),
+                        branchConfig['hghost'],
+                        options.configs_repo_url,
+                        releaseConfig['l10nRevisionFile'],
+                        releaseConfigFile):
                 test_success = False
                 log.error("Error verifying configs")
 
             if options.checkL10n:
                 #verify that l10n changesets exist
-                if not verify_l10n_changesets(
-                        branchConfig['hghost'],
-                        releaseConfig['l10nRevisionFile']):
+                if not verify_l10n_changesets(branchConfig['hghost'],
+                                              l10nRevisionFile):
                     test_success = False
                     log.error("Error verifying l10n changesets")
 
-                #verify that l10n changesets match the dashboard
-                if not verify_l10n_dashboard(releaseConfig['l10nRevisionFile'],
-                                            options.l10n_dashboard_version):
-                    test_success = False
-                    log.error("Error verifying l10n dashboard changesets")
+                if options.checkL10nDashboard:
+                    #verify that l10n changesets match the dashboard
+                    if not verify_l10n_dashboard(
+                        l10nRevisionFile,
+                        options.l10n_dashboard_version):
+                        test_success = False
+                        log.error("Error verifying l10n dashboard changesets")
 
-                #verify that l10n changesets match the shipped locales in firefox product
+                #verify that l10n changesets match the shipped locales
                 if releaseConfig.get('shippedLocalesPath'):
                     sr = releaseConfig['sourceRepositories'][source_repo]
                     sourceRepoPath = sr.get('clonePath', sr['path'])
                     shippedLocales = getLocaleListFromShippedLocales(
                                         getShippedLocales(
                                             releaseConfig['productName'],
                                             releaseConfig['appName'],
                                             releaseConfig['version'],
                                             releaseConfig['buildNumber'],
                                             sourceRepoPath,
                                             'http://hg.mozilla.org',
                                             sr['revision'],
                                     ))
                     # l10n_changesets do not have an entry for en-US
                     if 'en-US' in shippedLocales:
                         shippedLocales.remove('en-US')
-                    if not verify_l10n_shipped_locales(
-                            releaseConfig['l10nRevisionFile'],
-                            shippedLocales):
+                    if not verify_l10n_shipped_locales(l10nRevisionFile,
+                                                       shippedLocales):
                         test_success = False
-                        log.error("Error verifying l10n_changesets matches shipped_locales")
+                        log.error("Error verifying l10n_changesets matches"
+                                  " shipped_locales")
 
-            #verify that the relBranch + revision in the release_configs exists in hg
+            #verify that the relBranch + revision in the release_configs
+            #exists in hg
             for sr in releaseConfig['sourceRepositories'].values():
                 sourceRepoPath = sr.get('clonePath', sr['path'])
                 if not verify_repo(sourceRepoPath, sr['revision'],
                                    branchConfig['hghost']):
                     test_success = False
                     log.error("Error verifying repos")
 
-            #if this is a respin, verify that the version/milestone files have been bumped
+            #if this is a respin, verify that the version/milestone files
+            #have been bumped
             if buildNumber > 1:
                 for sr in releaseConfig['sourceRepositories'].values():
                     if not verify_build(sr, branchConfig['hghost']):
                         test_success = False
 
-    check_buildbot()
     if test_success:
         if not options.dryrun:
-            reconfig()
-            sourceRepoPath = getRepoMatchingBranch(options.branch, releaseConfig['sourceRepositories'])['path']
+            if not options.skip_reconfig:
+                fabric_helper = FabricHelper(
+                    masters_json_file=options.masters_json_file,
+                    concurrency=options.concurrency,
+                    roles=['build', 'scheduler'])
+                fabric_helper.update_and_reconfig()
+            sourceRepoPath = getRepoMatchingBranch(
+                options.branch, releaseConfig['sourceRepositories'])['path']
             sendchange(
-                    sourceRepoPath,
-                    "%s_RELEASE" % releaseConfig['baseTag'],
-                    options.username,
-                    args[0],
-                    options.products,
-                    )
+                sourceRepoPath,
+                "%s_RELEASE" % releaseConfig['baseTag'],
+                options.username,
+                args[0],
+                products,
+            )
         else:
-            log.info("Tests Passed! Did not run reconfig/sendchange. Rerun without `-d`")
+            log.info("Tests Passed! Did not run reconfig/sendchange."
+                     " Rerun without `-d`")
+            if cleanup_configs:
+                log.info("Removing temporary directory: %s" % configs_dir)
+                rmtree(configs_dir)
     else:
         log.fatal("Tests Failed! Not running sendchange!")
         log.fatal("Failed tests (run with -b to skip) :")
         for error in error_tally:
             log.fatal(error)
+        if cleanup_configs:
+            log.info("Not removing temporary directory: %s" % configs_dir)
+        exit(1)
--- a/buildfarm/maintenance/manage_masters.py
+++ b/buildfarm/maintenance/manage_masters.py
@@ -1,29 +1,36 @@
 #!/usr/bin/env python
+
+import site
+from os import path
 import time
-import master_fabric
 from fabric.api import env
 from fabric.context_managers import settings
 from Crypto.Random import atfork
 
+site.addsitedir(path.join(path.dirname(__file__), "../../lib/python"))
+
+import util.fabric.actions
+
 def print_status(remaining, failed_masters):
     print "=" * 30, "Remaining masters", "=" * 30
     for m in remaining:
         print m
     if failed_masters:
         print "=" * 30, "failed masters", "=" * 30
         for m in failed_masters:
             print m
     print "=" * 80
 
+
 def run_action_on_master(action, master):
     atfork()
     try:
-        action_func = getattr(master_fabric, action)
+        action_func = getattr(util.fabric.actions, "action_%s" % action)
         with settings(host_string=master.get('ip_address', master['hostname'])):
             action_func(master)
             return True
     except AttributeError:
         print "[%s] %s action is not defined." % (master['hostname'], action)
         return False
     except:
         import traceback
@@ -39,32 +46,36 @@ if __name__ == '__main__':
     try:
         import simplejson as json
     except ImportError:
         import json
 
     parser = OptionParser("""%%prog [options] action [action ...]
 
 Supported actions:
-%s""" % textwrap.fill(", ".join(master_fabric.actions)))
+%s""" % textwrap.fill(", ".join(util.fabric.actions.get_actions())))
 
     parser.set_defaults(
             hosts=[],
             roles=[],
             concurrency=1,
             show_list=False,
             all_masters=False,
             )
-    parser.add_option("-f", "--master-file", dest="master_file", help="list/url of masters")
+    parser.add_option("-f", "--master-file", dest="master_file",
+                      help="list/url of masters")
     parser.add_option("-H", "--host", dest="hosts", action="append")
     parser.add_option("-R", "--role", dest="roles", action="append")
-    parser.add_option("-M", "--match", dest="match", action="append", help="masters that match the term")
+    parser.add_option("-M", "--match", dest="match", action="append",
+                      help="masters that match the term")
     parser.add_option("-j", dest="concurrency", type="int")
-    parser.add_option("-l", dest="show_list", action="store_true", help="list hosts")
-    parser.add_option("--all", dest="all_masters", action="store_true", help="work on all masters, not just enabled ones")
+    parser.add_option("-l", dest="show_list", action="store_true",
+                      help="list hosts")
+    parser.add_option("--all", dest="all_masters", action="store_true",
+                      help="work on all masters, not just enabled ones")
     parser.add_option("-i", dest="status_interval", type="int", default="60",
                       help="Interval between statuses")
 
     options, actions = parser.parse_args()
 
     if options.concurrency > 1:
         import multiprocessing
 
@@ -83,24 +94,25 @@ Supported actions:
         if not m['enabled'] and not options.all_masters:
             continue
         if m['name'] in options.hosts:
             masters.append(m)
         elif m['role'] in options.roles:
             masters.append(m)
         elif options.match:
             for match in options.match:
-               if match in m["name"]:
+                if match in m["name"]:
                     masters.append(m)
         elif 'all' in options.hosts or 'all' in options.roles:
             masters.append(m)
 
     if options.show_list:
         if len(masters) == 0:
-            masters = [m for m in all_masters if m['enabled'] or options.all_masters]
+            masters = [m for m in all_masters if m['enabled'] or
+                       options.all_masters]
 
         fmt = "%(role)-9s %(name)-14s %(hostname)s:%(basedir)s"
         print fmt % dict(role='role', name='name', hostname='hostname',
                 basedir='basedir')
         for m in masters:
             print fmt % m
         sys.exit(0)
 
@@ -118,18 +130,18 @@ Supported actions:
 
         if options.concurrency == 1:
             for master in masters:
                 run_action_on_master(action, master)
         else:
             p = multiprocessing.Pool(processes=options.concurrency)
             results = []
             for master in masters:
-                result = p.apply_async(run_action_on_master, (action, master) )
-                results.append( (master, result) )
+                result = p.apply_async(run_action_on_master, (action, master))
+                results.append((master, result))
             p.close()
             failed = False
             failed_masters = []
             tries = 0
             while True:
                 for master, result in list(results):
                     if result.ready():
                         results.remove((master,result))
new file mode 100644
--- /dev/null
+++ b/buildfarm/release/release-runner.ini.example
@@ -0,0 +1,27 @@
+[api]
+
+api_root: http://dev-master01.build.scl1.mozilla.com:5000
+username: user
+password: password
+
+[release-runner]
+notify_from: Release Eng <DONOTreleaseME@mozilla.com>
+notify_to: Release Duty <you@example.com>
+smtp_server: localhost
+hg_username: ffxbld
+hg_ssh_key: ~/.ssh/ffxbld_dsa
+buildbot_configs: https://hg.mozilla.org/build/buildbot-configs
+buildbot_configs_branch: production
+buildbotcustom: https://hg.mozilla.org/build/buildbotcustom
+buildbotcustom_branch: production-0.8
+tools: https://hg.mozilla.org/build/tools
+tools_branch: default
+masters_json: http://hg.mozilla.org/build/tools/raw-file/default/buildfarm/maintenance/production-masters.json
+sendchange_master: localhost:9001
+staging: false
+verbose: true
+sleeptime: 30
+
+[symlinks]
+production_config.py: {buildbot_configs}/mozilla/localconfig.py
+thunderbird_production_config.py: {buildbot_configs}/mozilla/thunderbird_localconfig.py
new file mode 100644
--- /dev/null
+++ b/buildfarm/release/release-runner.py
@@ -0,0 +1,370 @@
+#!/usr/bin/env python
+
+import site
+import time
+import datetime
+import logging
+import sys
+import os
+from os import path
+from optparse import OptionParser
+from smtplib import SMTPException
+from functools import partial
+import textwrap
+
+site.addsitedir(path.join(path.dirname(__file__), "../../lib/python"))
+
+from kickoff.api import Releases, Release, ReleaseL10n
+from release.config import substituteReleaseConfig
+from release.info import getBaseTag, getTags, readReleaseConfig, \
+    getReleaseConfigName, getReleaseTag
+from release.versions import getAppVersion
+from release.sanity import check_buildbot, sendchange
+from util.commands import run_cmd
+from util.hg import mercurial, update, commit, tag, apply_and_push, \
+    make_hg_url, get_repo_path, cleanOutgoingRevs
+from util.retry import retry
+from util.fabric.common import check_fabric, FabricHelper
+from util.sendmail import sendmail
+from util.file import load_config, get_config
+
+logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s")
+log = logging.getLogger(__name__)
+
+HG = 'hg.mozilla.org'
+RELEASE_SANITY_SCRIPT = path.join(path.dirname(__file__),
+                                  "../../buildbot-helpers/release_sanity.py")
+
+
+def reconfig_warning(from_, to, smtp_server, rr, start_time, elapsed,
+                     proc):
+    """Called when the buildbot master reconfigs are taking a long time."""
+    started = time.strftime("%a, %d %b %Y %H:%M:%S %Z",
+                            time.localtime(start_time))
+    elapsed = datetime.timedelta(seconds=elapsed)
+    subject = "[release-runner] WARNING: Reconfig exceeded %s seconds" % elapsed
+    body = textwrap.dedent("""
+    A buildbot master reconfig started at %(start_time)s has been running for
+    %(elapsed)s seconds without completing.
+
+    - release-runner""" % dict(start_time=started, elapsed=elapsed))
+    try:
+        for release in rr.new_releases:
+            rr.update_status(release, "Waiting on reconfig for %s seconds" % elapsed)
+        sendmail(from_=from_, to=to, subject=subject, body=body,
+                 smtp_server=smtp_server)
+    except SMTPException:
+        log.error("Cannot send email", exc_info=True)
+
+
+class ReleaseRunner(object):
+    def __init__(self, api_root=None, username=None, password=None,
+                 timeout=60):
+        self.new_releases = []
+        self.releases_api = Releases((username, password), api_root=api_root,
+                                     timeout=timeout)
+        self.release_api = Release((username, password), api_root=api_root,
+                                   timeout=timeout)
+        self.release_l10n_api = ReleaseL10n((username, password),
+                                            api_root=api_root, timeout=timeout)
+
+    def get_release_requests(self):
+        new_releases = self.releases_api.getReleases()
+        if new_releases['releases']:
+            self.new_releases = [self.release_api.getRelease(name) for name in
+                                 new_releases['releases']]
+            return True
+        else:
+            log.info("No new releases: %s" % new_releases)
+            return False
+
+    def get_release_l10n(self, release):
+        return self.release_l10n_api.getL10n(release)
+
+    def update_status(self, release, status):
+        log.info('updating status for %s to %s' % (release['name'], status))
+        self.release_api.updateStatus(release['name'], status)
+
+    def start_release_automation(self, release, master):
+        sendchange(
+            release['branch'],
+            getReleaseTag(getBaseTag(release['product'],
+                                     release['version'])),
+            release['submitter'],
+            master,
+            release['product']
+        )
+        self.mark_as_completed(release)
+
+    def mark_as_completed(self, release):
+        log.info('mark as completed %s' % release['name'])
+        self.release_api.markAsCompleted(release['name'])
+
+
+def getPartials(release):
+    partials = {}
+    for p in release['partials'].split(','):
+        partialVersion, buildNumber = p.split('build')
+        partials[partialVersion] = {
+            'appVersion': getAppVersion(partialVersion),
+            'buildNumber': buildNumber,
+            'baseTag': getBaseTag(release['product'], partialVersion)
+        }
+    return partials
+
+
+def bump_configs(release, cfgFile, l10nContents, workdir, hg_username,
+                 productionBranch):
+    cfgDir = path.join(workdir, 'mozilla')
+    templateFile = path.join(cfgDir, '%s.template' % cfgFile)
+    tags = getTags(getBaseTag(release['product'], release['version']),
+                   release['buildNumber'])
+    cfgFile = path.join(cfgDir, cfgFile)
+    l10nChangesetsFile = path.join(
+        cfgDir,
+        readReleaseConfig(cfgFile)['l10nRevisionFile']
+    )
+    subs = release.copy()
+    if 'partials' in release:
+        subs['partials'] = getPartials(release)
+
+    with open(templateFile) as f:
+        template = f.read()
+    releaseConfig = substituteReleaseConfig(template, **subs)
+    # Write out the new configs on default....
+    with open(cfgFile, 'w') as f:
+        f.write(releaseConfig)
+    with open(l10nChangesetsFile, 'w') as f:
+        f.write(l10nContents)
+    commit(workdir, 'Update release config for %s' % release['name'],
+           user=hg_username)
+
+    # And also on whatever the production branch is
+    update(workdir, productionBranch)
+    with open(cfgFile, 'w') as f:
+        f.write(releaseConfig)
+    with open(l10nChangesetsFile, 'w') as f:
+        f.write(l10nContents)
+    prodRev = commit(workdir, 'Update release config for %s' % release['name'],
+                     user=hg_username)
+    forceTag = False
+    if release['buildNumber'] > 1:
+        forceTag = True
+    tag(workdir, tags, rev=prodRev, force=forceTag, user=hg_username)
+
+
+def tag_repo(workdir, branch, tags, force, pushRepo, hg_username,
+             hg_ssh_key):
+    def tag_and_push(repo, attempt):
+        update(workdir, branch)
+        tag(workdir, tags, rev=branch, force=force, user=hg_username)
+        log.info("Tagged %s, attempt #%s" % (repo, attempt))
+
+    apply_and_push(workdir, pushRepo, tag_and_push,
+                   ssh_username=hg_username, ssh_key=hg_ssh_key)
+
+
+def update_and_reconfig(masters_json, callback=None):
+    fabric_helper = FabricHelper(masters_json_file=masters_json,
+                                 roles=['scheduler', 'build'], subprocess=True,
+                                 callback=callback)
+    fabric_helper.update_and_reconfig()
+
+
+def get_release_sanity_args(configs_workdir, release, cfgFile, masters_json,
+                            buildbot_configs_branch):
+    args = [
+        '--branch', path.basename(release['branch']),
+        '--username', release['submitter'],
+        '--version', release['version'],
+        '--build-number', str(release['buildNumber']),
+        '--release-config', cfgFile,
+        '--skip-verify-configs',
+        '--masters-json-file', masters_json,
+        '--configs-dir', configs_workdir,
+        '--configs-branch', buildbot_configs_branch,
+    ]
+    if not release['dashboardCheck']:
+        args.append('--bypass-l10n-dashboard-check')
+    return args
+
+if __name__ == '__main__':
+    parser = OptionParser(__doc__)
+    parser.add_option('-c', '--config', dest='config',
+                      help='Configuration file')
+
+    options = parser.parse_args()[0]
+
+    if not options.config:
+        parser.error('Need to pass a config')
+    log.info('Loading config from %s' % options.config)
+    config = load_config(options.config)
+
+    if config.getboolean('release-runner', 'verbose'):
+        log.setLevel(logging.DEBUG)
+    else:
+        log.setLevel(logging.INFO)
+
+    check_buildbot()
+    check_fabric()
+
+    sendchange_master = config.get('release-runner', 'sendchange_master')
+    if not sendchange_master:
+        parser.error('Need to pass sendchange_master')
+
+    # Shorthand
+    api_root = config.get('api', 'api_root')
+    username = config.get('api', 'username')
+    password = config.get('api', 'password')
+    hg_username = config.get('release-runner', 'hg_username')
+    hg_ssh_key = config.get('release-runner', 'hg_ssh_key')
+    buildbot_configs = config.get('release-runner', 'buildbot_configs')
+    buildbot_configs_branch = config.get('release-runner',
+                                         'buildbot_configs_branch')
+    buildbotcustom = config.get('release-runner', 'buildbotcustom')
+    buildbotcustom_branch = config.get('release-runner',
+                                       'buildbotcustom_branch')
+    tools = config.get('release-runner', 'tools')
+    tools_branch = config.get('release-runner', 'tools_branch')
+    masters_json = config.get('release-runner', 'masters_json')
+    staging = config.getboolean('release-runner', 'staging')
+    sleeptime = config.getint('release-runner', 'sleeptime')
+    notify_from = get_config(config, 'release-runner', 'notify_from', None)
+    notify_to = get_config(config, 'release-runner', 'notify_to', None)
+    if isinstance(notify_to, basestring):
+        notify_to = [x.strip() for x in notify_to.split(',')]
+    smtp_server = get_config(config, 'release-runner', 'smtp_server',
+                             'localhost')
+    configs_workdir = 'buildbot-configs'
+    custom_workdir = 'buildbotcustom'
+    tools_workdir = 'tools'
+    configs_pushRepo = make_hg_url(HG, get_repo_path(buildbot_configs),
+                                   protocol='ssh')
+    custom_pushRepo = make_hg_url(HG, get_repo_path(buildbotcustom),
+                                  protocol='ssh')
+    tools_pushRepo = make_hg_url(HG, get_repo_path(tools), protocol='ssh')
+
+    rr = ReleaseRunner(api_root=api_root, username=username, password=password)
+
+    # Main loop waits for new releases, processes them and exits.
+    while True:
+        log.debug('Fetching release requests')
+        new_releases = rr.get_release_requests()
+        if rr.new_releases:
+            for release in rr.new_releases:
+                log.info('Got a new release request: %s' % release)
+            break
+        else:
+            log.debug('Sleeping for %d seconds before polling again' %
+                      sleeptime)
+            time.sleep(sleeptime)
+
+    # Clean up after any potential previous attempts before starting.
+    # Not doing this could end up with multiple heads on the same branch.
+    for repo, workdir, push_repo in (
+        (buildbot_configs, configs_workdir, configs_pushRepo),
+        (buildbotcustom, custom_workdir, custom_pushRepo),
+        (tools, tools_workdir, tools_pushRepo)
+    ):
+
+        retry(mercurial, args=(repo, workdir))
+        cleanOutgoingRevs(workdir, push_repo, hg_username,
+                          hg_ssh_key)
+
+    # Create symlinks if needed
+    if 'symlinks' in config.sections():
+        format_dict = dict(buildbot_configs=configs_workdir,
+                           buildbotcustom=custom_workdir, tools=tools_workdir)
+        for target in config.options('symlinks'):
+            symlink = config.get('symlinks', target).format(**format_dict)
+            if path.exists(symlink):
+                log.warning("Skipping %s -> %s symlink" % (symlink, target))
+            else:
+                log.info("Adding %s -> %s symlink" % (symlink, target))
+                os.symlink(target, symlink)
+
+    tags = []
+    # For safety's sake, we shouldn't force any tagging
+    # unless we know we need to.
+    force_tag = bool([r for r in rr.new_releases if r['buildNumber'] > 1])
+
+    def process_configs(repo, attempt):
+        """Helper method that encapsulates all of the things necessary
+           to run release runner for all releases."""
+        log.info("Bumping %s, attempt #%s" % (repo, attempt))
+        for release in rr.new_releases:
+            rr.update_status(release, 'Writing configs')
+            l10nContents = rr.get_release_l10n(release['name'])
+            tags.extend(getTags(getBaseTag(release['product'],
+                                            release['version']),
+                                release['buildNumber']))
+            update(configs_workdir, revision='default')
+            cfgFile = getReleaseConfigName(
+                release['product'], path.basename(release['branch']),
+                staging)
+            bump_configs(release=release, cfgFile=cfgFile,
+                            l10nContents=l10nContents,
+                            workdir=configs_workdir, hg_username=hg_username,
+                            productionBranch=buildbot_configs_branch)
+            rr.update_status(release, 'Running release sanity')
+            rs_args = get_release_sanity_args(configs_workdir, release,
+                                                cfgFile, masters_json,
+                                                buildbot_configs_branch)
+            run_cmd(['python', RELEASE_SANITY_SCRIPT] + rs_args +
+                    ['--dry-run'])
+            rr.update_status(
+                release, 'Waiting for other releases to run release sanity'
+            )
+
+    try:
+        # Pushing doesn't happen until _after_ release sanity has been run
+        # for all releases to minimize the chance of bad configs being
+        # pushed. apply_and_push calls process_configs, and if it returns
+        # successfully, it pushes all of the changes that it made.
+        apply_and_push(configs_workdir, configs_pushRepo, process_configs,
+                       ssh_username=hg_username, ssh_key=hg_ssh_key)
+
+        # Now that we know that all of the configs are good, we can tag
+        # the other repositories
+        for release in rr.new_releases:
+            rr.update_status(release, 'Tagging other repositories')
+        tag_repo(workdir=custom_workdir, branch=buildbotcustom_branch,
+                    tags=tags, force=force_tag, pushRepo=custom_pushRepo,
+                    hg_username=hg_username, hg_ssh_key=hg_ssh_key)
+        tag_repo(workdir=tools_workdir, branch=tools_branch, tags=tags,
+                    force=force_tag, pushRepo=tools_pushRepo,
+                    hg_username=hg_username, hg_ssh_key=hg_ssh_key)
+        for release in rr.new_releases:
+            rr.update_status(release, 'Reconfiging masters')
+
+        # Reconfig the masters and configure the warning callback, if present.
+        callback = None
+        if notify_from and notify_to:
+            callback = partial(reconfig_warning, notify_from, notify_to,
+                               smtp_server, rr)
+        update_and_reconfig(masters_json, callback=callback)
+    except Exception, e:
+        # Rather than catching individual problems and giving very specific
+        # status updates to the kickoff application, we use this catch-all.
+        # Because nearly all problems require looking at the release runner
+        # logs and manual intervention, it's not worth the pain and ugliness
+        # to do more than this.
+        for release in rr.new_releases:
+            rr.update_status(release, 'Failed: %s' % repr(e))
+        raise
+
+    rc = 0
+    for release in rr.new_releases:
+        try:
+            rr.update_status(release, 'Running sendchange command')
+            rr.start_release_automation(release, sendchange_master)
+        except:
+            # We explicitly do not raise an error here because there's no
+            # reason not to start other releases if the sendchange fails for
+            # another one. We _do_ need to set this in order to exit
+            # with the right code, though.
+            rc = 1
+            rr.update_status(release, 'Sendchange failed')
+            log.error('Sendchange failed for %s: ' % release, exc_info=True)
+
+    sys.exit(rc)
new file mode 100755
--- /dev/null
+++ b/buildfarm/release/release-runner.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+# XXX: TODO: remove the following hack
+. /home/cltbld/release-runner/venv/bin/activate
+
+# Sleep 3 days in case of failure
+SLEEP_TIME=259200
+NOTIFY_TO=release@mozilla.com
+CONFIG=/home/cltbld/.release-runner.ini
+
+CURR_DIR=$(cd $(dirname $0); pwd)
+HOSTNAME=`hostname -s`
+
+cd $CURR_DIR
+
+python release-runner.py -c $CONFIG
+RETVAL=$?
+if [[ $RETVAL != 0 ]]; then
+    (
+        echo "Release runner encountered a runtime error"
+        echo
+        echo "Please check the output log on $HOSTNAME"
+        echo "I'll sleep for $SLEEP_TIME seconds before retry"
+        echo
+        echo "- release runner"
+    ) | mail -s "[release-runner] failed at $HOSTNAME" $NOTIFY_TO
+
+    sleep $SLEEP_TIME
+fi
new file mode 100644
--- /dev/null
+++ b/buildfarm/release/release-runner.supervisor
@@ -0,0 +1,8 @@
+[program:releaserunner]
+command=/home/cltbld/release-runner/build-tools/buildfarm/release/release-runner.sh
+exitcodes=0
+user=cltbld
+log_stderr=true
+log_stdout=true
+redirect_stderr=true
+stdout_logfile=/var/log/supervisor/release-runner.log
--- a/lib/python/build/versions.py
+++ b/lib/python/build/versions.py
@@ -3,17 +3,17 @@ import re
 class BuildVersionsException(Exception):
     pass
 
 # Versions that match this should not be bumped
 DO_NOT_BUMP_REGEX = '^\d\.\d(pre)?$'
 
 # Regex that matches all possible versions and milestones
 ANY_VERSION_REGEX =\
-    ('\d+\.\d[\d\.]*'    # A version number
+    ('(\d+\.\d[\d\.]*)'    # A version number
     '([a-zA-Z]+\d+)?'    # Might be a project branch
     '((a|b)\d+)?'        # Might be an alpha or beta
     '(esr)?'             # Might be an esr
     '(pre)?')            # Might be a 'pre' (nightly) version
 
 BUMP_FILES = {
     '^.*(version.*\.txt|milestone\.txt)$': '^%(version)s$',
     '^.*(default-version\.txt|confvars\.sh)$': '^MOZ_APP_VERSION=%(version)s$'
new file mode 100644
new file mode 100644
--- /dev/null
+++ b/lib/python/kickoff/api.py
@@ -0,0 +1,80 @@
+import os
+import requests
+try:
+    import simplejson as json
+except ImportError:
+    import json
+
+CA_BUNDLE = os.path.join(os.path.dirname(__file__),
+                        '../../../../misc/certs/ca-bundle.crt')
+
+import logging
+log = logging.getLogger(__name__)
+
+def is_csrf_token_expired(token):
+    from datetime import datetime
+    expiry = token.split('##')[0]
+    if expiry <= datetime.now().strftime('%Y%m%d%H%M%S'):
+        return True
+    return False
+
+class API(object):
+    auth = None
+    url_template = None
+
+    def __init__(self, auth, api_root, ca_certs=CA_BUNDLE, timeout=60,
+                 raise_exceptions=True):
+        self.api_root = api_root.rstrip('/')
+        self.auth = auth
+        self.verify = ca_certs
+        self.timeout = timeout
+        self.config = dict(danger_mode=raise_exceptions)
+        self.session = requests.session()
+        self.csrf_token = None
+
+    def request(self, params=None, data=None, method='GET', url_template_vars={}):
+        url = self.api_root + self.url_template % url_template_vars
+        if method != 'GET' and method != 'HEAD':
+            if not self.csrf_token or is_csrf_token_expired(self.csrf_token):
+                res = self.session.request(method='HEAD', url=self.api_root + '/csrf_token',
+                                           config=self.config, timeout=self.timeout,
+                                           auth=self.auth)
+                self.csrf_token = res.headers['X-CSRF-Token']
+            data['csrf_token'] = self.csrf_token
+        log.debug('Request to %s' % url)
+        log.debug('Data sent: %s' % data)
+        try:
+            return self.session.request(method=method, url=url, data=data,
+                                        config=self.config, timeout=self.timeout,
+                                        auth=self.auth, params=params)
+        except requests.HTTPError, e:
+            log.error('Caught HTTPError: %s' % e.response.content)
+            raise
+
+class Releases(API):
+    url_template = '/releases'
+
+    def getReleases(self, ready=1, complete=0):
+        return json.loads(self.request(params={'ready': ready, 'complete': complete}).content)
+
+class Release(API):
+    url_template = '/releases/%(name)s'
+
+    def getRelease(self, name):
+        return json.loads(self.request(url_template_vars={'name': name}).content)
+
+    def updateStatus(self, name, status):
+        data = {'status': status}
+        url_template_vars = {'name': name}
+        return self.request(method='POST', data=data, url_template_vars=url_template_vars).content
+
+    def markAsCompleted(self, name):
+        data = {'complete': True, 'status': 'Started'}
+        url_template_vars = {'name': name}
+        return self.request(method='POST', data=data, url_template_vars=url_template_vars).content
+
+class ReleaseL10n(API):
+    url_template = '/releases/%(name)s/l10n'
+
+    def getL10n(self, name):
+        return self.request(url_template_vars={'name': name}).content
new file mode 100644
--- /dev/null
+++ b/lib/python/mozilla_buildtools/test/test_release_config.py
@@ -0,0 +1,15 @@
+import unittest
+
+from jinja2 import UndefinedError
+
+from release.config import substituteReleaseConfig
+
+class TestSubstituteReleaseConfig(unittest.TestCase):
+    def testSimple(self):
+        config = '{{ product }} {{ version }} {{ branch }}'
+        got = substituteReleaseConfig(config, 'foo', '1.0', branch='blah')
+        self.assertEquals(got, 'foo 1.0 blah')
+
+    def testUndefined(self):
+        config = '{{ blech }}'
+        self.assertRaises(UndefinedError, substituteReleaseConfig, config, '1', '2.0')
new file mode 100644
--- /dev/null
+++ b/lib/python/mozilla_buildtools/test/test_release_info.py
@@ -0,0 +1,36 @@
+import unittest
+
+from release.info import getBaseTag, getReleaseConfigName
+
+class TestGetBaseTag(unittest.TestCase):
+    def testRelease(self):
+        self.assertEquals('FIREFOX_16_0_2', getBaseTag('firefox', '16.0.2'))
+
+    def testBeta(self):
+        self.assertEquals('FIREFOX_17_0b3', getBaseTag('firefox', '17.0b3'))
+
+    def testEsr(self):
+        self.assertEquals('FIREFOX_10_0_9esr', getBaseTag('firefox', '10.0.9esr'))
+
+    def testFennec(self):
+        self.assertEquals('FENNEC_17_0', getBaseTag('fennec', '17.0'))
+
+    def testThunderbird(self):
+        self.assertEquals('THUNDERBIRD_18_0b1', getBaseTag('thunderbird', '18.0b1'))
+
+class TestGetReleaseConfigName(unittest.TestCase):
+    def testFennec(self):
+        got = getReleaseConfigName('fennec', 'mozilla-beta')
+        self.assertEquals('release-fennec-mozilla-beta.py', got)
+
+    def testFirefox(self):
+        got = getReleaseConfigName('firefox', 'mozilla-release')
+        self.assertEquals('release-firefox-mozilla-release.py', got)
+
+    def testThunderbird(self):
+        got = getReleaseConfigName('thunderbird', 'comm-esr10')
+        self.assertEquals('release-thunderbird-comm-esr10.py', got)
+
+    def testStaging(self):
+        got = getReleaseConfigName('fennec', 'mozilla-release', True)
+        self.assertEquals('staging_release-fennec-mozilla-release.py', got)
--- a/lib/python/mozilla_buildtools/test/test_release_versions.py
+++ b/lib/python/mozilla_buildtools/test/test_release_versions.py
@@ -1,14 +1,27 @@
 import unittest
 
-from release.versions import getL10nDashboardVersion
+from release.versions import getL10nDashboardVersion, getAppVersion
 
 class TestBuildVersions(unittest.TestCase):
     def _doTest(self, expected, version):
         self.assertEquals(expected,
                           getL10nDashboardVersion(version, "firefox"))
 
     def testPointRelease(self):
         self._doTest("fx4.0.1", "4.0.1")
 
     def testBeta(self):
         self._doTest("fx5_beta_b3", "5.0b3")
+
+class TestGetAppVersion(unittest.TestCase):
+    def testFinal(self):
+        self.assertEquals('17.0', getAppVersion('17.0'))
+
+    def testPoint(self):
+        self.assertEquals('18.0.3', getAppVersion('18.0.3'))
+
+    def testBeta(self):
+        self.assertEquals('17.0', getAppVersion('17.0b2'))
+
+    def testEsr(self):
+        self.assertEquals('10.0.9', getAppVersion('10.0.9esr'))
--- a/lib/python/mozilla_buildtools/test/test_util_commands.py
+++ b/lib/python/mozilla_buildtools/test/test_util_commands.py
@@ -1,11 +1,11 @@
 import unittest, subprocess
 
-from util.commands import run_cmd, get_output
+from util.commands import run_cmd, get_output, run_cmd_periodic_poll
 
 class TestRunCmd(unittest.TestCase):
     def testSimple(self):
         self.assertEquals(run_cmd(['true']), 0)
 
     def testFailure(self):
         self.assertRaises(subprocess.CalledProcessError, run_cmd, ['false'])
 
@@ -29,8 +29,56 @@ class TestRunCmd(unittest.TestCase):
            themselves. This test is to ensure that get_output always does."""
         output = "nothing"
         try:
             get_output(['bash', '-c', 'echo hello && false'])
         except subprocess.CalledProcessError, e:
             self.assertEquals(e.output, 'hello\n')
         else:
             self.fail("get_output did not raise CalledProcessError")
+
+
+class TestRunCmdPeriodicPoll(unittest.TestCase):
+
+    def testSimple(self):
+        self.assertEquals(run_cmd_periodic_poll(['true']), 0)
+
+    def testFailure(self):
+        self.assertRaises(subprocess.CalledProcessError, run_cmd_periodic_poll,
+                          ['false'])
+
+    def testSuccess2secs(self):
+        self.assertEquals(
+            run_cmd_periodic_poll(['bash', '-c', 'sleep 2 && true']),
+            0)
+
+    def testFailure2secs(self):
+        self.assertRaises(
+            subprocess.CalledProcessError, run_cmd_periodic_poll,
+            ['bash', '-c', 'sleep 2 && false'])
+
+    def testSuccess3secsWith2secsPoll(self):
+        self.assertEquals(
+            run_cmd_periodic_poll(['bash', '-c', 'sleep 3 && true'],
+                                  warning_interval=2),
+            0)
+
+    def testSuccessCallback(self):
+        self.callback_called = 0
+
+        def callback(start_time, elapsed, proc):
+            self.callback_called += 1
+
+        run_cmd_periodic_poll(['bash', '-c', 'sleep 5 && true'],
+                              warning_interval=2, warning_callback=callback)
+        self.assertEqual(self.callback_called, 2)
+
+    def testFailureCallback(self):
+        self.callback_called = 0
+
+        def callback(start_time, elapsed, proc):
+            self.callback_called += 1
+
+        self.assertRaises(
+            subprocess.CalledProcessError, run_cmd_periodic_poll,
+            ['bash', '-c', 'sleep 5 && false'], warning_interval=2,
+            warning_callback=callback)
+        self.assertEqual(self.callback_called, 2)
--- a/lib/python/mozilla_buildtools/test/test_util_hg.py
+++ b/lib/python/mozilla_buildtools/test/test_util_hg.py
@@ -2,28 +2,45 @@ import unittest
 import tempfile
 import shutil
 import os
 import subprocess
 
 import util.hg as hg
 from util.hg import clone, pull, update, hg_ver, mercurial, _make_absolute, \
   share, push, apply_and_push, HgUtilError, make_hg_url, get_branch, \
-  get_branches, path, init, unbundle, adjust_paths, is_hg_cset
+  get_branches, path, init, unbundle, adjust_paths, is_hg_cset, commit, tag
 from util.commands import run_cmd, get_output
 
 def getRevisions(dest):
     retval = []
     for rev in get_output(['hg', 'log', '-R', dest, '--template', '{node|short}\n']).split('\n'):
         rev = rev.strip()
         if not rev:
             continue
         retval.append(rev)
     return retval
 
+def getRevInfo(dest, rev):
+    output = get_output(['hg', 'log', '-R', dest, '-r', rev, '--template', '{author}\n{desc}\n{tags}']).splitlines()
+    info = {
+        'user': output[0],
+        'msg': output[1],
+        'tags': []
+    }
+    if len(output) > 2:
+        info['tags'] = output[2].split()
+    return info
+
+def getTags(dest):
+    tags = []
+    for tag in get_output(['hg', 'tags', '-R', dest]).splitlines():
+        tags.append(tag.split()[0])
+    return tags
+
 class TestMakeAbsolute(unittest.TestCase):
     def testAbsolutePath(self):
         self.assertEquals(_make_absolute("/foo/bar"), "/foo/bar")
 
     def testRelativePath(self):
         self.assertEquals(_make_absolute("foo/bar"), os.path.abspath("foo/bar"))
 
     def testHTTPPaths(self):
@@ -906,8 +923,63 @@ class TestHg(unittest.TestCase):
         # Move our repodir out of the way so that pulling/cloning from it fails
         os.rename(self.repodir, self.repodir + "-bad")
 
         # Try and update to a non-existent revision using our mirror and
         # bundle, with the master failing. We should fail
         self.assertRaises(subprocess.CalledProcessError, mercurial,
                 self.repodir, self.wc, shareBase=shareBase, mirrors=[mirror], bundles=[bundle],
                 revision="1234567890")
+
+    def testCommit(self):
+        run_cmd(['touch', 'newfile'], cwd=self.repodir)
+        run_cmd(['hg', 'add', 'newfile'], cwd=self.repodir)
+        rev = commit(self.repodir, user='unittest', msg='gooooood')
+        info = getRevInfo(self.repodir, rev)
+        self.assertEquals(info['msg'], 'gooooood')
+        # can't test for user, because it depends on local hg configs.
+
+    def testCommitWithUser(self):
+        run_cmd(['touch', 'newfile'], cwd=self.repodir)
+        run_cmd(['hg', 'add', 'newfile'], cwd=self.repodir)
+        rev = commit(self.repodir, user='unittest', msg='new stuff!')
+        info = getRevInfo(self.repodir, rev)
+        self.assertEquals(info['user'], 'unittest')
+        self.assertEquals(info['msg'], 'new stuff!')
+
+    def testTag(self):
+        tag(self.repodir, ['test_tag'])
+        self.assertTrue('test_tag' in getTags(self.repodir))
+
+    def testMultitag(self):
+        tag(self.repodir, ['tag1', 'tag2'])
+        tags = getTags(self.repodir)
+        self.assertTrue('tag1' in tags)
+        self.assertTrue('tag2' in tags)
+
+    def testTagWithMsg(self):
+        rev = tag(self.repodir, ['tag'], msg='I made a tag!')
+        info = getRevInfo(self.repodir, rev)
+        self.assertEquals('I made a tag!', info['msg'])
+
+    def testTagWithUser(self):
+        rev = tag(self.repodir, ['taggy'], user='tagger')
+        info = getRevInfo(self.repodir, rev)
+        self.assertEquals('tagger', info['user'])
+
+    def testTagWithRevision(self):
+        tag(self.repodir, ['tag'], rev='1')
+        info = getRevInfo(self.repodir, '1')
+        self.assertEquals(['tag'], info['tags'])
+
+    def testMultitagWithRevision(self):
+        tag(self.repodir, ['tag1', 'tag2'], rev='1')
+        info = getRevInfo(self.repodir, '1')
+        self.assertEquals(['tag1', 'tag2'], info['tags'])
+
+    def testTagFailsIfExists(self):
+        run_cmd(['hg', 'tag', '-R', self.repodir, 'tagg'])
+        self.assertRaises(subprocess.CalledProcessError, tag, self.repodir, ['tagg'])
+
+    def testForcedTag(self):
+        run_cmd(['hg', 'tag', '-R', self.repodir, 'tag'])
+        tag(self.repodir, ['tag'], force=True)
+        self.assertTrue('tag' in getTags(self.repodir))
new file mode 100644
--- /dev/null
+++ b/lib/python/release/config.py
@@ -0,0 +1,16 @@
+import logging
+log = logging.getLogger(__name__)
+
+from release.info import getBaseTag
+from release.versions import getAppVersion
+
+def substituteReleaseConfig(config, product, version, **other):
+    from jinja2 import Environment, FunctionLoader, StrictUndefined
+
+    baseTag = getBaseTag(product, version)
+    appVersion = getAppVersion(version)
+
+    environment = Environment(undefined=StrictUndefined)
+    template = environment.from_string(config)
+    return template.render(product=product, version=version, baseTag=baseTag,
+        appVersion=appVersion, **other)
--- a/lib/python/release/info.py
+++ b/lib/python/release/info.py
@@ -46,16 +46,22 @@ def findOldBuildIDs(product, version, bu
             try:
                 id = getBuildID(platform, product, version, n, nightlyDir,
                                 server)
                 ids[platform].append(id)
             except Exception, e:
                 log.error("Hit exception: %s" % e)
     return ids
 
+def getReleaseConfigName(product, branch, staging=False):
+    cfg = 'release-%s-%s.py' % (product, branch)
+    if staging:
+        cfg = 'staging_%s' % cfg
+    return cfg
+
 def readReleaseConfig(configfile, required=[]):
     return readConfig(configfile, keys=['releaseConfig'], required=required)
 
 def readBranchConfig(dir, localconfig, branch, required=[]):
     shutil.copy(localconfig, path.join(dir, "localconfig.py"))
     oldcwd = os.getcwd()
     os.chdir(dir)
     sys.path.append(".")
@@ -79,16 +85,21 @@ def readConfig(configfile, keys=[], requ
             log.error("Required item `%s' missing from %s" % (key, c))
     if err:
         raise ConfigError("Missing at least one item in config, see above")
     return c
 
 def isFinalRelease(version):
     return bool(re.match(FINAL_RELEASE_REGEX, version))
 
+def getBaseTag(product, version):
+    product = product.upper()
+    version = version.replace('.', '_')
+    return '%s_%s' % (product, version)
+
 def getTags(baseTag, buildNumber, buildTag=True):
     t = ['%s_RELEASE' % baseTag]
     if buildTag:
         t.append('%s_BUILD%d' % (baseTag, int(buildNumber)))
     return t
 
 def getRuntimeTag(tag):
     return "%s_RUNTIME" % tag
@@ -96,16 +107,19 @@ def getRuntimeTag(tag):
 def getReleaseTag(tag):
     return "%s_RELEASE" % tag
 
 def generateRelbranchName(milestone, prefix='GECKO'):
     return '%s%s_%s_RELBRANCH' % (
       prefix, milestone.replace('.', ''),
       datetime.now().strftime('%Y%m%d%H'))
 
+def getReleaseName(product, version, buildNumber):
+    return '%s-%s-build%s' % (product.title(), version, str(buildNumber))
+
 def getRepoMatchingBranch(branch, sourceRepositories):
     for sr in sourceRepositories.values():
         if branch in sr['path']:
             return sr
     return None
 
 def fileInfo(filepath, product):
     """Extract information about a release file.  Returns a dictionary with the
new file mode 100644
--- /dev/null
+++ b/lib/python/release/sanity.py
@@ -0,0 +1,61 @@
+import logging
+import re
+from util.commands import run_cmd, get_output
+from subprocess import CalledProcessError
+
+log = logging.getLogger(__name__)
+
+def check_buildbot():
+    """check if buildbot command works"""
+    try:
+        run_cmd(['buildbot', '--version'])
+    except CalledProcessError:
+        log.error("FAIL: buildbot command doesn't work", exc_info=True)
+        raise
+
+
+def find_version(contents, versionNumber):
+    """Given an open readable file-handle look for the occurrence
+       of the version # in the file"""
+    ret = re.search(re.compile(re.escape(versionNumber), re.DOTALL), contents)
+    return ret
+
+
+def locale_diff(locales1, locales2):
+    """ accepts two lists and diffs them both ways, returns any differences
+    found """
+    diff_list = [locale for locale in locales1 if not locale in locales2]
+    diff_list.extend(locale for locale in locales2 if not locale in locales1)
+    return diff_list
+
+
+def get_buildbot_username_param():
+    cmd = ['buildbot', 'sendchange', '--help']
+    output = get_output(cmd)
+    if "-W, --who=" in output:
+        return "--who"
+    else:
+        return "--username"
+
+
+def sendchange(branch, revision, username, master, products):
+    """Send the change to buildbot to kick off the release automation"""
+    if isinstance(products, basestring):
+        products = [products]
+    cmd = [
+        'buildbot',
+        'sendchange',
+        get_buildbot_username_param(),
+        username,
+        '--master',
+        master,
+        '--branch',
+        branch,
+        '-p',
+        'products:%s' % ','.join(products),
+        '-p',
+        'script_repo_revision:%s' % revision,
+        'release_build'
+    ]
+    logging.info("Executing: %s" % cmd)
+    run_cmd(cmd)
--- a/lib/python/release/versions.py
+++ b/lib/python/release/versions.py
@@ -1,12 +1,15 @@
 import re
 
 from build.versions import ANY_VERSION_REGEX
 
+def getAppVersion(version):
+    return re.match(ANY_VERSION_REGEX, version).group(1)
+
 def getPrettyVersion(version):
     version = re.sub(r'a([0-9]+)$', r' Alpha \1', version)
     version = re.sub(r'b([0-9]+)$', r' Beta \1', version)
     version = re.sub(r'rc([0-9]+)$', r' RC \1', version)
     return version
 
 def getL10nDashboardVersion(version, product, parse_version=True):
     if product == 'firefox':
@@ -17,13 +20,13 @@ def getL10nDashboardVersion(version, pro
         ret = 'tb'
     elif product == 'seamonkey':
         ret = 'sea'
 
     if not parse_version:
         ret += version
     else:
         parsed = re.match(ANY_VERSION_REGEX, version)
-        if parsed.group(1) and parsed.group(1).startswith('b'):
-            ret = '%s%s_beta_%s' % (ret, version.split(".")[0], parsed.group(1))
+        if parsed.group(2) and parsed.group(2).startswith('b'):
+            ret = '%s%s_beta_%s' % (ret, version.split(".")[0], parsed.group(2))
         else:
             ret += version
     return ret
--- a/lib/python/util/commands.py
+++ b/lib/python/util/commands.py
@@ -54,16 +54,70 @@ def run_remote_cmd(cmd, server, username
     if sshKey:
         cmd_prefix.extend(['-i', os.path.expanduser(sshKey)])
     cmd_prefix.append(server)
     if isinstance(cmd, basestring):
         cmd = [cmd]
     return run_cmd(cmd_prefix + cmd, **kwargs)
 
 
+def run_cmd_periodic_poll(cmd, warning_interval=300, poll_interval=0.25,
+                          warning_callback=None, **kwargs):
+    """Run cmd (a list of arguments) in a subprocess and check its completion
+    periodically.  Raise subprocess.CalledProcessError if the command exits
+    with non-zero.  If the command returns successfully, return 0.
+    warning_callback function will be called with the following arguments if the
+    command's execution takes longer then warning_interval:
+        start_time, elapsed, proc
+    """
+    log_cmd(cmd, **kwargs)
+    # We update this after logging because we don't want all of the inherited
+    # env vars muddling up the output
+    if 'env' in kwargs:
+        kwargs['env'] = merge_env(kwargs['env'])
+    proc = subprocess.Popen(cmd, **kwargs)
+    start_time = time.time()
+    last_check = start_time
+
+    while True:
+        rc = proc.poll()
+
+        if rc is not None:
+            log.debug("Process returned %s", rc)
+            if rc == 0:
+                elapsed = time.time() - start_time
+                log.info("command: END (%.2fs elapsed)\n", elapsed)
+                return 0
+            else:
+                raise subprocess.CalledProcessError(rc, cmd)
+
+        now = time.time()
+        if now - last_check > warning_interval:
+            # reset last_check to avoid spamming callback
+            last_check = now
+            elapsed = now - start_time
+            if warning_callback:
+                log.debug("Calling warning_callback function: %s(%s)" %
+                    (warning_callback, start_time))
+                try:
+                    warning_callback(start_time, elapsed, proc)
+                except Exception:
+                    log.error("Callback raised an exception, ignoring...",
+                              exc_info=True)
+            else:
+                log.warning("Command execution is taking longer than "
+                            "warning_interval (%d seconds). "
+                            "Started at: %s, "
+                            "elapsed: %.2fs" % (warning_interval,
+                                                start_time,
+                                                elapsed))
+
+        time.sleep(poll_interval)
+
+
 def get_output(cmd, include_stderr=False, dont_log=False, **kwargs):
     """Run cmd (a list of arguments) and return the output.  If include_stderr
     is set, stderr will be included in the output, otherwise it will be sent to
     the caller's stderr stream.
 
     Warning that you shouldn't use this function to capture a large amount of
     output (> a few thousand bytes), it will deadlock."""
     if include_stderr:
@@ -73,17 +127,17 @@ def get_output(cmd, include_stderr=False
 
     log_cmd(cmd, **kwargs)
     if 'env' in kwargs:
         kwargs['env'] = merge_env(kwargs['env'])
     try:
         output = ""
         t = time.time()
         proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=stderr,
-                **kwargs)
+                                **kwargs)
         proc.wait()
         output = proc.stdout.read()
         if proc.returncode != 0:
             raise subprocess.CalledProcessError(proc.returncode, cmd)
         if not dont_log:
             log.info(output)
         return output
     except subprocess.CalledProcessError, e:
new file mode 100644
rename from buildfarm/maintenance/master_fabric.py
rename to lib/python/util/fabric/actions.py
--- a/buildfarm/maintenance/master_fabric.py
+++ b/lib/python/util/fabric/actions.py
@@ -1,170 +1,199 @@
 from fabric.api import run
 from fabric.context_managers import cd, hide, show
 from fabric.operations import put
 from fabric.colors import green, red
-import re, os
+import re
+import os
+import sys
+import inspect
 
 OK = green('[OK]')
 FAIL = red('[FAIL]')
 
-def check(master):
+BUILDBOT_WRANGLER = os.path.normpath(os.path.join(
+    os.path.dirname(__file__),
+    "../../../../buildfarm/maintenance/buildbot-wrangler.py"))
+
+
+def get_actions():
+    current_module = sys.modules[__name__]
+    for name in dir(current_module):
+        attr = getattr(current_module, name)
+        if inspect.isfunction(attr) and name.startswith('action_'):
+            yield name.replace('action_', '')
+
+
+def action_check(master):
     """Checks that the master parameters are valid"""
     with hide('stdout', 'stderr', 'running'):
         date = run('date')
         run('test -d %(bbcustom_dir)s' % master)
         run('test -d %(bbconfigs_dir)s' % master)
         run('test -d %(master_dir)s' % master)
         run('test -d %(tools_dir)s' % master)
 
-        assert run('hg -R %(bbcustom_dir)s ident -b' % master) == master['bbcustom_branch']
-        assert run('hg -R %(bbconfigs_dir)s ident -b' % master) == master['bbconfigs_branch']
-        assert run('hg -R %(tools_dir)s ident -b' % master) == master['tools_branch']
+        assert run('hg -R %(bbcustom_dir)s ident -b' % master) == \
+               master['bbcustom_branch']
+        assert run('hg -R %(bbconfigs_dir)s ident -b' % master) == \
+               master['bbconfigs_branch']
+        assert run('hg -R %(tools_dir)s ident -b' % master) == \
+               master['tools_branch']
         print master['name'], date, OK
 
-def checkconfig(master):
+
+def action_checkconfig(master):
     """Runs buildbot checkconfig"""
-    check(master)
+    action_check(master)
     with hide('stdout', 'stderr'):
         with cd(master['basedir']):
             try:
                 run('make checkconfig')
                 print "%-14s %s" % (master['name'], OK)
             except:
                 print "%-14s %s" % (master['name'], FAIL)
                 raise
 
-def show_revisions(master):
+
+def action_show_revisions(master):
     """Reports the revisions of buildbotcustom, buildbot-configs"""
     with hide('stdout', 'stderr', 'running'):
         bbcustom_rev = run('hg -R %(bbcustom_dir)s ident -i' % master)
         bbconfigs_rev = run('hg -R %(bbconfigs_dir)s ident -i' % master)
         tools_rev = run('hg -R %(tools_dir)s ident -i' % master)
         bbcustom_rev = bbcustom_rev.split()[0]
         bbconfigs_rev = bbconfigs_rev.split()[0]
         tools_rev = tools_rev.split()[0]
 
-        bb_version = run('unset PYTHONHOME PYTHONPATH; %(buildbot_bin)s --version' % master)
+        bb_version = run('unset PYTHONHOME PYTHONPATH; '
+                         '%(buildbot_bin)s --version' % master)
         bb_version = bb_version.replace('\r\n', '\n')
-        m = re.search('^Buildbot version:.*-hg-([0-9a-f]+)-%s' % master['buildbot_branch'], bb_version, re.M)
+        m = re.search('^Buildbot version:.*-hg-([0-9a-f]+)-%s' %
+                      master['buildbot_branch'], bb_version, re.M)
         if not m:
-            print FAIL, "Failed to parse buildbot --version output:", repr(bb_version)
+            print FAIL, "Failed to parse buildbot --version output:", \
+                  repr(bb_version)
             bb_rev = ""
         else:
             bb_rev = m.group(1)
 
         print "%-14s %12s %12s %12s %12s" % (master['name'], bbcustom_rev,
                                         bbconfigs_rev, tools_rev, bb_rev)
 
-def reconfig(master):
+
+def action_reconfig(master):
     print "starting reconfig of %(hostname)s:%(basedir)s" % master
     with show('running'):
         with cd(master['basedir']):
-            put('buildbot-wrangler.py', '%s/buildbot-wrangler.py' % master['basedir'])
+            put(BUILDBOT_WRANGLER,
+                '%s/buildbot-wrangler.py' % master['basedir'])
             run('rm -f *.pyc')
-            run('python buildbot-wrangler.py reconfig %s' % master['master_dir'])
+            run('python buildbot-wrangler.py reconfig %s' %
+                master['master_dir'])
     print OK, "finished reconfig of %(hostname)s:%(basedir)s" % master
 
-def restart(master):
+
+def action_restart(master):
     with show('running'):
         with cd(master['basedir']):
-            put('buildbot-wrangler.py', '%s/buildbot-wrangler.py' % master['basedir'])
+            put(BUILDBOT_WRANGLER, '%s/buildbot-wrangler.py' %
+                master['basedir'])
             run('rm -f *.pyc')
-            run('python buildbot-wrangler.py restart %s' % master['master_dir'])
+            run('python buildbot-wrangler.py restart %s' %
+                master['master_dir'])
     print OK, "finished restarting of %(hostname)s:%(basedir)s" % master
 
-def graceful_restart(master):
+
+def action_graceful_restart(master):
     with show('running'):
         with cd(master['basedir']):
-            put('buildbot-wrangler.py', '%s/buildbot-wrangler.py' % master['basedir'])
+            put(BUILDBOT_WRANGLER, '%s/buildbot-wrangler.py' %
+                master['basedir'])
             run('rm -f *.pyc')
-            run('python buildbot-wrangler.py graceful_restart %s %s' % (master['master_dir'], master['http_port']))
-    print OK, "finished gracefully restarting of %(hostname)s:%(basedir)s" % master
+            run('python buildbot-wrangler.py graceful_restart %s %s' %
+                (master['master_dir'], master['http_port']))
+    print OK, \
+          "finished gracefully restarting of %(hostname)s:%(basedir)s" % master
 
-def stop(master):
+
+def action_stop(master):
     with show('running'):
         with cd(master['basedir']):
-            put('buildbot-wrangler.py', '%s/buildbot-wrangler.py' % master['basedir'])
+            put(BUILDBOT_WRANGLER,
+                '%s/buildbot-wrangler.py' % master['basedir'])
             run('python buildbot-wrangler.py stop %s' % master['master_dir'])
     print OK, "stopped %(hostname)s:%(basedir)s" % master
 
-def graceful_stop(master):
+
+def action_graceful_stop(master):
     with show('running'):
         with cd(master['basedir']):
-            put('buildbot-wrangler.py', '%s/buildbot-wrangler.py' % master['basedir'])
+            put(BUILDBOT_WRANGLER,
+                '%s/buildbot-wrangler.py' % master['basedir'])
             run('rm -f *.pyc')
-            run('python buildbot-wrangler.py graceful_stop %s %s' % (master['master_dir'], master['http_port']))
+            run('python buildbot-wrangler.py graceful_stop %s %s' %
+                (master['master_dir'], master['http_port']))
     print OK, "gracefully stopped %(hostname)s:%(basedir)s" % master
 
+
 def start(master):
     with show('running'):
         with cd(master['basedir']):
-            put('buildbot-wrangler.py', '%s/buildbot-wrangler.py' % master['basedir'])
+            put(BUILDBOT_WRANGLER,
+                '%s/buildbot-wrangler.py' % master['basedir'])
             run('rm -f *.pyc')
             run('python buildbot-wrangler.py start %s' % master['master_dir'])
     print OK, "started %(hostname)s:%(basedir)s" % master
 
-def update(master):
+
+def action_update(master):
     with show('running'):
         with cd(master['basedir']):
             run('source bin/activate && make update')
     print OK, "updated %(hostname)s:%(basedir)s" % master
 
-def update_buildbot(master):
+
+def action_update_buildbot(master):
     with show('running'):
         buildbot_dir = os.path.dirname(master['buildbot_setup'])
         with cd(buildbot_dir):
             run('hg pull')
             run('hg update -r %s' % master['buildbot_branch'])
-            run('unset PYTHONHOME PYTHONPATH; %s setup.py install' % master['buildbot_python'])
+            run('unset PYTHONHOME PYTHONPATH; %s setup.py install' %
+                master['buildbot_python'])
     print OK, "updated buildbot in %(hostname)s:%(basedir)s" % master
 
-def fix_makefile_symlink(master):
+
+def action_fix_makefile_symlink(master):
     with show('running'):
         run('rm -f %(basedir)s/Makefile' % master)
-        run('ln -s %(bbconfigs_dir)s/Makefile.master %(basedir)s/Makefile' % master)
+        run('ln -s %(bbconfigs_dir)s/Makefile.master %(basedir)s/Makefile' %
+            master)
     print OK, "updated Makefile symlink in %(hostname)s:%(basedir)s" % master
 
 
-def add_esr17_symlinks(master):
+def action_add_esr17_symlinks(master):
     with show('running'):
         run('ln -s %(bbconfigs_dir)s/mozilla/release-firefox-mozilla-esr17.py '
             '%(master_dir)s/' % master)
         run('ln -s %(bbconfigs_dir)s/mozilla/l10n-changesets_mozilla-esr17 '
             '%(master_dir)s/' % master)
         run('ln -s %(bbconfigs_dir)s/mozilla/release-thunderbird-comm-esr17.py '
             '%(master_dir)s/' % master)
         run('ln -s %(bbconfigs_dir)s/mozilla/l10n-changesets_thunderbird-esr17 '
             '%(master_dir)s/' % master)
     print OK, "Added esr17 symlinks in %(hostname)s:%(basedir)s" % master
 
 
 def per_host(fn):
     fn.per_host = True
     return fn
 
+
 @per_host
-def update_queue(host):
+def action_update_queue(host):
     with show('running'):
         queue_dir = "/builds/buildbot/queue"
         tools_dir = "%s/tools" % queue_dir
         with cd(tools_dir):
             run('hg pull -u')
     print OK, "updated queue in %s" % host
-
-actions = [
-    'check',
-    'checkconfig',
-    'show_revisions',
-    'reconfig',
-    'restart',
-    'graceful_restart',
-    'stop',
-    'graceful_stop',
-    'start',
-    'update',
-    'update_buildbot',
-    'update_queue',
-    'fix_makefile_symlink',
-    'add_esr17_symlinks',
-    ]
-
new file mode 100644
--- /dev/null
+++ b/lib/python/util/fabric/common.py
@@ -0,0 +1,57 @@
+from os import path
+import logging
+
+from util.commands import run_cmd, run_cmd_periodic_poll
+
+FABRIC_SCRIPT = path.abspath(path.join(
+    path.dirname(__file__),
+    "../../../../buildfarm/maintenance/manage_masters.py"))
+
+
+def check_fabric():
+    try:
+        import fabric
+        assert fabric  # pylint
+    except ImportError:
+        logging.error("FAIL: fabric not installed", exc_info=True)
+        raise
+
+
+class FabricHelper(object):
+    def __init__(self, masters_json_file, roles, concurrency=8,
+                 subprocess=False, warning_interval=300, callback=None):
+        self.masters_json_file = masters_json_file
+        self.roles = roles
+        self.concurrency = concurrency
+        self.subprocess = subprocess
+        self.warning_interval = warning_interval
+        self.callback = callback
+
+    def fabric_cmd(self, actions, **cmdKwargs):
+        cmd = ['python', FABRIC_SCRIPT, '-f', self.masters_json_file,
+               '-j', str(self.concurrency)]
+        for role in self.roles:
+            cmd += ['-R', role]
+        cmd += actions
+        # don't buffer output
+        env = {'PYTHONUNBUFFERED': '1'}
+        if self.subprocess:
+            run_cmd_periodic_poll(cmd, warning_interval=self.warning_interval,
+                                  warning_callback=self.callback, env=env,
+                                  **cmdKwargs)
+        else:
+            run_cmd(cmd, env=env, **cmdKwargs)
+
+    def update(self, **cmdKwargs):
+        self.fabric_cmd(['update'], **cmdKwargs)
+
+    def checkconfig(self, **cmdKwargs):
+        self.fabric_cmd(['checkconfig'], **cmdKwargs)
+
+    def reconfig(self, **cmdKwargs):
+        self.fabric_cmd(['reconfig'], **cmdKwargs)
+
+    def update_and_reconfig(self, **cmdKwargs):
+        self.update(**cmdKwargs)
+        self.checkconfig(**cmdKwargs)
+        self.reconfig(**cmdKwargs)
--- a/lib/python/util/hg.py
+++ b/lib/python/util/hg.py
@@ -19,17 +19,17 @@ class HgUtilError(Exception):
 def _make_absolute(repo):
     if repo.startswith("file://"):
         path = repo[len("file://"):]
         repo = "file://%s" % os.path.abspath(path)
     elif "://" not in repo:
         repo = os.path.abspath(repo)
     return repo
 
-def make_hg_url(hgHost, repoPath, protocol='http', revision=None,
+def make_hg_url(hgHost, repoPath, protocol='https', revision=None,
                 filename=None):
     """construct a valid hg url from a base hg url (hg.mozilla.org),
     repoPath, revision and possible filename"""
     base = '%s://%s' % (protocol, hgHost)
     repo = '/'.join(p.strip('/') for p in [base, repoPath])
     if not filename:
         if not revision:
             return repo
@@ -522,8 +522,29 @@ def adjust_paths(dest, **paths):
     for path_name, path_value in paths.items():
         if (not config.has_option('paths', path_name) or
                 config.get('paths', path_name) != path_value):
             changed = True
             config.set('paths', path_name, path_value)
 
     if changed:
         config.write(open(hgrc, 'w'))
+
+def commit(dest, msg, user=None):
+    cmd = ['hg', 'commit', '-m', msg]
+    if user:
+        cmd.extend(['-u', user])
+    run_cmd(cmd, cwd=dest)
+    return get_revision(dest)
+
+def tag(dest, tags, user=None, msg=None, rev=None, force=None):
+    cmd = ['hg', 'tag']
+    if user:
+        cmd.extend(['-u', user])
+    if msg:
+        cmd.extend(['-m', msg])
+    if rev:
+        cmd.extend(['-r', rev])
+    if force:
+        cmd.append('-f')
+    cmd.extend(tags)
+    run_cmd(cmd, cwd=dest)
+    return get_revision(dest)
--- a/lib/python/util/retry.py
+++ b/lib/python/util/retry.py
@@ -4,17 +4,17 @@ import traceback
 
 import logging
 log = logging.getLogger(__name__)
 
 def retry(action, attempts=5, sleeptime=60, max_sleeptime=5*60, retry_exceptions=(Exception,),
           cleanup=None, args=(), kwargs={}):
     """Call `action' a maximum of `attempts' times until it succeeds,
         defaulting to 5. `sleeptime' is the number of seconds to wait
-	between attempts, defaulting to 60 and doubling each retry attempt, to
+        between attempts, defaulting to 60 and doubling each retry attempt, to
         a maximum of `max_sleeptime'.  `retry_exceptions' is a tuple of
         Exceptions that should be caught. If exceptions other than those
         listed in `retry_exceptions' are raised from `action', they will be
         raised immediately. If `cleanup' is provided and callable it will
         be called immediately after an Exception is caught. No arguments
         will be passed to it. If your cleanup function requires arguments
         it is recommended that you wrap it in an argumentless function.
         `args' and `kwargs' are a tuple and dict of arguments to pass onto
new file mode 100644
--- /dev/null
+++ b/lib/python/util/sendmail.py
@@ -0,0 +1,18 @@
+import email.utils
+from smtplib import SMTP
+from email.mime.text import MIMEText
+
+
+def sendmail(from_, to, subject, body, smtp_server):
+    s = SMTP()
+    s.connect(smtp_server)
+
+    for addr in to:
+        m = MIMEText(body)
+        m['date'] = email.utils.formatdate()
+        m['from'] = from_
+        m['to'] = addr
+        m['subject'] = subject
+        s.sendmail(from_, [addr], m.as_string())
+
+    s.quit()