import random, weakref
from zope.interface import implements
from twisted.python import log
from twisted.python.failure import Failure
from twisted.spread import pb
from twisted.application import service, internet
from twisted.internet import defer

from buildbot import interfaces, util
from buildbot.status.progress import Expectations
from buildbot.status.builder import RETRY
from buildbot.process.properties import Properties
from buildbot.util.eventual import eventually

(ATTACHING, # slave attached, still checking hostinfo/etc
 IDLE, # idle, available for use
 PINGING, # build about to start, making sure it is still alive
 BUILDING, # build is running
 LATENT, # latent slave is not substantiated; similar to idle
 SUBSTANTIATING, # latent slave is in the process of substantiating
 ) = range(6)
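# Typical lifecycle, as implemented below: a slave arrives ATTACHING, moves
# to IDLE once the attach handshake completes, is briefly PINGING just
# before a build starts, then BUILDING, and returns to IDLE when the build
# finishes. Latent slaves idle at LATENT and pass through SUBSTANTIATING on
# their way to a build.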

class AbstractSlaveBuilder(pb.Referenceable):
    """I am the master-side representative for one of the
    L{buildbot.slave.bot.SlaveBuilder} objects that live in a remote
    buildbot. When a remote builder connects, I query it for command versions
    and then make it available to any Builds that are ready to run. """

    def __init__(self):
        self.ping_watchers = []
        self.state = None # set in subclass
        self.remote = None
        self.slave = None
        self.builder_name = None
        self.locks = None

    def __repr__(self):
        r = ["<", self.__class__.__name__]
        if self.builder_name:
            r.extend([" builder=", repr(self.builder_name)])
        if self.slave:
            r.extend([" slave=", repr(self.slave.slavename)])
        return ''.join(r)

    def setBuilder(self, b):
        self.builder = b
        self.builder_name =

    def getSlaveCommandVersion(self, command, oldversion=None):
        if self.remoteCommands is None:
            # the slave is 0.5.0 or earlier
            return oldversion
        return self.remoteCommands.get(command)
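
    # Usage sketch (illustrative caller, not from this file): master-side
    # code can degrade gracefully when a slave predates a command, e.g.
    #   if sb.getSlaveCommandVersion("shell") is None:
    #       ...fall back to behavior the old slave understands...
    # Real callers compare the returned version string against the one they
    # require; "shell" here is just an example command name.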

    def isAvailable(self):
        # if this SlaveBuilder is busy, then it's definitely not available
        if self.isBusy():
            return False

        # otherwise, check in with the BuildSlave
        if self.slave:
            return self.slave.canStartBuild()

        # no slave? not very available.
        return False

    def isBusy(self):
        return self.state not in (IDLE, LATENT)

    def buildStarted(self):
        self.state = BUILDING

    def buildFinished(self):
        self.state = IDLE

    def attached(self, slave, remote, commands):
        """
        @type  slave: L{buildbot.buildslave.BuildSlave}
        @param slave: the BuildSlave that represents the buildslave as a
                      whole
        @type  remote: L{twisted.spread.pb.RemoteReference}
        @param remote: a reference to the L{buildbot.slave.bot.SlaveBuilder}
        @type  commands: dict: string -> string, or None
        @param commands: provides the slave's version of each RemoteCommand
        """
        self.state = ATTACHING
        self.remote = remote
        self.remoteCommands = commands # maps command name to version
        if self.slave is None:
            self.slave = slave
            self.slave.addSlaveBuilder(self)
        else:
            assert self.slave == slave
        #log.msg("Buildslave %s attached to %s" % (slave.slavename,
        def _attachFailure(why, where):
            return why

        d = defer.succeed(None)
        def doSetMaster(res):
            d = self.remote.callRemote("setMaster", self)
            #d.addErrback(_attachFailure, "Builder.setMaster")
            return d
        d.addCallback(doSetMaster)
        def doPrint(res):
            d = self.remote.callRemote("print", "attached")
            #d.addErrback(_attachFailure, "Builder.print 'attached'")
            return d
        d.addCallback(doPrint)
        def setIdle(res):
            self.state = IDLE
            return self
        d.addCallback(setIdle)
        return d

    def prepare(self, builder_status):
        if not self.slave.acquireLocks():
            return defer.succeed(False)
        return defer.succeed(True)

    def ping(self, status=None):
        """Ping the slave to make sure it is still there. Returns a Deferred
        that fires with True if it is.

        @param status: if you point this at a BuilderStatus, a 'pinging'
                       event will be pushed.
        """
        oldstate = self.state
        self.state = PINGING
        newping = not self.ping_watchers
        d = defer.Deferred()
        self.ping_watchers.append(d)
        if newping:
            if status:
                event = status.addEvent(["pinging"])
                d2 = defer.Deferred()
                d2.addCallback(self._pong_status, event)
                self.ping_watchers.insert(0, d2)
                # I think it will make the tests run smoother if the status
                # is updated before the ping completes
            Ping().ping(self.remote).addCallback(self._pong)

        def reset_state(res):
            if self.state == PINGING:
                self.state = oldstate
            return res
        d.addCallback(reset_state)
        return d
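
    # Usage sketch (illustrative caller): gate a build start on the result,
    # e.g.
    #   d =
    #   d.addCallback(lambda alive: start_build() if alive else requeue())
    # Concurrent pings coalesce: every Deferred handed out while a ping is
    # outstanding is parked in ping_watchers and fired together by _pong.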

    def _pong(self, res):
        watchers, self.ping_watchers = self.ping_watchers, []
        for d in watchers:
            d.callback(res)

    def _pong_status(self, res, event):
        if res:
            event.text = ["ping", "success"]
        else:
            event.text = ["ping", "failed"]
        event.finish()

    def detached(self):
        #log.msg("Buildslave %s detached from %s" % (self.slave.slavename,
        #                                            self.builder_name))
        if self.slave:
            self.slave.removeSlaveBuilder(self)
        self.slave = None
        self.remote = None
        self.remoteCommands = None

class Ping:
    running = False

    def ping(self, remote):
        assert not self.running
        if not remote:
            # clearly the ping must fail
            return defer.succeed(False)
        self.running = True
        log.msg("sending ping")
        self.d = defer.Deferred()
        # TODO: add a distinct 'ping' command on the slave.. using 'print'
        # for this purpose is kind of silly.
        remote.callRemote("print", "ping").addCallbacks(self._pong,
        return self.d

    def _pong(self, res):
        log.msg("ping finished: success")
        self.d.callback(True)

    def _ping_failed(self, res, remote):
        log.msg("ping finished: failure")
        # the slave has some sort of internal error, disconnect them. If we
        # don't, we'll requeue a build and ping them again right away,
        # creating a nasty loop.
        remote.broker.transport.loseConnection()
        # TODO: except, if they actually did manage to get this far, they'll
        # probably reconnect right away, and we'll do this game again. Maybe
        # it would be better to leave them in the PINGING state.
        self.d.callback(False)

class SlaveBuilder(AbstractSlaveBuilder):

    def __init__(self):
        AbstractSlaveBuilder.__init__(self)
        self.state = ATTACHING

    def detached(self):
        AbstractSlaveBuilder.detached(self)
        if self.slave:
            self.slave.removeSlaveBuilder(self)
        self.slave = None
        self.state = ATTACHING

    def buildFinished(self):
        # Call the slave's buildFinished if we can; the slave may be waiting
        # to do a graceful shutdown and needs to know when it's idle.
        # After, we check to see if we can start other builds.
        self.state = IDLE
        if self.slave:
            d = self.slave.buildFinished(self)
            d.addCallback(lambda x: self.builder.triggerNewBuildCheck())
        else:
            self.builder.triggerNewBuildCheck()

class LatentSlaveBuilder(AbstractSlaveBuilder):
    def __init__(self, slave, builder):
        AbstractSlaveBuilder.__init__(self)
        self.slave = slave
        self.state = LATENT
        self.setBuilder(builder)
        self.slave.addSlaveBuilder(self)
        log.msg("Latent buildslave %s attached to %s" % (slave.slavename,
                                                         self.builder_name))

    def prepare(self, builder_status):
        # If we can't lock, then don't bother trying to substantiate
        if not self.slave.acquireLocks():
            return defer.succeed(False)

        log.msg("substantiating slave %s" % (self,))
        d = self.substantiate()
        def substantiation_failed(f):
            builder_status.addPointEvent(['removing', 'latent',
                                          self.slave.slavename])
            self.slave.disconnect()
            # TODO: should failover to a new Build
            return f
        def substantiation_cancelled(res):
            # if res is False, latent slave cancelled substantiation
            if not res:
                self.state = LATENT
            return res
        d.addCallback(substantiation_cancelled)
        d.addErrback(substantiation_failed)
        return d

    def substantiate(self):
        self.state = SUBSTANTIATING
        d = self.slave.substantiate(self)
        if not self.slave.substantiated:
            event = self.builder.builder_status.addEvent(
                ["substantiating"])
            def substantiated(res):
                msg = ["substantiate", "success"]
                if isinstance(res, basestring):
                    msg.append(res)
                elif isinstance(res, (tuple, list)):
                    msg.extend(res)
                event.text = msg
                event.finish()
                return res
            def substantiation_failed(res):
                event.text = ["substantiate", "failed"]
                # TODO add log of traceback to event
                event.finish()
                return res
            d.addCallbacks(substantiated, substantiation_failed)
        return d

    def detached(self):
        AbstractSlaveBuilder.detached(self)
        self.state = LATENT

    def buildStarted(self):
        AbstractSlaveBuilder.buildStarted(self)
        self.slave.buildStarted(self)

    def buildFinished(self):
        AbstractSlaveBuilder.buildFinished(self)
        self.slave.buildFinished(self)

    def _attachFailure(self, why, where):
        self.state = LATENT
        return AbstractSlaveBuilder._attachFailure(self, why, where)

    def ping(self, status=None):
        if not self.slave.substantiated:
            if status:
                status.addEvent(["ping", "latent"]).finish()
            return defer.succeed(True)
        return, status)

class Builder(pb.Referenceable, service.MultiService):
    """I manage all Builds of a given type.

    Each Builder is created by an entry in the config file (the c['builders']
    list), with a number of parameters.

    One of these parameters is the L{buildbot.process.factory.BuildFactory}
    object that is associated with this Builder. The factory is responsible
    for creating new L{Build<buildbot.process.base.Build>} objects. Each
    Build object defines when and how the build is performed, so a new
    Factory or Builder should be defined to control this behavior.

    The Builder holds on to a number of L{base.BuildRequest} objects in a
    list named C{.buildable}. Incoming BuildRequest objects will be added to
    this list, or (if possible) merged into an existing request. When a slave
    becomes available, I will use my C{BuildFactory} to turn the request into
    a new C{Build} object. The C{BuildRequest} is forgotten, the C{Build}
    goes into C{.building} while it runs. Once the build finishes, I will
    discard it.

    I maintain a list of available SlaveBuilders, one for each connected
    slave that the C{slavenames} parameter says we can use. Some of these
    will be idle, some of them will be busy running builds for me. If there
    are multiple slaves, I can run multiple builds at once.

    I also manage forced builds, progress expectation (ETA) management, and
    some status delivery chores.

    @type buildable: list of L{buildbot.process.base.BuildRequest}
    @ivar buildable: BuildRequests that are ready to build, but which are
                     waiting for a buildslave to be available.

    @type building: list of L{buildbot.process.base.Build}
    @ivar building: Builds that are actively running

    @type slaves: list of L{buildbot.buildslave.BuildSlave} objects
    @ivar slaves: the slaves currently available for building
    """

    expectations = None # this is created the first time we get a good build
    CHOOSE_SLAVES_RANDOMLY = True # disabled for determinism during tests
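    # Example config entry (illustrative; the keys mirror the 'setup' dict
    # documented in __init__ below):
    #   c['builders'].append({
    #       'name': 'full-linux',
    #       'slavenames': ['bot1', 'bot2'],
    #       'builddir': 'full-linux',
    #       'slavebuilddir': 'full-linux',
    #       'factory': f,   # a buildbot.process.factory.BuildFactory
    #   })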

    def __init__(self, setup, builder_status):
        """
        @type  setup: dict
        @param setup: builder setup data, as stored in
                      BuildmasterConfig['builders'].  Contains name,
                      slavename(s), builddir, slavebuilddir, factory, locks.
        @type  builder_status: L{buildbot.status.builder.BuilderStatus}
        """
        service.MultiService.__init__(self) = setup['name']
        self.slavenames = []
        if setup.has_key('slavename'):
            self.slavenames.append(setup['slavename'])
        if setup.has_key('slavenames'):
            self.slavenames.extend(setup['slavenames'])
        self.builddir = setup['builddir']
        self.slavebuilddir = setup['slavebuilddir']
        self.buildFactory = setup['factory']
        self.nextSlave = setup.get('nextSlave')
        if self.nextSlave is not None and not callable(self.nextSlave):
            raise ValueError("nextSlave must be callable")
        self.locks = setup.get("locks", [])
        self.env = setup.get('env', {})
        assert isinstance(self.env, dict)
        if setup.has_key('periodicBuildTime'):
            raise ValueError("periodicBuildTime can no longer be defined as"
                             " part of the Builder: use scheduler.Periodic"
                             " instead")
        self.nextBuild = setup.get('nextBuild')
        if self.nextBuild is not None and not callable(self.nextBuild):
            raise ValueError("nextBuild must be callable")
        self.buildHorizon = setup.get('buildHorizon')
        self.logHorizon = setup.get('logHorizon')
        self.eventHorizon = setup.get('eventHorizon')
        self.mergeRequests = setup.get('mergeRequests', True) = setup.get('properties', {})
        self.category = setup.get('category', None)

        # build/wannabuild slots: Build objects move along this sequence
        self.building = []
        # old_building holds active builds that were stolen from a predecessor
        self.old_building = weakref.WeakKeyDictionary()

        # buildslaves which have connected but which are not yet available.
        # These are always in the ATTACHING state.
        self.attaching_slaves = []

        # buildslaves at our disposal. Each SlaveBuilder instance has a
        # .state that is IDLE, PINGING, or BUILDING. "PINGING" is used when a
        # Build is about to start, to make sure that they're still alive.
        self.slaves = []

        self.builder_status = builder_status
        self.builder_status.buildHorizon = self.buildHorizon
        self.builder_status.logHorizon = self.logHorizon
        self.builder_status.eventHorizon = self.eventHorizon
        t = internet.TimerService(10*60, self.reclaimAllBuilds)
        t.setServiceParent(self)

        # for testing, to help synchronize tests
        self.watchers = {'attach': [], 'detach': [], 'detach_all': [],
                         'idle': []}
        self.run_count = 0

    def setBotmaster(self, botmaster):
        self.botmaster = botmaster
        self.db = botmaster.db
        self.master_name = botmaster.master_name
        self.master_incarnation = botmaster.master_incarnation

    def compareToSetup(self, setup):
        diffs = []
        setup_slavenames = []
        if setup.has_key('slavename'):
            setup_slavenames.append(setup['slavename'])
        setup_slavenames.extend(setup.get('slavenames', []))
        if setup_slavenames != self.slavenames:
            diffs.append('slavenames changed from %s to %s' \
                         % (self.slavenames, setup_slavenames))
        if setup['builddir'] != self.builddir:
            diffs.append('builddir changed from %s to %s' \
                         % (self.builddir, setup['builddir']))
        if setup['slavebuilddir'] != self.slavebuilddir:
            diffs.append('slavebuilddir changed from %s to %s' \
                         % (self.slavebuilddir, setup['slavebuilddir']))
        if setup['factory'] != self.buildFactory: # compare objects
            diffs.append('factory changed')
        if setup.get('locks', []) != self.locks:
            diffs.append('locks changed from %s to %s' % (self.locks, setup.get('locks')))
        if setup.get('env', {}) != self.env:
            diffs.append('env changed from %s to %s' % (self.env, setup.get('env', {})))
        if setup.get('nextSlave') != self.nextSlave:
            diffs.append('nextSlave changed from %s to %s' % (self.nextSlave, setup.get('nextSlave')))
        if setup.get('nextBuild') != self.nextBuild:
            diffs.append('nextBuild changed from %s to %s' % (self.nextBuild, setup.get('nextBuild')))
        if setup['buildHorizon'] != self.buildHorizon:
            diffs.append('buildHorizon changed from %s to %s' % (self.buildHorizon, setup['buildHorizon']))
        if setup['logHorizon'] != self.logHorizon:
            diffs.append('logHorizon changed from %s to %s' % (self.logHorizon, setup['logHorizon']))
        if setup['eventHorizon'] != self.eventHorizon:
            diffs.append('eventHorizon changed from %s to %s' % (self.eventHorizon, setup['eventHorizon']))
        if setup['category'] != self.category:
            diffs.append('category changed from %r to %r' % (self.category, setup['category']))

        return diffs

    def __repr__(self):
        return "<Builder '%r' at %d>" % (, id(self))

    def triggerNewBuildCheck(self):
        # delegate to the BotMaster, which queues a run() pass over all
        # builders
        self.botmaster.triggerNewBuildCheck()

    def run(self):
        """Check for work to be done. This should be called any time I might
        be able to start a job:

         - when the Builder is first created
         - when a new job has been added to the [buildrequests] DB table
         - when a slave has connected

        If I have both an available slave and the database contains a
        BuildRequest that I can handle, I will claim the BuildRequest and
        start the build. When the build finishes, I will retire the
        BuildRequest.
        """
        # overall plan:
        #  move .expectations to DB

        # if we're not running, we may still be called from leftovers from
        # a run of the loop, so just ignore the call.
        if not self.running:
            return

        self.run_count += 1

        available_slaves = [sb for sb in self.slaves if sb.isAvailable()]
        if not available_slaves:
            self.updateBigStatus()
            return
        d = self.db.runInteraction(self._claim_buildreqs, available_slaves)
        d.addCallback(self._start_builds)
        return d

    # slave-managers must refresh their claim on a build at least once an
    # hour, less any inter-manager clock skew
    RECLAIM_INTERVAL = 1*3600

    def _claim_buildreqs(self, t, available_slaves):
        # return a dict mapping slave -> (brid,ssid)
        now = util.now()
        old = now - self.RECLAIM_INTERVAL
        requests = self.db.get_unclaimed_buildrequests(, old,
                                                       self.master_name,
                                                       self.master_incarnation,
                                                       t)

        assignments = {}
        while requests and available_slaves:
            sb = self._choose_slave(available_slaves)
            if not sb:
                log.msg("%s: want to start build, but we don't have a remote"
                        % self)
                break
            available_slaves.remove(sb)
            breq = self._choose_build(requests)
            if not breq:
                log.msg("%s: went to start build, but nextBuild said not to"
                        % self)
                break
            requests.remove(breq)
            merged_requests = [breq]
            for other_breq in requests[:]:
                if (self.mergeRequests and
                    self.botmaster.shouldMergeRequests(self, breq, other_breq)
                    ):
                    requests.remove(other_breq)
                    merged_requests.append(other_breq)
            brids = [ for br in merged_requests]
            if self.db.claim_buildrequests(now, self.master_name,
                    self.master_incarnation, brids, t):
                assignments[sb] = merged_requests
        return assignments

    def _choose_slave(self, available_slaves):
        # note: this might return None if the nextSlave() function decided to
        # not give us anything
        if self.nextSlave:
            try:
                return self.nextSlave(self, available_slaves)
            except:
                log.msg("Exception choosing next slave")
                log.err(Failure())
            return None
        if self.CHOOSE_SLAVES_RANDOMLY:
            return random.choice(available_slaves)
        return available_slaves[0]
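
    # Example 'nextSlave' callable (illustrative) that master.cfg could
    # supply; it receives this Builder and the available SlaveBuilder list
    # and returns one of them, or None to decline:
    #   def pick_first(builder, available_slaves):
    #       if available_slaves:
    #           return available_slaves[0]
    #       return None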

    def _choose_build(self, buildable):
        if self.nextBuild:
            try:
                return self.nextBuild(self, buildable)
            except:
                log.msg("Exception choosing next build")
                log.err(Failure())
            return None
        return buildable[0]
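
    # A 'nextBuild' callable works the same way (illustrative): it receives
    # this Builder and the list of pending BuildRequests and returns the one
    # to start, or None to decline:
    #   def newest_first(builder, buildable):
    #       return buildable[-1]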

    def _start_builds(self, assignments):
        # because _claim_buildreqs runs in a separate thread, we might have
        # lost a slave by this point. We treat that case the same as if we
        # lose the slave right after the build starts: the initial ping
        # fails.
        for (sb, requests) in assignments.items():
            build = self.buildFactory.newBuild(requests)
            build.setBuilder(self)
            build.setLocks(self.locks)
            if len(self.env) > 0:
                build.setSlaveEnvironment(self.env)
            self.startBuild(build, sb)
        self.updateBigStatus()

    def getBuildable(self, limit=None):
        return self.db.runInteractionNow(self._getBuildable, limit)
    def _getBuildable(self, t, limit):
        now = util.now()
        old = now - self.RECLAIM_INTERVAL
        return self.db.get_unclaimed_buildrequests(, old,
                                                   self.master_name,
                                                   self.master_incarnation,
                                                   t, limit)

    def getOldestRequestTime(self):
        """Returns the timestamp of the oldest build request for this builder.

        If there are no build requests, None is returned."""
        buildable = self.getBuildable(1)
        if buildable:
            # TODO: this is sorted by priority first, not strictly reqtime
            return buildable[0].getSubmitTime()
        return None

    def cancelBuildRequest(self, brid):
        return self.db.cancel_buildrequests([brid])

    def consumeTheSoulOfYourPredecessor(self, old):
        """Suck the brain out of an old Builder.

        This takes all the runtime state from an existing Builder and moves
        it into ourselves. This is used when a Builder is changed in the
        master.cfg file: the new Builder has a different factory, but we want
        all the builds that were queued for the old one to get processed by
        the new one. Any builds which are already running will keep running.
        The new Builder will get as many of the old SlaveBuilder objects as
        it wants."""

        log.msg("consumeTheSoulOfYourPredecessor: %s feeding upon %s" %
                (self, old))
        # all pending builds are stored in the DB, so we don't have to do
        # anything to claim them. The old builder will be stopService'd,
        # which should make sure they don't start any new work

        # this is kind of silly, but the builder status doesn't get updated
        # when the config changes, yet it stores the category.  So:
        self.builder_status.category = self.category

        # old.building (i.e. builds which are still running) is not migrated
        # directly: it keeps track of builds which were in progress in the
        # old Builder. When those builds finish, the old Builder will be
        # notified, not us. However, since the old SlaveBuilder will point to
        # us, it is our maybeStartBuild() that will be triggered.
        if old.building:
            self.builder_status.setBigState("building")
        # however, we do grab a weakref to the active builds, so that our
        # BuilderControl can see them and stop them. We use a weakref because
        # we aren't the one to get notified, so there isn't a convenient
        # place to remove it from self.building .
        for b in old.building:
            self.old_building[b] = None
        for b in old.old_building:
            self.old_building[b] = None

        # Our set of slavenames may be different. Steal any of the old
        # buildslaves that we want to keep using.
        for sb in old.slaves[:]:
            if sb.slave.slavename in self.slavenames:
                log.msg(" stealing buildslave %s" % sb)
                self.slaves.append(sb)
                old.slaves.remove(sb)
                sb.setBuilder(self)

        # old.attaching_slaves:
        #  these SlaveBuilders are waiting on a sequence of calls:
        #  remote.setMaster and remote.print . When these two complete,
        #  old._attached will be fired, which will add a 'connect' event to
        #  the builder_status and try to start a build. However, we've pulled
        #  everything out of the old builder's queue, so it will have no work
        #  to do. The outstanding remote.setMaster/print call will be holding
        #  the last reference to the old builder, so it will disappear just
        #  after that response comes back.
        #  The BotMaster will ask the slave to re-set their list of Builders
        #  shortly after this function returns, which will cause our
        #  attached() method to be fired with a bunch of references to remote
        #  SlaveBuilders, some of which we already have (by stealing them
        #  from the old Builder), some of which will be new. The new ones
        #  will be re-attached.

        #  Therefore, we don't need to do anything about old.attaching_slaves

        return # all done

    def reclaimAllBuilds(self):
        try:
            now = util.now()
            brids = set()
            for b in self.building:
                brids.update([ for br in b.requests])
            for b in self.old_building:
                brids.update([ for br in b.requests])
            if not self.db.claim_buildrequests(now, self.master_name,
                    self.master_incarnation, brids, am_reclaiming=True):
                log.msg("Failed to re-claim buildrequests for builder %s" %
        except:
            log.msg("Error in reclaimAllBuilds")
            log.err(Failure())

    def getBuild(self, number):
        for b in self.building:
            if b.build_status and b.build_status.number == number:
                return b
        for b in self.old_building.keys():
            if b.build_status and b.build_status.number == number:
                return b
        return None

    def fireTestEvent(self, name, fire_with=None):
        if fire_with is None:
            fire_with = self
        watchers = self.watchers[name]
        self.watchers[name] = []
        for w in watchers:
            eventually(w.callback, fire_with)

    def addLatentSlave(self, slave):
        assert interfaces.ILatentBuildSlave.providedBy(slave)
        for s in self.slaves:
            if s == slave:
                break
        else:
            sb = LatentSlaveBuilder(slave, self)
            self.builder_status.addPointEvent(
                ['added', 'latent', slave.slavename])
            self.slaves.append(sb)
            self.triggerNewBuildCheck()

    def attached(self, slave, remote, commands):
        """This is invoked by the BuildSlave when the self.slavename bot
        registers their builder.

        @type  slave: L{buildbot.buildslave.BuildSlave}
        @param slave: the BuildSlave that represents the buildslave as a whole
        @type  remote: L{twisted.spread.pb.RemoteReference}
        @param remote: a reference to the L{buildbot.slave.bot.SlaveBuilder}
        @type  commands: dict: string -> string, or None
        @param commands: provides the slave's version of each RemoteCommand

        @rtype:  L{twisted.internet.defer.Deferred}
        @return: a Deferred that fires (with 'self') when the slave-side
                 builder is fully attached and ready to accept commands.
        """
        for s in self.attaching_slaves + self.slaves:
            if s.slave == slave:
                # already attached to them. This is fairly common, since
                # attached() gets called each time we receive the builder
                # list from the slave, and we ask for it each time we add or
                # remove a builder. So if the slave is hosting builders
                # A,B,C, and the config file changes A, we'll remove A and
                # re-add it, triggering two builder-list requests, getting
                # two redundant calls to attached() for B, and another two
                # for C.
                # Therefore, when we see that we're already attached, we can
                # just ignore it. TODO: build a diagram of the state
                # transitions here, I'm concerned about sb.attached() failing
                # and leaving sb.state stuck at 'ATTACHING', and about
                # the detached() message arriving while there's some
                # transition pending such that the response to the transition
                # re-vivifies sb
                return defer.succeed(self)

        sb = SlaveBuilder()
        sb.setBuilder(self)
        self.attaching_slaves.append(sb)
        d = sb.attached(slave, remote, commands)
        d.addCallback(self._attached)
        d.addErrback(self._not_attached, slave)
        return d

    def _attached(self, sb):
        # TODO: make this .addSlaveEvent(slave.slavename, ['connect']) ?
        self.builder_status.addPointEvent(['connect', sb.slave.slavename])
        self.attaching_slaves.remove(sb)
        self.slaves.append(sb)

        self.fireTestEvent('attach')
        return self

    def _not_attached(self, why, slave):
        # already log.err'ed by SlaveBuilder._attachFailure
        # TODO: make this .addSlaveEvent?
        # TODO: remove from self.slaves (except that detached() should get
        #       run first, right?)
        print why
        self.builder_status.addPointEvent(['failed', 'connect',
                                           slave.slavename])
        # TODO: add an HTMLLogFile of the exception
        self.fireTestEvent('attach', why)

    def detached(self, slave):
        """This is called when the connection to the bot is lost."""
        for sb in self.attaching_slaves + self.slaves:
            if sb.slave == slave:
                break
        else:
            log.msg("WEIRD: Builder.detached(%s) (%s)"
                    " not in attaching_slaves(%s)"
                    " or slaves(%s)" % (slave, slave.slavename,
                                        self.attaching_slaves,
                                        self.slaves))
            return
        if sb.state == BUILDING:
            # the Build's .lostRemote method (invoked by a notifyOnDisconnect
            # handler) will cause the Build to be stopped, probably right
            # after the notifyOnDisconnect that invoked us finishes running.
            pass

            # TODO: should failover to a new Build

        if sb in self.attaching_slaves:
            self.attaching_slaves.remove(sb)
        if sb in self.slaves:
            self.slaves.remove(sb)

        # TODO: make this .addSlaveEvent?
        self.builder_status.addPointEvent(['disconnect', slave.slavename])
        sb.detached() # inform the SlaveBuilder that their slave went away
        self.updateBigStatus()
        self.fireTestEvent('detach')
        if not self.slaves:
            self.fireTestEvent('detach_all')

    def updateBigStatus(self):
        if not self.slaves:
            self.builder_status.setBigState("offline")
        elif self.building:
            self.builder_status.setBigState("building")
        else:
            self.builder_status.setBigState("idle")
            self.fireTestEvent('idle')

    def startBuild(self, build, sb):
        """Start a build on the given slave.
        @param build: the L{base.Build} to start
        @param sb: the L{SlaveBuilder} which will host this build

        @return: a Deferred which fires with a
        L{buildbot.interfaces.IBuildControl} that can be used to stop the
        Build, or to access a L{buildbot.interfaces.IBuildStatus} which will
        watch the Build as it runs. """

        log.msg("starting build %s using slave %s" % (build, sb))
        d = sb.prepare(self.builder_status)

        def _prepared(ready):
            # If prepare returns True then it is ready and we start a build
            # If it returns false then we don't start a new build.
            d = defer.succeed(ready)

            if not ready:
                #FIXME: We should perhaps trigger a check to see if there is
                # any other way to schedule the work
                log.msg("slave %s can't build %s after all" % (build, sb))

                # release the slave. This will queue a call to maybeStartBuild, which
                # will fire after other notifyOnDisconnect handlers have marked the
                # slave as disconnected (so we don't try to use it again).
                # sb.buildFinished()

                log.msg("re-queueing the BuildRequest %s" % build)


                return d

            def _ping(ign):
                # ping the slave to make sure they're still there. If they've
                # fallen off the map (due to a NAT timeout or something), this
                # will fail in a couple of minutes, depending upon the TCP
                # timeout.
                # TODO: This can unnecessarily suspend the starting of a build, in
                # situations where the slave is live but is pushing lots of data to
                # us in a build.
                log.msg("starting build %s.. pinging the slave %s" % (build, sb))
            d.addCallback(self._startBuild_1, build, sb)

            return d

        return d

    def _startBuild_1(self, res, build, sb):
        if not res:
            return self._startBuildFailed("slave ping failed", build, sb)
        # The buildslave is ready to go. sb.buildStarted() sets its state to
        # BUILDING (so we won't try to use it for any other builds). This
        # gets set back to IDLE by the Build itself when it finishes.
        d = sb.remote.callRemote("startBuild")
        d.addCallbacks(self._startBuild_2, self._startBuildFailed,
                       callbackArgs=(build,sb), errbackArgs=(build,sb))
        return d

    def _startBuild_2(self, res, build, sb):
        # create the BuildStatus object that goes with the Build
        bs = self.builder_status.newBuild()

        # start the build. This will first set up the steps, then tell the
        # BuildStatus that it has started, which will announce it to the
        # world (through our BuilderStatus object, which is its parent).
        # Finally it will start the actual build process.
        bids = [self.db.build_started(, bs.number) for req in build.requests]
        d = build.startBuild(bs, self.expectations, sb)
        d.addCallback(self.buildFinished, sb, bids)
        # this shouldn't happen. if it does, the slave will be wedged
        d.addErrback(log.err)
        return build # this is the IBuildControl

    def _startBuildFailed(self, why, build, sb):
        # put the build back on the buildable list
        log.msg("I tried to tell the slave that the build %s started, but "
                "remote_startBuild failed: %s" % (build, why))
        # release the slave. This will queue a call to maybeStartBuild, which
        # will fire after other notifyOnDisconnect handlers have marked the
        # slave as disconnected (so we don't try to use it again).
        sb.buildFinished()

        log.msg("re-queueing the BuildRequest")
        self.building.remove(build)
        self._resubmit_buildreqs(build).addErrback(log.err)

    def setupProperties(self, props):
        props.setProperty("buildername",, "Builder")
        if len( > 0:
            for propertyname in
                props.setProperty(propertyname,[propertyname],
                                  "Builder")
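
    # Example (illustrative): with 'properties': {'product': 'firefox'} in
    # this builder's setup dict, every build started here begins with a
    # 'product' property whose source is recorded as "Builder".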

    def buildFinished(self, build, sb, bids):
        """This is called when the Build has finished (either success or
        failure). Any exceptions during the build are reported with
        results=FAILURE, not with an errback."""

        # by the time we get here, the Build has already released the slave
        # (which queues a call to maybeStartBuild)

        self.db.builds_finished(bids)

        results = build.build_status.getResults()
        self.building.remove(build)
        if results == RETRY:
            self._resubmit_buildreqs(build).addErrback(log.err) # returns Deferred
        else:
            brids = [ for br in build.requests]
            self.db.retire_buildrequests(brids, results)

        if sb.slave:
            sb.slave.releaseLocks()

        self.triggerNewBuildCheck()

    def _resubmit_buildreqs(self, build):
        brids = [ for br in build.requests]
        return self.db.resubmit_buildrequests(brids)

    def setExpectations(self, progress):
        """Mark the build as successful and update expectations for the next
        build. Only call this when the build did not fail in any way that
        would invalidate the time expectations generated by it. (if the
        compile failed and thus terminated early, we can't use the last
        build to predict how long the next one will take).
        """
        if self.expectations:
            self.expectations.update(progress)
        else:
            # the first time we get a good build, create our Expectations
            # based upon its results
            self.expectations = Expectations(progress)
        log.msg("new expectations: %s seconds" % \
                self.expectations.expectedBuildTime())

    def shutdownSlave(self):
        if self.remote:
            self.remote.callRemote("shutdown")

class BuilderControl:
    implements(interfaces.IBuilderControl)

    def __init__(self, builder, parent):
        self.original = builder
        self.parent = parent # the IControl object

    def submitBuildRequest(self, ss, reason, props=None, now=False):
        bss = self.parent.submitBuildSet([], ss, reason,
                                         props, now)
        brs = bss.getBuildRequests()[0]
        return brs
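
    # Usage sketch (illustrative names): given a BuilderControl 'bc' and a
    # SourceStamp 'ss':
    #   brs = bc.submitBuildRequest(ss, "forced: verify a fix")
    # 'brs' is the status object for the new request, pulled from the build
    # set submitted above.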

    def rebuildBuild(self, bs, reason="<rebuild, no reason given>", extraProperties=None):
        if not bs.isFinished():
            return

        ss = bs.getSourceStamp(absolute=True)
        # Make a copy so as not to modify the original build.
        properties = Properties()
        # Don't include runtime-set properties in a rebuild request
        properties.updateFromPropertiesNoRuntime(bs.getProperties())
        if extraProperties is not None:
            properties.updateFromProperties(extraProperties)
        self.submitBuildRequest(ss, reason, props=properties)

    def getPendingBuilds(self):
        # return IBuildRequestControl objects
        retval = []
        for r in self.original.getBuildable():
            retval.append(BuildRequestControl(self.original, r))

        return retval

    def getBuild(self, number):
        return self.original.getBuild(number)

    def ping(self):
        if not self.original.slaves:
            self.original.builder_status.addPointEvent(["ping", "no slave"])
            return defer.succeed(False) # interfaces.NoSlaveError
        dl = []
        for s in self.original.slaves:
            dl.append(
        d = defer.DeferredList(dl)
        d.addCallback(self._gatherPingResults)
        return d

    def _gatherPingResults(self, res):
        for ignored,success in res:
            if not success:
                return False
        return True

class BuildRequestControl:
    implements(interfaces.IBuildRequestControl)

    def __init__(self, builder, request):
        self.original_builder = builder
        self.original_request = request
        self.brid =

    def subscribe(self, observer):
        raise NotImplementedError

    def unsubscribe(self, observer):
        raise NotImplementedError

    def cancel(self):
        self.original_builder.cancelBuildRequest(self.brid)