Bug 814966 - Add backtracking register allocator, r=jandem.
author Brian Hackett <bhackett1024@gmail.com>
Fri, 14 Dec 2012 11:57:30 -0700
changeset 125202 dc4887f61d2e2042b1af209927d23686fd68302c
parent 125201 aafa9e2de5325478e00064bfe4908eb5bbd65352
child 125203 ba8c91f1d468724d44065727d771e50baf4386a9
push id 2151
push user lsblakk@mozilla.com
push date Tue, 19 Feb 2013 18:06:57 +0000
treeherder mozilla-beta@4952e88741ec
reviewers jandem
bugs 814966
milestone 20.0a1
Bug 814966 - Add backtracking register allocator, r=jandem.
js/src/Makefile.in
js/src/ion/BacktrackingAllocator.cpp
js/src/ion/BacktrackingAllocator.h
js/src/ion/C1Spewer.cpp
js/src/ion/Ion.cpp
js/src/ion/Ion.h
js/src/ion/JSONSpewer.cpp
js/src/ion/LIR-Common.h
js/src/ion/LIR.cpp
js/src/ion/LIR.h
js/src/ion/LinearScan.cpp
js/src/ion/LinearScan.h
js/src/ion/LiveRangeAllocator.cpp
js/src/ion/LiveRangeAllocator.h
js/src/ion/RegisterAllocator.cpp
js/src/ion/RegisterAllocator.h
js/src/ion/Safepoints.cpp
js/src/jsinfer.cpp
js/src/shell/js.cpp
--- a/js/src/Makefile.in
+++ b/js/src/Makefile.in
@@ -272,16 +272,17 @@ CPPSRCS += 	MethodJIT.cpp \
 		$(NULL)
 
 # Ion
 ifdef ENABLE_ION
 VPATH +=	$(srcdir)/ion
 VPATH +=	$(srcdir)/ion/shared
 
 CPPSRCS +=	MIR.cpp \
+		BacktrackingAllocator.cpp \
 		Bailouts.cpp \
 		BitSet.cpp \
 		C1Spewer.cpp \
 		CodeGenerator.cpp \
 		CodeGenerator-shared.cpp \
 		Ion.cpp \
 		IonAnalysis.cpp \
 		IonBuilder.cpp \
new file mode 100644
--- /dev/null
+++ b/js/src/ion/BacktrackingAllocator.cpp
@@ -0,0 +1,1313 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=99:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "BacktrackingAllocator.h"
+
+using namespace js;
+using namespace js::ion;
+
+using mozilla::DebugOnly;
+
+bool
+BacktrackingAllocator::init()
+{
+    RegisterSet remainingRegisters(allRegisters_);
+    while (!remainingRegisters.empty(/* float = */ false)) {
+        AnyRegister reg = AnyRegister(remainingRegisters.takeGeneral());
+        registers[reg.code()].allocatable = true;
+    }
+    while (!remainingRegisters.empty(/* float = */ true)) {
+        AnyRegister reg = AnyRegister(remainingRegisters.takeFloat());
+        registers[reg.code()].allocatable = true;
+    }
+
+    LifoAlloc *alloc = mir->temp().lifoAlloc();
+    for (size_t i = 0; i < AnyRegister::Total; i++) {
+        registers[i].reg = AnyRegister::FromCode(i);
+        registers[i].allocations.setAllocator(alloc);
+
+        LiveInterval *fixed = fixedIntervals[i];
+        for (size_t j = 0; j < fixed->numRanges(); j++) {
+            AllocatedRange range(fixed, fixed->getRange(j));
+            if (!registers[i].allocations.insert(range))
+                return false;
+        }
+    }
+
+    hotcode.setAllocator(alloc);
+
+    // Partition the graph into hot and cold sections, to help guide
+    // splitting decisions. Since we don't have any profiling data this is a
+    // crapshoot, so just mark the bodies of inner loops as hot and everything
+    // else as cold.
+
+    LiveInterval *hotcodeInterval = new LiveInterval(0);
+
+    LBlock *backedge = NULL;
+    for (size_t i = 0; i < graph.numBlocks(); i++) {
+        LBlock *block = graph.getBlock(i);
+
+        // If we see a loop header, mark the backedge so we know when we have
+        // hit the end of the loop. Don't process the loop immediately, so that
+        // if there is an inner loop we will ignore the outer backedge.
+        if (block->mir()->isLoopHeader())
+            backedge = block->mir()->backedge()->lir();
+
+        if (block == backedge) {
+            LBlock *header = block->mir()->loopHeaderOfBackedge()->lir();
+            CodePosition from = inputOf(header->firstId());
+            CodePosition to = outputOf(block->lastId()).next();
+            if (!hotcodeInterval->addRange(from, to))
+                return false;
+        }
+    }
+
+    for (size_t i = 0; i < hotcodeInterval->numRanges(); i++) {
+        AllocatedRange range(hotcodeInterval, hotcodeInterval->getRange(i));
+        if (!hotcode.insert(range))
+            return false;
+    }
+
+    return true;
+}
+
+static inline const char *
+IntervalString(const LiveInterval *interval)
+{
+#ifdef DEBUG
+    if (!interval->numRanges())
+        return " empty";
+
+    // Not reentrant!
+    static char buf[1000];
+
+    char *cursor = buf;
+    char *end = cursor + sizeof(buf);
+
+    for (size_t i = 0; i < interval->numRanges(); i++) {
+        const LiveInterval::Range *range = interval->getRange(i);
+        int n = JS_snprintf(cursor, end - cursor, " [%u,%u>", range->from.pos(), range->to.pos());
+        if (n < 0)
+            return " ???";
+        cursor += n;
+    }
+
+    return buf;
+#else
+    return " ???";
+#endif
+}
+
+bool
+BacktrackingAllocator::go()
+{
+    IonSpew(IonSpew_RegAlloc, "Beginning register allocation");
+
+    IonSpew(IonSpew_RegAlloc, "Beginning liveness analysis");
+    if (!buildLivenessInfo())
+        return false;
+    IonSpew(IonSpew_RegAlloc, "Liveness analysis complete");
+
+    if (IonSpewEnabled(IonSpew_RegAlloc))
+        dumpLiveness();
+
+    if (!init())
+        return false;
+
+    if (!queuedIntervals.reserve(graph.numVirtualRegisters() * 3 / 2))
+        return false;
+
+    if (!groupAndQueueRegisters())
+        return false;
+
+    if (IonSpewEnabled(IonSpew_RegAlloc))
+        dumpRegisterGroups();
+
+    // Allocate, spill and split register intervals until finished.
+    while (!queuedIntervals.empty()) {
+        if (mir->shouldCancel("Backtracking Allocation"))
+            return false;
+
+        LiveInterval *interval = queuedIntervals.removeHighest().interval;
+        if (!processInterval(interval))
+            return false;
+    }
+
+    if (IonSpewEnabled(IonSpew_RegAlloc))
+        dumpAllocations();
+
+    return resolveControlFlow() && reifyAllocations();
+}
+
+static bool
+LifetimesMightOverlap(BacktrackingVirtualRegister *reg0, BacktrackingVirtualRegister *reg1)
+{
+    // No fine grained testing, just see if there is a possibility of overlap.
+    CodePosition start0 = reg0->getFirstInterval()->start();
+    CodePosition start1 = reg1->getFirstInterval()->start();
+    CodePosition end0 = reg0->lastInterval()->end();
+    CodePosition end1 = reg1->lastInterval()->end();
+    return (end0 > start1) && (end1 > start0);
+}
+
+bool
+BacktrackingAllocator::canAddToGroup(VirtualRegisterGroup *group, BacktrackingVirtualRegister *reg)
+{
+    for (size_t i = 0; i < group->registers.length(); i++) {
+        if (LifetimesMightOverlap(reg, &vregs[group->registers[i]]))
+            return false;
+    }
+    return true;
+}
+
+bool
+BacktrackingAllocator::tryGroupRegisters(uint32_t vreg0, uint32_t vreg1)
+{
+    // See if reg0 and reg1 can be placed in the same group, following the
+    // restrictions imposed by VirtualRegisterGroup and any other registers
+    // already grouped with reg0 or reg1.
+    BacktrackingVirtualRegister *reg0 = &vregs[vreg0], *reg1 = &vregs[vreg1];
+
+    if (reg0->isDouble() != reg1->isDouble())
+        return true;
+
+    VirtualRegisterGroup *group0 = reg0->group(), *group1 = reg1->group();
+
+    if (!group0 && group1)
+        return tryGroupRegisters(vreg1, vreg0);
+
+    if (group0) {
+        if (group1) {
+            if (group0 == group1) {
+                // The registers are already grouped together.
+                return true;
+            }
+            // Try to unify the two distinct groups.
+            for (size_t i = 0; i < group1->registers.length(); i++) {
+                if (!canAddToGroup(group0, &vregs[group1->registers[i]]))
+                    return true;
+            }
+            for (size_t i = 0; i < group1->registers.length(); i++) {
+                uint32_t vreg = group1->registers[i];
+                if (!group0->registers.append(vreg))
+                    return false;
+                vregs[vreg].setGroup(group0);
+            }
+            return true;
+        }
+        if (!canAddToGroup(group0, reg1))
+            return true;
+        if (!group0->registers.append(vreg1))
+            return false;
+        reg1->setGroup(group0);
+        return true;
+    }
+
+    if (LifetimesMightOverlap(reg0, reg1))
+        return true;
+
+    VirtualRegisterGroup *group = new VirtualRegisterGroup();
+    if (!group->registers.append(vreg0) || !group->registers.append(vreg1))
+        return false;
+
+    reg0->setGroup(group);
+    reg1->setGroup(group);
+    return true;
+}
+
+bool
+BacktrackingAllocator::groupAndQueueRegisters()
+{
+    for (size_t i = 0; i < graph.numVirtualRegisters(); i++) {
+        if (mir->shouldCancel("Backtracking Group Registers"))
+            return false;
+
+        BacktrackingVirtualRegister &reg = vregs[i];
+
+        // Place all intervals for this register on the allocation queue.
+        for (size_t j = 0; j < reg.numIntervals(); j++) {
+            LiveInterval *interval = reg.getInterval(j);
+            if (interval->numRanges() > 0) {
+                size_t priority = computePriority(interval);
+                if (!queuedIntervals.insert(QueuedInterval(interval, priority)))
+                    return false;
+            }
+        }
+
+        LDefinition *def = reg.def();
+        if (def && def->policy() == LDefinition::MUST_REUSE_INPUT) {
+            LUse *use = reg.ins()->getOperand(def->getReusedInput())->toUse();
+            VirtualRegister &usedReg = vregs[use->virtualRegister()];
+            if (usedReg.intervalFor(outputOf(reg.ins())) || reg.intervalFor(inputOf(reg.ins()))) {
+                // This definition reuses an input that is live afterwards
+                // (either in future instructions or a safepoint for the
+                // definition). This is impossible to satisfy without first
+                // copying the input, and rather than encoding this by
+                // splitting intervals (which may require even more copying
+                // later) mark the register as needing this copy during
+                // reification and relax the MUST_REUSE_INPUT constraint.
+                IonSpew(IonSpew_RegAlloc, "Relaxing reuse-input constraint on v%u", i);
+                reg.setMustCopyInput();
+            } else {
+                // This definition reuses an input that is not live afterwards.
+                // The input and output can use the same allocation, and it is
+                // desirable to do this to avoid unnecessary copies.
+                if (!tryGroupRegisters(use->virtualRegister(), def->virtualRegister()))
+                    return false;
+            }
+        }
+
+        // Try to group phis with their inputs.
+        for (size_t i = 0; i < graph.numBlocks(); i++) {
+            LBlock *block = graph.getBlock(i);
+            for (size_t j = 0; j < block->numPhis(); j++) {
+                LPhi *phi = block->getPhi(j);
+                uint32_t output = phi->getDef(0)->virtualRegister();
+                for (size_t k = 0; k < phi->numOperands(); k++) {
+                    uint32_t input = phi->getOperand(k)->toUse()->virtualRegister();
+                    if (!tryGroupRegisters(input, output))
+                        return false;
+                }
+            }
+        }
+    }
+
+    return true;
+}
+
+static const size_t MAX_ATTEMPTS = 2;
+
+bool
+BacktrackingAllocator::processInterval(LiveInterval *interval)
+{
+    if (IonSpewEnabled(IonSpew_RegAlloc)) {
+        IonSpew(IonSpew_RegAlloc, "Allocating v%u [priority %lu] [weight %lu]: %s",
+                interval->vreg(), computePriority(interval), computeSpillWeight(interval),
+                IntervalString(interval));
+    }
+
+    // An interval can be processed by doing any of the following:
+    //
+    // - Assigning the interval a register. The interval cannot overlap any
+    //   other interval allocated for that physical register.
+    //
+    // - Spilling the interval, provided it has no register uses.
+    //
+    // - Splitting the interval into two or more intervals which cover the
+    //   original one. The new intervals are placed back onto the priority
+    //   queue for later processing.
+    //
+    // - Evicting one or more existing allocated intervals, and then doing one
+    //   of the above operations. Evicted intervals are placed back on the
+    //   priority queue. Any evicted intervals must have a lower spill weight
+    //   than the interval being processed.
+    //
+    // As long as this structure is followed, termination is guaranteed.
+    // In general, we want to minimize the amount of interval splitting
+    // (which generally necessitates spills), so allocate longer lived, lower
+    // weight intervals first and evict and split them later if they prevent
+    // allocation for higher weight intervals.
+
+    bool canAllocate = setIntervalRequirement(interval);
+
+    LiveInterval *conflict;
+    for (size_t attempt = 0;; attempt++) {
+        if (canAllocate) {
+            // Spill intervals which are required to be in a certain stack slot.
+            if (interval->requirement()->kind() == Requirement::FIXED &&
+                !interval->requirement()->allocation().isRegister())
+            {
+                IonSpew(IonSpew_RegAlloc, "stack allocation requirement");
+                interval->setAllocation(interval->requirement()->allocation());
+                return true;
+            }
+
+            conflict = NULL;
+
+            // If we want, but do not require, an interval to be in a specific
+            // register, only look at that register for allocating and evict
+            // or spill if it is not available. Picking a separate register may
+            // be even worse than spilling, as it will still necessitate moves
+            // and will tie up more registers than if we spilled.
+            if (interval->hint()->kind() == Requirement::FIXED) {
+                AnyRegister reg = interval->hint()->allocation().toRegister();
+                bool success;
+                if (!tryAllocateRegister(registers[reg.code()], interval, &success, &conflict))
+                    return false;
+                if (success)
+                    return true;
+            }
+
+            // Spill intervals which have no hint or register requirement.
+            if (interval->requirement()->kind() == Requirement::NONE) {
+                spill(interval);
+                return true;
+            }
+
+            if (!conflict || minimalInterval(interval)) {
+                // Search for any available register which the interval can be
+                // allocated to.
+                for (size_t i = 0; i < AnyRegister::Total; i++) {
+                    bool success;
+                    if (!tryAllocateRegister(registers[i], interval, &success, &conflict))
+                        return false;
+                    if (success)
+                        return true;
+                }
+            }
+        }
+
+        // Failed to allocate a register for this interval.
+
+        if (attempt < MAX_ATTEMPTS &&
+            canAllocate &&
+            conflict &&
+            computeSpillWeight(conflict) < computeSpillWeight(interval))
+        {
+            if (!evictInterval(conflict))
+                return false;
+            continue;
+        }
+
+        // A minimal interval cannot be split any further. If we try to split
+        // it at this point we will just end up with the same interval and will
+        // enter an infinite loop. Weights and the initial live intervals must
+        // be constructed so that any minimal interval is allocatable.
+        JS_ASSERT(!minimalInterval(interval));
+
+        return chooseIntervalSplit(interval);
+    }
+}
+
+bool
+BacktrackingAllocator::setIntervalRequirement(LiveInterval *interval)
+{
+    // Set any requirement or hint on the interval according to its definition and
+    // uses. Return false if there are conflicting requirements which will
+    // require the interval to be split.
+    interval->setHint(Requirement());
+    interval->setRequirement(Requirement());
+
+    BacktrackingVirtualRegister *reg = &vregs[interval->vreg()];
+
+    // Set a hint if another interval in the same group is in a register.
+    if (VirtualRegisterGroup *group = reg->group()) {
+        if (group->allocation.isRegister())
+            interval->setHint(Requirement(group->allocation));
+    }
+
+    if (interval->index() == 0) {
+        // The first interval is the definition, so deal with any definition
+        // constraints/hints.
+
+        LDefinition::Policy policy = reg->def()->policy();
+        if (policy == LDefinition::PRESET) {
+            // Preset policies get a FIXED requirement.
+            interval->setRequirement(Requirement(*reg->def()->output()));
+        } else if (reg->ins()->isPhi()) {
+            // Phis don't have any requirements, but they should prefer their
+            // input allocations. This is captured by the group hints above.
+        } else {
+            // Non-phis get a REGISTER requirement.
+            interval->setRequirement(Requirement(Requirement::REGISTER));
+        }
+    }
+
+    // Search uses for requirements.
+    for (UsePositionIterator iter = interval->usesBegin();
+         iter != interval->usesEnd();
+         iter++)
+    {
+        LUse::Policy policy = iter->use->policy();
+        if (policy == LUse::FIXED) {
+            AnyRegister required = GetFixedRegister(reg->def(), iter->use);
+
+            // If there are multiple fixed registers which the interval is
+            // required to use, fail. The interval will need to be split before
+            // it can be allocated.
+            if (!interval->addRequirement(Requirement(LAllocation(required))))
+                return false;
+        } else if (policy == LUse::REGISTER) {
+            if (!interval->addRequirement(Requirement(Requirement::REGISTER)))
+                return false;
+        }
+    }
+
+    return true;
+}
+
+bool
+BacktrackingAllocator::tryAllocateRegister(PhysicalRegister &r, LiveInterval *interval,
+                                           bool *success, LiveInterval **pconflicting)
+{
+    *success = false;
+
+    if (!r.allocatable)
+        return true;
+
+    BacktrackingVirtualRegister *reg = &vregs[interval->vreg()];
+    if (reg->isDouble() != r.reg.isFloat())
+        return true;
+
+    if (interval->requirement()->kind() == Requirement::FIXED) {
+        if (interval->requirement()->allocation() != LAllocation(r.reg)) {
+            IonSpew(IonSpew_RegAlloc, "%s does not match fixed requirement", r.reg.name());
+            return true;
+        }
+    }
+
+    for (size_t i = 0; i < interval->numRanges(); i++) {
+        AllocatedRange range(interval, interval->getRange(i)), existing;
+        if (r.allocations.contains(range, &existing)) {
+            if (existing.interval->hasVreg()) {
+                if (IonSpewEnabled(IonSpew_RegAlloc)) {
+                    IonSpew(IonSpew_RegAlloc, "%s collides with v%u [%u,%u> [weight %lu]",
+                            r.reg.name(), existing.interval->vreg(),
+                            existing.range->from.pos(), existing.range->to.pos(),
+                            computeSpillWeight(existing.interval));
+                }
+                if (!*pconflicting || computeSpillWeight(existing.interval) < computeSpillWeight(*pconflicting))
+                    *pconflicting = existing.interval;
+            } else {
+                if (IonSpewEnabled(IonSpew_RegAlloc)) {
+                    IonSpew(IonSpew_RegAlloc, "%s collides with fixed use [%u,%u>",
+                            r.reg.name(), existing.range->from.pos(), existing.range->to.pos());
+                }
+            }
+            return true;
+        }
+    }
+
+    IonSpew(IonSpew_RegAlloc, "allocated to %s", r.reg.name());
+
+    for (size_t i = 0; i < interval->numRanges(); i++) {
+        AllocatedRange range(interval, interval->getRange(i));
+        if (!r.allocations.insert(range))
+            return false;
+    }
+
+    // Set any register hint for allocating other intervals in the same group.
+    if (VirtualRegisterGroup *group = reg->group()) {
+        if (!group->allocation.isRegister())
+            group->allocation = LAllocation(r.reg);
+    }
+
+    interval->setAllocation(LAllocation(r.reg));
+    *success = true;
+    return true;
+}
+
+bool
+BacktrackingAllocator::evictInterval(LiveInterval *interval)
+{
+    if (IonSpewEnabled(IonSpew_RegAlloc)) {
+        IonSpew(IonSpew_RegAlloc, "Evicting interval v%u: %s",
+                interval->vreg(), IntervalString(interval));
+    }
+
+    JS_ASSERT(interval->getAllocation()->isRegister());
+
+    AnyRegister reg(interval->getAllocation()->toRegister());
+    PhysicalRegister &physical = registers[reg.code()];
+    JS_ASSERT(physical.reg == reg && physical.allocatable);
+
+    for (size_t i = 0; i < interval->numRanges(); i++) {
+        AllocatedRange range(interval, interval->getRange(i));
+        physical.allocations.remove(range);
+    }
+
+    interval->setAllocation(LAllocation());
+
+    size_t priority = computePriority(interval);
+    return queuedIntervals.insert(QueuedInterval(interval, priority));
+}
+
+bool
+BacktrackingAllocator::splitAndRequeueInterval(LiveInterval *interval,
+                                               const LiveIntervalVector &newIntervals)
+{
+    JS_ASSERT(newIntervals.length() >= 2);
+
+    if (IonSpewEnabled(IonSpew_RegAlloc)) {
+        IonSpew(IonSpew_RegAlloc, "splitting interval %s:", IntervalString(interval));
+        for (size_t i = 0; i < newIntervals.length(); i++)
+            IonSpew(IonSpew_RegAlloc, "    %s", IntervalString(newIntervals[i]));
+    }
+
+    // Find the earliest interval in the new list.
+    LiveInterval *first = newIntervals[0];
+    for (size_t i = 1; i < newIntervals.length(); i++) {
+        if (newIntervals[i]->start() < first->start())
+            first = newIntervals[i];
+    }
+
+    // Replace the old interval in the virtual register's state with the new intervals.
+    VirtualRegister *reg = &vregs[interval->vreg()];
+    reg->replaceInterval(interval, first);
+    for (size_t i = 0; i < newIntervals.length(); i++) {
+        if (newIntervals[i] != first && !reg->addInterval(newIntervals[i]))
+            return false;
+    }
+
+    // Redistribute uses from the old interval to the new intervals. Intervals
+    // are permitted to overlap. In such cases, assign the use to the interval
+    // with the latest start position.
+    for (UsePositionIterator iter(interval->usesBegin());
+         iter != interval->usesEnd();
+         iter++)
+    {
+        CodePosition pos = iter->pos;
+        LiveInterval *maxInterval = NULL;
+        for (size_t i = 0; i < newIntervals.length(); i++) {
+            if (newIntervals[i]->covers(pos)) {
+                if (!maxInterval || newIntervals[i]->start() > maxInterval->start())
+                    maxInterval = newIntervals[i];
+            }
+        }
+        maxInterval->addUse(new UsePosition(iter->use, iter->pos));
+    }
+
+    // Queue the new intervals for register assignment.
+    for (size_t i = 0; i < newIntervals.length(); i++) {
+        LiveInterval *newInterval = newIntervals[i];
+        size_t priority = computePriority(newInterval);
+        if (!queuedIntervals.insert(QueuedInterval(newInterval, priority)))
+            return false;
+    }
+
+    return true;
+}
+
+void
+BacktrackingAllocator::spill(LiveInterval *interval)
+{
+    IonSpew(IonSpew_RegAlloc, "Spilling interval");
+
+    JS_ASSERT(interval->requirement()->kind() == Requirement::NONE);
+
+    // We can't spill bogus intervals.
+    JS_ASSERT(interval->hasVreg());
+
+    BacktrackingVirtualRegister *reg = &vregs[interval->vreg()];
+
+    if (reg->canonicalSpill()) {
+        IonSpew(IonSpew_RegAlloc, "  Picked canonical spill location %u",
+                reg->canonicalSpill()->toStackSlot()->slot());
+        interval->setAllocation(*reg->canonicalSpill());
+        return;
+    }
+
+    if (reg->group() && reg->group()->spill.isStackSlot()) {
+        IonSpew(IonSpew_RegAlloc, "  Reusing group spill location %u",
+                reg->group()->spill.toStackSlot()->slot());
+        interval->setAllocation(reg->group()->spill);
+        reg->setCanonicalSpill(reg->group()->spill);
+        return;
+    }
+
+    uint32_t stackSlot;
+    if (reg->isDouble()) {
+        stackSlot = stackSlotAllocator.allocateDoubleSlot();
+    } else {
+        stackSlot = stackSlotAllocator.allocateSlot();
+    }
+    JS_ASSERT(stackSlot <= stackSlotAllocator.stackHeight());
+
+    IonSpew(IonSpew_RegAlloc, "  Allocating canonical spill location %u", stackSlot);
+    interval->setAllocation(LStackSlot(stackSlot, reg->isDouble()));
+    reg->setCanonicalSpill(*interval->getAllocation());
+
+    if (reg->group()) {
+        JS_ASSERT(!reg->group()->spill.isStackSlot());
+        reg->group()->spill = *interval->getAllocation();
+    }
+}
+
+// Add moves to resolve conflicting assignments between a block and its
+// predecessors. XXX try to common this with LinearScanAllocator.
+bool
+BacktrackingAllocator::resolveControlFlow()
+{
+    for (size_t i = 0; i < graph.numVirtualRegisters(); i++) {
+        BacktrackingVirtualRegister *reg = &vregs[i];
+
+        if (mir->shouldCancel("Backtracking Resolve Control Flow (vreg loop)"))
+            return false;
+
+        for (size_t j = 1; j < reg->numIntervals(); j++) {
+            LiveInterval *interval = reg->getInterval(j);
+            JS_ASSERT(interval->index() == j);
+
+            bool skip = false;
+            for (int k = j - 1; k >= 0; k--) {
+                LiveInterval *prevInterval = reg->getInterval(k);
+                if (prevInterval->start() != interval->start())
+                    break;
+                if (*prevInterval->getAllocation() == *interval->getAllocation()) {
+                    skip = true;
+                    break;
+                }
+            }
+            if (skip)
+                continue;
+
+            CodePosition start = interval->start();
+            InstructionData *data = &insData[start];
+            if (interval->start() > inputOf(data->block()->firstId())) {
+                JS_ASSERT(start == inputOf(data->ins()) || start == outputOf(data->ins()));
+
+                LiveInterval *prevInterval = reg->intervalFor(start.previous());
+                if (start.subpos() == CodePosition::INPUT) {
+                    if (!moveInput(inputOf(data->ins()), prevInterval, interval))
+                        return false;
+                } else {
+                    if (!moveAfter(outputOf(data->ins()), prevInterval, interval))
+                        return false;
+                }
+            }
+        }
+    }
+
+    for (size_t i = 0; i < graph.numBlocks(); i++) {
+        if (mir->shouldCancel("Backtracking Resolve Control Flow (block loop)"))
+            return false;
+
+        LBlock *successor = graph.getBlock(i);
+        MBasicBlock *mSuccessor = successor->mir();
+        if (mSuccessor->numPredecessors() < 1)
+            continue;
+
+        // Resolve phis to moves
+        for (size_t j = 0; j < successor->numPhis(); j++) {
+            LPhi *phi = successor->getPhi(j);
+            JS_ASSERT(phi->numDefs() == 1);
+            VirtualRegister *vreg = &vregs[phi->getDef(0)];
+            LiveInterval *to = vreg->intervalFor(inputOf(successor->firstId()));
+            JS_ASSERT(to);
+
+            for (size_t k = 0; k < mSuccessor->numPredecessors(); k++) {
+                LBlock *predecessor = mSuccessor->getPredecessor(k)->lir();
+                JS_ASSERT(predecessor->mir()->numSuccessors() == 1);
+
+                LAllocation *input = phi->getOperand(predecessor->mir()->positionInPhiSuccessor());
+                LiveInterval *from = vregs[input].intervalFor(outputOf(predecessor->lastId()));
+                JS_ASSERT(from);
+
+                LMoveGroup *moves = predecessor->getExitMoveGroup();
+                if (!addMove(moves, from, to))
+                    return false;
+            }
+        }
+
+        // Resolve split intervals with moves
+        BitSet *live = liveIn[mSuccessor->id()];
+
+        for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
+            LiveInterval *to = vregs[*liveRegId].intervalFor(inputOf(successor->firstId()));
+            JS_ASSERT(to);
+
+            for (size_t j = 0; j < mSuccessor->numPredecessors(); j++) {
+                LBlock *predecessor = mSuccessor->getPredecessor(j)->lir();
+                LiveInterval *from = vregs[*liveRegId].intervalFor(outputOf(predecessor->lastId()));
+                JS_ASSERT(from);
+
+                if (mSuccessor->numPredecessors() > 1) {
+                    JS_ASSERT(predecessor->mir()->numSuccessors() == 1);
+                    LMoveGroup *moves = predecessor->getExitMoveGroup();
+                    if (!addMove(moves, from, to))
+                        return false;
+                } else {
+                    LMoveGroup *moves = successor->getEntryMoveGroup();
+                    if (!addMove(moves, from, to))
+                        return false;
+                }
+            }
+        }
+    }
+
+    return true;
+}
+
+static LDefinition *
+FindReusingDefinition(LInstruction *ins, LAllocation *alloc)
+{
+    for (size_t i = 0; i < ins->numDefs(); i++) {
+        LDefinition *def = ins->getDef(i);
+        if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
+            ins->getOperand(def->getReusedInput()) == alloc)
+            return def;
+    }
+    for (size_t i = 0; i < ins->numTemps(); i++) {
+        LDefinition *def = ins->getTemp(i);
+        if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
+            ins->getOperand(def->getReusedInput()) == alloc)
+            return def;
+    }
+    return NULL;
+}
+
+bool
+BacktrackingAllocator::isReusedInput(LUse *use, LInstruction *ins, bool considerCopy)
+{
+    if (LDefinition *def = FindReusingDefinition(ins, use))
+        return considerCopy || !vregs[def->virtualRegister()].mustCopyInput();
+    return false;
+}
+
+bool
+BacktrackingAllocator::reifyAllocations()
+{
+    for (size_t i = 0; i < graph.numVirtualRegisters(); i++) {
+        VirtualRegister *reg = &vregs[i];
+
+        if (mir->shouldCancel("Backtracking Reify Allocations (main loop)"))
+            return false;
+
+        for (size_t j = 0; j < reg->numIntervals(); j++) {
+            LiveInterval *interval = reg->getInterval(j);
+            JS_ASSERT(interval->index() == j);
+
+            if (interval->index() == 0) {
+                reg->def()->setOutput(*interval->getAllocation());
+                if (reg->ins()->recoversInput()) {
+                    LSnapshot *snapshot = reg->ins()->snapshot();
+                    for (size_t i = 0; i < snapshot->numEntries(); i++) {
+                        LAllocation *entry = snapshot->getEntry(i);
+                        if (entry->isUse() && entry->toUse()->policy() == LUse::RECOVERED_INPUT)
+                            *entry = *reg->def()->output();
+                    }
+                }
+            }
+
+            for (UsePositionIterator iter(interval->usesBegin());
+                 iter != interval->usesEnd();
+                 iter++)
+            {
+                LAllocation *alloc = iter->use;
+                *alloc = *interval->getAllocation();
+
+                // For any uses which feed into MUST_REUSE_INPUT definitions,
+                // add copies if the use and def have different allocations.
+                LInstruction *ins = insData[iter->pos].ins();
+                if (LDefinition *def = FindReusingDefinition(ins, alloc)) {
+                    LiveInterval *outputInterval =
+                        vregs[def->virtualRegister()].intervalFor(outputOf(ins));
+                    LAllocation *res = outputInterval->getAllocation();
+                    LAllocation *sourceAlloc = interval->getAllocation();
+
+                    if (*res != *alloc) {
+                        LMoveGroup *group = getInputMoveGroup(inputOf(ins));
+                        if (!group->addAfter(sourceAlloc, res))
+                            return false;
+                        *alloc = *res;
+                    }
+                }
+            }
+        }
+    }
+
+    graph.setLocalSlotCount(stackSlotAllocator.stackHeight());
+    return true;
+}
+
+void
+BacktrackingAllocator::dumpRegisterGroups()
+{
+    printf("Register groups:\n");
+    for (size_t i = 0; i < graph.numVirtualRegisters(); i++) {
+        if (VirtualRegisterGroup *group = vregs[i].group()) {
+            bool minimum = true;
+            for (size_t j = 0; j < group->registers.length(); j++) {
+                if (group->registers[j] < i) {
+                    minimum = false;
+                    break;
+                }
+            }
+            if (minimum) {
+                for (size_t j = 0; j < group->registers.length(); j++)
+                    printf(" v%u", group->registers[j]);
+                printf("\n");
+            }
+        }
+    }
+}
+
+void
+BacktrackingAllocator::dumpLiveness()
+{
+#ifdef DEBUG
+    printf("Virtual Registers:\n");
+
+    for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
+        LBlock *block = graph.getBlock(blockIndex);
+        MBasicBlock *mir = block->mir();
+
+        printf("\nBlock %lu", blockIndex);
+        for (size_t i = 0; i < mir->numSuccessors(); i++)
+            printf(" [successor %u]", mir->getSuccessor(i)->id());
+        printf("\n");
+
+        for (size_t i = 0; i < block->numPhis(); i++) {
+            LPhi *phi = block->getPhi(i);
+
+            printf("Phi v%u <-", phi->getDef(0)->virtualRegister());
+            for (size_t j = 0; j < phi->numOperands(); j++)
+                printf(" v%u", phi->getOperand(j)->toUse()->virtualRegister());
+            printf("\n");
+        }
+
+        for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
+            LInstruction *ins = *iter;
+
+            printf("[%u,%u %s]", inputOf(ins).pos(), outputOf(ins).pos(), ins->opName());
+
+            for (size_t i = 0; i < ins->numTemps(); i++) {
+                LDefinition *temp = ins->getTemp(i);
+                if (!temp->isBogusTemp())
+                    printf(" [temp v%u]", temp->virtualRegister());
+            }
+
+            for (size_t i = 0; i < ins->numDefs(); i++) {
+                LDefinition *def = ins->getDef(i);
+                printf(" [def v%u]", def->virtualRegister());
+            }
+
+            for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
+                if (alloc->isUse())
+                    printf(" [use v%u]", alloc->toUse()->virtualRegister());
+            }
+
+            printf("\n");
+        }
+    }
+
+    printf("\nLive Ranges:\n\n");
+
+    for (size_t i = 0; i < AnyRegister::Total; i++)
+        printf("reg %s: %s\n", AnyRegister::FromCode(i).name(), IntervalString(fixedIntervals[i]));
+
+    for (size_t i = 0; i < graph.numVirtualRegisters(); i++) {
+        printf("v%lu:", i);
+        VirtualRegister &vreg = vregs[i];
+        for (size_t j = 0; j < vreg.numIntervals(); j++) {
+            if (j)
+                printf(" *");
+            printf("%s", IntervalString(vreg.getInterval(j)));
+        }
+        printf("\n");
+    }
+
+    printf("\n");
+#endif // DEBUG
+}
+
+void
+BacktrackingAllocator::dumpAllocations()
+{
+#ifdef DEBUG
+    printf("Allocations:\n");
+
+    for (size_t i = 0; i < graph.numVirtualRegisters(); i++) {
+        printf("v%lu:", i);
+        VirtualRegister &vreg = vregs[i];
+        for (size_t j = 0; j < vreg.numIntervals(); j++) {
+            if (j)
+                printf(" *");
+            LiveInterval *interval = vreg.getInterval(j);
+            printf("%s :: %s", IntervalString(interval), interval->getAllocation()->toString());
+        }
+        printf("\n");
+    }
+
+    printf("\n");
+#endif // DEBUG
+}
+
+bool
+BacktrackingAllocator::addLiveInterval(LiveIntervalVector &intervals, uint32_t vreg,
+                                       CodePosition from, CodePosition to)
+{
+    LiveInterval *interval = new LiveInterval(vreg, 0);
+    return interval->addRange(from, to) && intervals.append(interval);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Heuristic Methods
+///////////////////////////////////////////////////////////////////////////////
+
+size_t
+BacktrackingAllocator::computePriority(const LiveInterval *interval)
+{
+    // The priority of an interval is its total length, so that longer lived
+    // intervals will be processed before shorter ones (even if the longer ones
+    // have a low spill weight). See processInterval().
+    size_t lifetimeTotal = 0;
+
+    for (size_t i = 0; i < interval->numRanges(); i++) {
+        const LiveInterval::Range *range = interval->getRange(i);
+        lifetimeTotal += range->to.pos() - range->from.pos();
+    }
+
+    return lifetimeTotal;
+}
+
+CodePosition
+BacktrackingAllocator::minimalDefEnd(LInstruction *ins)
+{
+    // Compute the shortest interval that captures vregs defined by ins.
+    // Watch for instructions that are followed by an OSI point and/or Nop.
+    // If moves are introduced between the instruction and the OSI point then
+    // safepoint information for the instruction may be incorrect. This is
+    // pretty disgusting and should be fixed somewhere else in the compiler.
+    while (true) {
+        LInstruction *next = insData[outputOf(ins).next()].ins();
+        if (!next->isNop() && !next->isOsiPoint())
+            break;
+        ins = next;
+    }
+    return outputOf(ins);
+}
+
+bool
+BacktrackingAllocator::minimalDef(const LiveInterval *interval, LInstruction *ins)
+{
+    // Whether interval is a minimal interval capturing a definition at ins.
+    return (interval->end() <= minimalDefEnd(ins).next()) &&
+        (interval->start() == inputOf(ins) || interval->start() == outputOf(ins));
+}
+
+bool
+BacktrackingAllocator::minimalUse(const LiveInterval *interval, LInstruction *ins)
+{
+    // Whether interval is a minimal interval capturing a use at ins.
+    return (interval->start() == inputOf(ins)) &&
+        (interval->end() == outputOf(ins) || interval->end() == outputOf(ins).next());
+}
+
+bool
+BacktrackingAllocator::minimalInterval(const LiveInterval *interval, bool *pfixed)
+{
+    if (interval->index() == 0) {
+        VirtualRegister &reg = vregs[interval->vreg()];
+        if (pfixed)
+            *pfixed = reg.def()->policy() == LDefinition::PRESET && reg.def()->output()->isRegister();
+        return minimalDef(interval, reg.ins());
+    }
+
+    for (UsePositionIterator iter = interval->usesBegin(); iter != interval->usesEnd(); iter++) {
+        LUse *use = iter->use;
+
+        switch (use->policy()) {
+          case LUse::FIXED:
+            if (pfixed)
+                *pfixed = true;
+            return minimalUse(interval, insData[iter->pos].ins());
+
+          case LUse::REGISTER:
+            if (pfixed)
+                *pfixed = false;
+            return minimalUse(interval, insData[iter->pos].ins());
+
+          default:
+            break;
+        }
+    }
+
+    return false;
+}
+
+size_t
+BacktrackingAllocator::computeSpillWeight(const LiveInterval *interval)
+{
+    // Minimal intervals have an extremely high spill weight, to ensure they
+    // can evict any other intervals and be allocated to a register.
+    bool fixed;
+    if (minimalInterval(interval, &fixed))
+        return fixed ? 2000000 : 1000000;
+
+    size_t usesTotal = 0;
+
+    if (interval->index() == 0) {
+        VirtualRegister *reg = &vregs[interval->vreg()];
+        if (reg->def()->policy() == LDefinition::PRESET && reg->def()->output()->isRegister())
+            usesTotal += 2000;
+        else if (!reg->ins()->isPhi())
+            usesTotal += 2000;
+    }
+
+    for (UsePositionIterator iter = interval->usesBegin(); iter != interval->usesEnd(); iter++) {
+        LUse *use = iter->use;
+
+        switch (use->policy()) {
+          case LUse::ANY:
+            usesTotal += 1000;
+            break;
+
+          case LUse::REGISTER:
+          case LUse::FIXED:
+            usesTotal += 2000;
+            break;
+
+          case LUse::KEEPALIVE:
+            break;
+
+          default:
+            // Note: RECOVERED_INPUT will not appear in UsePositionIterator.
+            JS_NOT_REACHED("Bad use");
+        }
+    }
+
+    // Intervals for registers in groups get higher weights.
+    if (interval->hint()->kind() != Requirement::NONE)
+        usesTotal += 2000;
+
+    // Compute spill weight as a use density, lowering the weight for long
+    // lived intervals with relatively few uses.
+    size_t lifetimeTotal = computePriority(interval);
+    return lifetimeTotal ? usesTotal / lifetimeTotal : 0;
+}
+
+bool
+BacktrackingAllocator::trySplitAcrossHotcode(LiveInterval *interval, bool *success)
+{
+    // If this interval has portions that are hot and portions that are cold,
+    // split it at the boundaries between hot and cold code.
+
+    const LiveInterval::Range *hotRange = NULL;
+
+    for (size_t i = 0; i < interval->numRanges(); i++) {
+        AllocatedRange range(interval, interval->getRange(i)), existing;
+        if (hotcode.contains(range, &existing)) {
+            hotRange = existing.range;
+            break;
+        }
+    }
+
+    // Don't split if there is no hot code in the interval.
+    if (!hotRange)
+        return true;
+
+    // Don't split if there is no cold code in the interval.
+    bool coldCode = false;
+    for (size_t i = 0; i < interval->numRanges(); i++) {
+        if (!hotRange->contains(interval->getRange(i))) {
+            coldCode = true;
+            break;
+        }
+    }
+    if (!coldCode)
+        return true;
+
+    LiveInterval *hotInterval = new LiveInterval(interval->vreg(), 0);
+    LiveInterval *preInterval = NULL, *postInterval = NULL;
+
+    // Accumulate the ranges of hot and cold code in the interval. Note that
+    // we are only comparing with the single hot range found, so the cold code
+    // may contain separate hot ranges.
+    Vector<LiveInterval::Range, 1, SystemAllocPolicy> hotList, coldList;
+    for (size_t i = 0; i < interval->numRanges(); i++) {
+        LiveInterval::Range hot, coldPre, coldPost;
+        interval->getRange(i)->intersect(hotRange, &coldPre, &hot, &coldPost);
+
+        if (!hot.empty() && !hotInterval->addRange(hot.from, hot.to))
+            return false;
+
+        if (!coldPre.empty()) {
+            if (!preInterval)
+                preInterval = new LiveInterval(interval->vreg(), 0);
+            if (!preInterval->addRange(coldPre.from, coldPre.to))
+                return false;
+        }
+
+        if (!coldPost.empty()) {
+            if (!postInterval)
+                postInterval = new LiveInterval(interval->vreg(), 0);
+            if (!postInterval->addRange(coldPost.from, coldPost.to))
+                return false;
+        }
+    }
+
+    JS_ASSERT(preInterval || postInterval);
+    JS_ASSERT(hotInterval->numRanges());
+
+    LiveIntervalVector newIntervals;
+    if (!newIntervals.append(hotInterval))
+        return false;
+    if (preInterval && !newIntervals.append(preInterval))
+        return false;
+    if (postInterval && !newIntervals.append(postInterval))
+        return false;
+
+    *success = true;
+    return splitAndRequeueInterval(interval, newIntervals);
+}
+
+bool
+BacktrackingAllocator::trySplitAfterLastRegisterUse(LiveInterval *interval, bool *success)
+{
+    // If this interval's later uses do not require it to be in a register,
+    // split it after the last use which does require a register.
+
+    CodePosition lastRegisterFrom, lastRegisterTo, lastUse;
+
+    for (UsePositionIterator iter(interval->usesBegin());
+         iter != interval->usesEnd();
+         iter++)
+    {
+        LUse *use = iter->use;
+        LInstruction *ins = insData[iter->pos].ins();
+
+        // Uses in the interval should be sorted.
+        JS_ASSERT(iter->pos >= lastUse);
+        lastUse = inputOf(ins);
+
+        switch (use->policy()) {
+          case LUse::ANY:
+            if (isReusedInput(iter->use, ins, /* considerCopy = */ true)) {
+                lastRegisterFrom = inputOf(ins);
+                lastRegisterTo = iter->pos.next();
+            }
+            break;
+
+          case LUse::REGISTER:
+          case LUse::FIXED:
+            lastRegisterFrom = inputOf(ins);
+            lastRegisterTo = iter->pos.next();
+            break;
+
+          default:
+            break;
+        }
+    }
+
+    if (!lastRegisterFrom.pos() || lastRegisterFrom == lastUse) {
+        // Can't trim non-register uses off the end by splitting.
+        return true;
+    }
+
+    LiveInterval *preInterval = new LiveInterval(interval->vreg(), 0);
+    LiveInterval *postInterval = new LiveInterval(interval->vreg(), 0);
+
+    for (size_t i = 0; i < interval->numRanges(); i++) {
+        const LiveInterval::Range *range = interval->getRange(i);
+
+        if (range->from < lastRegisterTo) {
+            CodePosition to = (range->to <= lastRegisterTo) ? range->to : lastRegisterTo;
+            if (!preInterval->addRange(range->from, to))
+                return false;
+        }
+
+        if (lastRegisterFrom < range->to) {
+            CodePosition from = (lastRegisterFrom <= range->from) ? range->from : lastRegisterFrom;
+            if (!postInterval->addRange(from, range->to))
+                return false;
+        }
+    }
+
+    LiveIntervalVector newIntervals;
+    if (!newIntervals.append(preInterval) || !newIntervals.append(postInterval))
+        return false;
+
+    *success = true;
+    return splitAndRequeueInterval(interval, newIntervals);
+}
+
+bool
+BacktrackingAllocator::splitAtAllRegisterUses(LiveInterval *interval)
+{
+    // Split this interval so that all its register uses become minimal
+    // intervals and allow the vreg to be spilled throughout its range.
+
+    Vector<LiveInterval::Range, 4, SystemAllocPolicy> registerUses;
+    uint32_t vreg = interval->vreg();
+
+    CodePosition spillStart = interval->start();
+    if (interval->index() == 0) {
+        // Treat the definition of the interval as a register use so that it
+        // can be split and spilled ASAP. One issue to watch for:
+        //
+        // - Parameters cannot be spilled immediately, as arguments checks
+        //   assume no movements occur between a script's LParameters and its
+        //   following LStart. It's pointless to do such spills anyway, as the
+        //   vreg is already on the stack.
+        VirtualRegister &reg = vregs[vreg];
+        if (!reg.ins()->isPhi() &&
+            (reg.def()->policy() != LDefinition::PRESET ||
+             reg.def()->output()->isRegister()))
+        {
+            CodePosition from = interval->start();
+            CodePosition to = minimalDefEnd(reg.ins()).next();
+            if (!registerUses.append(LiveInterval::Range(from, to)))
+                return false;
+            spillStart = to;
+        }
+    }
+
+    for (UsePositionIterator iter(interval->usesBegin());
+         iter != interval->usesEnd();
+         iter++)
+    {
+        LUse *use = iter->use;
+        LInstruction *ins = insData[iter->pos].ins();
+
+        bool isRegister = false;
+        switch (use->policy()) {
+          case LUse::ANY:
+            isRegister = isReusedInput(iter->use, ins);
+            break;
+
+          case LUse::REGISTER:
+          case LUse::FIXED:
+            isRegister = true;
+            break;
+
+          default:
+            break;
+        }
+
+        if (isRegister) {
+            // For register uses which are not useRegisterAtStart, pick an
+            // interval that covers both the instruction's input and output, so
+            // that the register is not reused for an output.
+            if (!registerUses.append(LiveInterval::Range(inputOf(ins), iter->pos.next())))
+                return false;
+        }
+    }
+
+    LiveIntervalVector newIntervals;
+
+    for (size_t i = 0; i < registerUses.length(); i++) {
+        // Watch for duplicate register use positions.
+        if (i > 0 && registerUses[i].from == registerUses[i - 1].from)
+            continue;
+        if (!addLiveInterval(newIntervals, vreg, registerUses[i].from, registerUses[i].to))
+            return false;
+    }
+
+    if (!addLiveInterval(newIntervals, vreg, spillStart, interval->end()))
+        return false;
+
+    return splitAndRequeueInterval(interval, newIntervals);
+}
+
+bool
+BacktrackingAllocator::chooseIntervalSplit(LiveInterval *interval)
+{
+    bool success = false;
+
+    if (!trySplitAcrossHotcode(interval, &success))
+        return false;
+    if (success)
+        return true;
+
+    if (!trySplitAfterLastRegisterUse(interval, &success))
+        return false;
+    if (success)
+        return true;
+
+    return splitAtAllRegisterUses(interval);
+}
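
The allocate / evict / split policy described in processInterval() above can be modeled in isolation. The sketch below is illustrative only: Interval, the two-register machine and the priority/weight formulas are simplified stand-ins for the patch's LiveInterval, PhysicalRegister and heuristic methods, and allocation failure falls back to a plain spill instead of chooseIntervalSplit().

#include <algorithm>
#include <cstdio>
#include <queue>
#include <vector>

// One live interval for a virtual register, with the two heuristics used
// above: priority (total length, longest processed first) and spill weight
// (use density, so cheap-to-evict intervals have the lowest weight).
struct Interval {
    int vreg;
    int start, end;   // half-open range [start, end>
    int uses;         // number of register uses inside the range
    int priority() const { return end - start; }
    int weight() const { return uses * 1000 / (end - start); }
};

static bool Overlaps(const Interval *a, const Interval *b) {
    return a->start < b->end && b->start < a->end;
}

struct LowerPriority {
    bool operator()(const Interval *a, const Interval *b) const {
        return a->priority() < b->priority();   // max-heap: highest priority first
    }
};

int main() {
    std::vector<Interval> pool = {
        {0, 0, 40, 2}, {1, 5, 15, 6}, {2, 10, 30, 9}, {3, 20, 26, 4},
    };

    const int NumRegs = 2;
    std::vector<Interval *> assigned[NumRegs];

    std::priority_queue<Interval *, std::vector<Interval *>, LowerPriority> queued;
    for (Interval &ival : pool)
        queued.push(&ival);

    while (!queued.empty()) {
        Interval *ival = queued.top();
        queued.pop();

        bool done = false;
        for (int r = 0; r < NumRegs && !done; r++) {
            // Collect intervals already holding this register that overlap ival.
            std::vector<Interval *> conflicts;
            for (Interval *other : assigned[r]) {
                if (Overlaps(ival, other))
                    conflicts.push_back(other);
            }

            // Only take the register if every conflict is cheaper to evict.
            bool canEvict = true;
            for (Interval *c : conflicts)
                canEvict = canEvict && c->weight() < ival->weight();
            if (!canEvict)
                continue;

            // Evict the cheaper conflicts back onto the queue, then allocate.
            for (Interval *c : conflicts) {
                assigned[r].erase(std::find(assigned[r].begin(), assigned[r].end(), c));
                queued.push(c);
                printf("evict v%d from r%d\n", c->vreg, r);
            }
            assigned[r].push_back(ival);
            printf("v%d -> r%d\n", ival->vreg, r);
            done = true;
        }

        // The real allocator would split the interval or assign a stack slot
        // here; this sketch just reports a spill.
        if (!done)
            printf("v%d spilled\n", ival->vreg);
    }
    return 0;
}

As in the allocator above, longer intervals are processed first and an interval may only evict conflicts whose spill weight is strictly lower than its own, so processing terminates; the real implementation additionally caps retries with MAX_ATTEMPTS and prefers splitting over outright spilling.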
new file mode 100644
--- /dev/null
+++ b/js/src/ion/BacktrackingAllocator.h
@@ -0,0 +1,199 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=99:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_ion_backtrackingallocator_h__
+#define js_ion_backtrackingallocator_h__
+
+#include "LiveRangeAllocator.h"
+
+#include "ds/PriorityQueue.h"
+#include "ds/SplayTree.h"
+
+// Backtracking priority-queue-based register allocator, modeled on the one
+// described in the following blog post:
+//
+// http://blog.llvm.org/2011/09/greedy-register-allocation-in-llvm-30.html
+
+namespace js {
+namespace ion {
+
+// Information about a group of registers. Registers may be grouped together
+// when (a) all of their lifetimes are disjoint, (b) they are of the same type
+// (double / non-double) and (c) it is desirable that they have the same
+// allocation.
+struct VirtualRegisterGroup : public TempObject
+{
+    // All virtual registers in the group.
+    Vector<uint32_t, 2, IonAllocPolicy> registers;
+
+    // Desired physical register to use for registers in the group.
+    LAllocation allocation;
+
+    // Spill location to be shared by registers in the group.
+    LAllocation spill;
+
+    VirtualRegisterGroup()
+      : allocation(LUse(0, LUse::ANY)), spill(LUse(0, LUse::ANY))
+    {}
+};
+
+class BacktrackingVirtualRegister : public VirtualRegister
+{
+    // If this register's definition is MUST_REUSE_INPUT, whether a copy must
+    // be introduced before the definition that relaxes the policy.
+    bool mustCopyInput_;
+
+    // Spill location to use for this register.
+    LAllocation canonicalSpill_;
+
+    // If this register is associated with a group of other registers,
+    // information about the group. This structure is shared between all
+    // registers in the group.
+    VirtualRegisterGroup *group_;
+
+  public:
+    void setMustCopyInput() {
+        mustCopyInput_ = true;
+    }
+    bool mustCopyInput() {
+        return mustCopyInput_;
+    }
+
+    void setCanonicalSpill(LAllocation alloc) {
+        canonicalSpill_ = alloc;
+    }
+    const LAllocation *canonicalSpill() const {
+        return canonicalSpill_.isStackSlot() ? &canonicalSpill_ : NULL;
+    }
+    unsigned canonicalSpillSlot() const {
+        return canonicalSpill_.toStackSlot()->slot();
+    }
+
+    void setGroup(VirtualRegisterGroup *group) {
+        group_ = group;
+    }
+    VirtualRegisterGroup *group() {
+        return group_;
+    }
+};
+
+class BacktrackingAllocator : public LiveRangeAllocator<BacktrackingVirtualRegister>
+{
+    // Priority queue element: an interval and its immutable priority.
+    struct QueuedInterval
+    {
+        LiveInterval *interval;
+
+        QueuedInterval(LiveInterval *interval, size_t priority)
+          : interval(interval), priority_(priority)
+        {}
+
+        static size_t priority(const QueuedInterval &v) {
+            return v.priority_;
+        }
+
+      private:
+        size_t priority_;
+    };
+
+    PriorityQueue<QueuedInterval, QueuedInterval, 0, SystemAllocPolicy> queuedIntervals;
+
+    // A subrange over which a physical register is allocated.
+    struct AllocatedRange {
+        LiveInterval *interval;
+        const LiveInterval::Range *range;
+
+        AllocatedRange()
+          : interval(NULL), range(NULL)
+        {}
+
+        AllocatedRange(LiveInterval *interval, const LiveInterval::Range *range)
+          : interval(interval), range(range)
+        {}
+
+        static int compare(const AllocatedRange &v0, const AllocatedRange &v1) {
+            // LiveInterval::Range includes 'from' but excludes 'to'.
+            if (v0.range->to.pos() <= v1.range->from.pos())
+                return -1;
+            if (v0.range->from.pos() >= v1.range->to.pos())
+                return 1;
+            return 0;
+        }
+    };
+
+    typedef SplayTree<AllocatedRange, AllocatedRange> AllocatedRangeSet;
+
+    // Each physical register is associated with the set of ranges over which
+    // that register is currently allocated.
+    struct PhysicalRegister {
+        bool allocatable;
+        AnyRegister reg;
+        AllocatedRangeSet allocations;
+
+        PhysicalRegister() : allocatable(false) {}
+    };
+    FixedArityList<PhysicalRegister, AnyRegister::Total> registers;
+
+    // Ranges of code which are considered to be hot, for which good allocation
+    // should be prioritized.
+    AllocatedRangeSet hotcode;
+
+  public:
+    BacktrackingAllocator(MIRGenerator *mir, LIRGenerator *lir, LIRGraph &graph)
+      : LiveRangeAllocator<BacktrackingVirtualRegister>(mir, lir, graph, /* forLSRA = */ false)
+    { }
+
+    bool go();
+
+  private:
+
+    typedef Vector<LiveInterval *, 4, SystemAllocPolicy> LiveIntervalVector;
+
+    bool init();
+    bool canAddToGroup(VirtualRegisterGroup *group, BacktrackingVirtualRegister *reg);
+    bool tryGroupRegisters(uint32_t vreg0, uint32_t vreg1);
+    bool groupAndQueueRegisters();
+    bool processInterval(LiveInterval *interval);
+    bool setIntervalRequirement(LiveInterval *interval);
+    bool tryAllocateRegister(PhysicalRegister &r, LiveInterval *interval,
+                             bool *success, LiveInterval **pconflicting);
+    bool evictInterval(LiveInterval *interval);
+    bool splitAndRequeueInterval(LiveInterval *interval,
+                                 const LiveIntervalVector &newIntervals);
+    void spill(LiveInterval *interval);
+
+    bool isReusedInput(LUse *use, LInstruction *ins, bool considerCopy = false);
+    bool addLiveInterval(LiveIntervalVector &intervals, uint32_t vreg,
+                         CodePosition from, CodePosition to);
+
+    bool resolveControlFlow();
+    bool reifyAllocations();
+
+    void dumpRegisterGroups();
+    void dumpLiveness();
+    void dumpAllocations();
+
+    CodePosition minimalDefEnd(LInstruction *ins);
+    bool minimalDef(const LiveInterval *interval, LInstruction *ins);
+    bool minimalUse(const LiveInterval *interval, LInstruction *ins);
+    bool minimalInterval(const LiveInterval *interval, bool *pfixed = NULL);
+
+    // Heuristic methods.
+
+    size_t computePriority(const LiveInterval *interval);
+    size_t computeSpillWeight(const LiveInterval *interval);
+
+    bool chooseIntervalSplit(LiveInterval *interval);
+    bool trySplitAcrossHotcode(LiveInterval *interval, bool *success);
+    bool trySplitAfterLastRegisterUse(LiveInterval *interval, bool *success);
+    bool splitAtAllRegisterUses(LiveInterval *interval);
+};
+
+} // namespace ion
+} // namespace js
+
+#endif
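
AllocatedRange::compare() above orders disjoint ranges by code position and reports overlapping ranges as equal, which is what lets each PhysicalRegister's SplayTree answer the collision query in tryAllocateRegister() with a single lookup. Below is a minimal standalone model of that idea, using a sorted vector instead of the patch's SplayTree; Range and RangeSet are illustrative stand-ins, not the patch's types.

#include <cstdio>
#include <vector>

struct Range {
    int from, to;   // half-open: [from, to>
};

// Same shape as AllocatedRange::compare(): order disjoint ranges by
// position, treat any overlap as equality.
static int Compare(const Range &a, const Range &b) {
    if (a.to <= b.from)
        return -1;
    if (a.from >= b.to)
        return 1;
    return 0;
}

struct RangeSet {
    std::vector<Range> sorted;   // pairwise disjoint, ordered by position

    bool contains(const Range &r, Range *existing = nullptr) const {
        size_t lo = 0, hi = sorted.size();
        while (lo < hi) {
            size_t mid = lo + (hi - lo) / 2;
            int c = Compare(sorted[mid], r);
            if (c == 0) {
                if (existing)
                    *existing = sorted[mid];
                return true;     // r overlaps an existing allocation
            }
            if (c < 0)
                lo = mid + 1;
            else
                hi = mid;
        }
        return false;
    }

    bool insert(const Range &r) {
        if (contains(r))
            return false;        // in this sketch, the caller must evict first
        size_t i = 0;
        while (i < sorted.size() && sorted[i].to <= r.from)
            i++;
        sorted.insert(sorted.begin() + i, r);
        return true;
    }
};

int main() {
    RangeSet regAllocations;
    regAllocations.insert({0, 10});
    regAllocations.insert({20, 30});

    Range existing;
    if (regAllocations.contains({8, 12}, &existing))
        printf("collides with [%d,%d>\n", existing.from, existing.to);
    if (!regAllocations.contains({10, 20}))
        printf("[10,20> is free\n");
    return 0;
}

In the allocator itself each stored range also carries the owning LiveInterval, so a collision can report the conflicting interval and its spill weight to the eviction logic.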
--- a/js/src/ion/C1Spewer.cpp
+++ b/js/src/ion/C1Spewer.cpp
@@ -112,17 +112,17 @@ C1Spewer::spewIntervals(FILE *fp, Linear
 {
     for (size_t k = 0; k < ins->numDefs(); k++) {
         VirtualRegister *vreg = &regalloc->vregs[ins->getDef(k)->virtualRegister()];
 
         for (size_t i = 0; i < vreg->numIntervals(); i++) {
             LiveInterval *live = vreg->getInterval(i);
             if (live->numRanges()) {
                 fprintf(fp, "%d object \"", (i == 0) ? vreg->id() : int32_t(nextId++));
-                LAllocation::PrintAllocation(fp, live->getAllocation());
+                fprintf(fp, "%s", live->getAllocation()->toString());
                 fprintf(fp, "\" %d -1", vreg->id());
                 for (size_t j = 0; j < live->numRanges(); j++) {
                     fprintf(fp, " [%d, %d[", live->getRange(j)->from.pos(),
                             live->getRange(j)->to.pos());
                 }
                 for (UsePositionIterator usePos(live->usesBegin()); usePos != live->usesEnd(); usePos++)
                     fprintf(fp, " %d M", usePos->pos.pos());
                 fprintf(fp, " \"\"\n");
--- a/js/src/ion/Ion.cpp
+++ b/js/src/ion/Ion.cpp
@@ -16,16 +16,17 @@
 #include "ValueNumbering.h"
 #include "EdgeCaseAnalysis.h"
 #include "RangeAnalysis.h"
 #include "LinearScan.h"
 #include "jscompartment.h"
 #include "jsworkers.h"
 #include "IonCompartment.h"
 #include "CodeGenerator.h"
+#include "BacktrackingAllocator.h"
 #include "StupidAllocator.h"
 #include "UnreachableCodeElimination.h"
 
 #if defined(JS_CPU_X86)
 # include "x86/Lowering-x86.h"
 #elif defined(JS_CPU_X64)
 # include "x64/Lowering-x64.h"
 #elif defined(JS_CPU_ARM)
@@ -994,16 +995,29 @@ CompileBackEnd(MIRGenerator *mir)
 #ifdef DEBUG
         integrity.check(false);
 #endif
 
         IonSpewPass("Allocate Registers [LSRA]", &regalloc);
         break;
       }
 
+      case RegisterAllocator_Backtracking: {
+        integrity.record();
+
+        BacktrackingAllocator regalloc(mir, &lirgen, *lir);
+        if (!regalloc.go())
+            return NULL;
+        if (!integrity.check(true))
+            return NULL;
+
+        IonSpewPass("Allocate Registers [Backtracking]");
+        break;
+      }
+
       case RegisterAllocator_Stupid: {
         // Use the integrity checker to populate safepoint information, so
         // run it in all builds.
         integrity.record();
 
         StupidAllocator regalloc(mir, &lirgen, *lir);
         if (!regalloc.go())
             return NULL;
--- a/js/src/ion/Ion.h
+++ b/js/src/ion/Ion.h
@@ -17,16 +17,17 @@
 namespace js {
 namespace ion {
 
 class TempAllocator;
 
 // Possible register allocators which may be used.
 enum IonRegisterAllocator {
     RegisterAllocator_LSRA,
+    RegisterAllocator_Backtracking,
     RegisterAllocator_Stupid
 };
 
 struct IonOptions
 {
     // Toggles whether global value numbering is used.
     //
     // Default: true
--- a/js/src/ion/JSONSpewer.cpp
+++ b/js/src/ion/JSONSpewer.cpp
@@ -400,19 +400,17 @@ JSONSpewer::spewIntervals(LinearScanAllo
                 beginListProperty("intervals");
 
                 for (size_t i = 0; i < vreg->numIntervals(); i++) {
                     LiveInterval *live = vreg->getInterval(i);
 
                     if (live->numRanges()) {
                         beginObject();
                         property("allocation");
-                        fprintf(fp_, "\"");
-                        LAllocation::PrintAllocation(fp_, live->getAllocation());
-                        fprintf(fp_, "\"");
+                        fprintf(fp_, "\"%s\"", live->getAllocation()->toString());
                         beginListProperty("ranges");
 
                         for (size_t j = 0; j < live->numRanges(); j++) {
                             beginObject();
                             integerProperty("start", live->getRange(j)->from.pos());
                             integerProperty("end", live->getRange(j)->to.pos());
                             endObject();
                         }
--- a/js/src/ion/LIR-Common.h
+++ b/js/src/ion/LIR-Common.h
@@ -831,17 +831,17 @@ class LTestIAndBranch : public LInstruct
         return ifTrue_;
     }
     MBasicBlock *ifFalse() const {
         return ifFalse_;
     }
 };
 
 // Takes in either an integer or boolean input and tests it for truthiness.
-class LTestDAndBranch : public LInstructionHelper<0, 1, 1>
+class LTestDAndBranch : public LInstructionHelper<0, 1, 0>
 {
     MBasicBlock *ifTrue_;
     MBasicBlock *ifFalse_;
 
   public:
     LIR_HEADER(TestDAndBranch)
 
     LTestDAndBranch(const LAllocation &in, MBasicBlock *ifTrue, MBasicBlock *ifFalse)
--- a/js/src/ion/LIR.cpp
+++ b/js/src/ion/LIR.cpp
@@ -198,82 +198,82 @@ static const char *TypeChars[] =
 
 static void
 PrintDefinition(FILE *fp, const LDefinition &def)
 {
     fprintf(fp, "[%s", TypeChars[def.type()]);
     if (def.virtualRegister())
         fprintf(fp, ":%d", def.virtualRegister());
     if (def.policy() == LDefinition::PRESET) {
-        fprintf(fp, " (");
-        LAllocation::PrintAllocation(fp, def.output());
-        fprintf(fp, ")");
+        fprintf(fp, " (%s)", def.output()->toString());
     } else if (def.policy() == LDefinition::MUST_REUSE_INPUT) {
         fprintf(fp, " (!)");
     } else if (def.policy() == LDefinition::PASSTHROUGH) {
         fprintf(fp, " (-)");
     }
     fprintf(fp, "]");
 }
 
 static void
-PrintUse(FILE *fp, const LUse *use)
+PrintUse(char *buf, size_t size, const LUse *use)
 {
-    fprintf(fp, "v%d:", use->virtualRegister());
     if (use->policy() == LUse::ANY) {
-        fprintf(fp, "*");
+        JS_snprintf(buf, size, "v%d:*", use->virtualRegister());
     } else if (use->policy() == LUse::REGISTER) {
-        fprintf(fp, "r");
+        JS_snprintf(buf, size, "v%d:r", use->virtualRegister());
     } else {
         // Unfortunately, we don't know here whether the virtual register is a
         // float or a double. Should we steal a bit in LUse for help? For now,
         // nothing defines any fixed xmm registers.
-        fprintf(fp, "%s", Registers::GetName(Registers::Code(use->registerCode())));
+        JS_snprintf(buf, size, "v%d:%s", use->virtualRegister(),
+                    Registers::GetName(Registers::Code(use->registerCode())));
     }
 }
 
-void
-LAllocation::PrintAllocation(FILE *fp, const LAllocation *a)
+#ifdef DEBUG
+const char *
+LAllocation::toString() const
 {
-    switch (a->kind()) {
+    // Not reentrant!
+    static char buf[40];
+
+    switch (kind()) {
       case LAllocation::CONSTANT_VALUE:
       case LAllocation::CONSTANT_INDEX:
-        fprintf(fp, "c");
-        break;
+        return "c";
       case LAllocation::GPR:
-        fprintf(fp, "=%s", a->toGeneralReg()->reg().name());
-        break;
+        JS_snprintf(buf, sizeof(buf), "=%s", toGeneralReg()->reg().name());
+        return buf;
       case LAllocation::FPU:
-        fprintf(fp, "=%s", a->toFloatReg()->reg().name());
-        break;
+        JS_snprintf(buf, sizeof(buf), "=%s", toFloatReg()->reg().name());
+        return buf;
       case LAllocation::STACK_SLOT:
-        fprintf(fp, "stack:i%d", a->toStackSlot()->slot());
-        break;
+        JS_snprintf(buf, sizeof(buf), "stack:i%d", toStackSlot()->slot());
+        return buf;
       case LAllocation::DOUBLE_SLOT:
-        fprintf(fp, "stack:d%d", a->toStackSlot()->slot());
-        break;
+        JS_snprintf(buf, sizeof(buf), "stack:d%d", toStackSlot()->slot());
+        return buf;
       case LAllocation::ARGUMENT:
-        fprintf(fp, "arg:%d", a->toArgument()->index());
-        break;
+        JS_snprintf(buf, sizeof(buf), "arg:%d", toArgument()->index());
+        return buf;
       case LAllocation::USE:
-        PrintUse(fp, a->toUse());
-        break;
+        PrintUse(buf, sizeof(buf), toUse());
+        return buf;
       default:
         JS_NOT_REACHED("what?");
-        break;
+        return "???";
     }
 }
+#endif // DEBUG
 
 void
 LInstruction::printOperands(FILE *fp)
 {
     for (size_t i = 0; i < numOperands(); i++) {
-        fprintf(fp, " (");
-        LAllocation::PrintAllocation(fp, getOperand(i));
-        fprintf(fp, ")");
+        fprintf(fp, " (%s)", getOperand(i)->toString());
         if (i != numOperands() - 1)
             fprintf(fp, ",");
     }
 }
 
 void
 LInstruction::assignSnapshot(LSnapshot *snapshot)
 {
@@ -363,21 +363,19 @@ LMoveGroup::addAfter(LAllocation *from, 
     return add(from, to);
 }
 
 void
 LMoveGroup::printOperands(FILE *fp)
 {
     for (size_t i = 0; i < numMoves(); i++) {
         const LMove &move = getMove(i);
-        fprintf(fp, "[");
-        LAllocation::PrintAllocation(fp, move.from());
-        fprintf(fp, " -> ");
-        LAllocation::PrintAllocation(fp, move.to());
-        fprintf(fp, "]");
+        // Use two printfs, as LAllocation::toString is not reentrant.
+        fprintf(fp, "[%s", move.from()->toString());
+        fprintf(fp, " -> %s]", move.to()->toString());
         if (i != numMoves() - 1)
             fprintf(fp, ", ");
     }
 }
 
 Label *
 LTestVAndBranch::ifTrue()
 {
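
[Editor's note] The LIR.cpp changes above replace PrintAllocation with LAllocation::toString, which formats into a single static buffer and is therefore not reentrant; that is why printOperands for move groups splits its output across two fprintf calls. A minimal standalone sketch of the pitfall, using a hypothetical toString with the same single-buffer scheme (illustrative only):

    #include <cstdio>

    // Hypothetical stand-in with the same single-static-buffer scheme.
    static const char *toString(int slot)
    {
        static char buf[40];            // shared by every call
        snprintf(buf, sizeof(buf), "stack:i%d", slot);
        return buf;
    }

    int main()
    {
        // Broken: both %s arguments receive the same pointer, so the same text
        // is printed twice (which value appears depends on evaluation order).
        printf("[%s -> %s]\n", toString(3), toString(7));

        // Safe: consume each result before making the next call.
        printf("[%s", toString(3));
        printf(" -> %s]\n", toString(7));
        return 0;
    }
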
--- a/js/src/ion/LIR.h
+++ b/js/src/ion/LIR.h
@@ -193,17 +193,21 @@ class LAllocation : public TempObject
     bool operator !=(const LAllocation &other) const {
         return bits_ != other.bits_;
     }
 
     HashNumber hash() const {
         return bits_;
     }
 
-    static void PrintAllocation(FILE *fp, const LAllocation *a);
+#ifdef DEBUG
+    const char *toString() const;
+#else
+    const char *toString() const { return "???"; }
+#endif
 };
 
 class LUse : public LAllocation
 {
     static const uint32_t POLICY_BITS = 3;
     static const uint32_t POLICY_SHIFT = 0;
     static const uint32_t POLICY_MASK = (1 << POLICY_BITS) - 1;
     static const uint32_t REG_BITS = 5;
@@ -1033,17 +1037,17 @@ class LSafepoint : public TempObject
     bool addNunboxParts(LAllocation type, LAllocation payload) {
         return nunboxParts_.append(NunboxEntry(type, payload));
     }
 
     bool addNunboxType(uint32_t typeVreg, LAllocation type) {
         for (size_t i = 0; i < nunboxParts_.length(); i++) {
             if (nunboxParts_[i].type == type)
                 return true;
-            if (nunboxParts_[i].type == LUse(LUse::ANY, typeVreg)) {
+            if (nunboxParts_[i].type == LUse(typeVreg, LUse::ANY)) {
                 nunboxParts_[i].type = type;
                 partialNunboxes_--;
                 return true;
             }
         }
         partialNunboxes_++;
 
         // vregs for nunbox pairs are adjacent, with the type coming first.
@@ -1062,17 +1066,17 @@ class LSafepoint : public TempObject
         }
         return false;
     }
 
     bool addNunboxPayload(uint32_t payloadVreg, LAllocation payload) {
         for (size_t i = 0; i < nunboxParts_.length(); i++) {
             if (nunboxParts_[i].payload == payload)
                 return true;
-            if (nunboxParts_[i].payload == LUse(LUse::ANY, payloadVreg)) {
+            if (nunboxParts_[i].payload == LUse(payloadVreg, LUse::ANY)) {
                 partialNunboxes_--;
                 nunboxParts_[i].payload = payload;
                 return true;
             }
         }
         partialNunboxes_++;
 
         // vregs for nunbox pairs are adjacent, with the type coming first.
--- a/js/src/ion/LinearScan.cpp
+++ b/js/src/ion/LinearScan.cpp
@@ -87,18 +87,18 @@ LinearScanAllocator::allocateRegisters()
     while ((current = unhandled.dequeue()) != NULL) {
         JS_ASSERT(current->getAllocation()->isUse());
         JS_ASSERT(current->numRanges() > 0);
 
         if (mir->shouldCancel("LSRA Allocate Registers (main loop)"))
             return false;
 
         CodePosition position = current->start();
-        Requirement *req = current->requirement();
-        Requirement *hint = current->hint();
+        const Requirement *req = current->requirement();
+        const Requirement *hint = current->hint();
 
         IonSpew(IonSpew_RegAlloc, "Processing %d = [%u, %u] (pri=%d)",
                 current->hasVreg() ? current->vreg() : 0, current->start().pos(),
                 current->end().pos(), current->requirement()->priority());
 
         // Shift active intervals to the inactive or handled sets as appropriate
         if (position != prevPosition) {
             JS_ASSERT(position > prevPosition);
@@ -770,28 +770,16 @@ LinearScanAllocator::assign(LAllocation 
         }
     }
 
     active.pushBack(current);
 
     return true;
 }
 
-#ifdef JS_NUNBOX32
-LinearScanVirtualRegister *
-LinearScanAllocator::otherHalfOfNunbox(VirtualRegister *vreg)
-{
-    signed offset = OffsetToOtherHalfOfNunbox(vreg->type());
-    LinearScanVirtualRegister *other = &vregs[vreg->def()->virtualRegister() + offset];
-    AssertTypesFormANunbox(vreg->type(), other->type());
-    return other;
-}
-#endif
-
-
 uint32_t
 LinearScanAllocator::allocateSlotFor(const LiveInterval *interval)
 {
     LinearScanVirtualRegister *reg = &vregs[interval->vreg()];
 
     SlotList *freed;
     if (reg->type() == LDefinition::DOUBLE || IsNunbox(reg))
         freed = &finishedDoubleSlots_;
@@ -986,17 +974,17 @@ LinearScanAllocator::findBestFreeRegiste
         if (previous->getAllocation()->isRegister()) {
             AnyRegister prevReg = previous->getAllocation()->toRegister();
             if (freeUntilPos[prevReg.code()] != CodePosition::MIN)
                 bestCode = prevReg.code();
         }
     }
 
     // Assign the register suggested by the hint if it's free.
-    Requirement *hint = current->hint();
+    const Requirement *hint = current->hint();
     if (hint->kind() == Requirement::FIXED && hint->allocation().isRegister()) {
         AnyRegister hintReg = hint->allocation().toRegister();
         if (freeUntilPos[hintReg.code()] > hint->pos())
             bestCode = hintReg.code();
     } else if (hint->kind() == Requirement::SAME_AS_OTHER) {
         LiveInterval *other = vregs[hint->virtualRegister()].intervalFor(hint->pos());
         if (other && other->getAllocation()->isRegister()) {
             AnyRegister hintReg = other->getAllocation()->toRegister();
@@ -1107,38 +1095,16 @@ LinearScanAllocator::canCoexist(LiveInte
 {
     LAllocation *aa = a->getAllocation();
     LAllocation *ba = b->getAllocation();
     if (aa->isRegister() && ba->isRegister() && aa->toRegister() == ba->toRegister())
         return a->intersect(b) == CodePosition::MIN;
     return true;
 }
 
-bool
-LinearScanAllocator::addMove(LMoveGroup *moves, LiveInterval *from, LiveInterval *to)
-{
-    if (*from->getAllocation() == *to->getAllocation())
-        return true;
-    return moves->add(from->getAllocation(), to->getAllocation());
-}
-
-bool
-LinearScanAllocator::moveInput(CodePosition pos, LiveInterval *from, LiveInterval *to)
-{
-    LMoveGroup *moves = getInputMoveGroup(pos);
-    return addMove(moves, from, to);
-}
-
-bool
-LinearScanAllocator::moveAfter(CodePosition pos, LiveInterval *from, LiveInterval *to)
-{
-    LMoveGroup *moves = getMoveGroupAfter(pos);
-    return addMove(moves, from, to);
-}
-
 #ifdef DEBUG
 /*
  * Ensure intervals appear in exactly the appropriate one of {active,inactive,
  * handled}, and that active and inactive intervals do not conflict. Handled
  * intervals are checked for conflicts in validateAllocations for performance
  * reasons.
  */
 void
--- a/js/src/ion/LinearScan.h
+++ b/js/src/ion/LinearScan.h
@@ -5,17 +5,16 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef js_ion_linearscan_h__
 #define js_ion_linearscan_h__
 
 #include "LiveRangeAllocator.h"
 #include "BitSet.h"
-#include "StackSlotAllocator.h"
 
 #include "js/Vector.h"
 
 namespace js {
 namespace ion {
 
 class LinearScanVirtualRegister : public VirtualRegister
 {
@@ -74,19 +73,16 @@ class LinearScanAllocator : public LiveR
         void enqueueBackward(LiveInterval *interval);
         void enqueueAtHead(LiveInterval *interval);
 
         void assertSorted();
 
         LiveInterval *dequeue();
     };
 
-    // Allocation state
-    StackSlotAllocator stackSlotAllocator;
-
     typedef Vector<LiveInterval *, 0, SystemAllocPolicy> SlotList;
     SlotList finishedSlots_;
     SlotList finishedDoubleSlots_;
 
     // Run-time state
     UnhandledQueue unhandled;
     InlineList<LiveInterval> active;
     InlineList<LiveInterval> inactive;
@@ -107,40 +103,33 @@ class LinearScanAllocator : public LiveR
     bool splitBlockingIntervals(LAllocation allocation);
     bool assign(LAllocation allocation);
     bool spill();
     void freeAllocation(LiveInterval *interval, LAllocation *alloc);
     void finishInterval(LiveInterval *interval);
     AnyRegister::Code findBestFreeRegister(CodePosition *freeUntil);
     AnyRegister::Code findBestBlockedRegister(CodePosition *nextUsed);
     bool canCoexist(LiveInterval *a, LiveInterval *b);
-    bool addMove(LMoveGroup *moves, LiveInterval *from, LiveInterval *to);
-    bool moveInput(CodePosition pos, LiveInterval *from, LiveInterval *to);
     bool moveInputAlloc(CodePosition pos, LAllocation *from, LAllocation *to);
-    bool moveAfter(CodePosition pos, LiveInterval *from, LiveInterval *to);
     void setIntervalRequirement(LiveInterval *interval);
     size_t findFirstSafepoint(LiveInterval *interval, size_t firstSafepoint);
     size_t findFirstNonCallSafepoint(CodePosition from);
     bool isSpilledAt(LiveInterval *interval, CodePosition pos);
 
 #ifdef DEBUG
     void validateIntervals();
     void validateAllocations();
 #else
     inline void validateIntervals() { }
     inline void validateAllocations() { }
 #endif
 
-#ifdef JS_NUNBOX32
-    LinearScanVirtualRegister *otherHalfOfNunbox(VirtualRegister *vreg);
-#endif
-
   public:
     LinearScanAllocator(MIRGenerator *mir, LIRGenerator *lir, LIRGraph &graph)
-      : LiveRangeAllocator<LinearScanVirtualRegister>(mir, lir, graph)
+      : LiveRangeAllocator<LinearScanVirtualRegister>(mir, lir, graph, /* forLSRA = */ true)
     {
     }
 
     bool go();
 };
 
 } // namespace ion
 } // namespace js
--- a/js/src/ion/LiveRangeAllocator.cpp
+++ b/js/src/ion/LiveRangeAllocator.cpp
@@ -2,16 +2,17 @@
  * vim: set ts=4 sw=4 et tw=99:
  *
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "LiveRangeAllocator.h"
 
+#include "BacktrackingAllocator.h"
 #include "LinearScan.h"
 
 using namespace js;
 using namespace js::ion;
 
 using mozilla::DebugOnly;
 
 int
@@ -29,16 +30,53 @@ Requirement::priority() const
 
       default:
         JS_NOT_REACHED("Unknown requirement kind.");
         return -1;
     }
 }
 
 bool
+LiveInterval::Range::contains(const Range *other) const
+{
+    Range pre, inside, post;
+    intersect(other, &pre, &inside, &post);
+    return inside.from == other->from && inside.to == other->to;
+}
+
+void
+LiveInterval::Range::intersect(const Range *other, Range *pre, Range *inside, Range *post) const
+{
+    JS_ASSERT(pre->empty() && inside->empty() && post->empty());
+
+    CodePosition innerFrom = from;
+    if (from < other->from) {
+        if (to < other->from) {
+            *pre = Range(from, to);
+            return;
+        }
+        *pre = Range(from, other->from);
+        innerFrom = other->from;
+    }
+
+    CodePosition innerTo = to;
+    if (to > other->to) {
+        if (from >= other->to) {
+            *post = Range(from, to);
+            return;
+        }
+        *post = Range(other->to, to);
+        innerTo = other->to;
+    }
+
+    if (innerFrom != innerTo)
+        *inside = Range(innerFrom, innerTo);
+}
+
+bool
 LiveInterval::addRangeAtHead(CodePosition from, CodePosition to)
 {
     JS_ASSERT(from < to);
 
     Range newRange(from, to);
 
     if (ranges_.empty())
         return ranges_.append(newRange);
@@ -311,18 +349,21 @@ VirtualRegister::getFirstInterval()
     JS_ASSERT(!intervals_.empty());
     return intervals_[0];
 }
 
 // Dummy function to instantiate LiveRangeAllocator for each template instance.
 void
 EnsureLiveRangeAllocatorInstantiation(MIRGenerator *mir, LIRGenerator *lir, LIRGraph &graph)
 {
-    LiveRangeAllocator<LinearScanVirtualRegister> allocator(mir, lir, graph);
-    allocator.buildLivenessInfo();
+    LiveRangeAllocator<LinearScanVirtualRegister> lsra(mir, lir, graph, true);
+    lsra.buildLivenessInfo();
+
+    LiveRangeAllocator<BacktrackingVirtualRegister> backtracking(mir, lir, graph, false);
+    backtracking.buildLivenessInfo();
 }
 
 #ifdef DEBUG
 static inline bool
 NextInstructionHasFixedUses(LBlock *block, LInstruction *ins)
 {
     LInstructionIterator iter(block->begin(ins));
     iter++;
@@ -494,41 +535,54 @@ LiveRangeAllocator<VREG>::buildLivenessI
         }
 
         // Shorten the front end of live intervals for live variables to their
         // point of definition, if found.
         for (LInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) {
             // Calls may clobber registers, so force a spill and reload around the callsite.
             if (ins->isCall()) {
                 for (AnyRegisterIterator iter(allRegisters_); iter.more(); iter++) {
-                    if (!addFixedRangeAtHead(*iter, inputOf(*ins), outputOf(*ins)))
-                        return false;
+                    if (forLSRA) {
+                        if (!addFixedRangeAtHead(*iter, inputOf(*ins), outputOf(*ins)))
+                            return false;
+                    } else {
+                        bool found = false;
+                        for (size_t i = 0; i < ins->numDefs(); i++) {
+                            if (ins->getDef(i)->isPreset() &&
+                                *ins->getDef(i)->output() == LAllocation(*iter)) {
+                                found = true;
+                                break;
+                            }
+                        }
+                        if (!found && !addFixedRangeAtHead(*iter, outputOf(*ins), outputOf(*ins).next()))
+                            return false;
+                    }
                 }
             }
 
             for (size_t i = 0; i < ins->numDefs(); i++) {
                 if (ins->getDef(i)->policy() != LDefinition::PASSTHROUGH) {
                     LDefinition *def = ins->getDef(i);
 
                     CodePosition from;
-                    if (def->policy() == LDefinition::PRESET && def->output()->isRegister()) {
+                    if (def->policy() == LDefinition::PRESET && def->output()->isRegister() && forLSRA) {
                         // The fixed range covers the current instruction so the
                         // interval for the virtual register starts at the next
                         // instruction. If the next instruction has a fixed use,
                         // this can lead to unnecessary register moves. To avoid
                         // special handling for this, assert the next instruction
                         // has no fixed uses. defineFixed guarantees this by inserting
                         // an LNop.
                         JS_ASSERT(!NextInstructionHasFixedUses(block, *ins));
                         AnyRegister reg = def->output()->toRegister();
                         if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins).next()))
                             return false;
                         from = outputOf(*ins).next();
                     } else {
-                        from = inputOf(*ins);
+                        from = forLSRA ? inputOf(*ins) : outputOf(*ins);
                     }
 
                     if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
                         // MUST_REUSE_INPUT is implemented by allocating an output
                         // register and moving the input to it. Register hints are
                         // used to avoid unnecessary moves. We give the input an
                         // LUse::ANY policy to avoid allocating a register for the
                         // input.
@@ -550,27 +604,32 @@ LiveRangeAllocator<VREG>::buildLivenessI
                     live->remove(def->virtualRegister());
                 }
             }
 
             for (size_t i = 0; i < ins->numTemps(); i++) {
                 LDefinition *temp = ins->getTemp(i);
                 if (temp->isBogusTemp())
                     continue;
-                if (ins->isCall()) {
-                    JS_ASSERT(temp->isPreset());
-                    continue;
-                }
 
-                if (temp->policy() == LDefinition::PRESET) {
-                    AnyRegister reg = temp->output()->toRegister();
-                    if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins)))
-                        return false;
+                if (forLSRA) {
+                    if (temp->policy() == LDefinition::PRESET) {
+                        if (ins->isCall())
+                            continue;
+                        AnyRegister reg = temp->output()->toRegister();
+                        if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins)))
+                            return false;
+                    } else {
+                        JS_ASSERT(!ins->isCall());
+                        if (!vregs[temp].getInterval(0)->addRangeAtHead(inputOf(*ins), outputOf(*ins)))
+                            return false;
+                    }
                 } else {
-                    if (!vregs[temp].getInterval(0)->addRangeAtHead(inputOf(*ins), outputOf(*ins)))
+                    CodePosition to = ins->isCall() ? outputOf(*ins) : outputOf(*ins).next();
+                    if (!vregs[temp].getInterval(0)->addRangeAtHead(inputOf(*ins), to))
                         return false;
                 }
             }
 
             DebugOnly<bool> hasUseRegister = false;
             DebugOnly<bool> hasUseRegisterAtStart = false;
 
             for (LInstruction::InputIterator alloc(**ins); alloc.more(); alloc.next()) {
@@ -608,28 +667,40 @@ LiveRangeAllocator<VREG>::buildLivenessI
                     JS_ASSERT(!(hasUseRegister && hasUseRegisterAtStart));
 #endif
 
                     // Don't treat RECOVERED_INPUT uses as keeping the vreg alive.
                     if (use->policy() == LUse::RECOVERED_INPUT)
                         continue;
 
                     CodePosition to;
-                    if (use->isFixedRegister()) {
-                        JS_ASSERT(!use->usedAtStart());
-                        AnyRegister reg = GetFixedRegister(vregs[use].def(), use);
-                        if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins)))
-                            return false;
-                        to = inputOf(*ins);
+                    if (forLSRA) {
+                        if (use->isFixedRegister()) {
+                            JS_ASSERT(!use->usedAtStart());
+                            AnyRegister reg = GetFixedRegister(vregs[use].def(), use);
+                            if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins)))
+                                return false;
+                            to = inputOf(*ins);
+                        } else {
+                            to = use->usedAtStart() ? inputOf(*ins) : outputOf(*ins);
+                        }
                     } else {
-                        to = use->usedAtStart() ? inputOf(*ins) : outputOf(*ins);
+                        to = (use->usedAtStart() || ins->isCall()) ? inputOf(*ins) : outputOf(*ins);
+                        if (use->isFixedRegister()) {
+                            LAllocation reg(AnyRegister::FromCode(use->registerCode()));
+                            for (size_t i = 0; i < ins->numDefs(); i++) {
+                                LDefinition *def = ins->getDef(i);
+                                if (def->policy() == LDefinition::PRESET && *def->output() == reg)
+                                    to = inputOf(*ins);
+                            }
+                        }
                     }
 
                     LiveInterval *interval = vregs[use].getInterval(0);
-                    if (!interval->addRangeAtHead(inputOf(block->firstId()), to))
+                    if (!interval->addRangeAtHead(inputOf(block->firstId()), forLSRA ? to : to.next()))
                         return false;
                     interval->addUse(new UsePosition(use, to));
 
                     live->insert(use->virtualRegister());
                 }
             }
         }
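
[Editor's note] To make the new Range::intersect contract concrete: it carves this range into the pieces lying before, inside, and after the other range, leaving the outputs for nonexistent pieces empty. A worked example under a simplified copy of the same logic (plain integers instead of CodePositions, illustrative only):

    #include <cassert>
    #include <cstdint>

    // Simplified half-open range [from, to) mirroring the intersect() logic
    // added to LiveInterval::Range.
    struct Range {
        uint32_t from, to;
        Range() : from(0), to(0) {}
        Range(uint32_t f, uint32_t t) : from(f), to(t) {}
        bool empty() const { return from >= to; }
    };

    static void intersect(const Range &self, const Range &other,
                          Range *pre, Range *inside, Range *post)
    {
        uint32_t innerFrom = self.from;
        if (self.from < other.from) {
            if (self.to < other.from) {
                *pre = self;                       // entirely before other
                return;
            }
            *pre = Range(self.from, other.from);
            innerFrom = other.from;
        }

        uint32_t innerTo = self.to;
        if (self.to > other.to) {
            if (self.from >= other.to) {
                *post = self;                      // entirely after other
                return;
            }
            *post = Range(other.to, self.to);
            innerTo = other.to;
        }

        if (innerFrom != innerTo)
            *inside = Range(innerFrom, innerTo);
    }

    int main()
    {
        // [4, 12) intersected with [8, 20): [4, 8) falls before, [8, 12)
        // inside, and nothing after.
        Range pre, inside, post;
        intersect(Range(4, 12), Range(8, 20), &pre, &inside, &post);
        assert(pre.from == 4 && pre.to == 8);
        assert(inside.from == 8 && inside.to == 12);
        assert(post.empty());
        return 0;
    }
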
 
--- a/js/src/ion/LiveRangeAllocator.h
+++ b/js/src/ion/LiveRangeAllocator.h
@@ -4,16 +4,17 @@
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef js_ion_liverangeallocator_h__
 #define js_ion_liverangeallocator_h__
 
 #include "RegisterAllocator.h"
+#include "StackSlotAllocator.h"
 
 // Common structures and functions used by register allocators that operate on
 // virtual register live ranges.
 
 namespace js {
 namespace ion {
 
 class Requirement
@@ -162,26 +163,41 @@ class LiveInterval
     public TempObject
 {
   public:
     /*
      * A range is a contiguous sequence of CodePositions where the virtual
      * register associated with this interval is live.
      */
     struct Range {
+        Range()
+          : from(),
+            to()
+        { }
         Range(CodePosition f, CodePosition t)
           : from(f),
             to(t)
         {
             JS_ASSERT(from < to);
         }
         CodePosition from;
 
         // The end of this range, exclusive.
         CodePosition to;
+
+        bool empty() const {
+            return from >= to;
+        }
+
+        // Whether this range wholly contains other.
+        bool contains(const Range *other) const;
+
+        // Intersect this range with other, returning the subranges of this
+        // that are before, inside, or after other.
+        void intersect(const Range *other, Range *pre, Range *inside, Range *post) const;
     };
 
   private:
     Vector<Range, 1, IonAllocPolicy> ranges_;
     LAllocation alloc_;
     uint32_t vreg_;
     uint32_t index_;
     Requirement requirement_;
@@ -252,32 +268,45 @@ class LiveInterval
         return vreg_;
     }
     uint32_t index() const {
         return index_;
     }
     void setIndex(uint32_t index) {
         index_ = index;
     }
-    Requirement *requirement() {
+    const Requirement *requirement() const {
         return &requirement_;
     }
     void setRequirement(const Requirement &requirement) {
         // A SAME_AS_OTHER requirement complicates regalloc too much; it
         // should only be used as hint.
         JS_ASSERT(requirement.kind() != Requirement::SAME_AS_OTHER);
-
-        // Fixed registers are handled with fixed intervals, so fixed requirements
-        // are only valid for non-register allocations.f
-        JS_ASSERT_IF(requirement.kind() == Requirement::FIXED,
-                     !requirement.allocation().isRegister());
-
         requirement_ = requirement;
     }
-    Requirement *hint() {
+    bool addRequirement(const Requirement &newRequirement) {
+        // Merge newRequirement with any existing requirement, returning false
+        // if the new and old requirements conflict.
+        JS_ASSERT(newRequirement.kind() != Requirement::SAME_AS_OTHER);
+
+        if (newRequirement.kind() == Requirement::FIXED) {
+            if (requirement_.kind() == Requirement::FIXED)
+                return newRequirement.allocation() == requirement_.allocation();
+            requirement_ = newRequirement;
+            return true;
+        }
+
+        JS_ASSERT(newRequirement.kind() == Requirement::REGISTER);
+        if (requirement_.kind() == Requirement::FIXED)
+            return requirement_.allocation().isRegister();
+
+        requirement_ = newRequirement;
+        return true;
+    }
+    const Requirement *hint() const {
         return &hint_;
     }
     void setHint(const Requirement &hint) {
         hint_ = hint;
     }
     bool isSpill() const {
         return alloc_.isStackSlot();
     }
@@ -314,16 +343,17 @@ class VirtualRegister
     LDefinition *def_;
     Vector<LiveInterval *, 1, IonAllocPolicy> intervals_;
 
     // Whether def_ is a temp or an output.
     bool isTemp_ : 1;
 
   public:
     bool init(uint32_t id, LBlock *block, LInstruction *ins, LDefinition *def, bool isTemp) {
+        JS_ASSERT(block && !block_);
         id_ = id;
         block_ = block;
         ins_ = ins;
         def_ = def;
         isTemp_ = isTemp;
         LiveInterval *initial = new LiveInterval(def->virtualRegister(), 0);
         if (!initial)
             return false;
@@ -352,30 +382,36 @@ class VirtualRegister
     }
     LiveInterval *getInterval(size_t i) const {
         return intervals_[i];
     }
     LiveInterval *lastInterval() const {
         JS_ASSERT(numIntervals() > 0);
         return getInterval(numIntervals() - 1);
     }
+    void replaceInterval(LiveInterval *old, LiveInterval *interval) {
+        JS_ASSERT(intervals_[old->index()] == old);
+        interval->setIndex(old->index());
+        intervals_[old->index()] = interval;
+    }
     bool addInterval(LiveInterval *interval) {
         JS_ASSERT(interval->numRanges());
 
         // Preserve ascending order for faster lookups.
         LiveInterval **found = NULL;
         LiveInterval **i;
         for (i = intervals_.begin(); i != intervals_.end(); i++) {
             if (!found && interval->start() < (*i)->start())
                 found = i;
             if (found)
                 (*i)->setIndex((*i)->index() + 1);
         }
         if (!found)
             found = intervals_.end();
+        interval->setIndex(found - intervals_.begin());
         return intervals_.insert(found, interval);
     }
     bool isDouble() const {
         return def_->type() == LDefinition::DOUBLE;
     }
 
     LiveInterval *intervalFor(CodePosition pos);
     LiveInterval *getFirstInterval();
@@ -464,21 +500,32 @@ class LiveRangeAllocator : public Regist
     BitSet **liveIn;
     VirtualRegisterMap<VREG> vregs;
     FixedArityList<LiveInterval *, AnyRegister::Total> fixedIntervals;
 
     // Union of all ranges in fixedIntervals, used to quickly determine
     // whether an interval intersects with a fixed register.
     LiveInterval *fixedIntervalsUnion;
 
+    // Whether the underlying allocator is LSRA. This changes the generated
+    // live ranges in various ways: inserting additional fixed uses of
+    // registers, and shifting the boundaries of live ranges by small amounts.
+    // This exists because different allocators handle live ranges differently;
+    // ideally, they would all treat live ranges in the same way.
+    bool forLSRA;
+
+    // Allocation state
+    StackSlotAllocator stackSlotAllocator;
+
   public:
-    LiveRangeAllocator(MIRGenerator *mir, LIRGenerator *lir, LIRGraph &graph)
+    LiveRangeAllocator(MIRGenerator *mir, LIRGenerator *lir, LIRGraph &graph, bool forLSRA)
       : RegisterAllocator(mir, lir, graph),
         liveIn(NULL),
-        fixedIntervalsUnion(NULL)
+        fixedIntervalsUnion(NULL),
+        forLSRA(forLSRA)
     {
     }
 
     bool buildLivenessInfo();
 
   protected:
     bool init();
 
@@ -506,14 +553,39 @@ class LiveRangeAllocator : public Regist
                 JS_ASSERT_IF(prev, prev->end() <= interval->start());
                 interval->validateRanges();
 
                 prev = interval;
             }
         }
 #endif
     }
+
+#ifdef JS_NUNBOX32
+    VREG *otherHalfOfNunbox(VirtualRegister *vreg) {
+        signed offset = OffsetToOtherHalfOfNunbox(vreg->type());
+        VREG *other = &vregs[vreg->def()->virtualRegister() + offset];
+        AssertTypesFormANunbox(vreg->type(), other->type());
+        return other;
+    }
+#endif
+
+    bool addMove(LMoveGroup *moves, LiveInterval *from, LiveInterval *to) {
+        if (*from->getAllocation() == *to->getAllocation())
+            return true;
+        return moves->add(from->getAllocation(), to->getAllocation());
+    }
+
+    bool moveInput(CodePosition pos, LiveInterval *from, LiveInterval *to) {
+        LMoveGroup *moves = getInputMoveGroup(pos);
+        return addMove(moves, from, to);
+    }
+
+    bool moveAfter(CodePosition pos, LiveInterval *from, LiveInterval *to) {
+        LMoveGroup *moves = getMoveGroupAfter(pos);
+        return addMove(moves, from, to);
+    }
 };
 
 } // namespace ion
 } // namespace js
 
 #endif
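
[Editor's note] The new LiveInterval::addRequirement above merges an incoming FIXED or REGISTER requirement into whatever the interval already carries, reporting a conflict by returning false: FIXED onto FIXED succeeds only for the identical allocation, REGISTER onto FIXED succeeds only if the fixed allocation is a register, and anything else overwrites the weaker requirement. A minimal sketch of those rules with hypothetical, simplified types (not the real Requirement class):

    #include <cassert>

    // Hypothetical, simplified requirement model mirroring the merge rules.
    enum Kind { NONE, REGISTER, FIXED };

    struct Req {
        Kind kind;
        int fixedReg;    // meaningful only when kind == FIXED; -1 means a non-register (e.g. stack) allocation
        bool fixedIsRegister() const { return fixedReg >= 0; }
    };

    static bool addRequirement(Req *current, const Req &incoming)
    {
        if (incoming.kind == FIXED) {
            if (current->kind == FIXED)
                return current->fixedReg == incoming.fixedReg;   // must agree exactly
            *current = incoming;
            return true;
        }

        // incoming.kind == REGISTER
        if (current->kind == FIXED)
            return current->fixedIsRegister();                   // any register satisfies REGISTER
        *current = incoming;
        return true;
    }

    int main()
    {
        Req r = { NONE, -1 };
        assert(addRequirement(&r, Req{ REGISTER, -1 }));   // empty -> REGISTER
        assert(addRequirement(&r, Req{ FIXED, 3 }));       // REGISTER -> FIXED r3
        assert(addRequirement(&r, Req{ REGISTER, -1 }));   // FIXED r3 already satisfies REGISTER
        assert(!addRequirement(&r, Req{ FIXED, 5 }));      // conflicting fixed registers
        return 0;
    }
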
--- a/js/src/ion/RegisterAllocator.cpp
+++ b/js/src/ion/RegisterAllocator.cpp
@@ -58,33 +58,45 @@ AllocationIntegrityState::record()
             LInstruction *ins = *iter;
             InstructionInfo &info = instructions[ins->id()];
 
             for (size_t k = 0; k < ins->numDefs(); k++) {
                 uint32_t vreg = ins->getDef(k)->virtualRegister();
                 virtualRegisters[vreg] = ins->getDef(k);
                 if (!info.outputs.append(vreg))
                     return false;
+                if (ins->getDef(k)->policy() == LDefinition::MUST_REUSE_INPUT) {
+                    JS_ASSERT(k == 0);
+                    info.reusedInput = ins->getDef(k)->getReusedInput();
+                }
             }
             for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
-                if (!info.inputs.append(alloc->isUse() ? alloc->toUse()->virtualRegister() : UINT32_MAX))
-                    return false;
+                if (alloc->isUse() && alloc->toUse()->policy() != LUse::RECOVERED_INPUT) {
+                    if (!info.inputs.append(alloc->toUse()->virtualRegister()))
+                        return false;
+                } else {
+                    if (!info.inputs.append(UINT32_MAX))
+                        return false;
+                }
             }
         }
     }
 
     return seen.init();
 }
 
 bool
 AllocationIntegrityState::check(bool populateSafepoints)
 {
     JS_ASSERT(!instructions.empty());
 
 #ifdef DEBUG
+    if (IonSpewEnabled(IonSpew_RegAlloc))
+        dump();
+
     for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
         LBlock *block = graph.getBlock(blockIndex);
 
         // Check that all instruction inputs and outputs have been assigned an allocation.
         for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
             LInstruction *ins = *iter;
 
             for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next())
@@ -95,16 +107,25 @@ AllocationIntegrityState::check(bool pop
                 JS_ASSERT_IF(def->policy() != LDefinition::PASSTHROUGH, !def->output()->isUse());
 
                 if (def->output()->isRegister()) {
                     // The live regs for an instruction's safepoint should
                     // exclude the instruction's definitions.
                     LSafepoint *safepoint = ins->safepoint();
                     JS_ASSERT_IF(safepoint, !safepoint->liveRegs().has(def->output()->toRegister()));
                 }
+
+                uint32_t reusedInput = instructions[ins->id()].reusedInput;
+                JS_ASSERT_IF(reusedInput != UINT32_MAX,
+                             *def->output() == *ins->getOperand(reusedInput));
+            }
+
+            for (size_t i = 0; i < ins->numTemps(); i++) {
+                LDefinition *temp = ins->getTemp(i);
+                JS_ASSERT_IF(!temp->isBogusTemp(), temp->output()->isRegister());
             }
         }
     }
 #endif
 
     // Check that the register assignment and move groups preserve the original
     // semantics of the virtual registers. Each virtual register has a single
     // write (owing to the SSA representation), but the allocation may move the
@@ -136,19 +157,16 @@ AllocationIntegrityState::check(bool pop
                     IntegrityItem item = worklist.back();
                     worklist.popBack();
                     checkIntegrity(item.block, *item.block->rbegin(), item.vreg, item.alloc, populateSafepoints);
                 }
             }
         }
     }
 
-    if (IonSpewEnabled(IonSpew_RegAlloc))
-        dump();
-
     return true;
 }
 
 bool
 AllocationIntegrityState::checkIntegrity(LBlock *block, LInstruction *ins,
                                          uint32_t vreg, LAllocation alloc, bool populateSafepoints)
 {
     for (LInstructionReverseIterator iter(block->rbegin(ins)); iter != block->rend(); iter++) {
@@ -173,76 +191,85 @@ AllocationIntegrityState::checkIntegrity
         // another instruction, and that if the originating vreg definition is
         // found that it is writing to the tracked location.
 
         for (size_t i = 0; i < ins->numDefs(); i++) {
             LDefinition *def = ins->getDef(i);
             if (def->policy() == LDefinition::PASSTHROUGH)
                 continue;
             if (info.outputs[i] == vreg) {
-                check(*def->output() == alloc,
-                      "Found vreg definition, but tracked value does not match");
+                JS_ASSERT(*def->output() == alloc);
 
                 // Found the original definition, done scanning.
                 return true;
             } else {
-                check(*def->output() != alloc,
-                      "Tracked value clobbered by intermediate definition");
+                JS_ASSERT(*def->output() != alloc);
             }
         }
 
         for (size_t i = 0; i < ins->numTemps(); i++) {
             LDefinition *temp = ins->getTemp(i);
-            if (!temp->isBogusTemp()) {
-                check(*temp->output() != alloc,
-                      "Tracked value clobbered by intermediate temporary");
-            }
+            if (!temp->isBogusTemp())
+                JS_ASSERT(*temp->output() != alloc);
         }
 
         LSafepoint *safepoint = ins->safepoint();
         if (!safepoint)
             continue;
 
         if (alloc.isRegister()) {
             AnyRegister reg = alloc.toRegister();
             if (populateSafepoints)
                 safepoint->addLiveRegister(reg);
             else
-                check(safepoint->liveRegs().has(reg), "Register not marked in safepoint");
+                JS_ASSERT(safepoint->liveRegs().has(reg));
         }
 
         LDefinition::Type type = virtualRegisters[vreg]
                                  ? virtualRegisters[vreg]->type()
                                  : LDefinition::GENERAL;
 
         switch (type) {
           case LDefinition::OBJECT:
-            if (populateSafepoints)
+            if (populateSafepoints) {
+                IonSpew(IonSpew_RegAlloc, "Safepoint object v%u i%u %s",
+                        vreg, ins->id(), alloc.toString());
                 safepoint->addGcPointer(alloc);
-            else
-                check(safepoint->hasGcPointer(alloc), "GC register not marked in safepoint");
+            } else {
+                JS_ASSERT(safepoint->hasGcPointer(alloc));
+            }
             break;
 #ifdef JS_NUNBOX32
-          // If a vreg for a value's components are copied in multiple places
+          // Do not assert that safepoint information for nunboxes is complete,
+          // as if a vreg for a value's components is copied in multiple places
           // then the safepoint information may be incomplete and not reflect
           // all copies. See SafepointWriter::writeNunboxParts.
           case LDefinition::TYPE:
-            if (populateSafepoints)
+            if (populateSafepoints) {
+                IonSpew(IonSpew_RegAlloc, "Safepoint type v%u i%u %s",
+                        vreg, ins->id(), alloc.toString());
                 safepoint->addNunboxType(vreg, alloc);
+            }
             break;
           case LDefinition::PAYLOAD:
-            if (populateSafepoints)
+            if (populateSafepoints) {
+                IonSpew(IonSpew_RegAlloc, "Safepoint payload v%u i%u %s",
+                        vreg, ins->id(), alloc.toString());
                 safepoint->addNunboxPayload(vreg, alloc);
+            }
             break;
 #else
           case LDefinition::BOX:
-            if (populateSafepoints)
+            if (populateSafepoints) {
+                IonSpew(IonSpew_RegAlloc, "Safepoint boxed value v%u i%u %s",
+                        vreg, ins->id(), alloc.toString());
                 safepoint->addBoxedValue(alloc);
-            else
-                check(safepoint->hasBoxedValue(alloc), "Boxed value not marked in safepoint");
+            } else {
+                JS_ASSERT(safepoint->hasBoxedValue(alloc));
+            }
             break;
 #endif
           default:
             break;
         }
     }
 
     // Phis are effectless, but change the vreg we are tracking. Check if there
@@ -291,27 +318,16 @@ AllocationIntegrityState::addPredecessor
         return true;
     if (!seen.add(p, item))
         return false;
 
     return worklist.append(item);
 }
 
 void
-AllocationIntegrityState::check(bool cond, const char *msg)
-{
-  if (!cond) {
-    if (IonSpewEnabled(IonSpew_RegAlloc))
-      dump();
-    printf("%s\n", msg);
-    JS_NOT_REACHED("Regalloc integrity failure");
-  }
-}
-
-void
 AllocationIntegrityState::dump()
 {
 #ifdef DEBUG
     printf("Register Allocation:\n");
 
     for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
         LBlock *block = graph.getBlock(blockIndex);
         MBasicBlock *mir = block->mir();
@@ -330,55 +346,49 @@ AllocationIntegrityState::dump()
                 printf(" v%u", info.inputs[j]);
             printf("\n");
         }
 
         for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
             LInstruction *ins = *iter;
             InstructionInfo &info = instructions[ins->id()];
 
-            printf("[%s]", ins->opName());
+            CodePosition input(ins->id(), CodePosition::INPUT);
+            CodePosition output(ins->id(), CodePosition::OUTPUT);
+
+            printf("[%u,%u %s]", input.pos(), output.pos(), ins->opName());
 
             if (ins->isMoveGroup()) {
                 LMoveGroup *group = ins->toMoveGroup();
                 for (int i = group->numMoves() - 1; i >= 0; i--) {
-                    printf(" [");
-                    LAllocation::PrintAllocation(stdout, group->getMove(i).from());
-                    printf(" -> ");
-                    LAllocation::PrintAllocation(stdout, group->getMove(i).to());
-                    printf("]");
+                    // Use two printfs, as LAllocation::toString is not reentrant.
+                    printf(" [%s", group->getMove(i).from()->toString());
+                    printf(" -> %s]", group->getMove(i).to()->toString());
                 }
                 printf("\n");
                 continue;
             }
 
             for (size_t i = 0; i < ins->numTemps(); i++) {
                 LDefinition *temp = ins->getTemp(i);
-                if (!temp->isBogusTemp()) {
-                    printf(" [temp ");
-                    LAllocation::PrintAllocation(stdout, temp->output());
-                    printf("]");
-                }
+                if (!temp->isBogusTemp())
+                    printf(" [temp %s]", temp->output()->toString());
             }
 
             for (size_t i = 0; i < ins->numDefs(); i++) {
                 LDefinition *def = ins->getDef(i);
-                printf(" [def v%u ", info.outputs[i]);
-                LAllocation::PrintAllocation(stdout, def->output());
-                printf("]");
+                printf(" [def v%u %s]", info.outputs[i], def->output()->toString());
             }
 
             size_t index = 0;
             for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
                 uint32_t vreg = info.inputs[index++];
                 if (vreg == UINT32_MAX)
                     continue;
-                printf(" [use v%u ", vreg);
-                LAllocation::PrintAllocation(stdout, *alloc);
-                printf("]");
+                printf(" [use v%u %s]", vreg, alloc->toString());
             }
 
             printf("\n");
         }
     }
 
     printf("\nIntermediate Allocations:\n\n");
 
@@ -391,19 +401,18 @@ AllocationIntegrityState::dump()
 
     for (IntegrityItemSet::Enum iter(seen); !iter.empty(); iter.popFront()) {
         IntegrityItem item = iter.front();
         seenOrdered[item.index] = item;
     }
 
     for (size_t i = 0; i < seenOrdered.length(); i++) {
         IntegrityItem item = seenOrdered[i];
-        printf("block %u reg v%u alloc ", item.block->mir()->id(), item.vreg);
-        LAllocation::PrintAllocation(stdout, &item.alloc);
-        printf("\n");
+        printf("block %u reg v%u alloc %s\n",
+               item.block->mir()->id(), item.vreg, item.alloc.toString());
     }
 
     printf("\n");
 #endif
 }
 
 const CodePosition CodePosition::MAX(UINT_MAX);
 const CodePosition CodePosition::MIN(0);
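
[Editor's note] The AllocationIntegrityState changes above record, before allocation runs, which operand a MUST_REUSE_INPUT definition is tied to; the check pass then asserts that the allocator gave the definition and that operand the same location. A stripped-down sketch of that record-then-check idea (hypothetical mini-types, no relation to the real LIR classes):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Hypothetical mini-model: an instruction whose output must reuse one of
    // its inputs, identified by index before allocation runs.
    struct MiniIns {
        std::vector<int> inputAllocs;   // filled in by a register allocator
        int outputAlloc;                // likewise
        uint32_t reusedInput;           // recorded up front; UINT32_MAX if unused
    };

    static void checkReuse(const MiniIns &ins)
    {
        // Mirrors the added assertion: if an output reuses an input, both must
        // have ended up in the same allocation.
        if (ins.reusedInput != UINT32_MAX)
            assert(ins.outputAlloc == ins.inputAllocs[ins.reusedInput]);
    }

    int main()
    {
        MiniIns add;
        add.reusedInput = 0;            // recorded before allocation
        add.inputAllocs = { 4, 7 };     // allocator's decisions (register codes, say)
        add.outputAlloc = 4;            // reuses input 0, as required
        checkReuse(add);
        return 0;
    }
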
--- a/js/src/ion/RegisterAllocator.h
+++ b/js/src/ion/RegisterAllocator.h
@@ -52,18 +52,25 @@ struct AllocationIntegrityState
     // registers for all inputs and outputs of the nodes. These are overwritten
     // in place during register allocation. This information is kept on the
     // side rather than in the instructions and phis themselves to avoid
     // debug-builds-only bloat in the size of the involved structures.
 
     struct InstructionInfo {
         Vector<uint32_t, 2, SystemAllocPolicy> inputs;
         Vector<uint32_t, 1, SystemAllocPolicy> outputs;
-        InstructionInfo() {}
-        InstructionInfo(const InstructionInfo &o) {
+        uint32_t reusedInput;
+
+        InstructionInfo()
+          : reusedInput(UINT32_MAX)
+        {}
+
+        InstructionInfo(const InstructionInfo &o)
+          : reusedInput(o.reusedInput)
+        {
             for (size_t i = 0; i < o.inputs.length(); i++)
                 inputs.append(o.inputs[i]);
             for (size_t i = 0; i < o.outputs.length(); i++)
                 outputs.append(o.outputs[i]);
         }
     };
     Vector<InstructionInfo, 0, SystemAllocPolicy> instructions;
 
@@ -111,17 +118,16 @@ struct AllocationIntegrityState
     // Set of all items that have already been processed.
     typedef HashSet<IntegrityItem, IntegrityItem, SystemAllocPolicy> IntegrityItemSet;
     IntegrityItemSet seen;
 
     bool checkIntegrity(LBlock *block, LInstruction *ins, uint32_t vreg, LAllocation alloc,
                         bool populateSafepoints);
     bool addPredecessor(LBlock *block, uint32_t vreg, LAllocation alloc);
 
-    void check(bool cond, const char *msg);
     void dump();
 };
 
 // Represents with better-than-instruction precision a position in the
 // instruction stream.
 //
 // An issue comes up when performing register allocation as to how to represent
 // information such as "this register is only needed for the input of
--- a/js/src/ion/Safepoints.cpp
+++ b/js/src/ion/Safepoints.cpp
@@ -211,21 +211,24 @@ CanEncodeInfoInHeader(const LAllocation 
 void
 SafepointWriter::writeNunboxParts(LSafepoint *safepoint)
 {
     LSafepoint::NunboxList &entries = safepoint->nunboxParts();
 
 # ifdef DEBUG
     if (IonSpewEnabled(IonSpew_Safepoints)) {
         for (uint32_t i = 0; i < entries.length(); i++) {
+            SafepointNunboxEntry &entry = entries[i];
+            if (entry.type.isUse() || entry.payload.isUse())
+                continue;
             IonSpewHeader(IonSpew_Safepoints);
             fprintf(IonSpewFile, "    nunbox (type in ");
-            DumpNunboxPart(entries[i].type);
+            DumpNunboxPart(entry.type);
             fprintf(IonSpewFile, ", payload in ");
-            DumpNunboxPart(entries[i].payload);
+            DumpNunboxPart(entry.payload);
             fprintf(IonSpewFile, ")\n");
         }
     }
 # endif
 
     // Safepoints are permitted to have partially filled in entries for nunboxes,
     // provided that only the type is live and not the payload. Omit these from
     // the written safepoint.
--- a/js/src/jsinfer.cpp
+++ b/js/src/jsinfer.cpp
@@ -2843,16 +2843,18 @@ ScriptAnalysis::addSingletonTypeBarrier(
 
     barrier->next = code.typeBarriers;
     code.typeBarriers = barrier;
 }
 
 void
 TypeCompartment::print(JSContext *cx, bool force)
 {
+    gc::AutoSuppressGC suppressGC(cx);
+
     JSCompartment *compartment = this->compartment();
     AutoEnterAnalysis enter(compartment);
 
     if (!force && !InferSpewActive(ISpewResult))
         return;
 
     for (gc::CellIter i(compartment, gc::FINALIZE_SCRIPT); !i.done(); i.next()) {
         RootedScript script(cx, i.get<JSScript>());
--- a/js/src/shell/js.cpp
+++ b/js/src/shell/js.cpp
@@ -4844,16 +4844,18 @@ ProcessArgs(JSContext *cx, JSObject *obj
             ion::js_IonOptions.limitScriptSize = false;
         else
             return OptionFailure("ion-limit-script-size", str);
     }
 
     if (const char *str = op->getStringOption("ion-regalloc")) {
         if (strcmp(str, "lsra") == 0)
             ion::js_IonOptions.registerAllocator = ion::RegisterAllocator_LSRA;
+        else if (strcmp(str, "backtracking") == 0)
+            ion::js_IonOptions.registerAllocator = ion::RegisterAllocator_Backtracking;
         else if (strcmp(str, "stupid") == 0)
             ion::js_IonOptions.registerAllocator = ion::RegisterAllocator_Stupid;
         else
             return OptionFailure("ion-regalloc", str);
     }
 
     if (op->getBoolOption("ion-eager"))
         ion::js_IonOptions.setEagerCompilation();
@@ -5075,17 +5077,18 @@ main(int argc, char **argv, char **envp)
                                "Inline methods where possible (default: on, off to disable)")
         || !op.addStringOption('\0', "ion-osr", "on/off",
                                "On-Stack Replacement (default: on, off to disable)")
         || !op.addStringOption('\0', "ion-limit-script-size", "on/off",
                                "Don't compile very large scripts (default: on, off to disable)")
         || !op.addStringOption('\0', "ion-regalloc", "[mode]",
                                "Specify Ion register allocation:\n"
                                "  lsra: Linear Scan register allocation (default)\n"
-                               "  stupid: Simple greedy register allocation")
+                               "  backtracking: Priority based backtracking register allocation\n"
+                               "  stupid: Simple block local register allocation")
         || !op.addBoolOption('\0', "ion-eager", "Always ion-compile methods")
 #ifdef JS_THREADSAFE
         || !op.addStringOption('\0', "ion-parallel-compile", "on/off",
                                "Compile scripts off thread (default: off)")
 #endif
     )
     {
         return EXIT_FAILURE;
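
[Editor's note] With the shell option parsing hooked up as above, the new allocator should be selectable from the command line; assuming the usual SpiderMonkey shell option syntax, something like `js --ion-regalloc=backtracking script.js` (or `--ion-regalloc backtracking`) would exercise it, while omitting the flag keeps LSRA as the default.
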