Bug 678602: Eliminate deallocation hack from linear scan register allocator. r=dvander
author Andrew Drake <adrake@adrake.org>
Wed, 10 Aug 2011 17:10:08 -0700
changeset 108725 d6c0c813dd80fb12865a8d9874aea6676697e91d
parent 108724 e029a3cf6a641f6cc083f3633db92622d2993cfb
child 108726 8d3e5e4be9333e5bc4aace2ead98595c4d2b095e
push id 2248
push user akeybl@mozilla.com
push date Mon, 08 Oct 2012 19:23:44 +0000
reviewers dvander
bugs 678602
milestone 8.0a1
js/src/ion/IonLIR.h
js/src/ion/LinearScan.cpp
js/src/ion/LinearScan.h
--- a/js/src/ion/IonLIR.h
+++ b/js/src/ion/IonLIR.h
@@ -658,19 +658,16 @@ class LBlock : public TempObject
         return phis_.length();
     }
     LPhi *getPhi(size_t index) const {
         return phis_[index];
     }
     void removePhi(size_t index) {
         phis_.erase(&phis_[index]);
     }
-    void clearPhis() {
-        phis_.clear();
-    }
     MBasicBlock *mir() const {
         return block_;
     }
     LInstructionIterator begin() {
         return instructions_.begin();
     }
     LInstructionIterator end() {
         return instructions_.end();
--- a/js/src/ion/LinearScan.cpp
+++ b/js/src/ion/LinearScan.cpp
@@ -43,16 +43,65 @@
 #include "BitSet.h"
 #include "LinearScan.h"
 #include "IonLIR-inl.h"
 #include "IonSpewer.h"
 
 using namespace js;
 using namespace js::ion;
 
+static bool
+UseCompatibleWith(const LUse *use, LAllocation alloc)
+{
+    switch (use->policy()) {
+      case LUse::ANY:
+      case LUse::KEEPALIVE:
+        return alloc.isRegister() || alloc.isMemory();
+      case LUse::REGISTER:
+        return alloc.isRegister();
+      case LUse::FIXED:
+        if (!alloc.isGeneralReg())
+            return false;
+        return alloc.toGeneralReg()->reg().code() == use->registerCode();
+      default:
+        JS_NOT_REACHED("Unknown use policy");
+    }
+    return false;
+}
+
+#ifdef DEBUG
+static bool
+DefinitionCompatibleWith(LInstruction *ins, const LDefinition *def, LAllocation alloc)
+{
+    if (ins->isPhi()) {
+        if (def->type() == LDefinition::DOUBLE)
+            return alloc.isFloatReg() || alloc.kind() == LAllocation::DOUBLE_SLOT;
+        return alloc.isGeneralReg() || alloc.kind() == LAllocation::STACK_SLOT;
+    }
+
+    switch (def->policy()) {
+      case LDefinition::DEFAULT:
+        if (!alloc.isRegister())
+            return false;
+        return alloc.isFloatReg() == (def->type() == LDefinition::DOUBLE);
+      case LDefinition::PRESET:
+        return alloc == *def->output();
+      case LDefinition::MUST_REUSE_INPUT:
+        if (!alloc.isRegister() || !ins->numOperands())
+            return false;
+        return alloc == *ins->getOperand(0);
+      case LDefinition::REDEFINED:
+        return true;
+      default:
+        JS_NOT_REACHED("Unknown definition policy");
+    }
+    return false;
+}
+#endif
+
 bool
 LiveInterval::addRange(CodePosition from, CodePosition to)
 {
     JS_ASSERT(from <= to);
 
     Range newRange(from, to);
 
     Range *i;
@@ -265,32 +314,17 @@ VirtualRegister::nextUsePosAfter(CodePos
  * allocation.
  */
 CodePosition
 VirtualRegister::nextIncompatibleUseAfter(CodePosition after, LAllocation alloc)
 {
     CodePosition min = CodePosition::MAX;
     for (LOperand *i = uses_.begin(); i != uses_.end(); i++) {
         CodePosition ip(i->ins->id(), CodePosition::INPUT);
-        if (i->use->policy() == LUse::ANY && (alloc.isStackSlot() ||
-                                              alloc.isGeneralReg() ||
-                                              alloc.isArgument()))
-        {
-            continue;
-        }
-        if (i->use->policy() == LUse::REGISTER && alloc.isGeneralReg())
-            continue;
-        if (i->use->isFixedRegister() && alloc.isGeneralReg() &&
-            alloc.toGeneralReg()->reg().code() == i->use->registerCode())
-        {
-            continue;
-        }
-        if (i->use->policy() == LUse::KEEPALIVE)
-            continue;
-        if (ip >= after && ip < min)
+        if (ip >= after && ip < min && !UseCompatibleWith(i->use, alloc))
             min = ip;
     }
     return min;
 }
 
 const CodePosition CodePosition::MAX(UINT_MAX);
 const CodePosition CodePosition::MIN(0);
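
The hunk above folds the four ad-hoc policy checks into a single UseCompatibleWith predicate, so the loop simply records the earliest use at or after `after` that the tentative allocation cannot satisfy. A minimal standalone sketch of that scan, using invented stand-in types (Use, Alloc, Policy) rather than the real LIR classes:

    #include <climits>
    #include <vector>

    enum class Policy { Any, Register, Fixed, KeepAlive };

    struct Alloc {
        bool isRegister;
        int regCode;                       // meaningful only if isRegister
    };

    struct Use {
        unsigned pos;                      // code position of the use
        Policy policy;                     // constraint the use imposes
        int fixedRegCode;                  // meaningful only for Policy::Fixed
    };

    static bool compatible(const Use &u, const Alloc &a)
    {
        switch (u.policy) {
          case Policy::Any:
          case Policy::KeepAlive: return true;   // register or memory both fine here
          case Policy::Register:  return a.isRegister;
          case Policy::Fixed:     return a.isRegister && a.regCode == u.fixedRegCode;
        }
        return false;
    }

    // Earliest use at or after 'after' that 'a' cannot satisfy, or UINT_MAX if
    // none: the position where the interval would have to be split.
    unsigned nextIncompatibleUseAfter(const std::vector<Use> &uses, unsigned after, Alloc a)
    {
        unsigned min = UINT_MAX;
        for (const Use &u : uses) {
            if (u.pos >= after && u.pos < min && !compatible(u, a))
                min = u.pos;
        }
        return min;
    }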
 
@@ -474,17 +508,16 @@ LinearScanAllocator::buildLivenessInfo()
             // our block order does not guarantee that blocks of a loop are
             // contiguous. As a result, a single live interval spanning the
             // loop is not possible. Additionally, we require liveIn in a later
             // pass for resolution, so that must also be fixed up here.
             MBasicBlock *loopBlock = mblock->backedge();
             while (true) {
                 // Add an interval for this entire loop block
                 for (BitSet::Iterator i(live->begin()); i != live->end(); i++) {
-                    IonSpew(IonSpew_LSRA, " Marking %d live for all of block %d", *i, loopBlock->id());
                     vregs[*i].getInterval(0)->addRange(inputOf(loopBlock->lir()->firstId()),
                                                        outputOf(loopBlock->lir()->lastId()));
                 }
 
                 // Fix up the liveIn set to account for the new interval
                 liveIn[loopBlock->id()]->insertAll(live);
 
                 // Make sure we don't visit this node again
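
The comment in this hunk is the reasoning for the loop fixup: block order does not keep a loop's blocks contiguous, so each virtual register live across the backedge gets a range covering every block of the loop, and each block's liveIn set is widened to match. A standalone sketch of that fixup over an invented CFG representation (Block and Range here are not the patch's types):

    #include <set>
    #include <vector>

    struct Range { unsigned from, to; };

    struct Block {
        unsigned firstPos, lastPos;        // code positions spanned by the block
        std::set<unsigned> liveIn;         // vreg ids live at block entry
    };

    // For every vreg live at the backedge, cover each loop block with a full
    // range and add the vreg to that block's liveIn (cf. insertAll(live)).
    void fixupLoopLiveness(std::vector<Block> &blocks,
                           const std::vector<unsigned> &loopBlockIds,
                           const std::set<unsigned> &liveAtBackedge,
                           std::vector<std::vector<Range> > &rangesForVreg)
    {
        for (unsigned id : loopBlockIds) {
            for (unsigned vreg : liveAtBackedge) {
                rangesForVreg[vreg].push_back({blocks[id].firstPos, blocks[id].lastPos});
                blocks[id].liveIn.insert(vreg);
            }
        }
    }
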
@@ -537,33 +570,35 @@ LinearScanAllocator::buildLivenessInfo()
  * with the farthest-away next use is spilled to make room for this one. In all
  * cases, intervals which conflict either with other intervals or with
  * use or definition constraints are split at the point of conflict to be
  * assigned a compatible allocation later.
  */
 bool
 LinearScanAllocator::allocateRegisters()
 {
-    // Enqueue intervals for allocation
+    // Compute priorities and enqueue intervals for allocation
     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
         LiveInterval *live = vregs[i].getInterval(0);
-        if (live->numRanges() > 0)
+        if (live->numRanges() > 0) {
+            setIntervalPriority(live);
             unhandled.enqueue(live);
+        }
     }
 
     // Iterate through all intervals in ascending start order
     while ((current = unhandled.dequeue()) != NULL) {
         JS_ASSERT(current->getAllocation()->isUse());
         JS_ASSERT(current->numRanges() > 0);
 
         CodePosition position = current->start();
 
-        IonSpew(IonSpew_LSRA, "Processing %d = [%u, %u]",
+        IonSpew(IonSpew_LSRA, "Processing %d = [%u, %u] (pri=%d)",
                 current->reg()->reg(), current->start().pos(),
-                current->end().pos());
+                current->end().pos(), current->priority());
 
         // Shift active intervals to the inactive or handled sets as appropriate
         for (IntervalIterator i(active.begin()); i != active.end(); ) {
             LiveInterval *it = *i;
             JS_ASSERT(it->numRanges() > 0);
 
             if (it->end() < position) {
                 i = active.removeAt(i);
@@ -598,17 +633,16 @@ LinearScanAllocator::allocateRegisters()
         // Check the allocation policy if this is a definition or a use
         bool mustHaveRegister = false;
         bool canSpillOthers = true;
         if (position == current->reg()->getFirstInterval()->start()) {
             LDefinition *def = current->reg()->def();
             LDefinition::Policy policy = def->policy();
             if (policy == LDefinition::PRESET) {
                 IonSpew(IonSpew_LSRA, " Definition has preset policy.");
-                current->setFlag(LiveInterval::FIXED);
                 if (!assign(*def->output()))
                     return false;
                 continue;
             }
             if (policy == LDefinition::MUST_REUSE_INPUT) {
                 IonSpew(IonSpew_LSRA, " Definition has 'must reuse input' policy.");
                 LInstruction *ins = current->reg()->ins();
                 JS_ASSERT(ins->numOperands() > 0);
@@ -647,17 +681,16 @@ LinearScanAllocator::allocateRegisters()
                     } else {
                         JS_ASSERT(pol == LUse::ANY || pol == LUse::KEEPALIVE);
                     }
                 }
             }
 
             // Handle the fixed constraint if present
             if (fixedOp) {
-                current->setFlag(LiveInterval::FIXED);
                 if (!assign(LGeneralReg(Register::FromCode(fixedOp->use->registerCode()))))
                     return false;
                 continue;
             }
         }
 
         // Find the first use of this register
         firstUse = current->reg()->nextUseAfter(position);
@@ -673,17 +706,17 @@ LinearScanAllocator::allocateRegisters()
             continue;
         }
 
         // Try to allocate a free register
         IonSpew(IonSpew_LSRA, " Attempting free register allocation");
 
         // Find the best register
         Register best = findBestFreeRegister();
-        IonSpew(IonSpew_LSRA, "  Decided best register was %u, free until %u", best.code(),
+        IonSpew(IonSpew_LSRA, "  Decided best register was %s, free until %u", best.name(),
                 freeUntilPos[best.code()].pos());
 
         // Attempt to allocate
         if (freeUntilPos[best.code()] != CodePosition::MIN) {
             // Split when the register is next needed if necessary
             if (freeUntilPos[best.code()] <= current->end()) {
                 if (!splitInterval(current, freeUntilPos[best.code()]))
                     return false;
@@ -700,208 +733,162 @@ LinearScanAllocator::allocateRegisters()
             IonSpew(IonSpew_LSRA, " Can't spill any other intervals, spilling this one");
             if (!spill())
                 return false;
             continue;
         }
 
         IonSpew(IonSpew_LSRA, " Attempting blocked register allocation");
 
-        // Find the best register
+        // If we absolutely need a register, or our next use is closer than the
+        // selected blocking register's next use, we spill the blocker.
+        // Otherwise, we spill the current interval.
         best = findBestBlockedRegister();
-
-        // Figure out whether to spill the blocker or ourselves
-        IonSpew(IonSpew_LSRA, "  Current interval next used at %u", firstUsePos.pos());
-
-        // Deal with constraints
-        if (mustHaveRegister) {
-            IonSpew(IonSpew_LSRA, "   But this interval needs a register.");
-            firstUsePos = position;
-        }
-
-        // We only spill the blocking interval if it is a strict improvement
-        // to do so, otherwise we will continuously re-spill intervals.
-        if (firstUsePos >= nextUsePos[best.code()]) {
-            if (!spill())
+        if (mustHaveRegister || firstUsePos < nextUsePos[best.code()]) {
+            if (!assign(LGeneralReg(best)))
                 return false;
         } else {
-            if (!assign(LGeneralReg(best)))
+            if (!spill())
                 return false;
         }
     }
 
     validateAllocations();
 
     return true;
 }
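
The rewritten tail of allocateRegisters above collapses the old spill logic into one comparison: evict the blocker when this interval must have a register, or when its next use comes before the blocker's, and spill the current interval otherwise, since an evicted blocker that is needed sooner would only get re-spilled. A minimal sketch of that decision (names invented):

    enum class Action { TakeBlockedRegister, SpillSelf };

    // 'firstUsePos' is the current interval's next use; 'blockerNextUse' is
    // the next use of the interval holding the best blocked register.
    Action decideBlockedAllocation(bool mustHaveRegister,
                                   unsigned firstUsePos,
                                   unsigned blockerNextUse)
    {
        if (mustHaveRegister || firstUsePos < blockerNextUse)
            return Action::TakeBlockedRegister;   // the assign(LGeneralReg(best)) path
        return Action::SpillSelf;                 // the spill() path
    }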
 
 /*
  * This function iterates over control flow edges in the function and resolves
  * conflicts wherein two predecessors of a block have different allocations
- * for a virtual register than the block itself. In such cases, moves are
- * inserted at the end of the predecessor blocks.
+ * for a virtual register than the block itself. It also turns phis into moves.
  *
  * The algorithm is based on the one published in "Linear Scan Register
  * Allocation on SSA Form" by C. Wimmer et al., for which the full citation
  * appears above.
  */
 bool
 LinearScanAllocator::resolveControlFlow()
 {
     for (size_t i = 0; i < graph.numBlocks(); i++) {
         LBlock *successor = graph.getBlock(i);
         MBasicBlock *mSuccessor = successor->mir();
         if (mSuccessor->numPredecessors() < 1)
             continue;
 
-        IonSpew(IonSpew_LSRA, " Resolving control flow into block %d", successor->mir()->id());
-
-        // We want to recover the state of liveIn prior to the removal of phi-
-        // defined instructions. So, we (destructively) add all phis back in.
-        BitSet *live = liveIn[successor->mir()->id()];
-        for (size_t j = 0; j < successor->numPhis(); j++)
-            live->insert(successor->getPhi(j)->getDef(0)->virtualRegister());
-
-        for (BitSet::Iterator liveRegId(live->begin()); liveRegId != live->end(); liveRegId++) {
-            IonSpew(IonSpew_LSRA, "  Inspecting live register %d", *liveRegId);
-            VirtualRegister *reg = &vregs[*liveRegId];
-
-            // Locate the interval in the successor block
-            LPhi *phi = NULL;
-            LiveInterval *to;
-            if (reg->ins()->isPhi() &&
-                reg->ins()->id() >= successor->firstId() &&
-                reg->ins()->id() <= successor->lastId())
-            {
-                IonSpew(IonSpew_LSRA, "   Defined by phi");
-                phi = reg->ins()->toPhi();
-                to = reg->intervalFor(outputOf(successor->firstId()));
-            } else {
-                IonSpew(IonSpew_LSRA, "   Present at input");
-                to = reg->intervalFor(inputOf(successor->firstId()));
-            }
+        // Resolve phis to moves
+        for (size_t j = 0; j < successor->numPhis(); j++) {
+            LPhi *phi = successor->getPhi(j);
+            LiveInterval *to = vregs[phi].intervalFor(inputOf(successor->firstId()));
             JS_ASSERT(to);
 
-            // Locate and resolve the interval in each predecessor block
-            for (size_t j = 0; j < mSuccessor->numPredecessors(); j++) {
-                MBasicBlock *mPredecessor = mSuccessor->getPredecessor(j);
-                LBlock *predecessor = mPredecessor->lir();
-                CodePosition predecessorEnd = outputOf(predecessor->lastId());
-                LiveInterval *from = NULL;
+            for (size_t k = 0; k < mSuccessor->numPredecessors(); k++) {
+                LBlock *predecessor = mSuccessor->getPredecessor(k)->lir();
+                JS_ASSERT(predecessor->mir()->numSuccessors() == 1);
 
-                // If this assertion does not hold, then the edge
-                // currently under consideration is "critical", and can
-                // not be resolved directly without the insertion of an
-                // additional piece of control flow. These critical edges
-                // should have been split in an earlier pass so that this
-                // pass does not have to deal with them.
-                JS_ASSERT_IF(phi, mPredecessor->numSuccessors() == 1);
+                LAllocation *input = phi->getOperand(predecessor->mir()->positionInPhiSuccessor());
+                LiveInterval *from = vregs[input].intervalFor(outputOf(predecessor->lastId()));
+                JS_ASSERT(from);
+
+                if (!moveBefore(outputOf(predecessor->lastId()), from, to))
+                    return false;
+            }
+        }
 
-                // Find the interval at the "from" half of the edge
-                if (phi) {
-                    JS_ASSERT(mPredecessor->successorWithPhis() == successor->mir());
-
-                    LAllocation *phiInput = phi->getOperand(mPredecessor->
-                                                            positionInPhiSuccessor());
-                    JS_ASSERT(phiInput->isUse());
-
-                    IonSpew(IonSpew_LSRA, "   Known as register %u at phi input",
-                            phiInput->toUse()->virtualRegister());
+        // Resolve split intervals with moves
+        BitSet *live = liveIn[mSuccessor->id()];
+        for (BitSet::Iterator liveRegId(live->begin()); liveRegId != live->end(); liveRegId++) {
+            LiveInterval *to = vregs[*liveRegId].intervalFor(inputOf(successor->firstId()));
+            JS_ASSERT(to);
 
-                    from = vregs[phiInput].intervalFor(predecessorEnd);
-                } else {
-                    from = reg->intervalFor(predecessorEnd);
-                }
+            for (size_t j = 0; j < mSuccessor->numPredecessors(); j++) {
+                LBlock *predecessor = mSuccessor->getPredecessor(j)->lir();
+                LiveInterval *from = vregs[*liveRegId].intervalFor(outputOf(predecessor->lastId()));
+                JS_ASSERT(from);
 
-                // Resolve the edge with a move if necessary
-                JS_ASSERT(from);
-                if (*from->getAllocation() != *to->getAllocation()) {
-                    IonSpew(IonSpew_LSRA, "    Inserting move");
-                    if (!moveBefore(predecessorEnd, from, to))
+                if (mSuccessor->numPredecessors() > 1) {
+                    JS_ASSERT(predecessor->mir()->numSuccessors() == 1);
+                    if (!moveBefore(outputOf(predecessor->lastId()), from, to))
+                        return false;
+                } else {
+                    if (!moveBefore(inputOf(successor->firstId()), from, to))
                         return false;
                 }
             }
         }
     }
 
     return true;
 }
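
resolveControlFlow is now two passes per block: phis become one move per predecessor edge, placed at the end of the predecessor (valid because critical edges were split earlier, hence the single-successor assertion), and intervals split across an edge get a connecting move. A standalone sketch of the phi half, over invented Block/Phi/Move types:

    #include <vector>

    struct Move { unsigned atPos; int fromAlloc, toAlloc; };

    struct Phi {
        int defAlloc;                      // allocation of the phi's definition
        std::vector<int> operandAllocs;    // one incoming allocation per predecessor
    };

    struct Block {
        unsigned firstPos, lastPos;
        std::vector<unsigned> preds;       // predecessor block ids
        std::vector<Phi> phis;
    };

    // Lower every phi to one move per predecessor edge, emitted at the end
    // of the predecessor block.
    void resolvePhis(std::vector<Block> &blocks, unsigned succId, std::vector<Move> &moves)
    {
        Block &succ = blocks[succId];
        for (Phi &phi : succ.phis) {
            for (size_t k = 0; k < succ.preds.size(); k++) {
                Block &pred = blocks[succ.preds[k]];
                moves.push_back({pred.lastPos, phi.operandAllocs[k], phi.defAlloc});
            }
        }
    }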
 
 /*
  * This function takes the allocations assigned to the live intervals and
  * erases all virtual registers in the function with the allocations
- * corresponding to the appropriate interval. It also removes the now
- * useless phi nodes, as they have been resolved with moves in the previous
- * pass.
+ * corresponding to the appropriate interval.
  */
 bool
 LinearScanAllocator::reifyAllocations()
 {
     // Enqueue all handled intervals for reification
     unhandled.enqueue(inactive);
     unhandled.enqueue(active);
     unhandled.enqueue(handled);
 
     // Handle each interval in sequence
     LiveInterval *interval;
     while ((interval = unhandled.dequeue()) != NULL) {
         VirtualRegister *reg = interval->reg();
 
-        IonSpew(IonSpew_LSRA, " Reifying interval %u = [%u,%u]", reg->reg(),
-                interval->start().pos(), interval->end().pos());
-
         // Erase all uses of this interval
         for (size_t i = 0; i < reg->numUses(); i++) {
             LOperand *use = reg->getUse(i);
-            LAllocation *alloc = use->use;
             CodePosition pos = inputOf(use->ins);
             if (use->snapshot)
                 pos = outputOf(use->ins);
-            if (interval->covers(pos))
-                *alloc = *interval->getAllocation();
+            if (interval->covers(pos)) {
+                JS_ASSERT(UseCompatibleWith(use->use, *interval->getAllocation()));
+                *static_cast<LAllocation *>(use->use) = *interval->getAllocation();
+            }
         }
 
         if (interval->index() == 0)
         {
             // Erase the def of this interval if it's the first one
+            JS_ASSERT(DefinitionCompatibleWith(reg->ins(), reg->def(), *interval->getAllocation()));
             reg->def()->setOutput(*interval->getAllocation());
         }
-        else if (!interval->reg()->canonicalSpill() ||
-                 interval->reg()->canonicalSpill() == interval->getAllocation() ||
-                 *interval->reg()->canonicalSpill() != *interval->getAllocation())
+        else if (interval->start() != inputOf(vregs[interval->start()].block()->firstId()) &&
+                 (!interval->reg()->canonicalSpill() ||
+                  interval->reg()->canonicalSpill() == interval->getAllocation() ||
+                  *interval->reg()->canonicalSpill() != *interval->getAllocation()))
         {
             // If this virtual register has no canonical spill location, this
             // is the first spill to that location, or this is a move to somewhere
             // completely different, we have to emit a move for this interval.
-            LiveInterval *from = reg->getInterval(interval->index() - 1);
-            if (!moveBefore(interval->start(), from, interval))
+            if (!moveBefore(interval->start(), reg->getInterval(interval->index() - 1), interval))
                 return false;
         }
     }
 
-    // Strip phis out
-    for (size_t i = 0; i < graph.numBlocks(); i++)
-        graph.getBlock(i)->clearPhis();
-
     // Set the graph overall stack height
     graph.setLocalSlotCount(stackAssignment.stackHeight());
 
     return true;
 }
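
With phis already lowered in resolveControlFlow, reifyAllocations reduces to patching operands: every use covered by an interval receives that interval's final allocation (policy-checked in DEBUG builds via the new helpers), plus a connecting move when consecutive intervals of a vreg land in different places. A sketch of the patching step, with invented stand-ins for the LIR operand slots:

    #include <vector>

    struct Interval {
        unsigned start, end;               // inclusive covered positions
        int alloc;                         // final allocation for this interval
    };

    struct UseSite { unsigned pos; int *operand; };

    // Patch each use site the interval covers, mirroring the
    // *static_cast<LAllocation *>(use->use) = *interval->getAllocation() write.
    void reifyUses(const Interval &interval, std::vector<UseSite> &uses)
    {
        for (UseSite &u : uses) {
            if (u.pos >= interval.start && u.pos <= interval.end)
                *u.operand = interval.alloc;
        }
    }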
 
 /*
  * Split the given interval at the given position, and add the created
  * interval to the unhandled queue.
  */
 bool
 LinearScanAllocator::splitInterval(LiveInterval *interval, CodePosition pos)
 {
     // Make sure we're actually splitting this interval, not some other
     // interval in the same virtual register.
-    JS_ASSERT(interval->start() <= pos && pos <= interval->end());
+    JS_ASSERT(interval->start() < pos && pos <= interval->end());
 
     VirtualRegister *reg = interval->reg();
 
     // Do the split
     LiveInterval *newInterval = new LiveInterval(reg, interval->index() + 1);
     if (!interval->splitFrom(pos, newInterval))
         return false;
 
@@ -910,26 +897,25 @@ LinearScanAllocator::splitInterval(LiveI
 
     if (!reg->addInterval(newInterval))
         return false;
 
     // We'll need a move group later, insert it now.
     if (!getMoveGroupBefore(pos))
         return false;
 
-    IonSpew(IonSpew_LSRA, "  Split interval to %u = [%u, %u]",
+    IonSpew(IonSpew_LSRA, "  Split interval to %u = [%u, %u]/[%u, %u]",
             interval->reg()->reg(), interval->start().pos(),
-            interval->end().pos());
-    IonSpew(IonSpew_LSRA, "  Created new interval %u = [%u, %u]",
-            newInterval->reg()->reg(), newInterval->start().pos(),
+            interval->end().pos(), newInterval->start().pos(),
             newInterval->end().pos());
 
     // We always want to enqueue the resulting split. We always split
     // forward, and we never want to handle something forward of our
     // current position.
+    setIntervalPriority(newInterval);
     unhandled.enqueue(newInterval);
 
     return true;
 }
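
Note the tightened assertion: the interval's start must now be strictly below the split position, so both halves of a split are nonempty. A standalone sketch of the underlying range split (invented Range type; the real splitFrom also moves uses across):

    #include <cassert>
    #include <vector>

    struct Range { unsigned from, to; };   // inclusive, sorted by 'from'

    // Move everything at or after 'pos' into 'after', cutting any straddling
    // range in two. Requires front().from < pos <= back().to, matching the
    // strict JS_ASSERT above, so neither half comes out empty.
    void splitFrom(std::vector<Range> &ranges, unsigned pos, std::vector<Range> &after)
    {
        assert(ranges.front().from < pos && pos <= ranges.back().to);
        std::vector<Range> before;
        for (Range r : ranges) {
            if (r.to < pos)
                before.push_back(r);                  // entirely before the split
            else if (r.from >= pos)
                after.push_back(r);                   // entirely after the split
            else {
                before.push_back({r.from, pos - 1});  // straddles: cut in two
                after.push_back({pos, r.to});
            }
        }
        ranges = before;
    }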
 
 /*
  * Assign the current interval the given allocation, splitting conflicting
  * intervals as necessary to make the allocation stick.
@@ -939,55 +925,40 @@ LinearScanAllocator::assign(LAllocation 
 {
     if (allocation.isGeneralReg())
         IonSpew(IonSpew_LSRA, "Assigning register %s", allocation.toGeneralReg()->reg().name());
     current->setAllocation(allocation);
 
     // Split this interval at the next incompatible one
     CodePosition toSplit = current->reg()->nextIncompatibleUseAfter(current->start(), allocation);
     if (toSplit <= current->end()) {
-        JS_ASSERT(toSplit != current->start());
         if (!splitInterval(current, toSplit))
             return false;
     }
 
     if (allocation.isGeneralReg()) {
         // Split the blocking interval if it exists
         for (IntervalIterator i(active.begin()); i != active.end(); i++) {
             if (i->getAllocation()->isGeneralReg() &&
                 i->getAllocation()->toGeneralReg()->reg() == allocation.toGeneralReg()->reg())
             {
                 IonSpew(IonSpew_LSRA, " Splitting active interval %u = [%u, %u]",
                         i->reg()->ins()->id(), i->start().pos(), i->end().pos());
 
-                LiveInterval *it = *i;
-                if (it->start() == current->start()) {
-                    // We assigned a needed fixed register to another
-                    // interval, so we break a very important rule
-                    // and "de-assign" the other one. We can do this
-                    // safely (i.e. be guaranteed to terminate) iff
-                    // the register we are reallocating was not itself
-                    // assigned to fulfill a FIXED constraint.
-                    JS_ASSERT(!it->hasFlag(LiveInterval::FIXED));
+                JS_ASSERT(i->start() != current->start());
+                JS_ASSERT(i->covers(current->start()));
 
-                    it->setAllocation(LUse(it->reg()->reg(), LUse::ANY));
-                    active.removeAt(i);
-                    unhandled.enqueue(it);
-                    break;
-                } else {
-                    JS_ASSERT(it->covers(current->start()));
-                    JS_ASSERT(it->start() != current->start());
+                if (!splitInterval(*i, current->start()))
+                    return false;
 
-                    if (!splitInterval(it, current->start()))
-                        return false;
-
-                    active.removeAt(i);
-                    finishInterval(it);
-                    break;
-                }
+                LiveInterval *it = *i;
+                active.removeAt(i);
+                finishInterval(it);
+                break;
             }
         }
 
         // Split any inactive intervals at the next live point
         for (IntervalIterator i(inactive.begin()); i != inactive.end(); ) {
             if (i->getAllocation()->isGeneralReg() &&
                 i->getAllocation()->toGeneralReg()->reg() == allocation.toGeneralReg()->reg())
             {
@@ -1072,26 +1043,26 @@ LinearScanAllocator::findBestFreeRegiste
             freeUntilPos[i] = CodePosition::MAX;
         else
             freeUntilPos[i] = CodePosition::MIN;
     }
     for (IntervalIterator i(active.begin()); i != active.end(); i++) {
         if (i->getAllocation()->isGeneralReg()) {
             Register reg = i->getAllocation()->toGeneralReg()->reg();
             freeUntilPos[reg.code()] = CodePosition::MIN;
-            IonSpew(IonSpew_LSRA, "   Register %u not free", reg.code());
+            IonSpew(IonSpew_LSRA, "   Register %s not free", reg.name());
         }
     }
     for (IntervalIterator i(inactive.begin()); i != inactive.end(); i++) {
         if (i->getAllocation()->isGeneralReg()) {
             Register reg = i->getAllocation()->toGeneralReg()->reg();
             CodePosition pos = current->intersect(*i);
             if (pos != CodePosition::MIN && pos < freeUntilPos[reg.code()]) {
                 freeUntilPos[reg.code()] = pos;
-                IonSpew(IonSpew_LSRA, "   Register %u free until %u", reg.code(), pos.pos());
+                IonSpew(IonSpew_LSRA, "   Register %s free until %u", reg.name(), pos.pos());
             }
         }
     }
 
     // Search freeUntilPos for largest value
     Register best = Register::FromCode(0);
     for (uint32 i = 0; i < Registers::Total; i++) {
         Register reg = Register::FromCode(i);
@@ -1136,45 +1107,45 @@ LinearScanAllocator::findBestBlockedRegi
             nextUsePos[i] = CodePosition::MAX;
         else
             nextUsePos[i] = CodePosition::MIN;
     }
     for (IntervalIterator i(active.begin()); i != active.end(); i++) {
         if (i->getAllocation()->isGeneralReg()) {
             Register reg = i->getAllocation()->toGeneralReg()->reg();
             CodePosition nextUse = i->reg()->nextUsePosAfter(current->start());
-            IonSpew(IonSpew_LSRA, "   Register %u next used %u", reg.code(), nextUse.pos());
+            IonSpew(IonSpew_LSRA, "   Register %s next used %u", reg.name(), nextUse.pos());
 
             if (i->start() == current->start()) {
                 IonSpew(IonSpew_LSRA, "    Disqualifying due to recency.");
                 nextUsePos[reg.code()] = current->start();
             } else {
                 nextUsePos[reg.code()] = nextUse;
             }
         }
     }
     for (IntervalIterator i(inactive.begin()); i != inactive.end(); i++) {
         if (i->getAllocation()->isGeneralReg()) {
             Register reg = i->getAllocation()->toGeneralReg()->reg();
             CodePosition pos = i->reg()->nextUsePosAfter(current->start());
             JS_ASSERT(i->covers(pos) || i->end() == pos);
             if (pos < nextUsePos[reg.code()]) {
                 nextUsePos[reg.code()] = pos;
-                IonSpew(IonSpew_LSRA, "   Register %u next used %u", reg.code(), pos.pos());
+                IonSpew(IonSpew_LSRA, "   Register %s next used %u", reg.name(), pos.pos());
             }
         }
     }
 
     // Search nextUsePos for largest value
     Register best = Register::FromCode(0);
     for (size_t i = 0; i < Registers::Total; i++) {
         if (nextUsePos[i] > nextUsePos[best.code()])
             best = Register::FromCode(i);
     }
-    IonSpew(IonSpew_LSRA, "  Decided best register was %u", best.code());
+    IonSpew(IonSpew_LSRA, "  Decided best register was %s", best.name());
     return best;
 }
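
findBestFreeRegister and findBestBlockedRegister share one shape: fill a per-register score table (free-until position in one, next-use position in the other) and take the register with the largest score; the spew changes above just print reg.name() instead of the raw code. The selection itself is a plain argmax, sketched here:

    #include <cstddef>

    // Pick the register with the largest score, where 'scores' holds one
    // entry per register code (freeUntilPos or nextUsePos in the allocator).
    size_t bestRegister(const unsigned *scores, size_t numRegs)
    {
        size_t best = 0;
        for (size_t i = 1; i < numRegs; i++) {
            if (scores[i] > scores[best])
                best = i;
        }
        return best;
    }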
 
 /*
  * Two intervals can coexist if any of the following conditions is met:
  *
  *   - The intervals do not intersect.
  *   - The intervals have different allocations.
@@ -1285,17 +1256,17 @@ LinearScanAllocator::validateIntervals()
             JS_ASSERT(*i != *j);
         for (IntervalIterator j(inactive.begin()); j != inactive.end(); j++)
             JS_ASSERT(*i != *j);
     }
 }
 
 /*
  * This function performs a nice, expensive check that all intervals
- * in the function can coext with every other interval.
+ * in the function can coexist with every other interval.
  */
 void
 LinearScanAllocator::validateAllocations()
 {
     for (IntervalIterator i(handled.begin()); i != handled.end(); i++) {
         for (IntervalIterator j(handled.begin()); j != i; j++) {
             JS_ASSERT(*i != *j);
             JS_ASSERT(canCoexist(*i, *j));
@@ -1343,23 +1314,69 @@ LinearScanAllocator::go()
         return false;
     IonSpew(IonSpew_LSRA, "Register allocation reification complete");
 
     IonSpew(IonSpew_LSRA, "Register allocation complete");
 
     return true;
 }
 
+/*
+ * If we assign registers to intervals in order of their start positions
+ * without regard to their requirements, we can end up in situations where
+ * intervals that do not require registers block intervals that must have
+ * registers from getting one. To avoid this, we ensure that intervals are
+ * ordered by position and priority so intervals with more stringent
+ * requirements are handled first.
+ */
+void
+LinearScanAllocator::setIntervalPriority(LiveInterval *interval)
+{
+    int priority = 2;
+
+    if (interval == interval->reg()->getFirstInterval()) {
+        if (interval->reg()->def()->policy() == LDefinition::PRESET) {
+            // Interval is a fixed definition
+            priority = 0;
+        } else if (interval->reg()->def()->policy() == LDefinition::MUST_REUSE_INPUT) {
+            // Interval is a must-reuse, which is as good as being fixed
+            priority = 0;
+        } else if (!interval->reg()->ins()->isPhi()) {
+            // Interval is a normal definition (and thus needs a register)
+            priority = 1;
+        }
+        // Otherwise, interval is some other sort of definition
+    } else {
+        for (size_t i = 0; i < interval->reg()->numUses(); i++) {
+            LOperand *op = interval->reg()->getUse(i);
+            if (interval->start() == inputOf(op->ins)) {
+                if (op->use->policy() == LUse::FIXED) {
+                    // Interval has a fixed use
+                    priority = 0;
+                    break;
+                } else if (op->use->policy() == LUse::REGISTER) {
+                    // Interval just needs a register
+                    priority = priority < 1 ? priority : 1;
+                }
+            }
+        }
+    }
+
+    interval->setPriority(priority);
+}
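
The scheme is deliberately coarse: priority 0 for intervals pinned to one specific register (PRESET or MUST_REUSE_INPUT definitions, fixed uses at the interval start), 1 for intervals that need some register, 2 for everything else, with lower numbers handled first at equal start positions. A condensed sketch of the same classification (the flags are invented, derived from the policies above):

    // 0 = must land in one specific register, 1 = needs some register,
    // 2 = no register requirement.
    int intervalPriority(bool pinnedToOneRegister, bool needsSomeRegister)
    {
        if (pinnedToOneRegister)
            return 0;
        if (needsSomeRegister)
            return 1;
        return 2;
    }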
+
 void
 LinearScanAllocator::UnhandledQueue::enqueue(LiveInterval *interval)
 {
     IntervalIterator i(begin());
     for (; i != end(); i++) {
         if (i->start() < interval->start())
             break;
+        if (i->start() == interval->start() && i->priority() < interval->priority())
+            break;
     }
     insertBefore(*i, interval);
 }
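
enqueue keeps the queue sorted by descending start position, and the new tie-break puts lower priority numbers nearer the dequeue end, so at any one position fixed intervals come off before merely register-hungry ones, which come off before don't-care ones. A small runnable demonstration of the ordering invariant (Item is an invented stand-in for LiveInterval):

    #include <cassert>
    #include <list>

    struct Item { unsigned start; int priority; };

    // Mirror of UnhandledQueue::enqueue: descending start from the front;
    // among equal starts, smaller priority numbers sit nearer the back.
    void enqueue(std::list<Item> &q, Item item)
    {
        auto i = q.begin();
        for (; i != q.end(); ++i) {
            if (i->start < item.start)
                break;
            if (i->start == item.start && i->priority < item.priority)
                break;
        }
        q.insert(i, item);
    }

    int main()
    {
        std::list<Item> q;
        enqueue(q, {4, 2});
        enqueue(q, {4, 0});
        enqueue(q, {4, 1});
        enqueue(q, {2, 2});
        assert(q.back().start == 2);       // lowest start dequeued first
        q.pop_back();
        assert(q.back().priority == 0);    // then, at start 4: fixed first
        q.pop_back();
        assert(q.back().priority == 1);    // then needs-a-register
        q.pop_back();
        assert(q.back().priority == 2);    // then don't-care
        return 0;
    }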
 
 LiveInterval *
 LinearScanAllocator::UnhandledQueue::dequeue()
 {
     if (rbegin() == rend())
--- a/js/src/ion/LinearScan.h
+++ b/js/src/ion/LinearScan.h
@@ -180,33 +180,29 @@ class LiveInterval : public InlineListNo
         Range(CodePosition f, CodePosition t)
           : from(f),
             to(t)
         { }
         CodePosition from;
         CodePosition to;
     };
 
-    enum Flag {
-        FIXED
-    };
-
   private:
     Vector<Range, 1, IonAllocPolicy> ranges_;
     LAllocation alloc_;
     VirtualRegister *reg_;
     uint32 index_;
-    uint32 flags_;
+    int priority_;
 
   public:
 
     LiveInterval(VirtualRegister *reg, uint32 index)
       : reg_(reg),
         index_(index),
-        flags_(0)
+        priority_(0)
     { }
 
     bool addRange(CodePosition from, CodePosition to);
 
     void setFrom(CodePosition from);
 
     CodePosition start();
 
@@ -229,32 +225,32 @@ class LiveInterval : public InlineListNo
     void setAllocation(LAllocation alloc) {
         alloc_ = alloc;
     }
 
     VirtualRegister *reg() {
         return reg_;
     }
 
-    bool hasFlag(Flag flag) const {
-        return !!(flags_ & (1 << (uint32)flag));
-    }
-
-    void setFlag(Flag flag) {
-        flags_ |= (1 << (uint32)flag);
-    }
-
     uint32 index() const {
         return index_;
     }
 
     void setIndex(uint32 index) {
         index_ = index;
     }
 
+    int priority() const {
+        return priority_;
+    }
+
+    void setPriority(int priority) {
+        priority_ = priority;
+    }
+
     bool splitFrom(CodePosition pos, LiveInterval *after);
 };
 
 /*
  * Represents all of the register allocation state associated with a virtual
  * register, including all associated intervals and pointers to relevant LIR
  * structures.
  */
@@ -484,16 +480,17 @@ class LinearScanAllocator
     bool assign(LAllocation allocation);
     bool spill();
     void finishInterval(LiveInterval *interval);
     Register findBestFreeRegister();
     Register findBestBlockedRegister();
     bool canCoexist(LiveInterval *a, LiveInterval *b);
     LMoveGroup *getMoveGroupBefore(CodePosition pos);
     bool moveBefore(CodePosition pos, LiveInterval *from, LiveInterval *to);
+    void setIntervalPriority(LiveInterval *interval);
 
 #ifdef DEBUG
     void validateIntervals();
     void validateAllocations();
 #else
     inline void validateIntervals() { };
     inline void validateAllocations() { };
 #endif