Bug 670628: Add floating point support to linear scan register allocator. r=dvander
author      Andrew Drake <adrake@adrake.org>
date        Fri, 12 Aug 2011 16:26:39 -0700
changeset   108727  1e2078273debc9d4c3bc0df278fe566c634cd789
parent      108726  8d3e5e4be9333e5bc4aace2ead98595c4d2b095e
child       108728  8f60694f1ff5c90ee79ace309bc7f3ab171f32d5
push id     2248
push user   akeybl@mozilla.com
push date   Mon, 08 Oct 2012 19:23:44 +0000
reviewers   dvander
bugs        670628
milestone   8.0a1
js/src/ion/IonRegisters.h
js/src/ion/LinearScan.cpp
js/src/ion/LinearScan.h
js/src/ion/x86/LIR-x86.h
js/src/ion/x86/Lowering-x86.cpp
--- a/js/src/ion/IonRegisters.h
+++ b/js/src/ion/IonRegisters.h
@@ -110,32 +110,49 @@ struct FloatRegister {
         return code_ != other.code_;
     }
     bool allocatable() const {
         return !!((1 << code()) & FloatRegisters::AllocatableMask);
     }
 };
 
 struct AnyRegister {
+    typedef uint32 Code;
+
+    static const uint32 Total = Registers::Total + FloatRegisters::Total;
+    static const uint32 Invalid = UINT_MAX;
+
     union {
         Registers::Code gpr_;
         FloatRegisters::Code fpu_;
     };
     bool isFloat_;
 
     AnyRegister()
     { }
     explicit AnyRegister(Register gpr) {
         gpr_ = gpr.code();
         isFloat_ = false;
     }
     explicit AnyRegister(FloatRegister fpu) {
         fpu_ = fpu.code();
         isFloat_ = true;
     }
+    static AnyRegister FromCode(uint32 i) {
+        JS_ASSERT(i < Total);
+        AnyRegister r;
+        if (i < Registers::Total) {
+            r.gpr_ = Register::Code(i);
+            r.isFloat_ = false;
+        } else {
+            r.fpu_ = FloatRegister::Code(i - Registers::Total);
+            r.isFloat_ = true;
+        }
+        return r;
+    }
     bool isFloat() const {
         return isFloat_;
     }
     Register gpr() const {
         JS_ASSERT(!isFloat());
         return Register::FromCode(gpr_);
     }
     FloatRegister fpu() const {
@@ -152,16 +169,26 @@ struct AnyRegister {
                ? (!other.isFloat() || fpu_ != other.fpu_)
                : (other.isFloat() || gpr_ != other.gpr_);
     }
     bool allocatable() const {
         return isFloat()
                ? FloatRegister::FromCode(fpu_).allocatable()
                : Register::FromCode(gpr_).allocatable();
     }
+    const char *name() const {
+        return isFloat()
+               ? FloatRegister::FromCode(fpu_).name()
+               : Register::FromCode(gpr_).name();
+    }
+    Code code() const {
+        return isFloat()
+               ? fpu_ + Registers::Total
+               : gpr_;
+    }
 };
 
 template <typename T>
 class TypedRegisterSet
 {
     uint32 bits_;
 
     explicit TypedRegisterSet(uint32 bits)
@@ -307,27 +334,25 @@ class AnyRegisterIterator
     uint32 code_;
 
   public:
     AnyRegisterIterator() : code_(0)
     { }
     AnyRegisterIterator(const AnyRegisterIterator &other) : code_(other.code_)
     { }
     bool more() const {
-        return code_ < Registers::Total + FloatRegisters::Total;
+        return code_ < AnyRegister::Total;
     }
     AnyRegisterIterator operator ++(int) {
         AnyRegisterIterator old(*this);
         code_++;
         return old;
     }
     AnyRegister operator *() const {
-        if (code_ < Registers::Total)
-            return AnyRegister(Register::FromCode(code_));
-        return AnyRegister(FloatRegister::FromCode(code_ - Registers::Total));
+        return AnyRegister::FromCode(code_);
     }
 };
 
 } // namespace ion
 } // namespace js
 
 #endif // jsion_cpu_registers_h__
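
The FromCode()/code() pair added above defines a single flat code space: GPR
codes occupy [0, Registers::Total) and float codes occupy [Registers::Total,
AnyRegister::Total). A minimal sketch (not part of the patch) of the
round-trip this guarantees, which the allocator's freeUntilPos/nextUsePos
arrays below depend on:

    // Exercises only methods added in this patch; assumes IonRegisters.h.
    for (uint32 i = 0; i < AnyRegister::Total; i++) {
        AnyRegister r = AnyRegister::FromCode(i);
        JS_ASSERT(r.code() == i);                           // lossless round-trip
        JS_ASSERT(r.isFloat() == (i >= Registers::Total));  // class from range
    }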
 
--- a/js/src/ion/LinearScan.cpp
+++ b/js/src/ion/LinearScan.cpp
@@ -53,19 +53,21 @@ UseCompatibleWith(const LUse *use, LAllo
 {
     switch (use->policy()) {
       case LUse::ANY:
       case LUse::KEEPALIVE:
         return alloc.isRegister() || alloc.isMemory();
       case LUse::REGISTER:
         return alloc.isRegister();
       case LUse::FIXED:
-        if (!alloc.isGeneralReg())
-            return false;
-        return alloc.toGeneralReg()->reg().code() == use->registerCode();
+        if (alloc.isGeneralReg())
+            return alloc.toGeneralReg()->reg().code() == use->registerCode();
+        if (alloc.isFloatReg())
+            return alloc.toFloatReg()->reg().code() == use->registerCode();
+        return false;
       default:
         JS_NOT_REACHED("Unknown use policy");
     }
     return false;
 }
 
 #ifdef DEBUG
 static bool
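
Worth noting for the FIXED case above: raw per-class register codes overlap,
so use->registerCode() only identifies a register once the allocation's class
is known. A hypothetical illustration (not in the patch), using only types
from IonRegisters.h:

    // Raw codes collide across classes (on x86, code 0 is both eax and xmm0):
    JS_ASSERT(Register::FromCode(0).code() == FloatRegister::FromCode(0).code());
    // AnyRegister's unified code space keeps the two registers distinct:
    JS_ASSERT(AnyRegister(Register::FromCode(0)) !=
              AnyRegister(FloatRegister::FromCode(0)));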
@@ -330,18 +332,16 @@ const CodePosition CodePosition::MIN(0);
 
 /*
  * This function pre-allocates and initializes as much global state as possible
  * to avoid littering the algorithms with memory management cruft.
  */
 bool
 LinearScanAllocator::createDataStructures()
 {
-    allowedRegs = RegisterSet::All();
-
     liveIn = lir->mir()->allocate<BitSet*>(graph.numBlockIds());
     if (!liveIn)
         return false;
 
     if (!vregs.init(lir->mir(), graph.numVirtualRegisters()))
         return false;
 
     // Build virtual register objects
@@ -356,22 +356,20 @@ LinearScanAllocator::createDataStructure
                     if (!vregs[reg].init(reg, block, *ins, ins->getDef(j)))
                         return false;
                 }
             }
             if (!foundRealDef) {
                 if (!vregs[*ins].init(ins->id(), block, *ins, NULL))
                     return false;
             }
-            if (ins->numTemps()) {
-                for (size_t j = 0; j < ins->numTemps(); j++) {
-                    LDefinition *def = ins->getTemp(j);
-                    if (!vregs[def].init(def->virtualRegister(), block, *ins, def, true))
-                        return false;
-                }
+            for (size_t j = 0; j < ins->numTemps(); j++) {
+                LDefinition *def = ins->getTemp(j);
+                if (!vregs[def].init(def->virtualRegister(), block, *ins, def, true))
+                    return false;
             }
         }
         for (size_t j = 0; j < block->numPhis(); j++) {
             LPhi *phi = block->getPhi(j);
             LDefinition *def = phi->getDef(0);
             if (!vregs[def].init(phi->id(), block, phi, def))
                 return false;
         }
@@ -642,17 +640,17 @@ LinearScanAllocator::allocateRegisters()
             }
             if (policy == LDefinition::MUST_REUSE_INPUT) {
                 IonSpew(IonSpew_LSRA, " Definition has 'must reuse input' policy.");
                 LInstruction *ins = current->reg()->ins();
                 JS_ASSERT(ins->numOperands() > 0);
                 JS_ASSERT(ins->getOperand(0)->isUse());
                 LiveInterval *inputInterval = vregs[ins->getOperand(0)].intervalFor(inputOf(ins));
                 JS_ASSERT(inputInterval);
-                JS_ASSERT(inputInterval->getAllocation()->isGeneralReg());
+                JS_ASSERT(inputInterval->getAllocation()->isRegister());
                 if (!assign(*inputInterval->getAllocation()))
                     return false;
                 continue;
             }
             if (policy == LDefinition::DEFAULT) {
                 if (current->reg()->ins()->isPhi()) {
                     mustHaveRegister = false;
                     canSpillOthers = false;
@@ -679,18 +677,24 @@ LinearScanAllocator::allocateRegisters()
                     } else {
                         JS_ASSERT(pol == LUse::ANY || pol == LUse::KEEPALIVE);
                     }
                 }
             }
 
             // Handle the fixed constraint if present
             if (fixedOp) {
-                if (!assign(LGeneralReg(Register::FromCode(fixedOp->use->registerCode()))))
-                    return false;
+                uint32 code = fixedOp->use->registerCode();
+                if (vregs[fixedOp->use].def()->type() == LDefinition::DOUBLE) {
+                    if (!assign(LFloatReg(FloatRegister::FromCode(code))))
+                        return false;
+                } else {
+                    if (!assign(LGeneralReg(Register::FromCode(code))))
+                        return false;
+                }
                 continue;
             }
         }
 
         // Find the first use of this register
         firstUse = current->reg()->nextUseAfter(position);
         if (firstUse)
             firstUsePos = inputOf(firstUse->ins);
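
The class dispatch in the fixed-constraint hunk above recurs throughout the
patch: the register class is always derived from the defining virtual
register's type. A hypothetical helper (not part of the patch) capturing the
pattern:

    // Hypothetical: build a fixed allocation in the class implied by the type.
    static LAllocation
    FixedAllocationFor(uint32 code, LDefinition::Type type)
    {
        if (type == LDefinition::DOUBLE)
            return LFloatReg(FloatRegister::FromCode(code));
        return LGeneralReg(Register::FromCode(code));
    }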
@@ -703,27 +707,27 @@ LinearScanAllocator::allocateRegisters()
                 return false;
             continue;
         }
 
         // Try to allocate a free register
         IonSpew(IonSpew_LSRA, " Attempting free register allocation");
 
         CodePosition bestFreeUntil;
-        Register::Code bestCode = findBestFreeRegister(&bestFreeUntil);
-        if (bestCode != Register::Codes::Invalid) {
-            Register best = Register::FromCode(bestCode);
+        AnyRegister::Code bestCode = findBestFreeRegister(&bestFreeUntil);
+        if (bestCode != AnyRegister::Invalid) {
+            AnyRegister best = AnyRegister::FromCode(bestCode);
             IonSpew(IonSpew_LSRA, "  Decided best register was %s", best.name());
 
             // Split when the register is next needed if necessary
             if (bestFreeUntil <= current->end()) {
                 if (!splitInterval(current, bestFreeUntil))
                     return false;
             }
-            if (!assign(LGeneralReg(best)))
+            if (!assign(LAllocation(best)))
                 return false;
 
             continue;
         }
 
         IonSpew(IonSpew_LSRA, "  Unable to allocate free register");
 
         // We may need to allocate a blocked register
@@ -736,23 +740,21 @@ LinearScanAllocator::allocateRegisters()
 
         IonSpew(IonSpew_LSRA, " Attempting blocked register allocation");
 
         // If we absolutely need a register or our next use is closer than the
         // selected blocking register then we spill the blocker. Otherwise, we
         // spill the current interval.
         CodePosition bestNextUsed;
         bestCode = findBestBlockedRegister(&bestNextUsed);
-        if (bestCode != Register::Codes::Invalid &&
-            (mustHaveRegister || firstUsePos < bestNextUsed))
-        {
-            Register best = Register::FromCode(bestCode);
+        if (bestCode != AnyRegister::Invalid && (mustHaveRegister || firstUsePos < bestNextUsed)) {
+            AnyRegister best = AnyRegister::FromCode(bestCode);
             IonSpew(IonSpew_LSRA, "  Decided best register was %s", best.name());
 
-            if (!assign(LGeneralReg(best)))
+            if (!assign(LAllocation(best)))
                 return false;
 
             continue;
         }
 
         IonSpew(IonSpew_LSRA, "  No registers available to spill");
         JS_ASSERT(!mustHaveRegister);
 
@@ -927,33 +929,31 @@ LinearScanAllocator::splitInterval(LiveI
 
 /*
  * Assign the current interval the given allocation, splitting conflicting
  * intervals as necessary to make the allocation stick.
  */
 bool
 LinearScanAllocator::assign(LAllocation allocation)
 {
-    if (allocation.isGeneralReg())
-        IonSpew(IonSpew_LSRA, "Assigning register %s", allocation.toGeneralReg()->reg().name());
+    if (allocation.isRegister())
+        IonSpew(IonSpew_LSRA, "Assigning register %s", allocation.toRegister().name());
     current->setAllocation(allocation);
 
     // Split this interval at the next incompatible one
     CodePosition toSplit = current->reg()->nextIncompatibleUseAfter(current->start(), allocation);
     if (toSplit <= current->end()) {
         if (!splitInterval(current, toSplit))
             return false;
     }
 
-    if (allocation.isGeneralReg()) {
+    if (allocation.isRegister()) {
         // Split the blocking interval if it exists
         for (IntervalIterator i(active.begin()); i != active.end(); i++) {
-            if (i->getAllocation()->isGeneralReg() &&
-                i->getAllocation()->toGeneralReg()->reg() == allocation.toGeneralReg()->reg())
-            {
+            if (i->getAllocation()->isRegister() && *i->getAllocation() == allocation) {
                 IonSpew(IonSpew_LSRA, " Splitting active interval %u = [%u, %u]",
                         i->reg()->ins()->id(), i->start().pos(), i->end().pos());
 
                 JS_ASSERT(i->start() != current->start());
                 JS_ASSERT(i->covers(current->start()));
 
                 if (!splitInterval(*i, current->start()))
@@ -963,19 +963,17 @@ LinearScanAllocator::assign(LAllocation 
                 active.removeAt(i);
                 finishInterval(it);
                 break;
             }
         }
 
         // Split any inactive intervals at the next live point
         for (IntervalIterator i(inactive.begin()); i != inactive.end(); ) {
-            if (i->getAllocation()->isGeneralReg() &&
-                i->getAllocation()->toGeneralReg()->reg() == allocation.toGeneralReg()->reg())
-            {
+            if (i->getAllocation()->isRegister() && *i->getAllocation() == allocation) {
                 IonSpew(IonSpew_LSRA, " Splitting inactive interval %u = [%u, %u]",
                         i->reg()->ins()->id(), i->start().pos(), i->end().pos());
 
                 LiveInterval *it = *i;
                 CodePosition nextActive = it->nextCoveredAfter(current->start());
                 JS_ASSERT(nextActive != CodePosition::MIN);
 
                 if (!splitInterval(it, nextActive))
@@ -1036,129 +1034,139 @@ LinearScanAllocator::finishInterval(Live
     handled.pushBack(interval);
 }
 
 /*
  * This function locates a register which may be assigned by the register
  * allocator and is not assigned to any active interval. The register which
  * is free for the longest period of time is then returned.
  */
-Register::Code
+AnyRegister::Code
 LinearScanAllocator::findBestFreeRegister(CodePosition *freeUntil)
 {
     IonSpew(IonSpew_LSRA, "  Computing freeUntilPos");
 
     // Compute free-until positions for all registers
-    CodePosition freeUntilPos[Registers::Total];
-    for (size_t i = 0; i < Registers::Total; i++) {
-        if (allowedRegs.has(Register::FromCode(i)))
+    CodePosition freeUntilPos[AnyRegister::Total];
+    bool needFloat = current->reg()->def()->type() == LDefinition::DOUBLE;
+    for (size_t i = 0; i < AnyRegister::Total; i++) {
+        AnyRegister reg = AnyRegister::FromCode(i);
+        if (reg.allocatable() && reg.isFloat() == needFloat)
             freeUntilPos[i] = CodePosition::MAX;
     }
     for (IntervalIterator i(active.begin()); i != active.end(); i++) {
-        if (i->getAllocation()->isGeneralReg()) {
-            Register reg = i->getAllocation()->toGeneralReg()->reg();
+        if (i->getAllocation()->isRegister()) {
+            AnyRegister reg = i->getAllocation()->toRegister();
             IonSpew(IonSpew_LSRA, "   Register %s not free", reg.name());
             freeUntilPos[reg.code()] = CodePosition::MIN;
         }
     }
     for (IntervalIterator i(inactive.begin()); i != inactive.end(); i++) {
-        if (i->getAllocation()->isGeneralReg()) {
-            Register reg = i->getAllocation()->toGeneralReg()->reg();
+        if (i->getAllocation()->isRegister()) {
+            AnyRegister reg = i->getAllocation()->toRegister();
             CodePosition pos = current->intersect(*i);
             if (pos != CodePosition::MIN && pos < freeUntilPos[reg.code()]) {
                 freeUntilPos[reg.code()] = pos;
                 IonSpew(IonSpew_LSRA, "   Register %s free until %u", reg.name(), pos.pos());
             }
         }
     }
 
-    Register::Code bestCode = Registers::Invalid;
+    AnyRegister::Code bestCode = AnyRegister::Invalid;
     if (current->index()) {
         // As an optimization, use the allocation from the previous interval if
         // it is available.
         LiveInterval *previous = current->reg()->getInterval(current->index() - 1);
-        if (previous->getAllocation()->isGeneralReg()) {
-            Register prevReg = previous->getAllocation()->toGeneralReg()->reg();
+        if (previous->getAllocation()->isRegister()) {
+            AnyRegister prevReg = previous->getAllocation()->toRegister();
             if (freeUntilPos[prevReg.code()] != CodePosition::MIN)
                 bestCode = prevReg.code();
         }
     }
-    if (bestCode == Registers::Invalid && firstUse && firstUse->use->policy() == LUse::FIXED) {
+    if (bestCode == AnyRegister::Invalid && firstUse && firstUse->use->policy() == LUse::FIXED) {
         // As another optimization, use the upcoming fixed register if it is available.
-        uint32 fixedReg = firstUse->use->registerCode();
-        if (freeUntilPos[fixedReg] != CodePosition::MIN)
-            bestCode = Register::Code(fixedReg);
+        uint32 fixedRegCode = firstUse->use->registerCode();
+        AnyRegister fixedReg;
+        if (current->reg()->def()->type() == LDefinition::DOUBLE)
+            fixedReg = AnyRegister(FloatRegister::FromCode(fixedRegCode));
+        else
+            fixedReg = AnyRegister(Register::FromCode(fixedRegCode));
+
+        if (freeUntilPos[fixedReg.code()] != CodePosition::MIN)
+            bestCode = fixedReg.code();
     }
-    if (bestCode == Registers::Invalid) {
+    if (bestCode == AnyRegister::Invalid) {
         // Search freeUntilPos for largest value
-        for (uint32 i = 0; i < Registers::Total; i++) {
+        for (uint32 i = 0; i < AnyRegister::Total; i++) {
             if (freeUntilPos[i] == CodePosition::MIN)
                 continue;
-            if (bestCode == Registers::Invalid || freeUntilPos[i] > freeUntilPos[bestCode])
-                bestCode = Register::Code(i);
+            if (bestCode == AnyRegister::Invalid || freeUntilPos[i] > freeUntilPos[bestCode])
+                bestCode = AnyRegister::Code(i);
         }
     }
 
-    if (bestCode != Registers::Invalid)
+    if (bestCode != AnyRegister::Invalid)
         *freeUntil = freeUntilPos[bestCode];
     return bestCode;
 }
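
// Worked example (hypothetical positions, not from the patch): allocating a
// DOUBLE interval on x86 where xmm1 is active and xmm2 is inactive but
// intersects current at position 40:
//
//   freeUntilPos[xmm1] = CodePosition::MIN   (active, never free)
//   freeUntilPos[xmm2] = 40                  (free until the intersection)
//   freeUntilPos[xmm3] = CodePosition::MAX   (no conflicting interval)
//   freeUntilPos[eax]  = (default)           (wrong class, skipped by search)
//
// Absent a previous-interval or fixed-use hint, the largest entry wins:
// xmm3 is returned with *freeUntil = CodePosition::MAX.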
 
 /*
  * This function locates a register which is assigned to an active interval
  * and returns the one whose next use is furthest away. As a side effect,
  * the nextUsePos array is updated with the next use position of all active
  * intervals for use elsewhere in the algorithm.
  */
-Register::Code
+AnyRegister::Code
 LinearScanAllocator::findBestBlockedRegister(CodePosition *nextUsed)
 {
     IonSpew(IonSpew_LSRA, "  Computing nextUsePos");
 
     // Compute next-used positions for all registers
-    CodePosition nextUsePos[Registers::Total];
-    for (size_t i = 0; i < Registers::Total; i++) {
-        if (allowedRegs.has(Register::FromCode(i)))
+    CodePosition nextUsePos[AnyRegister::Total];
+    bool needFloat = current->reg()->def()->type() == LDefinition::DOUBLE;
+    for (size_t i = 0; i < AnyRegister::Total; i++) {
+        AnyRegister reg = AnyRegister::FromCode(i);
+        if (reg.allocatable() && reg.isFloat() == needFloat)
             nextUsePos[i] = CodePosition::MAX;
     }
     for (IntervalIterator i(active.begin()); i != active.end(); i++) {
-        if (i->getAllocation()->isGeneralReg()) {
-            Register reg = i->getAllocation()->toGeneralReg()->reg();
+        if (i->getAllocation()->isRegister()) {
+            AnyRegister reg = i->getAllocation()->toRegister();
             if (i->start() == current->start()) {
                 nextUsePos[reg.code()] = CodePosition::MIN;
                 IonSpew(IonSpew_LSRA, "   Disqualifying %s due to recency", reg.name());
-            } else {
+            } else if (nextUsePos[reg.code()] != CodePosition::MIN) {
                 nextUsePos[reg.code()] = i->reg()->nextUsePosAfter(current->start());
                 IonSpew(IonSpew_LSRA, "   Register %s next used %u", reg.name(),
                         nextUsePos[reg.code()].pos());
             }
         }
     }
     for (IntervalIterator i(inactive.begin()); i != inactive.end(); i++) {
-        if (i->getAllocation()->isGeneralReg()) {
-            Register reg = i->getAllocation()->toGeneralReg()->reg();
+        if (i->getAllocation()->isRegister()) {
+            AnyRegister reg = i->getAllocation()->toRegister();
             CodePosition pos = i->reg()->nextUsePosAfter(current->start());
-            JS_ASSERT(i->covers(pos));
+            JS_ASSERT(i->covers(pos) || pos == CodePosition::MAX);
             if (pos < nextUsePos[reg.code()]) {
                 nextUsePos[reg.code()] = pos;
                 IonSpew(IonSpew_LSRA, "   Register %s next used %u", reg.name(), pos.pos());
             }
         }
     }
 
     // Search nextUsePos for largest value
-    Register::Code bestCode = Register::Codes::Invalid;
-    for (size_t i = 0; i < Registers::Total; i++) {
+    AnyRegister::Code bestCode = AnyRegister::Invalid;
+    for (size_t i = 0; i < AnyRegister::Total; i++) {
         if (nextUsePos[i] == CodePosition::MIN)
             continue;
-        if (bestCode == Register::Codes::Invalid || nextUsePos[i] > nextUsePos[bestCode])
-            bestCode = Register::Code(i);
+        if (bestCode == AnyRegister::Invalid || nextUsePos[i] > nextUsePos[bestCode])
+            bestCode = AnyRegister::Code(i);
     }
 
-    if (bestCode != Register::Codes::Invalid)
+    if (bestCode != AnyRegister::Invalid)
         *nextUsed = nextUsePos[bestCode];
     return bestCode;
 }
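
// Worked example (hypothetical positions, not from the patch): if eax's
// interval is next used at position 50 and ecx's at 80, this returns ecx
// with *nextUsed = 80. The caller in allocateRegisters then spills ecx's
// interval only if current must have a register or is itself used before
// position 80; otherwise current is spilled instead.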
 
 /*
  * Two intervals can coexist if any of the following conditions is met:
  *
  *   - The intervals do not intersect.
@@ -1169,21 +1177,18 @@ LinearScanAllocator::findBestBlockedRegi
  * Intuitively, it is a bug if any allocated intervals exist which cannot
  * coexist.
  */
 bool
 LinearScanAllocator::canCoexist(LiveInterval *a, LiveInterval *b)
 {
     LAllocation *aa = a->getAllocation();
     LAllocation *ba = b->getAllocation();
-    if (aa->isGeneralReg() && ba->isGeneralReg() &&
-        aa->toGeneralReg()->reg() == ba->toGeneralReg()->reg())
-    {
+    if (aa->isRegister() && ba->isRegister() && aa->toRegister() == ba->toRegister())
         return a->intersect(b) == CodePosition::MIN;
-    }
     return true;
 }
 
 LMoveGroup *
 LinearScanAllocator::getMoveGroupBefore(CodePosition pos)
 {
     VirtualRegister *vreg = &vregs[pos];
     JS_ASSERT(vreg->ins());
@@ -1256,17 +1261,17 @@ LinearScanAllocator::validateIntervals()
         }
         for (IntervalIterator j(inactive.begin()); j != i; j++)
             JS_ASSERT(canCoexist(*i, *j));
     }
 
     for (IntervalIterator i(handled.begin()); i != handled.end(); i++) {
         JS_ASSERT(!i->getAllocation()->isUse());
         JS_ASSERT(i->numRanges() > 0);
-        if (i->getAllocation()->isGeneralReg()) {
+        if (i->getAllocation()->isRegister()) {
             JS_ASSERT(i->end() < current->start());
             JS_ASSERT(!i->covers(current->start()));
         }
 
         for (IntervalIterator j(active.begin()); j != active.end(); j++)
             JS_ASSERT(*i != *j);
         for (IntervalIterator j(inactive.begin()); j != inactive.end(); j++)
             JS_ASSERT(*i != *j);
--- a/js/src/ion/LinearScan.h
+++ b/js/src/ion/LinearScan.h
@@ -457,17 +457,16 @@ class LinearScanAllocator
     // Computed information
     BitSet **liveIn;
     VirtualRegisterMap vregs;
 
     // Allocation state
     StackAssignment stackAssignment;
 
     // Run-time state
-    RegisterSet allowedRegs;
     UnhandledQueue unhandled;
     InlineList<LiveInterval> active;
     InlineList<LiveInterval> inactive;
     InlineList<LiveInterval> handled;
     LiveInterval *current;
     LOperand *firstUse;
     CodePosition firstUsePos;
 
@@ -476,18 +475,18 @@ class LinearScanAllocator
     bool allocateRegisters();
     bool resolveControlFlow();
     bool reifyAllocations();
 
     bool splitInterval(LiveInterval *interval, CodePosition pos);
     bool assign(LAllocation allocation);
     bool spill();
     void finishInterval(LiveInterval *interval);
-    Register::Code findBestFreeRegister(CodePosition *freeUntil);
-    Register::Code findBestBlockedRegister(CodePosition *nextUsed);
+    AnyRegister::Code findBestFreeRegister(CodePosition *freeUntil);
+    AnyRegister::Code findBestBlockedRegister(CodePosition *nextUsed);
     bool canCoexist(LiveInterval *a, LiveInterval *b);
     LMoveGroup *getMoveGroupBefore(CodePosition pos);
     bool moveBefore(CodePosition pos, LiveInterval *from, LiveInterval *to);
     void setIntervalPriority(LiveInterval *interval);
 
 #ifdef DEBUG
     void validateIntervals();
     void validateAllocations();
--- a/js/src/ion/x86/LIR-x86.h
+++ b/js/src/ion/x86/LIR-x86.h
@@ -65,19 +65,19 @@ class LBox : public LInstructionHelper<2
     }
 };
 
 class LBoxDouble : public LInstructionHelper<2, 1, 1>
 {
   public:
     LIR_HEADER(BoxDouble);
 
-    LBoxDouble(const LAllocation &in) {
+    LBoxDouble(const LAllocation &in, const LDefinition &temp) {
         setOperand(0, in);
-        setTemp(0, LDefinition(LDefinition::DOUBLE));
+        setTemp(0, temp);
     }
 };
 
 class LUnbox : public LInstructionHelper<1, 2, 0>
 {
     MIRType type_;
 
   public:
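
The LBoxDouble change above pairs with the lowering change below: the DOUBLE
temp is now produced by the LIR generator rather than constructed inline,
presumably so it is assigned a virtual register that the allocator's
temp-initialization loop (see LinearScan.cpp above) can track. The call
shape, taken from the hunk that follows:

    // LBoxDouble now receives its DOUBLE temp from the lowering phase:
    new LBoxDouble(use(inner), temp(LDefinition::DOUBLE))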
--- a/js/src/ion/x86/Lowering-x86.cpp
+++ b/js/src/ion/x86/Lowering-x86.cpp
@@ -91,17 +91,17 @@ LIRGeneratorX86::visitConstant(MConstant
 
 bool
 LIRGeneratorX86::visitBox(MBox *box)
 {
     MDefinition *inner = box->getOperand(0);
 
     // If the box wrapped a double, it needs a new register.
     if (inner->type() == MIRType_Double)
-        return defineBox(new LBoxDouble(use(inner)), box);
+        return defineBox(new LBoxDouble(use(inner), temp(LDefinition::DOUBLE)), box);
 
     if (!box->isEmittedAtUses())
         return emitAtUses(box);
 
     if (inner->isConstant())
         return defineBox(new LValue(inner->toConstant()->value()), box);
 
     LBox *lir = new LBox(use(inner), inner->type());