Bug 1067610 - Rewrite how the main data structures in the backtracking allocator are organized, r=sunfish.
☠☠ backed out by 5a277f82ff59 ☠☠
author Brian Hackett <bhackett1024@gmail.com>
Fri, 08 May 2015 11:42:10 -0600
changeset 274415 4963ecd92915f1eb341ec813a52dc4181f5cc711
parent 274414 863caf540d9a1762e8aafbc4193ab60b40eb960f
child 274416 bff319d49d583164d3fa8e48c614b364512688e6
push id 863
push user raliiev@mozilla.com
push date Mon, 03 Aug 2015 13:22:43 +0000
treeherder mozilla-release@f6321b14228d
reviewers sunfish
bugs 1067610
milestone 40.0a1
Bug 1067610 - Rewrite how the main data structures in the backtracking allocator are organized, r=sunfish.
js/src/jit-test/tests/saved-stacks/async-max-frame-count.js
js/src/jit/BacktrackingAllocator.cpp
js/src/jit/BacktrackingAllocator.h
js/src/jit/C1Spewer.cpp
js/src/jit/C1Spewer.h
js/src/jit/CodeGenerator.cpp
js/src/jit/InlineList.h
js/src/jit/JSONSpewer.cpp
js/src/jit/JSONSpewer.h
js/src/jit/JitSpewer.cpp
js/src/jit/LIR-Common.h
js/src/jit/LIR.cpp
js/src/jit/LiveRangeAllocator.cpp
js/src/jit/LiveRangeAllocator.h
js/src/jit/RegisterAllocator.cpp
js/src/jit/StupidAllocator.cpp
js/src/jit/arm/CodeGenerator-arm.cpp
js/src/jit/arm/CodeGenerator-arm.h
js/src/jit/mips/CodeGenerator-mips.cpp
js/src/jit/mips/CodeGenerator-mips.h
js/src/jit/shared/CodeGenerator-shared.h
js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
js/src/jit/x86-shared/CodeGenerator-x86-shared.h
js/src/jsexn.h
js/src/moz.build
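
For orientation, a minimal sketch of the data structure organization this patch introduces: each LiveRange now belongs simultaneously to its VirtualRegister and to a LiveBundle, and bundles (rather than per-register LiveIntervals) become the units the allocator assigns registers to. The names below mirror the patch, but the code is an illustrative approximation only; std::forward_list stands in for SpiderMonkey's intrusive InlineForwardList, CodePosition is reduced to an integer, and all other details are simplified.

#include <cstdint>
#include <forward_list>

using CodePosition = uint32_t;

struct LiveBundle;

// One contiguous piece of a virtual register's lifetime, linked into both its
// register's range list and (once assigned) a bundle's range list.
struct LiveRange {
    uint32_t vreg;                 // owning virtual register (0 = fixed range)
    CodePosition from, to;         // half-open interval [from, to)
    LiveBundle* bundle = nullptr;  // bundle this range currently belongs to

    LiveRange(uint32_t v, CodePosition f, CodePosition t) : vreg(v), from(f), to(t) {}
    bool covers(CodePosition pos) const { return from <= pos && pos < to; }
};

// Keep a list ordered by range start, in the spirit of the patch's InsertSortedList.
static void InsertSorted(std::forward_list<LiveRange*>& list, LiveRange* range) {
    auto prev = list.before_begin();
    for (auto it = list.begin(); it != list.end() && (*it)->from <= range->from; ++it)
        prev = it;
    list.insert_after(prev, range);
}

// A bundle groups ranges (possibly from several vregs) that should receive a
// single allocation; processBundle() in the patch operates on these.
struct LiveBundle {
    std::forward_list<LiveRange*> ranges;
    void addRange(LiveRange* range) { range->bundle = this; InsertSorted(ranges, range); }
};

// A virtual register keeps its own sorted view of the same LiveRange objects.
struct VirtualRegister {
    std::forward_list<LiveRange*> ranges;
    void addRange(LiveRange* range) { InsertSorted(ranges, range); }
    LiveRange* rangeFor(CodePosition pos) const {
        for (LiveRange* r : ranges) {
            if (r->covers(pos))
                return r;
        }
        return nullptr;
    }
};

The dual linking is visible below in LiveRange's bundleLink and registerLink members and in the paired LiveBundle::addRange / VirtualRegister::addRange methods.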
--- a/js/src/jit-test/tests/saved-stacks/async-max-frame-count.js
+++ b/js/src/jit-test/tests/saved-stacks/async-max-frame-count.js
@@ -8,17 +8,24 @@ function recur(n, limit) {
                                       saveStack(limit), "Recurse");
   }
   return saveStack(limit);
 }
 
 function checkRecursion(n, limit) {
   print("checkRecursion(" + uneval(n) + ", " + uneval(limit) + ")");
 
-  var stack = recur(n, limit);
+  try {
+    var stack = recur(n, limit);
+  } catch (e) {
+    // Some platforms, like ASAN builds, can end up overrecursing. Tolerate
+    // these failures.
+    assertEq(/too much recursion/.test("" + e), true);
+    return;
+  }
 
   // Async stacks are limited even if we didn't ask for a limit. There is a
   // default limit on frames attached on top of any synchronous frames. In this
   // case the synchronous frame is the last call to `recur`.
   if (limit == 0) {
     limit = defaultAsyncStackLimit + 1;
   }
 
--- a/js/src/jit/BacktrackingAllocator.cpp
+++ b/js/src/jit/BacktrackingAllocator.cpp
@@ -7,88 +7,755 @@
 #include "jit/BacktrackingAllocator.h"
 #include "jit/BitSet.h"
 
 using namespace js;
 using namespace js::jit;
 
 using mozilla::DebugOnly;
 
+/////////////////////////////////////////////////////////////////////
+// Utility
+/////////////////////////////////////////////////////////////////////
+
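+// Ordering predicates used by InsertSortedList below: uses are kept sorted by
+// code position, and ranges by their starting position.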
+static inline bool
+SortBefore(UsePosition* a, UsePosition* b)
+{
+    return a->pos <= b->pos;
+}
+
+static inline bool
+SortBefore(LiveRange::BundleLink* a, LiveRange::BundleLink* b)
+{
+    return LiveRange::get(a)->from() <= LiveRange::get(b)->from();
+}
+
+static inline bool
+SortBefore(LiveRange::RegisterLink* a, LiveRange::RegisterLink* b)
+{
+    return LiveRange::get(a)->from() <= LiveRange::get(b)->from();
+}
+
+template <typename T>
+static inline void
+InsertSortedList(InlineForwardList<T>& list, T* value)
+{
+    if (list.empty()) {
+        list.pushFront(value);
+        return;
+    }
+
+    if (SortBefore(list.back(), value)) {
+        list.pushBack(value);
+        return;
+    }
+
+    T* prev = nullptr;
+    for (InlineForwardListIterator<T> iter = list.begin(); iter; iter++) {
+        if (SortBefore(value, *iter))
+            break;
+        prev = *iter;
+    }
+
+    if (prev)
+        list.insertAfter(prev, value);
+    else
+        list.pushFront(value);
+}
+
+/////////////////////////////////////////////////////////////////////
+// LiveRange
+/////////////////////////////////////////////////////////////////////
+
+void
+LiveRange::addUse(UsePosition* use)
+{
+    MOZ_ASSERT(covers(use->pos));
+    InsertSortedList(uses_, use);
+}
+
+void
+LiveRange::distributeUses(LiveRange* other)
+{
+    MOZ_ASSERT(other->vreg() == vreg());
+    MOZ_ASSERT(this != other);
+
+    // Move over all uses which fit in |other|'s boundaries.
+    for (UsePositionIterator iter = usesBegin(); iter; ) {
+        UsePosition* use = *iter;
+        if (other->covers(use->pos)) {
+            uses_.removeAndIncrement(iter);
+            other->addUse(use);
+        } else {
+            iter++;
+        }
+    }
+
+    // Distribute the definition to |other| as well, if possible.
+    if (hasDefinition() && from() == other->from())
+        other->setHasDefinition();
+}
+
+bool
+LiveRange::contains(LiveRange* other) const
+{
+    return from() <= other->from() && to() >= other->to();
+}
+
+void
+LiveRange::intersect(LiveRange* other, Range* pre, Range* inside, Range* post) const
+{
+    MOZ_ASSERT(pre->empty() && inside->empty() && post->empty());
+
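+    // Carve this range into the pieces lying before, within, and after
+    // |other|. Any of the three output ranges may be left empty.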
+    CodePosition innerFrom = from();
+    if (from() < other->from()) {
+        if (to() < other->from()) {
+            *pre = range_;
+            return;
+        }
+        *pre = Range(from(), other->from());
+        innerFrom = other->from();
+    }
+
+    CodePosition innerTo = to();
+    if (to() > other->to()) {
+        if (from() >= other->to()) {
+            *post = range_;
+            return;
+        }
+        *post = Range(other->to(), to());
+        innerTo = other->to();
+    }
+
+    if (innerFrom != innerTo)
+        *inside = Range(innerFrom, innerTo);
+}
+
+/////////////////////////////////////////////////////////////////////
+// LiveBundle
+/////////////////////////////////////////////////////////////////////
+
+#ifdef DEBUG
+size_t
+LiveBundle::numRanges() const
+{
+    size_t count = 0;
+    for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++)
+        count++;
+    return count;
+}
+#endif // DEBUG
+
+LiveRange*
+LiveBundle::rangeFor(CodePosition pos) const
+{
+    for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+        if (range->covers(pos))
+            return range;
+    }
+    return nullptr;
+}
+
+void
+LiveBundle::addRange(LiveRange* range)
+{
+    MOZ_ASSERT(!range->bundle());
+    range->setBundle(this);
+    InsertSortedList(ranges_, &range->bundleLink);
+}
+
+bool
+LiveBundle::addRange(TempAllocator& alloc, uint32_t vreg, CodePosition from, CodePosition to)
+{
+    LiveRange* range = LiveRange::New(alloc, vreg, from, to);
+    if (!range)
+        return false;
+    addRange(range);
+    return true;
+}
+
+bool
+LiveBundle::addRangeAndDistributeUses(TempAllocator& alloc, LiveRange* oldRange,
+                                      CodePosition from, CodePosition to)
+{
+    LiveRange* range = LiveRange::New(alloc, oldRange->vreg(), from, to);
+    if (!range)
+        return false;
+    addRange(range);
+    oldRange->distributeUses(range);
+    return true;
+}
+
+/////////////////////////////////////////////////////////////////////
+// VirtualRegister
+/////////////////////////////////////////////////////////////////////
+
+bool
+VirtualRegister::addInitialRange(TempAllocator& alloc, CodePosition from, CodePosition to)
+{
+    MOZ_ASSERT(from < to);
+
+    // Mark [from,to) as a live range for this register during the initial
+    // liveness analysis, coalescing with any existing overlapping ranges.
+
+    LiveRange* prev = nullptr;
+    LiveRange* merged = nullptr;
+    for (LiveRange::RegisterLinkIterator iter(rangesBegin()); iter; ) {
+        LiveRange* existing = LiveRange::get(*iter);
+
+        if (from > existing->to()) {
+            // The new range should go after this one.
+            prev = existing;
+            iter++;
+            continue;
+        }
+
+        if (to.next() < existing->from()) {
+            // The new range should go before this one.
+            break;
+        }
+
+        if (!merged) {
+            // This is the first old range we've found that overlaps the new
+            // range. Extend this one to cover its union with the new range.
+            merged = existing;
+
+            if (from < existing->from())
+                existing->setFrom(from);
+            if (to > existing->to())
+                existing->setTo(to);
+
+            // Continue searching to see if any other old ranges can be
+            // coalesced with the new merged range.
+            iter++;
+            continue;
+        }
+
+        // Coalesce this range into the previous range we merged into.
+        MOZ_ASSERT(existing->from() >= merged->from());
+        if (existing->to() > merged->to())
+            merged->setTo(existing->to());
+
+        MOZ_ASSERT(!existing->hasDefinition());
+        existing->distributeUses(merged);
+        MOZ_ASSERT(!existing->hasUses());
+
+        ranges_.removeAndIncrement(iter);
+    }
+
+    if (!merged) {
+        // The new range does not overlap any existing range for the vreg.
+        LiveRange* range = LiveRange::New(alloc, vreg(), from, to);
+        if (!range)
+            return false;
+
+        if (prev)
+            ranges_.insertAfter(&prev->registerLink, &range->registerLink);
+        else
+            ranges_.pushFront(&range->registerLink);
+    }
+
+    return true;
+}
+
+void
+VirtualRegister::addInitialUse(UsePosition* use)
+{
+    LiveRange::get(*rangesBegin())->addUse(use);
+}
+
+void
+VirtualRegister::setInitialDefinition(CodePosition from)
+{
+    LiveRange* first = LiveRange::get(*rangesBegin());
+    MOZ_ASSERT(from >= first->from());
+    first->setFrom(from);
+    first->setHasDefinition();
+}
+
+LiveRange*
+VirtualRegister::rangeFor(CodePosition pos) const
+{
+    for (LiveRange::RegisterLinkIterator iter = rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+        if (range->covers(pos))
+            return range;
+    }
+    return nullptr;
+}
+
+void
+VirtualRegister::addRange(LiveRange* range)
+{
+    InsertSortedList(ranges_, &range->registerLink);
+}
+
+void
+VirtualRegister::removeRange(LiveRange* range)
+{
+    for (LiveRange::RegisterLinkIterator iter = rangesBegin(); iter; iter++) {
+        LiveRange* existing = LiveRange::get(*iter);
+        if (existing == range) {
+            ranges_.removeAt(iter);
+            return;
+        }
+    }
+    MOZ_CRASH();
+}
+
+/////////////////////////////////////////////////////////////////////
+// BacktrackingAllocator
+/////////////////////////////////////////////////////////////////////
+
+// This function pre-allocates and initializes as much global state as possible
+// to avoid littering the algorithms with memory management cruft.
 bool
 BacktrackingAllocator::init()
 {
+    if (!RegisterAllocator::init())
+        return false;
+
+    liveIn = mir->allocate<BitSet>(graph.numBlockIds());
+    if (!liveIn)
+        return false;
+
+    callRanges = LiveBundle::New(alloc());
+    if (!callRanges)
+        return false;
+
+    size_t numVregs = graph.numVirtualRegisters();
+    if (!vregs.init(mir->alloc(), numVregs))
+        return false;
+    memset(&vregs[0], 0, sizeof(VirtualRegister) * numVregs);
+    for (uint32_t i = 0; i < numVregs; i++)
+        new(&vregs[i]) VirtualRegister();
+
+    // Build virtual register objects.
+    for (size_t i = 0; i < graph.numBlocks(); i++) {
+        if (mir->shouldCancel("Create data structures (main loop)"))
+            return false;
+
+        LBlock* block = graph.getBlock(i);
+        for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
+            for (size_t j = 0; j < ins->numDefs(); j++) {
+                LDefinition* def = ins->getDef(j);
+                if (def->isBogusTemp())
+                    continue;
+                vreg(def).init(*ins, def, /* isTemp = */ false);
+            }
+
+            for (size_t j = 0; j < ins->numTemps(); j++) {
+                LDefinition* def = ins->getTemp(j);
+                if (def->isBogusTemp())
+                    continue;
+                vreg(def).init(*ins, def, /* isTemp = */ true);
+            }
+        }
+        for (size_t j = 0; j < block->numPhis(); j++) {
+            LPhi* phi = block->getPhi(j);
+            LDefinition* def = phi->getDef(0);
+            vreg(def).init(phi, def, /* isTemp = */ false);
+        }
+    }
+
     LiveRegisterSet remainingRegisters(allRegisters_.asLiveSet());
     while (!remainingRegisters.emptyGeneral()) {
         AnyRegister reg = AnyRegister(remainingRegisters.takeAnyGeneral());
         registers[reg.code()].allocatable = true;
     }
     while (!remainingRegisters.emptyFloat()) {
         AnyRegister reg = AnyRegister(remainingRegisters.takeAnyFloat());
         registers[reg.code()].allocatable = true;
     }
 
     LifoAlloc* lifoAlloc = mir->alloc().lifoAlloc();
     for (size_t i = 0; i < AnyRegister::Total; i++) {
         registers[i].reg = AnyRegister::FromCode(i);
         registers[i].allocations.setAllocator(lifoAlloc);
-
-        LiveInterval* fixed = fixedIntervals[i];
-        for (size_t j = 0; j < fixed->numRanges(); j++) {
-            AllocatedRange range(fixed, fixed->getRange(j));
-            if (!registers[i].allocations.insert(range))
-                return false;
-        }
     }
 
     hotcode.setAllocator(lifoAlloc);
 
     // Partition the graph into hot and cold sections, for helping to make
     // splitting decisions. Since we don't have any profiling data this is a
     // crapshoot, so just mark the bodies of inner loops as hot and everything
     // else as cold.
 
-    LiveInterval* hotcodeInterval = LiveInterval::New(alloc(), 0);
-
     LBlock* backedge = nullptr;
     for (size_t i = 0; i < graph.numBlocks(); i++) {
         LBlock* block = graph.getBlock(i);
 
         // If we see a loop header, mark the backedge so we know when we have
         // hit the end of the loop. Don't process the loop immediately, so that
         // if there is an inner loop we will ignore the outer backedge.
         if (block->mir()->isLoopHeader())
             backedge = block->mir()->backedge()->lir();
 
         if (block == backedge) {
             LBlock* header = block->mir()->loopHeaderOfBackedge()->lir();
-            CodePosition from = entryOf(header);
-            CodePosition to = exitOf(block).next();
-            if (!hotcodeInterval->addRange(from, to))
+            LiveRange* range = LiveRange::New(alloc(), 0, entryOf(header), exitOf(block).next());
+            if (!range || !hotcode.insert(range))
                 return false;
         }
     }
 
-    for (size_t i = 0; i < hotcodeInterval->numRanges(); i++) {
-        AllocatedRange range(hotcodeInterval, hotcodeInterval->getRange(i));
-        if (!hotcode.insert(range))
+    return true;
+}
+
+bool
+BacktrackingAllocator::addInitialFixedRange(AnyRegister reg, CodePosition from, CodePosition to)
+{
+    LiveRange* range = LiveRange::New(alloc(), 0, from, to);
+    return range && registers[reg.code()].allocations.insert(range);
+}
+
+#ifdef DEBUG
+// Returns true iff ins has a def/temp reusing the input allocation.
+static bool
+IsInputReused(LInstruction* ins, LUse* use)
+{
+    for (size_t i = 0; i < ins->numDefs(); i++) {
+        if (ins->getDef(i)->policy() == LDefinition::MUST_REUSE_INPUT &&
+            ins->getOperand(ins->getDef(i)->getReusedInput())->toUse() == use)
+        {
+            return true;
+        }
+    }
+
+    for (size_t i = 0; i < ins->numTemps(); i++) {
+        if (ins->getTemp(i)->policy() == LDefinition::MUST_REUSE_INPUT &&
+            ins->getOperand(ins->getTemp(i)->getReusedInput())->toUse() == use)
+        {
+            return true;
+        }
+    }
+
+    return false;
+}
+#endif
+
+/*
+ * This function builds up liveness ranges for all virtual registers
+ * defined in the function. Additionally, it populates the liveIn array with
+ * information about which registers are live at the beginning of a block, to
+ * aid resolution and reification in a later phase.
+ *
+ * The algorithm is based on the one published in:
+ *
+ * Wimmer, Christian, and Michael Franz. "Linear Scan Register Allocation on
+ *     SSA Form." Proceedings of the International Symposium on Code Generation
+ *     and Optimization. Toronto, Ontario, Canada, ACM. 2010. 170-79. PDF.
+ *
+ * The algorithm operates on blocks ordered such that dominators of a block
+ * are before the block itself, and such that all blocks of a loop are
+ * contiguous. It proceeds backwards over the instructions in this order,
+ * marking registers live at their uses, ending their live ranges at
+ * definitions, and recording which registers are live at the top of every
+ * block. To deal with loop backedges, registers live at the beginning of
+ * a loop gain a range covering the entire loop.
+ */
+bool
+BacktrackingAllocator::buildLivenessInfo()
+{
+    JitSpew(JitSpew_RegAlloc, "Beginning liveness analysis");
+
+    Vector<MBasicBlock*, 1, SystemAllocPolicy> loopWorkList;
+    BitSet loopDone(graph.numBlockIds());
+    if (!loopDone.init(alloc()))
+        return false;
+
+    for (size_t i = graph.numBlocks(); i > 0; i--) {
+        if (mir->shouldCancel("Build Liveness Info (main loop)"))
+            return false;
+
+        LBlock* block = graph.getBlock(i - 1);
+        MBasicBlock* mblock = block->mir();
+
+        BitSet& live = liveIn[mblock->id()];
+        new (&live) BitSet(graph.numVirtualRegisters());
+        if (!live.init(alloc()))
             return false;
+
+        // Propagate liveIn from our successors to us.
+        for (size_t i = 0; i < mblock->lastIns()->numSuccessors(); i++) {
+            MBasicBlock* successor = mblock->lastIns()->getSuccessor(i);
+            // Skip backedges, as we fix them up at the loop header.
+            if (mblock->id() < successor->id())
+                live.insertAll(liveIn[successor->id()]);
+        }
+
+        // Add successor phis.
+        if (mblock->successorWithPhis()) {
+            LBlock* phiSuccessor = mblock->successorWithPhis()->lir();
+            for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) {
+                LPhi* phi = phiSuccessor->getPhi(j);
+                LAllocation* use = phi->getOperand(mblock->positionInPhiSuccessor());
+                uint32_t reg = use->toUse()->virtualRegister();
+                live.insert(reg);
+            }
+        }
+
+        // Registers are assumed alive for the entire block; a define shortens
+        // the range to the point of definition.
+        for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) {
+            if (!vregs[*liveRegId].addInitialRange(alloc(), entryOf(block), exitOf(block).next()))
+                return false;
+        }
+
+        // Shorten the front end of ranges for live variables to their point of
+        // definition, if found.
+        for (LInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) {
+            // Calls may clobber registers, so force a spill and reload around the callsite.
+            if (ins->isCall()) {
+                for (AnyRegisterIterator iter(allRegisters_.asLiveSet()); iter.more(); iter++) {
+                    bool found = false;
+                    for (size_t i = 0; i < ins->numDefs(); i++) {
+                        if (ins->getDef(i)->isFixed() &&
+                            ins->getDef(i)->output()->aliases(LAllocation(*iter))) {
+                            found = true;
+                            break;
+                        }
+                    }
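+                    // This register is not a fixed output of the call, so
+                    // treat it as clobbered across the call site.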
+                    if (!found) {
+                        if (!addInitialFixedRange(*iter, outputOf(*ins), outputOf(*ins).next()))
+                            return false;
+                    }
+                }
+                if (!callRanges->addRange(alloc(), 0, outputOf(*ins), outputOf(*ins).next()))
+                    return false;
+            }
+            DebugOnly<bool> hasDoubleDef = false;
+            DebugOnly<bool> hasFloat32Def = false;
+            for (size_t i = 0; i < ins->numDefs(); i++) {
+                LDefinition* def = ins->getDef(i);
+                if (def->isBogusTemp())
+                    continue;
+#ifdef DEBUG
+                if (def->type() == LDefinition::DOUBLE)
+                    hasDoubleDef = true;
+                if (def->type() == LDefinition::FLOAT32)
+                    hasFloat32Def = true;
+#endif
+                CodePosition from = outputOf(*ins);
+
+                if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
+                    // MUST_REUSE_INPUT is implemented by allocating an output
+                    // register and moving the input to it. Register hints are
+                    // used to avoid unnecessary moves. We give the input an
+                    // LUse::ANY policy to avoid allocating a register for the
+                    // input.
+                    LUse* inputUse = ins->getOperand(def->getReusedInput())->toUse();
+                    MOZ_ASSERT(inputUse->policy() == LUse::REGISTER);
+                    MOZ_ASSERT(inputUse->usedAtStart());
+                    *inputUse = LUse(inputUse->virtualRegister(), LUse::ANY, /* usedAtStart = */ true);
+                }
+
+                if (!vreg(def).addInitialRange(alloc(), from, from.next()))
+                    return false;
+                vreg(def).setInitialDefinition(from);
+                live.remove(def->virtualRegister());
+            }
+
+            for (size_t i = 0; i < ins->numTemps(); i++) {
+                LDefinition* temp = ins->getTemp(i);
+                if (temp->isBogusTemp())
+                    continue;
+
+                // Normally temps are considered to cover both the input
+                // and output of the associated instruction. In some cases
+                // though we want to use a fixed register as both an input
+                // and clobbered register in the instruction, so watch for
+                // this and shorten the temp to cover only the output.
+                CodePosition from = inputOf(*ins);
+                if (temp->policy() == LDefinition::FIXED) {
+                    AnyRegister reg = temp->output()->toRegister();
+                    for (LInstruction::InputIterator alloc(**ins); alloc.more(); alloc.next()) {
+                        if (alloc->isUse()) {
+                            LUse* use = alloc->toUse();
+                            if (use->isFixedRegister()) {
+                                if (GetFixedRegister(vreg(use).def(), use) == reg)
+                                    from = outputOf(*ins);
+                            }
+                        }
+                    }
+                }
+
+                CodePosition to =
+                    ins->isCall() ? outputOf(*ins) : outputOf(*ins).next();
+
+                if (!vreg(temp).addInitialRange(alloc(), from, to))
+                    return false;
+                vreg(temp).setInitialDefinition(from);
+            }
+
+            DebugOnly<bool> hasUseRegister = false;
+            DebugOnly<bool> hasUseRegisterAtStart = false;
+
+            for (LInstruction::InputIterator inputAlloc(**ins); inputAlloc.more(); inputAlloc.next()) {
+                if (inputAlloc->isUse()) {
+                    LUse* use = inputAlloc->toUse();
+
+                    // Call uses should always be at-start or fixed, since
+                    // calls use all registers.
+                    MOZ_ASSERT_IF(ins->isCall() && !inputAlloc.isSnapshotInput(),
+                                  use->isFixedRegister() || use->usedAtStart());
+
+#ifdef DEBUG
+                    // Don't allow at-start call uses if there are temps of the same kind,
+                    // so that we don't assign the same register.
+                    if (ins->isCall() && use->usedAtStart()) {
+                        for (size_t i = 0; i < ins->numTemps(); i++)
+                            MOZ_ASSERT(vreg(ins->getTemp(i)).type() != vreg(use).type());
+                    }
+
+                    // If there are both useRegisterAtStart(x) and useRegister(y)
+                    // uses, we may assign the same register to both operands
+                    // (bug 772830). Don't allow this for now.
+                    if (use->policy() == LUse::REGISTER) {
+                        if (use->usedAtStart()) {
+                            if (!IsInputReused(*ins, use))
+                                hasUseRegisterAtStart = true;
+                        } else {
+                            hasUseRegister = true;
+                        }
+                    }
+                    MOZ_ASSERT(!(hasUseRegister && hasUseRegisterAtStart));
+#endif
+
+                    // Don't treat RECOVERED_INPUT uses as keeping the vreg alive.
+                    if (use->policy() == LUse::RECOVERED_INPUT)
+                        continue;
+
+                    // Fixed uses on calls are specially overridden to happen
+                    // at the input position.
+                    CodePosition to =
+                        (use->usedAtStart() || (ins->isCall() && use->isFixedRegister()))
+                        ? inputOf(*ins)
+                        : outputOf(*ins);
+                    if (use->isFixedRegister()) {
+                        LAllocation reg(AnyRegister::FromCode(use->registerCode()));
+                        for (size_t i = 0; i < ins->numDefs(); i++) {
+                            LDefinition* def = ins->getDef(i);
+                            if (def->policy() == LDefinition::FIXED && *def->output() == reg)
+                                to = inputOf(*ins);
+                        }
+                    }
+
+                    if (!vreg(use).addInitialRange(alloc(), entryOf(block), to.next()))
+                        return false;
+                    UsePosition* usePosition = new(alloc()) UsePosition(use, to);
+                    if (!usePosition)
+                        return false;
+                    vreg(use).addInitialUse(usePosition);
+                    live.insert(use->virtualRegister());
+                }
+            }
+        }
+
+        // Phis have simultaneous assignment semantics at block begin, so at
+        // the beginning of the block we can be sure that liveIn does not
+        // contain any phi outputs.
+        for (unsigned int i = 0; i < block->numPhis(); i++) {
+            LDefinition* def = block->getPhi(i)->getDef(0);
+            if (live.contains(def->virtualRegister())) {
+                live.remove(def->virtualRegister());
+            } else {
+                // This is a dead phi, so add a dummy range over all phis. This
+                // can go away if we have an earlier dead code elimination pass.
+                CodePosition entryPos = entryOf(block);
+                if (!vreg(def).addInitialRange(alloc(), entryPos, entryPos.next()))
+                    return false;
+            }
+        }
+
+        if (mblock->isLoopHeader()) {
+            // A divergence from the published algorithm is required here, as
+            // our block order does not guarantee that blocks of a loop are
+            // contiguous. As a result, a single live range spanning the
+            // loop is not possible. Additionally, we require liveIn in a later
+            // pass for resolution, so that must also be fixed up here.
+            MBasicBlock* loopBlock = mblock->backedge();
+            while (true) {
+                // Blocks must already have been visited to have a liveIn set.
+                MOZ_ASSERT(loopBlock->id() >= mblock->id());
+
+                // Add a range for this entire loop block
+                CodePosition from = entryOf(loopBlock->lir());
+                CodePosition to = exitOf(loopBlock->lir()).next();
+
+                for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) {
+                    if (!vregs[*liveRegId].addInitialRange(alloc(), from, to))
+                        return false;
+                }
+
+                // Fix up the liveIn set.
+                liveIn[loopBlock->id()].insertAll(live);
+
+                // Make sure we don't visit this node again
+                loopDone.insert(loopBlock->id());
+
+                // If this is the loop header, any predecessors are either the
+                // backedge or out of the loop, so skip any predecessors of
+                // this block
+                if (loopBlock != mblock) {
+                    for (size_t i = 0; i < loopBlock->numPredecessors(); i++) {
+                        MBasicBlock* pred = loopBlock->getPredecessor(i);
+                        if (loopDone.contains(pred->id()))
+                            continue;
+                        if (!loopWorkList.append(pred))
+                            return false;
+                    }
+                }
+
+                // Terminate loop if out of work.
+                if (loopWorkList.empty())
+                    break;
+
+                // Grab the next block off the work list, skipping any OSR block.
+                MBasicBlock* osrBlock = graph.mir().osrBlock();
+                while (!loopWorkList.empty()) {
+                    loopBlock = loopWorkList.popCopy();
+                    if (loopBlock != osrBlock)
+                        break;
+                }
+
+                // If end is reached without finding a non-OSR block, then no more work items were found.
+                if (loopBlock == osrBlock) {
+                    MOZ_ASSERT(loopWorkList.empty());
+                    break;
+                }
+            }
+
+            // Clear the done set for other loops
+            loopDone.clear();
+        }
+
+        MOZ_ASSERT_IF(!mblock->numPredecessors(), live.empty());
+    }
+
+    JitSpew(JitSpew_RegAlloc, "Liveness analysis complete");
+
+    if (JitSpewEnabled(JitSpew_RegAlloc)) {
+        dumpInstructions();
+
+        fprintf(stderr, "Live ranges by virtual register:\n");
+        dumpVregs();
     }
 
     return true;
 }
 
 bool
 BacktrackingAllocator::go()
 {
     JitSpew(JitSpew_RegAlloc, "Beginning register allocation");
 
-    if (!buildLivenessInfo())
+    if (!init())
         return false;
 
-    if (!init())
+    if (!buildLivenessInfo())
         return false;
 
     if (JitSpewEnabled(JitSpew_RegAlloc))
         dumpFixedRanges();
 
     if (!allocationQueue.reserve(graph.numVirtualRegisters() * 3 / 2))
         return false;
 
@@ -97,23 +764,23 @@ BacktrackingAllocator::go()
         return false;
     JitSpew(JitSpew_RegAlloc, "Grouping and queueing registers complete");
 
     if (JitSpewEnabled(JitSpew_RegAlloc))
         dumpRegisterGroups();
 
     JitSpew(JitSpew_RegAlloc, "Beginning main allocation loop");
 
-    // Allocate, spill and split register intervals until finished.
+    // Allocate, spill and split bundles until finished.
     while (!allocationQueue.empty()) {
         if (mir->shouldCancel("Backtracking Allocation"))
             return false;
 
         QueueItem item = allocationQueue.removeHighest();
-        if (item.interval ? !processInterval(item.interval) : !processGroup(item.group))
+        if (item.bundle ? !processBundle(item.bundle) : !processGroup(item.group))
             return false;
     }
     JitSpew(JitSpew_RegAlloc, "Main allocation loop complete");
 
     if (!pickStackSlots())
         return false;
 
     if (JitSpewEnabled(JitSpew_RegAlloc))
@@ -130,76 +797,77 @@ BacktrackingAllocator::go()
 
     if (!annotateMoveGroups())
         return false;
 
     return true;
 }
 
 static bool
-LifetimesOverlap(BacktrackingVirtualRegister* reg0, BacktrackingVirtualRegister* reg1)
+LifetimesOverlapIgnoreGroupExclude(VirtualRegister* reg0, VirtualRegister* reg1)
 {
-    // Registers may have been eagerly split in two, see tryGroupReusedRegister.
-    // In such cases, only consider the first interval.
-    MOZ_ASSERT(reg0->numIntervals() <= 2 && reg1->numIntervals() <= 2);
-
-    LiveInterval* interval0 = reg0->getInterval(0);
-    LiveInterval* interval1 = reg1->getInterval(0);
-
-    // Interval ranges are sorted in reverse order. The lifetimes overlap if
-    // any of their ranges overlap.
-    size_t index0 = 0, index1 = 0;
-    while (index0 < interval0->numRanges() && index1 < interval1->numRanges()) {
-        const LiveInterval::Range
-            *range0 = interval0->getRange(index0),
-            *range1 = interval1->getRange(index1);
-        if (range0->from >= range1->to)
-            index0++;
-        else if (range1->from >= range0->to)
-            index1++;
+    LiveRange::RegisterLinkIterator iter0 = reg0->rangesBegin(), iter1 = reg1->rangesBegin();
+    while (iter0 && iter1) {
+        LiveRange* range0 = LiveRange::get(*iter0);
+        LiveRange* range1 = LiveRange::get(*iter1);
+
+        // Registers may have had portions split off into their groupExclude bundle,
+        // see tryGroupReusedRegister. Ignore any overlap in these ranges.
+        if (range0 == reg0->groupExclude()) {
+            iter0++;
+            continue;
+        } else if (range1 == reg1->groupExclude()) {
+            iter1++;
+            continue;
+        }
+
+        if (range0->from() >= range1->to())
+            iter1++;
+        else if (range1->from() >= range0->to())
+            iter0++;
         else
             return true;
     }
 
     return false;
 }
 
 bool
-BacktrackingAllocator::canAddToGroup(VirtualRegisterGroup* group, BacktrackingVirtualRegister* reg)
+BacktrackingAllocator::canAddToGroup(VirtualRegisterGroup* group, VirtualRegister* reg)
 {
     for (size_t i = 0; i < group->registers.length(); i++) {
-        if (LifetimesOverlap(reg, &vregs[group->registers[i]]))
+        if (LifetimesOverlapIgnoreGroupExclude(reg, &vregs[group->registers[i]]))
             return false;
     }
     return true;
 }
 
 static bool
 IsArgumentSlotDefinition(LDefinition* def)
 {
     return def->policy() == LDefinition::FIXED && def->output()->isArgument();
 }
 
 static bool
 IsThisSlotDefinition(LDefinition* def)
 {
     return IsArgumentSlotDefinition(def) &&
-           def->output()->toArgument()->index() < THIS_FRAME_ARGSLOT + sizeof(Value);
+        def->output()->toArgument()->index() < THIS_FRAME_ARGSLOT + sizeof(Value);
 }
 
 bool
 BacktrackingAllocator::tryGroupRegisters(uint32_t vreg0, uint32_t vreg1)
 {
     // See if reg0 and reg1 can be placed in the same group, following the
     // restrictions imposed by VirtualRegisterGroup and any other registers
     // already grouped with reg0 or reg1.
-    BacktrackingVirtualRegister* reg0 = &vregs[vreg0];
-    BacktrackingVirtualRegister* reg1 = &vregs[vreg1];
-
-    if (!reg0->isCompatibleVReg(*reg1))
+    VirtualRegister* reg0 = &vregs[vreg0];
+    VirtualRegister* reg1 = &vregs[vreg1];
+
+    if (!reg0->isCompatible(*reg1))
         return true;
 
     // Registers which might spill to the frame's |this| slot can only be
     // grouped with other such registers. The frame's |this| slot must always
     // hold the |this| value, as required by JitFrame tracing and by the Ion
     // constructor calling convention.
     if (IsThisSlotDefinition(reg0->def()) || IsThisSlotDefinition(reg1->def())) {
         if (*reg0->def()->output() != *reg1->def()->output())
@@ -245,124 +913,135 @@ BacktrackingAllocator::tryGroupRegisters
         if (!canAddToGroup(group0, reg1))
             return true;
         if (!group0->registers.append(vreg1))
             return false;
         reg1->setGroup(group0);
         return true;
     }
 
-    if (LifetimesOverlap(reg0, reg1))
+    if (LifetimesOverlapIgnoreGroupExclude(reg0, reg1))
         return true;
 
     VirtualRegisterGroup* group = new(alloc()) VirtualRegisterGroup(alloc());
     if (!group->registers.append(vreg0) || !group->registers.append(vreg1))
         return false;
 
     reg0->setGroup(group);
     reg1->setGroup(group);
     return true;
 }
 
+static inline LDefinition*
+FindReusingDefinition(LNode* ins, LAllocation* alloc)
+{
+    for (size_t i = 0; i < ins->numDefs(); i++) {
+        LDefinition* def = ins->getDef(i);
+        if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
+            ins->getOperand(def->getReusedInput()) == alloc)
+            return def;
+    }
+    for (size_t i = 0; i < ins->numTemps(); i++) {
+        LDefinition* def = ins->getTemp(i);
+        if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
+            ins->getOperand(def->getReusedInput()) == alloc)
+            return def;
+    }
+    return nullptr;
+}
+
 bool
 BacktrackingAllocator::tryGroupReusedRegister(uint32_t def, uint32_t use)
 {
-    BacktrackingVirtualRegister& reg = vregs[def];
-    BacktrackingVirtualRegister& usedReg = vregs[use];
+    VirtualRegister& reg = vregs[def];
+    VirtualRegister& usedReg = vregs[use];
 
     // reg is a vreg which reuses its input usedReg for its output physical
     // register. Try to group reg with usedReg if at all possible, as avoiding
     // copies before reg's instruction is crucial for the quality of the
     // generated code (MUST_REUSE_INPUT is used by all arithmetic instructions
     // on x86/x64).
 
-    if (reg.intervalFor(inputOf(reg.ins()))) {
+    if (reg.rangeFor(inputOf(reg.ins()))) {
         MOZ_ASSERT(reg.isTemp());
         reg.setMustCopyInput();
         return true;
     }
 
-    if (!usedReg.intervalFor(outputOf(reg.ins()))) {
+    if (!usedReg.rangeFor(outputOf(reg.ins()))) {
         // The input is not live after the instruction, either in a safepoint
         // for the instruction or in subsequent code. The input and output
         // can thus be in the same group.
         return tryGroupRegisters(use, def);
     }
 
     // The input is live afterwards, either in future instructions or in a
     // safepoint for the reusing instruction. This is impossible to satisfy
     // without copying the input.
     //
-    // It may or may not be better to split the interval at the point of the
-    // definition, which may permit grouping. One case where it is definitely
-    // better to split is if the input never has any register uses after the
-    // instruction. Handle this splitting eagerly.
-
-    if (usedReg.numIntervals() != 1 ||
+    // It may or may not be better to split at the point of the definition,
+    // which may permit grouping. One case where it is definitely better to
+    // split is if the input never has any register uses after the instruction.
+    // Handle this splitting eagerly.
+
+    if (usedReg.groupExclude() ||
         (usedReg.def()->isFixed() && !usedReg.def()->output()->isRegister())) {
         reg.setMustCopyInput();
         return true;
     }
-    LiveInterval* interval = usedReg.getInterval(0);
     LBlock* block = reg.ins()->block();
 
     // The input's lifetime must end within the same block as the definition,
     // otherwise it could live on in phis elsewhere.
-    if (interval->end() > exitOf(block)) {
+    LiveRange* lastUsedRange = nullptr;
+    for (LiveRange::RegisterLinkIterator iter = usedReg.rangesBegin(); iter; iter++)
+        lastUsedRange = LiveRange::get(*iter);
+    if (lastUsedRange->to() > exitOf(block)) {
         reg.setMustCopyInput();
         return true;
     }
 
-    for (UsePositionIterator iter = interval->usesBegin(); iter != interval->usesEnd(); iter++) {
+    for (UsePositionIterator iter = lastUsedRange->usesBegin(); iter; iter++) {
         if (iter->pos <= inputOf(reg.ins()))
             continue;
 
         LUse* use = iter->use;
         if (FindReusingDefinition(insData[iter->pos], use)) {
             reg.setMustCopyInput();
             return true;
         }
         if (use->policy() != LUse::ANY && use->policy() != LUse::KEEPALIVE) {
             reg.setMustCopyInput();
             return true;
         }
     }
 
-    LiveInterval* preInterval = LiveInterval::New(alloc(), interval->vreg(), 0);
-    for (size_t i = 0; i < interval->numRanges(); i++) {
-        const LiveInterval::Range* range = interval->getRange(i);
-        MOZ_ASSERT(range->from <= inputOf(reg.ins()));
-
-        CodePosition to = Min(range->to, outputOf(reg.ins()));
-        if (!preInterval->addRange(range->from, to))
-            return false;
-    }
-
-    // The new interval starts at reg's input position, which means it overlaps
-    // with the old interval at one position. This is what we want, because we
+    LiveRange* preRange = LiveRange::New(alloc(), use, lastUsedRange->from(), outputOf(reg.ins()));
+    if (!preRange)
+        return false;
+
+    // The new range starts at reg's input position, which means it overlaps
+    // with the old range at one position. This is what we want, because we
     // need to copy the input before the instruction.
-    LiveInterval* postInterval = LiveInterval::New(alloc(), interval->vreg(), 0);
-    if (!postInterval->addRange(inputOf(reg.ins()), interval->end()))
+    LiveRange* postRange = LiveRange::New(alloc(), use, inputOf(reg.ins()), lastUsedRange->to());
+    if (!postRange)
         return false;
 
-    LiveIntervalVector newIntervals;
-    if (!newIntervals.append(preInterval) || !newIntervals.append(postInterval))
-        return false;
-
-    distributeUses(interval, newIntervals);
+    lastUsedRange->distributeUses(preRange);
+    lastUsedRange->distributeUses(postRange);
+    MOZ_ASSERT(!lastUsedRange->hasUses());
 
     JitSpew(JitSpew_RegAlloc, "  splitting reused input at %u to try to help grouping",
             inputOf(reg.ins()));
 
-    if (!split(interval, newIntervals))
-        return false;
-
-    MOZ_ASSERT(usedReg.numIntervals() == 2);
-
-    usedReg.setCanonicalSpillExclude(inputOf(reg.ins()));
+    usedReg.removeRange(lastUsedRange);
+    usedReg.addRange(preRange);
+    usedReg.addRange(postRange);
+
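+    // Excluding postRange from grouping decisions means the overlap it has
+    // with the reusing definition will not prevent |use| and |def| from being
+    // placed in the same group.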
+    usedReg.setGroupExclude(postRange);
 
     return tryGroupRegisters(use, def);
 }
 
 bool
 BacktrackingAllocator::groupAndQueueRegisters()
 {
     // If there is an OSR block, group parameters in that block with the
@@ -386,21 +1065,20 @@ BacktrackingAllocator::groupAndQueueRegi
                     }
                     MOZ_ASSERT(found);
                 }
             }
         }
     }
 
     // Try to group registers with their reused inputs.
-    // Virtual register number 0 is unused.
-    MOZ_ASSERT(vregs[0u].numIntervals() == 0);
+    MOZ_ASSERT(!vregs[0u].hasRanges());
     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
-        BacktrackingVirtualRegister& reg = vregs[i];
-        if (!reg.numIntervals())
+        VirtualRegister& reg = vregs[i];
+        if (!reg.hasRanges())
             continue;
 
         if (reg.def()->policy() == LDefinition::MUST_REUSE_INPUT) {
             LUse* use = reg.ins()->getOperand(reg.def()->getReusedInput())->toUse();
             if (!tryGroupReusedRegister(i, use->virtualRegister()))
                 return false;
         }
     }
@@ -414,449 +1092,452 @@ BacktrackingAllocator::groupAndQueueRegi
             for (size_t k = 0, kend = phi->numOperands(); k < kend; k++) {
                 uint32_t input = phi->getOperand(k)->toUse()->virtualRegister();
                 if (!tryGroupRegisters(input, output))
                     return false;
             }
         }
     }
 
-    // Virtual register number 0 is unused.
-    MOZ_ASSERT(vregs[0u].numIntervals() == 0);
+    MOZ_ASSERT(!vregs[0u].hasRanges());
     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
         if (mir->shouldCancel("Backtracking Enqueue Registers"))
             return false;
 
-        BacktrackingVirtualRegister& reg = vregs[i];
-        MOZ_ASSERT(reg.numIntervals() <= 2);
+        VirtualRegister& reg = vregs[i];
         MOZ_ASSERT(!reg.canonicalSpill());
 
-        if (!reg.numIntervals())
+        if (!reg.hasRanges())
             continue;
 
         // Eagerly set the canonical spill slot for registers which are fixed
         // for that slot, and reuse it for other registers in the group.
         LDefinition* def = reg.def();
         if (def->policy() == LDefinition::FIXED && !def->output()->isRegister()) {
             MOZ_ASSERT(!def->output()->isStackSlot());
             reg.setCanonicalSpill(*def->output());
             if (reg.group() && reg.group()->spill.isUse())
                 reg.group()->spill = *def->output();
         }
 
-        // Place all intervals for this register on the allocation queue.
-        // During initial queueing use single queue items for groups of
-        // registers, so that they will be allocated together and reduce the
-        // risk of unnecessary conflicts. This is in keeping with the idea that
-        // register groups are effectively single registers whose value changes
-        // during execution. If any intervals in the group are evicted later
-        // then they will be reallocated individually.
-        size_t start = 0;
+        // If the register has a range that needs to be excluded from its
+        // group, queue a bundle containing that range separately.
+        if (reg.groupExclude()) {
+            LiveBundle* bundle = LiveBundle::New(alloc());
+            if (!bundle)
+                return false;
+            bundle->addRange(reg.groupExclude());
+            size_t priority = computePriority(bundle);
+            if (!allocationQueue.insert(QueueItem(bundle, priority)))
+                return false;
+        }
+
+        // Create a bundle containing all other ranges for the register.
+        LiveBundle* bundle = LiveBundle::New(alloc());
+        if (!bundle)
+            return false;
+        for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
+            LiveRange* range = LiveRange::get(*iter);
+            if (range != reg.groupExclude())
+                bundle->addRange(range);
+        }
+
+        // Place the new bundle on the allocation queue. During initial
+        // queueing use single queue items for groups of registers, so that
+        // they will be allocated together and reduce the risk of unnecessary
+        // conflicts. This is in keeping with the idea that register groups are
+        // effectively single registers whose value changes during execution.
+        // If any ranges in the group are evicted later then they will be
+        // reallocated individually.
         if (VirtualRegisterGroup* group = reg.group()) {
             if (i == group->canonicalReg()) {
                 size_t priority = computePriority(group);
                 if (!allocationQueue.insert(QueueItem(group, priority)))
                     return false;
             }
-            start++;
-        }
-        for (; start < reg.numIntervals(); start++) {
-            LiveInterval* interval = reg.getInterval(start);
-            if (interval->numRanges() > 0) {
-                size_t priority = computePriority(interval);
-                if (!allocationQueue.insert(QueueItem(interval, priority)))
-                    return false;
-            }
+        } else {
+            size_t priority = computePriority(bundle);
+            if (!allocationQueue.insert(QueueItem(bundle, priority)))
+                return false;
         }
     }
 
     return true;
 }
 
 static const size_t MAX_ATTEMPTS = 2;
 
 bool
-BacktrackingAllocator::tryAllocateFixed(LiveInterval* interval, bool* success,
-                                        bool* pfixed, LiveIntervalVector& conflicting)
+BacktrackingAllocator::tryAllocateFixed(LiveBundle* bundle, Requirement requirement,
+                                        bool* success, bool* pfixed,
+                                        LiveBundleVector& conflicting)
 {
-    // Spill intervals which are required to be in a certain stack slot.
-    if (!interval->requirement()->allocation().isRegister()) {
+    // Spill bundles which are required to be in a certain stack slot.
+    if (!requirement.allocation().isRegister()) {
         JitSpew(JitSpew_RegAlloc, "  stack allocation requirement");
-        interval->setAllocation(interval->requirement()->allocation());
+        bundle->setAllocation(requirement.allocation());
         *success = true;
         return true;
     }
 
-    AnyRegister reg = interval->requirement()->allocation().toRegister();
-    return tryAllocateRegister(registers[reg.code()], interval, success, pfixed, conflicting);
+    AnyRegister reg = requirement.allocation().toRegister();
+    return tryAllocateRegister(registers[reg.code()], bundle, success, pfixed, conflicting);
 }
 
 bool
-BacktrackingAllocator::tryAllocateNonFixed(LiveInterval* interval, bool* success,
-                                           bool* pfixed, LiveIntervalVector& conflicting)
+BacktrackingAllocator::tryAllocateNonFixed(LiveBundle* bundle,
+                                           Requirement requirement, Requirement hint,
+                                           bool* success, bool* pfixed,
+                                           LiveBundleVector& conflicting)
 {
-    // If we want, but do not require an interval to be in a specific
-    // register, only look at that register for allocating and evict
-    // or spill if it is not available. Picking a separate register may
-    // be even worse than spilling, as it will still necessitate moves
-    // and will tie up more registers than if we spilled.
-    if (interval->hint()->kind() == Requirement::FIXED) {
-        AnyRegister reg = interval->hint()->allocation().toRegister();
-        if (!tryAllocateRegister(registers[reg.code()], interval, success, pfixed, conflicting))
+    // If we want, but do not require a bundle to be in a specific register,
+    // only look at that register for allocating and evict or spill if it is
+    // not available. Picking a separate register may be even worse than
+    // spilling, as it will still necessitate moves and will tie up more
+    // registers than if we spilled.
+    if (hint.kind() == Requirement::FIXED) {
+        AnyRegister reg = hint.allocation().toRegister();
+        if (!tryAllocateRegister(registers[reg.code()], bundle, success, pfixed, conflicting))
             return false;
         if (*success)
             return true;
     }
 
-    // Spill intervals which have no hint or register requirement.
-    if (interval->requirement()->kind() == Requirement::NONE &&
-        interval->hint()->kind() != Requirement::REGISTER)
-    {
-        spill(interval);
+    // Spill bundles which have no hint or register requirement.
+    if (requirement.kind() == Requirement::NONE && hint.kind() != Requirement::REGISTER) {
+        spill(bundle);
         *success = true;
         return true;
     }
 
-    if (conflicting.empty() || minimalInterval(interval)) {
-        // Search for any available register which the interval can be
+    if (conflicting.empty() || minimalBundle(bundle)) {
+        // Search for any available register which the bundle can be
         // allocated to.
         for (size_t i = 0; i < AnyRegister::Total; i++) {
-            if (!tryAllocateRegister(registers[i], interval, success, pfixed, conflicting))
+            if (!tryAllocateRegister(registers[i], bundle, success, pfixed, conflicting))
                 return false;
             if (*success)
                 return true;
         }
     }
 
-    // Spill intervals which have no register requirement if they didn't get
+    // Spill bundles which have no register requirement if they didn't get
     // allocated.
-    if (interval->requirement()->kind() == Requirement::NONE) {
-        spill(interval);
+    if (requirement.kind() == Requirement::NONE) {
+        spill(bundle);
         *success = true;
         return true;
     }
 
-    // We failed to allocate this interval.
+    // We failed to allocate this bundle.
     MOZ_ASSERT(!*success);
     return true;
 }
 
 bool
-BacktrackingAllocator::processInterval(LiveInterval* interval)
+BacktrackingAllocator::processBundle(LiveBundle* bundle)
 {
     if (JitSpewEnabled(JitSpew_RegAlloc)) {
         JitSpew(JitSpew_RegAlloc, "Allocating %s [priority %lu] [weight %lu]",
-                interval->toString(), computePriority(interval), computeSpillWeight(interval));
+                bundle->toString(), computePriority(bundle), computeSpillWeight(bundle));
     }
 
-    // An interval can be processed by doing any of the following:
+    // A bundle can be processed by doing any of the following:
     //
-    // - Assigning the interval a register. The interval cannot overlap any
-    //   other interval allocated for that physical register.
+    // - Assigning the bundle a register. The bundle cannot overlap any other
+    //   bundle allocated for that physical register.
     //
-    // - Spilling the interval, provided it has no register uses.
+    // - Spilling the bundle, provided it has no register uses.
     //
-    // - Splitting the interval into two or more intervals which cover the
-    //   original one. The new intervals are placed back onto the priority
-    //   queue for later processing.
+    // - Splitting the bundle into two or more bundles which cover the original
+    //   one. The new bundles are placed back onto the priority queue for later
+    //   processing.
     //
-    // - Evicting one or more existing allocated intervals, and then doing one
-    //   of the above operations. Evicted intervals are placed back on the
-    //   priority queue. Any evicted intervals must have a lower spill weight
-    //   than the interval being processed.
+    // - Evicting one or more existing allocated bundles, and then doing one
+    //   of the above operations. Evicted bundles are placed back on the
+    //   priority queue. Any evicted bundles must have a lower spill weight
+    //   than the bundle being processed.
     //
     // As long as this structure is followed, termination is guaranteed.
-    // In general, we want to minimize the amount of interval splitting
-    // (which generally necessitates spills), so allocate longer lived, lower
-    // weight intervals first and evict and split them later if they prevent
-    // allocation for higher weight intervals.
-
-    bool canAllocate = setIntervalRequirement(interval);
+    // In general, we want to minimize the amount of bundle splitting (which
+    // generally necessitates spills), so allocate longer lived, lower weight
+    // bundles first and evict and split them later if they prevent allocation
+    // for higher weight bundles.
+
+    Requirement requirement, hint;
+    bool canAllocate = computeRequirement(bundle, &requirement, &hint);
 
     bool fixed;
-    LiveIntervalVector conflicting;
+    LiveBundleVector conflicting;
     for (size_t attempt = 0;; attempt++) {
         if (canAllocate) {
             bool success = false;
             fixed = false;
             conflicting.clear();
 
-            // Ok, let's try allocating for this interval.
-            if (interval->requirement()->kind() == Requirement::FIXED) {
-                if (!tryAllocateFixed(interval, &success, &fixed, conflicting))
+            // Ok, let's try allocating for this bundle.
+            if (requirement.kind() == Requirement::FIXED) {
+                if (!tryAllocateFixed(bundle, requirement, &success, &fixed, conflicting))
                     return false;
             } else {
-                if (!tryAllocateNonFixed(interval, &success, &fixed, conflicting))
+                if (!tryAllocateNonFixed(bundle, requirement, hint, &success, &fixed, conflicting))
                     return false;
             }
 
             // If that worked, we're done!
             if (success)
                 return true;
 
-            // If that didn't work, but we have one or more non-fixed intervals
+            // If that didn't work, but we have one or more non-fixed bundles
             // known to be conflicting, maybe we can evict them and try again.
             if (attempt < MAX_ATTEMPTS &&
                 !fixed &&
                 !conflicting.empty() &&
-                maximumSpillWeight(conflicting) < computeSpillWeight(interval))
-            {
-                for (size_t i = 0; i < conflicting.length(); i++) {
-                    if (!evictInterval(conflicting[i]))
-                        return false;
+                maximumSpillWeight(conflicting) < computeSpillWeight(bundle))
+            {
+                for (size_t i = 0; i < conflicting.length(); i++) {
+                    if (!evictBundle(conflicting[i]))
+                        return false;
                 }
                 continue;
             }
         }
 
-        // A minimal interval cannot be split any further. If we try to split
-        // it at this point we will just end up with the same interval and will
-        // enter an infinite loop. Weights and the initial live intervals must
-        // be constructed so that any minimal interval is allocatable.
-        MOZ_ASSERT(!minimalInterval(interval));
-
-        LiveInterval* conflict = conflicting.empty() ? nullptr : conflicting[0];
-        return chooseIntervalSplit(interval, canAllocate && fixed, conflict);
+        // A minimal bundle cannot be split any further. If we try to split
+        // it at this point we will just end up with the same bundle and will
+        // enter an infinite loop. Weights and the initial live ranges must
+        // be constructed so that any minimal bundle is allocatable.
+        MOZ_ASSERT(!minimalBundle(bundle));
+
+        LiveBundle* conflict = conflicting.empty() ? nullptr : conflicting[0];
+        return chooseBundleSplit(bundle, canAllocate && fixed, conflict);
     }
 }
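
The comment at the top of this routine describes a worklist discipline: bundles come off a priority queue, and eviction or splitting only puts lower weight or smaller bundles back on it, which is what guarantees termination. A minimal sketch of such a driver loop follows; the QueueItem fields and the removeHighest() accessor are illustrative assumptions, not the patch's exact top-level code.

    // Sketch of a driver loop for processBundle(). The queue item shape and
    // removeHighest() are assumed names for illustration only.
    bool
    BacktrackingAllocator::processAllocationQueue()
    {
        while (!allocationQueue.empty()) {
            QueueItem item = allocationQueue.removeHighest();
            // Higher priority (longer lived) bundles are handled first; any
            // bundle that processBundle() evicts or splits is re-inserted
            // and comes back around later with its new priority.
            if (!processBundle(item.bundle))
                return false;
        }
        return true;
    }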
 
 bool
 BacktrackingAllocator::processGroup(VirtualRegisterGroup* group)
 {
     if (JitSpewEnabled(JitSpew_RegAlloc)) {
         JitSpew(JitSpew_RegAlloc, "Allocating group v%u [priority %lu] [weight %lu]",
                 group->registers[0], computePriority(group), computeSpillWeight(group));
     }
 
-    bool fixed;
-    LiveInterval* conflict;
+    LiveBundle* conflict;
     for (size_t attempt = 0;; attempt++) {
         // Search for any available register which the group can be allocated to.
-        fixed = false;
         conflict = nullptr;
         for (size_t i = 0; i < AnyRegister::Total; i++) {
             bool success;
-            if (!tryAllocateGroupRegister(registers[i], group, &success, &fixed, &conflict))
+            if (!tryAllocateGroupRegister(registers[i], group, &success, &conflict))
                 return false;
             if (success) {
                 conflict = nullptr;
                 break;
             }
         }
 
         if (attempt < MAX_ATTEMPTS &&
-            !fixed &&
             conflict &&
-            conflict->hasVreg() &&
             computeSpillWeight(conflict) < computeSpillWeight(group))
         {
-            if (!evictInterval(conflict))
+            if (!evictBundle(conflict))
                 return false;
             continue;
         }
 
         for (size_t i = 0; i < group->registers.length(); i++) {
             VirtualRegister& reg = vregs[group->registers[i]];
-            MOZ_ASSERT(reg.numIntervals() <= 2);
-            if (!processInterval(reg.getInterval(0)))
+            LiveRange* firstRange = LiveRange::get(*reg.rangesBegin());
+            MOZ_ASSERT(firstRange != reg.groupExclude());
+            if (!processBundle(firstRange->bundle()))
                 return false;
         }
 
         return true;
     }
 }
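
processGroup() compares the conflicting bundle's spill weight against the weight of the whole group. A plausible way to derive that group weight, consistent with the comparison above, is the maximum over the members' defining bundles; this is a sketch only, and the real helper may differ in detail.

    // Sketch: a group's spill weight as the maximum weight of its members'
    // defining bundles, matching the comparison made in processGroup().
    size_t
    BacktrackingAllocator::computeSpillWeight(VirtualRegisterGroup* group)
    {
        size_t maxWeight = 0;
        for (size_t i = 0; i < group->registers.length(); i++) {
            VirtualRegister& reg = vregs[group->registers[i]];
            LiveRange* firstRange = LiveRange::get(*reg.rangesBegin());
            size_t weight = computeSpillWeight(firstRange->bundle());
            if (weight > maxWeight)
                maxWeight = weight;
        }
        return maxWeight;
    }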
 
 bool
-BacktrackingAllocator::setIntervalRequirement(LiveInterval* interval)
+BacktrackingAllocator::computeRequirement(LiveBundle* bundle,
+                                          Requirement* requirement, Requirement* hint)
 {
-    // Set any requirement or hint on interval according to its definition and
+    // Set any requirement or hint on bundle according to its definition and
     // uses. Return false if there are conflicting requirements which will
-    // require the interval to be split.
-    interval->setHint(Requirement());
-    interval->setRequirement(Requirement());
-
-    BacktrackingVirtualRegister* reg = &vregs[interval->vreg()];
-
-    // Set a hint if another interval in the same group is in a register.
-    if (VirtualRegisterGroup* group = reg->group()) {
-        if (group->allocation.isRegister()) {
-            if (JitSpewEnabled(JitSpew_RegAlloc)) {
+    // require the bundle to be split.
+
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+        VirtualRegister& reg = vregs[range->vreg()];
+
+        // Set a hint if another range in the same group is in a register.
+        if (VirtualRegisterGroup* group = reg.group()) {
+            if (group->allocation.isRegister()) {
                 JitSpew(JitSpew_RegAlloc, "  Hint %s, used by group allocation",
                         group->allocation.toString());
-            }
-            interval->setHint(Requirement(group->allocation));
-        }
-    }
-
-    if (interval->index() == 0) {
-        // The first interval is the definition, so deal with any definition
-        // constraints/hints.
-
-        LDefinition::Policy policy = reg->def()->policy();
-        if (policy == LDefinition::FIXED) {
-            // Fixed policies get a FIXED requirement.
-            if (JitSpewEnabled(JitSpew_RegAlloc)) {
-                JitSpew(JitSpew_RegAlloc, "  Requirement %s, fixed by definition",
-                        reg->def()->output()->toString());
+                hint->merge(Requirement(group->allocation));
             }
-            interval->setRequirement(Requirement(*reg->def()->output()));
-        } else if (reg->ins()->isPhi()) {
-            // Phis don't have any requirements, but they should prefer their
-            // input allocations. This is captured by the group hints above.
-        } else {
-            // Non-phis get a REGISTER requirement.
-            interval->setRequirement(Requirement(Requirement::REGISTER));
         }
-    }
-
-    // Search uses for requirements.
-    for (UsePositionIterator iter = interval->usesBegin();
-         iter != interval->usesEnd();
-         iter++)
-    {
-        LUse::Policy policy = iter->use->policy();
-        if (policy == LUse::FIXED) {
-            AnyRegister required = GetFixedRegister(reg->def(), iter->use);
-
-            if (JitSpewEnabled(JitSpew_RegAlloc)) {
+
+        if (range->hasDefinition()) {
+            // Deal with any definition constraints/hints.
+            LDefinition::Policy policy = reg.def()->policy();
+            if (policy == LDefinition::FIXED) {
+                // Fixed policies get a FIXED requirement.
+                JitSpew(JitSpew_RegAlloc, "  Requirement %s, fixed by definition",
+                        reg.def()->output()->toString());
+                if (!requirement->merge(Requirement(*reg.def()->output())))
+                    return false;
+            } else if (reg.ins()->isPhi()) {
+                // Phis don't have any requirements, but they should prefer their
+                // input allocations. This is captured by the group hints above.
+            } else {
+                // Non-phis get a REGISTER requirement.
+                if (!requirement->merge(Requirement(Requirement::REGISTER)))
+                    return false;
+            }
+        }
+
+        // Search uses for requirements.
+        for (UsePositionIterator iter = range->usesBegin(); iter; iter++) {
+            LUse::Policy policy = iter->use->policy();
+            if (policy == LUse::FIXED) {
+                AnyRegister required = GetFixedRegister(reg.def(), iter->use);
+
                 JitSpew(JitSpew_RegAlloc, "  Requirement %s, due to use at %u",
                         required.name(), iter->pos.bits());
+
+                // If there are multiple fixed registers which the bundle is
+                // required to use, fail. The bundle will need to be split before
+                // it can be allocated.
+                if (!requirement->merge(Requirement(LAllocation(required))))
+                    return false;
+            } else if (policy == LUse::REGISTER) {
+                if (!requirement->merge(Requirement(Requirement::REGISTER)))
+                    return false;
+            } else if (policy == LUse::ANY) {
+                // ANY differs from KEEPALIVE by actively preferring a register.
+                hint->merge(Requirement(Requirement::REGISTER));
             }
-
-            // If there are multiple fixed registers which the interval is
-            // required to use, fail. The interval will need to be split before
-            // it can be allocated.
-            if (!interval->addRequirement(Requirement(LAllocation(required))))
-                return false;
-        } else if (policy == LUse::REGISTER) {
-            if (!interval->addRequirement(Requirement(Requirement::REGISTER)))
-                return false;
-        } else if (policy == LUse::ANY) {
-            // ANY differs from KEEPALIVE by actively preferring a register.
-            interval->addHint(Requirement(Requirement::REGISTER));
         }
     }
 
     return true;
 }
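
computeRequirement() leans on Requirement::merge() returning false exactly when two constraints cannot be satisfied by a single allocation, e.g. two different fixed registers. The sketch below shows that merge rule under the assumption that FIXED is strictly stronger than REGISTER, which is stronger than no requirement; the real Requirement class may be organized differently.

    // Sketch of the merge rule computeRequirement() depends on.
    bool
    Requirement::merge(const Requirement& newRequirement)
    {
        if (newRequirement.kind() == Requirement::FIXED) {
            // Two fixed requirements are only compatible if they agree.
            if (kind() == Requirement::FIXED)
                return newRequirement.allocation() == allocation();
            *this = newRequirement;
            return true;
        }
        // A REGISTER requirement is subsumed by an existing FIXED or
        // REGISTER requirement and only replaces an empty one.
        MOZ_ASSERT(newRequirement.kind() == Requirement::REGISTER);
        if (kind() == Requirement::NONE)
            *this = newRequirement;
        return true;
    }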
 
 bool
 BacktrackingAllocator::tryAllocateGroupRegister(PhysicalRegister& r, VirtualRegisterGroup* group,
-                                                bool* psuccess, bool* pfixed, LiveInterval** pconflicting)
+                                                bool* psuccess, LiveBundle** pconflicting)
 {
     *psuccess = false;
 
     if (!r.allocatable)
         return true;
 
-    if (!vregs[group->registers[0]].isCompatibleReg(r.reg))
+    if (!vregs[group->registers[0]].isCompatible(r.reg))
         return true;
 
     bool allocatable = true;
-    LiveInterval* conflicting = nullptr;
+    LiveBundle* conflicting = nullptr;
 
     for (size_t i = 0; i < group->registers.length(); i++) {
         VirtualRegister& reg = vregs[group->registers[i]];
-        MOZ_ASSERT(reg.numIntervals() <= 2);
-        LiveInterval* interval = reg.getInterval(0);
-
-        for (size_t j = 0; j < interval->numRanges(); j++) {
-            AllocatedRange range(interval, interval->getRange(j)), existing;
+        for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
+            LiveRange* range = LiveRange::get(*iter);
+            if (range == reg.groupExclude())
+                continue;
+            LiveRange* existing;
             if (r.allocations.contains(range, &existing)) {
+                if (!existing->bundle()) {
+                    // The group's range spans a call.
+                    return true;
+                }
                 if (conflicting) {
-                    if (conflicting != existing.interval)
+                    if (conflicting != existing->bundle())
                         return true;
                 } else {
-                    conflicting = existing.interval;
+                    conflicting = existing->bundle();
                 }
                 allocatable = false;
             }
         }
     }
 
     if (!allocatable) {
         MOZ_ASSERT(conflicting);
         if (!*pconflicting || computeSpillWeight(conflicting) < computeSpillWeight(*pconflicting))
             *pconflicting = conflicting;
-        if (!conflicting->hasVreg())
-            *pfixed = true;
         return true;
     }
 
     *psuccess = true;
 
     group->allocation = LAllocation(r.reg);
     return true;
 }
 
 bool
-BacktrackingAllocator::tryAllocateRegister(PhysicalRegister& r, LiveInterval* interval,
-                                           bool* success, bool* pfixed, LiveIntervalVector& conflicting)
+BacktrackingAllocator::tryAllocateRegister(PhysicalRegister& r, LiveBundle* bundle,
+                                           bool* success, bool* pfixed, LiveBundleVector& conflicting)
 {
     *success = false;
 
     if (!r.allocatable)
         return true;
 
-    BacktrackingVirtualRegister* reg = &vregs[interval->vreg()];
-    if (!reg->isCompatibleReg(r.reg))
-        return true;
-
-    MOZ_ASSERT_IF(interval->requirement()->kind() == Requirement::FIXED,
-                  interval->requirement()->allocation() == LAllocation(r.reg));
-
-    LiveIntervalVector aliasedConflicting;
-
-    for (size_t i = 0; i < interval->numRanges(); i++) {
-        AllocatedRange range(interval, interval->getRange(i)), existing;
+    LiveBundleVector aliasedConflicting;
+
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+        VirtualRegister& reg = vregs[range->vreg()];
+
+        if (!reg.isCompatible(r.reg))
+            return true;
+
         for (size_t a = 0; a < r.reg.numAliased(); a++) {
             PhysicalRegister& rAlias = registers[r.reg.aliased(a).code()];
+            LiveRange* existing;
             if (!rAlias.allocations.contains(range, &existing))
                 continue;
-            if (existing.interval->hasVreg()) {
-                MOZ_ASSERT(existing.interval->getAllocation()->toRegister() == rAlias.reg);
+            if (existing->hasVreg()) {
+                MOZ_ASSERT(existing->bundle()->allocation().toRegister() == rAlias.reg);
                 bool duplicate = false;
                 for (size_t i = 0; i < aliasedConflicting.length(); i++) {
-                    if (aliasedConflicting[i] == existing.interval) {
+                    if (aliasedConflicting[i] == existing->bundle()) {
                         duplicate = true;
                         break;
                     }
                 }
-                if (!duplicate && !aliasedConflicting.append(existing.interval))
+                if (!duplicate && !aliasedConflicting.append(existing->bundle()))
                     return false;
             } else {
-                if (JitSpewEnabled(JitSpew_RegAlloc)) {
-                    JitSpew(JitSpew_RegAlloc, "  %s collides with fixed use %s",
-                            rAlias.reg.name(), existing.range->toString());
-                }
+                JitSpew(JitSpew_RegAlloc, "  %s collides with fixed use %s",
+                        rAlias.reg.name(), existing->toString());
                 *pfixed = true;
                 return true;
             }
         }
     }
 
     if (!aliasedConflicting.empty()) {
-        // One or more aliased registers is allocated to another live interval
+        // One or more aliased registers are allocated to another bundle
         // overlapping this one. Keep track of the conflicting set, and in the
         // case of multiple conflicting sets keep track of the set with the
         // lowest maximum spill weight.
 
         if (JitSpewEnabled(JitSpew_RegAlloc)) {
             if (aliasedConflicting.length() == 1) {
-                LiveInterval* existing = aliasedConflicting[0];
-                JitSpew(JitSpew_RegAlloc, "  %s collides with v%u[%u] %s [weight %lu]",
-                        r.reg.name(), existing->vreg(), existing->index(),
-                        existing->rangesToString(), computeSpillWeight(existing));
+                LiveBundle* existing = aliasedConflicting[0];
+                JitSpew(JitSpew_RegAlloc, "  %s collides with %s [weight %lu]",
+                        r.reg.name(), existing->toString(), computeSpillWeight(existing));
             } else {
                 JitSpew(JitSpew_RegAlloc, "  %s collides with the following", r.reg.name());
                 for (size_t i = 0; i < aliasedConflicting.length(); i++) {
-                    LiveInterval* existing = aliasedConflicting[i];
-                    JitSpew(JitSpew_RegAlloc, "      v%u[%u] %s [weight %lu]",
-                            existing->vreg(), existing->index(),
-                            existing->rangesToString(), computeSpillWeight(existing));
+                    LiveBundle* existing = aliasedConflicting[i];
+                    JitSpew(JitSpew_RegAlloc, "      %s [weight %lu]",
+                            existing->toString(), computeSpillWeight(existing));
                 }
             }
         }
 
         if (conflicting.empty()) {
             if (!conflicting.appendAll(aliasedConflicting))
                 return false;
         } else {
@@ -866,269 +1547,267 @@ BacktrackingAllocator::tryAllocateRegist
                     return false;
             }
         }
         return true;
     }
 
     JitSpew(JitSpew_RegAlloc, "  allocated to %s", r.reg.name());
 
-    for (size_t i = 0; i < interval->numRanges(); i++) {
-        AllocatedRange range(interval, interval->getRange(i));
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+
+        // Set any register hint for allocating other bundles in the same group.
+        if (VirtualRegisterGroup* group = vregs[range->vreg()].group()) {
+            if (!group->allocation.isRegister())
+                group->allocation = LAllocation(r.reg);
+        }
+
         if (!r.allocations.insert(range))
             return false;
     }
 
-    // Set any register hint for allocating other intervals in the same group.
-    if (VirtualRegisterGroup* group = reg->group()) {
-        if (!group->allocation.isRegister())
-            group->allocation = LAllocation(r.reg);
-    }
-
-    interval->setAllocation(LAllocation(r.reg));
+    bundle->setAllocation(LAllocation(r.reg));
     *success = true;
     return true;
 }
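
Both tryAllocateGroupRegister() and tryAllocateRegister() detect conflicts with r.allocations.contains(range, &existing), an overlap query over the ranges already assigned to the physical register. The backing structure is a search tree, but the comparison it reduces to is the usual half-open interval test, sketched here for reference.

    // Overlap test underlying contains(): half-open [from, to) ranges
    // conflict when each starts before the other ends.
    static inline bool
    RangesOverlap(const LiveRange* a, const LiveRange* b)
    {
        return a->from() < b->to() && b->from() < a->to();
    }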
 
 bool
-BacktrackingAllocator::evictInterval(LiveInterval* interval)
+BacktrackingAllocator::evictBundle(LiveBundle* bundle)
 {
     if (JitSpewEnabled(JitSpew_RegAlloc)) {
         JitSpew(JitSpew_RegAlloc, "  Evicting %s [priority %lu] [weight %lu]",
-                interval->toString(), computePriority(interval), computeSpillWeight(interval));
+                bundle->toString(), computePriority(bundle), computeSpillWeight(bundle));
     }
 
-    MOZ_ASSERT(interval->getAllocation()->isRegister());
-
-    AnyRegister reg(interval->getAllocation()->toRegister());
+    AnyRegister reg(bundle->allocation().toRegister());
     PhysicalRegister& physical = registers[reg.code()];
     MOZ_ASSERT(physical.reg == reg && physical.allocatable);
 
-    for (size_t i = 0; i < interval->numRanges(); i++) {
-        AllocatedRange range(interval, interval->getRange(i));
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
         physical.allocations.remove(range);
     }
 
-    interval->setAllocation(LAllocation());
-
-    size_t priority = computePriority(interval);
-    return allocationQueue.insert(QueueItem(interval, priority));
-}
-
-void
-BacktrackingAllocator::distributeUses(LiveInterval* interval,
-                                      const LiveIntervalVector& newIntervals)
-{
-    MOZ_ASSERT(newIntervals.length() >= 2);
-
-    // Simple redistribution of uses from an old interval to a set of new
-    // intervals. Intervals are permitted to overlap, in which case this will
-    // assign uses in the overlapping section to the interval with the latest
-    // start position.
-    for (UsePositionIterator iter(interval->usesBegin());
-         iter != interval->usesEnd();
-         iter++)
-    {
-        CodePosition pos = iter->pos;
-        LiveInterval* addInterval = nullptr;
-        for (size_t i = 0; i < newIntervals.length(); i++) {
-            LiveInterval* newInterval = newIntervals[i];
-            if (newInterval->covers(pos)) {
-                if (!addInterval || newInterval->start() < addInterval->start())
-                    addInterval = newInterval;
-            }
-        }
-        addInterval->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
-    }
+    bundle->setAllocation(LAllocation());
+
+    size_t priority = computePriority(bundle);
+    return allocationQueue.insert(QueueItem(bundle, priority));
 }
 
 bool
-BacktrackingAllocator::split(LiveInterval* interval,
-                             const LiveIntervalVector& newIntervals)
+BacktrackingAllocator::splitAndRequeueBundles(LiveBundle* bundle,
+                                              const LiveBundleVector& newBundles)
 {
     if (JitSpewEnabled(JitSpew_RegAlloc)) {
-        JitSpew(JitSpew_RegAlloc, "    splitting interval %s into:", interval->toString());
-        for (size_t i = 0; i < newIntervals.length(); i++) {
-            JitSpew(JitSpew_RegAlloc, "      %s", newIntervals[i]->toString());
-            MOZ_ASSERT(newIntervals[i]->start() >= interval->start());
-            MOZ_ASSERT(newIntervals[i]->end() <= interval->end());
+        JitSpew(JitSpew_RegAlloc, "    splitting bundle %s into:", bundle->toString());
+        for (size_t i = 0; i < newBundles.length(); i++)
+            JitSpew(JitSpew_RegAlloc, "      %s", newBundles[i]->toString());
+    }
+
+    // Remove all ranges in the old bundle from their register's list.
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+        MOZ_ASSERT(range != vregs[range->vreg()].groupExclude());
+        vregs[range->vreg()].removeRange(range);
+    }
+
+    // Add all ranges in the new bundles to their register's list.
+    MOZ_ASSERT(newBundles.length() >= 2);
+    for (size_t i = 0; i < newBundles.length(); i++) {
+        LiveBundle* newBundle = newBundles[i];
+        for (LiveRange::BundleLinkIterator iter = newBundle->rangesBegin(); iter; iter++) {
+            LiveRange* range = LiveRange::get(*iter);
+            vregs[range->vreg()].addRange(range);
         }
     }
 
-    MOZ_ASSERT(newIntervals.length() >= 2);
-
-    // Find the earliest interval in the new list.
-    LiveInterval* first = newIntervals[0];
-    for (size_t i = 1; i < newIntervals.length(); i++) {
-        if (newIntervals[i]->start() < first->start())
-            first = newIntervals[i];
-    }
-
-    // Replace the old interval in the virtual register's state with the new intervals.
-    VirtualRegister* reg = &vregs[interval->vreg()];
-    reg->replaceInterval(interval, first);
-    for (size_t i = 0; i < newIntervals.length(); i++) {
-        if (newIntervals[i] != first && !reg->addInterval(newIntervals[i]))
+    // Queue the new bundles for register assignment.
+    for (size_t i = 0; i < newBundles.length(); i++) {
+        LiveBundle* newBundle = newBundles[i];
+        size_t priority = computePriority(newBundle);
+        if (!allocationQueue.insert(QueueItem(newBundle, priority)))
             return false;
     }
 
     return true;
 }
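
splitAndRequeueBundles() re-inserts each new bundle with computePriority(), so the priority function decides how soon each piece of a split bundle is reconsidered. A sketch consistent with the comments in processBundle() (longer lived bundles are allocated first) is the bundle's total code length; .bits() is used here only to turn the positions into plain integers.

    // Sketch: a bundle's priority as the total length of its ranges, so
    // that longer lived bundles come off the allocation queue first.
    size_t
    BacktrackingAllocator::computePriority(LiveBundle* bundle)
    {
        size_t lifetimeTotal = 0;
        for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
            LiveRange* range = LiveRange::get(*iter);
            lifetimeTotal += range->to().bits() - range->from().bits();
        }
        return lifetimeTotal;
    }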
 
-bool BacktrackingAllocator::requeueIntervals(const LiveIntervalVector& newIntervals)
-{
-    // Queue the new intervals for register assignment.
-    for (size_t i = 0; i < newIntervals.length(); i++) {
-        LiveInterval* newInterval = newIntervals[i];
-        size_t priority = computePriority(newInterval);
-        if (!allocationQueue.insert(QueueItem(newInterval, priority)))
-            return false;
-    }
-    return true;
-}
-
 void
-BacktrackingAllocator::spill(LiveInterval* interval)
+BacktrackingAllocator::spill(LiveBundle* bundle)
 {
-    JitSpew(JitSpew_RegAlloc, "  Spilling interval");
-
-    MOZ_ASSERT(interval->requirement()->kind() == Requirement::NONE);
-    MOZ_ASSERT(!interval->getAllocation()->isStackSlot());
-
-    // We can't spill bogus intervals.
-    MOZ_ASSERT(interval->hasVreg());
-
-    BacktrackingVirtualRegister* reg = &vregs[interval->vreg()];
-
-    if (LiveInterval* spillInterval = interval->spillInterval()) {
-        JitSpew(JitSpew_RegAlloc, "    Spilling to existing spill interval");
-        while (!interval->usesEmpty())
-            spillInterval->addUse(interval->popUse());
-        reg->removeInterval(interval);
+    JitSpew(JitSpew_RegAlloc, "  Spilling bundle");
+
+    if (LiveBundle* spillParent = bundle->spillParent()) {
+        JitSpew(JitSpew_RegAlloc, "    Using existing spill bundle");
+        for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+            LiveRange* range = LiveRange::get(*iter);
+            LiveRange* parentRange = spillParent->rangeFor(range->from());
+            MOZ_ASSERT(parentRange->from() <= range->from());
+            MOZ_ASSERT(parentRange->to() >= range->to());
+            MOZ_ASSERT(range->vreg() == parentRange->vreg());
+            range->distributeUses(parentRange);
+            MOZ_ASSERT(!range->hasUses());
+            vregs[range->vreg()].removeRange(range);
+        }
         return;
     }
 
-    bool useCanonical = !reg->hasCanonicalSpillExclude()
-        || interval->start() < reg->canonicalSpillExclude();
+    // For now, we require that all ranges in the bundle have the same vreg. See bug 1067610.
+    LiveRange* firstRange = LiveRange::get(*bundle->rangesBegin());
+    VirtualRegister& reg = vregs[firstRange->vreg()];
+
+#ifdef DEBUG
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+        MOZ_ASSERT(range->vreg() == reg.vreg());
+        MOZ_ASSERT_IF(range == reg.groupExclude(), bundle->numRanges() == 1);
+    }
+#endif
+
+    // Canonical spill slots cannot be used for the groupExclude range, which
+    // might overlap other ranges in the register's group.
+    bool useCanonical = firstRange != reg.groupExclude();
 
     if (useCanonical) {
-        if (reg->canonicalSpill()) {
+        if (reg.canonicalSpill()) {
             JitSpew(JitSpew_RegAlloc, "    Picked canonical spill location %s",
-                    reg->canonicalSpill()->toString());
-            interval->setAllocation(*reg->canonicalSpill());
+                    reg.canonicalSpill()->toString());
+            bundle->setAllocation(*reg.canonicalSpill());
             return;
         }
 
-        if (reg->group() && !reg->group()->spill.isUse()) {
+        if (reg.group() && !reg.group()->spill.isUse()) {
             JitSpew(JitSpew_RegAlloc, "    Reusing group spill location %s",
-                    reg->group()->spill.toString());
-            interval->setAllocation(reg->group()->spill);
-            reg->setCanonicalSpill(reg->group()->spill);
+                    reg.group()->spill.toString());
+            bundle->setAllocation(reg.group()->spill);
+            reg.setCanonicalSpill(reg.group()->spill);
             return;
         }
     }
 
     uint32_t virtualSlot = numVirtualStackSlots++;
 
-    // Count virtual stack slots down from the maximum representable value, so
-    // that virtual slots are more obviously distinguished from real slots.
+    // Count virtual stack slots down from the maximum representable value.
     LStackSlot alloc(LAllocation::DATA_MASK - virtualSlot);
-    interval->setAllocation(alloc);
+    bundle->setAllocation(alloc);
 
     JitSpew(JitSpew_RegAlloc, "    Allocating spill location %s", alloc.toString());
 
     if (useCanonical) {
-        reg->setCanonicalSpill(alloc);
-        if (reg->group())
-            reg->group()->spill = alloc;
+        reg.setCanonicalSpill(alloc);
+        if (reg.group())
+            reg.group()->spill = alloc;
     }
 }
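
spill() hands out virtual stack slots numbered down from LAllocation::DATA_MASK, and pickStackSlot() later replaces them with real slots. Under that numbering, a placeholder slot can be recognized as sketched below; the actual predicate lives in the allocator header and may be phrased differently.

    // Sketch of isVirtualStackSlot(), assuming virtual slots are numbered
    // down from LAllocation::DATA_MASK as in spill() above.
    bool
    BacktrackingAllocator::isVirtualStackSlot(LAllocation alloc)
    {
        return alloc.isStackSlot() &&
               LAllocation::DATA_MASK - alloc.toStackSlot()->slot() < numVirtualStackSlots;
    }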
 
 bool
 BacktrackingAllocator::pickStackSlots()
 {
     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
-        BacktrackingVirtualRegister* reg = &vregs[i];
+        VirtualRegister& reg = vregs[i];
 
         if (mir->shouldCancel("Backtracking Pick Stack Slots"))
             return false;
 
-        for (size_t j = 0; j < reg->numIntervals(); j++) {
-            LiveInterval* interval = reg->getInterval(j);
-            if (!pickStackSlot(interval))
+        for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
+            LiveRange* range = LiveRange::get(*iter);
+            if (!pickStackSlot(range->bundle()))
                 return false;
         }
     }
 
     return true;
 }
 
 bool
-BacktrackingAllocator::pickStackSlot(LiveInterval* interval)
+BacktrackingAllocator::addBundlesUsingAllocation(VirtualRegister& reg, LAllocation alloc,
+                                                 LiveBundleVector& bundles)
 {
-    LAllocation alloc = *interval->getAllocation();
+    for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+        LiveBundle* bundle = range->bundle();
+        if (bundle->allocation() == alloc) {
+            bool duplicate = false;
+            for (size_t i = 0; i < bundles.length(); i++) {
+                if (bundles[i] == bundle) {
+                    duplicate = true;
+                    break;
+                }
+            }
+            if (!duplicate && !bundles.append(bundle))
+                return false;
+        }
+    }
+    return true;
+}
+
+bool
+BacktrackingAllocator::pickStackSlot(LiveBundle* bundle)
+{
+    LAllocation alloc = bundle->allocation();
     MOZ_ASSERT(!alloc.isUse());
 
     if (!isVirtualStackSlot(alloc))
         return true;
 
-    BacktrackingVirtualRegister& reg = vregs[interval->vreg()];
-
-    // Get a list of all the intervals which will share this stack slot.
-    LiveIntervalVector commonIntervals;
-
-    if (!commonIntervals.append(interval))
+    // For now, we require that all ranges in the bundle have the same vreg. See bug 1067610.
+    LiveRange* firstRange = LiveRange::get(*bundle->rangesBegin());
+    VirtualRegister& reg = vregs[firstRange->vreg()];
+
+#ifdef DEBUG
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+        MOZ_ASSERT(range->vreg() == reg.vreg());
+        MOZ_ASSERT_IF(range == reg.groupExclude(), bundle->numRanges() == 1);
+    }
+#endif
+
+    // Get a list of all the bundles which will share this stack slot.
+    LiveBundleVector commonBundles;
+
+    if (!commonBundles.append(bundle))
         return false;
 
     if (reg.canonicalSpill() && alloc == *reg.canonicalSpill()) {
-        // Look for other intervals in the vreg using this spill slot.
-        for (size_t i = 0; i < reg.numIntervals(); i++) {
-            LiveInterval* ninterval = reg.getInterval(i);
-            if (ninterval != interval && *ninterval->getAllocation() == alloc) {
-                if (!commonIntervals.append(ninterval))
-                    return false;
-            }
-        }
-
-        // Look for intervals in other registers with the same group using this
+        // Look for other bundles with ranges in the vreg using this spill slot.
+        if (!addBundlesUsingAllocation(reg, alloc, commonBundles))
+            return false;
+
+        // Look for bundles in other registers with the same group using this
         // spill slot.
         if (reg.group() && alloc == reg.group()->spill) {
             for (size_t i = 0; i < reg.group()->registers.length(); i++) {
                 uint32_t nvreg = reg.group()->registers[i];
-                if (nvreg == interval->vreg())
+                if (nvreg == reg.vreg())
                     continue;
-                BacktrackingVirtualRegister& nreg = vregs[nvreg];
-                for (size_t j = 0; j < nreg.numIntervals(); j++) {
-                    LiveInterval* ninterval = nreg.getInterval(j);
-                    if (*ninterval->getAllocation() == alloc) {
-                        if (!commonIntervals.append(ninterval))
-                            return false;
-                    }
-                }
+                VirtualRegister& nreg = vregs[nvreg];
+                if (!addBundlesUsingAllocation(nreg, alloc, commonBundles))
+                    return false;
             }
         }
     } else {
         MOZ_ASSERT_IF(reg.group(), alloc != reg.group()->spill);
     }
 
-    if (!reuseOrAllocateStackSlot(commonIntervals, reg.type(), &alloc))
+    if (!reuseOrAllocateStackSlot(commonBundles, reg.type(), &alloc))
         return false;
 
     MOZ_ASSERT(!isVirtualStackSlot(alloc));
 
-    // Set the physical stack slot for each of the intervals found earlier.
-    for (size_t i = 0; i < commonIntervals.length(); i++)
-        commonIntervals[i]->setAllocation(alloc);
+    // Set the physical stack slot for each of the bundles found earlier.
+    for (size_t i = 0; i < commonBundles.length(); i++)
+        commonBundles[i]->setAllocation(alloc);
 
     return true;
 }
 
 bool
-BacktrackingAllocator::reuseOrAllocateStackSlot(const LiveIntervalVector& intervals, LDefinition::Type type,
-                                                LAllocation* palloc)
+BacktrackingAllocator::reuseOrAllocateStackSlot(const LiveBundleVector& bundles,
+                                                LDefinition::Type type, LAllocation* palloc)
 {
     SpillSlotList* slotList;
     switch (StackSlotAllocator::width(type)) {
       case 4:  slotList = &normalSlots; break;
       case 8:  slotList = &doubleSlots; break;
       case 16: slotList = &quadSlots;   break;
       default:
         MOZ_CRASH("Bad width");
@@ -1146,30 +1825,31 @@ BacktrackingAllocator::reuseOrAllocateSt
             if (!stop) {
                 stop = spill;
             } else if (stop == spill) {
                 // We looked through every slot in the list.
                 break;
             }
 
             bool success = true;
-            for (size_t i = 0; i < intervals.length() && success; i++) {
-                LiveInterval* interval = intervals[i];
-                for (size_t j = 0; j < interval->numRanges(); j++) {
-                    AllocatedRange range(interval, interval->getRange(j)), existing;
+            for (size_t i = 0; i < bundles.length() && success; i++) {
+                LiveBundle* bundle = bundles[i];
+                for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+                    LiveRange* range = LiveRange::get(*iter);
+                    LiveRange* existing;
                     if (spill->allocated.contains(range, &existing)) {
                         success = false;
                         break;
                     }
                 }
             }
             if (success) {
-                // We can reuse this physical stack slot for the new intervals.
+                // We can reuse this physical stack slot for the new bundles.
                 // Update the allocated ranges for the slot.
-                if (!insertAllRanges(spill->allocated, intervals))
+                if (!insertAllRanges(spill->allocated, bundles))
                     return false;
                 *palloc = spill->alloc;
                 return true;
             }
 
             // On a miss, move the spill to the end of the list. This will cause us
             // to make fewer attempts to allocate from slots with a large and
             // highly contended range.
@@ -1187,138 +1867,150 @@ BacktrackingAllocator::reuseOrAllocateSt
     // Make sure the virtual and physical stack slots don't start overlapping.
     if (isVirtualStackSlot(LStackSlot(stackSlot)))
         return false;
 
     SpillSlot* spill = new(alloc()) SpillSlot(stackSlot, alloc().lifoAlloc());
     if (!spill)
         return false;
 
-    if (!insertAllRanges(spill->allocated, intervals))
+    if (!insertAllRanges(spill->allocated, bundles))
         return false;
 
     *palloc = spill->alloc;
 
     slotList->pushFront(spill);
     return true;
 }
 
 bool
-BacktrackingAllocator::insertAllRanges(AllocatedRangeSet& set, const LiveIntervalVector& intervals)
+BacktrackingAllocator::insertAllRanges(LiveRangeSet& set, const LiveBundleVector& bundles)
 {
-    for (size_t i = 0; i < intervals.length(); i++) {
-        LiveInterval* interval = intervals[i];
-        for (size_t j = 0; j < interval->numRanges(); j++) {
-            AllocatedRange range(interval, interval->getRange(j));
+    for (size_t i = 0; i < bundles.length(); i++) {
+        LiveBundle* bundle = bundles[i];
+        for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+            LiveRange* range = LiveRange::get(*iter);
             if (!set.insert(range))
                 return false;
         }
     }
     return true;
 }
 
-// Add moves to resolve conflicting assignments between a block and its
-// predecessors.
 bool
 BacktrackingAllocator::resolveControlFlow()
 {
+    // Add moves to handle changing assignments for vregs over their lifetime.
     JitSpew(JitSpew_RegAlloc, "Resolving control flow (vreg loop)");
 
-    // Virtual register number 0 is unused.
-    MOZ_ASSERT(vregs[0u].numIntervals() == 0);
+    // Look for places where a register's assignment changes in the middle of a
+    // basic block.
+    MOZ_ASSERT(!vregs[0u].hasRanges());
     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
-        BacktrackingVirtualRegister* reg = &vregs[i];
+        VirtualRegister& reg = vregs[i];
 
         if (mir->shouldCancel("Backtracking Resolve Control Flow (vreg loop)"))
             return false;
 
-        for (size_t j = 1; j < reg->numIntervals(); j++) {
-            LiveInterval* interval = reg->getInterval(j);
-            MOZ_ASSERT(interval->index() == j);
-
+        for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
+            LiveRange* range = LiveRange::get(*iter);
+
+            // The range which defines the register does not have a predecessor
+            // to add moves from.
+            if (range->hasDefinition())
+                continue;
+
+            // Ignore ranges that start at block boundaries. We will handle
+            // these in the next phase.
+            CodePosition start = range->from();
+            LNode* ins = insData[start];
+            if (start == entryOf(ins->block()))
+                continue;
+
+            // If we already saw a range which covers the start of this range
+            // and has the same allocation, we don't need an explicit move at
+            // the start of this range.
             bool skip = false;
-            for (int k = j - 1; k >= 0; k--) {
-                LiveInterval* prevInterval = reg->getInterval(k);
-                if (prevInterval->start() != interval->start())
-                    break;
-                if (*prevInterval->getAllocation() == *interval->getAllocation()) {
+            for (LiveRange::RegisterLinkIterator prevIter = reg.rangesBegin();
+                 prevIter != iter;
+                 prevIter++)
+            {
+                LiveRange* prevRange = LiveRange::get(*prevIter);
+                if (prevRange->covers(start) &&
+                    prevRange->bundle()->allocation() == range->bundle()->allocation())
+                {
                     skip = true;
                     break;
                 }
             }
             if (skip)
                 continue;
 
-            CodePosition start = interval->start();
-            LNode* ins = insData[start];
-            if (start > entryOf(ins->block())) {
-                MOZ_ASSERT(start == inputOf(ins) || start == outputOf(ins));
-
-                LiveInterval* prevInterval = reg->intervalFor(start.previous());
-                if (start.subpos() == CodePosition::INPUT) {
-                    if (!moveInput(ins->toInstruction(), prevInterval, interval, reg->type()))
-                        return false;
-                } else {
-                    if (!moveAfter(ins->toInstruction(), prevInterval, interval, reg->type()))
-                        return false;
-                }
+            LiveRange* predecessorRange = reg.rangeFor(start.previous());
+            if (start.subpos() == CodePosition::INPUT) {
+                if (!moveInput(ins->toInstruction(), predecessorRange, range, reg.type()))
+                    return false;
+            } else {
+                if (!moveAfter(ins->toInstruction(), predecessorRange, range, reg.type()))
+                    return false;
             }
         }
     }
 
     JitSpew(JitSpew_RegAlloc, "Resolving control flow (block loop)");
 
     for (size_t i = 0; i < graph.numBlocks(); i++) {
         if (mir->shouldCancel("Backtracking Resolve Control Flow (block loop)"))
             return false;
 
         LBlock* successor = graph.getBlock(i);
         MBasicBlock* mSuccessor = successor->mir();
         if (mSuccessor->numPredecessors() < 1)
             continue;
 
-        // Resolve phis to moves
+        // Resolve phis to moves.
         for (size_t j = 0; j < successor->numPhis(); j++) {
             LPhi* phi = successor->getPhi(j);
             MOZ_ASSERT(phi->numDefs() == 1);
             LDefinition* def = phi->getDef(0);
-            VirtualRegister* vreg = &vregs[def];
-            LiveInterval* to = vreg->intervalFor(entryOf(successor));
+            VirtualRegister& reg = vreg(def);
+            LiveRange* to = reg.rangeFor(entryOf(successor));
             MOZ_ASSERT(to);
 
             for (size_t k = 0; k < mSuccessor->numPredecessors(); k++) {
                 LBlock* predecessor = mSuccessor->getPredecessor(k)->lir();
                 MOZ_ASSERT(predecessor->mir()->numSuccessors() == 1);
 
                 LAllocation* input = phi->getOperand(k);
-                LiveInterval* from = vregs[input].intervalFor(exitOf(predecessor));
+                LiveRange* from = vreg(input).rangeFor(exitOf(predecessor));
                 MOZ_ASSERT(from);
 
                 if (!moveAtExit(predecessor, from, to, def->type()))
                     return false;
             }
         }
 
-        // Resolve split intervals with moves
+        // Add moves to resolve graph edges with different allocations at their
+        // source and target.
         BitSet& live = liveIn[mSuccessor->id()];
 
         for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) {
             VirtualRegister& reg = vregs[*liveRegId];
 
             for (size_t j = 0; j < mSuccessor->numPredecessors(); j++) {
                 LBlock* predecessor = mSuccessor->getPredecessor(j)->lir();
 
-                for (size_t k = 0; k < reg.numIntervals(); k++) {
-                    LiveInterval* to = reg.getInterval(k);
+                for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
+                    LiveRange* to = LiveRange::get(*iter);
                     if (!to->covers(entryOf(successor)))
                         continue;
                     if (to->covers(exitOf(predecessor)))
                         continue;
 
-                    LiveInterval* from = reg.intervalFor(exitOf(predecessor));
+                    LiveRange* from = reg.rangeFor(exitOf(predecessor));
 
                     if (mSuccessor->numPredecessors() > 1) {
                         MOZ_ASSERT(predecessor->mir()->numSuccessors() == 1);
                         if (!moveAtExit(predecessor, from, to, reg.type()))
                             return false;
                     } else {
                         if (!moveAtEntry(successor, from, to, reg.type()))
                             return false;
@@ -1351,167 +2043,251 @@ BacktrackingAllocator::isRegisterUse(LUs
         return true;
 
       default:
         return false;
     }
 }
 
 bool
-BacktrackingAllocator::isRegisterDefinition(LiveInterval* interval)
+BacktrackingAllocator::isRegisterDefinition(LiveRange* range)
 {
-    if (interval->index() != 0)
+    if (!range->hasDefinition())
         return false;
 
-    VirtualRegister& reg = vregs[interval->vreg()];
+    VirtualRegister& reg = vregs[range->vreg()];
     if (reg.ins()->isPhi())
         return false;
 
     if (reg.def()->policy() == LDefinition::FIXED && !reg.def()->output()->isRegister())
         return false;
 
     return true;
 }
 
 bool
 BacktrackingAllocator::reifyAllocations()
 {
     JitSpew(JitSpew_RegAlloc, "Reifying Allocations");
 
-    // Virtual register number 0 is unused.
-    MOZ_ASSERT(vregs[0u].numIntervals() == 0);
+    MOZ_ASSERT(!vregs[0u].hasRanges());
     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
-        VirtualRegister* reg = &vregs[i];
+        VirtualRegister& reg = vregs[i];
 
         if (mir->shouldCancel("Backtracking Reify Allocations (main loop)"))
             return false;
 
-        for (size_t j = 0; j < reg->numIntervals(); j++) {
-            LiveInterval* interval = reg->getInterval(j);
-            MOZ_ASSERT(interval->index() == j);
-
-            if (interval->index() == 0) {
-                reg->def()->setOutput(*interval->getAllocation());
-                if (reg->ins()->recoversInput()) {
-                    LSnapshot* snapshot = reg->ins()->toInstruction()->snapshot();
+        for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
+            LiveRange* range = LiveRange::get(*iter);
+
+            if (range->hasDefinition()) {
+                reg.def()->setOutput(range->bundle()->allocation());
+                if (reg.ins()->recoversInput()) {
+                    LSnapshot* snapshot = reg.ins()->toInstruction()->snapshot();
                     for (size_t i = 0; i < snapshot->numEntries(); i++) {
                         LAllocation* entry = snapshot->getEntry(i);
                         if (entry->isUse() && entry->toUse()->policy() == LUse::RECOVERED_INPUT)
-                            *entry = *reg->def()->output();
+                            *entry = *reg.def()->output();
                     }
                 }
             }
 
-            for (UsePositionIterator iter(interval->usesBegin());
-                 iter != interval->usesEnd();
-                 iter++)
-            {
+            for (UsePositionIterator iter(range->usesBegin()); iter; iter++) {
                 LAllocation* alloc = iter->use;
-                *alloc = *interval->getAllocation();
+                *alloc = range->bundle()->allocation();
 
                 // For any uses which feed into MUST_REUSE_INPUT definitions,
                 // add copies if the use and def have different allocations.
                 LNode* ins = insData[iter->pos];
                 if (LDefinition* def = FindReusingDefinition(ins, alloc)) {
-                    LiveInterval* outputInterval =
-                        vregs[def->virtualRegister()].intervalFor(outputOf(ins));
-                    LAllocation* res = outputInterval->getAllocation();
-                    LAllocation* sourceAlloc = interval->getAllocation();
-
-                    if (*res != *alloc) {
+                    LiveRange* outputRange = vreg(def).rangeFor(outputOf(ins));
+                    LAllocation res = outputRange->bundle()->allocation();
+                    LAllocation sourceAlloc = range->bundle()->allocation();
+
+                    if (res != *alloc) {
                         LMoveGroup* group = getInputMoveGroup(ins->toInstruction());
-                        if (!group->addAfter(sourceAlloc, res, reg->type()))
+                        if (!group->addAfter(sourceAlloc, res, reg.type()))
                             return false;
-                        *alloc = *res;
+                        *alloc = res;
                     }
                 }
             }
 
-            addLiveRegistersForInterval(reg, interval);
+            addLiveRegistersForRange(reg, range);
         }
     }
 
     graph.setLocalSlotCount(stackSlotAllocator.stackHeight());
     return true;
 }
 
+size_t
+BacktrackingAllocator::findFirstNonCallSafepoint(CodePosition from)
+{
+    size_t i = 0;
+    for (; i < graph.numNonCallSafepoints(); i++) {
+        const LInstruction* ins = graph.getNonCallSafepoint(i);
+        if (from <= inputOf(ins))
+            break;
+    }
+    return i;
+}
+
+void
+BacktrackingAllocator::addLiveRegistersForRange(VirtualRegister& reg, LiveRange* range)
+{
+    // Fill in the live register sets for all non-call safepoints.
+    LAllocation a = range->bundle()->allocation();
+    if (!a.isRegister())
+        return;
+
+    // Don't add output registers to the safepoint.
+    CodePosition start = range->from();
+    if (range->hasDefinition() && !reg.isTemp()) {
+#ifdef CHECK_OSIPOINT_REGISTERS
+        // We don't add the output register to the safepoint,
+        // but it still might get added as one of the inputs.
+        // So eagerly add this reg to the safepoint clobbered registers.
+        if (reg.ins()->isInstruction()) {
+            if (LSafepoint* safepoint = reg.ins()->toInstruction()->safepoint())
+                safepoint->addClobberedRegister(a.toRegister());
+        }
+#endif
+        start = start.next();
+    }
+
+    size_t i = findFirstNonCallSafepoint(start);
+    for (; i < graph.numNonCallSafepoints(); i++) {
+        LInstruction* ins = graph.getNonCallSafepoint(i);
+        CodePosition pos = inputOf(ins);
+
+        // Safepoints are sorted, so we can shortcut out of this loop
+        // if we go out of range.
+        if (range->to() <= pos)
+            break;
+
+        MOZ_ASSERT(range->covers(pos));
+
+        LSafepoint* safepoint = ins->safepoint();
+        safepoint->addLiveRegister(a.toRegister());
+
+#ifdef CHECK_OSIPOINT_REGISTERS
+        if (reg.isTemp())
+            safepoint->addClobberedRegister(a.toRegister());
+#endif
+    }
+}
+
+static inline bool
+IsNunbox(VirtualRegister& reg)
+{
+#ifdef JS_NUNBOX32
+    return reg.type() == LDefinition::TYPE ||
+           reg.type() == LDefinition::PAYLOAD;
+#else
+    return false;
+#endif
+}
+
+static inline bool
+IsSlotsOrElements(VirtualRegister& reg)
+{
+    return reg.type() == LDefinition::SLOTS;
+}
+
+static inline bool
+IsTraceable(VirtualRegister& reg)
+{
+    if (reg.type() == LDefinition::OBJECT)
+        return true;
+#ifdef JS_PUNBOX64
+    if (reg.type() == LDefinition::BOX)
+        return true;
+#endif
+    return false;
+}
+
+size_t
+BacktrackingAllocator::findFirstSafepoint(CodePosition pos, size_t startFrom)
+{
+    size_t i = startFrom;
+    for (; i < graph.numSafepoints(); i++) {
+        LInstruction* ins = graph.getSafepoint(i);
+        if (pos <= inputOf(ins))
+            break;
+    }
+    return i;
+}
+
 bool
 BacktrackingAllocator::populateSafepoints()
 {
     JitSpew(JitSpew_RegAlloc, "Populating Safepoints");
 
     size_t firstSafepoint = 0;
 
-    // Virtual register number 0 is unused.
     MOZ_ASSERT(!vregs[0u].def());
-    for (uint32_t i = 1; i < vregs.numVirtualRegisters(); i++) {
-        BacktrackingVirtualRegister* reg = &vregs[i];
-
-        if (!reg->def() || (!IsTraceable(reg) && !IsSlotsOrElements(reg) && !IsNunbox(reg)))
+    for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) {
+        VirtualRegister& reg = vregs[i];
+
+        if (!reg.def() || (!IsTraceable(reg) && !IsSlotsOrElements(reg) && !IsNunbox(reg)))
             continue;
 
-        firstSafepoint = findFirstSafepoint(reg->getInterval(0), firstSafepoint);
+        firstSafepoint = findFirstSafepoint(inputOf(reg.ins()), firstSafepoint);
         if (firstSafepoint >= graph.numSafepoints())
             break;
 
-        // Find the furthest endpoint. Intervals are sorted, but by start
-        // position, and we want the greatest end position.
-        CodePosition end = reg->getInterval(0)->end();
-        for (size_t j = 1; j < reg->numIntervals(); j++)
-            end = Max(end, reg->getInterval(j)->end());
-
-        for (size_t j = firstSafepoint; j < graph.numSafepoints(); j++) {
-            LInstruction* ins = graph.getSafepoint(j);
-
-            // Stop processing safepoints if we know we're out of this virtual
-            // register's range.
-            if (end < outputOf(ins))
-                break;
-
-            // Include temps but not instruction outputs. Also make sure MUST_REUSE_INPUT
-            // is not used with gcthings or nunboxes, or we would have to add the input reg
-            // to this safepoint.
-            if (ins == reg->ins() && !reg->isTemp()) {
-                DebugOnly<LDefinition*> def = reg->def();
-                MOZ_ASSERT_IF(def->policy() == LDefinition::MUST_REUSE_INPUT,
-                              def->type() == LDefinition::GENERAL ||
-                              def->type() == LDefinition::INT32 ||
-                              def->type() == LDefinition::FLOAT32 ||
-                              def->type() == LDefinition::DOUBLE);
-                continue;
-            }
-
-            LSafepoint* safepoint = ins->safepoint();
-
-            for (size_t k = 0; k < reg->numIntervals(); k++) {
-                LiveInterval* interval = reg->getInterval(k);
-                if (!interval->covers(inputOf(ins)))
+        for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
+            LiveRange* range = LiveRange::get(*iter);
+
+            for (size_t j = firstSafepoint; j < graph.numSafepoints(); j++) {
+                LInstruction* ins = graph.getSafepoint(j);
+
+                if (!range->covers(inputOf(ins))) {
+                    if (inputOf(ins) >= range->to())
+                        break;
                     continue;
-
-                LAllocation* a = interval->getAllocation();
-                if (a->isGeneralReg() && ins->isCall())
+                }
+
+                // Include temps but not instruction outputs. Also make sure
+                // MUST_REUSE_INPUT is not used with gcthings or nunboxes, or
+                // we would have to add the input reg to this safepoint.
+                if (ins == reg.ins() && !reg.isTemp()) {
+                    DebugOnly<LDefinition*> def = reg.def();
+                    MOZ_ASSERT_IF(def->policy() == LDefinition::MUST_REUSE_INPUT,
+                                  def->type() == LDefinition::GENERAL ||
+                                  def->type() == LDefinition::INT32 ||
+                                  def->type() == LDefinition::FLOAT32 ||
+                                  def->type() == LDefinition::DOUBLE);
                     continue;
-
-                switch (reg->type()) {
+                }
+
+                LSafepoint* safepoint = ins->safepoint();
+
+                LAllocation a = range->bundle()->allocation();
+                if (a.isGeneralReg() && ins->isCall())
+                    continue;
+
+                switch (reg.type()) {
                   case LDefinition::OBJECT:
-                    safepoint->addGcPointer(*a);
+                    safepoint->addGcPointer(a);
                     break;
                   case LDefinition::SLOTS:
-                    safepoint->addSlotsOrElementsPointer(*a);
+                    safepoint->addSlotsOrElementsPointer(a);
                     break;
 #ifdef JS_NUNBOX32
                   case LDefinition::TYPE:
-                    safepoint->addNunboxType(i, *a);
+                    safepoint->addNunboxType(i, a);
                     break;
                   case LDefinition::PAYLOAD:
-                    safepoint->addNunboxPayload(i, *a);
+                    safepoint->addNunboxPayload(i, a);
                     break;
 #else
                   case LDefinition::BOX:
-                    safepoint->addBoxedValue(*a);
+                    safepoint->addBoxedValue(a);
                     break;
 #endif
                   default:
                     MOZ_CRASH("Bad register type");
                 }
             }
         }
     }
@@ -1522,37 +2298,41 @@ BacktrackingAllocator::populateSafepoint
 bool
 BacktrackingAllocator::annotateMoveGroups()
 {
     // Annotate move groups in the LIR graph with any register that is not
     // allocated at that point and can be used as a scratch register. This is
     // only required for x86, as other platforms always have scratch registers
     // available for use.
 #ifdef JS_CODEGEN_X86
+    LiveRange* range = LiveRange::New(alloc(), 0, CodePosition(), CodePosition().next());
+    if (!range)
+        return false;
+
     for (size_t i = 0; i < graph.numBlocks(); i++) {
         if (mir->shouldCancel("Backtracking Annotate Move Groups"))
             return false;
 
         LBlock* block = graph.getBlock(i);
         LInstruction* last = nullptr;
         for (LInstructionIterator iter = block->begin(); iter != block->end(); ++iter) {
             if (iter->isMoveGroup()) {
                 CodePosition from = last ? outputOf(last) : entryOf(block);
-                LiveInterval::Range range(from, from.next());
-                AllocatedRange search(nullptr, &range), existing;
+                range->setFrom(from);
+                range->setTo(from.next());
 
                 for (size_t i = 0; i < AnyRegister::Total; i++) {
                     PhysicalRegister& reg = registers[i];
                     if (reg.reg.isFloat() || !reg.allocatable)
                         continue;
 
                     // This register is unavailable for use if (a) it is in use
-                    // by some live interval immediately before the move group,
+                    // by some live range immediately before the move group,
                     // or (b) it is an operand in one of the group's moves. The
-                    // latter case handles live intervals which end immediately
+                    // latter case handles live ranges which end immediately
                     // before the move group or start immediately after.
                     // For (b) we need to consider move groups immediately
                     // preceding or following this one.
 
                     if (iter->toMoveGroup()->uses(reg.reg.gpr()))
                         continue;
                     bool found = false;
                     LInstructionIterator niter(iter);
@@ -1576,32 +2356,128 @@ BacktrackingAllocator::annotateMoveGroup
                                     break;
                                 }
                             } else {
                                 break;
                             }
                         } while (riter != block->begin());
                     }
 
-                    if (found || reg.allocations.contains(search, &existing))
+                    LiveRange* existing;
+                    if (found || reg.allocations.contains(range, &existing))
                         continue;
 
                     iter->toMoveGroup()->setScratchRegister(reg.reg.gpr());
                     break;
                 }
             } else {
                 last = *iter;
             }
         }
     }
 #endif
 
     return true;
 }
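For illustration only, not part of the patch: a standalone sketch of the scratch-register search that annotateMoveGroups performs, using hypothetical simplified types (ToyMove, PickScratch) instead of the real LMoveGroup/AnyRegister machinery. It only models rule (b): skip any register that one of the moves reads or writes.

#include <vector>

struct ToyMove { int from, to; };   // register numbers read/written by a move

// Return the first register in [0, numRegs) not touched by any move, or -1 if
// every register is used. The real allocator additionally checks rule (a):
// whether the register is live immediately before the move group.
int PickScratch(const std::vector<ToyMove>& moves, int numRegs) {
    for (int reg = 0; reg < numRegs; reg++) {
        bool used = false;
        for (const ToyMove& m : moves) {
            if (m.from == reg || m.to == reg) {
                used = true;
                break;
            }
        }
        if (!used)
            return reg;
    }
    return -1;
}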
 
+bool
+BacktrackingAllocator::addLiveBundle(LiveBundleVector& bundles, uint32_t vreg,
+                                     LiveBundle* spillParent,
+                                     CodePosition from, CodePosition to)
+{
+    LiveBundle* bundle = LiveBundle::New(alloc());
+    if (!bundle || !bundles.append(bundle))
+        return false;
+    bundle->setSpillParent(spillParent);
+    return bundle->addRange(alloc(), vreg, from, to);
+}
+
+/////////////////////////////////////////////////////////////////////
+// Debugging methods
+/////////////////////////////////////////////////////////////////////
+
+#ifdef DEBUG
+
+const char*
+LiveRange::toString() const
+{
+    // Not reentrant!
+    static char buf[2000];
+
+    char* cursor = buf;
+    char* end = cursor + sizeof(buf);
+
+    int n = JS_snprintf(cursor, end - cursor, "v%u [%u,%u)",
+                        hasVreg() ? vreg() : 0, from().bits(), to().bits());
+    if (n < 0) MOZ_CRASH();
+    cursor += n;
+
+    if (bundle() && !bundle()->allocation().isBogus()) {
+        n = JS_snprintf(cursor, end - cursor, " %s", bundle()->allocation().toString());
+        if (n < 0) MOZ_CRASH();
+        cursor += n;
+    }
+
+    if (hasDefinition()) {
+        n = JS_snprintf(cursor, end - cursor, " (def)");
+        if (n < 0) MOZ_CRASH();
+        cursor += n;
+    }
+
+    for (UsePositionIterator iter = usesBegin(); iter; iter++) {
+        n = JS_snprintf(cursor, end - cursor, " %s@%u", iter->use->toString(), iter->pos.bits());
+        if (n < 0) MOZ_CRASH();
+        cursor += n;
+    }
+
+    return buf;
+}
+
+const char*
+LiveBundle::toString() const
+{
+    // Not reentrant!
+    static char buf[2000];
+
+    char* cursor = buf;
+    char* end = cursor + sizeof(buf);
+
+    for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) {
+        int n = JS_snprintf(cursor, end - cursor, "%s %s",
+                            (iter == rangesBegin()) ? "" : " ##",
+                            LiveRange::get(*iter)->toString());
+        if (n < 0) MOZ_CRASH();
+        cursor += n;
+    }
+
+    return buf;
+}
+
+#endif // DEBUG
+
+void
+BacktrackingAllocator::dumpVregs()
+{
+#ifdef DEBUG
+    MOZ_ASSERT(!vregs[0u].hasRanges());
+    for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) {
+        fprintf(stderr, "  ");
+        VirtualRegister& reg = vregs[i];
+        for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
+            if (iter != reg.rangesBegin())
+                fprintf(stderr, " / ");
+            fprintf(stderr, "%s", LiveRange::get(*iter)->toString());
+        }
+        fprintf(stderr, "\n");
+    }
+
+    fprintf(stderr, "\n");
+#endif
+}
+
 void
 BacktrackingAllocator::dumpRegisterGroups()
 {
 #ifdef DEBUG
     bool any = false;
 
     // Virtual register number 0 is unused.
     MOZ_ASSERT(!vregs[0u].group());
@@ -1622,51 +2498,34 @@ BacktrackingAllocator::dumpRegisterGroup
         fprintf(stderr, "\n");
 #endif
 }
 
 void
 BacktrackingAllocator::dumpFixedRanges()
 {
 #ifdef DEBUG
-    bool any = false;
-
-    for (size_t i = 0; i < AnyRegister::Total; i++) {
-        if (registers[i].allocatable && fixedIntervals[i]->numRanges() != 0) {
-            if (!any) {
-                fprintf(stderr, "Live ranges by physical register:\n");
-                any = true;
-            }
-            fprintf(stderr, "  %s: %s\n", AnyRegister::FromCode(i).name(), fixedIntervals[i]->toString());
-        }
-    }
-
-    if (any)
-        fprintf(stderr, "\n");
+    fprintf(stderr, "Live ranges by physical register: %s\n", callRanges->toString());
 #endif // DEBUG
 }
 
 #ifdef DEBUG
-struct BacktrackingAllocator::PrintLiveIntervalRange
+struct BacktrackingAllocator::PrintLiveRange
 {
     bool& first_;
 
-    explicit PrintLiveIntervalRange(bool& first) : first_(first) {}
-
-    void operator()(const AllocatedRange& item)
+    explicit PrintLiveRange(bool& first) : first_(first) {}
+
+    void operator()(const LiveRange* range)
     {
-        if (item.range == item.interval->getRange(0)) {
-            if (first_)
-                first_ = false;
-            else
-                fprintf(stderr, " /");
-            if (item.interval->hasVreg())
-                fprintf(stderr, " v%u[%u]", item.interval->vreg(), item.interval->index());
-            fprintf(stderr, "%s", item.interval->rangesToString());
-        }
+        if (first_)
+            first_ = false;
+        else
+            fprintf(stderr, " /");
+        fprintf(stderr, " %s", range->toString());
     }
 };
 #endif
 
 void
 BacktrackingAllocator::dumpAllocations()
 {
 #ifdef DEBUG
@@ -1675,484 +2534,509 @@ BacktrackingAllocator::dumpAllocations()
     dumpVregs();
 
     fprintf(stderr, "Allocations by physical register:\n");
 
     for (size_t i = 0; i < AnyRegister::Total; i++) {
         if (registers[i].allocatable && !registers[i].allocations.empty()) {
             fprintf(stderr, "  %s:", AnyRegister::FromCode(i).name());
             bool first = true;
-            registers[i].allocations.forEach(PrintLiveIntervalRange(first));
+            registers[i].allocations.forEach(PrintLiveRange(first));
             fprintf(stderr, "\n");
         }
     }
 
     fprintf(stderr, "\n");
 #endif // DEBUG
 }
 
-bool
-BacktrackingAllocator::addLiveInterval(LiveIntervalVector& intervals, uint32_t vreg,
-                                       LiveInterval* spillInterval,
-                                       CodePosition from, CodePosition to)
-{
-    LiveInterval* interval = LiveInterval::New(alloc(), vreg, 0);
-    interval->setSpillInterval(spillInterval);
-    return interval->addRange(from, to) && intervals.append(interval);
-}
-
 ///////////////////////////////////////////////////////////////////////////////
 // Heuristic Methods
 ///////////////////////////////////////////////////////////////////////////////
 
 size_t
-BacktrackingAllocator::computePriority(const LiveInterval* interval)
+BacktrackingAllocator::computePriority(LiveBundle* bundle)
 {
-    // The priority of an interval is its total length, so that longer lived
-    // intervals will be processed before shorter ones (even if the longer ones
-    // have a low spill weight). See processInterval().
+    // The priority of a bundle is its total length, so that longer lived
+    // bundles will be processed before shorter ones (even if the longer ones
+    // have a low spill weight). See processBundle().
     size_t lifetimeTotal = 0;
 
-    for (size_t i = 0; i < interval->numRanges(); i++) {
-        const LiveInterval::Range* range = interval->getRange(i);
-        lifetimeTotal += range->to - range->from;
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+        lifetimeTotal += range->to() - range->from();
     }
 
     return lifetimeTotal;
 }
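A minimal sketch, not part of the patch, of the priority computation above on hypothetical stand-in types: priority is simply the summed length of all ranges, so a bundle live over [2,10) and [14,20) gets priority 8 + 6 = 14.

#include <cstdint>
#include <vector>

struct ToyRange { uint32_t from, to; };                  // [from, to)

uint32_t Priority(const std::vector<ToyRange>& ranges) {
    uint32_t total = 0;
    for (const ToyRange& r : ranges)
        total += r.to - r.from;                          // length of each range
    return total;
}
// Priority({{2, 10}, {14, 20}}) == 14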
 
 size_t
 BacktrackingAllocator::computePriority(const VirtualRegisterGroup* group)
 {
     size_t priority = 0;
     for (size_t j = 0; j < group->registers.length(); j++) {
         uint32_t vreg = group->registers[j];
-        priority += computePriority(vregs[vreg].getInterval(0));
+        priority += computePriority(LiveRange::get(*vregs[vreg].rangesBegin())->bundle());
     }
     return priority;
 }
 
 bool
-BacktrackingAllocator::minimalDef(const LiveInterval* interval, LNode* ins)
+BacktrackingAllocator::minimalDef(LiveRange* range, LNode* ins)
 {
-    // Whether interval is a minimal interval capturing a definition at ins.
-    return (interval->end() <= minimalDefEnd(ins).next()) &&
-        ((!ins->isPhi() && interval->start() == inputOf(ins)) || interval->start() == outputOf(ins));
+    // Whether this is a minimal range capturing a definition at ins.
+    return (range->to() <= minimalDefEnd(ins).next()) &&
+           ((!ins->isPhi() && range->from() == inputOf(ins)) || range->from() == outputOf(ins));
 }
 
 bool
-BacktrackingAllocator::minimalUse(const LiveInterval* interval, LNode* ins)
+BacktrackingAllocator::minimalUse(LiveRange* range, LNode* ins)
 {
-    // Whether interval is a minimal interval capturing a use at ins.
-    return (interval->start() == inputOf(ins)) &&
-        (interval->end() == outputOf(ins) || interval->end() == outputOf(ins).next());
+    // Whether this is a minimal range capturing a use at ins.
+    return (range->from() == inputOf(ins)) &&
+           (range->to() == outputOf(ins) || range->to() == outputOf(ins).next());
 }
 
 bool
-BacktrackingAllocator::minimalInterval(const LiveInterval* interval, bool* pfixed)
+BacktrackingAllocator::minimalBundle(LiveBundle* bundle, bool* pfixed)
 {
-    if (!interval->hasVreg()) {
+    LiveRange::BundleLinkIterator iter = bundle->rangesBegin();
+    LiveRange* range = LiveRange::get(*iter);
+
+    if (!range->hasVreg()) {
         *pfixed = true;
         return true;
     }
 
-    if (interval->index() == 0) {
-        VirtualRegister& reg = vregs[interval->vreg()];
+    // If a bundle contains multiple ranges, splitAtAllRegisterUses will split
+    // each range into a separate bundle.
+    if (++iter)
+        return false;
+
+    if (range->hasDefinition()) {
+        VirtualRegister& reg = vregs[range->vreg()];
         if (pfixed)
             *pfixed = reg.def()->policy() == LDefinition::FIXED && reg.def()->output()->isRegister();
-        return minimalDef(interval, reg.ins());
+        return minimalDef(range, reg.ins());
     }
 
     bool fixed = false, minimal = false, multiple = false;
 
-    for (UsePositionIterator iter = interval->usesBegin(); iter != interval->usesEnd(); iter++) {
-        if (iter != interval->usesBegin())
+    for (UsePositionIterator iter = range->usesBegin(); iter; iter++) {
+        if (iter != range->usesBegin())
             multiple = true;
         LUse* use = iter->use;
 
         switch (use->policy()) {
           case LUse::FIXED:
             if (fixed)
                 return false;
             fixed = true;
-            if (minimalUse(interval, insData[iter->pos]))
+            if (minimalUse(range, insData[iter->pos]))
                 minimal = true;
             break;
 
           case LUse::REGISTER:
-            if (minimalUse(interval, insData[iter->pos]))
+            if (minimalUse(range, insData[iter->pos]))
                 minimal = true;
             break;
 
           default:
             break;
         }
     }
 
-    // If an interval contains a fixed use and at least one other use,
-    // splitAtAllRegisterUses will split each use into a different interval.
+    // If a range contains a fixed use and at least one other use,
+    // splitAtAllRegisterUses will split each use into a different bundle.
     if (multiple && fixed)
         minimal = false;
 
     if (pfixed)
         *pfixed = fixed;
     return minimal;
 }
 
 size_t
-BacktrackingAllocator::computeSpillWeight(const LiveInterval* interval)
+BacktrackingAllocator::computeSpillWeight(LiveBundle* bundle)
 {
-    // Minimal intervals have an extremely high spill weight, to ensure they
-    // can evict any other intervals and be allocated to a register.
+    // Minimal bundles have an extremely high spill weight, to ensure they
+    // can evict any other bundles and be allocated to a register.
     bool fixed;
-    if (minimalInterval(interval, &fixed))
+    if (minimalBundle(bundle, &fixed))
         return fixed ? 2000000 : 1000000;
 
     size_t usesTotal = 0;
 
-    if (interval->index() == 0) {
-        VirtualRegister* reg = &vregs[interval->vreg()];
-        if (reg->def()->policy() == LDefinition::FIXED && reg->def()->output()->isRegister())
-            usesTotal += 2000;
-        else if (!reg->ins()->isPhi())
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+
+        if (range->hasDefinition()) {
+            VirtualRegister& reg = vregs[range->vreg()];
+            if (reg.def()->policy() == LDefinition::FIXED && reg.def()->output()->isRegister())
+                usesTotal += 2000;
+            else if (!reg.ins()->isPhi())
+                usesTotal += 2000;
+        }
+
+        for (UsePositionIterator iter = range->usesBegin(); iter; iter++) {
+            LUse* use = iter->use;
+
+            switch (use->policy()) {
+              case LUse::ANY:
+                usesTotal += 1000;
+                break;
+
+              case LUse::REGISTER:
+              case LUse::FIXED:
+                usesTotal += 2000;
+                break;
+
+              case LUse::KEEPALIVE:
+                break;
+
+              default:
+                // Note: RECOVERED_INPUT will not appear in UsePositionIterator.
+                MOZ_CRASH("Bad use");
+            }
+        }
+
+        // Ranges for registers in groups get higher weights.
+        if (vregs[range->vreg()].group())
             usesTotal += 2000;
     }
 
-    for (UsePositionIterator iter = interval->usesBegin(); iter != interval->usesEnd(); iter++) {
-        LUse* use = iter->use;
-
-        switch (use->policy()) {
-          case LUse::ANY:
-            usesTotal += 1000;
-            break;
-
-          case LUse::REGISTER:
-          case LUse::FIXED:
-            usesTotal += 2000;
-            break;
-
-          case LUse::KEEPALIVE:
-            break;
-
-          default:
-            // Note: RECOVERED_INPUT will not appear in UsePositionIterator.
-            MOZ_CRASH("Bad use");
-        }
-    }
-
-    // Intervals for registers in groups get higher weights.
-    if (interval->hint()->kind() != Requirement::NONE)
-        usesTotal += 2000;
-
     // Compute spill weight as a use density, lowering the weight for long
-    // lived intervals with relatively few uses.
-    size_t lifetimeTotal = computePriority(interval);
+    // lived bundles with relatively few uses.
+    size_t lifetimeTotal = computePriority(bundle);
     return lifetimeTotal ? usesTotal / lifetimeTotal : 0;
 }
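For illustration only, not part of the patch: the use-density idea on hypothetical stand-in values. Each use contributes a fixed weight and the total is divided by the bundle's lifetime, so a long-lived bundle with few uses ends up cheap to spill.

#include <cstddef>

// usesTotal: e.g. 2000 per register/fixed use, 1000 per "any" use.
// lifetimeTotal: the bundle's priority (summed range lengths).
size_t SpillWeight(size_t usesTotal, size_t lifetimeTotal) {
    return lifetimeTotal ? usesTotal / lifetimeTotal : 0;
}
// SpillWeight(4000, 10)  == 400  (short-lived, heavily used: expensive to spill)
// SpillWeight(4000, 400) == 10   (long-lived, lightly used: cheap to spill)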
 
 size_t
 BacktrackingAllocator::computeSpillWeight(const VirtualRegisterGroup* group)
 {
     size_t maxWeight = 0;
     for (size_t j = 0; j < group->registers.length(); j++) {
         uint32_t vreg = group->registers[j];
-        maxWeight = Max(maxWeight, computeSpillWeight(vregs[vreg].getInterval(0)));
+        maxWeight = Max(maxWeight, computeSpillWeight(LiveRange::get(*vregs[vreg].rangesBegin())->bundle()));
     }
     return maxWeight;
 }
 
 size_t
-BacktrackingAllocator::maximumSpillWeight(const LiveIntervalVector& intervals)
+BacktrackingAllocator::maximumSpillWeight(const LiveBundleVector& bundles)
 {
     size_t maxWeight = 0;
-    for (size_t i = 0; i < intervals.length(); i++)
-        maxWeight = Max(maxWeight, computeSpillWeight(intervals[i]));
+    for (size_t i = 0; i < bundles.length(); i++)
+        maxWeight = Max(maxWeight, computeSpillWeight(bundles[i]));
     return maxWeight;
 }
 
 bool
-BacktrackingAllocator::trySplitAcrossHotcode(LiveInterval* interval, bool* success)
+BacktrackingAllocator::trySplitAcrossHotcode(LiveBundle* bundle, bool* success)
 {
-    // If this interval has portions that are hot and portions that are cold,
+    // If this bundle has portions that are hot and portions that are cold,
     // split it at the boundaries between hot and cold code.
 
-    const LiveInterval::Range* hotRange = nullptr;
-
-    for (size_t i = 0; i < interval->numRanges(); i++) {
-        AllocatedRange range(interval, interval->getRange(i)), existing;
-        if (hotcode.contains(range, &existing)) {
-            hotRange = existing.range;
+    LiveRange* hotRange = nullptr;
+
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+        if (hotcode.contains(range, &hotRange))
             break;
-        }
     }
 
-    // Don't split if there is no hot code in the interval.
+    // Don't split if there is no hot code in the bundle.
     if (!hotRange) {
-        JitSpew(JitSpew_RegAlloc, "  interval does not contain hot code");
+        JitSpew(JitSpew_RegAlloc, "  bundle does not contain hot code");
         return true;
     }
 
-    // Don't split if there is no cold code in the interval.
+    // Don't split if there is no cold code in the bundle.
     bool coldCode = false;
-    for (size_t i = 0; i < interval->numRanges(); i++) {
-        if (!hotRange->contains(interval->getRange(i))) {
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+        if (!hotRange->contains(range)) {
             coldCode = true;
             break;
         }
     }
     if (!coldCode) {
-        JitSpew(JitSpew_RegAlloc, "  interval does not contain cold code");
+        JitSpew(JitSpew_RegAlloc, "  bundle does not contain cold code");
         return true;
     }
 
     JitSpew(JitSpew_RegAlloc, "  split across hot range %s", hotRange->toString());
 
     // Tweak the splitting method when compiling asm.js code to look at actual
     // uses within the hot/cold code. This heuristic is in place as the below
     // mechanism regresses several asm.js tests. Hopefully this will be fixed
     // soon and this special case removed. See bug 948838.
     if (compilingAsmJS()) {
         SplitPositionVector splitPositions;
-        if (!splitPositions.append(hotRange->from) || !splitPositions.append(hotRange->to))
+        if (!splitPositions.append(hotRange->from()) || !splitPositions.append(hotRange->to()))
             return false;
         *success = true;
-        return splitAt(interval, splitPositions);
+        return splitAt(bundle, splitPositions);
     }
 
-    LiveInterval* hotInterval = LiveInterval::New(alloc(), interval->vreg(), 0);
-    LiveInterval* preInterval = nullptr;
-    LiveInterval* postInterval = nullptr;
-
-    // Accumulate the ranges of hot and cold code in the interval. Note that
+    LiveBundle* hotBundle = LiveBundle::New(alloc());
+    if (!hotBundle)
+        return false;
+    LiveBundle* preBundle = nullptr;
+    LiveBundle* postBundle = nullptr;
+
+    // Accumulate the ranges of hot and cold code in the bundle. Note that
     // we are only comparing with the single hot range found, so the cold code
     // may contain separate hot ranges.
-    Vector<LiveInterval::Range, 1, SystemAllocPolicy> hotList, coldList;
-    for (size_t i = 0; i < interval->numRanges(); i++) {
-        LiveInterval::Range hot, coldPre, coldPost;
-        interval->getRange(i)->intersect(hotRange, &coldPre, &hot, &coldPost);
-
-        if (!hot.empty() && !hotInterval->addRange(hot.from, hot.to))
-            return false;
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+        LiveRange::Range hot, coldPre, coldPost;
+        range->intersect(hotRange, &coldPre, &hot, &coldPost);
+
+        if (!hot.empty()) {
+            if (!hotBundle->addRangeAndDistributeUses(alloc(), range, hot.from, hot.to))
+                return false;
+        }
 
         if (!coldPre.empty()) {
-            if (!preInterval)
-                preInterval = LiveInterval::New(alloc(), interval->vreg(), 0);
-            if (!preInterval->addRange(coldPre.from, coldPre.to))
+            if (!preBundle) {
+                preBundle = LiveBundle::New(alloc());
+                if (!preBundle)
+                    return false;
+            }
+            if (!preBundle->addRangeAndDistributeUses(alloc(), range, coldPre.from, coldPre.to))
                 return false;
         }
 
         if (!coldPost.empty()) {
-            if (!postInterval)
-                postInterval = LiveInterval::New(alloc(), interval->vreg(), 0);
-            if (!postInterval->addRange(coldPost.from, coldPost.to))
+            if (!postBundle)
+                postBundle = LiveBundle::New(alloc());
+            if (!postBundle->addRangeAndDistributeUses(alloc(), range, coldPost.from, coldPost.to))
                 return false;
         }
     }
 
-    MOZ_ASSERT(preInterval || postInterval);
-    MOZ_ASSERT(hotInterval->numRanges());
-
-    LiveIntervalVector newIntervals;
-    if (!newIntervals.append(hotInterval))
+    MOZ_ASSERT(preBundle || postBundle);
+    MOZ_ASSERT(hotBundle->numRanges() != 0);
+
+    LiveBundleVector newBundles;
+    if (!newBundles.append(hotBundle))
         return false;
-    if (preInterval && !newIntervals.append(preInterval))
+    if (preBundle && !newBundles.append(preBundle))
         return false;
-    if (postInterval && !newIntervals.append(postInterval))
+    if (postBundle && !newBundles.append(postBundle))
         return false;
 
-    distributeUses(interval, newIntervals);
-
     *success = true;
-    return split(interval, newIntervals) && requeueIntervals(newIntervals);
+    return splitAndRequeueBundles(bundle, newBundles);
 }
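A minimal sketch, not part of the patch, of the range intersection used above: splitting one range against the hot range yields the cold part before it, the overlapping hot part, and the cold part after it. The types and the Intersect helper are hypothetical stand-ins for LiveRange::intersect.

#include <algorithm>
#include <cstdint>

struct ToyRange { uint32_t from = 0, to = 0; };          // [from, to), empty if from == to

void Intersect(ToyRange r, ToyRange hot, ToyRange* pre, ToyRange* inside, ToyRange* post) {
    *pre = *inside = *post = ToyRange();
    if (r.from < hot.from)
        *pre = { r.from, std::min(r.to, hot.from) };     // cold code before the hot range
    uint32_t lo = std::max(r.from, hot.from);
    uint32_t hi = std::min(r.to, hot.to);
    if (lo < hi)
        *inside = { lo, hi };                            // part overlapping the hot range
    if (r.to > hot.to)
        *post = { std::max(r.from, hot.to), r.to };      // cold code after the hot range
}
// Intersect({0, 20}, {5, 12}, ...) gives pre = [0,5), inside = [5,12), post = [12,20).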
 
 bool
-BacktrackingAllocator::trySplitAfterLastRegisterUse(LiveInterval* interval, LiveInterval* conflict, bool* success)
+BacktrackingAllocator::trySplitAfterLastRegisterUse(LiveBundle* bundle, LiveBundle* conflict,
+                                                    bool* success)
 {
-    // If this interval's later uses do not require it to be in a register,
+    // If this bundle's later uses do not require it to be in a register,
     // split it after the last use which does require a register. If conflict
     // is specified, only consider register uses before the conflict starts.
 
     CodePosition lastRegisterFrom, lastRegisterTo, lastUse;
 
-    // If the definition of the interval is in a register, consider that a
-    // register use too for our purposes here.
-    if (isRegisterDefinition(interval)) {
-        CodePosition spillStart = minimalDefEnd(insData[interval->start()]).next();
-        if (!conflict || spillStart < conflict->start()) {
-            lastUse = lastRegisterFrom = interval->start();
-            lastRegisterTo = spillStart;
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+
+        // If the range defines a register, consider that a register use for
+        // our purposes here.
+        if (isRegisterDefinition(range)) {
+            CodePosition spillStart = minimalDefEnd(insData[range->from()]).next();
+            if (!conflict || spillStart < conflict->firstRange()->from()) {
+                lastUse = lastRegisterFrom = range->from();
+                lastRegisterTo = spillStart;
+            }
         }
-    }
-
-    for (UsePositionIterator iter(interval->usesBegin());
-         iter != interval->usesEnd();
-         iter++)
-    {
-        LUse* use = iter->use;
-        LNode* ins = insData[iter->pos];
-
-        // Uses in the interval should be sorted.
-        MOZ_ASSERT(iter->pos >= lastUse);
-        lastUse = inputOf(ins);
-
-        if (!conflict || outputOf(ins) < conflict->start()) {
-            if (isRegisterUse(use, ins, /* considerCopy = */ true)) {
-                lastRegisterFrom = inputOf(ins);
-                lastRegisterTo = iter->pos.next();
+
+        for (UsePositionIterator iter(range->usesBegin()); iter; iter++) {
+            LUse* use = iter->use;
+            LNode* ins = insData[iter->pos];
+
+            // Uses in the bundle should be sorted.
+            MOZ_ASSERT(iter->pos >= lastUse);
+            lastUse = inputOf(ins);
+
+            if (!conflict || outputOf(ins) < conflict->firstRange()->from()) {
+                if (isRegisterUse(use, ins, /* considerCopy = */ true)) {
+                    lastRegisterFrom = inputOf(ins);
+                    lastRegisterTo = iter->pos.next();
+                }
             }
         }
     }
 
     // Can't trim non-register uses off the end by splitting.
     if (!lastRegisterFrom.bits()) {
-        JitSpew(JitSpew_RegAlloc, "  interval has no register uses");
+        JitSpew(JitSpew_RegAlloc, "  bundle has no register uses");
         return true;
     }
     if (lastRegisterFrom == lastUse) {
-        JitSpew(JitSpew_RegAlloc, "  interval's last use is a register use");
+        JitSpew(JitSpew_RegAlloc, "  bundle's last use is a register use");
         return true;
     }
 
     JitSpew(JitSpew_RegAlloc, "  split after last register use at %u",
             lastRegisterTo.bits());
 
     SplitPositionVector splitPositions;
     if (!splitPositions.append(lastRegisterTo))
         return false;
     *success = true;
-    return splitAt(interval, splitPositions);
+    return splitAt(bundle, splitPositions);
 }
 
 bool
-BacktrackingAllocator::trySplitBeforeFirstRegisterUse(LiveInterval* interval, LiveInterval* conflict, bool* success)
+BacktrackingAllocator::trySplitBeforeFirstRegisterUse(LiveBundle* bundle, LiveBundle* conflict, bool* success)
 {
-    // If this interval's earlier uses do not require it to be in a register,
+    // If this bundle's earlier uses do not require it to be in a register,
     // split it before the first use which does require a register. If conflict
     // is specified, only consider register uses after the conflict ends.
 
-    if (isRegisterDefinition(interval)) {
-        JitSpew(JitSpew_RegAlloc, "  interval is defined by a register");
+    if (isRegisterDefinition(bundle->firstRange())) {
+        JitSpew(JitSpew_RegAlloc, "  bundle is defined by a register");
         return true;
     }
-    if (interval->index() != 0) {
-        JitSpew(JitSpew_RegAlloc, "  interval is not defined in memory");
+    if (!bundle->firstRange()->hasDefinition()) {
+        JitSpew(JitSpew_RegAlloc, "  bundle does not have a definition");
         return true;
     }
 
     CodePosition firstRegisterFrom;
 
-    for (UsePositionIterator iter(interval->usesBegin());
-         iter != interval->usesEnd();
-         iter++)
-    {
-        LUse* use = iter->use;
-        LNode* ins = insData[iter->pos];
-
-        if (!conflict || outputOf(ins) >= conflict->end()) {
-            if (isRegisterUse(use, ins, /* considerCopy = */ true)) {
-                firstRegisterFrom = inputOf(ins);
-                break;
+    CodePosition conflictEnd;
+    if (conflict) {
+        for (LiveRange::BundleLinkIterator iter = conflict->rangesBegin(); iter; iter++) {
+            LiveRange* range = LiveRange::get(*iter);
+            if (range->to() > conflictEnd)
+                conflictEnd = range->to();
+        }
+    }
+
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+
+        for (UsePositionIterator iter(range->usesBegin()); iter; iter++) {
+            LUse* use = iter->use;
+            LNode* ins = insData[iter->pos];
+
+            if (!conflict || outputOf(ins) >= conflictEnd) {
+                if (isRegisterUse(use, ins, /* considerCopy = */ true)) {
+                    firstRegisterFrom = inputOf(ins);
+                    break;
+                }
             }
         }
     }
 
     if (!firstRegisterFrom.bits()) {
         // Can't trim non-register uses off the beginning by splitting.
-        JitSpew(JitSpew_RegAlloc, "  interval has no register uses");
+        JitSpew(JitSpew_RegAlloc, "  bundle has no register uses");
         return true;
     }
 
     JitSpew(JitSpew_RegAlloc, "  split before first register use at %u",
             firstRegisterFrom.bits());
 
     SplitPositionVector splitPositions;
     if (!splitPositions.append(firstRegisterFrom))
         return false;
     *success = true;
-    return splitAt(interval, splitPositions);
+    return splitAt(bundle, splitPositions);
 }
 
 bool
-BacktrackingAllocator::splitAtAllRegisterUses(LiveInterval* interval)
+BacktrackingAllocator::splitAtAllRegisterUses(LiveBundle* bundle)
 {
-    // Split this interval so that all its register uses become minimal
-    // intervals and allow the vreg to be spilled throughout its range.
-
-    LiveIntervalVector newIntervals;
-    uint32_t vreg = interval->vreg();
+    // Split this bundle so that all its register uses are placed in minimal
+    // bundles, allowing the original bundle to be spilled throughout its range.
+
+    LiveBundleVector newBundles;
 
     JitSpew(JitSpew_RegAlloc, "  split at all register uses");
 
-    // If this LiveInterval is the result of an earlier split which created a
-    // spill interval, that spill interval covers the whole range, so we don't
-    // need to create a new one.
-    bool spillIntervalIsNew = false;
-    LiveInterval* spillInterval = interval->spillInterval();
-    if (!spillInterval) {
-        spillInterval = LiveInterval::New(alloc(), vreg, 0);
-        spillIntervalIsNew = true;
+    // If this bundle is the result of an earlier split which created a spill
+    // parent, that spill parent covers the whole range, so we don't need to
+    // create a new one.
+    bool spillBundleIsNew = false;
+    LiveBundle* spillBundle = bundle->spillParent();
+    if (!spillBundle) {
+        spillBundle = LiveBundle::New(alloc());
+        spillBundleIsNew = true;
     }
 
-    CodePosition spillStart = interval->start();
-    if (isRegisterDefinition(interval)) {
-        // Treat the definition of the interval as a register use so that it
+    CodePosition spillStart = bundle->firstRange()->from();
+    if (isRegisterDefinition(bundle->firstRange())) {
+        // Treat the definition of the first range as a register use so that it
         // can be split and spilled ASAP.
-        CodePosition from = interval->start();
+        CodePosition from = spillStart;
         CodePosition to = minimalDefEnd(insData[from]).next();
-        if (!addLiveInterval(newIntervals, vreg, spillInterval, from, to))
+        if (!addLiveBundle(newBundles, bundle->firstRange()->vreg(), spillBundle, from, to))
             return false;
+        bundle->firstRange()->distributeUses(newBundles.back()->firstRange());
         spillStart = to;
     }
 
-    if (spillIntervalIsNew) {
-        for (size_t i = 0; i < interval->numRanges(); i++) {
-            const LiveInterval::Range* range = interval->getRange(i);
-            CodePosition from = Max(range->from, spillStart);
-            if (!spillInterval->addRange(from, range->to))
+    if (spillBundleIsNew) {
+        for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+            LiveRange* range = LiveRange::get(*iter);
+            CodePosition from = Max(range->from(), spillStart);
+            if (!spillBundle->addRange(alloc(), range->vreg(), from, range->to()))
                 return false;
         }
     }
 
-    for (UsePositionIterator iter(interval->usesBegin());
-         iter != interval->usesEnd();
-         iter++)
-    {
-        LNode* ins = insData[iter->pos];
-        if (iter->pos < spillStart) {
-            newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
-        } else if (isRegisterUse(iter->use, ins)) {
-            // For register uses which are not useRegisterAtStart, pick an
-            // interval that covers both the instruction's input and output, so
-            // that the register is not reused for an output.
-            CodePosition from = inputOf(ins);
-            CodePosition to = iter->use->usedAtStart() ? outputOf(ins) : iter->pos.next();
-
-            // Use the same interval for duplicate use positions, except when
-            // the uses are fixed (they may require incompatible registers).
-            if (newIntervals.empty() ||
-                newIntervals.back()->end() != to ||
-                newIntervals.back()->usesBegin()->use->policy() == LUse::FIXED ||
-                iter->use->policy() == LUse::FIXED)
-            {
-                if (!addLiveInterval(newIntervals, vreg, spillInterval, from, to))
-                    return false;
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+
+        LiveRange* lastNewRange = nullptr;
+        while (range->hasUses()) {
+            UsePosition* use = range->popUse();
+            LNode* ins = insData[use->pos];
+
+            // The earlier call to distributeUses should have taken care of
+            // uses before the spill bundle started.
+            MOZ_ASSERT(use->pos >= spillStart);
+
+            if (isRegisterUse(use->use, ins)) {
+                // For register uses which are not useRegisterAtStart, pick a
+                // bundle that covers both the instruction's input and output, so
+                // that the register is not reused for an output.
+                CodePosition from = inputOf(ins);
+                CodePosition to = use->use->usedAtStart() ? outputOf(ins) : use->pos.next();
+
+                // Use the same bundle for duplicate use positions, except when
+                // the uses are fixed (they may require incompatible registers).
+                if (!lastNewRange ||
+                    lastNewRange->to() != to ||
+                    lastNewRange->usesBegin()->use->policy() == LUse::FIXED ||
+                    use->use->policy() == LUse::FIXED)
+                {
+                    if (!addLiveBundle(newBundles, range->vreg(), spillBundle, from, to))
+                        return false;
+                    lastNewRange = newBundles.back()->firstRange();
+                }
+
+                lastNewRange->addUse(use);
+            } else {
+                MOZ_ASSERT(spillBundleIsNew);
+                spillBundle->rangeFor(use->pos)->addUse(use);
             }
-
-            newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
-        } else {
-            MOZ_ASSERT(spillIntervalIsNew);
-            spillInterval->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
         }
     }
 
-    if (spillIntervalIsNew && !newIntervals.append(spillInterval))
+    if (spillBundleIsNew && !newBundles.append(spillBundle))
         return false;
 
-    return split(interval, newIntervals) && requeueIntervals(newIntervals);
+    return splitAndRequeueBundles(bundle, newBundles);
 }
 
 // Find the next split position after the current position.
 static size_t NextSplitPosition(size_t activeSplitPosition,
                                 const SplitPositionVector& splitPositions,
                                 CodePosition currentPos)
 {
     while (activeSplitPosition < splitPositions.length() &&
@@ -2168,174 +3052,181 @@ static bool SplitHere(size_t activeSplit
                       const SplitPositionVector& splitPositions,
                       CodePosition currentPos)
 {
     return activeSplitPosition < splitPositions.length() &&
            currentPos >= splitPositions[activeSplitPosition];
 }
 
 bool
-BacktrackingAllocator::splitAt(LiveInterval* interval,
-                               const SplitPositionVector& splitPositions)
+BacktrackingAllocator::splitAt(LiveBundle* bundle, const SplitPositionVector& splitPositions)
 {
-    // Split the interval at the given split points. Unlike splitAtAllRegisterUses,
+    // Split the bundle at the given split points. Unlike splitAtAllRegisterUses,
     // consolidate any register uses which have no intervening split points into the
-    // same resulting interval.
+    // same resulting bundle.
 
     // splitPositions should be non-empty and sorted.
     MOZ_ASSERT(!splitPositions.empty());
     for (size_t i = 1; i < splitPositions.length(); ++i)
         MOZ_ASSERT(splitPositions[i-1] < splitPositions[i]);
 
-    // Don't spill the interval until after the end of its definition.
-    CodePosition spillStart = interval->start();
-    if (isRegisterDefinition(interval))
-        spillStart = minimalDefEnd(insData[interval->start()]).next();
-
-    uint32_t vreg = interval->vreg();
-
-    // If this LiveInterval is the result of an earlier split which created a
-    // spill interval, that spill interval covers the whole range, so we don't
-    // need to create a new one.
-    bool spillIntervalIsNew = false;
-    LiveInterval* spillInterval = interval->spillInterval();
-    if (!spillInterval) {
-        spillInterval = LiveInterval::New(alloc(), vreg, 0);
-        spillIntervalIsNew = true;
-
-        for (size_t i = 0; i < interval->numRanges(); i++) {
-            const LiveInterval::Range* range = interval->getRange(i);
-            CodePosition from = Max(range->from, spillStart);
-            if (!spillInterval->addRange(from, range->to))
+    // Don't spill the bundle until after the end of its definition.
+    CodePosition start = bundle->firstRange()->from();
+    CodePosition spillStart = start;
+    if (isRegisterDefinition(bundle->firstRange()))
+        spillStart = minimalDefEnd(insData[spillStart]).next();
+
+    // As in splitAtAllRegisterUses, we don't need to create a new spill bundle
+    // if there already is one.
+    bool spillBundleIsNew = false;
+    LiveBundle* spillBundle = bundle->spillParent();
+    if (!spillBundle) {
+        spillBundle = LiveBundle::New(alloc());
+        if (!spillBundle)
+            return false;
+        spillBundleIsNew = true;
+
+        for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+            LiveRange* range = LiveRange::get(*iter);
+            CodePosition from = Max(range->from(), spillStart);
+            if (!spillBundle->addRange(alloc(), range->vreg(), from, range->to()))
                 return false;
         }
+
+        if (bundle->firstRange()->hasDefinition() && !isRegisterDefinition(bundle->firstRange())) {
+            MOZ_ASSERT(spillStart == start);
+            spillBundle->firstRange()->setHasDefinition();
+        }
     }
 
-    LiveIntervalVector newIntervals;
-
+    LiveBundleVector newBundles;
+
+    // The last point where a register use was seen.
     CodePosition lastRegisterUse;
-    if (spillStart != interval->start()) {
-        LiveInterval* newInterval = LiveInterval::New(alloc(), vreg, 0);
-        newInterval->setSpillInterval(spillInterval);
-        if (!newIntervals.append(newInterval))
+
+    // The new range which the last register use was added to.
+    LiveRange* activeRange = nullptr;
+
+    if (spillStart != start) {
+        if (!addLiveBundle(newBundles, bundle->firstRange()->vreg(), spillBundle, start, spillStart))
             return false;
-        lastRegisterUse = interval->start();
+        bundle->firstRange()->distributeUses(newBundles.back()->firstRange());
+        activeRange = newBundles.back()->firstRange();
+        lastRegisterUse = start.previous();
     }
 
-    size_t activeSplitPosition = NextSplitPosition(0, splitPositions, interval->start());
-    for (UsePositionIterator iter(interval->usesBegin()); iter != interval->usesEnd(); iter++) {
-        LNode* ins = insData[iter->pos];
-        if (iter->pos < spillStart) {
-            newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
-            activeSplitPosition = NextSplitPosition(activeSplitPosition, splitPositions, iter->pos);
-        } else if (isRegisterUse(iter->use, ins)) {
-            if (lastRegisterUse.bits() == 0 ||
-                SplitHere(activeSplitPosition, splitPositions, iter->pos))
-            {
-                // Place this register use into a different interval from the
+    size_t activeSplitPosition = NextSplitPosition(0, splitPositions, start);
+
+    // All ranges in the bundle which we have finished processing since the
+    // last register use.
+    LiveRangeVector originalRanges;
+
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+
+        while (range->hasUses()) {
+            UsePosition* use = range->popUse();
+            LNode* ins = insData[use->pos];
+
+            // The earlier call to distributeUses should have taken care of
+            // uses before the spill bundle started.
+            MOZ_ASSERT(use->pos >= spillStart);
+
+            if (isRegisterUse(use->use, ins)) {
+                // Place this register use into a different bundle from the
                 // last one if there are any split points between the two uses.
-                LiveInterval* newInterval = LiveInterval::New(alloc(), vreg, 0);
-                newInterval->setSpillInterval(spillInterval);
-                if (!newIntervals.append(newInterval))
-                    return false;
-                activeSplitPosition = NextSplitPosition(activeSplitPosition,
-                                                        splitPositions,
-                                                        iter->pos);
+                if (lastRegisterUse.bits() == 0 ||
+                    SplitHere(activeSplitPosition, splitPositions, use->pos))
+                {
+                    if (!addLiveBundle(newBundles, range->vreg(), spillBundle, inputOf(ins), use->pos.next()))
+                        return false;
+                    activeSplitPosition = NextSplitPosition(activeSplitPosition,
+                                                            splitPositions,
+                                                            use->pos);
+                    activeRange = newBundles.back()->firstRange();
+                } else {
+                    if (!originalRanges.empty()) {
+                        activeRange->setTo(originalRanges[0]->to());
+                        for (size_t i = 1; i < originalRanges.length(); i++) {
+                            LiveRange* orange = originalRanges[i];
+                            if (!newBundles.back()->addRange(alloc(), orange->vreg(), orange->from(), orange->to()))
+                                return false;
+                        }
+                        activeRange = LiveRange::New(alloc(), range->vreg(), range->from(), use->pos.next());
+                        if (!activeRange)
+                            return false;
+                        newBundles.back()->addRange(activeRange);
+                    } else {
+                        activeRange->setTo(use->pos.next());
+                    }
+                    MOZ_ASSERT(range->vreg() == activeRange->vreg());
+                }
+                activeRange->addUse(use);
+                originalRanges.clear();
+                lastRegisterUse = use->pos;
+            } else {
+                MOZ_ASSERT(spillBundleIsNew);
+                spillBundle->rangeFor(use->pos)->addUse(use);
             }
-            newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
-            lastRegisterUse = iter->pos;
-        } else {
-            MOZ_ASSERT(spillIntervalIsNew);
-            spillInterval->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
         }
+
+        if (!originalRanges.append(range))
+            return false;
     }
 
-    // Compute ranges for each new interval that cover all its uses.
-    size_t activeRange = interval->numRanges();
-    for (size_t i = 0; i < newIntervals.length(); i++) {
-        LiveInterval* newInterval = newIntervals[i];
-        CodePosition start, end;
-        if (i == 0 && spillStart != interval->start()) {
-            start = interval->start();
-            if (newInterval->usesEmpty())
-                end = spillStart;
-            else
-                end = newInterval->usesBack()->pos.next();
-        } else {
-            start = inputOf(insData[newInterval->usesBegin()->pos]);
-            end = newInterval->usesBack()->pos.next();
-        }
-        for (; activeRange > 0; --activeRange) {
-            const LiveInterval::Range* range = interval->getRange(activeRange - 1);
-            if (range->to <= start)
-                continue;
-            if (range->from >= end)
-                break;
-            if (!newInterval->addRange(Max(range->from, start),
-                                       Min(range->to, end)))
-                return false;
-            if (range->to >= end)
-                break;
-        }
-    }
-
-    if (spillIntervalIsNew && !newIntervals.append(spillInterval))
+    if (spillBundleIsNew && !newBundles.append(spillBundle))
         return false;
 
-    return split(interval, newIntervals) && requeueIntervals(newIntervals);
+    return splitAndRequeueBundles(bundle, newBundles);
 }
 
 bool
-BacktrackingAllocator::splitAcrossCalls(LiveInterval* interval)
+BacktrackingAllocator::splitAcrossCalls(LiveBundle* bundle)
 {
-    // Split the interval to separate register uses and non-register uses and
+    // Split the bundle to separate register uses and non-register uses and
     // allow the vreg to be spilled across its range.
 
-    // Find the locations of all calls in the interval's range. Fixed intervals
-    // are introduced by buildLivenessInfo only for calls when allocating for
-    // the backtracking allocator. fixedIntervalsUnion is sorted backwards, so
-    // iterate through it backwards.
+    // Find the locations of all calls in the bundle's range.
     SplitPositionVector callPositions;
-    for (size_t i = fixedIntervalsUnion->numRanges(); i > 0; i--) {
-        const LiveInterval::Range* range = fixedIntervalsUnion->getRange(i - 1);
-        if (interval->covers(range->from) && interval->covers(range->from.previous())) {
-            if (!callPositions.append(range->from))
+    for (LiveRange::BundleLinkIterator iter = callRanges->rangesBegin(); iter; iter++) {
+        LiveRange* callRange = LiveRange::get(*iter);
+        if (bundle->rangeFor(callRange->from()) && bundle->rangeFor(callRange->from().previous())) {
+            if (!callPositions.append(callRange->from()))
                 return false;
         }
     }
     MOZ_ASSERT(callPositions.length());
 
 #ifdef DEBUG
     JitSpewStart(JitSpew_RegAlloc, "  split across calls at ");
-    for (size_t i = 0; i < callPositions.length(); ++i) {
+    for (size_t i = 0; i < callPositions.length(); ++i)
         JitSpewCont(JitSpew_RegAlloc, "%s%u", i != 0 ? ", " : "", callPositions[i].bits());
-    }
     JitSpewFin(JitSpew_RegAlloc);
 #endif
 
-    return splitAt(interval, callPositions);
+    return splitAt(bundle, callPositions);
 }
 
 bool
-BacktrackingAllocator::chooseIntervalSplit(LiveInterval* interval, bool fixed, LiveInterval* conflict)
+BacktrackingAllocator::chooseBundleSplit(LiveBundle* bundle, bool fixed, LiveBundle* conflict)
 {
     bool success = false;
 
-    if (!trySplitAcrossHotcode(interval, &success))
+    if (!trySplitAcrossHotcode(bundle, &success))
         return false;
     if (success)
         return true;
 
     if (fixed)
-        return splitAcrossCalls(interval);
-
-    if (!trySplitBeforeFirstRegisterUse(interval, conflict, &success))
+        return splitAcrossCalls(bundle);
+
+    if (!trySplitBeforeFirstRegisterUse(bundle, conflict, &success))
         return false;
     if (success)
         return true;
 
-    if (!trySplitAfterLastRegisterUse(interval, conflict, &success))
+    if (!trySplitAfterLastRegisterUse(bundle, conflict, &success))
         return false;
     if (success)
         return true;
 
-    return splitAtAllRegisterUses(interval);
+    return splitAtAllRegisterUses(bundle);
 }
--- a/js/src/jit/BacktrackingAllocator.h
+++ b/js/src/jit/BacktrackingAllocator.h
@@ -6,26 +6,395 @@
 
 #ifndef jit_BacktrackingAllocator_h
 #define jit_BacktrackingAllocator_h
 
 #include "mozilla/Array.h"
 
 #include "ds/PriorityQueue.h"
 #include "ds/SplayTree.h"
-#include "jit/LiveRangeAllocator.h"
+#include "jit/RegisterAllocator.h"
+#include "jit/StackSlotAllocator.h"
 
 // Backtracking priority queue based register allocator based on that described
 // in the following blog post:
 //
 // http://blog.llvm.org/2011/09/greedy-register-allocation-in-llvm-30.html
 
 namespace js {
 namespace jit {
 
+class Requirement
+{
+  public:
+    enum Kind {
+        NONE,
+        REGISTER,
+        FIXED,
+        MUST_REUSE_INPUT
+    };
+
+    Requirement()
+      : kind_(NONE)
+    { }
+
+    explicit Requirement(Kind kind)
+      : kind_(kind)
+    {
+        // These have dedicated constructors.
+        MOZ_ASSERT(kind != FIXED && kind != MUST_REUSE_INPUT);
+    }
+
+    Requirement(Kind kind, CodePosition at)
+      : kind_(kind),
+        position_(at)
+    {
+        // These have dedicated constructors.
+        MOZ_ASSERT(kind != FIXED && kind != MUST_REUSE_INPUT);
+    }
+
+    explicit Requirement(LAllocation fixed)
+      : kind_(FIXED),
+        allocation_(fixed)
+    {
+        MOZ_ASSERT(!fixed.isBogus() && !fixed.isUse());
+    }
+
+    // Only useful as a hint; encodes where the fixed requirement is used, to
+    // avoid allocating a fixed register too early.
+    Requirement(LAllocation fixed, CodePosition at)
+      : kind_(FIXED),
+        allocation_(fixed),
+        position_(at)
+    {
+        MOZ_ASSERT(!fixed.isBogus() && !fixed.isUse());
+    }
+
+    Requirement(uint32_t vreg, CodePosition at)
+      : kind_(MUST_REUSE_INPUT),
+        allocation_(LUse(vreg, LUse::ANY)),
+        position_(at)
+    { }
+
+    Kind kind() const {
+        return kind_;
+    }
+
+    LAllocation allocation() const {
+        MOZ_ASSERT(!allocation_.isBogus() && !allocation_.isUse());
+        return allocation_;
+    }
+
+    uint32_t virtualRegister() const {
+        MOZ_ASSERT(allocation_.isUse());
+        MOZ_ASSERT(kind() == MUST_REUSE_INPUT);
+        return allocation_.toUse()->virtualRegister();
+    }
+
+    CodePosition pos() const {
+        return position_;
+    }
+
+    int priority() const;
+
+    bool merge(const Requirement& newRequirement) {
+        // Merge newRequirement with any existing requirement, returning false
+        // if the new and old requirements conflict.
+        MOZ_ASSERT(newRequirement.kind() != Requirement::MUST_REUSE_INPUT);
+
+        if (newRequirement.kind() == Requirement::FIXED) {
+            if (kind() == Requirement::FIXED)
+                return newRequirement.allocation() == allocation();
+            *this = newRequirement;
+            return true;
+        }
+
+        MOZ_ASSERT(newRequirement.kind() == Requirement::REGISTER);
+        if (kind() == Requirement::FIXED)
+            return allocation().isRegister();
+
+        *this = newRequirement;
+        return true;
+    }
+
+    void dump() const;
+
+  private:
+    Kind kind_;
+    LAllocation allocation_;
+    CodePosition position_;
+};
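For illustration only, not part of the patch: the merge rules above on a hypothetical simplified requirement that only knows NONE, REGISTER, and FIXED-to-a-register. Two different fixed requirements conflict, REGISTER is absorbed by an existing fixed register requirement, and anything else takes the stronger of the two.

enum class ToyKind { None, Register, Fixed };

struct ToyRequirement {
    ToyKind kind = ToyKind::None;
    int fixedReg = -1;                        // only meaningful when kind == Fixed

    // Returns false if the requirements conflict, true otherwise.
    bool merge(const ToyRequirement& other) {
        if (other.kind == ToyKind::Fixed) {
            if (kind == ToyKind::Fixed)
                return fixedReg == other.fixedReg;   // two different fixed regs conflict
            *this = other;
            return true;
        }
        if (other.kind == ToyKind::Register && kind == ToyKind::Fixed)
            return true;                             // a fixed register already satisfies REGISTER
        if (other.kind != ToyKind::None)
            *this = other;
        return true;
    }
};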
+
+struct UsePosition : public TempObject,
+                     public InlineForwardListNode<UsePosition>
+{
+    LUse* use;
+    CodePosition pos;
+
+    UsePosition(LUse* use, CodePosition pos) :
+        use(use),
+        pos(pos)
+    {
+        // Verify that the usedAtStart() flag is consistent with the
+        // subposition. For now ignore fixed registers, because they
+        // are handled specially around calls.
+        MOZ_ASSERT_IF(!use->isFixedRegister(),
+                      pos.subpos() == (use->usedAtStart()
+                                       ? CodePosition::INPUT
+                                       : CodePosition::OUTPUT));
+    }
+};
+
+typedef InlineForwardListIterator<UsePosition> UsePositionIterator;
+
+// Backtracking allocator data structures overview.
+//
+// LiveRange: A continuous range of positions where a virtual register is live.
+// LiveBundle: A set of LiveRanges which do not overlap.
+// VirtualRegister: A set of all LiveRanges used for some LDefinition.
+//
+// The allocator first performs a liveness analysis on the LIR graph which
+// constructs LiveRanges for each VirtualRegister, determining where the
+// registers are live.
+//
+// The ranges are then bundled together according to heuristics, and placed on
+// the allocation queue.
+//
+// As bundles are removed from the allocation queue, we attempt to find a
+// physical register or stack slot allocation for all ranges in the removed
+// bundle, possibly evicting already-allocated bundles. See processBundle()
+// for details.
+//
+// If we are not able to allocate a bundle, it is split according to heuristics
+// into two or more smaller bundles which cover all the ranges of the original.
+// These smaller bundles are then allocated independently.
+
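For illustration only, not part of the patch: a standalone sketch of the queue-driven flow described above, with hypothetical stand-in types and placeholder TryAllocate/Split hooks instead of the real processBundle() machinery.

#include <cstdint>
#include <queue>
#include <vector>

struct ToyRange { uint32_t from, to; };                  // [from, to)
struct ToyBundle { std::vector<ToyRange> ranges; };

uint32_t Priority(const ToyBundle& b) {                  // total live length
    uint32_t total = 0;
    for (const ToyRange& r : b.ranges)
        total += r.to - r.from;
    return total;
}

struct ByPriority {
    bool operator()(const ToyBundle* a, const ToyBundle* b) const {
        return Priority(*a) < Priority(*b);              // highest priority first
    }
};

bool TryAllocate(ToyBundle*) { return true; }            // placeholder heuristic
std::vector<ToyBundle*> Split(ToyBundle*) { return {}; } // placeholder split

void AllocateAll(const std::vector<ToyBundle*>& bundles) {
    std::priority_queue<ToyBundle*, std::vector<ToyBundle*>, ByPriority> queue;
    for (ToyBundle* b : bundles)
        queue.push(b);
    while (!queue.empty()) {
        ToyBundle* bundle = queue.top();
        queue.pop();
        if (!TryAllocate(bundle)) {
            for (ToyBundle* piece : Split(bundle))       // smaller bundles are retried
                queue.push(piece);
        }
    }
}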
+class LiveBundle;
+
+class LiveRange : public TempObject
+{
+  public:
+    // Linked lists are used to keep track of the ranges in each LiveBundle and
+    // VirtualRegister. Since a LiveRange may be in two lists simultaneously, use
+    // these auxiliary classes to keep things straight.
+    class BundleLink : public InlineForwardListNode<BundleLink> {};
+    class RegisterLink : public InlineForwardListNode<RegisterLink> {};
+
+    typedef InlineForwardListIterator<BundleLink> BundleLinkIterator;
+    typedef InlineForwardListIterator<RegisterLink> RegisterLinkIterator;
+
+    // Links in the lists in LiveBundle and VirtualRegister.
+    BundleLink bundleLink;
+    RegisterLink registerLink;
+
+    static LiveRange* get(BundleLink* link) {
+        return reinterpret_cast<LiveRange*>(reinterpret_cast<uint8_t*>(link) -
+                                            offsetof(LiveRange, bundleLink));
+    }
+    static LiveRange* get(RegisterLink* link) {
+        return reinterpret_cast<LiveRange*>(reinterpret_cast<uint8_t*>(link) -
+                                            offsetof(LiveRange, registerLink));
+    }
+
+    struct Range
+    {
+        // The beginning of this range, inclusive.
+        CodePosition from;
+
+        // The end of this range, exclusive.
+        CodePosition to;
+
+        Range() {}
+
+        Range(CodePosition from, CodePosition to)
+          : from(from), to(to)
+        {
+            MOZ_ASSERT(from < to);
+        }
+
+        bool empty() {
+            return from == to;
+        }
+    };
+
+  private:
+    // The virtual register this range is for, or zero if this does not have a
+    // virtual register (for example, it is in the callRanges bundle).
+    uint32_t vreg_;
+
+    // The bundle containing this range, null if liveness information is being
+    // constructed and we haven't started allocating bundles yet.
+    LiveBundle* bundle_;
+
+    // The code positions in this range.
+    Range range_;
+
+    // All uses of the virtual register in this range, ordered by location.
+    InlineForwardList<UsePosition> uses_;
+
+    // Whether this range contains the virtual register's definition.
+    bool hasDefinition_;
+
+    LiveRange(uint32_t vreg, Range range)
+      : vreg_(vreg), bundle_(nullptr), range_(range), hasDefinition_(false)
+    {
+        MOZ_ASSERT(!range.empty());
+    }
+
+  public:
+    static LiveRange* New(TempAllocator& alloc, uint32_t vreg,
+                          CodePosition from, CodePosition to) {
+        return new(alloc) LiveRange(vreg, Range(from, to));
+    }
+
+    uint32_t vreg() const {
+        MOZ_ASSERT(hasVreg());
+        return vreg_;
+    }
+    bool hasVreg() const {
+        return vreg_ != 0;
+    }
+
+    LiveBundle* bundle() const {
+        return bundle_;
+    }
+
+    CodePosition from() const {
+        return range_.from;
+    }
+    CodePosition to() const {
+        return range_.to;
+    }
+    bool covers(CodePosition pos) const {
+        return pos >= from() && pos < to();
+    }
+
+    // Whether this range wholly contains other.
+    bool contains(LiveRange* other) const;
+
+    // Intersect this range with other, returning the subranges of this
+    // that are before, inside, or after other.
+    void intersect(LiveRange* other, Range* pre, Range* inside, Range* post) const;
+
+    UsePositionIterator usesBegin() const {
+        return uses_.begin();
+    }
+    bool hasUses() const {
+        return !!usesBegin();
+    }
+    UsePosition* popUse() {
+        return uses_.popFront();
+    }
+
+    bool hasDefinition() const {
+        return hasDefinition_;
+    }
+
+    void setFrom(CodePosition from) {
+        range_.from = from;
+    }
+    void setTo(CodePosition to) {
+        range_.to = to;
+    }
+
+    void setBundle(LiveBundle* bundle) {
+        bundle_ = bundle;
+    }
+
+    void addUse(UsePosition* use);
+    void distributeUses(LiveRange* other);
+
+    void setHasDefinition() {
+        MOZ_ASSERT(!hasDefinition_);
+        hasDefinition_ = true;
+    }
+
+    // Return a string describing this range. This is not re-entrant!
+#ifdef DEBUG
+    const char* toString() const;
+#else
+    const char* toString() const { return "???"; }
+#endif
+
+    // Comparator for use in range splay trees.
+    static int compare(LiveRange* v0, LiveRange* v1) {
+        // LiveRange includes 'from' but excludes 'to'.
+        if (v0->to() <= v1->from())
+            return -1;
+        if (v0->from() >= v1->to())
+            return 1;
+        return 0;
+    }
+};
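+
+// Illustrative sketch (hypothetical helper, not referenced elsewhere): since
+// compare() returns 0 exactly when two half-open ranges intersect, a splay
+// tree keyed on LiveRange treats overlapping ranges as equal, so a lookup by
+// position lands on whichever range conflicts with it.
+inline bool
+ExampleRangesOverlap(LiveRange* a, LiveRange* b)
+{
+    // E.g. [4,8) and [8,12) do not overlap (compare() returns -1), while
+    // [4,8) and [6,12) do (compare() returns 0).
+    return LiveRange::compare(a, b) == 0;
+}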
+
+// A set of live ranges which are all pairwise disjoint. The register allocator
+// attempts to find allocations for an entire bundle, and if it fails the
+// bundle will be broken into smaller ones which are allocated independently.
+class LiveBundle : public TempObject
+{
+    // All the ranges in this set, ordered by location.
+    InlineForwardList<LiveRange::BundleLink> ranges_;
+
+    // Allocation to use for ranges in this set, bogus if not allocated.
+    LAllocation alloc_;
+
+    // Optional live range set which is a superset of this one, and which has
+    // been spilled.
+    LiveBundle* spillParent_;
+
+    LiveBundle()
+      : spillParent_(nullptr)
+    { }
+
+  public:
+    static LiveBundle* New(TempAllocator& alloc) {
+        return new(alloc) LiveBundle();
+    }
+
+    LiveRange::BundleLinkIterator rangesBegin() const {
+        return ranges_.begin();
+    }
+    LiveRange* firstRange() const {
+        return LiveRange::get(*rangesBegin());
+    }
+    LiveRange* rangeFor(CodePosition pos) const;
+    void addRange(LiveRange* range);
+    bool addRange(TempAllocator& alloc, uint32_t vreg, CodePosition from, CodePosition to);
+    bool addRangeAndDistributeUses(TempAllocator& alloc, LiveRange* oldRange,
+                                   CodePosition from, CodePosition to);
+#ifdef DEBUG
+    size_t numRanges() const;
+#endif
+
+    LAllocation allocation() const {
+        return alloc_;
+    }
+    void setAllocation(LAllocation alloc) {
+        alloc_ = alloc;
+    }
+
+    LiveBundle* spillParent() {
+        return spillParent_;
+    }
+    void setSpillParent(LiveBundle* spill) {
+        spillParent_ = spill;
+    }
+
+    bool isSpill() const {
+        return alloc_.isStackSlot();
+    }
+
+    // Return a string describing this bundle. This is not re-entrant!
+#ifdef DEBUG
+    const char* toString() const;
+#else
+    const char* toString() const { return "???"; }
+#endif
+};
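+
+// Illustrative sketch (hypothetical free function) of how the intrusive
+// BundleLink list is walked; LiveBundle::rangeFor, declared above, performs
+// the equivalent lookup.
+inline LiveRange*
+ExampleFindRangeCovering(LiveBundle* bundle, CodePosition pos)
+{
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        // The iterator yields BundleLink nodes; LiveRange::get() recovers the
+        // owning LiveRange from the embedded link.
+        LiveRange* range = LiveRange::get(*iter);
+        if (range->covers(pos))
+            return range;
+    }
+    return nullptr;
+}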
+
 // Information about a group of registers. Registers may be grouped together
 // when (a) all of their lifetimes are disjoint, (b) they are of the same type
 // (double / non-double) and (c) it is desirable that they have the same
 // allocation.
 struct VirtualRegisterGroup : public TempObject
 {
     // All virtual registers in the group.
     Vector<uint32_t, 2, JitAllocPolicy> registers;
@@ -43,252 +412,354 @@ struct VirtualRegisterGroup : public Tem
     uint32_t canonicalReg() {
         uint32_t minimum = registers[0];
         for (size_t i = 1; i < registers.length(); i++)
             minimum = Min(minimum, registers[i]);
         return minimum;
     }
 };
 
-class BacktrackingVirtualRegister : public VirtualRegister
+// Information about the allocation for a virtual register.
+class VirtualRegister
 {
+    // Instruction which defines this register.
+    LNode* ins_;
+
+    // Definition in the instruction for this register.
+    LDefinition* def_;
+
+    // All live ranges for this register. These may overlap each other, and are
+    // ordered by their start position.
+    InlineForwardList<LiveRange::RegisterLink> ranges_;
+
+    // Whether def_ is a temp or an output.
+    bool isTemp_;
+
     // If this register's definition is MUST_REUSE_INPUT, whether a copy must
     // be introduced before the definition that relaxes the policy.
     bool mustCopyInput_;
 
-    // Spill location to use for this register.
-    LAllocation canonicalSpill_;
-
-    // Code position above which the canonical spill cannot be used; such
-    // intervals may overlap other registers in the same group.
-    CodePosition canonicalSpillExclude_;
-
     // If this register is associated with a group of other registers,
     // information about the group. This structure is shared between all
     // registers in the group.
     VirtualRegisterGroup* group_;
 
+    // Range which might overlap other registers in the group, and should be
+    // allocated separately. If it exists, this range is always spillable, and
+    // cannot be split.
+    LiveRange* groupExclude_;
+
+    // Spill location to use for ranges of this register, except groupExclude.
+    LAllocation canonicalSpill_;
+
+    void operator=(const VirtualRegister&) = delete;
+    VirtualRegister(const VirtualRegister&) = delete;
+
   public:
-    explicit BacktrackingVirtualRegister(TempAllocator& alloc)
-      : VirtualRegister(alloc)
-    {}
+    explicit VirtualRegister()
+    {
+        // Note: This class is zeroed before it is constructed.
+    }
+
+    void init(LNode* ins, LDefinition* def, bool isTemp) {
+        MOZ_ASSERT(!ins_);
+        ins_ = ins;
+        def_ = def;
+        isTemp_ = isTemp;
+    }
+
+    LNode* ins() const {
+        return ins_;
+    }
+    LDefinition* def() const {
+        return def_;
+    }
+    LDefinition::Type type() const {
+        return def()->type();
+    }
+    uint32_t vreg() const {
+        return def()->virtualRegister();
+    }
+    bool isCompatible(const AnyRegister& r) const {
+        return def_->isCompatibleReg(r);
+    }
+    bool isCompatible(const VirtualRegister& vr) const {
+        return def_->isCompatibleDef(*vr.def_);
+    }
+    bool isTemp() const {
+        return isTemp_;
+    }
+
     void setMustCopyInput() {
         mustCopyInput_ = true;
     }
     bool mustCopyInput() {
         return mustCopyInput_;
     }
 
     void setCanonicalSpill(LAllocation alloc) {
         MOZ_ASSERT(!alloc.isUse());
         canonicalSpill_ = alloc;
     }
     const LAllocation* canonicalSpill() const {
         return canonicalSpill_.isBogus() ? nullptr : &canonicalSpill_;
     }
 
-    void setCanonicalSpillExclude(CodePosition pos) {
-        canonicalSpillExclude_ = pos;
-    }
-    bool hasCanonicalSpillExclude() const {
-        return canonicalSpillExclude_.bits() != 0;
-    }
-    CodePosition canonicalSpillExclude() const {
-        MOZ_ASSERT(hasCanonicalSpillExclude());
-        return canonicalSpillExclude_;
-    }
-
     void setGroup(VirtualRegisterGroup* group) {
         group_ = group;
     }
     VirtualRegisterGroup* group() {
         return group_;
     }
+
+    void setGroupExclude(LiveRange* range) {
+        groupExclude_ = range;
+    }
+    LiveRange* groupExclude() {
+        return groupExclude_;
+    }
+
+    LiveRange::RegisterLinkIterator rangesBegin() const {
+        return ranges_.begin();
+    }
+    bool hasRanges() const {
+        return !!rangesBegin();
+    }
+    LiveRange* rangeFor(CodePosition pos) const;
+    void removeRange(LiveRange* range);
+    void addRange(LiveRange* range);
+
+    bool addInitialRange(TempAllocator& alloc, CodePosition from, CodePosition to);
+    void addInitialUse(UsePosition* use);
+    void setInitialDefinition(CodePosition from);
 };
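+
+// Illustrative sketch (hypothetical helper): unlike a bundle's ranges, a
+// VirtualRegister's ranges may overlap one another, so a position can be
+// covered by several ranges; this walks the RegisterLink list and reports
+// whether any covering range has already been assigned to a bundle.
+inline bool
+ExampleHasBundledRangeAt(const VirtualRegister& reg, CodePosition pos)
+{
+    for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+        if (range->covers(pos) && range->bundle())
+            return true;
+    }
+    return false;
+}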
 
 // A sequence of code positions, for telling BacktrackingAllocator::splitAt
 // where to split.
 typedef js::Vector<CodePosition, 4, SystemAllocPolicy> SplitPositionVector;
 
-class BacktrackingAllocator
-  : private LiveRangeAllocator<BacktrackingVirtualRegister>
+class BacktrackingAllocator : protected RegisterAllocator
 {
     friend class C1Spewer;
     friend class JSONSpewer;
 
-    // Priority queue element: either an interval or group of intervals and the
+    BitSet* liveIn;
+    FixedList<VirtualRegister> vregs;
+
+    // Ranges where all registers must be spilled due to call instructions.
+    LiveBundle* callRanges;
+
+    // Allocation state.
+    StackSlotAllocator stackSlotAllocator;
+
+    // Priority queue element: either a bundle or a group of registers and the
     // associated priority.
     struct QueueItem
     {
-        LiveInterval* interval;
+        LiveBundle* bundle;
         VirtualRegisterGroup* group;
 
-        QueueItem(LiveInterval* interval, size_t priority)
-          : interval(interval), group(nullptr), priority_(priority)
+        QueueItem(LiveBundle* bundle, size_t priority)
+          : bundle(bundle), group(nullptr), priority_(priority)
         {}
 
         QueueItem(VirtualRegisterGroup* group, size_t priority)
-          : interval(nullptr), group(group), priority_(priority)
+          : bundle(nullptr), group(group), priority_(priority)
         {}
 
         static size_t priority(const QueueItem& v) {
             return v.priority_;
         }
 
       private:
         size_t priority_;
     };
 
     PriorityQueue<QueueItem, QueueItem, 0, SystemAllocPolicy> allocationQueue;
 
-    // A subrange over which a physical register is allocated.
-    struct AllocatedRange {
-        LiveInterval* interval;
-        const LiveInterval::Range* range;
-
-        AllocatedRange()
-          : interval(nullptr), range(nullptr)
-        {}
-
-        AllocatedRange(LiveInterval* interval, const LiveInterval::Range* range)
-          : interval(interval), range(range)
-        {}
-
-        static int compare(const AllocatedRange& v0, const AllocatedRange& v1) {
-            // LiveInterval::Range includes 'from' but excludes 'to'.
-            if (v0.range->to <= v1.range->from)
-                return -1;
-            if (v0.range->from >= v1.range->to)
-                return 1;
-            return 0;
-        }
-    };
-
-    typedef SplayTree<AllocatedRange, AllocatedRange> AllocatedRangeSet;
+    typedef SplayTree<LiveRange*, LiveRange> LiveRangeSet;
 
     // Each physical register is associated with the set of ranges over which
     // that register is currently allocated.
     struct PhysicalRegister {
         bool allocatable;
         AnyRegister reg;
-        AllocatedRangeSet allocations;
+        LiveRangeSet allocations;
 
         PhysicalRegister() : allocatable(false) {}
     };
     mozilla::Array<PhysicalRegister, AnyRegister::Total> registers;
 
     // Ranges of code which are considered to be hot, for which good allocation
     // should be prioritized.
-    AllocatedRangeSet hotcode;
+    LiveRangeSet hotcode;
 
     // During register allocation, virtual stack slots are used for spills.
     // These are converted to actual spill locations once allocation is complete.
     size_t numVirtualStackSlots;
 
     // Information about an allocated stack slot.
     struct SpillSlot : public TempObject, public InlineForwardListNode<SpillSlot> {
         LStackSlot alloc;
-        AllocatedRangeSet allocated;
+        LiveRangeSet allocated;
 
         SpillSlot(uint32_t slot, LifoAlloc* alloc)
           : alloc(slot), allocated(alloc)
         {}
     };
     typedef InlineForwardList<SpillSlot> SpillSlotList;
 
     // All allocated slots of each width.
     SpillSlotList normalSlots, doubleSlots, quadSlots;
 
   public:
     BacktrackingAllocator(MIRGenerator* mir, LIRGenerator* lir, LIRGraph& graph)
-      : LiveRangeAllocator<BacktrackingVirtualRegister>(mir, lir, graph),
+      : RegisterAllocator(mir, lir, graph),
+        liveIn(nullptr),
+        callRanges(nullptr),
         numVirtualStackSlots(0)
     { }
 
     bool go();
 
   private:
 
-    typedef Vector<LiveInterval*, 4, SystemAllocPolicy> LiveIntervalVector;
+    typedef Vector<LiveRange*, 4, SystemAllocPolicy> LiveRangeVector;
+    typedef Vector<LiveBundle*, 4, SystemAllocPolicy> LiveBundleVector;
+
+    // Liveness methods.
+    bool init();
+    bool buildLivenessInfo();
+
+    bool addInitialFixedRange(AnyRegister reg, CodePosition from, CodePosition to);
 
-    bool init();
-    bool canAddToGroup(VirtualRegisterGroup* group, BacktrackingVirtualRegister* reg);
+    VirtualRegister& vreg(const LDefinition* def) {
+        return vregs[def->virtualRegister()];
+    }
+    VirtualRegister& vreg(const LAllocation* alloc) {
+        MOZ_ASSERT(alloc->isUse());
+        return vregs[alloc->toUse()->virtualRegister()];
+    }
+
+    // Allocation methods.
+    bool canAddToGroup(VirtualRegisterGroup* group, VirtualRegister* reg);
     bool tryGroupRegisters(uint32_t vreg0, uint32_t vreg1);
     bool tryGroupReusedRegister(uint32_t def, uint32_t use);
     bool groupAndQueueRegisters();
-    bool tryAllocateFixed(LiveInterval* interval, bool* success, bool* pfixed,
-                          LiveIntervalVector& conflicting);
-    bool tryAllocateNonFixed(LiveInterval* interval, bool* success, bool* pfixed,
-                             LiveIntervalVector& conflicting);
-    bool processInterval(LiveInterval* interval);
+    bool tryAllocateFixed(LiveBundle* bundle, Requirement requirement,
+                          bool* success, bool* pfixed, LiveBundleVector& conflicting);
+    bool tryAllocateNonFixed(LiveBundle* bundle, Requirement requirement, Requirement hint,
+                             bool* success, bool* pfixed, LiveBundleVector& conflicting);
+    bool processBundle(LiveBundle* bundle);
     bool processGroup(VirtualRegisterGroup* group);
-    bool setIntervalRequirement(LiveInterval* interval);
-    bool tryAllocateRegister(PhysicalRegister& r, LiveInterval* interval,
-                             bool* success, bool* pfixed, LiveIntervalVector& conflicting);
+    bool computeRequirement(LiveBundle* bundle, Requirement* prequirement, Requirement* phint);
+    bool tryAllocateRegister(PhysicalRegister& r, LiveBundle* bundle,
+                             bool* success, bool* pfixed, LiveBundleVector& conflicting);
     bool tryAllocateGroupRegister(PhysicalRegister& r, VirtualRegisterGroup* group,
-                                  bool* psuccess, bool* pfixed, LiveInterval** pconflicting);
-    bool evictInterval(LiveInterval* interval);
-    void distributeUses(LiveInterval* interval, const LiveIntervalVector& newIntervals);
-    bool split(LiveInterval* interval, const LiveIntervalVector& newIntervals);
-    bool requeueIntervals(const LiveIntervalVector& newIntervals);
-    void spill(LiveInterval* interval);
+                                  bool* psuccess, LiveBundle** pconflicting);
+    bool evictBundle(LiveBundle* bundle);
+    bool splitAndRequeueBundles(LiveBundle* bundle, const LiveBundleVector& newBundles);
+    void spill(LiveBundle* bundle);
 
     bool isReusedInput(LUse* use, LNode* ins, bool considerCopy);
     bool isRegisterUse(LUse* use, LNode* ins, bool considerCopy = false);
-    bool isRegisterDefinition(LiveInterval* interval);
-    bool addLiveInterval(LiveIntervalVector& intervals, uint32_t vreg,
-                         LiveInterval* spillInterval,
-                         CodePosition from, CodePosition to);
-    bool pickStackSlot(LiveInterval* interval);
-    bool reuseOrAllocateStackSlot(const LiveIntervalVector& intervals, LDefinition::Type type,
+    bool isRegisterDefinition(LiveRange* range);
+    bool addLiveBundle(LiveBundleVector& bundles, uint32_t vreg, LiveBundle* spillParent,
+                       CodePosition from, CodePosition to);
+    bool pickStackSlot(LiveBundle* bundle);
+    bool addBundlesUsingAllocation(VirtualRegister& reg, LAllocation alloc,
+                                   LiveBundleVector& bundles);
+    bool reuseOrAllocateStackSlot(const LiveBundleVector& bundles, LDefinition::Type type,
                                   LAllocation* palloc);
-    bool insertAllRanges(AllocatedRangeSet& set, const LiveIntervalVector& intervals);
+    bool insertAllRanges(LiveRangeSet& set, const LiveBundleVector& bundles);
 
+    // Reification methods.
     bool pickStackSlots();
     bool resolveControlFlow();
     bool reifyAllocations();
     bool populateSafepoints();
     bool annotateMoveGroups();
+    size_t findFirstNonCallSafepoint(CodePosition from);
+    size_t findFirstSafepoint(CodePosition pos, size_t startFrom);
+    void addLiveRegistersForRange(VirtualRegister& reg, LiveRange* range);
 
+    bool addMove(LMoveGroup* moves, LiveRange* from, LiveRange* to, LDefinition::Type type) {
+        LAllocation fromAlloc = from->bundle()->allocation();
+        LAllocation toAlloc = to->bundle()->allocation();
+        MOZ_ASSERT(fromAlloc != toAlloc);
+        return moves->add(fromAlloc, toAlloc, type);
+    }
+
+    bool moveInput(LInstruction* ins, LiveRange* from, LiveRange* to, LDefinition::Type type) {
+        if (from->bundle()->allocation() == to->bundle()->allocation())
+            return true;
+        LMoveGroup* moves = getInputMoveGroup(ins);
+        return addMove(moves, from, to, type);
+    }
+
+    bool moveAfter(LInstruction* ins, LiveRange* from, LiveRange* to, LDefinition::Type type) {
+        if (from->bundle()->allocation() == to->bundle()->allocation())
+            return true;
+        LMoveGroup* moves = getMoveGroupAfter(ins);
+        return addMove(moves, from, to, type);
+    }
+
+    bool moveAtExit(LBlock* block, LiveRange* from, LiveRange* to, LDefinition::Type type) {
+        if (from->bundle()->allocation() == to->bundle()->allocation())
+            return true;
+        LMoveGroup* moves = block->getExitMoveGroup(alloc());
+        return addMove(moves, from, to, type);
+    }
+
+    bool moveAtEntry(LBlock* block, LiveRange* from, LiveRange* to, LDefinition::Type type) {
+        if (from->bundle()->allocation() == to->bundle()->allocation())
+            return true;
+        LMoveGroup* moves = block->getEntryMoveGroup(alloc());
+        return addMove(moves, from, to, type);
+    }
+
+    // Debugging methods.
     void dumpRegisterGroups();
     void dumpFixedRanges();
     void dumpAllocations();
 
-    struct PrintLiveIntervalRange;
+    struct PrintLiveRange;
 
-    bool minimalDef(const LiveInterval* interval, LNode* ins);
-    bool minimalUse(const LiveInterval* interval, LNode* ins);
-    bool minimalInterval(const LiveInterval* interval, bool* pfixed = nullptr);
+    bool minimalDef(LiveRange* range, LNode* ins);
+    bool minimalUse(LiveRange* range, LNode* ins);
+    bool minimalBundle(LiveBundle* bundle, bool* pfixed = nullptr);
 
     // Heuristic methods.
 
-    size_t computePriority(const LiveInterval* interval);
-    size_t computeSpillWeight(const LiveInterval* interval);
+    size_t computePriority(LiveBundle* bundle);
+    size_t computeSpillWeight(LiveBundle* bundle);
 
     size_t computePriority(const VirtualRegisterGroup* group);
     size_t computeSpillWeight(const VirtualRegisterGroup* group);
 
-    size_t maximumSpillWeight(const LiveIntervalVector& intervals);
+    size_t maximumSpillWeight(const LiveBundleVector& bundles);
 
-    bool chooseIntervalSplit(LiveInterval* interval, bool fixed, LiveInterval* conflict);
+    bool chooseBundleSplit(LiveBundle* bundle, bool fixed, LiveBundle* conflict);
 
-    bool splitAt(LiveInterval* interval,
+    bool splitAt(LiveBundle* bundle,
                  const SplitPositionVector& splitPositions);
-    bool trySplitAcrossHotcode(LiveInterval* interval, bool* success);
-    bool trySplitAfterLastRegisterUse(LiveInterval* interval, LiveInterval* conflict, bool* success);
-    bool trySplitBeforeFirstRegisterUse(LiveInterval* interval, LiveInterval* conflict, bool* success);
-    bool splitAtAllRegisterUses(LiveInterval* interval);
-    bool splitAcrossCalls(LiveInterval* interval);
+    bool trySplitAcrossHotcode(LiveBundle* bundle, bool* success);
+    bool trySplitAfterLastRegisterUse(LiveBundle* bundle, LiveBundle* conflict, bool* success);
+    bool trySplitBeforeFirstRegisterUse(LiveBundle* bundle, LiveBundle* conflict, bool* success);
+    bool splitAtAllRegisterUses(LiveBundle* bundle);
+    bool splitAcrossCalls(LiveBundle* bundle);
 
     bool compilingAsmJS() {
         return mir->info().compilingAsmJS();
     }
 
     bool isVirtualStackSlot(LAllocation alloc) {
         return alloc.isStackSlot() &&
                LAllocation::DATA_MASK - alloc.toStackSlot()->slot() < numVirtualStackSlots;
     }
+
+    void dumpVregs();
 };
 
 } // namespace jit
 } // namespace js
 
 #endif /* jit_BacktrackingAllocator_h */
--- a/js/src/jit/C1Spewer.cpp
+++ b/js/src/jit/C1Spewer.cpp
@@ -58,29 +58,28 @@ C1Spewer::spewPass(const char* pass)
     for (MBasicBlockIterator block(graph->begin()); block != graph->end(); block++)
         spewPass(spewout_, *block);
 
     fprintf(spewout_, "end_cfg\n");
     fflush(spewout_);
 }
 
 void
-C1Spewer::spewIntervals(const char* pass, BacktrackingAllocator* regalloc)
+C1Spewer::spewRanges(const char* pass, BacktrackingAllocator* regalloc)
 {
     if (!spewout_)
         return;
 
-    fprintf(spewout_, "begin_intervals\n");
+    fprintf(spewout_, "begin_ranges\n");
     fprintf(spewout_, " name \"%s\"\n", pass);
 
-    size_t nextId = 0x4000;
     for (MBasicBlockIterator block(graph->begin()); block != graph->end(); block++)
-        spewIntervals(spewout_, *block, regalloc, nextId);
+        spewRanges(spewout_, *block, regalloc);
 
-    fprintf(spewout_, "end_intervals\n");
+    fprintf(spewout_, "end_ranges\n");
     fflush(spewout_);
 }
 
 void
 C1Spewer::endFunction()
 {
 }
 
@@ -107,52 +106,47 @@ DumpLIR(FILE* fp, LNode* ins)
 {
     fprintf(fp, "      ");
     fprintf(fp, "%d ", ins->id());
     ins->dump(fp);
     fprintf(fp, " <|@\n");
 }
 
 void
-C1Spewer::spewIntervals(FILE* fp, BacktrackingAllocator* regalloc, LNode* ins, size_t& nextId)
+C1Spewer::spewRanges(FILE* fp, BacktrackingAllocator* regalloc, LNode* ins)
 {
     for (size_t k = 0; k < ins->numDefs(); k++) {
         uint32_t id = ins->getDef(k)->virtualRegister();
         VirtualRegister* vreg = &regalloc->vregs[id];
 
-        for (size_t i = 0; i < vreg->numIntervals(); i++) {
-            LiveInterval* live = vreg->getInterval(i);
-            if (live->numRanges()) {
-                fprintf(fp, "%d object \"", (i == 0) ? id : int32_t(nextId++));
-                fprintf(fp, "%s", live->getAllocation()->toString());
-                fprintf(fp, "\" %d -1", id);
-                for (size_t j = 0; j < live->numRanges(); j++) {
-                    fprintf(fp, " [%u, %u[", live->getRange(j)->from.bits(),
-                            live->getRange(j)->to.bits());
-                }
-                for (UsePositionIterator usePos(live->usesBegin()); usePos != live->usesEnd(); usePos++)
-                    fprintf(fp, " %u M", usePos->pos.bits());
-                fprintf(fp, " \"\"\n");
-            }
+        for (LiveRange::RegisterLinkIterator iter = vreg->rangesBegin(); iter; iter++) {
+            LiveRange* range = LiveRange::get(*iter);
+            fprintf(fp, "%d object \"", id);
+            fprintf(fp, "%s", range->bundle()->allocation().toString());
+            fprintf(fp, "\" %d -1", id);
+            fprintf(fp, " [%u, %u[", range->from().bits(), range->to().bits());
+            for (UsePositionIterator usePos(range->usesBegin()); usePos; usePos++)
+                fprintf(fp, " %u M", usePos->pos.bits());
+            fprintf(fp, " \"\"\n");
         }
     }
 }
 
 void
-C1Spewer::spewIntervals(FILE* fp, MBasicBlock* block, BacktrackingAllocator* regalloc, size_t& nextId)
+C1Spewer::spewRanges(FILE* fp, MBasicBlock* block, BacktrackingAllocator* regalloc)
 {
     LBlock* lir = block->lir();
     if (!lir)
         return;
 
     for (size_t i = 0; i < lir->numPhis(); i++)
-        spewIntervals(fp, regalloc, lir->getPhi(i), nextId);
+        spewRanges(fp, regalloc, lir->getPhi(i));
 
     for (LInstructionIterator ins = lir->begin(); ins != lir->end(); ins++)
-        spewIntervals(fp, regalloc, *ins, nextId);
+        spewRanges(fp, regalloc, *ins);
 }
 
 void
 C1Spewer::spewPass(FILE* fp, MBasicBlock* block)
 {
     fprintf(fp, "  begin_block\n");
     fprintf(fp, "    name \"B%d\"\n", block->id());
     fprintf(fp, "    from_bci -1\n");
--- a/js/src/jit/C1Spewer.h
+++ b/js/src/jit/C1Spewer.h
@@ -29,24 +29,24 @@ class C1Spewer
   public:
     C1Spewer()
       : graph(nullptr), spewout_(nullptr)
     { }
 
     bool init(const char* path);
     void beginFunction(MIRGraph* graph, HandleScript script);
     void spewPass(const char* pass);
-    void spewIntervals(const char* pass, BacktrackingAllocator* regalloc);
+    void spewRanges(const char* pass, BacktrackingAllocator* regalloc);
     void endFunction();
     void finish();
 
   private:
     void spewPass(FILE* fp, MBasicBlock* block);
-    void spewIntervals(FILE* fp, BacktrackingAllocator* regalloc, LNode* ins, size_t& nextId);
-    void spewIntervals(FILE* fp, MBasicBlock* block, BacktrackingAllocator* regalloc, size_t& nextId);
+    void spewRanges(FILE* fp, BacktrackingAllocator* regalloc, LNode* ins);
+    void spewRanges(FILE* fp, MBasicBlock* block, BacktrackingAllocator* regalloc);
 };
 
 } // namespace jit
 } // namespace js
 
 #endif /* DEBUG */
 
 #endif /* jit_C1Spewer_h */
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -2115,23 +2115,23 @@ CodeGenerator::visitMoveGroup(LMoveGroup
     if (!group->numMoves())
         return;
 
     MoveResolver& resolver = masm.moveResolver();
 
     for (size_t i = 0; i < group->numMoves(); i++) {
         const LMove& move = group->getMove(i);
 
-        const LAllocation* from = move.from();
-        const LAllocation* to = move.to();
+        LAllocation from = move.from();
+        LAllocation to = move.to();
         LDefinition::Type type = move.type();
 
         // No bogus moves.
-        MOZ_ASSERT(*from != *to);
-        MOZ_ASSERT(!from->isConstant());
+        MOZ_ASSERT(from != to);
+        MOZ_ASSERT(!from.isConstant());
         MoveOp::Type moveType;
         switch (type) {
           case LDefinition::OBJECT:
           case LDefinition::SLOTS:
 #ifdef JS_NUNBOX32
           case LDefinition::TYPE:
           case LDefinition::PAYLOAD:
 #else
--- a/js/src/jit/InlineList.h
+++ b/js/src/jit/InlineList.h
@@ -63,21 +63,16 @@ class InlineForwardList : protected Inli
   public:
     iterator begin() const {
         return iterator(this);
     }
     iterator end() const {
         return iterator(nullptr);
     }
     void removeAt(iterator where) {
-        iterator iter(where);
-        iter++;
-#ifdef DEBUG
-        iter.modifyCount_++;
-#endif
         removeAfter(where.prev, where.iter);
     }
     void pushFront(Node* t) {
         insertAfter(this, t);
     }
     void pushBack(Node* t) {
         MOZ_ASSERT(t->next == nullptr);
 #ifdef DEBUG
@@ -111,16 +106,28 @@ class InlineForwardList : protected Inli
         modifyCount_++;
 #endif
         if (item == tail_)
             tail_ = at;
         MOZ_ASSERT(at->next == item);
         at->next = item->next;
         item->next = nullptr;
     }
+    void removeAndIncrement(iterator& where) {
+        // Do not change modifyCount_ here. The iterator can still be used
+        // after calling this method, unlike the other methods that modify
+        // the list.
+        Node* item = where.iter;
+        where.iter = item->next;
+        if (item == tail_)
+            tail_ = where.prev;
+        MOZ_ASSERT(where.prev->next == item);
+        where.prev->next = where.iter;
+        item->next = nullptr;
+    }
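+    // Typical use of removeAndIncrement (illustrative sketch; shouldRemove is
+    // a hypothetical predicate):
+    //
+    //   for (iterator iter = list.begin(); iter; ) {
+    //       if (shouldRemove(*iter))
+    //           list.removeAndIncrement(iter);  // unlinks *iter and advances
+    //       else
+    //           iter++;
+    //   }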
     void splitAfter(Node* at, InlineForwardList<T>* to) {
         MOZ_ASSERT(to->empty());
         if (!at)
             at = this;
         if (at == tail_)
             return;
 #ifdef DEBUG
         modifyCount_++;
@@ -180,16 +187,19 @@ public:
         return static_cast<T*>(iter);
     }
     bool operator !=(const InlineForwardListIterator<T>& where) const {
         return iter != where.iter;
     }
     bool operator ==(const InlineForwardListIterator<T>& where) const {
         return iter == where.iter;
     }
+    explicit operator bool() const {
+        return iter != nullptr;
+    }
 
 private:
     Node* prev;
     Node* iter;
 
 #ifdef DEBUG
     const InlineForwardList<T>* owner_;
     int modifyCount_;
--- a/js/src/jit/JSONSpewer.cpp
+++ b/js/src/jit/JSONSpewer.cpp
@@ -391,58 +391,48 @@ JSONSpewer::spewLIR(MIRGraph* mir)
         endObject();
     }
 
     endList();
     endObject();
 }
 
 void
-JSONSpewer::spewIntervals(BacktrackingAllocator* regalloc)
+JSONSpewer::spewRanges(BacktrackingAllocator* regalloc)
 {
     if (!fp_)
         return;
 
-    beginObjectProperty("intervals");
+    beginObjectProperty("ranges");
     beginListProperty("blocks");
 
     for (size_t bno = 0; bno < regalloc->graph.numBlocks(); bno++) {
         beginObject();
         integerProperty("number", bno);
         beginListProperty("vregs");
 
         LBlock* lir = regalloc->graph.getBlock(bno);
         for (LInstructionIterator ins = lir->begin(); ins != lir->end(); ins++) {
             for (size_t k = 0; k < ins->numDefs(); k++) {
                 uint32_t id = ins->getDef(k)->virtualRegister();
                 VirtualRegister* vreg = &regalloc->vregs[id];
 
                 beginObject();
                 integerProperty("vreg", id);
-                beginListProperty("intervals");
-
-                for (size_t i = 0; i < vreg->numIntervals(); i++) {
-                    LiveInterval* live = vreg->getInterval(i);
+                beginListProperty("ranges");
 
-                    if (live->numRanges()) {
-                        beginObject();
-                        property("allocation");
-                        fprintf(fp_, "\"%s\"", live->getAllocation()->toString());
-                        beginListProperty("ranges");
+                for (LiveRange::RegisterLinkIterator iter = vreg->rangesBegin(); iter; iter++) {
+                    LiveRange* range = LiveRange::get(*iter);
 
-                        for (size_t j = 0; j < live->numRanges(); j++) {
-                            beginObject();
-                            integerProperty("start", live->getRange(j)->from.bits());
-                            integerProperty("end", live->getRange(j)->to.bits());
-                            endObject();
-                        }
-
-                        endList();
-                        endObject();
-                    }
+                    beginObject();
+                    property("allocation");
+                    fprintf(fp_, "\"%s\"", range->bundle()->allocation().toString());
+                    integerProperty("start", range->from().bits());
+                    integerProperty("end", range->to().bits());
+                    endObject();
                 }
 
                 endList();
                 endObject();
             }
         }
 
         endList();
--- a/js/src/jit/JSONSpewer.h
+++ b/js/src/jit/JSONSpewer.h
@@ -56,17 +56,17 @@ class JSONSpewer
     bool init(const char* path);
     void beginFunction(JSScript* script);
     void beginPass(const char * pass);
     void spewMDef(MDefinition* def);
     void spewMResumePoint(MResumePoint* rp);
     void spewMIR(MIRGraph* mir);
     void spewLIns(LNode* ins);
     void spewLIR(MIRGraph* mir);
-    void spewIntervals(BacktrackingAllocator* regalloc);
+    void spewRanges(BacktrackingAllocator* regalloc);
     void endPass();
     void endFunction();
     void finish();
 };
 
 } // namespace jit
 } // namespace js
 
--- a/js/src/jit/JitSpewer.cpp
+++ b/js/src/jit/JitSpewer.cpp
@@ -172,21 +172,21 @@ IonSpewer::spewPass(const char* pass)
 
 void
 IonSpewer::spewPass(const char* pass, BacktrackingAllocator* ra)
 {
     if (!isSpewingFunction())
         return;
 
     c1Spewer.spewPass(pass);
-    c1Spewer.spewIntervals(pass, ra);
+    c1Spewer.spewRanges(pass, ra);
     jsonSpewer.beginPass(pass);
     jsonSpewer.spewMIR(graph);
     jsonSpewer.spewLIR(graph);
-    jsonSpewer.spewIntervals(ra);
+    jsonSpewer.spewRanges(ra);
     jsonSpewer.endPass();
 }
 
 void
 IonSpewer::endFunction()
 {
     if (!isSpewingFunction()) {
         if (inited_) {
--- a/js/src/jit/LIR-Common.h
+++ b/js/src/jit/LIR-Common.h
@@ -53,37 +53,31 @@ class LOsiPoint : public LInstructionHel
         return safepoint_;
     }
 
     LIR_HEADER(OsiPoint)
 };
 
 class LMove
 {
-    LAllocation* from_;
-    LAllocation* to_;
+    LAllocation from_;
+    LAllocation to_;
     LDefinition::Type type_;
 
   public:
-    LMove(LAllocation* from, LAllocation* to, LDefinition::Type type)
+    LMove(LAllocation from, LAllocation to, LDefinition::Type type)
       : from_(from),
         to_(to),
         type_(type)
     { }
 
-    LAllocation* from() {
-        return from_;
-    }
-    const LAllocation* from() const {
+    LAllocation from() const {
         return from_;
     }
-    LAllocation* to() {
-        return to_;
-    }
-    const LAllocation* to() const {
+    LAllocation to() const {
         return to_;
     }
     LDefinition::Type type() const {
         return type_;
     }
 };
 
 class LMoveGroup : public LInstructionHelper<0, 0, 0>
@@ -104,20 +98,20 @@ class LMoveGroup : public LInstructionHe
 
     static LMoveGroup* New(TempAllocator& alloc) {
         return new(alloc) LMoveGroup(alloc);
     }
 
     void printOperands(FILE* fp);
 
     // Add a move which takes place simultaneously with all others in the group.
-    bool add(LAllocation* from, LAllocation* to, LDefinition::Type type);
+    bool add(LAllocation from, LAllocation to, LDefinition::Type type);
 
     // Add a move which takes place after existing moves in the group.
-    bool addAfter(LAllocation* from, LAllocation* to, LDefinition::Type type);
+    bool addAfter(LAllocation from, LAllocation to, LDefinition::Type type);
 
     size_t numMoves() const {
         return moves_.length();
     }
     const LMove& getMove(size_t i) const {
         return moves_[i];
     }
 
@@ -132,17 +126,17 @@ class LMoveGroup : public LInstructionHe
 #else
         return LAllocation();
 #endif
     }
 
     bool uses(Register reg) {
         for (size_t i = 0; i < numMoves(); i++) {
             LMove move = getMove(i);
-            if (*move.from() == LGeneralReg(reg) || *move.to() == LGeneralReg(reg))
+            if (move.from() == LGeneralReg(reg) || move.to() == LGeneralReg(reg))
                 return true;
         }
         return false;
     }
 };
 
 
 // Constructs a SIMD object (value type) based on the MIRType of its input.
--- a/js/src/jit/LIR.cpp
+++ b/js/src/jit/LIR.cpp
@@ -530,79 +530,79 @@ void
 LInstruction::initSafepoint(TempAllocator& alloc)
 {
     MOZ_ASSERT(!safepoint_);
     safepoint_ = new(alloc) LSafepoint(alloc);
     MOZ_ASSERT(safepoint_);
 }
 
 bool
-LMoveGroup::add(LAllocation* from, LAllocation* to, LDefinition::Type type)
+LMoveGroup::add(LAllocation from, LAllocation to, LDefinition::Type type)
 {
 #ifdef DEBUG
-    MOZ_ASSERT(*from != *to);
+    MOZ_ASSERT(from != to);
     for (size_t i = 0; i < moves_.length(); i++)
-        MOZ_ASSERT(*to != *moves_[i].to());
+        MOZ_ASSERT(to != moves_[i].to());
 
     // Check that SIMD moves are aligned according to ABI requirements.
     if (LDefinition(type).isSimdType()) {
-        MOZ_ASSERT(from->isMemory() || from->isFloatReg());
-        if (from->isMemory()) {
-            if (from->isArgument())
-                MOZ_ASSERT(from->toArgument()->index() % SimdMemoryAlignment == 0);
+        MOZ_ASSERT(from.isMemory() || from.isFloatReg());
+        if (from.isMemory()) {
+            if (from.isArgument())
+                MOZ_ASSERT(from.toArgument()->index() % SimdMemoryAlignment == 0);
             else
-                MOZ_ASSERT(from->toStackSlot()->slot() % SimdMemoryAlignment == 0);
+                MOZ_ASSERT(from.toStackSlot()->slot() % SimdMemoryAlignment == 0);
         }
-        MOZ_ASSERT(to->isMemory() || to->isFloatReg());
-        if (to->isMemory()) {
-            if (to->isArgument())
-                MOZ_ASSERT(to->toArgument()->index() % SimdMemoryAlignment == 0);
+        MOZ_ASSERT(to.isMemory() || to.isFloatReg());
+        if (to.isMemory()) {
+            if (to.isArgument())
+                MOZ_ASSERT(to.toArgument()->index() % SimdMemoryAlignment == 0);
             else
-                MOZ_ASSERT(to->toStackSlot()->slot() % SimdMemoryAlignment == 0);
+                MOZ_ASSERT(to.toStackSlot()->slot() % SimdMemoryAlignment == 0);
         }
     }
 #endif
     return moves_.append(LMove(from, to, type));
 }
 
 bool
-LMoveGroup::addAfter(LAllocation* from, LAllocation* to, LDefinition::Type type)
+LMoveGroup::addAfter(LAllocation from, LAllocation to, LDefinition::Type type)
 {
     // Transform the operands to this move so that performing the result
     // simultaneously with existing moves in the group will have the same
     // effect as if the original move took place after the existing moves.
 
     for (size_t i = 0; i < moves_.length(); i++) {
-        if (*moves_[i].to() == *from) {
+        if (moves_[i].to() == from) {
             from = moves_[i].from();
             break;
         }
     }
 
-    if (*from == *to)
+    if (from == to)
         return true;
 
     for (size_t i = 0; i < moves_.length(); i++) {
-        if (*to == *moves_[i].to()) {
+        if (to == moves_[i].to()) {
             moves_[i] = LMove(from, to, type);
             return true;
         }
     }
 
     return add(from, to, type);
 }
 
 void
 LMoveGroup::printOperands(FILE* fp)
 {
     for (size_t i = 0; i < numMoves(); i++) {
         const LMove& move = getMove(i);
         // Use two printfs, as LAllocation::toString is not reentrant.
-        fprintf(fp, " [%s", move.from()->toString());
-        fprintf(fp, " -> %s", move.to()->toString());
+        fprintf(fp, " [%s", move.from().toString());
+        fprintf(fp, " -> %s", move.to().toString());
 #ifdef DEBUG
         fprintf(fp, ", %s", TypeChars[move.type()]);
 #endif
         fprintf(fp, "]");
         if (i != numMoves() - 1)
             fprintf(fp, ",");
     }
 }
deleted file mode 100644
--- a/js/src/jit/LiveRangeAllocator.cpp
+++ /dev/null
@@ -1,996 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "jit/LiveRangeAllocator.h"
-
-#include "mozilla/DebugOnly.h"
-
-#include "jsprf.h"
-
-#include "jit/BacktrackingAllocator.h"
-#include "jit/BitSet.h"
-
-using namespace js;
-using namespace js::jit;
-
-using mozilla::DebugOnly;
-
-int
-Requirement::priority() const
-{
-    switch (kind_) {
-      case Requirement::FIXED:
-        return 0;
-
-      case Requirement::REGISTER:
-        return 1;
-
-      case Requirement::NONE:
-        return 2;
-
-      default:
-        MOZ_CRASH("Unknown requirement kind.");
-    }
-}
-
-const char*
-Requirement::toString() const
-{
-#ifdef DEBUG
-    // Not reentrant!
-    static char buf[1000];
-
-    char* cursor = buf;
-    char* end = cursor + sizeof(buf);
-
-    int n = -1;  // initialize to silence GCC warning
-    switch (kind()) {
-      case NONE:
-        return "none";
-      case REGISTER:
-        n = JS_snprintf(cursor, end - cursor, "r");
-        break;
-      case FIXED:
-        n = JS_snprintf(cursor, end - cursor, "%s", allocation().toString());
-        break;
-      case MUST_REUSE_INPUT:
-        n = JS_snprintf(cursor, end - cursor, "v%u", virtualRegister());
-        break;
-    }
-    if (n < 0)
-        return "???";
-    cursor += n;
-
-    if (pos() != CodePosition::MIN) {
-        n = JS_snprintf(cursor, end - cursor, "@%u", pos().bits());
-        if (n < 0)
-            return "???";
-        cursor += n;
-    }
-
-    return buf;
-#else
-    return " ???";
-#endif
-}
-
-void
-Requirement::dump() const
-{
-    fprintf(stderr, "%s\n", toString());
-}
-
-bool
-LiveInterval::Range::contains(const Range* other) const
-{
-    return from <= other->from && to >= other->to;
-}
-
-void
-LiveInterval::Range::intersect(const Range* other, Range* pre, Range* inside, Range* post) const
-{
-    MOZ_ASSERT(pre->empty() && inside->empty() && post->empty());
-
-    CodePosition innerFrom = from;
-    if (from < other->from) {
-        if (to < other->from) {
-            *pre = Range(from, to);
-            return;
-        }
-        *pre = Range(from, other->from);
-        innerFrom = other->from;
-    }
-
-    CodePosition innerTo = to;
-    if (to > other->to) {
-        if (from >= other->to) {
-            *post = Range(from, to);
-            return;
-        }
-        *post = Range(other->to, to);
-        innerTo = other->to;
-    }
-
-    if (innerFrom != innerTo)
-        *inside = Range(innerFrom, innerTo);
-}
-
-const char*
-LiveInterval::Range::toString() const
-{
-#ifdef DEBUG
-    // Not reentrant!
-    static char buf[1000];
-
-    char* cursor = buf;
-    char* end = cursor + sizeof(buf);
-
-    int n = JS_snprintf(cursor, end - cursor, "[%u,%u)", from.bits(), to.bits());
-    if (n < 0)
-        return " ???";
-    cursor += n;
-
-    return buf;
-#else
-    return " ???";
-#endif
-}
-
-void
-LiveInterval::Range::dump() const
-{
-    fprintf(stderr, "%s\n", toString());
-}
-
-bool
-LiveInterval::addRangeAtHead(CodePosition from, CodePosition to)
-{
-    MOZ_ASSERT(from < to);
-    MOZ_ASSERT(ranges_.empty() || from <= ranges_.back().from);
-
-    Range newRange(from, to);
-
-    if (ranges_.empty())
-        return ranges_.append(newRange);
-
-    Range& first = ranges_.back();
-    if (to < first.from)
-        return ranges_.append(newRange);
-
-    if (to == first.from) {
-        first.from = from;
-        return true;
-    }
-
-    MOZ_ASSERT(from < first.to);
-    MOZ_ASSERT(to > first.from);
-    if (from < first.from)
-        first.from = from;
-    if (to > first.to)
-        first.to = to;
-
-    return true;
-}
-
-bool
-LiveInterval::addRange(CodePosition from, CodePosition to)
-{
-    MOZ_ASSERT(from < to);
-
-    Range newRange(from, to);
-
-    Range* i;
-    // Find the location to insert the new range
-    for (i = ranges_.end(); i > ranges_.begin(); i--) {
-        if (newRange.from <= i[-1].to) {
-            if (i[-1].from < newRange.from)
-                newRange.from = i[-1].from;
-            break;
-        }
-    }
-    // Perform coalescing on overlapping ranges
-    Range* coalesceEnd = i;
-    for (; i > ranges_.begin(); i--) {
-        if (newRange.to < i[-1].from)
-            break;
-        if (newRange.to < i[-1].to)
-            newRange.to = i[-1].to;
-    }
-
-    if (i == coalesceEnd)
-        return ranges_.insert(i, newRange);
-
-    i[0] = newRange;
-    ranges_.erase(i + 1, coalesceEnd);
-    return true;
-}
-
-void
-LiveInterval::setFrom(CodePosition from)
-{
-    while (!ranges_.empty()) {
-        if (ranges_.back().to < from) {
-            ranges_.popBack();
-        } else {
-            if (from == ranges_.back().to)
-                ranges_.popBack();
-            else
-                ranges_.back().from = from;
-            break;
-        }
-    }
-}
-
-bool
-LiveInterval::covers(CodePosition pos)
-{
-    if (pos < start() || pos >= end())
-        return false;
-
-    // Loop over the ranges in ascending order.
-    size_t i = lastProcessedRangeIfValid(pos);
-    for (; i < ranges_.length(); i--) {
-        if (pos < ranges_[i].from)
-            return false;
-        setLastProcessedRange(i, pos);
-        if (pos < ranges_[i].to)
-            return true;
-    }
-    return false;
-}
-
-CodePosition
-LiveInterval::intersect(LiveInterval* other)
-{
-    if (start() > other->start())
-        return other->intersect(this);
-
-    // Loop over the ranges in ascending order. As an optimization,
-    // try to start at the last processed range.
-    size_t i = lastProcessedRangeIfValid(other->start());
-    size_t j = other->ranges_.length() - 1;
-    if (i >= ranges_.length() || j >= other->ranges_.length())
-        return CodePosition::MIN;
-
-    while (true) {
-        const Range& r1 = ranges_[i];
-        const Range& r2 = other->ranges_[j];
-
-        if (r1.from <= r2.from) {
-            if (r1.from <= other->start())
-                setLastProcessedRange(i, other->start());
-            if (r2.from < r1.to)
-                return r2.from;
-            if (i == 0 || ranges_[i-1].from > other->end())
-                break;
-            i--;
-        } else {
-            if (r1.from < r2.to)
-                return r1.from;
-            if (j == 0 || other->ranges_[j-1].from > end())
-                break;
-            j--;
-        }
-    }
-
-    return CodePosition::MIN;
-}
-
-/*
- * This function takes the callee interval and moves all ranges following or
- * including provided position to the target interval. Additionally, if a
- * range in the callee interval spans the given position, it is split and the
- * latter half is placed in the target interval.
- *
- * This function should only be called if it is known that the interval should
- * actually be split (and, presumably, a move inserted). As such, it is an
- * error for the caller to request a split that moves all intervals into the
- * target. Doing so will trip the assertion at the bottom of the function.
- */
-bool
-LiveInterval::splitFrom(CodePosition pos, LiveInterval* after)
-{
-    MOZ_ASSERT(pos >= start() && pos < end());
-    MOZ_ASSERT(after->ranges_.empty());
-
-    // Move all intervals over to the target
-    size_t bufferLength = ranges_.length();
-    Range* buffer = ranges_.extractRawBuffer();
-    if (!buffer)
-        return false;
-    after->ranges_.replaceRawBuffer(buffer, bufferLength);
-
-    // Move intervals back as required
-    for (Range* i = &after->ranges_.back(); i >= after->ranges_.begin(); i--) {
-        if (pos >= i->to)
-            continue;
-
-        if (pos > i->from) {
-            // Split the range
-            Range split(i->from, pos);
-            i->from = pos;
-            if (!ranges_.append(split))
-                return false;
-        }
-        if (!ranges_.append(i + 1, after->ranges_.end()))
-            return false;
-        after->ranges_.shrinkBy(after->ranges_.end() - i - 1);
-        break;
-    }
-
-    // Split the linked list of use positions
-    UsePosition* prev = nullptr;
-    for (UsePositionIterator usePos(usesBegin()); usePos != usesEnd(); usePos++) {
-        if (usePos->pos > pos)
-            break;
-        prev = *usePos;
-    }
-
-    uses_.splitAfter(prev, &after->uses_);
-    return true;
-}
-
-void
-LiveInterval::addUse(UsePosition* use)
-{
-    // Insert use positions in ascending order. Note that instructions
-    // are visited in reverse order, so in most cases the loop terminates
-    // at the first iteration and the use position will be added to the
-    // front of the list.
-    UsePosition* prev = nullptr;
-    for (UsePositionIterator current(usesBegin()); current != usesEnd(); current++) {
-        if (current->pos >= use->pos)
-            break;
-        prev = *current;
-    }
-
-    if (prev)
-        uses_.insertAfter(prev, use);
-    else
-        uses_.pushFront(use);
-}
-
-void
-LiveInterval::addUseAtEnd(UsePosition* use)
-{
-    MOZ_ASSERT(uses_.empty() || use->pos >= uses_.back()->pos);
-    uses_.pushBack(use);
-}
-
-UsePosition*
-LiveInterval::nextUseAfter(CodePosition after)
-{
-    for (UsePositionIterator usePos(usesBegin()); usePos != usesEnd(); usePos++) {
-        if (usePos->pos >= after) {
-            LUse::Policy policy = usePos->use->policy();
-            MOZ_ASSERT(policy != LUse::RECOVERED_INPUT);
-            if (policy != LUse::KEEPALIVE)
-                return *usePos;
-        }
-    }
-    return nullptr;
-}
-
-UsePosition*
-LiveInterval::popUse()
-{
-    return uses_.popFront();
-}
-
-/*
- * This function locates the first "real" use of this interval that follows
- * the given code position. Non-"real" uses are currently just snapshots,
- * which keep virtual registers alive but do not result in the
- * generation of code that use them.
- */
-CodePosition
-LiveInterval::nextUsePosAfter(CodePosition after)
-{
-    UsePosition* min = nextUseAfter(after);
-    return min ? min->pos : CodePosition::MAX;
-}
-
-LiveInterval*
-VirtualRegister::intervalFor(CodePosition pos)
-{
-    // Intervals are sorted in ascending order by their start position.
-    for (LiveInterval** i = intervals_.begin(); i != intervals_.end(); i++) {
-        if ((*i)->covers(pos))
-            return *i;
-        if (pos < (*i)->start())
-            break;
-    }
-    return nullptr;
-}
-
-LiveInterval*
-VirtualRegister::getFirstInterval()
-{
-    MOZ_ASSERT(!intervals_.empty());
-    return intervals_[0];
-}
-
-// Instantiate LiveRangeAllocator for each template instance.
-template bool LiveRangeAllocator<BacktrackingVirtualRegister>::buildLivenessInfo();
-template void LiveRangeAllocator<BacktrackingVirtualRegister>::dumpVregs();
-
-#ifdef DEBUG
-// Returns true iff ins has a def/temp reusing the input allocation.
-static bool
-IsInputReused(LInstruction* ins, LUse* use)
-{
-    for (size_t i = 0; i < ins->numDefs(); i++) {
-        if (ins->getDef(i)->policy() == LDefinition::MUST_REUSE_INPUT &&
-            ins->getOperand(ins->getDef(i)->getReusedInput())->toUse() == use)
-        {
-            return true;
-        }
-    }
-
-    for (size_t i = 0; i < ins->numTemps(); i++) {
-        if (ins->getTemp(i)->policy() == LDefinition::MUST_REUSE_INPUT &&
-            ins->getOperand(ins->getTemp(i)->getReusedInput())->toUse() == use)
-        {
-            return true;
-        }
-    }
-
-    return false;
-}
-#endif
-
-/*
- * This function pre-allocates and initializes as much global state as possible
- * to avoid littering the algorithms with memory management cruft.
- */
-template <typename VREG>
-bool
-LiveRangeAllocator<VREG>::init()
-{
-    if (!RegisterAllocator::init())
-        return false;
-
-    liveIn = mir->allocate<BitSet>(graph.numBlockIds());
-    if (!liveIn)
-        return false;
-
-    // Initialize fixed intervals.
-    for (size_t i = 0; i < AnyRegister::Total; i++) {
-        AnyRegister reg = AnyRegister::FromCode(i);
-        LiveInterval* interval = LiveInterval::New(alloc(), 0);
-        interval->setAllocation(LAllocation(reg));
-        fixedIntervals[i] = interval;
-    }
-
-    fixedIntervalsUnion = LiveInterval::New(alloc(), 0);
-
-    if (!vregs.init(mir, graph.numVirtualRegisters()))
-        return false;
-
-    // Build virtual register objects
-    for (size_t i = 0; i < graph.numBlocks(); i++) {
-        if (mir->shouldCancel("Create data structures (main loop)"))
-            return false;
-
-        LBlock* block = graph.getBlock(i);
-        for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
-            for (size_t j = 0; j < ins->numDefs(); j++) {
-                LDefinition* def = ins->getDef(j);
-                if (def->isBogusTemp())
-                    continue;
-                if (!vregs[def].init(alloc(), *ins, def, /* isTemp */ false))
-                    return false;
-            }
-
-            for (size_t j = 0; j < ins->numTemps(); j++) {
-                LDefinition* def = ins->getTemp(j);
-                if (def->isBogusTemp())
-                    continue;
-                if (!vregs[def].init(alloc(), *ins, def, /* isTemp */ true))
-                    return false;
-            }
-        }
-        for (size_t j = 0; j < block->numPhis(); j++) {
-            LPhi* phi = block->getPhi(j);
-            LDefinition* def = phi->getDef(0);
-            if (!vregs[def].init(alloc(), phi, def, /* isTemp */ false))
-                return false;
-        }
-    }
-
-    return true;
-}
-
-/*
- * This function builds up liveness intervals for all virtual registers
- * defined in the function. Additionally, it populates the liveIn array with
- * information about which registers are live at the beginning of a block, to
- * aid resolution and reification in a later phase.
- *
- * The algorithm is based on the one published in:
- *
- * Wimmer, Christian, and Michael Franz. "Linear Scan Register Allocation on
- *     SSA Form." Proceedings of the International Symposium on Code Generation
- *     and Optimization. Toronto, Ontario, Canada, ACM. 2010. 170-79. PDF.
- *
- * The algorithm operates on blocks ordered such that dominators of a block
- * are before the block itself, and such that all blocks of a loop are
- * contiguous. It proceeds backwards over the instructions in this order,
- * marking registers live at their uses, ending their live intervals at
- * definitions, and recording which registers are live at the top of every
- * block. To deal with loop backedges, variables live at the beginning of
- * a loop gain an interval covering the entire loop.
- */
-template <typename VREG>
-bool
-LiveRangeAllocator<VREG>::buildLivenessInfo()
-{
-    JitSpew(JitSpew_RegAlloc, "Beginning liveness analysis");
-
-    if (!init())
-        return false;
-
-    Vector<MBasicBlock*, 1, SystemAllocPolicy> loopWorkList;
-    BitSet loopDone(graph.numBlockIds());
-    if (!loopDone.init(alloc()))
-        return false;
-
-    for (size_t i = graph.numBlocks(); i > 0; i--) {
-        if (mir->shouldCancel("Build Liveness Info (main loop)"))
-            return false;
-
-        LBlock* block = graph.getBlock(i - 1);
-        MBasicBlock* mblock = block->mir();
-
-        BitSet& live = liveIn[mblock->id()];
-        new (&live) BitSet(graph.numVirtualRegisters());
-        if (!live.init(alloc()))
-            return false;
-
-        // Propagate liveIn from our successors to us
-        for (size_t i = 0; i < mblock->lastIns()->numSuccessors(); i++) {
-            MBasicBlock* successor = mblock->lastIns()->getSuccessor(i);
-            // Skip backedges, as we fix them up at the loop header.
-            if (mblock->id() < successor->id())
-                live.insertAll(liveIn[successor->id()]);
-        }
-
-        // Add successor phis
-        if (mblock->successorWithPhis()) {
-            LBlock* phiSuccessor = mblock->successorWithPhis()->lir();
-            for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) {
-                LPhi* phi = phiSuccessor->getPhi(j);
-                LAllocation* use = phi->getOperand(mblock->positionInPhiSuccessor());
-                uint32_t reg = use->toUse()->virtualRegister();
-                live.insert(reg);
-            }
-        }
-
-        // Variables are assumed alive for the entire block, a define shortens
-        // the interval to the point of definition.
-        for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) {
-            if (!vregs[*liveRegId].getInterval(0)->addRangeAtHead(entryOf(block),
-                                                                  exitOf(block).next()))
-            {
-                return false;
-            }
-        }
-
-        // Shorten the front end of live intervals for live variables to their
-        // point of definition, if found.
-        for (LInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) {
-            // Calls may clobber registers, so force a spill and reload around the callsite.
-            if (ins->isCall()) {
-                for (AnyRegisterIterator iter(allRegisters_.asLiveSet()); iter.more(); iter++) {
-                    bool found = false;
-
-                    for (size_t i = 0; i < ins->numDefs(); i++) {
-                        if (ins->getDef(i)->isFixed() &&
-                            ins->getDef(i)->output()->aliases(LAllocation(*iter))) {
-                            found = true;
-                            break;
-                        }
-                    }
-                    if (!found && !addFixedRangeAtHead(*iter, outputOf(*ins), outputOf(*ins).next()))
-                        return false;
-                }
-            }
-            DebugOnly<bool> hasDoubleDef = false;
-            DebugOnly<bool> hasFloat32Def = false;
-            for (size_t i = 0; i < ins->numDefs(); i++) {
-                LDefinition* def = ins->getDef(i);
-                if (def->isBogusTemp())
-                    continue;
-#ifdef DEBUG
-                if (def->type() == LDefinition::DOUBLE)
-                    hasDoubleDef = true;
-                if (def->type() == LDefinition::FLOAT32)
-                    hasFloat32Def = true;
-#endif
-                CodePosition from = outputOf(*ins);
-
-                if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
-                    // MUST_REUSE_INPUT is implemented by allocating an output
-                    // register and moving the input to it. Register hints are
-                    // used to avoid unnecessary moves. We give the input an
-                    // LUse::ANY policy to avoid allocating a register for the
-                    // input.
-                    LUse* inputUse = ins->getOperand(def->getReusedInput())->toUse();
-                    MOZ_ASSERT(inputUse->policy() == LUse::REGISTER);
-                    MOZ_ASSERT(inputUse->usedAtStart());
-                    *inputUse = LUse(inputUse->virtualRegister(), LUse::ANY, /* usedAtStart = */ true);
-                }
-
-                LiveInterval* interval = vregs[def].getInterval(0);
-                interval->setFrom(from);
-
-                // Ensure that if there aren't any uses, there's at least
-                // some interval for the output to go into.
-                if (interval->numRanges() == 0) {
-                    if (!interval->addRangeAtHead(from, from.next()))
-                        return false;
-                }
-                live.remove(def->virtualRegister());
-            }
-
-            for (size_t i = 0; i < ins->numTemps(); i++) {
-                LDefinition* temp = ins->getTemp(i);
-                if (temp->isBogusTemp())
-                    continue;
-
-                // Normally temps are considered to cover both the input and
-                // output of the associated instruction. In some cases, though,
-                // we want to use a fixed register as both an input and a
-                // clobbered register in the instruction, so watch for this and
-                // shorten the temp to cover only the output.
-                CodePosition from = inputOf(*ins);
-                if (temp->policy() == LDefinition::FIXED) {
-                    AnyRegister reg = temp->output()->toRegister();
-                    for (LInstruction::InputIterator alloc(**ins); alloc.more(); alloc.next()) {
-                        if (alloc->isUse()) {
-                            LUse* use = alloc->toUse();
-                            if (use->isFixedRegister()) {
-                                if (GetFixedRegister(vregs[use].def(), use) == reg)
-                                    from = outputOf(*ins);
-                            }
-                        }
-                    }
-                }
-
-                CodePosition to =
-                    ins->isCall() ? outputOf(*ins) : outputOf(*ins).next();
-                if (!vregs[temp].getInterval(0)->addRangeAtHead(from, to))
-                    return false;
-            }
-
-            DebugOnly<bool> hasUseRegister = false;
-            DebugOnly<bool> hasUseRegisterAtStart = false;
-
-            for (LInstruction::InputIterator inputAlloc(**ins); inputAlloc.more(); inputAlloc.next()) {
-                if (inputAlloc->isUse()) {
-                    LUse* use = inputAlloc->toUse();
-
-                    // Call uses should always be at-start or fixed, since the fixed intervals
-                    // use all registers.
-                    MOZ_ASSERT_IF(ins->isCall() && !inputAlloc.isSnapshotInput(),
-                                  use->isFixedRegister() || use->usedAtStart());
-
-#ifdef DEBUG
-                    // Don't allow at-start call uses if there are temps of the same kind,
-                    // so that we don't assign the same register.
-                    if (ins->isCall() && use->usedAtStart()) {
-                        for (size_t i = 0; i < ins->numTemps(); i++)
-                            MOZ_ASSERT(vregs[ins->getTemp(i)].isFloatReg() != vregs[use].isFloatReg());
-                    }
-
-                    // If there are both useRegisterAtStart(x) and useRegister(y)
-                    // uses, we may assign the same register to both operands due to
-                    // interval splitting (bug 772830). Don't allow this for now.
-                    if (use->policy() == LUse::REGISTER) {
-                        if (use->usedAtStart()) {
-                            if (!IsInputReused(*ins, use))
-                                hasUseRegisterAtStart = true;
-                        } else {
-                            hasUseRegister = true;
-                        }
-                    }
-                    MOZ_ASSERT(!(hasUseRegister && hasUseRegisterAtStart));
-#endif
-
-                    // Don't treat RECOVERED_INPUT uses as keeping the vreg alive.
-                    if (use->policy() == LUse::RECOVERED_INPUT)
-                        continue;
-
-                    // Fixed uses on calls are specially overridden to happen
-                    // at the input position.
-                    CodePosition to =
-                        (use->usedAtStart() || (ins->isCall() && use->isFixedRegister()))
-                        ? inputOf(*ins)
-                        : outputOf(*ins);
-                    if (use->isFixedRegister()) {
-                        LAllocation reg(AnyRegister::FromCode(use->registerCode()));
-                        for (size_t i = 0; i < ins->numDefs(); i++) {
-                            LDefinition* def = ins->getDef(i);
-                            if (def->policy() == LDefinition::FIXED && *def->output() == reg)
-                                to = inputOf(*ins);
-                        }
-                    }
-
-                    LiveInterval* interval = vregs[use].getInterval(0);
-                    if (!interval->addRangeAtHead(entryOf(block), to.next()))
-                        return false;
-                    interval->addUse(new(alloc()) UsePosition(use, to));
-
-                    live.insert(use->virtualRegister());
-                }
-            }
-        }
-
-        // Phis have simultaneous assignment semantics at block begin, so at
-        // the beginning of the block we can be sure that liveIn does not
-        // contain any phi outputs.
-        for (unsigned int i = 0; i < block->numPhis(); i++) {
-            LDefinition* def = block->getPhi(i)->getDef(0);
-            if (live.contains(def->virtualRegister())) {
-                live.remove(def->virtualRegister());
-            } else {
-                // This is a dead phi, so add a dummy range over all phis. This
-                // can go away if we have an earlier dead code elimination pass.
-                CodePosition entryPos = entryOf(block);
-                if (!vregs[def].getInterval(0)->addRangeAtHead(entryPos, entryPos.next()))
-                    return false;
-            }
-        }
-
-        if (mblock->isLoopHeader()) {
-            // A divergence from the published algorithm is required here, as
-            // our block order does not guarantee that blocks of a loop are
-            // contiguous. As a result, a single live interval spanning the
-            // loop is not possible. Additionally, we require liveIn in a later
-            // pass for resolution, so that must also be fixed up here.
-            MBasicBlock* loopBlock = mblock->backedge();
-            while (true) {
-                // Blocks must already have been visited to have a liveIn set.
-                MOZ_ASSERT(loopBlock->id() >= mblock->id());
-
-                // Add an interval for this entire loop block
-                CodePosition from = entryOf(loopBlock->lir());
-                CodePosition to = exitOf(loopBlock->lir()).next();
-
-                for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) {
-                    if (!vregs[*liveRegId].getInterval(0)->addRange(from, to))
-                        return false;
-                }
-
-                // Fix up the liveIn set to account for the new interval
-                liveIn[loopBlock->id()].insertAll(live);
-
-                // Make sure we don't visit this node again
-                loopDone.insert(loopBlock->id());
-
-                // If this is the loop header, any predecessors are either the
-                // backedge or out of the loop, so skip any predecessors of
-                // this block
-                if (loopBlock != mblock) {
-                    for (size_t i = 0; i < loopBlock->numPredecessors(); i++) {
-                        MBasicBlock* pred = loopBlock->getPredecessor(i);
-                        if (loopDone.contains(pred->id()))
-                            continue;
-                        if (!loopWorkList.append(pred))
-                            return false;
-                    }
-                }
-
-                // Terminate loop if out of work.
-                if (loopWorkList.empty())
-                    break;
-
-                // Grab the next block off the work list, skipping any OSR block.
-                MBasicBlock* osrBlock = graph.mir().osrBlock();
-                while (!loopWorkList.empty()) {
-                    loopBlock = loopWorkList.popCopy();
-                    if (loopBlock != osrBlock)
-                        break;
-                }
-
-                // If only the OSR block remained, the work list is exhausted.
-                if (loopBlock == osrBlock) {
-                    MOZ_ASSERT(loopWorkList.empty());
-                    break;
-                }
-            }
-
-            // Clear the done set for other loops
-            loopDone.clear();
-        }
-
-        MOZ_ASSERT_IF(!mblock->numPredecessors(), live.empty());
-    }
-
-    validateVirtualRegisters();
-
-    // If the script has an infinite loop, there may be no MReturn and therefore
-    // no fixed intervals. Add a small range to fixedIntervalsUnion so that the
-    // rest of the allocator can assume it has at least one range.
-    if (fixedIntervalsUnion->numRanges() == 0) {
-        if (!fixedIntervalsUnion->addRangeAtHead(CodePosition(0, CodePosition::INPUT),
-                                                 CodePosition(0, CodePosition::OUTPUT)))
-        {
-            return false;
-        }
-    }
-
-    JitSpew(JitSpew_RegAlloc, "Liveness analysis complete");
-
-    if (JitSpewEnabled(JitSpew_RegAlloc)) {
-        dumpInstructions();
-
-        fprintf(stderr, "Live ranges by virtual register:\n");
-        dumpVregs();
-    }
-
-    return true;
-}
-
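The deleted buildLivenessInfo above is a standard backward liveness pass: each block starts from the union of its successors' live-in sets, successor phi operands are added, and a reverse walk over the instructions kills definitions and revives uses; loop headers then keep everything live at the header alive across every block of the loop. A minimal standalone sketch of the per-block dataflow, using hypothetical Block/Inst types rather than the real LIR classes (backedge skipping and the loop fix-up are omitted):

#include <cstdint>
#include <set>
#include <vector>

struct Inst {
    std::vector<uint32_t> defs;  // virtual registers written by this instruction
    std::vector<uint32_t> uses;  // virtual registers read by this instruction
};

struct Block {
    std::vector<Inst> insts;
    std::vector<Block*> successors;
    std::set<uint32_t> liveIn;
};

// Blocks are assumed to be in reverse postorder, so scanning the array
// backwards visits successors before predecessors (backedges excepted).
void computeLiveIn(std::vector<Block*>& blocks)
{
    for (size_t i = blocks.size(); i > 0; i--) {
        Block* block = blocks[i - 1];
        std::set<uint32_t> live;
        for (Block* succ : block->successors)
            live.insert(succ->liveIn.begin(), succ->liveIn.end());
        for (auto it = block->insts.rbegin(); it != block->insts.rend(); ++it) {
            for (uint32_t def : it->defs)
                live.erase(def);               // a definition ends liveness
            for (uint32_t use : it->uses)
                live.insert(use);              // a use makes the vreg live
        }
        block->liveIn = live;
    }
}
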
-template <typename VREG>
-void
-LiveRangeAllocator<VREG>::dumpVregs()
-{
-#ifdef DEBUG
-    // Virtual register number 0 is unused.
-    MOZ_ASSERT(vregs[0u].numIntervals() == 0);
-    for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) {
-        fprintf(stderr, "  ");
-        VirtualRegister& vreg = vregs[i];
-        for (size_t j = 0; j < vreg.numIntervals(); j++) {
-            if (j)
-                fprintf(stderr, " / ");
-            fprintf(stderr, "%s", vreg.getInterval(j)->toString());
-        }
-        fprintf(stderr, "\n");
-    }
-
-    fprintf(stderr, "\n");
-#endif
-}
-
-#ifdef DEBUG
-
-void
-LiveInterval::validateRanges()
-{
-    Range* prev = nullptr;
-
-    for (size_t i = ranges_.length() - 1; i < ranges_.length(); i--) {
-        Range* range = &ranges_[i];
-
-        MOZ_ASSERT(range->from < range->to);
-        MOZ_ASSERT_IF(prev, prev->to <= range->from);
-        prev = range;
-    }
-}
-
-#endif // DEBUG
-
-const char*
-LiveInterval::rangesToString() const
-{
-#ifdef DEBUG
-    // Not reentrant!
-    static char buf[2000];
-
-    char* cursor = buf;
-    char* end = cursor + sizeof(buf);
-
-    int n;
-
-    for (size_t i = ranges_.length() - 1; i < ranges_.length(); i--) {
-        const LiveInterval::Range* range = getRange(i);
-        n = JS_snprintf(cursor, end - cursor, " %s", range->toString());
-        if (n < 0)
-            return " ???";
-        cursor += n;
-    }
-
-    return buf;
-#else
-    return " ???";
-#endif
-}
-
-#ifdef DEBUG
-static bool
-IsHintInteresting(const Requirement& requirement, const Requirement& hint)
-{
-    if (hint.kind() == Requirement::NONE)
-        return false;
-
-    if (hint.kind() != Requirement::FIXED && hint.kind() != Requirement::REGISTER)
-        return true;
-
-    Requirement merge = requirement;
-    if (!merge.mergeRequirement(hint))
-        return true;
-
-    return merge.kind() != requirement.kind();
-}
-#endif
-
-const char*
-LiveInterval::toString() const
-{
-#ifdef DEBUG
-    // Not reentrant!
-    static char buf[2000];
-
-    char* cursor = buf;
-    char* end = cursor + sizeof(buf);
-
-    int n;
-
-    if (hasVreg()) {
-        n = JS_snprintf(cursor, end - cursor, "v%u", vreg());
-        if (n < 0) return "???";
-        cursor += n;
-    }
-
-    n = JS_snprintf(cursor, end - cursor, "[%u]", index());
-    if (n < 0) return "???";
-    cursor += n;
-
-    if (requirement_.kind() != Requirement::NONE || hint_.kind() != Requirement::NONE) {
-        n = JS_snprintf(cursor, end - cursor, " req(");
-        if (n < 0) return "???";
-        cursor += n;
-
-        bool printHint = IsHintInteresting(requirement_, hint_);
-
-        if (requirement_.kind() != Requirement::NONE) {
-            n = JS_snprintf(cursor, end - cursor, "%s%s",
-                            requirement_.toString(),
-                            printHint ? "," : "");
-            if (n < 0) return "???";
-            cursor += n;
-        }
-        if (printHint) {
-            n = JS_snprintf(cursor, end - cursor, "%s?", hint_.toString());
-            if (n < 0) return "???";
-            cursor += n;
-        }
-
-        n = JS_snprintf(cursor, end - cursor, ")");
-        if (n < 0) return "???";
-        cursor += n;
-    }
-
-    if (!alloc_.isBogus()) {
-        n = JS_snprintf(cursor, end - cursor, " has(%s)", alloc_.toString());
-        if (n < 0) return "???";
-        cursor += n;
-    }
-
-    n = JS_snprintf(cursor, end - cursor, "%s", rangesToString());
-    if (n < 0) return "???";
-    cursor += n;
-
-    for (UsePositionIterator usePos(usesBegin()); usePos != usesEnd(); usePos++) {
-        n = JS_snprintf(cursor, end - cursor, " %s@%u",
-                        usePos->use->toString(), usePos->pos.bits());
-        if (n < 0) return "???";
-        cursor += n;
-    }
-
-    return buf;
-#else
-    return "???";
-#endif
-}
-
-void
-LiveInterval::dump() const
-{
-    fprintf(stderr, "%s\n", toString());
-}
deleted file mode 100644
--- a/js/src/jit/LiveRangeAllocator.h
+++ /dev/null
@@ -1,758 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef jit_LiveRangeAllocator_h
-#define jit_LiveRangeAllocator_h
-
-#include "mozilla/Array.h"
-#include "mozilla/DebugOnly.h"
-
-#include "jit/RegisterAllocator.h"
-#include "jit/StackSlotAllocator.h"
-
-// Common structures and functions used by register allocators that operate on
-// virtual register live ranges.
-
-namespace js {
-namespace jit {
-
-class Requirement
-{
-  public:
-    enum Kind {
-        NONE,
-        REGISTER,
-        FIXED,
-        MUST_REUSE_INPUT
-    };
-
-    Requirement()
-      : kind_(NONE)
-    { }
-
-    explicit Requirement(Kind kind)
-      : kind_(kind)
-    {
-        // These have dedicated constructors.
-        MOZ_ASSERT(kind != FIXED && kind != MUST_REUSE_INPUT);
-    }
-
-    Requirement(Kind kind, CodePosition at)
-      : kind_(kind),
-        position_(at)
-    {
-        // These have dedicated constructors.
-        MOZ_ASSERT(kind != FIXED && kind != MUST_REUSE_INPUT);
-    }
-
-    explicit Requirement(LAllocation fixed)
-      : kind_(FIXED),
-        allocation_(fixed)
-    {
-        MOZ_ASSERT(!fixed.isBogus() && !fixed.isUse());
-    }
-
-    // Only useful as a hint, encodes where the fixed requirement is used to
-    // avoid allocating a fixed register too early.
-    Requirement(LAllocation fixed, CodePosition at)
-      : kind_(FIXED),
-        allocation_(fixed),
-        position_(at)
-    {
-        MOZ_ASSERT(!fixed.isBogus() && !fixed.isUse());
-    }
-
-    Requirement(uint32_t vreg, CodePosition at)
-      : kind_(MUST_REUSE_INPUT),
-        allocation_(LUse(vreg, LUse::ANY)),
-        position_(at)
-    { }
-
-    Kind kind() const {
-        return kind_;
-    }
-
-    LAllocation allocation() const {
-        MOZ_ASSERT(!allocation_.isBogus() && !allocation_.isUse());
-        return allocation_;
-    }
-
-    uint32_t virtualRegister() const {
-        MOZ_ASSERT(allocation_.isUse());
-        MOZ_ASSERT(kind() == MUST_REUSE_INPUT);
-        return allocation_.toUse()->virtualRegister();
-    }
-
-    CodePosition pos() const {
-        return position_;
-    }
-
-    int priority() const;
-
-    bool mergeRequirement(const Requirement& newRequirement) {
-        // Merge newRequirement with any existing requirement, returning false
-        // if the new and old requirements conflict.
-        MOZ_ASSERT(newRequirement.kind() != Requirement::MUST_REUSE_INPUT);
-
-        if (newRequirement.kind() == Requirement::FIXED) {
-            if (kind() == Requirement::FIXED)
-                return newRequirement.allocation() == allocation();
-            *this = newRequirement;
-            return true;
-        }
-
-        MOZ_ASSERT(newRequirement.kind() == Requirement::REGISTER);
-        if (kind() == Requirement::FIXED)
-            return allocation().isRegister();
-
-        *this = newRequirement;
-        return true;
-    }
-
-    // Return a string describing this requirement. This is not re-entrant!
-    const char* toString() const;
-
-    void dump() const;
-
-  private:
-    Kind kind_;
-    LAllocation allocation_;
-    CodePosition position_;
-};
-
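mergeRequirement above returns false exactly when the new and existing requirements cannot both be satisfied. A self-contained sketch of those merge rules with simplified types (not the jit::Requirement class; it assumes fixed allocations are always registers):

#include <cassert>

enum class Kind { NONE, REGISTER, FIXED };

struct Req {
    Kind kind;
    int fixedReg;  // only meaningful when kind == FIXED

    bool merge(const Req& other) {
        if (other.kind == Kind::FIXED) {
            if (kind == Kind::FIXED)
                return fixedReg == other.fixedReg;  // must name the same register
            *this = other;                          // adopt the fixed register
            return true;
        }
        if (other.kind == Kind::REGISTER) {
            if (kind == Kind::NONE)
                kind = Kind::REGISTER;              // FIXED already satisfies it
            return true;
        }
        return true;                                // NONE adds nothing
    }
};

int main() {
    Req r{Kind::REGISTER, -1};
    Req fixed3{Kind::FIXED, 3};
    Req fixed5{Kind::FIXED, 5};
    Req anyReg{Kind::REGISTER, -1};

    assert(r.merge(fixed3));    // upgraded to "fixed in register 3"
    assert(!r.merge(fixed5));   // conflicting fixed registers
    assert(r.merge(anyReg));    // a fixed register already satisfies REGISTER
    return 0;
}
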
-struct UsePosition : public TempObject,
-                     public InlineForwardListNode<UsePosition>
-{
-    LUse* use;
-    CodePosition pos;
-
-    UsePosition(LUse* use, CodePosition pos) :
-        use(use),
-        pos(pos)
-    {
-        // Verify that the usedAtStart() flag is consistent with the
-        // subposition. For now ignore fixed registers, because they
-        // are handled specially around calls.
-        MOZ_ASSERT_IF(!use->isFixedRegister(),
-                      pos.subpos() == (use->usedAtStart()
-                                       ? CodePosition::INPUT
-                                       : CodePosition::OUTPUT));
-    }
-};
-
-typedef InlineForwardListIterator<UsePosition> UsePositionIterator;
-
-static inline bool
-UseCompatibleWith(const LUse* use, LAllocation alloc)
-{
-    switch (use->policy()) {
-      case LUse::ANY:
-      case LUse::KEEPALIVE:
-        return alloc.isRegister() || alloc.isMemory();
-      case LUse::REGISTER:
-        return alloc.isRegister();
-      case LUse::FIXED:
-        // Fixed uses are handled using fixed intervals. The
-        // UsePosition is only used as a hint.
-        return alloc.isRegister();
-      default:
-        MOZ_CRASH("Unknown use policy");
-    }
-}
-
-#ifdef DEBUG
-
-static inline bool
-DefinitionCompatibleWith(LNode* ins, const LDefinition* def, LAllocation alloc)
-{
-    if (ins->isPhi()) {
-        if (def->isFloatReg())
-            return alloc.isFloatReg() || alloc.isStackSlot();
-        return alloc.isGeneralReg() || alloc.isStackSlot();
-    }
-
-    switch (def->policy()) {
-      case LDefinition::REGISTER:
-        if (!alloc.isRegister())
-            return false;
-        return alloc.isFloatReg() == def->isFloatReg();
-      case LDefinition::FIXED:
-        return alloc == *def->output();
-      case LDefinition::MUST_REUSE_INPUT:
-        if (!alloc.isRegister() || !ins->numOperands())
-            return false;
-        return alloc == *ins->getOperand(def->getReusedInput());
-      default:
-        MOZ_CRASH("Unknown definition policy");
-    }
-}
-
-#endif // DEBUG
-
-static inline LDefinition*
-FindReusingDefinition(LNode* ins, LAllocation* alloc)
-{
-    for (size_t i = 0; i < ins->numDefs(); i++) {
-        LDefinition* def = ins->getDef(i);
-        if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
-            ins->getOperand(def->getReusedInput()) == alloc)
-            return def;
-    }
-    for (size_t i = 0; i < ins->numTemps(); i++) {
-        LDefinition* def = ins->getTemp(i);
-        if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
-            ins->getOperand(def->getReusedInput()) == alloc)
-            return def;
-    }
-    return nullptr;
-}
-
-/*
- * A live interval is a set of disjoint ranges of code positions where a
- * virtual register is live. Register allocation operates on these intervals,
- * splitting them as necessary and assigning allocations to them as it runs.
- */
-class LiveInterval
-  : public TempObject
-{
-  public:
-    /*
-     * A range is a contiguous sequence of CodePositions where the virtual
-     * register associated with this interval is live.
-     */
-    struct Range {
-        Range()
-          : from(),
-            to()
-        { }
-        Range(CodePosition f, CodePosition t)
-          : from(f),
-            to(t)
-        {
-            MOZ_ASSERT(from < to);
-        }
-
-        // The beginning of this range, inclusive.
-        CodePosition from;
-
-        // The end of this range, exclusive.
-        CodePosition to;
-
-        bool empty() const {
-            return from >= to;
-        }
-
-        // Whether this range wholly contains other.
-        bool contains(const Range* other) const;
-
-        // Intersect this range with other, returning the subranges of this
-        // that are before, inside, or after other.
-        void intersect(const Range* other, Range* pre, Range* inside, Range* post) const;
-
-        // Return a string describing this range. This is not re-entrant!
-        const char* toString() const;
-
-        void dump() const;
-    };
-
-  private:
-    Vector<Range, 1, JitAllocPolicy> ranges_;
-    LAllocation alloc_;
-    LiveInterval* spillInterval_;
-    uint32_t vreg_;
-    uint32_t index_;
-    Requirement requirement_;
-    Requirement hint_;
-    InlineForwardList<UsePosition> uses_;
-    size_t lastProcessedRange_;
-
-    LiveInterval(TempAllocator& alloc, uint32_t vreg, uint32_t index)
-      : ranges_(alloc),
-        spillInterval_(nullptr),
-        vreg_(vreg),
-        index_(index),
-        lastProcessedRange_(size_t(-1))
-    { }
-
-    LiveInterval(TempAllocator& alloc, uint32_t index)
-      : ranges_(alloc),
-        spillInterval_(nullptr),
-        vreg_(UINT32_MAX),
-        index_(index),
-        lastProcessedRange_(size_t(-1))
-    { }
-
-  public:
-    static LiveInterval* New(TempAllocator& alloc, uint32_t vreg, uint32_t index) {
-        return new(alloc) LiveInterval(alloc, vreg, index);
-    }
-    static LiveInterval* New(TempAllocator& alloc, uint32_t index) {
-        return new(alloc) LiveInterval(alloc, index);
-    }
-
-    bool addRange(CodePosition from, CodePosition to);
-    bool addRangeAtHead(CodePosition from, CodePosition to);
-    void setFrom(CodePosition from);
-    CodePosition intersect(LiveInterval* other);
-    bool covers(CodePosition pos);
-
-    CodePosition start() const {
-        MOZ_ASSERT(!ranges_.empty());
-        return ranges_.back().from;
-    }
-
-    CodePosition end() const {
-        MOZ_ASSERT(!ranges_.empty());
-        return ranges_.begin()->to;
-    }
-
-    size_t numRanges() const {
-        return ranges_.length();
-    }
-    const Range* getRange(size_t i) const {
-        return &ranges_[i];
-    }
-    void setLastProcessedRange(size_t range, mozilla::DebugOnly<CodePosition> pos) {
-        // If the range starts after pos, we may not be able to use
-        // it in the next lastProcessedRangeIfValid call.
-        MOZ_ASSERT(ranges_[range].from <= pos);
-        lastProcessedRange_ = range;
-    }
-    size_t lastProcessedRangeIfValid(CodePosition pos) const {
-        if (lastProcessedRange_ < ranges_.length() && ranges_[lastProcessedRange_].from <= pos)
-            return lastProcessedRange_;
-        return ranges_.length() - 1;
-    }
-
-    LAllocation* getAllocation() {
-        return &alloc_;
-    }
-    void setAllocation(LAllocation alloc) {
-        alloc_ = alloc;
-    }
-    void setSpillInterval(LiveInterval* spill) {
-        spillInterval_ = spill;
-    }
-    LiveInterval* spillInterval() {
-        return spillInterval_;
-    }
-    bool hasVreg() const {
-        return vreg_ != UINT32_MAX;
-    }
-    uint32_t vreg() const {
-        MOZ_ASSERT(hasVreg());
-        return vreg_;
-    }
-    uint32_t index() const {
-        return index_;
-    }
-    void setIndex(uint32_t index) {
-        index_ = index;
-    }
-    const Requirement* requirement() const {
-        return &requirement_;
-    }
-    void setRequirement(const Requirement& requirement) {
-        // A MUST_REUSE_INPUT requirement complicates regalloc too much; it
-        // should only be used as a hint.
-        MOZ_ASSERT(requirement.kind() != Requirement::MUST_REUSE_INPUT);
-        requirement_ = requirement;
-    }
-    bool addRequirement(const Requirement& newRequirement) {
-        return requirement_.mergeRequirement(newRequirement);
-    }
-    void addHint(const Requirement& newHint) {
-        // Unlike addRequirement, here in addHint we ignore merge failures,
-        // because these are just hints.
-        hint_.mergeRequirement(newHint);
-    }
-    const Requirement* hint() const {
-        return &hint_;
-    }
-    void setHint(const Requirement& hint) {
-        hint_ = hint;
-    }
-    bool isSpill() const {
-        return alloc_.isStackSlot();
-    }
-    bool splitFrom(CodePosition pos, LiveInterval* after);
-
-    void addUse(UsePosition* use);
-    void addUseAtEnd(UsePosition* use);
-    UsePosition* popUse();
-    UsePosition* nextUseAfter(CodePosition pos);
-    CodePosition nextUsePosAfter(CodePosition pos);
-
-    UsePositionIterator usesBegin() const {
-        return uses_.begin();
-    }
-
-    UsePositionIterator usesEnd() const {
-        return uses_.end();
-    }
-
-    bool usesEmpty() const {
-        return uses_.empty();
-    }
-
-    UsePosition* usesBack() {
-        return uses_.back();
-    }
-
-#ifdef DEBUG
-    void validateRanges();
-#endif
-
-    // Return a string describing the ranges in this LiveInterval. This is
-    // not re-entrant!
-    const char* rangesToString() const;
-
-    // Return a string describing this LiveInterval. This is not re-entrant!
-    const char* toString() const;
-
-    void dump() const;
-};
-
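Note that start() and end() above read ranges_.back() and ranges_.begin() respectively: the ranges_ vector is kept sorted from the latest range down to the earliest. A tiny standalone illustration of that ordering invariant (plain types, not the jit classes):

#include <cassert>
#include <vector>

struct Range { unsigned from, to; };  // half-open [from, to)

int main() {
    // An interval covering [4,8) and [12,20), stored latest-first as above.
    std::vector<Range> ranges = { {12, 20}, {4, 8} };

    unsigned start = ranges.back().from;  // earliest position covered: 4
    unsigned end = ranges.front().to;     // one past the latest position: 20
    assert(start == 4 && end == 20);
    return 0;
}
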
-/*
- * Represents all of the register allocation state associated with a virtual
- * register, including all associated intervals and pointers to relevant LIR
- * structures.
- */
-class VirtualRegister
-{
-    LNode* ins_;
-    LDefinition* def_;
-    Vector<LiveInterval*, 1, JitAllocPolicy> intervals_;
-
-    // Whether def_ is a temp or an output.
-    bool isTemp_ : 1;
-
-    void operator=(const VirtualRegister&) = delete;
-    VirtualRegister(const VirtualRegister&) = delete;
-
-  protected:
-    explicit VirtualRegister(TempAllocator& alloc)
-      : intervals_(alloc)
-    {}
-
-  public:
-    bool init(TempAllocator& alloc, LNode* ins, LDefinition* def,
-              bool isTemp)
-    {
-        MOZ_ASSERT(ins && !ins_);
-        ins_ = ins;
-        def_ = def;
-        isTemp_ = isTemp;
-        LiveInterval* initial = LiveInterval::New(alloc, def->virtualRegister(), 0);
-        if (!initial)
-            return false;
-        return intervals_.append(initial);
-    }
-    LBlock* block() {
-        return ins_->block();
-    }
-    LNode* ins() {
-        return ins_;
-    }
-    LDefinition* def() const {
-        return def_;
-    }
-    LDefinition::Type type() const {
-        return def()->type();
-    }
-    bool isTemp() const {
-        return isTemp_;
-    }
-    size_t numIntervals() const {
-        return intervals_.length();
-    }
-    LiveInterval* getInterval(size_t i) const {
-        return intervals_[i];
-    }
-    LiveInterval* lastInterval() const {
-        MOZ_ASSERT(numIntervals() > 0);
-        return getInterval(numIntervals() - 1);
-    }
-    void replaceInterval(LiveInterval* old, LiveInterval* interval) {
-        MOZ_ASSERT(intervals_[old->index()] == old);
-        interval->setIndex(old->index());
-        intervals_[old->index()] = interval;
-    }
-    bool addInterval(LiveInterval* interval) {
-        MOZ_ASSERT(interval->numRanges());
-        MOZ_ASSERT(interval->vreg() != 0);
-
-        // Preserve ascending order for faster lookups.
-        LiveInterval** found = nullptr;
-        LiveInterval** i;
-        for (i = intervals_.begin(); i != intervals_.end(); i++) {
-            if (!found && interval->start() < (*i)->start())
-                found = i;
-            if (found)
-                (*i)->setIndex((*i)->index() + 1);
-        }
-        if (!found)
-            found = intervals_.end();
-        interval->setIndex(found - intervals_.begin());
-        return intervals_.insert(found, interval);
-    }
-    void removeInterval(LiveInterval* interval) {
-        intervals_.erase(intervals_.begin() + interval->index());
-        for (size_t i = interval->index(), e = intervals_.length(); i < e; ++i)
-            intervals_[i]->setIndex(i);
-        interval->setIndex(-1);
-    }
-    bool isFloatReg() const {
-        return def_->isFloatReg();
-    }
-    bool isCompatibleReg(const AnyRegister& r) const {
-        return def_->isCompatibleReg(r);
-    }
-    bool isCompatibleVReg(const VirtualRegister& vr) const {
-        return def_->isCompatibleDef(*vr.def_);
-    }
-
-    LiveInterval* intervalFor(CodePosition pos);
-    LiveInterval* getFirstInterval();
-};
-
-// Index of the virtual registers in a graph. VREG is a subclass of
-// VirtualRegister extended with any allocator specific state for the vreg.
-template <typename VREG>
-class VirtualRegisterMap
-{
-  private:
-    FixedList<VREG> vregs_;
-
-    void operator=(const VirtualRegisterMap&) = delete;
-    VirtualRegisterMap(const VirtualRegisterMap&) = delete;
-
-  public:
-    VirtualRegisterMap()
-      : vregs_()
-    { }
-
-    bool init(MIRGenerator* gen, uint32_t numVregs) {
-        if (!vregs_.init(gen->alloc(), numVregs))
-            return false;
-        memset(&vregs_[0], 0, sizeof(VREG) * numVregs);
-        TempAllocator& alloc = gen->alloc();
-        for (uint32_t i = 0; i < numVregs; i++)
-            new(&vregs_[i]) VREG(alloc);
-        return true;
-    }
-    VREG& operator[](unsigned int index) {
-        return vregs_[index];
-    }
-    VREG& operator[](const LAllocation* alloc) {
-        MOZ_ASSERT(alloc->isUse());
-        return vregs_[alloc->toUse()->virtualRegister()];
-    }
-    VREG& operator[](const LDefinition* def) {
-        return vregs_[def->virtualRegister()];
-    }
-    uint32_t numVirtualRegisters() const {
-        return vregs_.length();
-    }
-};
-
-static inline bool
-IsNunbox(VirtualRegister* vreg)
-{
-#ifdef JS_NUNBOX32
-    return vreg->type() == LDefinition::TYPE ||
-           vreg->type() == LDefinition::PAYLOAD;
-#else
-    return false;
-#endif
-}
-
-static inline bool
-IsSlotsOrElements(VirtualRegister* vreg)
-{
-    return vreg->type() == LDefinition::SLOTS;
-}
-
-static inline bool
-IsTraceable(VirtualRegister* reg)
-{
-    if (reg->type() == LDefinition::OBJECT)
-        return true;
-#ifdef JS_PUNBOX64
-    if (reg->type() == LDefinition::BOX)
-        return true;
-#endif
-    return false;
-}
-
-template <typename VREG>
-class LiveRangeAllocator : protected RegisterAllocator
-{
-  protected:
-    // Computed information
-    BitSet* liveIn;
-    VirtualRegisterMap<VREG> vregs;
-    mozilla::Array<LiveInterval*, AnyRegister::Total> fixedIntervals;
-
-    // Union of all ranges in fixedIntervals, used to quickly determine
-    // whether an interval intersects with a fixed register.
-    LiveInterval* fixedIntervalsUnion;
-
-    // Allocation state
-    StackSlotAllocator stackSlotAllocator;
-
-    LiveRangeAllocator(MIRGenerator* mir, LIRGenerator* lir, LIRGraph& graph)
-      : RegisterAllocator(mir, lir, graph),
-        liveIn(nullptr),
-        fixedIntervalsUnion(nullptr)
-    {
-    }
-
-    bool buildLivenessInfo();
-
-    bool init();
-
-    bool addFixedRangeAtHead(AnyRegister reg, CodePosition from, CodePosition to) {
-        if (!fixedIntervals[reg.code()]->addRangeAtHead(from, to))
-            return false;
-        return fixedIntervalsUnion->addRangeAtHead(from, to);
-    }
-
-    void validateVirtualRegisters()
-    {
-#ifdef DEBUG
-        if (!js_JitOptions.checkGraphConsistency)
-            return;
-
-        for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
-            VirtualRegister* reg = &vregs[i];
-
-            LiveInterval* prev = nullptr;
-            for (size_t j = 0; j < reg->numIntervals(); j++) {
-                LiveInterval* interval = reg->getInterval(j);
-                MOZ_ASSERT(interval->vreg() == i);
-                MOZ_ASSERT(interval->index() == j);
-
-                if (interval->numRanges() == 0)
-                    continue;
-
-                MOZ_ASSERT_IF(prev, prev->end() <= interval->start());
-                interval->validateRanges();
-
-                prev = interval;
-            }
-        }
-#endif
-    }
-
-    bool addMove(LMoveGroup* moves, LiveInterval* from, LiveInterval* to, LDefinition::Type type) {
-        MOZ_ASSERT(*from->getAllocation() != *to->getAllocation());
-        return moves->add(from->getAllocation(), to->getAllocation(), type);
-    }
-
-    bool moveInput(LInstruction* ins, LiveInterval* from, LiveInterval* to, LDefinition::Type type) {
-        if (*from->getAllocation() == *to->getAllocation())
-            return true;
-        LMoveGroup* moves = getInputMoveGroup(ins);
-        return addMove(moves, from, to, type);
-    }
-
-    bool moveAfter(LInstruction* ins, LiveInterval* from, LiveInterval* to, LDefinition::Type type) {
-        if (*from->getAllocation() == *to->getAllocation())
-            return true;
-        LMoveGroup* moves = getMoveGroupAfter(ins);
-        return addMove(moves, from, to, type);
-    }
-
-    bool moveAtExit(LBlock* block, LiveInterval* from, LiveInterval* to, LDefinition::Type type) {
-        if (*from->getAllocation() == *to->getAllocation())
-            return true;
-        LMoveGroup* moves = block->getExitMoveGroup(alloc());
-        return addMove(moves, from, to, type);
-    }
-
-    bool moveAtEntry(LBlock* block, LiveInterval* from, LiveInterval* to, LDefinition::Type type) {
-        if (*from->getAllocation() == *to->getAllocation())
-            return true;
-        LMoveGroup* moves = block->getEntryMoveGroup(alloc());
-        return addMove(moves, from, to, type);
-    }
-
-    size_t findFirstNonCallSafepoint(CodePosition from) const
-    {
-        size_t i = 0;
-        for (; i < graph.numNonCallSafepoints(); i++) {
-            const LInstruction* ins = graph.getNonCallSafepoint(i);
-            if (from <= inputOf(ins))
-                break;
-        }
-        return i;
-    }
-
-    void addLiveRegistersForInterval(VirtualRegister* reg, LiveInterval* interval)
-    {
-        // Fill in the live register sets for all non-call safepoints.
-        LAllocation* a = interval->getAllocation();
-        if (!a->isRegister())
-            return;
-
-        // Don't add output registers to the safepoint.
-        CodePosition start = interval->start();
-        if (interval->index() == 0 && !reg->isTemp()) {
-#ifdef CHECK_OSIPOINT_REGISTERS
-            // We don't add the output register to the safepoint,
-            // but it still might get added as one of the inputs.
-            // So eagerly add this reg to the safepoint clobbered registers.
-            if (reg->ins()->isInstruction()) {
-                if (LSafepoint* safepoint = reg->ins()->toInstruction()->safepoint())
-                    safepoint->addClobberedRegister(a->toRegister());
-            }
-#endif
-            start = start.next();
-        }
-
-        size_t i = findFirstNonCallSafepoint(start);
-        for (; i < graph.numNonCallSafepoints(); i++) {
-            LInstruction* ins = graph.getNonCallSafepoint(i);
-            CodePosition pos = inputOf(ins);
-
-            // Safepoints are sorted, so we can shortcut out of this loop
-            // if we go out of range.
-            if (interval->end() <= pos)
-                break;
-
-            if (!interval->covers(pos))
-                continue;
-
-            LSafepoint* safepoint = ins->safepoint();
-            safepoint->addLiveRegister(a->toRegister());
-
-#ifdef CHECK_OSIPOINT_REGISTERS
-            if (reg->isTemp())
-                safepoint->addClobberedRegister(a->toRegister());
-#endif
-        }
-    }
-
-    // Finds the first safepoint that is within range of an interval.
-    size_t findFirstSafepoint(const LiveInterval* interval, size_t startFrom) const
-    {
-        size_t i = startFrom;
-        for (; i < graph.numSafepoints(); i++) {
-            LInstruction* ins = graph.getSafepoint(i);
-            if (interval->start() <= inputOf(ins))
-                break;
-        }
-        return i;
-    }
-
-    void dumpVregs();
-};
-
-} // namespace jit
-} // namespace js
-
-#endif /* jit_LiveRangeAllocator_h */
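One non-obvious detail in the deleted LiveRangeAllocator above is how addLiveRegistersForInterval walks safepoints: because safepoints are stored in instruction order, the scan can start at the first safepoint at or after the interval's start and stop as soon as one lies past its end. A standalone sketch of that bound-and-stop pattern, simplified to a single [start, end) range and using std::lower_bound where the original findFirstNonCallSafepoint does a linear scan:

#include <algorithm>
#include <cassert>
#include <vector>

std::vector<unsigned>
safepointsCoveredBy(const std::vector<unsigned>& sortedSafepoints,
                    unsigned start, unsigned end)  // interval [start, end)
{
    std::vector<unsigned> covered;
    auto it = std::lower_bound(sortedSafepoints.begin(), sortedSafepoints.end(), start);
    for (; it != sortedSafepoints.end() && *it < end; ++it)
        covered.push_back(*it);  // safepoints are sorted, so we can stop early
    return covered;
}

int main() {
    std::vector<unsigned> safepoints = {2, 6, 10, 14};
    assert((safepointsCoveredBy(safepoints, 5, 12) == std::vector<unsigned>{6, 10}));
    return 0;
}
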
--- a/js/src/jit/RegisterAllocator.cpp
+++ b/js/src/jit/RegisterAllocator.cpp
@@ -186,18 +186,18 @@ AllocationIntegrityState::checkIntegrity
         ins = *iter;
 
         // Follow values through assignments in move groups. All assignments in
         // a move group are considered to happen simultaneously, so stop after
         // the first matching move is found.
         if (ins->isMoveGroup()) {
             LMoveGroup* group = ins->toMoveGroup();
             for (int i = group->numMoves() - 1; i >= 0; i--) {
-                if (*group->getMove(i).to() == alloc) {
-                    alloc = *group->getMove(i).from();
+                if (group->getMove(i).to() == alloc) {
+                    alloc = group->getMove(i).from();
                     break;
                 }
             }
         }
 
         const InstructionInfo& info = instructions[ins->id()];
 
         // Make sure the physical location being tracked is not clobbered by
@@ -407,18 +407,18 @@ AllocationIntegrityState::dump()
             if (input != CodePosition::MIN)
                 fprintf(stderr, "%u,%u ", input.bits(), output.bits());
             fprintf(stderr, "%s]", ins->opName());
 
             if (ins->isMoveGroup()) {
                 LMoveGroup* group = ins->toMoveGroup();
                 for (int i = group->numMoves() - 1; i >= 0; i--) {
                     // Use two printfs, as LAllocation::toString is not reentrant.
-                    fprintf(stderr, " [%s", group->getMove(i).from()->toString());
-                    fprintf(stderr, " -> %s]", group->getMove(i).to()->toString());
+                    fprintf(stderr, " [%s", group->getMove(i).from().toString());
+                    fprintf(stderr, " -> %s]", group->getMove(i).to().toString());
                 }
                 fprintf(stderr, "\n");
                 continue;
             }
 
             for (size_t i = 0; i < ins->numDefs(); i++)
                 fprintf(stderr, " [def %s]", ins->getDef(i)->toString());
 
@@ -547,18 +547,18 @@ RegisterAllocator::dumpInstructions()
             if (ins->id() != 0)
                 fprintf(stderr, "%u,%u ", inputOf(ins).bits(), outputOf(ins).bits());
             fprintf(stderr, "%s]", ins->opName());
 
             if (ins->isMoveGroup()) {
                 LMoveGroup* group = ins->toMoveGroup();
                 for (int i = group->numMoves() - 1; i >= 0; i--) {
                     // Use two printfs, as LAllocation::toString is not reentrant.
-                    fprintf(stderr, " [%s", group->getMove(i).from()->toString());
-                    fprintf(stderr, " -> %s]", group->getMove(i).to()->toString());
+                    fprintf(stderr, " [%s", group->getMove(i).from().toString());
+                    fprintf(stderr, " -> %s]", group->getMove(i).to().toString());
                 }
                 fprintf(stderr, "\n");
                 continue;
             }
 
             for (size_t i = 0; i < ins->numDefs(); i++)
                 fprintf(stderr, " [def %s]", ins->getDef(i)->toString());
 
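The move-group hunks above lean on the "simultaneous assignment" semantics described in checkIntegrity: within one LMoveGroup every source is read before any destination is written, which is why the integrity checker can trace a value backwards and stop at the first move whose destination matches. An illustrative standalone model of that semantics (hypothetical code, not the jit classes):

#include <cassert>
#include <map>
#include <string>
#include <utility>
#include <vector>

using Loc = std::string;

// Apply a parallel move group: all reads come from the old state, so the
// order of the moves within the group does not matter.
std::map<Loc, int> applyMoveGroup(std::map<Loc, int> state,
                                  const std::vector<std::pair<Loc, Loc>>& moves)
{
    std::map<Loc, int> next = state;
    for (const auto& m : moves)
        next[m.second] = state[m.first];  // m.first -> m.second
    return next;
}

int main() {
    std::map<Loc, int> state = { {"r0", 1}, {"r1", 2} };
    // Swapping r0 and r1 expressed as a single parallel move group.
    auto after = applyMoveGroup(state, { {"r0", "r1"}, {"r1", "r0"} });
    assert(after["r0"] == 2 && after["r1"] == 1);
    return 0;
}
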
--- a/js/src/jit/StupidAllocator.cpp
+++ b/js/src/jit/StupidAllocator.cpp
@@ -181,21 +181,21 @@ StupidAllocator::allocateRegister(LInstr
     return best;
 }
 
 void
 StupidAllocator::syncRegister(LInstruction* ins, RegisterIndex index)
 {
     if (registers[index].dirty) {
         LMoveGroup* input = getInputMoveGroup(ins);
-        LAllocation* source = new(alloc()) LAllocation(registers[index].reg);
+        LAllocation source(registers[index].reg);
 
         uint32_t existing = registers[index].vreg;
         LAllocation* dest = stackLocation(existing);
-        input->addAfter(source, dest, registers[index].type);
+        input->addAfter(source, *dest, registers[index].type);
 
         registers[index].dirty = false;
     }
 }
 
 void
 StupidAllocator::evictRegister(LInstruction* ins, RegisterIndex index)
 {
@@ -214,18 +214,18 @@ StupidAllocator::evictAliasedRegister(LI
 }
 
 void
 StupidAllocator::loadRegister(LInstruction* ins, uint32_t vreg, RegisterIndex index, LDefinition::Type type)
 {
     // Load a vreg from its stack location to a register.
     LMoveGroup* input = getInputMoveGroup(ins);
     LAllocation* source = stackLocation(vreg);
-    LAllocation* dest = new(alloc()) LAllocation(registers[index].reg);
-    input->addAfter(source, dest, type);
+    LAllocation dest(registers[index].reg);
+    input->addAfter(*source, dest, type);
     registers[index].set(vreg, ins);
     registers[index].type = type;
 }
 
 StupidAllocator::RegisterIndex
 StupidAllocator::findExistingRegister(uint32_t vreg)
 {
     for (size_t i = 0; i < registerCount; i++) {
@@ -316,17 +316,17 @@ StupidAllocator::syncForBlockEnd(LBlock*
                 if (input->numMoves() == 0) {
                     group = input;
                 } else {
                     group = LMoveGroup::New(alloc());
                     block->insertAfter(input, group);
                 }
             }
 
-            group->add(source, dest, phi->getDef(0)->type());
+            group->add(*source, *dest, phi->getDef(0)->type());
         }
     }
 }
 
 void
 StupidAllocator::allocateForInstruction(LInstruction* ins)
 {
     // Sync all registers before making a call.
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -1004,21 +1004,21 @@ CodeGeneratorARM::visitPowHalfD(LPowHalf
     masm.ma_vimm(0.0, ScratchDoubleReg);
     masm.ma_vadd(ScratchDoubleReg, input, output);
     masm.ma_vsqrt(output, output);
 
     masm.bind(&done);
 }
 
 MoveOperand
-CodeGeneratorARM::toMoveOperand(const LAllocation* a) const
+CodeGeneratorARM::toMoveOperand(LAllocation a) const
 {
-    if (a->isGeneralReg())
+    if (a.isGeneralReg())
         return MoveOperand(ToRegister(a));
-    if (a->isFloatReg())
+    if (a.isFloatReg())
         return MoveOperand(ToFloatRegister(a));
     int32_t offset = ToStackOffset(a);
     MOZ_ASSERT((offset & 3) == 0);
     return MoveOperand(StackPointer, offset);
 }
 
 class js::jit::OutOfLineTableSwitch : public OutOfLineCodeBase<CodeGeneratorARM>
 {
--- a/js/src/jit/arm/CodeGenerator-arm.h
+++ b/js/src/jit/arm/CodeGenerator-arm.h
@@ -38,17 +38,17 @@ class CodeGeneratorARM : public CodeGene
     }
     inline Operand ToOperand(const LAllocation* a) {
         return ToOperand(*a);
     }
     inline Operand ToOperand(const LDefinition* def) {
         return ToOperand(def->output());
     }
 
-    MoveOperand toMoveOperand(const LAllocation* a) const;
+    MoveOperand toMoveOperand(LAllocation a) const;
 
     void bailoutIf(Assembler::Condition condition, LSnapshot* snapshot);
     void bailoutFrom(Label* label, LSnapshot* snapshot);
     void bailout(LSnapshot* snapshot);
 
     template <typename T1, typename T2>
     void bailoutCmpPtr(Assembler::Condition c, T1 lhs, T2 rhs, LSnapshot* snapshot) {
         masm.cmpPtr(lhs, rhs);
--- a/js/src/jit/mips/CodeGenerator-mips.cpp
+++ b/js/src/jit/mips/CodeGenerator-mips.cpp
@@ -943,21 +943,21 @@ CodeGeneratorMIPS::visitPowHalfD(LPowHal
     masm.loadConstantDouble(0.0, ScratchDoubleReg);
     masm.as_addd(output, input, ScratchDoubleReg);
     masm.as_sqrtd(output, output);
 
     masm.bind(&done);
 }
 
 MoveOperand
-CodeGeneratorMIPS::toMoveOperand(const LAllocation* a) const
+CodeGeneratorMIPS::toMoveOperand(LAllocation a) const
 {
-    if (a->isGeneralReg())
+    if (a.isGeneralReg())
         return MoveOperand(ToRegister(a));
-    if (a->isFloatReg()) {
+    if (a.isFloatReg()) {
         return MoveOperand(ToFloatRegister(a));
     }
     int32_t offset = ToStackOffset(a);
     MOZ_ASSERT((offset & 3) == 0);
 
     return MoveOperand(StackPointer, offset);
 }
 
--- a/js/src/jit/mips/CodeGenerator-mips.h
+++ b/js/src/jit/mips/CodeGenerator-mips.h
@@ -53,17 +53,17 @@ class CodeGeneratorMIPS : public CodeGen
     }
     inline Operand ToOperand(const LAllocation* a) {
         return ToOperand(*a);
     }
     inline Operand ToOperand(const LDefinition* def) {
         return ToOperand(def->output());
     }
 
-    MoveOperand toMoveOperand(const LAllocation* a) const;
+    MoveOperand toMoveOperand(LAllocation a) const;
 
     template <typename T1, typename T2>
     void bailoutCmp32(Assembler::Condition c, T1 lhs, T2 rhs, LSnapshot* snapshot) {
         Label skip;
         masm.ma_b(lhs, rhs, &skip, Assembler::InvertCondition(c), ShortJump);
         bailout(snapshot);
         masm.bind(&skip);
     }
--- a/js/src/jit/shared/CodeGenerator-shared.h
+++ b/js/src/jit/shared/CodeGenerator-shared.h
@@ -230,20 +230,23 @@ class CodeGeneratorShared : public LElem
         // by sizeof(Value) is desirable since everything on the stack is a Value.
         // Note that paddedLocalSlotCount() aligns to at least a Value boundary
         // specifically to support this.
         MOZ_ASSERT(offset >= 0);
         MOZ_ASSERT(offset % sizeof(Value) == 0);
         return offset;
     }
 
+    inline int32_t ToStackOffset(LAllocation a) const {
+        if (a.isArgument())
+            return ArgToStackOffset(a.toArgument()->index());
+        return SlotToStackOffset(a.toStackSlot()->slot());
+    }
     inline int32_t ToStackOffset(const LAllocation* a) const {
-        if (a->isArgument())
-            return ArgToStackOffset(a->toArgument()->index());
-        return SlotToStackOffset(a->toStackSlot()->slot());
+        return ToStackOffset(*a);
     }
 
     uint32_t frameSize() const {
         return frameClass_ == FrameSizeClass::None() ? frameDepth_ : frameClass_.frameSize();
     }
 
   protected:
     // Ensure the cache is an IonCache while expecting the size of the derived
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
@@ -1523,21 +1523,21 @@ CodeGeneratorX86Shared::visitUrshD(LUrsh
         MOZ_ASSERT(ToRegister(rhs) == ecx);
         masm.shrl_cl(lhs);
     }
 
     masm.convertUInt32ToDouble(lhs, out);
 }
 
 MoveOperand
-CodeGeneratorX86Shared::toMoveOperand(const LAllocation* a) const
+CodeGeneratorX86Shared::toMoveOperand(LAllocation a) const
 {
-    if (a->isGeneralReg())
+    if (a.isGeneralReg())
         return MoveOperand(ToRegister(a));
-    if (a->isFloatReg())
+    if (a.isFloatReg())
         return MoveOperand(ToFloatRegister(a));
     return MoveOperand(StackPointer, ToStackOffset(a));
 }
 
 class OutOfLineTableSwitch : public OutOfLineCodeBase<CodeGeneratorX86Shared>
 {
     MTableSwitch* mir_;
     CodeLabel jumpLabel_;
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.h
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.h
@@ -110,17 +110,17 @@ class CodeGeneratorX86Shared : public Co
     }
     inline Operand ToOperand(const LAllocation* a) {
         return ToOperand(*a);
     }
     inline Operand ToOperand(const LDefinition* def) {
         return ToOperand(def->output());
     }
 
-    MoveOperand toMoveOperand(const LAllocation* a) const;
+    MoveOperand toMoveOperand(LAllocation a) const;
 
     void bailoutIf(Assembler::Condition condition, LSnapshot* snapshot);
     void bailoutIf(Assembler::DoubleCondition condition, LSnapshot* snapshot);
     void bailoutFrom(Label* label, LSnapshot* snapshot);
     void bailout(LSnapshot* snapshot);
 
     template <typename T1, typename T2>
     void bailoutCmpPtr(Assembler::Condition c, T1 lhs, T2 rhs, LSnapshot* snapshot) {
--- a/js/src/jsexn.h
+++ b/js/src/jsexn.h
@@ -7,16 +7,17 @@
 /*
  * JS runtime exception classes.
  */
 
 #ifndef jsexn_h
 #define jsexn_h
 
 #include "jsapi.h"
+#include "jscntxt.h"
 #include "NamespaceImports.h"
 
 namespace js {
 class ErrorObject;
 
 JSErrorReport*
 CopyErrorReport(JSContext* cx, JSErrorReport* report);
 
--- a/js/src/moz.build
+++ b/js/src/moz.build
@@ -208,17 +208,16 @@ UNIFIED_SOURCES += [
     'jit/IonOptimizationLevels.cpp',
     'jit/JitcodeMap.cpp',
     'jit/JitFrames.cpp',
     'jit/JitOptions.cpp',
     'jit/JitSpewer.cpp',
     'jit/JSONSpewer.cpp',
     'jit/LICM.cpp',
     'jit/LIR.cpp',
-    'jit/LiveRangeAllocator.cpp',
     'jit/LoopUnroller.cpp',
     'jit/Lowering.cpp',
     'jit/MacroAssembler.cpp',
     'jit/MCallOptimize.cpp',
     'jit/MIR.cpp',
     'jit/MIRGraph.cpp',
     'jit/MoveResolver.cpp',
     'jit/OptimizationTracking.cpp',