Remove spillRegs() and rework LSRA spill intervals (bug 714428, r=dvander)
author Jan de Mooij <jdemooij@mozilla.com>
date Fri, 06 Jan 2012 11:21:34 +0100
changeset 112387 a89c5ef5da366f4dfe8f7e1f337253f718d08b8c
parent 112386 cf07f07279f15a3d836f3aed6d4b46cd3d97a14d
child 112388 012b6e6fa0cb9d6958831d3b8531016ae9773663
push id 239
push user akeybl@mozilla.com
push date Thu, 03 Jan 2013 21:54:43 +0000
treeherder mozilla-release@3a7b66445659
reviewers dvander
bugs 714428
milestone 12.0a1
js/src/ion/LIR-Common.h
js/src/ion/LIR.h
js/src/ion/LinearScan.cpp
js/src/ion/LinearScan.h
--- a/js/src/ion/LIR-Common.h
+++ b/js/src/ion/LIR-Common.h
@@ -195,17 +195,17 @@ class LGoto : public LInstructionHelper<
       : block_(block)
     { }
 
     MBasicBlock *target() const {
         return block_;
     }
 };
 
-class LNewArray : public LVMCallInstructionHelper<LDefinition::OBJECT, 1, 0, 0>
+class LNewArray : public LCallInstructionHelper<1, 0, 0>
 {
   public:
     LIR_HEADER(NewArray);
 
     MNewArray *mir() const {
         return mir_->toNewArray();
     }
 };
@@ -1117,17 +1117,17 @@ class LStringLength : public LInstructio
     const LAllocation *string() {
         return getOperand(0);
     }
     const LDefinition *output() {
         return getDef(0);
     }
 };
 
-class LLoadPropertyGeneric : public LVMCallInstructionHelper<LDefinition::BOX, BOX_PIECES, 1, 0>
+class LLoadPropertyGeneric : public LCallInstructionHelper<BOX_PIECES, 1, 0>
 {
   public:
     LIR_HEADER(LoadPropertyGeneric);
 
     MLoadProperty *mir() const {
         return mir_->toLoadProperty();
     }
 };
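
With LVMCallInstructionHelper removed, a VM-call LIR node no longer encodes its return-register mask in the base-class template; it derives directly from LCallInstructionHelper with only its def/operand/temp counts. A minimal sketch of how such a node would be declared after this patch, following the LNewArray pattern above (LNewObject and MNewObject are hypothetical stand-ins, not part of this changeset):

    // Hypothetical VM-call instruction: one definition, no operands, no temps.
    // isCall() is inherited from LCallInstructionHelper; no spill mask needed.
    class LNewObject : public LCallInstructionHelper<1, 0, 0>
    {
      public:
        LIR_HEADER(NewObject);

        MNewObject *mir() const {
            return mir_->toNewObject();
        }
    };
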
--- a/js/src/ion/LIR.h
+++ b/js/src/ion/LIR.h
@@ -639,22 +639,16 @@ class LInstruction
     // register is an LUse with a TEMPORARY policy, or a fixed register.
     virtual size_t numTemps() const = 0;
     virtual LDefinition *getTemp(size_t index) = 0;
     virtual void setTemp(size_t index, const LDefinition &a) = 0;
 
     virtual bool isCall() const {
         return false;
     };
-    virtual RegisterSet &spillRegs() const {
-        JS_NOT_REACHED("spillRegs should be guarded by isCall().");
-        static RegisterSet regs;
-        return regs;
-    }
-
     uint32 id() const {
         return id_;
     }
     void setId(uint32 id) {
         JS_ASSERT(!id_);
         JS_ASSERT(id);
         id_ = id;
     }
@@ -814,57 +808,18 @@ class LInstructionHelper : public LInstr
 
 template <size_t Defs, size_t Operands, size_t Temps>
 class LCallInstructionHelper : public LInstructionHelper<Defs, Operands, Temps>
 {
   public:
     virtual bool isCall() const {
         return true;
     }
-    virtual RegisterSet &spillRegs() const {
-        JS_ASSERT(Defs == BOX_PIECES);
-        static RegisterSet regs(
-                  GeneralRegisterSet::Not(GeneralRegisterSet(Registers::JSCallMask)),
-                  FloatRegisterSet::All()
-        );
-        return regs;
-    }
 };
 
-template <LDefinition::Type DefType, size_t Defs, size_t Operands, size_t Temps>
-class LVMCallInstructionHelper : public LCallInstructionHelper<Defs, Operands, Temps>
-{
-  public:
-    virtual RegisterSet &spillRegs() const {
-        static RegisterSet regs(
-            GeneralRegisterSet::Not(GeneralRegisterSet(defMask())),
-            FloatRegisterSet::All()
-        );
-        return regs;
-    }
-
-  private:
-    static uint32 defMask() {
-        switch (DefType) {
-          case LDefinition::BOX:
-            JS_ASSERT(Defs == BOX_PIECES);
-            return Registers::JSCallMask;
-          case LDefinition::GENERAL:
-          case LDefinition::OBJECT:
-            JS_ASSERT(Defs == 1);
-            return Registers::CallMask;
-          default:
-            JS_NOT_REACHED("unexpected return type (void)");
-            return 0;
-        }
-        return 0;
-    }
-};
-
-
 // An LSnapshot is the reflection of an MResumePoint in LIR. Unlike MResumePoints,
 // they cannot be shared, as they are filled in by the register allocator in
 // order to capture the precise low-level stack state in between an
 // instruction's input and output. During code generation, LSnapshots are
 // compressed and saved in the compiled script.
 class LSnapshot : public TempObject
 {
   private:
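
After this change, isCall() is the only call-specific hook left on LInstruction; the allocator no longer asks the instruction which registers to spill. A rough comparison of the two schemes, as I read the patch (comments only, not code from the tree):

    // Old scheme: each call instruction reported a spill set, e.g. for a VM
    // call returning a Value:
    //   spillRegs() == AllRegisters - JSCallMask (the boxed return registers)
    // and buildLivenessInfo() additionally subtracted preset temps and took
    // one general register per default-policy temp.
    //
    // New scheme: buildLivenessInfo() requests a spill interval on every
    // register at a call, and the call's own defs and temps are exempted
    // later via LiveInterval::isSpillInterval()/requireSpill().
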
--- a/js/src/ion/LinearScan.cpp
+++ b/js/src/ion/LinearScan.cpp
@@ -154,30 +154,16 @@ LiveInterval::setFrom(CodePosition from)
             ranges_.erase(&ranges_.back());
         } else {
             ranges_.back().from = from;
             break;
         }
     }
 }
 
-CodePosition
-LiveInterval::start()
-{
-    JS_ASSERT(!ranges_.empty());
-    return ranges_.back().from;
-}
-
-CodePosition
-LiveInterval::end()
-{
-    JS_ASSERT(!ranges_.empty());
-    return ranges_.begin()->to;
-}
-
 bool
 LiveInterval::covers(CodePosition pos)
 {
     for (size_t i = 0; i < ranges_.length(); i++) {
         if (ranges_[i].to <= pos)
             return false;
         if (ranges_[i].from <= pos)
             return true;
@@ -216,28 +202,16 @@ LiveInterval::intersect(LiveInterval *ot
                 return ranges_[i].from;
             j++;
         }
     }
 
     return CodePosition::MIN;
 }
 
-size_t
-LiveInterval::numRanges()
-{
-    return ranges_.length();
-}
-
-LiveInterval::Range *
-LiveInterval::getRange(size_t i)
-{
-    return &ranges_[i];
-}
-
 /*
  * This function takes the callee interval and moves all ranges following or
  * including provided position to the target interval. Additionally, if a
  * range in the callee interval spans the given position, it is split and the
  * latter half is placed in the target interval.
  *
  * This function should only be called if it is known that the interval should
  * actually be split (and, presumably, a move inserted). As such, it is an
@@ -495,38 +469,17 @@ LinearScanAllocator::buildLivenessInfo()
                                                outputOf(block->lastId()).next());
         }
 
         // Shorten the front end of live intervals for live variables to their
         // point of definition, if found.
         for (LInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) {
             // Calls may clobber registers, so force a spill and reload around the callsite.
             if (ins->isCall()) {
-                // Leave enough registers available for temporaries.
-                RegisterSet toSpill = ins->spillRegs();
-
-                // Take fixed regs first.
-                for (size_t i = 0; i < ins->numTemps(); i++) {
-                    LDefinition *temp = ins->getTemp(i);
-                    if (temp->isPreset()) {
-                        AnyRegister reg = temp->output()->toRegister();
-                        toSpill.take(reg);
-                    }
-                }
-
-                for (size_t i = 0; i < ins->numTemps(); i++) {
-                    LDefinition *temp = ins->getTemp(i);
-                    if (!temp->isPreset()) {
-                        JS_ASSERT(temp->policy() == LDefinition::DEFAULT ||
-                                  temp->policy() == LDefinition::MUST_REUSE_INPUT);
-                        toSpill.takeGeneral();
-                    }
-                }
-
-                for (AnyRegisterIterator iter(toSpill); iter.more(); iter++)
+                for (AnyRegisterIterator iter(RegisterSet::All()); iter.more(); iter++)
                     addSpillInterval(*ins, Requirement(LAllocation(*iter)));
             }
 
             for (size_t i = 0; i < ins->numDefs(); i++) {
                 if (ins->getDef(i)->policy() != LDefinition::PASSTHROUGH) {
                     LDefinition *def = ins->getDef(i);
                     CodePosition from(inputOf(*ins));
 
@@ -549,16 +502,20 @@ LinearScanAllocator::buildLivenessInfo()
             for (LInstruction::InputIterator alloc(**ins); alloc.more(); alloc.next())
             {
                 if (alloc->isUse()) {
                     LUse *use = alloc->toUse();
 
                     // The first instruction, LLabel, has no uses.
                     JS_ASSERT(inputOf(*ins) > outputOf(block->firstId()));
 
+                    // Call uses should always be at-start, since all registers (except temps
+                    // and defs) are spilled.
+                    JS_ASSERT_IF(ins->isCall() && !alloc.isSnapshotInput(), use->usedAtStart());
+
                     CodePosition endPos = use->usedAtStart() ? inputOf(*ins) : outputOf(*ins);
                     LiveInterval *interval = vregs[use].getInterval(0);
                     interval->addRange(inputOf(block->firstId()), endPos);
                     interval->addUse(new UsePosition(use, endPos));
 
                     live->insert(use->virtualRegister());
                 }
             }
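
The replacement loop above is the heart of the patch: rather than computing a per-call mask, the allocator pins every allocatable register across the call. A commented sketch of that loop (addSpillInterval()'s body is not part of this diff, so the comments describe its assumed behaviour):

    // At a call instruction, request a fixed spill interval for each register.
    // Such intervals are assumed to carry no virtual register, so
    // LiveInterval::isSpillInterval() identifies them and requireSpill()
    // decides later whether a competing interval really must be evicted.
    for (AnyRegisterIterator iter(RegisterSet::All()); iter.more(); iter++)
        addSpillInterval(*ins, Requirement(LAllocation(*iter)));
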
@@ -1208,16 +1165,23 @@ LinearScanAllocator::assign(LAllocation 
                 return false;
         }
     }
 
     if (allocation.isRegister()) {
         // Split the blocking interval if it exists
         for (IntervalIterator i(active.begin()); i != active.end(); i++) {
             if (i->getAllocation()->isRegister() && *i->getAllocation() == allocation) {
+                // Allow temps and defs of a call instruction to have the same register
+                // as a spill interval.
+                if (i->isSpillInterval() && !i->requireSpill(current))
+                    continue;
+                if (current->isSpillInterval() && !current->requireSpill(*i))
+                    continue;
+
                 IonSpew(IonSpew_RegAlloc, " Splitting active interval %u = [%u, %u]",
                         i->reg()->ins()->id(), i->start().pos(), i->end().pos());
 
                 JS_ASSERT(i->start() != current->start());
                 JS_ASSERT(i->covers(current->start()));
                 JS_ASSERT(i->start() != current->start());
 
                 if (!splitInterval(*i, current->start()))
@@ -1431,16 +1395,24 @@ LinearScanAllocator::findBestFreeRegiste
     CodePosition freeUntilPos[AnyRegister::Total];
     bool needFloat = current->reg()->isDouble();
     for (size_t i = 0; i < AnyRegister::Total; i++) {
         AnyRegister reg = AnyRegister::FromCode(i);
         if (reg.allocatable() && reg.isFloat() == needFloat)
             freeUntilPos[i] = CodePosition::MAX;
     }
     for (IntervalIterator i(active.begin()); i != active.end(); i++) {
+        // Allow temps and defs of a call instruction to have the same register
+        // as a spill interval.
+        if (i->isSpillInterval() && !i->requireSpill(current)) {
+            IonSpew(IonSpew_RegAlloc, "   Register %s free (used by spill interval)",
+                    i->getAllocation()->toRegister().name());
+            continue;
+        }
+
         if (i->getAllocation()->isRegister()) {
             AnyRegister reg = i->getAllocation()->toRegister();
             IonSpew(IonSpew_RegAlloc, "   Register %s not free", reg.name());
             freeUntilPos[reg.code()] = CodePosition::MIN;
         }
     }
     for (IntervalIterator i(inactive.begin()); i != inactive.end(); i++) {
         if (i->getAllocation()->isRegister()) {
@@ -1565,18 +1537,23 @@ LinearScanAllocator::findBestBlockedRegi
  * Intuitively, it is a bug if any allocated intervals exist which can not
  * coexist.
  */
 bool
 LinearScanAllocator::canCoexist(LiveInterval *a, LiveInterval *b)
 {
     LAllocation *aa = a->getAllocation();
     LAllocation *ba = b->getAllocation();
-    if (aa->isRegister() && ba->isRegister() && aa->toRegister() == ba->toRegister())
+    if (aa->isRegister() && ba->isRegister() && aa->toRegister() == ba->toRegister()) {
+        if (a->isSpillInterval() && !a->requireSpill(b))
+            return true;
+        if (b->isSpillInterval() && !b->requireSpill(a))
+            return true;
         return a->intersect(b) == CodePosition::MIN;
+    }
     return true;
 }
 
 LMoveGroup *
 LinearScanAllocator::getMoveGroupBefore(CodePosition pos)
 {
     VirtualRegister *vreg = &vregs[pos];
     JS_ASSERT(vreg->ins());
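
Taken together, the three LinearScan.cpp hunks above make a call's spill intervals blocking for everything except the call's own outputs and temps. In outline (my summary, not code from the patch):

    // When interval `current` is assigned a register already held by an
    // active interval `i`:
    //   1. If `i` is a spill interval and `current` is the call's own def or
    //      temp (requireSpill() returns false), both keep the register.
    //   2. Symmetrically, if `current` is the spill interval, `i` is left
    //      alone.
    //   3. Otherwise the active interval is split at current->start(), as
    //      before this patch.
    // findBestFreeRegister() and canCoexist() apply the same exemption.
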
--- a/js/src/ion/LinearScan.h
+++ b/js/src/ion/LinearScan.h
@@ -274,23 +274,36 @@ class LiveInterval
 
     LiveInterval(VirtualRegister *reg, uint32 index)
       : reg_(reg),
         index_(index)
     { }
 
     bool addRange(CodePosition from, CodePosition to);
     void setFrom(CodePosition from);
-    CodePosition start();
-    CodePosition end();
     CodePosition intersect(LiveInterval *other);
     bool covers(CodePosition pos);
     CodePosition nextCoveredAfter(CodePosition pos);
-    size_t numRanges();
-    Range *getRange(size_t i);
+
+    CodePosition start() const {
+        JS_ASSERT(!ranges_.empty());
+        return ranges_.back().from;
+    }
+
+    CodePosition end() const {
+        JS_ASSERT(!ranges_.empty());
+        return ranges_.begin()->to;
+    }
+
+    size_t numRanges() const {
+        return ranges_.length();
+    }
+    const Range *getRange(size_t i) const {
+        return &ranges_[i];
+    }
 
     LAllocation *getAllocation() {
         return &alloc_;
     }
     void setAllocation(LAllocation alloc) {
         alloc_ = alloc;
     }
     VirtualRegister *reg() const {
@@ -315,16 +328,26 @@ class LiveInterval
         return &hint_;
     }
     void setHint(const Requirement &hint) {
         hint_ = hint;
     }
     bool isSpill() const {
         return alloc_.isStackSlot();
     }
+    bool isSpillInterval() const {
+        return !reg_;
+    }
+    bool requireSpill(const LiveInterval *other) const {
+        // Spill intervals at call instructions force spills of everything
+        // except temps and defs of this call.
+        JS_ASSERT(isSpillInterval());
+        JS_ASSERT(!other->isSpillInterval());
+        return (other->index() > 0 || start() != other->start());
+    }
 
     bool splitFrom(CodePosition pos, LiveInterval *after);
 
     void addUse(UsePosition *use);
     UsePosition *nextUseAfter(CodePosition pos);
     CodePosition nextUsePosAfter(CodePosition pos);
     CodePosition firstIncompatibleUse(LAllocation alloc);
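
To make requireSpill()'s rule concrete, here is a worked example (register name and positions are illustrative, and it assumes addSpillInterval() gives the spill interval the same start position as the call's definitions):

    // A call sits at output position 20; the allocator created a spill
    // interval S fixed to eax with S.start() == 20.
    //
    // Case 1: D is the call's own definition, interval index 0, D.start() == 20.
    //   S.requireSpill(D) == (0 > 0 || 20 != 20) == false
    //   -> D may live in eax; the spill interval does not evict it.
    //
    // Case 2: V was defined earlier, interval index 0, V.start() == 8.
    //   S.requireSpill(V) == (0 > 0 || 20 != 8) == true
    //   -> V must be split and spilled around the call.
    //
    // Any later split piece (interval index > 0) is spilled unconditionally.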