Backed out changeset e8dc1103c1b7.
author: David Anderson <danderson@mozilla.com>
Mon, 06 Sep 2010 18:17:59 -0700
changeset 74553 b77f32d2d5c9283dc61cc7d91c75b5eee0f3c428
parent 74514 e8dc1103c1b76aec702e06c92358b7274bc237f0
child 74554 801344c03110aeb8abd9b7b15171cd3c5c57c20d
push id: 2
push user: bsmedberg@mozilla.com
push date: Fri, 19 Aug 2011 14:38:13 +0000
milestone: 2.0b5pre
backs out: e8dc1103c1b76aec702e06c92358b7274bc237f0
Backed out changeset e8dc1103c1b7.
js/src/methodjit/Compiler.cpp
js/src/methodjit/FrameState.cpp
js/src/methodjit/FrameState.h
--- a/js/src/methodjit/Compiler.cpp
+++ b/js/src/methodjit/Compiler.cpp
@@ -1054,33 +1054,22 @@ mjit::Compiler::generateMethod()
           BEGIN_CASE(JSOP_GETLOCAL)
           {
             uint32 slot = GET_SLOTNO(PC);
             frame.pushLocal(slot);
           }
           END_CASE(JSOP_GETLOCAL)
 
           BEGIN_CASE(JSOP_SETLOCAL)
-          {
-            jsbytecode *next = &PC[JSOP_SETLOCAL_LENGTH];
-            bool pop = JSOp(*next) == JSOP_POP && !analysis[next].nincoming;
-            frame.storeLocal(GET_SLOTNO(PC), pop);
-            if (pop) {
+          BEGIN_CASE(JSOP_SETLOCALPOP)
+            frame.storeLocal(GET_SLOTNO(PC));
+            if (op == JSOP_SETLOCALPOP)
                 frame.pop();
-                PC += JSOP_SETLOCAL_LENGTH + JSOP_POP_LENGTH;
-                break;
-            }
-          }
           END_CASE(JSOP_SETLOCAL)
 
-          BEGIN_CASE(JSOP_SETLOCALPOP)
-            frame.storeLocal(GET_SLOTNO(PC), true);
-            frame.pop();
-          END_CASE(JSOP_SETLOCALPOP)
-
           BEGIN_CASE(JSOP_UINT16)
             frame.push(Value(Int32Value((int32_t) GET_UINT16(PC))));
           END_CASE(JSOP_UINT16)
 
           BEGIN_CASE(JSOP_NEWINIT)
           {
             jsint i = GET_INT8(PC);
             JS_ASSERT(i == JSProto_Array || i == JSProto_Object);
--- a/js/src/methodjit/FrameState.cpp
+++ b/js/src/methodjit/FrameState.cpp
@@ -777,18 +777,42 @@ FrameState::pushCopyOf(uint32 index)
         /* Maintain tracker ordering guarantees for copies. */
         JS_ASSERT(backing->isCopied());
         if (fe->trackerIndex() < backing->trackerIndex())
             swapInTracker(fe, backing);
     }
 }
 
 FrameEntry *
-FrameState::walkTrackerForUncopy(FrameEntry *original)
+FrameState::uncopy(FrameEntry *original)
 {
+    JS_ASSERT(original->isCopied());
+
+    /*
+     * Copies have three critical invariants:
+     *  1) The backing store precedes all copies in the tracker.
+     *  2) The backing store precedes all copies in the FrameState.
+     *  3) The backing store of a copy cannot be popped from the stack
+     *     while the copy is still live.
+     *
+     * Maintaining this invariant iteratively is kind of hard, so we choose
+     * the "lowest" copy in the frame up-front.
+     *
+     * For example, if the stack is:
+     *    [A, B, C, D]
+     * And the tracker has:
+     *    [A, D, C, B]
+     *
+     * If B, C, and D are copies of A - we will walk the tracker to the end
+     * and select B, not D (see bug 583684).
+     *
+     * Note: |tracker.nentries <= (nslots + nargs)|. However, this walk is
+     * sub-optimal if |original->trackerIndex() > sp - original|. With large
+     * scripts this may be a problem worth investigating.
+     */
     uint32 firstCopy = InvalidIndex;
     FrameEntry *bestFe = NULL;
     uint32 ncopies = 0;
     for (uint32 i = original->trackerIndex() + 1; i < tracker.nentries; i++) {
         FrameEntry *fe = tracker[i];
         if (fe >= sp)
             continue;
         if (fe->isCopy() && fe->copyOf() == original) {
@@ -800,16 +824,17 @@ FrameState::walkTrackerForUncopy(FrameEn
             }
             ncopies++;
         }
     }
 
     if (!ncopies) {
         JS_ASSERT(firstCopy == InvalidIndex);
         JS_ASSERT(!bestFe);
+        original->copied = false;
         return NULL;
     }
 
     JS_ASSERT(firstCopy != InvalidIndex);
     JS_ASSERT(bestFe);
     JS_ASSERT(bestFe > original);
 
     /* Mark all extra copies as copies of the new backing index. */
@@ -838,87 +863,17 @@ FrameState::walkTrackerForUncopy(FrameEn
              */
             if (other->trackerIndex() < bestFe->trackerIndex())
                 swapInTracker(bestFe, other);
         }
     } else {
         bestFe->setNotCopied();
     }
 
-    return bestFe;
-}
-
-FrameEntry *
-FrameState::walkFrameForUncopy(FrameEntry *original)
-{
-    FrameEntry *bestFe = NULL;
-    uint32 ncopies = 0;
-
-    /* It's only necessary to visit as many FEs are being tracked. */
-    uint32 maxvisits = tracker.nentries;
-
-    for (FrameEntry *fe = original + 1; fe < sp && maxvisits; fe++) {
-        if (!fe->isTracked())
-            continue;
-
-        maxvisits--;
-
-        if (fe->isCopy() && fe->copyOf() == original) {
-            if (!bestFe) {
-                bestFe = fe;
-                bestFe->setCopyOf(NULL);
-            } else {
-                fe->setCopyOf(bestFe);
-                if (fe->trackerIndex() < bestFe->trackerIndex())
-                    swapInTracker(bestFe, fe);
-            }
-            ncopies++;
-        }
-    }
-
-    return bestFe;
-}
-
-FrameEntry *
-FrameState::uncopy(FrameEntry *original)
-{
-    JS_ASSERT(original->isCopied());
-
-    /*
-     * Copies have three critical invariants:
-     *  1) The backing store precedes all copies in the tracker.
-     *  2) The backing store precedes all copies in the FrameState.
-     *  3) The backing store of a copy cannot be popped from the stack
-     *     while the copy is still live.
-     *
-     * Maintaining this invariant iteratively is kind of hard, so we choose
-     * the "lowest" copy in the frame up-front.
-     *
-     * For example, if the stack is:
-     *    [A, B, C, D]
-     * And the tracker has:
-     *    [A, D, C, B]
-     *
-     * If B, C, and D are copies of A - we will walk the tracker to the end
-     * and select B, not D (see bug 583684).
-     *
-     * Note: |tracker.nentries <= (nslots + nargs)|. However, this walk is
-     * sub-optimal if |tracker.nentries - original->trackerIndex() > sp - original|.
-     * With large scripts this may be a problem worth investigating. Note that
-     * the tracker is walked twice, so we multiply by 2 for pessimism.
-     */
-    FrameEntry *fe;
-    if ((tracker.nentries - original->trackerIndex()) * 2 > uint32(sp - original))
-        fe = walkFrameForUncopy(original);
-    else
-        fe = walkTrackerForUncopy(original);
-    if (!fe) {
-        original->setNotCopied();
-        return NULL;
-    }
+    FrameEntry *fe = bestFe;
 
     /*
      * Switch the new backing store to the old backing store. During
      * this process we also necessarily make sure the copy can be
      * synced.
      */
     if (!original->isTypeKnown()) {
         /*
@@ -942,86 +897,55 @@ FrameState::uncopy(FrameEntry *original)
         regstate[fe->data.reg()].reassociate(fe);
 
     return fe;
 }
 
 void
 FrameState::storeLocal(uint32 n, bool popGuaranteed, bool typeChange)
 {
-    FrameEntry *local = getLocal(n);
-
-    storeTop(local, popGuaranteed, typeChange);
+    FrameEntry *localFe = getLocal(n);
+    bool cacheable = !eval && !escaping[n];
 
-    if (eval || escaping[n]) {
-        /* Ensure that the local variable remains synced. */
-        if (local->isCopy()) {
-            FrameEntry *backing = local->copyOf();
-            if (!local->data.synced()) {
-                if (backing->data.inMemory())
-                    tempRegForData(backing);
-                syncData(backing, addressOf(local), masm);
-            }
-            if (!local->type.synced()) {
-                if (backing->type.inMemory())
-                    tempRegForType(backing);
-                syncType(backing, addressOf(local), masm);
-            }
-        } else if (local->isConstant()) {
-            if (!local->data.synced())
-                syncData(local, addressOf(local), masm);
-        } else {
-            if (!local->data.synced()) {
-                syncData(local, addressOf(local), masm);
-                local->data.sync();
-            }
-            if (!local->type.synced()) {
-                syncType(local, addressOf(local), masm);
-                local->type.sync();
-            }
-            forgetEntry(local);
-        }
+    if (!popGuaranteed && !cacheable) {
+        JS_ASSERT_IF(locals[n].isTracked() && (!eval || n < script->nfixed),
+                     locals[n].type.inMemory() &&
+                     locals[n].data.inMemory());
+        Address local(JSFrameReg, sizeof(JSStackFrame) + n * sizeof(Value));
+        storeTo(peek(-1), local, false);
+        forgetAllRegs(getLocal(n));
+        localFe->resetSynced();
+        return;
+    }
 
-        local->resetSynced();
-    }
-}
-
-void
-FrameState::forgetEntry(FrameEntry *fe)
-{
-    if (fe->isCopied()) {
-        uncopy(fe);
-        if (!fe->isCopied())
-            forgetAllRegs(fe);
-    } else {
-        forgetAllRegs(fe);
-    }
-}
-
-void
-FrameState::storeTop(FrameEntry *target, bool popGuaranteed, bool typeChange)
-{
-    bool wasSynced = target->type.synced();
+    bool wasSynced = localFe->type.synced();
 
     /* Detect something like (x = x) which is a no-op. */
     FrameEntry *top = peek(-1);
-    if (top->isCopy() && top->copyOf() == target) {
-        JS_ASSERT(target->isCopied());
+    if (top->isCopy() && top->copyOf() == localFe) {
+        JS_ASSERT(localFe->isCopied());
         return;
     }
 
     /* Completely invalidate the local variable. */
-    forgetEntry(target);
-    target->resetUnsynced();
+    if (localFe->isCopied()) {
+        uncopy(localFe);
+        if (!localFe->isCopied())
+            forgetAllRegs(localFe);
+    } else {
+        forgetAllRegs(localFe);
+    }
+
+    localFe->resetUnsynced();
 
     /* Constants are easy to propagate. */
     if (top->isConstant()) {
-        target->setCopyOf(NULL);
-        target->setNotCopied();
-        target->setConstant(Jsvalify(top->getValue()));
+        localFe->setCopyOf(NULL);
+        localFe->setNotCopied();
+        localFe->setConstant(Jsvalify(top->getValue()));
         return;
     }
 
     /*
      * When dealing with copies, there are three important invariants:
      *
      * 1) The backing store precedes all copies in the tracker.
      * 2) The backing store precedes all copies in the FrameState.
@@ -1033,28 +957,28 @@ FrameState::storeTop(FrameEntry *target,
      * can be rewritten as a copy of the original backing slot. If the first
      * condition does not hold, force it to hold by swapping in-place.
      */
     FrameEntry *backing = top;
     if (top->isCopy()) {
         backing = top->copyOf();
         JS_ASSERT(backing->trackerIndex() < top->trackerIndex());
 
-        if (backing < target) {
+        if (backing < localFe) {
             /* local.idx < backing.idx means local cannot be a copy yet */
-            if (target->trackerIndex() < backing->trackerIndex())
-                swapInTracker(backing, target);
-            target->setNotCopied();
-            target->setCopyOf(backing);
+            if (localFe->trackerIndex() < backing->trackerIndex())
+                swapInTracker(backing, localFe);
+            localFe->setNotCopied();
+            localFe->setCopyOf(backing);
             if (backing->isTypeKnown())
-                target->setType(backing->getKnownType());
+                localFe->setType(backing->getKnownType());
             else
-                target->type.invalidate();
-            target->data.invalidate();
-            target->isNumber = backing->isNumber;
+                localFe->type.invalidate();
+            localFe->data.invalidate();
+            localFe->isNumber = backing->isNumber;
             return;
         }
 
         /*
          * If control flow lands here, then there was a bytecode sequence like
          *
          *  ENTERBLOCK 2
          *  GETLOCAL 1
@@ -1071,85 +995,87 @@ FrameState::storeTop(FrameEntry *target,
          * but even so there's a quick workaround. We take all copies of the
          * backing fe, and redirect them to be copies of the destination.
          */
         for (uint32 i = backing->trackerIndex() + 1; i < tracker.nentries; i++) {
             FrameEntry *fe = tracker[i];
             if (fe >= sp)
                 continue;
             if (fe->isCopy() && fe->copyOf() == backing)
-                fe->setCopyOf(target);
+                fe->setCopyOf(localFe);
         }
     }
     backing->setNotCopied();
     
     /*
      * This is valid from the top->isCopy() path because we're guaranteed a
      * consistent ordering - all copies of |backing| are tracked after 
      * |backing|. Transitively, only one swap is needed.
      */
-    if (backing->trackerIndex() < target->trackerIndex())
-        swapInTracker(backing, target);
+    if (backing->trackerIndex() < localFe->trackerIndex())
+        swapInTracker(backing, localFe);
 
     /*
      * Move the backing store down - we spill registers here, but we could be
      * smarter and re-use the type reg.
      */
     RegisterID reg = tempRegForData(backing);
-    target->data.setRegister(reg);
-    regstate[reg].reassociate(target);
+    localFe->data.setRegister(reg);
+    regstate[reg].reassociate(localFe);
 
     if (typeChange) {
         if (backing->isTypeKnown()) {
-            target->setType(backing->getKnownType());
+            localFe->setType(backing->getKnownType());
         } else {
             RegisterID reg = tempRegForType(backing);
-            target->type.setRegister(reg);
-            regstate[reg].reassociate(target);
+            localFe->type.setRegister(reg);
+            regstate[reg].reassociate(localFe);
         }
     } else {
         if (!wasSynced)
-            masm.storeTypeTag(ImmType(backing->getKnownType()), addressOf(target));
-        target->type.setMemory();
+            masm.storeTypeTag(ImmType(backing->getKnownType()), addressOf(localFe));
+        localFe->type.setMemory();
     }
 
     if (!backing->isTypeKnown())
         backing->type.invalidate();
     backing->data.invalidate();
-    backing->setCopyOf(target);
-    backing->isNumber = target->isNumber;
-
-    JS_ASSERT(top->copyOf() == target);
+    backing->setCopyOf(localFe);
+    backing->isNumber = localFe->isNumber;
+    localFe->setCopied();
 
-    /*
-     * If this condition fails, top->copyOf()->isCopied() is temporarily
-     * left broken. This is okay since frame.pop() is guaranteed to follow.
-     *
-     * NB: If |top->isCopy()|, then there are two copies: the backing and the
-     * top. This is why popGuaranteed is not enough.
-     */
-    if (!popGuaranteed || top->isCopy())
-        target->setCopied();
+    if (!cacheable) {
+        /* TODO: x64 optimization */
+        if (!localFe->type.synced())
+            syncType(localFe, addressOf(localFe), masm);
+        if (!localFe->data.synced())
+            syncData(localFe, addressOf(localFe), masm);
+        forgetAllRegs(localFe);
+        localFe->type.setMemory();
+        localFe->data.setMemory();
+    }
+
+    JS_ASSERT(top->copyOf() == localFe);
 }
 
 void
 FrameState::shimmy(uint32 n)
 {
     JS_ASSERT(sp - n >= spBase);
     int32 depth = 0 - int32(n);
-    storeTop(&sp[depth - 1], true);
+    storeLocal(uint32(&sp[depth - 1] - locals), true);
     popn(n);
 }
 
 void
 FrameState::shift(int32 n)
 {
     JS_ASSERT(n < 0);
     JS_ASSERT(sp + n - 1 >= spBase);
-    storeTop(&sp[n - 1], true);
+    storeLocal(uint32(&sp[n - 1] - locals), true);
     pop();
 }
 
 static inline bool
 AllocHelper(RematInfo &info, MaybeRegisterID &maybe)
 {
     if (info.inRegister()) {
         maybe = info.reg();
--- a/js/src/methodjit/FrameState.h
+++ b/js/src/methodjit/FrameState.h
@@ -563,20 +563,19 @@ class FrameState
 
     /*
      * Fully stores a FrameEntry at an arbitrary address. popHint specifies
      * how hard the register allocator should try to keep the FE in registers.
      */
     void storeTo(FrameEntry *fe, Address address, bool popHint);
 
     /*
-     * Stores the top stack slot back to a slot.
+     * Stores the top stack slot back to a local variable.
      */
     void storeLocal(uint32 n, bool popGuaranteed = false, bool typeChange = true);
-    void storeTop(FrameEntry *target, bool popGuaranteed = false, bool typeChange = true);
 
     /*
      * Restores state from a slow path.
      */
     void merge(Assembler &masm, Changes changes) const;
 
     /*
      * Writes unsynced stores to an arbitrary buffer.
@@ -778,24 +777,16 @@ class FrameState
      * "Uncopies" the backing store of a FrameEntry that has been copied. The
      * original FrameEntry is not invalidated; this is the responsibility of
      * the caller. The caller can check isCopied() to see if the registers
      * were moved to a copy.
      *
      * Later addition: uncopy() returns the first copy found.
      */
     FrameEntry *uncopy(FrameEntry *original);
-    FrameEntry *walkTrackerForUncopy(FrameEntry *original);
-    FrameEntry *walkFrameForUncopy(FrameEntry *original);
-
-    /*
-     * All registers in the FE are forgotten. If it is copied, it is uncopied
-     * beforehand.
-     */
-    void forgetEntry(FrameEntry *fe);
 
     FrameEntry *entryFor(uint32 index) const {
         JS_ASSERT(entries[index].isTracked());
         return &entries[index];
     }
 
     RegisterID evictSomeReg() {
         return evictSomeReg(Registers::AvailRegs);