[INFER] Loop invariant code motion for array slot pointers, bug 618692.
author Brian Hackett <bhackett1024@gmail.com>
Thu, 07 Apr 2011 16:12:37 -0700
changeset 74910 6228c71f399448689cb6d788c6377131b4c9c9a3
parent 74908 5cc3893a84385ecc79b23fed5944195392facecd
child 74911 9f0cb8d7da58032335ce12a1323c856b54b3026d
push id 2
push user bsmedberg@mozilla.com
push date Fri, 19 Aug 2011 14:38:13 +0000
bugs 618692
milestone 2.2a1pre
js/src/jsanalyze.cpp
js/src/jsanalyze.h
js/src/jsinfer.cpp
js/src/jsinfer.h
js/src/jsinferinlines.h
js/src/methodjit/Compiler.cpp
js/src/methodjit/Compiler.h
js/src/methodjit/FastArithmetic.cpp
js/src/methodjit/FastOps.cpp
js/src/methodjit/FrameEntry.h
js/src/methodjit/FrameState-inl.h
js/src/methodjit/FrameState.cpp
js/src/methodjit/FrameState.h
js/src/methodjit/ImmutableSync.cpp
js/src/methodjit/LoopState.cpp
js/src/methodjit/LoopState.h
js/src/methodjit/RematInfo.h
js/src/methodjit/StubCompiler.cpp
--- a/js/src/jsanalyze.cpp
+++ b/js/src/jsanalyze.cpp
@@ -932,16 +932,19 @@ LifetimeScript::analyze(JSContext *cx, a
                     if (loop && loop->entry > loop->lastBlock)
                         loop->lastBlock = loop->entry;
 
                     LifetimeLoop *nloop = ArenaNew<LifetimeLoop>(pool);
                     if (!nloop)
                         return false;
                     PodZero(nloop);
 
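+                    /* An inner loop counts as calls/loops for its parent. */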
+                    if (loop)
+                        loop->hasCallsLoops = true;
+
                     nloop->parent = loop;
                     loop = nloop;
 
                     codeArray[targetOffset].loop = loop;
                     loop->head = targetOffset;
                     loop->backedge = offset;
                     loop->lastBlock = loop->head;
 
@@ -1007,16 +1010,25 @@ LifetimeScript::analyze(JSContext *cx, a
                 if (!var.lifetime)
                     return false;
                 var.saved = NULL;
                 saved[i--] = saved[--savedCount];
             }
             savedCount = 0;
             break;
 
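+          /*
+           * Calls can run arbitrary code which may reallocate an array's
+           * slots, so note them for the enclosing loop.
+           */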
+          case JSOP_NEW:
+          case JSOP_CALL:
+          case JSOP_EVAL:
+          case JSOP_FUNAPPLY:
+          case JSOP_FUNCALL:
+            if (loop)
+                loop->hasCallsLoops = true;
+            break;
+
           default:;
         }
 
         offset--;
     }
 
     return true;
 }
--- a/js/src/jsanalyze.h
+++ b/js/src/jsanalyze.h
@@ -424,21 +424,24 @@ struct LifetimeLoop
     };
     Increment *increments;
     uint32 nIncrements;
 
     /* It is unknown which arrays grow or which objects are modified in this loop. */
     bool unknownModset;
 
     /*
-     * This loop contains safe points in its body (which the interpreter might
+     * This loop contains safe points in its body which the interpreter might
      * join at directly.
      */
     bool hasSafePoints;
 
+    /* This loop has calls or inner loops. */
+    bool hasCallsLoops;
+
     /*
      * Arrays which might grow during this loop. This is a guess, and may
      * underapproximate the actual set of such arrays.
      */
     types::TypeObject **growArrays;
     uint32 nGrowArrays;
 };
 
--- a/js/src/jsinfer.cpp
+++ b/js/src/jsinfer.cpp
@@ -855,18 +855,18 @@ PropertyAccess(JSContext *cx, JSScript *
 }
 
 void
 TypeConstraintProp::newType(JSContext *cx, TypeSet *source, jstype type)
 {
     if (type == TYPE_UNKNOWN ||
         (!TypeIsObject(type) && !script->compileAndGo)) {
         /*
-         * Access on an unknown object.  Reads produce an unknown result, writes
-         * need to be monitored.  Note: this isn't a problem for handling overflows
+         * Access on an unknown object. Reads produce an unknown result, writes
+         * need to be monitored. Note: this isn't a problem for handling overflows
          * on inc/dec below, as these go through a slow path which must call
          * addTypeProperty.
          */
         if (assign)
             cx->compartment->types.monitorBytecode(cx, script, pc - script->code);
         else
             target->addType(cx, TYPE_UNKNOWN);
         return;
@@ -2145,44 +2145,41 @@ TypeCompartment::print(JSContext *cx, JS
 #ifdef DEBUG
     TypeObject *object = objects;
     while (object) {
         object->print(cx);
         object = object->next;
     }
 #endif
 
-    double millis = analysisTime / 1000.0;
-
     printf("Counts: ");
     for (unsigned count = 0; count < TYPE_COUNT_LIMIT; count++) {
         if (count)
             printf("/");
         printf("%u", typeCounts[count]);
     }
     printf(" (%u over)\n", typeCountOver);
 
     printf("Recompilations: %u\n", recompilations);
-    printf("Time: %.2f ms\n", millis);
 }
 
 /////////////////////////////////////////////////////////////////////
 // TypeCompartment tables
 /////////////////////////////////////////////////////////////////////
 
 /*
  * The arrayTypeTable and objectTypeTable are per-compartment tables for making
  * common type objects to model the contents of large script singletons and
  * JSON objects. These are vanilla Arrays and native Objects, so we distinguish
  * the types of different ones by looking at the types of their properties.
  *
  * All singleton/JSON arrays which have the same prototype, are homogenous and
- * of the same type will share a type object. All singleton/JSON objects which
- * have the same shape and property types will also share a type object. We
- * don't try to collate arrays or objects that have type mismatches.
+ * of the same element type will share a type object. All singleton/JSON
+ * objects which have the same shape and property types will also share a type
+ * object. We don't try to collate arrays or objects that have type mismatches.
  */
 
 static inline bool
 NumberTypes(jstype a, jstype b)
 {
     return (a == TYPE_INT32 || a == TYPE_DOUBLE) && (b == TYPE_INT32 || b == TYPE_DOUBLE);
 }
 
@@ -2554,17 +2551,17 @@ TypeObject::markUnknown(JSContext *cx)
     while (instance) {
         if (!instance->unknownProperties())
             instance->markUnknown(cx);
         instance = instance->instanceNext;
     }
 
     /*
      * Existing constraints may have already been added to this object, which we need
-     * to do the right thing for.  We can't ensure that we will mark all unknown
+     * to do the right thing for. We can't ensure that we will mark all unknown
      * objects before they have been accessed, as the __proto__ of a known object
      * could be dynamically set to an unknown object, and we can decide to ignore
      * properties of an object during analysis (i.e. hashmaps). Adding unknown for
      * any properties accessed already accounts for possible values read from them.
      */
 
     unsigned count = getPropertyCount();
     for (unsigned i = 0; i < count; i++) {
--- a/js/src/jsinfer.h
+++ b/js/src/jsinfer.h
@@ -65,38 +65,38 @@ namespace types {
 struct TypeSet;
 struct TypeCallsite;
 struct TypeObject;
 struct TypeFunction;
 struct TypeCompartment;
 struct ClonedTypeSet;
 
 /*
- * Information about a single concrete type.  This is a non-zero value whose
+ * Information about a single concrete type. This is a non-zero value whose
  * lower 3 bits indicate a particular primitive type below, and if those bits
  * are zero then a pointer to a type object.
  */
 typedef jsuword jstype;
 
 /* The primitive types. */
 const jstype TYPE_UNDEFINED = 1;
 const jstype TYPE_NULL      = 2;
 const jstype TYPE_BOOLEAN   = 3;
 const jstype TYPE_INT32     = 4;
 const jstype TYPE_DOUBLE    = 5;
 const jstype TYPE_STRING    = 6;
 
 /*
- * Aggregate unknown type, could be anything.  Typically used when a type set
+ * Aggregate unknown type, could be anything. Typically used when a type set
  * becomes polymorphic, or when accessing an object with unknown properties.
  */
 const jstype TYPE_UNKNOWN = 7;
 
 /*
- * Test whether a type is an primitive or an object.  Object types can be
+ * Test whether a type is a primitive or an object. Object types can be
  * cast into a TypeObject*.
  */
 
 static inline bool
 TypeIsPrimitive(jstype type)
 {
     JS_ASSERT(type);
     return type < TYPE_UNKNOWN;
@@ -191,17 +191,17 @@ public:
      * If this is a persistent subset constraint, the object being propagated
      * into. Such constraints describe relationships between TypeObject
      * properties which are independent of the analysis of any script.
      */
     virtual TypeObject * baseSubset() { return NULL; }
 };
 
 /*
- * Coarse kinds of a set of objects.  These form the following lattice:
+ * Coarse kinds of a set of objects. These form the following lattice:
  *
  *            NONE
  *       ____/    \_____
  *      /               \
  * PACKED_ARRAY  INLINEABLE_FUNCTION
  *     |                 |
  * DENSE_ARRAY    SCRIPTED_FUNCTION
  *      \____      _____/
@@ -660,17 +660,16 @@ struct TypeCompartment
     /* List of objects not associated with a script. */
     TypeObject *objects;
 
     /* Whether type inference is enabled in this compartment. */
     bool inferenceEnabled;
 
     /* Whether type inference is active, see AutoEnterTypeInference. */
     unsigned inferenceDepth;
-    uint64_t inferenceStartTime;
 
     /* Pool for all intermediate type information in this compartment. Cleared on every GC. */
     JSArenaPool pool;
 
     /* Number of scripts in this compartment. */
     unsigned scriptCount;
 
     /* Object to use throughout the compartment as the default type of objects with no prototype. */
@@ -712,43 +711,35 @@ struct TypeCompartment
     ArrayTypeTable *arrayTypeTable;
     ObjectTypeTable *objectTypeTable;
 
     bool fixArrayType(JSContext *cx, JSObject *obj);
     bool fixObjectType(JSContext *cx, JSObject *obj);
 
     /* Constraint solving worklist structures. */
 
-    /* A type that needs to be registered with a constraint. */
+    /*
+     * Worklist of types which need to be propagated to constraints. We use a
+     * worklist to avoid blowing the native stack.
+     */
     struct PendingWork
     {
         TypeConstraint *constraint;
         TypeSet *source;
         jstype type;
     };
-
-    /*
-     * Worklist of types which need to be propagated to constraints.  We use a
-     * worklist to avoid blowing the native stack.
-     */
     PendingWork *pendingArray;
     unsigned pendingCount;
     unsigned pendingCapacity;
 
     /* Whether we are currently resolving the pending worklist. */
     bool resolving;
 
     /* Logging fields */
 
-    /*
-     * The total time (in microseconds) spent generating inference structures
-     * and performing analysis.
-     */
-    uint64_t analysisTime;
-
     /* Counts of stack type sets with some number of possible operand types. */
     static const unsigned TYPE_COUNT_LIMIT = 4;
     unsigned typeCounts[TYPE_COUNT_LIMIT];
     unsigned typeCountOver;
 
     void init(JSContext *cx);
     ~TypeCompartment();
 
--- a/js/src/jsinferinlines.h
+++ b/js/src/jsinferinlines.h
@@ -149,18 +149,17 @@ struct AutoEnterTypeInference
 
     AutoEnterTypeInference(JSContext *cx, bool compiling = false)
         : cx(cx)
     {
 #ifdef DEBUG
         depth = cx->compartment->types.inferenceDepth;
 #endif
         JS_ASSERT_IF(!compiling, cx->compartment->types.inferenceEnabled);
-        if (cx->compartment->types.inferenceDepth++ == 0)
-            cx->compartment->types.inferenceStartTime = cx->compartment->types.currentTime();
+        cx->compartment->types.inferenceDepth++;
     }
 
     ~AutoEnterTypeInference()
     {
         /*
          * This should have been reset by checkPendingRecompiles.
          * :FIXME: be more tolerant and clean up anyways, the caller may be
          * propagating an OOM or other error.
@@ -176,19 +175,16 @@ TypeCompartment::checkPendingRecompiles(
     if (--inferenceDepth != 0) {
         /*
          * There is still a type inference activation on the stack, wait for it to
          * finish before handling any recompilations. Note that we should not be
          * invoking any scripted code while the inference is running :TODO: assert this.
          */
         return true;
     }
-    if (inferenceStartTime)
-        analysisTime += currentTime() - inferenceStartTime;
-    inferenceStartTime = 0;
     if (pendingNukeTypes)
         return nukeTypes(cx);
     else if (pendingRecompiles && !processPendingRecompiles(cx))
         return false;
     return true;
 }
 
 /*
--- a/js/src/methodjit/Compiler.cpp
+++ b/js/src/methodjit/Compiler.cpp
@@ -3004,16 +3004,21 @@ mjit::Compiler::prepareStubCall(Uses use
 }
 
 JSC::MacroAssembler::Call
 mjit::Compiler::emitStubCall(void *ptr, DataLabelPtr *pinline)
 {
     JaegerSpew(JSpew_Insns, " ---- CALLING STUB ---- \n");
     Call cl = masm.fallibleVMCall(cx->typeInferenceEnabled(),
                                   ptr, outerPC(), pinline, frame.totalDepth());
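+    /*
+     * The stub call may clobber registers holding loop invariants. Record a
+     * jump and rejoin label so that code restoring the invariants can be
+     * linked in when the loop is finished.
+     */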
+    if (loop && loop->generatingInvariants()) {
+        Jump j = masm.jump();
+        Label l = masm.label();
+        loop->addInvariantCall(j, l, false);
+    }
     JaegerSpew(JSpew_Insns, " ---- END STUB CALL ---- \n");
     return cl;
 }
 
 void
 mjit::Compiler::interruptCheckHelper()
 {
     /*
@@ -6173,17 +6178,17 @@ mjit::Compiler::startLoop(jsbytecode *he
 
     if (loop) {
         /*
          * Convert all loop registers in the outer loop into unassigned registers.
          * We don't keep track of which registers the inner loop uses, so the only
          * registers that can be carried in the outer loop must be mentioned before
          * the inner loop starts.
          */
-        loop->flushRegisters(stubcc);
+        loop->clearLoopRegisters();
     }
 
     LoopState *nloop = cx->new_<LoopState>(cx, script, this, &frame, &a->analysis, &a->liveness);
     if (!nloop || !nloop->init(head, entry, entryTarget))
         return false;
 
     nloop->outer = loop;
     loop = nloop;
@@ -6199,17 +6204,17 @@ mjit::Compiler::finishLoop(jsbytecode *h
         return true;
 
     /*
      * We're done processing the current loop. Every loop has exactly one backedge
      * at the end ('continue' statements are forward jumps to the loop test),
      * and after jumpAndTrace'ing on that edge we can pop it from the frame.
      */
     JS_ASSERT(loop && loop->headOffset() == uint32(head - script->code));
-    loop->flushRegisters(stubcc);
+    loop->flushLoop(stubcc);
 
     jsbytecode *entryTarget = script->code + loop->entryOffset();
 
     /*
      * Fix up the jump entering the loop. We are doing this after all code has
      * been emitted for the backedge, so that we are now in the loop's fallthrough
      * (where we will emit the entry code).
      */
@@ -6266,16 +6271,22 @@ mjit::Compiler::finishLoop(jsbytecode *h
 
     LoopState *nloop = loop->outer;
     cx->delete_(loop);
     loop = nloop;
     frame.setLoop(loop);
 
     fallthrough.linkTo(masm.label(), &masm);
 
+    /*
+     * Clear all registers used for loop temporaries. In the case of loop
+     * nesting, we do not allocate temporaries for the outer loop.
+     */
+    frame.clearTemporaries();
+
     return true;
 }
 
 /*
  * Note: This function emits tracer hooks into the OOL path. This means if
  * it is used in the middle of an in-progress slow path, the stream will be
  * hopelessly corrupted. Take care to only call this before linkExits() and
  * after rejoin()s.
--- a/js/src/methodjit/Compiler.h
+++ b/js/src/methodjit/Compiler.h
@@ -479,16 +479,18 @@ class Compiler : public BaseCompiler
     }
 
     jsbytecode *inlinePC() { return PC; }
     uint32 inlineIndex() { return a->inlineIndex; }
 
     types::TypeSet *getTypeSet(uint32 slot);
     types::TypeSet *getTypeSet(const FrameEntry *fe) { return getTypeSet(frame.indexOfFe(fe)); }
 
+    Assembler &getAssembler(bool ool) { return ool ? stubcc.masm : masm; }
+
   private:
     CompileStatus performCompilation(JITScript **jitp);
     CompileStatus generatePrologue();
     CompileStatus generateMethod();
     CompileStatus generateEpilogue();
     CompileStatus finishThisUp(JITScript **jitp);
     CompileStatus pushActiveFrame(JSScript *script, uint32 argc);
     void popActiveFrame();
--- a/js/src/methodjit/FastArithmetic.cpp
+++ b/js/src/methodjit/FastArithmetic.cpp
@@ -1109,31 +1109,33 @@ mjit::Compiler::jsop_equality_int_string
          * Sync everything except the top two entries.
          * We will handle the lhs/rhs in the stub call path.
          */
         fixDoubleTypes(Uses(2));
         frame.syncAndKill(Registers(Registers::AvailRegs), Uses(frame.frameSlots()), Uses(2));
 
         RegisterID tempReg = frame.allocReg();
 
-        frame.pop();
-        frame.pop();
-        frame.discardFrame();
-
         JaegerSpew(JSpew_Insns, " ---- BEGIN STUB CALL CODE ---- \n");
 
         RESERVE_OOL_SPACE(stubcc.masm);
 
         /* Start of the slow path for equality stub call. */
         Label stubEntry = stubcc.masm.label();
 
         /* The lhs/rhs need to be synced in the stub call path. */
         frame.ensureValueSynced(stubcc.masm, lhs, lvr);
         frame.ensureValueSynced(stubcc.masm, rhs, rvr);
 
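+        /* Compute this before the pops below invalidate lhs and rhs. */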
+        bool needIntPath = (!lhs->isTypeKnown() || lhsInt) && (!rhs->isTypeKnown() || rhsInt);
+
+        frame.pop();
+        frame.pop();
+        frame.discardFrame();
+
         bool needStub = true;
         
 #ifdef JS_MONOIC
         EqualityGenInfo ic;
 
         ic.cond = cond;
         ic.tempReg = tempReg;
         ic.lvr = lvr;
@@ -1168,17 +1170,17 @@ mjit::Compiler::jsop_equality_int_string
         Jump stubFallthrough = stubcc.masm.jump();
 
         JaegerSpew(JSpew_Insns, " ---- END STUB CALL CODE ---- \n");
         CHECK_OOL_SPACE();
 
         Jump fast;
         MaybeJump firstStubJump;
 
-        if ((!lhs->isTypeKnown() || lhsInt) && (!rhs->isTypeKnown() || rhsInt)) {
+        if (needIntPath) {
             if (!lhsInt) {
                 Jump lhsFail = masm.testInt32(Assembler::NotEqual, lvr.typeReg());
                 stubcc.linkExitDirect(lhsFail, stubEntry);
                 firstStubJump = lhsFail;
             }
             if (!rhsInt) {
                 Jump rhsFail = masm.testInt32(Assembler::NotEqual, rvr.typeReg());
                 stubcc.linkExitDirect(rhsFail, stubEntry);
--- a/js/src/methodjit/FastOps.cpp
+++ b/js/src/methodjit/FastOps.cpp
@@ -123,45 +123,35 @@ mjit::Compiler::ensureInteger(FrameEntry
 }
 
 void
 mjit::Compiler::jsop_bitnot()
 {
     FrameEntry *top = frame.peek(-1);
 
     /* We only want to handle integers here. */
-    if (top->isTypeKnown() && top->getKnownType() != JSVAL_TYPE_INT32) {
+    if (top->isNotType(JSVAL_TYPE_INT32) && top->isNotType(JSVAL_TYPE_DOUBLE)) {
         prepareStubCall(Uses(1));
         INLINE_STUBCALL(stubs::BitNot);
         frame.pop();
         frame.pushSynced(JSVAL_TYPE_INT32);
         return;
     }
-           
-    /* Test the type. */
-    bool stubNeeded = false;
-    if (!top->isTypeKnown()) {
-        Jump intFail = frame.testInt32(Assembler::NotEqual, top);
-        stubcc.linkExit(intFail, Uses(1));
-        frame.learnType(top, JSVAL_TYPE_INT32);
-        stubNeeded = true;
-    }
+
+    ensureInteger(top, Uses(1));
 
-    if (stubNeeded || recompiling) {
-        stubcc.leave();
-        OOL_STUBCALL(stubs::BitNot);
-    }
+    stubcc.leave();
+    OOL_STUBCALL(stubs::BitNot);
 
     RegisterID reg = frame.ownRegForData(top);
     masm.not32(reg);
     frame.pop();
     frame.pushTypedPayload(JSVAL_TYPE_INT32, reg);
 
-    if (stubNeeded)
-        stubcc.rejoin(Changes(1));
+    stubcc.rejoin(Changes(1));
 }
 
 void
 mjit::Compiler::jsop_bitop(JSOp op)
 {
     FrameEntry *rhs = frame.peek(-1);
     FrameEntry *lhs = frame.peek(-2);
 
@@ -250,23 +240,21 @@ mjit::Compiler::jsop_bitop(JSOp op)
             frame.push(Int32Value(L << (R & 31)));
             return;
           case JSOP_RSH:
             frame.push(Int32Value(L >> (R & 31)));
             return;
           case JSOP_URSH:
           {
             uint32 unsignedL;
-            if (ValueToECMAUint32(cx, lhs->getValue(), (uint32_t*)&unsignedL)) {
-                Value v = NumberValue(uint32(unsignedL >> (R & 31)));
-                JS_ASSERT(v.isInt32());
-                frame.push(v);
-                return;
-            }
-            break;
+            ValueToECMAUint32(cx, Int32Value(L), (uint32_t*)&unsignedL);  /* Can't fail. */
+            Value v = NumberValue(uint32(unsignedL >> (R & 31)));
+            JS_ASSERT(v.isInt32());
+            frame.push(v);
+            return;
           }
           default:
             JS_NOT_REACHED("say wat");
         }
     }
 
     RegisterID reg;
 
@@ -835,16 +823,17 @@ mjit::Compiler::booleanJumpScript(JSOp o
 
     /* OOL path: Conversion to boolean. */
     MaybeJump jmpCvtExecScript;
     MaybeJump jmpCvtRejoin;
     Label lblCvtPath = stubcc.masm.label();
 
     if (!fe->isTypeKnown() ||
         !(fe->isType(JSVAL_TYPE_BOOLEAN) || fe->isType(JSVAL_TYPE_INT32))) {
+        /* Note: this cannot overwrite slots holding loop invariants. */
         stubcc.masm.infallibleVMCall(JS_FUNC_TO_DATA_PTR(void *, stubs::ValueToBoolean),
                                      frame.totalDepth());
 
         jmpCvtExecScript.setJump(stubcc.masm.branchTest32(cond, Registers::ReturnReg,
                                                           Registers::ReturnReg));
         jmpCvtRejoin.setJump(stubcc.masm.jump());
     }
 
@@ -1094,94 +1083,104 @@ mjit::Compiler::jsop_setelem_dense()
     // Allocate registers.
 
     ValueRemat vr;
     frame.pinEntry(value, vr);
 
     Int32Key key = id->isConstant()
                  ? Int32Key::FromConstant(id->getValue().toInt32())
                  : Int32Key::FromRegister(frame.tempRegForData(id));
-    if (!key.isConstant() && !frame.haveSameBacking(id, value))
+    bool pinKey = !key.isConstant() && !frame.haveSameBacking(id, value);
+    if (pinKey)
         frame.pinReg(key.reg());
 
-    RegisterID objReg;
-    if (frame.haveSameBacking(obj, value)) {
-        objReg = frame.allocReg();
-        masm.move(vr.dataReg(), objReg);
-    } else if (frame.haveSameBacking(obj, id)) {
-        objReg = frame.allocReg();
-        masm.move(key.reg(), objReg);
-    } else {
-        objReg = frame.copyDataIntoReg(obj);
-    }
-
-    // Guard on the array's initialized length.
+    // Register to hold the computed slots pointer for the object. If we can
+    // hoist the initialized length check, we make the slots pointer loop
+    // invariant and never access the object itself.
+    RegisterID slotsReg;
     bool hoisted = loop && !a->parent && loop->hoistArrayLengthCheck(obj, id);
-    MaybeJump initlenGuard;
-    if (!hoisted) {
-        initlenGuard = masm.guardArrayExtent(offsetof(JSObject, initializedLength),
-                                             objReg, key, Assembler::BelowOrEqual);
-    }
 
-    frame.unpinEntry(vr);
-    if (!key.isConstant() && !frame.haveSameBacking(id, value))
-        frame.unpinReg(key.reg());
+    if (hoisted) {
+        FrameEntry *slotsFe = loop->invariantSlots(obj);
+        slotsReg = frame.tempRegForData(slotsFe);
 
-    Label syncTarget = stubcc.syncExitAndJump(Uses(3));
+        frame.unpinEntry(vr);
+        if (pinKey)
+            frame.unpinReg(key.reg());
+    } else {
+        // Get a register for the object which we can clobber.
+        RegisterID objReg;
+        if (frame.haveSameBacking(obj, value)) {
+            objReg = frame.allocReg();
+            masm.move(vr.dataReg(), objReg);
+        } else if (frame.haveSameBacking(obj, id)) {
+            objReg = frame.allocReg();
+            masm.move(key.reg(), objReg);
+        } else {
+            objReg = frame.copyDataIntoReg(obj);
+        }
 
-    // Make an OOL path for setting exactly the initialized length. Skip if we
-    // hoisted the initialized length check entirely, in this case we will
-    // recompile if the index could ever be out of range.
-    if (!hoisted) {
-        stubcc.linkExitDirect(initlenGuard.get(), stubcc.masm.label());
+        frame.unpinEntry(vr);
+        if (pinKey)
+            frame.unpinReg(key.reg());
+
+        // Make an OOL path for setting exactly the initialized length.
+        Label syncTarget = stubcc.syncExitAndJump(Uses(3));
 
-        // Recheck for an exact initialized length.
-        // :TODO: would be nice to reuse the condition bits from the previous test.
+        Jump initlenGuard = masm.guardArrayExtent(offsetof(JSObject, initializedLength),
+                                                  objReg, key, Assembler::BelowOrEqual);
+        stubcc.linkExitDirect(initlenGuard, stubcc.masm.label());
+
+        // Recheck for an exact initialized length. :TODO: would be nice to
+        // reuse the condition bits from the previous test.
         Jump exactlenGuard = stubcc.masm.guardArrayExtent(offsetof(JSObject, initializedLength),
                                                           objReg, key, Assembler::NotEqual);
         exactlenGuard.linkTo(syncTarget, &stubcc.masm);
 
         // Check array capacity.
         Jump capacityGuard = stubcc.masm.guardArrayExtent(offsetof(JSObject, capacity),
                                                           objReg, key, Assembler::BelowOrEqual);
         capacityGuard.linkTo(syncTarget, &stubcc.masm);
 
-        // Bump the index for setting the array length.  The above guard ensures this
-        // won't overflow.
+        // Bump the index for setting the array length. The above guard
+        // ensures this won't overflow, due to NSLOTS_LIMIT.
         stubcc.masm.bumpKey(key, 1);
 
         // Update the initialized length.
         stubcc.masm.storeKey(key, Address(objReg, offsetof(JSObject, initializedLength)));
 
         // Update the array length if needed.
         Jump lengthGuard = stubcc.masm.guardArrayExtent(offsetof(JSObject, privateData),
                                                         objReg, key, Assembler::AboveOrEqual);
         stubcc.masm.storeKey(key, Address(objReg, offsetof(JSObject, privateData)));
         lengthGuard.linkTo(stubcc.masm.label(), &stubcc.masm);
 
         // Restore the index.
         stubcc.masm.bumpKey(key, -1);
 
-        // Jump back to the inline path.
+        // Rejoin with the inline path.
         Jump initlenExit = stubcc.masm.jump();
         stubcc.crossJump(initlenExit, masm.label());
+
+        masm.loadPtr(Address(objReg, offsetof(JSObject, slots)), objReg);
+        slotsReg = objReg;
     }
 
     // Fully store the value. :TODO: don't need to do this in the non-initlen case
     // if the array is packed and monomorphic.
-    masm.loadPtr(Address(objReg, offsetof(JSObject, slots)), objReg);
     if (key.isConstant())
-        masm.storeValue(vr, Address(objReg, key.index() * sizeof(Value)));
+        masm.storeValue(vr, Address(slotsReg, key.index() * sizeof(Value)));
     else
-        masm.storeValue(vr, BaseIndex(objReg, key.reg(), masm.JSVAL_SCALE));
+        masm.storeValue(vr, BaseIndex(slotsReg, key.reg(), masm.JSVAL_SCALE));
 
     stubcc.leave();
     OOL_STUBCALL(STRICT_VARIANT(stubs::SetElem));
 
-    frame.freeReg(objReg);
+    if (!hoisted)
+        frame.freeReg(slotsReg);
     frame.shimmy(2);
     stubcc.rejoin(Changes(2));
 
     if (recompiling) {
         OOL_STUBCALL(STRICT_VARIANT(ic::SetElement));
         stubcc.rejoin(Changes(2));
     }
 }
@@ -1424,56 +1423,71 @@ mjit::Compiler::jsop_getelem_dense(bool 
 
     // Allocate registers.
 
     // If we know the result of the GETELEM may be undefined, then misses on the
     // initialized length or hole checks can just produce an undefined value.
     // We checked in the caller that prototypes do not have indexed properties.
     bool allowUndefined = mayPushUndefined(0);
 
-    RegisterID objReg = frame.tempRegForData(obj);
-    frame.pinReg(objReg);
+    bool hoisted = loop && !a->parent && loop->hoistArrayLengthCheck(obj, id);
+
+    // Get a register with either the object or its slots, depending on whether
+    // we are hoisting the bounds check.
+    RegisterID baseReg;
+    if (hoisted) {
+        FrameEntry *slotsFe = loop->invariantSlots(obj);
+        baseReg = frame.tempRegForData(slotsFe);
+    } else {
+        baseReg = frame.tempRegForData(obj);
+    }
+    frame.pinReg(baseReg);
 
     Int32Key key = id->isConstant()
                  ? Int32Key::FromConstant(id->getValue().toInt32())
                  : Int32Key::FromRegister(frame.tempRegForData(id));
-    if (!key.isConstant() && !frame.haveSameBacking(id, obj))
+    bool pinKey = !key.isConstant() && key.reg() != baseReg;
+    if (pinKey)
         frame.pinReg(key.reg());
 
     RegisterID dataReg = frame.allocReg();
 
     MaybeRegisterID typeReg;
     if (!isPacked || type == JSVAL_TYPE_UNKNOWN || type == JSVAL_TYPE_DOUBLE)
         typeReg = frame.allocReg();
 
     // Guard on the array's initialized length.
-    bool hoisted = loop && !a->parent && loop->hoistArrayLengthCheck(obj, id);
     MaybeJump initlenGuard;
     if (!hoisted) {
         initlenGuard = masm.guardArrayExtent(offsetof(JSObject, initializedLength),
-                                             objReg, key, Assembler::BelowOrEqual);
+                                             baseReg, key, Assembler::BelowOrEqual);
     }
 
-    frame.unpinReg(objReg);
-    if (!key.isConstant() && !frame.haveSameBacking(id, obj))
+    frame.unpinReg(baseReg);
+    if (pinKey)
         frame.unpinReg(key.reg());
 
-    if (!hoisted && !allowUndefined)
-        stubcc.linkExit(initlenGuard.get(), Uses(2));
-
-    masm.loadPtr(Address(objReg, offsetof(JSObject, slots)), dataReg);
+    RegisterID slotsReg;
+    if (hoisted) {
+        slotsReg = baseReg;
+    } else {
+        if (!allowUndefined)
+            stubcc.linkExit(initlenGuard.get(), Uses(2));
+        masm.loadPtr(Address(baseReg, offsetof(JSObject, slots)), dataReg);
+        slotsReg = dataReg;
+    }
 
     // Get the slot, skipping the hole check if the array is known to be packed.
     Jump holeCheck;
     if (key.isConstant()) {
-        Address slot(dataReg, key.index() * sizeof(Value));
+        Address slot(slotsReg, key.index() * sizeof(Value));
         holeCheck = masm.fastArrayLoadSlot(slot, !isPacked, typeReg, dataReg);
     } else {
         JS_ASSERT(key.reg() != dataReg);
-        BaseIndex slot(dataReg, key.reg(), masm.JSVAL_SCALE);
+        BaseIndex slot(slotsReg, key.reg(), masm.JSVAL_SCALE);
         holeCheck = masm.fastArrayLoadSlot(slot, !isPacked, typeReg, dataReg);
     }
 
     if (!isPacked) {
         if (!allowUndefined)
             stubcc.linkExit(holeCheck, Uses(2));
         if (type != JSVAL_TYPE_UNKNOWN && type != JSVAL_TYPE_DOUBLE)
             frame.freeReg(typeReg.reg());
@@ -1655,25 +1669,25 @@ mjit::Compiler::jsop_getelem(bool isCall
 
     ic.fastPathRejoin = masm.label();
 
     frame.popn(2);
     frame.pushRegs(ic.typeReg, ic.objReg, knownPushedType(0));
     if (isCall)
         frame.pushSynced(knownPushedType(1));
 
-    stubcc.rejoin(Changes(2));
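+    /* CALLELEM pushes the implicit |this| in addition to the element. */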
+    stubcc.rejoin(Changes(isCall ? 2 : 1));
 
 #ifdef JS_POLYIC
     if (!getElemICs.append(ic))
         return false;
 
     if (recompiling) {
         OOL_STUBCALL(isCall ? stubs::CallElem : stubs::GetElem);
-        stubcc.rejoin(Changes(2));
+        stubcc.rejoin(Changes(isCall ? 2 : 1));
     }
 #endif
 
     return true;
 }
 
 static inline bool
 ReallySimpleStrictTest(FrameEntry *fe)
@@ -1760,20 +1774,19 @@ mjit::Compiler::jsop_stricteq(JSOp op)
     /* Comparison against undefined or null is super easy. */
     bool lhsTest;
     if ((lhsTest = ReallySimpleStrictTest(lhs)) || ReallySimpleStrictTest(rhs)) {
         FrameEntry *test = lhsTest ? rhs : lhs;
         FrameEntry *known = lhsTest ? lhs : rhs;
         RegisterID result = frame.allocReg(Registers::SingleByteRegs).reg();
 
         if (test->isTypeKnown()) {
-            frame.popn(2);
-
             masm.move(Imm32((test->getKnownType() == known->getKnownType()) ==
                             (op == JSOP_STRICTEQ)), result);
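+            /* Pop only after the known types have been read above. */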
+            frame.popn(2);
             frame.pushTypedPayload(JSVAL_TYPE_BOOLEAN, result);
             return;
         }
 
         /* This is only true if the other side is |null|. */
 #if defined JS_CPU_X86 || defined JS_CPU_ARM
         JSValueTag mask = known->getKnownTag();
         if (frame.shouldAvoidTypeRemat(test))
--- a/js/src/methodjit/FrameEntry.h
+++ b/js/src/methodjit/FrameEntry.h
@@ -51,51 +51,84 @@ namespace js {
 namespace mjit {
 
 class FrameEntry
 {
     friend class FrameState;
     friend class ImmutableSync;
 
   public:
+
+    /* Accessors for entries which are known constants. */
+
     bool isConstant() const {
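+        /* Copies and invariants do not themselves store a constant. */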
+        if (isCopy() || isInvariant())
+            return false;
         return data.isConstant();
     }
 
     const jsval_layout &getConstant() const {
         JS_ASSERT(isConstant());
         return v_;
     }
 
     const Value &getValue() const {
         JS_ASSERT(isConstant());
         return Valueify(JSVAL_FROM_LAYOUT(v_));
     }
 
+#if defined JS_NUNBOX32
+    uint32 getPayload() const {
+        //JS_ASSERT(!Valueify(v_.asBits).isDouble() || type.synced());
+        JS_ASSERT(isConstant());
+        return v_.s.payload.u32;
+    }
+#elif defined JS_PUNBOX64
+    uint64 getPayload() const {
+        JS_ASSERT(isConstant());
+        return v_.asBits & JSVAL_PAYLOAD_MASK;
+    }
+#endif
+
+    /* For a constant double FrameEntry, truncate to an int32. */
+    void convertConstantDoubleToInt32(JSContext *cx) {
+        JS_ASSERT(isType(JSVAL_TYPE_DOUBLE) && isConstant());
+        int32 value;
+        ValueToECMAInt32(cx, getValue(), &value);
+
+        Value newValue = Int32Value(value);
+        setConstant(Jsvalify(newValue));
+    }
+
+    /*
+     * Accessors for entries whose type is known. Any entry can have a known
+     * type, and constant entries must have one.
+     */
+
     bool isTypeKnown() const {
-        return type.isConstant();
+        return backing()->type.isConstant();
     }
 
     /*
      * The known type should not be used in generated code if it is JSVAL_TYPE_DOUBLE.
      * In such cases either the value is constant, in memory or in a floating point register.
      */
     JSValueType getKnownType() const {
         JS_ASSERT(isTypeKnown());
-        return knownType;
+        return backing()->knownType;
     }
 
 #if defined JS_NUNBOX32
     JSValueTag getKnownTag() const {
         JS_ASSERT(v_.s.tag != JSVAL_TAG_CLEAR);
-        return v_.s.tag;
+        return backing()->v_.s.tag;
     }
 #elif defined JS_PUNBOX64
     JSValueShiftedTag getKnownTag() const {
-        return JSValueShiftedTag(v_.asBits & JSVAL_TAG_MASK);
+        return JSValueShiftedTag(backing()->v_.asBits & JSVAL_TAG_MASK);
     }
 #endif
 
     // Return true iff the type of this value is definitely known to be type_.
     bool isType(JSValueType type_) const {
         return isTypeKnown() && getKnownType() == type_;
     }
 
@@ -105,50 +138,45 @@ class FrameEntry
     }
 
     // Return true if the type of this value is definitely type_, or is unknown
     // and thus potentially type_ at runtime.
     bool mightBeType(JSValueType type_) const {
         return !isNotType(type_);
     }
 
-#if defined JS_NUNBOX32
-    uint32 getPayload() const {
-        //JS_ASSERT(!Valueify(v_.asBits).isDouble() || type.synced());
-        return v_.s.payload.u32;
-    }
-#elif defined JS_PUNBOX64
-    uint64 getPayload() const {
-        return v_.asBits & JSVAL_PAYLOAD_MASK;
-    }
-#endif
-
-    bool hasSameBacking(const FrameEntry *other) const {
-        return backing() == other->backing();
-    }
-
-    /* For a constant double FrameEntry, truncate to an int32. */
-    void convertConstantDoubleToInt32(JSContext *cx) {
-        JS_ASSERT(isType(JSVAL_TYPE_DOUBLE) && isConstant());
-        int32 value;
-        ValueToECMAInt32(cx, getValue(), &value);
-
-        Value newValue = Int32Value(value);
-        setConstant(Jsvalify(newValue));
-    }
+    /* Accessors for entries which are copies of other mutable entries. */
 
     bool isCopy() const { return !!copy; }
     bool isCopied() const { return copied; }
 
     const FrameEntry *backing() const {
         return isCopy() ? copyOf() : this;
     }
 
+    bool hasSameBacking(const FrameEntry *other) const {
+        return backing() == other->backing();
+    }
+
+    /*
+     * Accessors for entries which are copies of analysis temporaries. All
+     * temporaries are invariant, so these behave more like constants than like
+     * copies of mutable entries.
+     */
+
+    bool isInvariant() const { return !!invariant_; }
+
+    FrameEntry *invariant() {
+        JS_ASSERT(isInvariant());
+        return invariant_;
+    }
+
   private:
     void setType(JSValueType type_) {
+        JS_ASSERT(!isCopy() && !isInvariant());
         type.setConstant();
 #if defined JS_NUNBOX32
         v_.s.tag = JSVAL_TYPE_TO_TAG(type_);
 #elif defined JS_PUNBOX64
         v_.asBits &= JSVAL_PAYLOAD_MASK;
         v_.asBits |= JSVAL_TYPE_TO_SHIFTED_TAG(type_);
 #endif
         knownType = type_;
@@ -158,33 +186,32 @@ class FrameEntry
         clear();
         index_ = index;
         tracked = true;
     }
 
     void clear() {
         copied = false;
         copy = NULL;
+        invariant_ = NULL;
     }
 
     uint32 trackerIndex() {
         return index_;
     }
 
     /*
      * Marks the FE as unsynced & invalid.
      */
     void resetUnsynced() {
         clear();
         type.unsync();
         data.unsync();
-#ifdef DEBUG
         type.invalidate();
         data.invalidate();
-#endif
     }
 
     /*
      * Marks the FE as synced & in memory.
      */
     void resetSynced() {
         clear();
         type.setMemory();
@@ -222,54 +249,64 @@ class FrameEntry
     void setNotCopied() {
         copied = false;
     }
 
     /*
      * Set copy index.
      */
     void setCopyOf(FrameEntry *fe) {
-        JS_ASSERT_IF(fe, !fe->isConstant());
         JS_ASSERT(!isCopied());
         copy = fe;
+        invariant_ = NULL;
+        if (fe) {
+            type.invalidate();
+            data.invalidate();
+        }
+    }
+
+    void setInvariant(FrameEntry *fe) {
+        JS_ASSERT(!isCopied());
+        copy = NULL;
+        invariant_ = fe;
+        type.invalidate();
+        data.invalidate();
     }
 
     inline bool isTracked() const {
         return tracked;
     }
 
     inline void untrack() {
         tracked = false;
     }
 
     inline bool dataInRegister(AnyRegisterID reg) const {
-        JS_ASSERT(!copy);
-        return (data.inRegister() && reg.isReg() && data.reg() == reg.reg())
-            || (data.inFPRegister() && !reg.isReg() && data.fpreg() == reg.fpreg());
+        JS_ASSERT(!copy && !invariant_);
+        return reg.isReg()
+            ? (data.inRegister() && data.reg() == reg.reg())
+            : (data.inFPRegister() && data.fpreg() == reg.fpreg());
     }
 
   private:
     JSValueType knownType;
     jsval_layout v_;
     RematInfo  type;
     RematInfo  data;
     uint32     index_;
+    FrameEntry *invariant_;
     FrameEntry *copy;
     bool       copied;
     bool       tracked;
     bool       inlined;
 
     /*
      * Offset of the last loop in which this entry was written or had a loop
      * register assigned.
      */
     uint32     lastLoop;
-
-#if JS_BITS_PER_WORD == 32
-    void       *padding;
-#endif
 };
 
 } /* namespace mjit */
 } /* namespace js */
 
 #endif /* jsjaeger_valueinfo_h__ */
 
--- a/js/src/methodjit/FrameState-inl.h
+++ b/js/src/methodjit/FrameState-inl.h
@@ -79,16 +79,27 @@ FrameState::haveSameBacking(FrameEntry *
 {
     if (lhs->isCopy())
         lhs = lhs->copyOf();
     if (rhs->isCopy())
         rhs = rhs->copyOf();
     return lhs == rhs;
 }
 
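+/*
+ * Get the frame entry for an analysis temporary. Temporaries are allocated
+ * above the script's normal slots and currently hold only loop invariants.
+ */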
+inline FrameEntry *
+FrameState::getTemporary(uint32 which)
+{
+    JS_ASSERT(which < TEMPORARY_LIMIT);
+
+    FrameEntry *fe = temporaries + which;
+    JS_ASSERT(fe < temporariesTop);
+
+    return getOrTrack(indexOfFe(fe));
+}
+
 inline AnyRegisterID
 FrameState::allocReg(uint32 mask)
 {
     if (a->freeRegs.hasRegInMask(mask)) {
         AnyRegisterID reg = a->freeRegs.takeAnyReg(mask);
         modifyReg(reg);
         return reg;
     }
@@ -121,17 +132,17 @@ FrameState::allocAndLoadReg(FrameEntry *
      * Decide whether to retroactively mark a register as holding the entry
      * at the start of the current loop. We can do this if (a) the register has
      * not been touched since the start of the loop (it is in loopRegs), and (b)
      * the entry has also not been written to or already had a loop register
      * assigned.
      */
     if (loop && a->freeRegs.hasRegInMask(loop->getLoopRegs() & mask) &&
         type == RematInfo::DATA &&
-        (fe == this_ || isArg(fe) || isLocal(fe)) &&
+        (fe == this_ || isArg(fe) || isLocal(fe) || isTemporary(fe)) &&
         fe->lastLoop < loop->headOffset() &&
         !a->parent) {
         reg = a->freeRegs.takeAnyReg(loop->getLoopRegs() & mask);
         regstate(reg).associate(fe, RematInfo::DATA);
         fe->lastLoop = loop->headOffset();
         loop->setLoopReg(reg, fe);
         return reg;
     }
@@ -193,16 +204,19 @@ FrameState::pop()
 {
     JS_ASSERT(sp > spBase);
 
     FrameEntry *fe = --sp;
     if (!fe->isTracked())
         return;
 
     forgetAllRegs(fe);
+    fe->type.invalidate();
+    fe->data.invalidate();
+    fe->clear();
 
     a->extraArray[fe - spBase].reset();
 }
 
 inline void
 FrameState::freeReg(AnyRegisterID reg)
 {
     JS_ASSERT(!regstate(reg).usedBy());
@@ -223,20 +237,23 @@ FrameState::forgetReg(AnyRegisterID reg)
         regstate(reg).forget();
         a->freeRegs.putReg(reg);
     }
 }
 
 inline FrameEntry *
 FrameState::rawPush()
 {
-    JS_ASSERT(unsigned(sp - entries) < feLimit(script));
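+    /* The stack must not grow into the temporaries above it. */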
+    JS_ASSERT(sp < temporaries);
 
     if (!sp->isTracked())
         addToTracker(sp);
+    sp->type.invalidate();
+    sp->data.invalidate();
+    sp->clear();
 
     a->extraArray[sp - spBase].reset();
 
     return sp++;
 }
 
 inline void
 FrameState::push(const Value &v)
@@ -476,65 +493,65 @@ FrameState::tempRegForType(FrameEntry *f
     RegisterID reg = allocAndLoadReg(fe, false, RematInfo::TYPE).reg();
     fe->type.setRegister(reg);
     return reg;
 }
 
 inline JSC::MacroAssembler::RegisterID
 FrameState::tempRegForData(FrameEntry *fe)
 {
-    JS_ASSERT(!fe->data.isConstant());
+    JS_ASSERT(!fe->isConstant());
     JS_ASSERT(!fe->isType(JSVAL_TYPE_DOUBLE));
 
     if (fe->isCopy())
         fe = fe->copyOf();
 
     if (fe->data.inRegister())
         return fe->data.reg();
 
     RegisterID reg = allocAndLoadReg(fe, false, RematInfo::DATA).reg();
     fe->data.setRegister(reg);
     return reg;
 }
 
 inline void
 FrameState::forgetConstantData(FrameEntry *fe)
 {
-    if (!fe->data.isConstant())
+    if (!fe->isConstant())
         return;
     JS_ASSERT(fe->isType(JSVAL_TYPE_OBJECT));
 
     RegisterID reg = allocReg();
     regstate(reg).associate(fe, RematInfo::DATA);
 
     masm.move(JSC::MacroAssembler::ImmPtr(&fe->getValue().toObject()), reg);
     fe->data.setRegister(reg);
 }
 
 inline JSC::MacroAssembler::FPRegisterID
 FrameState::tempFPRegForData(FrameEntry *fe)
 {
-    JS_ASSERT(!fe->data.isConstant());
+    JS_ASSERT(!fe->isConstant());
     JS_ASSERT(fe->isType(JSVAL_TYPE_DOUBLE));
 
     if (fe->isCopy())
         fe = fe->copyOf();
 
     if (fe->data.inFPRegister())
         return fe->data.fpreg();
 
     FPRegisterID reg = allocAndLoadReg(fe, true, RematInfo::DATA).fpreg();
     fe->data.setFPRegister(reg);
     return reg;
 }
 
 inline AnyRegisterID
 FrameState::tempRegInMaskForData(FrameEntry *fe, uint32 mask)
 {
-    JS_ASSERT(!fe->data.isConstant());
+    JS_ASSERT(!fe->isConstant());
     JS_ASSERT_IF(fe->isType(JSVAL_TYPE_DOUBLE), !(mask & ~Registers::AvailFPRegs));
     JS_ASSERT_IF(!fe->isType(JSVAL_TYPE_DOUBLE), !(mask & ~Registers::AvailRegs));
 
     if (fe->isCopy())
         fe = fe->copyOf();
 
     AnyRegisterID reg;
     if (fe->data.inRegister() || fe->data.inFPRegister()) {
@@ -584,23 +601,23 @@ FrameState::tempRegForData(FrameEntry *f
         masm.loadPayload(addressOf(fe), reg);
         return reg;
     }
 }
 
 inline bool
 FrameState::shouldAvoidTypeRemat(FrameEntry *fe)
 {
-    return fe->type.inMemory();
+    return !fe->isCopy() && !fe->isInvariant() && fe->type.inMemory();
 }
 
 inline bool
 FrameState::shouldAvoidDataRemat(FrameEntry *fe)
 {
-    return fe->data.inMemory();
+    return !fe->isCopy() && !fe->isInvariant() && fe->data.inMemory();
 }
 
 inline void
 FrameState::ensureFeSynced(const FrameEntry *fe, Assembler &masm) const
 {
     Address to = addressOf(fe);
     const FrameEntry *backing = fe;
     if (fe->isCopy())
@@ -868,31 +885,40 @@ FrameState::learnType(FrameEntry *fe, JS
 
 inline void
 FrameState::learnType(FrameEntry *fe, JSValueType type, RegisterID data)
 {
     /* The copied bit may be set on an entry, but there should not be any actual copies. */
     JS_ASSERT_IF(fe->isCopied(), !isEntryCopied(fe));
 
     forgetAllRegs(fe);
-    fe->copy = NULL;
+    fe->clear();
 
     fe->type.setConstant();
     fe->knownType = type;
 
     fe->data.setRegister(data);
     regstate(data).associate(fe, RematInfo::DATA);
 
     fe->data.unsync();
     fe->type.unsync();
 }
 
 inline int32
 FrameState::frameOffset(const FrameEntry *fe, ActiveFrame *a) const
 {
+    /*
+     * The stored frame offsets for analysis temporaries are immediately above
+     * the script's normal slots (and will thus be clobbered should a C++ or
+     * scripted call push another frame). There must be enough room in the
+     * reserved stack space.
+     */
+    JS_STATIC_ASSERT(StackSpace::STACK_EXTRA >= TEMPORARY_LIMIT);
+    JS_ASSERT(uint32(fe - a->entries) < feLimit(a->script));
+
     if (fe >= a->locals)
         return JSStackFrame::offsetOfFixed(uint32(fe - a->locals));
     if (fe >= a->args)
         return JSStackFrame::offsetOfFormalArg(a->script->fun, uint32(fe - a->args));
     if (fe == a->this_)
         return JSStackFrame::offsetOfThis(a->script->fun);
     if (fe == a->callee_)
         return JSStackFrame::offsetOfCallee(a->script->fun);
@@ -1072,16 +1098,18 @@ FrameState::unpinKilledReg(RegisterID re
 {
     regstate(reg).unpinUnsafe();
     a->freeRegs.putReg(reg);
 }
 
 inline void
 FrameState::forgetAllRegs(FrameEntry *fe)
 {
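+    /* Copies and invariants have no registers of their own to forget. */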
+    if (fe->isCopy() || fe->isInvariant())
+        return;
     if (fe->type.inRegister())
         forgetReg(fe->type.reg());
     if (fe->data.inRegister())
         forgetReg(fe->data.reg());
     if (fe->data.inFPRegister())
         forgetReg(fe->data.fpreg());
 }
 
@@ -1261,17 +1289,18 @@ FrameState::loadDouble(RegisterID t, Reg
     loadDouble(fe, fpreg, masm);
 #endif
 }
 
 inline bool
 FrameState::tryFastDoubleLoad(FrameEntry *fe, FPRegisterID fpReg, Assembler &masm) const
 {
 #ifdef JS_CPU_X86
-    if (fe->type.inRegister() && fe->data.inRegister()) {
+    if (!fe->isCopy() && !fe->isInvariant() &&
+        fe->type.inRegister() && fe->data.inRegister()) {
         masm.fastLoadDouble(fe->data.reg(), fe->type.reg(), fpReg);
         return true;
     }
 #endif
     return false;
 }
 
 inline void
--- a/js/src/methodjit/FrameState.cpp
+++ b/js/src/methodjit/FrameState.cpp
@@ -114,18 +114,16 @@ FrameState::getUnsyncedEntries(uint32 *p
     }
 }
 
 bool
 FrameState::pushActiveFrame(JSScript *script, uint32 argc,
                             analyze::Script *analysis, analyze::LifetimeScript *liveness)
 {
     uint32 depth = a ? totalDepth() : 0;
-
-    // nslots + nargs + 2 (callee, this)
     uint32 nentries = feLimit(script);
 
     size_t totalBytes = sizeof(ActiveFrame) +
                         sizeof(FrameEntry) * nentries +              // entries[]
                         sizeof(FrameEntry *) * nentries +            // tracker.entries
                         sizeof(StackEntryExtra) * script->nslots;    // extraArray
 
     uint8 *cursor = (uint8 *)cx->calloc_(totalBytes);
@@ -276,16 +274,18 @@ FrameState::updateActiveFrame()
     script = a->script;
     entries = a->entries;
     callee_ = a->callee_;
     this_ = a->this_;
     args = a->args;
     locals = a->locals;
     spBase = locals + script->nfixed;
     sp = spBase;
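+    /* Loop temporaries are allocated above the script's normal slots. */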
+    temporaries = locals + script->nslots;
+    temporariesTop = temporaries;
 }
 
 void
 FrameState::discardLocalRegisters()
 {
     /* Discard all local registers, without syncing. Must be followed by a discardFrame. */
     a->freeRegs = Registers::AvailAnyRegs;
 }
@@ -425,16 +425,18 @@ FrameState::entryName(const FrameEntry *
     static unsigned which = 0;
     which = (which + 1) & 3;
     char *buf = bufs[which];
 
     if (isArg(fe))
         JS_snprintf(buf, 50, "arg%d", fe - args);
     else if (isLocal(fe))
         JS_snprintf(buf, 50, "local%d", fe - locals);
+    else if (isTemporary(fe))
+        JS_snprintf(buf, 50, "temp%d", fe - temporaries);
     else
         JS_snprintf(buf, 50, "slot%d", fe - spBase);
     return buf;
 }
 #endif
 
 void
 FrameState::evictReg(AnyRegisterID reg)
@@ -471,17 +473,17 @@ FrameState::isEntryCopied(FrameEntry *fe
     /*
      * :TODO: It would be better for fe->isCopied() to mean 'is actually copied'
      * rather than 'might have copies', removing the need for this walk.
      */
     JS_ASSERT(fe->isCopied());
 
     for (uint32 i = fe->trackerIndex() + 1; i < a->tracker.nentries; i++) {
         FrameEntry *nfe = a->tracker[i];
-        if (nfe < sp && nfe->isCopy() && nfe->copyOf() == fe)
+        if (!deadEntry(nfe) && nfe->isCopy() && nfe->copyOf() == fe)
             return true;
     }
 
     return false;
 }
 
 AnyRegisterID
 FrameState::bestEvictReg(uint32 mask, bool includePinned) const
@@ -516,17 +518,17 @@ FrameState::bestEvictReg(uint32 mask, bo
          */
 
         if (fe == callee_) {
             JS_ASSERT(fe->inlined || (fe->data.synced() && fe->type.synced()));
             JaegerSpew(JSpew_Regalloc, "result: %s is callee\n", reg.name());
             return reg;
         }
 
-        if (fe >= spBase) {
+        if (fe >= spBase && !isTemporary(fe)) {
             if (!fallback.isSet()) {
                 fallback = reg;
                 fallbackOffset = 0;
             }
             JaegerSpew(JSpew_Regalloc, "    %s is on stack\n", reg.name());
             continue;
         }
 
@@ -539,16 +541,30 @@ FrameState::bestEvictReg(uint32 mask, bo
             if (!fallback.isSet()) {
                 fallback = reg;
                 fallbackOffset = 0;
             }
             JaegerSpew(JSpew_Regalloc, "    %s has copies\n", reg.name());
             continue;
         }
 
+        if (isTemporary(fe)) {
+            /*
+             * All temporaries we currently generate are for loop invariants,
+             * which we treat as being live everywhere within the loop.
+             */
+            JS_ASSERT(loop);
+            if (!fallback.isSet() || loop->backedgeOffset() > fallbackOffset) {
+                fallback = reg;
+                fallbackOffset = loop->backedgeOffset();
+            }
+            JaegerSpew(JSpew_Regalloc, "    %s is a loop temporary\n", reg.name());
+            continue;
+        }
+
         /*
          * Any register for an entry dead at this bytecode is fine to evict.
          * We require an entry to be live at the bytecode which kills it.
          * This ensures that if multiple registers are used for the entry
          * (i.e. type and payload), we do not haphazardly evict the first
          * one when allocating the second one.
          */
         Lifetime *lifetime = variableLive(fe, PC);
@@ -711,26 +727,32 @@ FrameState::computeAllocation(jsbytecode
         }
 #endif
         return alloc;
     }
 
     alloc->setParentRegs(a->parentRegs);
 
     /*
-     * The allocation to use at the target consists of all variables currently
-     * in registers which are live at the target.
+     * The allocation to use at the target consists of all non-stack entries
+     * currently in registers which are live at the target.
      */
     Registers regs = Registers::AvailRegs;
     while (!regs.empty()) {
         AnyRegisterID reg = regs.takeAnyReg();
         if (a->freeRegs.hasReg(reg) || regstate(reg).type() == RematInfo::TYPE)
             continue;
         FrameEntry *fe = regstate(reg).fe();
-        if (fe == callee_ || fe >= spBase || !variableLive(fe, target))
+        if (fe == callee_)
+            continue;
+        if (fe < spBase && !variableLive(fe, target))
+            continue;
+        if (fe >= spBase && !isTemporary(fe))
+            continue;
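+        /* Don't carry temporaries (loop invariants) past the loop's backedge. */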
+        if (isTemporary(fe) && target - script->code > loop->backedgeOffset())
             continue;
         alloc->set(reg, indexOfFe(fe), fe->data.synced());
     }
 
 #ifdef DEBUG
     if (IsJaegerSpewChannelActive(JSpew_Regalloc)) {
         JaegerSpew(JSpew_Regalloc, "allocation at %u:", target - script->code);
         dumpAllocation(alloc);
@@ -809,17 +831,17 @@ FrameState::syncForBranch(jsbytecode *ta
     /*
      * First pass. Sync all entries which will not be carried in a register,
      * and uncopy everything except values used in the branch.
      */
 
     for (uint32 i = a->tracker.nentries - 1; i < a->tracker.nentries; i--) {
         FrameEntry *fe = a->tracker[i];
 
-        if (fe >= sp - uses.nuses) {
+        if (deadEntry(fe, uses.nuses)) {
             /* No need to sync, this will get popped before branching. */
             continue;
         }
 
         unsigned index = indexOfFe(fe);
         if (!fe->isCopy() && alloc->hasAnyReg(index)) {
             /* Types are always synced, except for known doubles. */
             if (!fe->isType(JSVAL_TYPE_DOUBLE))
@@ -1292,33 +1314,32 @@ void FrameState::loadForReturn(FrameEntr
 #ifdef DEBUG
 void
 FrameState::assertValidRegisterState() const
 {
     Registers checkedFreeRegs(Registers::AvailAnyRegs);
 
     for (uint32 i = 0; i < a->tracker.nentries; i++) {
         FrameEntry *fe = a->tracker[i];
-        if (fe >= sp)
+        if (deadEntry(fe))
             continue;
 
         JS_ASSERT(i == fe->trackerIndex());
-        JS_ASSERT_IF(fe->isCopy(),
-                     fe->trackerIndex() > fe->copyOf()->trackerIndex());
-        JS_ASSERT_IF(fe->isCopy(), fe > fe->copyOf());
-        JS_ASSERT_IF(fe->isCopy(),
-                     !fe->type.inRegister() && !fe->data.inRegister() && !fe->data.inFPRegister());
-        JS_ASSERT_IF(fe->isCopy(), fe->copyOf() < sp);
-        JS_ASSERT_IF(fe->isCopy(), fe->copyOf()->isCopied());
-        JS_ASSERT_IF(fe->isCopy(), fe->isTypeKnown() == fe->copyOf()->isTypeKnown());
-        JS_ASSERT_IF(fe->isCopy() && fe->isTypeKnown(),
-                     fe->getKnownType() == fe->copyOf()->getKnownType());
-
-        if (fe->isCopy())
+
+        if (fe->isCopy()) {
+            JS_ASSERT(fe->trackerIndex() > fe->copyOf()->trackerIndex());
+            JS_ASSERT(fe > fe->copyOf());
+            JS_ASSERT(!deadEntry(fe->copyOf()));
+            JS_ASSERT(fe->copyOf()->isCopied());
             continue;
+        }
+
+        if (fe->isInvariant())
+            continue;
+
         if (fe->type.inRegister()) {
             checkedFreeRegs.takeReg(fe->type.reg());
             JS_ASSERT(regstate(fe->type.reg()).fe() == fe);
         }
         if (fe->data.inRegister()) {
             checkedFreeRegs.takeReg(fe->data.reg());
             JS_ASSERT(regstate(fe->data.reg()).fe() == fe);
         }
@@ -1502,53 +1523,53 @@ FrameState::sync(Assembler &masm, Uses u
             if ((!fe->type.synced() && backing->type.inMemory()) ||
                 (!fe->data.synced() && backing->data.inMemory())) {
                 syncFancy(masm, avail, fe, bottom);
                 return;
             }
 #endif
         }
 
+        bool copy = fe->isCopy() || fe->isInvariant();
+
         /* If a part still needs syncing, it is either a copy or constant. */
 #if defined JS_PUNBOX64
         /* All register-backed FEs have been entirely synced up-front. */
-        if (!fe->type.inRegister() && !fe->data.inRegister())
+        if (copy || (!fe->type.inRegister() && !fe->data.inRegister()))
             ensureFeSynced(fe, masm);
 #elif defined JS_NUNBOX32
         /* All components held in registers have been already synced. */
-        if (!fe->data.inRegister())
+        if (copy || !fe->data.inRegister())
             ensureDataSynced(fe, masm);
-        if (!fe->type.inRegister())
+        if (copy || !fe->type.inRegister())
             ensureTypeSynced(fe, masm);
 #endif
     }
 }
 
 void
 FrameState::syncAndKill(Registers kill, Uses uses, Uses ignore)
 {
     syncParentRegistersInMask(masm, a->parentRegs.freeMask, true);
     JS_ASSERT(a->parentRegs.empty());
 
     if (loop) {
         /*
          * Drop any remaining loop registers so we don't do any more after-the-fact
          * allocation of the initial register state.
          */
-        loop->clearRegisters();
+        loop->clearLoopRegisters();
     }
 
-    FrameEntry *spStop = sp - ignore.nuses;
-
     /* Sync all kill-registers up-front. */
     Registers search(kill.freeMask & ~a->freeRegs.freeMask);
     while (!search.empty()) {
         AnyRegisterID reg = search.takeAnyReg();
         FrameEntry *fe = regstate(reg).usedBy();
-        if (!fe || fe >= spStop)
+        if (!fe || deadEntry(fe, ignore.nuses))
             continue;
 
         JS_ASSERT(fe->isTracked());
 
 #if defined JS_PUNBOX64
         /* Don't use syncFe(), since that may clobber more registers. */
         ensureFeSynced(fe, masm);
 
@@ -1587,21 +1608,24 @@ FrameState::syncAndKill(Registers kill, 
     FrameEntry *bottom = cx->typeInferenceEnabled() ? entries : sp - uses.nuses;
 
     for (FrameEntry *fe = sp - 1; fe >= bottom && maxvisits; fe--) {
         if (!fe->isTracked())
             continue;
 
         maxvisits--;
 
-        if (fe >= spStop)
+        if (deadEntry(fe, ignore.nuses))
             continue;
 
         syncFe(fe);
 
+        if (fe->isCopy() || fe->isInvariant())
+            continue;
+
         /* Forget registers. */
         if (fe->data.inRegister() && !regstate(fe->data.reg()).isPinned()) {
             forgetReg(fe->data.reg());
             fe->data.setMemory();
         }
         if (fe->data.inFPRegister() && !regstate(fe->data.fpreg()).isPinned()) {
             forgetReg(fe->data.fpreg());
             fe->data.setMemory();
@@ -1615,17 +1639,17 @@ FrameState::syncAndKill(Registers kill, 
     /*
      * Anything still alive at this point is guaranteed to be synced. However,
      * it is necessary to evict temporary registers.
      */
     search = Registers(kill.freeMask & ~a->freeRegs.freeMask);
     while (!search.empty()) {
         AnyRegisterID reg = search.takeAnyReg();
         FrameEntry *fe = regstate(reg).usedBy();
-        if (!fe || fe >= spStop)
+        if (!fe || deadEntry(fe, ignore.nuses))
             continue;
 
         JS_ASSERT(fe->isTracked() && !fe->isType(JSVAL_TYPE_DOUBLE));
 
         if (regstate(reg).type() == RematInfo::DATA) {
             JS_ASSERT(fe->data.reg() == reg.reg());
             JS_ASSERT(fe->data.synced());
             fe->data.setMemory();
@@ -1736,17 +1760,17 @@ JSC::MacroAssembler::RegisterID
 FrameState::copyDataIntoReg(FrameEntry *fe)
 {
     return copyDataIntoReg(this->masm, fe);
 }
 
 void
 FrameState::copyDataIntoReg(FrameEntry *fe, RegisterID hint)
 {
-    JS_ASSERT(!fe->data.isConstant());
+    JS_ASSERT(!fe->isConstant());
     JS_ASSERT(!fe->isType(JSVAL_TYPE_DOUBLE));
 
     if (fe->isCopy())
         fe = fe->copyOf();
 
     if (!fe->data.inRegister())
         tempRegForData(fe);
 
@@ -1770,17 +1794,17 @@ FrameState::copyDataIntoReg(FrameEntry *
     }
 
     modifyReg(hint);
 }
 
 JSC::MacroAssembler::RegisterID
 FrameState::copyDataIntoReg(Assembler &masm, FrameEntry *fe)
 {
-    JS_ASSERT(!fe->data.isConstant());
+    JS_ASSERT(!fe->isConstant());
 
     if (fe->isCopy())
         fe = fe->copyOf();
 
     if (fe->data.inRegister()) {
         RegisterID reg = fe->data.reg();
         if (a->freeRegs.empty(Registers::AvailRegs)) {
             ensureDataSynced(fe, masm);
@@ -1803,21 +1827,21 @@ FrameState::copyDataIntoReg(Assembler &m
         masm.loadPayload(addressOf(fe),reg);
 
     return reg;
 }
 
 JSC::MacroAssembler::RegisterID
 FrameState::copyTypeIntoReg(FrameEntry *fe)
 {
-    JS_ASSERT(!fe->type.isConstant());
-
     if (fe->isCopy())
         fe = fe->copyOf();
 
+    JS_ASSERT(!fe->type.isConstant());
+
     if (fe->type.inRegister()) {
         RegisterID reg = fe->type.reg();
         if (a->freeRegs.empty(Registers::AvailRegs)) {
             ensureTypeSynced(fe, masm);
             fe->type.setMemory();
             regstate(reg).forget();
             modifyReg(reg);
         } else {
@@ -1855,17 +1879,17 @@ FrameState::copyInt32ConstantIntoReg(Ass
     RegisterID reg = allocReg();
     masm.move(Imm32(fe->getValue().toInt32()), reg);
     return reg;
 }
 
 JSC::MacroAssembler::RegisterID
 FrameState::ownRegForType(FrameEntry *fe)
 {
-    JS_ASSERT(!fe->type.isConstant());
+    JS_ASSERT(!fe->isTypeKnown());
 
     RegisterID reg;
     if (fe->isCopy()) {
         /* For now, just do an extra move. The reg must be mutable. */
         FrameEntry *backing = fe->copyOf();
         if (!backing->type.inRegister()) {
             JS_ASSERT(backing->type.inMemory());
             tempRegForType(backing);
@@ -1887,30 +1911,30 @@ FrameState::ownRegForType(FrameEntry *fe
 
     if (fe->type.inRegister()) {
         reg = fe->type.reg();
 
         /* Remove ownership of this register. */
         JS_ASSERT(regstate(reg).fe() == fe);
         JS_ASSERT(regstate(reg).type() == RematInfo::TYPE);
         regstate(reg).forget();
-        fe->type.invalidate();
+        fe->type.setMemory();
         modifyReg(reg);
     } else {
         JS_ASSERT(fe->type.inMemory());
         reg = allocReg();
         masm.loadTypeTag(addressOf(fe), reg);
     }
     return reg;
 }
 
 JSC::MacroAssembler::RegisterID
 FrameState::ownRegForData(FrameEntry *fe)
 {
-    JS_ASSERT(!fe->data.isConstant());
+    JS_ASSERT(!fe->isConstant());
     JS_ASSERT(!fe->isType(JSVAL_TYPE_DOUBLE));
 
     RegisterID reg;
     if (fe->isCopy()) {
         /* For now, just do an extra move. The reg must be mutable. */
         FrameEntry *backing = fe->copyOf();
         if (!backing->data.inRegister()) {
             JS_ASSERT(backing->data.inMemory());
@@ -1929,29 +1953,28 @@ FrameState::ownRegForData(FrameEntry *fe
             masm.move(backing->data.reg(), reg);
         }
         return reg;
     }
 
     if (fe->isCopied()) {
         FrameEntry *copy = uncopy(fe);
         if (fe->isCopied()) {
-            fe->type.invalidate();
-            fe->data.invalidate();
+            fe->resetSynced();
             return copyDataIntoReg(copy);
         }
     }
-    
+
     if (fe->data.inRegister()) {
         reg = fe->data.reg();
         /* Remove ownership of this register. */
         JS_ASSERT(regstate(reg).fe() == fe);
         JS_ASSERT(regstate(reg).type() == RematInfo::DATA);
         regstate(reg).forget();
-        fe->data.invalidate();
+        fe->data.setMemory();
         modifyReg(reg);
     } else {
         JS_ASSERT(fe->data.inMemory());
         reg = allocReg();
         masm.loadPayload(addressOf(fe), reg);
     }
     return reg;
 }
@@ -2000,17 +2023,17 @@ FrameState::ensureDouble(FrameEntry *fe)
     if (fe->isCopy()) {
         /* Forget this entry is a copy.  We are converting this entry, not the backing. */
         backing = fe->copyOf();
         fe->clear();
     } else if (fe->isCopied()) {
         /* Sync and forget any copies of this entry. */
         for (uint32 i = fe->trackerIndex() + 1; i < a->tracker.nentries; i++) {
             FrameEntry *nfe = a->tracker[i];
-            if (nfe < sp && nfe->isCopy() && nfe->copyOf() == fe) {
+            if (!deadEntry(nfe) && nfe->isCopy() && nfe->copyOf() == fe) {
                 syncFe(nfe);
                 nfe->resetSynced();
             }
         }
     }
 
     FPRegisterID fpreg = allocFPReg();
 
@@ -2064,17 +2087,17 @@ FrameState::pushCopyOf(uint32 index)
 FrameEntry *
 FrameState::walkTrackerForUncopy(FrameEntry *original)
 {
     uint32 firstCopy = InvalidIndex;
     FrameEntry *bestFe = NULL;
     uint32 ncopies = 0;
     for (uint32 i = original->trackerIndex() + 1; i < a->tracker.nentries; i++) {
         FrameEntry *fe = a->tracker[i];
-        if (fe >= sp)
+        if (deadEntry(fe))
             continue;
         if (fe->isCopy() && fe->copyOf() == original) {
             if (firstCopy == InvalidIndex) {
                 firstCopy = i;
                 bestFe = fe;
             } else if (fe < bestFe) {
                 bestFe = fe;
             }
@@ -2210,18 +2233,17 @@ FrameState::uncopy(FrameEntry *original)
          * okay if it's spilled.
          */
         if (original->type.inMemory() && !fe->type.synced())
             tempRegForType(original);
         fe->type.inherit(original->type);
         if (fe->type.inRegister())
             regstate(fe->type.reg()).reassociate(fe);
     } else {
-        JS_ASSERT(fe->isTypeKnown());
-        JS_ASSERT(fe->getKnownType() == original->getKnownType());
+        fe->setType(original->getKnownType());
     }
     if (original->isType(JSVAL_TYPE_DOUBLE)) {
         if (original->data.inMemory() && !fe->data.synced())
             tempFPRegForData(original);
         fe->data.inherit(original->data);
         if (fe->data.inFPRegister())
             regstate(fe->data.fpreg()).reassociate(fe);
     } else {
@@ -2237,17 +2259,17 @@ FrameState::uncopy(FrameEntry *original)
 
 bool
 FrameState::hasOnlyCopy(FrameEntry *backing, FrameEntry *fe)
 {
     JS_ASSERT(backing->isCopied() && fe->copyOf() == backing);
 
     for (uint32 i = backing->trackerIndex() + 1; i < a->tracker.nentries; i++) {
         FrameEntry *nfe = a->tracker[i];
-        if (nfe != fe && nfe < sp && nfe->isCopy() && nfe->copyOf() == backing)
+        if (nfe != fe && !deadEntry(nfe) && nfe->isCopy() && nfe->copyOf() == backing)
             return false;
     }
 
     return true;
 }
 
 void
 FrameState::separateBinaryEntries(FrameEntry *lhs, FrameEntry *rhs)
@@ -2318,17 +2340,17 @@ FrameState::forgetEntry(FrameEntry *fe)
     if (fe->isCopied()) {
         uncopy(fe);
         if (!fe->isCopied())
             forgetAllRegs(fe);
     } else {
         forgetAllRegs(fe);
     }
 
-    if (fe >= sp)
+    if (fe >= spBase && fe < sp)
         a->extraArray[fe - spBase].reset();
 }
 
 void
 FrameState::storeTop(FrameEntry *target, JSValueType type, bool popGuaranteed)
 {
     /* Detect something like (x = x) which is a no-op. */
     FrameEntry *top = peek(-1);
@@ -2369,21 +2391,16 @@ FrameState::storeTop(FrameEntry *target,
         JS_ASSERT(backing->trackerIndex() < top->trackerIndex());
 
         if (backing < target) {
             /* local.idx < backing.idx means local cannot be a copy yet */
             if (target->trackerIndex() < backing->trackerIndex())
                 swapInTracker(backing, target);
             target->setNotCopied();
             target->setCopyOf(backing);
-            if (backing->isTypeKnown())
-                target->setType(backing->getKnownType());
-            else
-                target->type.invalidate();
-            target->data.invalidate();
             return;
         }
 
         /*
          * If control flow lands here, then there was a bytecode sequence like
          *
          *  ENTERBLOCK 2
          *  GETLOCAL 1
@@ -2397,17 +2414,17 @@ FrameState::storeTop(FrameEntry *target,
          * whether a region on the stack will be popped all at once. Bleh!
          *
          * This should be rare except in browser code (and maybe even then),
          * but even so there's a quick workaround. We take all copies of the
          * backing fe, and redirect them to be copies of the destination.
          */
         for (uint32 i = backing->trackerIndex() + 1; i < a->tracker.nentries; i++) {
             FrameEntry *fe = a->tracker[i];
-            if (fe >= sp)
+            if (deadEntry(fe))
                 continue;
             if (fe->isCopy() && fe->copyOf() == backing) {
                 fe->setCopyOf(target);
                 copied = true;
             }
         }
     }
     backing->setNotCopied();
@@ -2463,57 +2480,33 @@ FrameState::storeTop(FrameEntry *target,
             /*
              * Treat the stored entry as an int even if inference has marked it
              * as a float (we will fixDoubles on it before branching), to avoid
              * demoting the backing.
              */
             if (type == JSVAL_TYPE_DOUBLE)
                 type = JSVAL_TYPE_INT32;
             JS_ASSERT_IF(backing->isTypeKnown(), backing->isType(type));
-            if (!backing->isTypeKnown()) {
-                /*
-                 * If we update the type of the backing, we need to watch for
-                 * any copies of the backing which we already redirected to the
-                 * target. These also need to have their types updated, to
-                 * preserve the invariant that entries have the same type as
-                 * their copies.
-                 */
+            if (!backing->isTypeKnown())
                 learnType(backing, type);
-                for (uint32 i = backing->trackerIndex() + 1; copied && i < a->tracker.nentries; i++) {
-                    FrameEntry *fe = a->tracker[i];
-                    if (fe < sp && fe->isCopy() && fe->copyOf() == target)
-                        fe->setType(type);
-                }
-            }
             target->setType(type);
         } else {
             FPRegisterID fpreg = allocFPReg();
             syncFe(backing);
             masm.moveInt32OrDouble(addressOf(backing), fpreg);
 
             forgetAllRegs(backing);
-
             backing->setType(JSVAL_TYPE_DOUBLE);
-            for (uint32 i = backing->trackerIndex() + 1; copied && i < a->tracker.nentries; i++) {
-                FrameEntry *fe = a->tracker[i];
-                if (fe < sp && fe->isCopy() && fe->copyOf() == target)
-                    fe->setType(JSVAL_TYPE_DOUBLE);
-            }
-
             target->setType(JSVAL_TYPE_DOUBLE);
             target->data.setFPRegister(fpreg);
             regstate(fpreg).associate(target, RematInfo::DATA);
         }
     }
 
-    if (!backing->isTypeKnown())
-        backing->type.invalidate();
-    backing->data.invalidate();
     backing->setCopyOf(target);
-
     JS_ASSERT(top->copyOf() == target);
 
     /*
      * Right now, |backing| is a copy of |target| (note the reversal), but
      * |target| is not marked as copied. This is an optimization so uncopy()
      * may avoid frame traversal.
      *
      * There are two cases where we must set the copy bit, however:
@@ -2721,17 +2714,17 @@ FrameState::binaryEntryLive(FrameEntry *
     /*
      * Compute whether fe is live after the binary operation performed at the current
      * bytecode. This is similar to variableLive except that it returns false for the
      * top two stack entries and special cases LOCALINC/ARGINC and friends, which fuse
      * a binary operation before writing over the local/arg.
      */
     JS_ASSERT(cx->typeInferenceEnabled());
 
-    if (fe >= sp - 2)
+    if (deadEntry(fe, 2))
         return false;
 
     switch (JSOp(*PC)) {
       case JSOP_INCLOCAL:
       case JSOP_DECLOCAL:
       case JSOP_LOCALINC:
       case JSOP_LOCALDEC:
         if (fe - locals == (int) GET_SLOTNO(PC))
@@ -3010,8 +3003,30 @@ FrameState::maybePinType(FrameEntry *fe)
 
 void
 FrameState::maybeUnpinReg(MaybeRegisterID reg)
 {
     if (reg.isSet())
         unpinReg(reg.reg());
 }
 
+uint32
+FrameState::allocTemporary()
+{
+    if (temporariesTop == temporaries + TEMPORARY_LIMIT)
+        return uint32(-1);
+    FrameEntry *fe = temporariesTop++;
+    fe->lastLoop = 0;
+    return fe - temporaries;
+}
+
+void
+FrameState::clearTemporaries()
+{
+    for (FrameEntry *fe = temporaries; fe < temporariesTop; fe++) {
+        if (!fe->isTracked())
+            continue;
+        forgetAllRegs(fe);
+        fe->resetSynced();
+    }
+
+    temporariesTop = temporaries;
+}
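
The two methods above amount to a bump allocator over a fixed pool of
TEMPORARY_LIMIT entries sitting past the frame's normal slots. Below is a
minimal standalone model of that allocation discipline; the types and names
are simplified stand-ins for the engine's, not its actual code.

    #include <cassert>
    #include <cstdint>

    // Model of allocTemporary()/clearTemporaries(): bump allocation over a
    // fixed block, with uint32(-1) as the failure sentinel.
    struct Entry { uint32_t lastLoop; };

    struct Temporaries {
        static const uint32_t LIMIT = 10;
        Entry pool[LIMIT];
        Entry *top;

        Temporaries() : top(pool) {}

        uint32_t alloc() {
            if (top == pool + LIMIT)
                return uint32_t(-1);     // limit reached; caller must bail out
            Entry *fe = top++;
            fe->lastLoop = 0;
            return uint32_t(fe - pool);  // index later passed to getTemporary()
        }

        void clear() { top = pool; }     // release every temporary at once
    };

    int main() {
        Temporaries t;
        for (uint32_t i = 0; i < Temporaries::LIMIT; i++)
            assert(t.alloc() == i);
        assert(t.alloc() == uint32_t(-1));  // pool exhausted
        t.clear();
        assert(t.alloc() == 0);             // reusable after clearing
        return 0;
    }
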
--- a/js/src/methodjit/FrameState.h
+++ b/js/src/methodjit/FrameState.h
@@ -873,16 +873,23 @@ class FrameState
 #ifdef DEBUG
     const char * entryName(const FrameEntry *fe) const;
     void dumpAllocation(RegisterAllocation *alloc);
 #else
     const char * entryName(const FrameEntry *fe) const { return NULL; }
 #endif
     const char * entryName(uint32 slot) { return entryName(entries + slot); }
 
+    /* Maximum number of analysis temporaries the FrameState can track. */
+    static const uint32 TEMPORARY_LIMIT = 10;
+
+    uint32 allocTemporary();  /* -1 if limit reached. */
+    void clearTemporaries();
+    inline FrameEntry *getTemporary(uint32 which);
+
   private:
     inline AnyRegisterID allocAndLoadReg(FrameEntry *fe, bool fp, RematInfo::RematType type);
     inline void forgetReg(AnyRegisterID reg);
     AnyRegisterID evictSomeReg(uint32 mask);
     void evictReg(AnyRegisterID reg);
     inline FrameEntry *rawPush();
     inline void addToTracker(FrameEntry *fe);
 
@@ -939,18 +946,23 @@ class FrameState
         return &entries[index];
     }
 
     uint32 indexOf(int32 depth) const {
         JS_ASSERT(uint32((sp + depth) - entries) < feLimit(script));
         return uint32((sp + depth) - entries);
     }
 
+    /* Stack and temporary entries whose contents should be disregarded. */
+    bool deadEntry(const FrameEntry *fe, unsigned uses = 0) const {
+        return (fe >= (sp - uses) && fe < temporaries) || fe >= temporariesTop;
+    }
+
     static uint32 feLimit(JSScript *script) {
-        return script->nslots + 2 + (script->fun ? script->fun->nargs : 0);
+        return script->nslots + 2 + (script->fun ? script->fun->nargs : 0) + TEMPORARY_LIMIT;
     }
 
     RegisterState & regstate(AnyRegisterID reg) {
         JS_ASSERT(reg.reg_ < Registers::TotalAnyRegisters);
         return a->regstate_[reg.reg_];
     }
 
     const RegisterState & regstate(AnyRegisterID reg) const {
@@ -968,16 +980,21 @@ class FrameState
     bool isArg(const FrameEntry *fe) const {
         return script->fun && fe >= args && fe - args < script->fun->nargs;
     }
 
     bool isLocal(const FrameEntry *fe) const {
         return fe >= locals && fe - locals < script->nfixed;
     }
 
+    bool isTemporary(const FrameEntry *fe) const {
+        JS_ASSERT_IF(fe >= temporaries, fe < temporariesTop);
+        return fe >= temporaries;
+    }
+
     int32 frameOffset(const FrameEntry *fe, ActiveFrame *a) const;
     Address addressOf(const FrameEntry *fe, ActiveFrame *a) const;
 
     void updateActiveFrame();
     void syncInlinedEntry(FrameEntry *fe, const FrameEntry *parent);
     void associateReg(FrameEntry *fe, RematInfo::RematType type, AnyRegisterID reg);
 
     inline void modifyReg(AnyRegisterID reg);
@@ -1062,16 +1079,23 @@ class FrameState
     FrameEntry *locals;
 
     /* Base pointer for the stack. */
     FrameEntry *spBase;
 
     /* Dynamic stack pointer. */
     FrameEntry *sp;
 
+    /*
+     * Track state for analysis temporaries. The meaning of these temporaries
+     * is opaque to the frame state, which just tracks where they are stored.
+     */
+    FrameEntry *temporaries;
+    FrameEntry *temporariesTop;
+
     /* Current PC, for managing register allocation. */
     jsbytecode *PC;
 
     /* Stack of active loops. */
     LoopState *loop;
 
     bool inTryBlock;
 };
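
deadEntry() leans on the entry array layout: the temporaries block sits
immediately past the stack entries, so pointer comparisons classify popped
stack slots and unallocated temporaries alike. A standalone sketch of the
predicate over a flat array follows; the sizes and offsets are illustrative,
not the engine's.

    #include <cassert>

    struct Entry {};

    struct Frame {
        Entry slots[16];
        Entry *sp;              // one past the live stack
        Entry *temporaries;     // start of the temporary block
        Entry *temporariesTop;  // one past the allocated temporaries

        bool deadEntry(const Entry *fe, unsigned uses = 0) const {
            return (fe >= sp - uses && fe < temporaries) || fe >= temporariesTop;
        }
    };

    int main() {
        Frame f;
        f.sp = f.slots + 8;               // slots[0..7] are live stack
        f.temporaries = f.slots + 12;     // slots[12..15] reserved for temporaries
        f.temporariesTop = f.slots + 13;  // one temporary allocated

        assert(!f.deadEntry(&f.slots[7]));    // topmost live stack entry
        assert(f.deadEntry(&f.slots[7], 1));  // ...dead once 1 use is popped
        assert(f.deadEntry(&f.slots[9]));     // past sp: dead
        assert(!f.deadEntry(&f.slots[12]));   // allocated temporary: live
        assert(f.deadEntry(&f.slots[13]));    // unallocated temporary: dead
        return 0;
    }
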
--- a/js/src/methodjit/ImmutableSync.cpp
+++ b/js/src/methodjit/ImmutableSync.cpp
@@ -96,16 +96,20 @@ ImmutableSync::allocReg()
         lastResort = 0;
 
         if (!regs[i]) {
             /* If the frame does not own this register, take it! */
             FrameEntry *fe = frame->regstate(reg).usedBy();
             if (!fe)
                 return reg;
 
+            /* Take any register used for a loop temporary. */
+            if (frame->isTemporary(fe))
+                return reg;
+
             evictFromFrame = i;
 
             /*
              * If not copied, we can sync and not have to load again later.
              * That's about as good as it gets, so just break out now.
              */
             if (!fe->isCopied())
                 break;
--- a/js/src/methodjit/LoopState.cpp
+++ b/js/src/methodjit/LoopState.cpp
@@ -33,29 +33,32 @@
  * and other provisions required by the GPL or the LGPL. If you do not delete
  * the provisions above, a recipient may use your version of this file under
  * the terms of any one of the MPL, the GPL or the LGPL.
  *
  * ***** END LICENSE BLOCK ***** */
 
 #include "methodjit/Compiler.h"
 #include "methodjit/LoopState.h"
+#include "methodjit/FrameState-inl.h"
 
 using namespace js;
 using namespace js::mjit;
 using namespace js::analyze;
 
 LoopState::LoopState(JSContext *cx, JSScript *script,
                      Compiler *cc, FrameState *frame,
                      Script *analysis, LifetimeScript *liveness)
     : cx(cx), script(script), cc(*cc), frame(*frame), analysis(analysis), liveness(liveness),
       lifetime(NULL), alloc(NULL), loopRegs(0), skipAnalysis(false),
       loopJoins(CompilerAllocPolicy(cx, *cc)),
       loopPatches(CompilerAllocPolicy(cx, *cc)),
+      restoreInvariantCalls(CompilerAllocPolicy(cx, *cc)),
       hoistedBoundsChecks(CompilerAllocPolicy(cx, *cc)),
+      invariantArraySlots(CompilerAllocPolicy(cx, *cc)),
       outer(NULL), PC(NULL)
 {
     JS_ASSERT(cx->typeInferenceEnabled());
 }
 
 bool
 LoopState::init(jsbytecode *head, Jump entry, jsbytecode *entryTarget)
 {
@@ -118,43 +121,87 @@ LoopState::init(jsbytecode *head, Jump e
      * Don't hoist bounds checks or loop invariant code in loops with safe
      * points in the middle, which the interpreter can join at directly without
      * performing hoisted bounds checks or doing initial computation of loop
      * invariant terms.
      */
     if (lifetime->hasSafePoints)
         this->skipAnalysis = true;
 
+    /*
+     * Don't do hoisting in loops with inner loops or calls. This is way too
+     * pessimistic and needs to get fixed.
+     */
+    if (lifetime->hasCallsLoops)
+        this->skipAnalysis = true;
+
     return true;
 }
 
 void
 LoopState::addJoin(unsigned index, bool script)
 {
     StubJoin r;
     r.index = index;
     r.script = script;
     loopJoins.append(r);
 }
 
 void
-LoopState::flushRegisters(StubCompiler &stubcc)
+LoopState::addInvariantCall(Jump jump, Label label, bool ool)
 {
-    clearRegisters();
+    RestoreInvariantCall call;
+    call.jump = jump;
+    call.label = label;
+    call.ool = ool;
+    restoreInvariantCalls.append(call);
+}
 
+void
+LoopState::flushLoop(StubCompiler &stubcc)
+{
+    clearLoopRegisters();
+
+    /*
+     * Patch stub compiler rejoins with loads of loop-carried registers
+     * discovered after the fact.
+     */
     for (unsigned i = 0; i < loopPatches.length(); i++) {
         const StubJoinPatch &p = loopPatches[i];
         stubcc.patchJoin(p.join.index, p.join.script, p.address, p.reg);
     }
     loopJoins.clear();
     loopPatches.clear();
+
+    if (hasInvariants()) {
+        for (unsigned i = 0; i < restoreInvariantCalls.length(); i++) {
+            RestoreInvariantCall &call = restoreInvariantCalls[i];
+            Assembler &masm = cc.getAssembler(true);
+            if (call.ool) {
+                call.jump.linkTo(masm.label(), &masm);
+                restoreInvariants(masm);
+                masm.jump().linkTo(call.label, &masm);
+            } else {
+                stubcc.linkExitDirect(call.jump, masm.label());
+                restoreInvariants(masm);
+                stubcc.crossJump(masm.jump(), call.label);
+            }
+        }
+    } else {
+        for (unsigned i = 0; i < restoreInvariantCalls.length(); i++) {
+            RestoreInvariantCall &call = restoreInvariantCalls[i];
+            Assembler &masm = cc.getAssembler(call.ool);
+            call.jump.linkTo(call.label, &masm);
+        }
+    }
+    restoreInvariantCalls.clear();
 }
 
 void
-LoopState::clearRegisters()
+LoopState::clearLoopRegisters()
 {
     alloc->clearLoops();
     loopRegs = 0;
 }
 
 bool
 LoopState::loopInvariantEntry(const FrameEntry *fe)
 {
@@ -176,37 +223,65 @@ LoopState::loopInvariantEntry(const Fram
     if (slot < nargs && !analysis->argEscapes(slot))
         return true;
     if (script->fun)
         slot -= script->fun->nargs;
 
     return !analysis->localEscapes(slot);
 }
 
-void
+bool
 LoopState::addHoistedCheck(uint32 arraySlot, uint32 valueSlot, int32 constant)
 {
     /*
-     * Check to see if this bounds check either implies or is implied by
-     * an existing hoisted check.
+     * Check to see if this bounds check either implies or is implied by an
+     * existing hoisted check.
      */
     for (unsigned i = 0; i < hoistedBoundsChecks.length(); i++) {
         HoistedBoundsCheck &check = hoistedBoundsChecks[i];
         if (check.arraySlot == arraySlot && check.valueSlot == valueSlot) {
             if (check.constant < constant)
                 check.constant = constant;
-            return;
+            return true;
         }
     }
 
+    /*
+     * Maintain an invariant that for any array with a hoisted bounds check,
+     * we also have a loop invariant slot to hold the array's slots pointer.
+     * The compiler gets invariant array slots only for accesses with a hoisted
+     * bounds check, so this makes invariantSlots infallible.
+     */
+    bool hasInvariantSlots = false;
+    for (unsigned i = 0; !hasInvariantSlots && i < invariantArraySlots.length(); i++) {
+        if (invariantArraySlots[i].arraySlot == arraySlot)
+            hasInvariantSlots = true;
+    }
+    if (!hasInvariantSlots) {
+        uint32 which = frame.allocTemporary();
+        if (which == uint32(-1))
+            return false;
+        FrameEntry *fe = frame.getTemporary(which);
+
+        JaegerSpew(JSpew_Analysis, "Using %s for loop invariant slots of %s\n",
+                   frame.entryName(fe), frame.entryName(arraySlot));
+
+        InvariantArraySlots slots;
+        slots.arraySlot = arraySlot;
+        slots.temporary = which;
+        invariantArraySlots.append(slots);
+    }
+
     HoistedBoundsCheck check;
     check.arraySlot = arraySlot;
     check.valueSlot = valueSlot;
     check.constant = constant;
     hoistedBoundsChecks.append(check);
+
+    return true;
 }
 
 void
 LoopState::setLoopReg(AnyRegisterID reg, FrameEntry *fe)
 {
     JS_ASSERT(alloc->loop(reg));
     loopRegs.takeReg(reg);
 
@@ -244,68 +319,74 @@ bool
 LoopState::hoistArrayLengthCheck(const FrameEntry *obj, const FrameEntry *index)
 {
     if (skipAnalysis || script->failedBoundsCheck)
         return false;
 
     /*
      * Note: this should only be used when the object is known to be a dense
      * array (if it is an object at all) whose length has never shrunk.
+     * This is determined by checking types->getKnownObjectKind for the object.
      */
 
     obj = obj->backing();
     index = index->backing();
 
     JaegerSpew(JSpew_Analysis, "Trying to hoist bounds check array %s index %s\n",
                frame.entryName(obj), frame.entryName(index));
 
     if (!loopInvariantEntry(obj)) {
         JaegerSpew(JSpew_Analysis, "Object is not loop invariant\n");
         return false;
     }
 
+    types::TypeSet *objTypes = cc.getTypeSet(obj);
+    JS_ASSERT(objTypes && !objTypes->unknown());
+
+    /* Currently, we only do hoisting/LICM on values which are definitely objects. */
+    if (objTypes->getKnownTypeTag(cx) != JSVAL_TYPE_OBJECT) {
+        JaegerSpew(JSpew_Analysis, "Object might be a primitive\n");
+        return false;
+    }
+
     /*
      * Check for an overlap with the arrays we think might grow in this loop.
      * This information is only a guess; if we don't think the array can grow
      * but it actually can, we will probably recompile after the hoisted
      * bounds check fails.
      */
     if (lifetime->nGrowArrays) {
         types::TypeObject **growArrays = lifetime->growArrays;
-        types::TypeSet *types = cc.getTypeSet(obj);
-        JS_ASSERT(types && !types->unknown());
-        unsigned count = types->getObjectCount();
+        unsigned count = objTypes->getObjectCount();
         for (unsigned i = 0; i < count; i++) {
-            types::TypeObject *object = types->getObject(i);
+            types::TypeObject *object = objTypes->getObject(i);
             if (object) {
                 for (unsigned j = 0; j < lifetime->nGrowArrays; j++) {
                     if (object == growArrays[j]) {
                         JaegerSpew(JSpew_Analysis, "Object might grow inside loop\n");
                         return false;
                     }
                 }
             }
         }
     }
 
     if (index->isConstant()) {
         /* Hoist checks on x[n] accesses for constant n. */
         int32 value = index->getValue().toInt32();
         JaegerSpew(JSpew_Analysis, "Hoisted as initlen > %d\n", value);
 
-        addHoistedCheck(frame.indexOfFe(obj), uint32(-1), value);
-        return true;
+        return addHoistedCheck(frame.indexOfFe(obj), uint32(-1), value);
     }
 
     if (loopInvariantEntry(index)) {
         /* Hoist checks on x[y] accesses when y is loop invariant. */
         JaegerSpew(JSpew_Analysis, "Hoisted as initlen > %s\n", frame.entryName(index));
 
-        addHoistedCheck(frame.indexOfFe(obj), frame.indexOfFe(index), 0);
-        return true;
+        return addHoistedCheck(frame.indexOfFe(obj), frame.indexOfFe(index), 0);
     }
 
     if (frame.indexOfFe(index) == lifetime->testLHS && lifetime->testLessEqual) {
         /*
          * If the access is of the form x[y] where we know that y <= z + n at
          * the head of the loop, hoist the check as initlen > z + n provided
          * that y has not been modified since the head of the loop.
          */
@@ -331,28 +412,28 @@ LoopState::hoistArrayLengthCheck(const F
                 return false;
             }
         }
 
         JaegerSpew(JSpew_Analysis, "Hoisted as initlen > %s + %d\n",
                    (rhs == LifetimeLoop::UNASSIGNED) ? "" : frame.entryName(rhs),
                    constant);
 
-        addHoistedCheck(frame.indexOfFe(obj), rhs, constant);
-        return true;
+        return addHoistedCheck(frame.indexOfFe(obj), rhs, constant);
     }
 
     JaegerSpew(JSpew_Analysis, "No match found\n");
-
     return false;
 }
 
 bool
 LoopState::checkHoistedBounds(jsbytecode *PC, Assembler &masm, Vector<Jump> *jumps)
 {
+    restoreInvariants(masm);
+
     /*
      * Emit code to validate all hoisted bounds checks, filling jumps with all
      * failure paths. This is done from a fully synced state, and all registers
      * can be used as temporaries. Note: we assume that no modifications to the
      * terms in the hoisted checks occur between PC and the head of the loop.
      */
 
     for (unsigned i = 0; i < hoistedBoundsChecks.length(); i++) {
@@ -362,24 +443,68 @@ LoopState::checkHoistedBounds(jsbytecode
         RegisterID initlen = Registers::ArgReg0;
         masm.loadPayload(frame.addressOf(check.arraySlot), initlen);
         masm.load32(Address(initlen, offsetof(JSObject, initializedLength)), initlen);
 
         if (check.valueSlot != uint32(-1)) {
             RegisterID value = Registers::ArgReg1;
             masm.loadPayload(frame.addressOf(check.valueSlot), value);
             if (check.constant != 0) {
-                Jump overflow = masm.branchAdd32(Assembler::Overflow, Imm32(check.constant), value);
+                Jump overflow = masm.branchAdd32(Assembler::Overflow,
+                                                 Imm32(check.constant), value);
                 if (!jumps->append(overflow))
                     return false;
             }
             Jump j = masm.branch32(Assembler::BelowOrEqual, initlen, value);
             if (!jumps->append(j))
                 return false;
         } else {
             Jump j = masm.branch32(Assembler::BelowOrEqual, initlen, Imm32(check.constant));
             if (!jumps->append(j))
                 return false;
         }
     }
 
     return true;
 }
+
+FrameEntry *
+LoopState::invariantSlots(const FrameEntry *obj)
+{
+    obj = obj->backing();
+    uint32 slot = frame.indexOfFe(obj);
+
+    for (unsigned i = 0; i < invariantArraySlots.length(); i++) {
+        if (invariantArraySlots[i].arraySlot == slot)
+            return frame.getTemporary(invariantArraySlots[i].temporary);
+    }
+
+    /* addHoistedCheck should have ensured there is an entry for the slots. */
+    JS_NOT_REACHED("Missing invariant slots");
+    return NULL;
+}
+
+void
+LoopState::restoreInvariants(Assembler &masm)
+{
+    /*
+     * Restore all invariants in memory when entering the loop or after any
+     * scripted or C++ call. Care should be taken not to clobber the return
+     * register, which may still be live after some calls.
+     */
+
+    Registers regs(Registers::AvailRegs);
+    regs.takeReg(Registers::ReturnReg);
+
+    for (unsigned i = 0; i < invariantArraySlots.length(); i++) {
+        const InvariantArraySlots &entry = invariantArraySlots[i];
+        FrameEntry *fe = frame.getTemporary(entry.temporary);
+
+        Address array = frame.addressOf(entry.arraySlot);
+        Address address = frame.addressOf(fe);
+
+        RegisterID reg = regs.takeAnyReg().reg();
+        masm.loadPayload(array, reg);
+        masm.loadPtr(Address(reg, offsetof(JSObject, slots)), reg);
+        masm.storePtr(reg, address);
+        regs.putReg(reg);
+    }
+}
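
The implication reasoning in addHoistedCheck() deserves spelling out: for a
fixed (arraySlot, valueSlot) pair, only the largest constant ever needs to be
tested at loop entry, since initlen > v + c implies initlen > v + c' for any
c' <= c. A standalone model of just that merge step follows; the real method
additionally allocates an invariant slots temporary and can fail.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct Check { uint32_t arraySlot, valueSlot; int32_t constant; };

    // Model of the merge in addHoistedCheck(): strengthen an existing check
    // rather than append a weaker, implied one.
    void addCheck(std::vector<Check> &checks,
                  uint32_t array, uint32_t value, int32_t constant)
    {
        for (Check &c : checks) {
            if (c.arraySlot == array && c.valueSlot == value) {
                if (c.constant < constant)
                    c.constant = constant;  // strengthen the existing check
                return;
            }
        }
        checks.push_back(Check{array, value, constant});
    }

    int main() {
        std::vector<Check> checks;
        addCheck(checks, 3, 5, 0);
        addCheck(checks, 3, 5, 4);  // subsumes the first check
        addCheck(checks, 3, 5, 2);  // already implied; no change
        assert(checks.size() == 1 && checks[0].constant == 4);
        return 0;
    }
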
--- a/js/src/methodjit/LoopState.h
+++ b/js/src/methodjit/LoopState.h
@@ -70,17 +70,28 @@ namespace mjit {
  * implies the bounds check at one or more array accesses, we hoist that and
  * only check it when initially entering the loop (from JIT code or the
  * interpreter). This condition never needs to be checked again within the
  * loop, but can be invalidated if the script's arguments are indirectly
  * written via the 'arguments' property/local (which loop analysis assumes
  * does not happen) or if the involved arrays shrink dynamically through
  * assignments to the length property.
  *
- * Loop invariant code motion. TODO!
+ * Loop invariant code motion. If we can determine a computation (arithmetic,
+ * array slot pointer or property access) is loop invariant, we give it a slot
+ * on the stack and preserve its value throughout the loop. We can allocate
+ * and carry registers for loop invariant slots as for normal slots. These
+ * slots sit above the frame's normal slots, and are transient --- they are
+ * clobbered whenever a new frame is pushed. We thus regenerate the loop
+ * invariant slots after every C++ and scripted call, and avoid doing LICM on
+ * loops which have such calls. This has the nice property that the slots only
+ * need to be loop invariant with respect to the side effects that happen
+ * directly in the loop; if a C++ call runs a getter which scribbles on the
+ * object properties involved in an 'invariant', then we will reload the
+ * invariant's new value after the call finishes.
  */
 
 class LoopState : public MacroAssemblerTypedefs
 {
     JSContext *cx;
     JSScript *script;
     Compiler &cc;
     FrameState &frame;
@@ -117,49 +128,80 @@ class LoopState : public MacroAssemblerT
     struct StubJoinPatch {
         StubJoin join;
         Address address;
         AnyRegisterID reg;
     };
     Vector<StubJoinPatch,16,CompilerAllocPolicy> loopPatches;
 
     /*
+     * Jump/label pair recorded immediately after each call in the loop, to
+     * patch with restores of the loop invariant stack values.
+     */
+    struct RestoreInvariantCall {
+        Jump jump;
+        Label label;
+        bool ool;
+    };
+    Vector<RestoreInvariantCall> restoreInvariantCalls;
+
+    /*
      * Array bounds check hoisted out of the loop. This is a check that needs
-     * to be performed, in terms of the state at the loop head.
+     * to be performed, expressed in terms of the state at the loop head.
      */
     struct HoistedBoundsCheck
     {
         /* initializedLength(array) > value + constant */
         uint32 arraySlot;
         uint32 valueSlot;
         int32 constant;
     };
     Vector<HoistedBoundsCheck, 4, CompilerAllocPolicy> hoistedBoundsChecks;
 
     bool loopInvariantEntry(const FrameEntry *fe);
-    void addHoistedCheck(uint32 arraySlot, uint32 valueSlot, int32 constant);
+    bool addHoistedCheck(uint32 arraySlot, uint32 valueSlot, int32 constant);
+
+    /*
+     * Track analysis temporaries in the frame state which hold the slots
+     * pointers of arrays throughout the loop.
+     */
+    struct InvariantArraySlots
+    {
+        uint32 arraySlot;
+        uint32 temporary;
+    };
+    Vector<InvariantArraySlots, 4, CompilerAllocPolicy> invariantArraySlots;
+
+    bool hasInvariants() { return !invariantArraySlots.empty(); }
+    void restoreInvariants(Assembler &masm);
 
   public:
 
     /* Outer loop to this one, in case of loop nesting. */
     LoopState *outer;
 
     /* Current bytecode for compilation. */
     jsbytecode *PC;
 
     LoopState(JSContext *cx, JSScript *script,
               Compiler *cc, FrameState *frame,
               analyze::Script *analysis, analyze::LifetimeScript *liveness);
     bool init(jsbytecode *head, Jump entry, jsbytecode *entryTarget);
 
+    bool generatingInvariants() { return !skipAnalysis; }
+
+    /* Add a call with trailing jump/label, after which invariants need to be restored. */
+    void addInvariantCall(Jump jump, Label label, bool ool);
+
     uint32 headOffset() { return lifetime->head; }
     uint32 getLoopRegs() { return loopRegs.freeMask; }
 
     Jump entryJump() { return entry; }
     uint32 entryOffset() { return lifetime->entry; }
+    uint32 backedgeOffset() { return lifetime->backedge; }
 
     /* Whether the payload of slot is carried around the loop in a register. */
     bool carriesLoopReg(FrameEntry *fe) { return alloc->hasAnyReg(frame.indexOfFe(fe)); }
 
     void setLoopReg(AnyRegisterID reg, FrameEntry *fe);
 
     void clearLoopReg(AnyRegisterID reg)
     {
@@ -171,19 +213,22 @@ class LoopState : public MacroAssemblerT
         if (loopRegs.hasReg(reg)) {
             loopRegs.takeReg(reg);
             alloc->setUnassigned(reg);
             JaegerSpew(JSpew_Regalloc, "clearing loop register %s\n", reg.name());
         }
     }
 
     void addJoin(unsigned index, bool script);
-    void flushRegisters(StubCompiler &stubcc);
-    void clearRegisters();
+    void clearLoopRegisters();
+
+    void flushLoop(StubCompiler &stubcc);
 
     bool hoistArrayLengthCheck(const FrameEntry *obj, const FrameEntry *id);
+    FrameEntry *invariantSlots(const FrameEntry *obj);
+
     bool checkHoistedBounds(jsbytecode *PC, Assembler &masm, Vector<Jump> *jumps);
 };
 
 } /* namespace mjit */
 } /* namespace js */
 
 #endif /* jsjaeger_loopstate_h__ */
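
Per entry, restoreInvariants() emits three operations: load the array payload
from its stack slot, load the object's slots field, and store the result to
the temporary's canonical stack address. A standalone model of that sequence
follows; FakeObject and its fields are illustrative stand-ins, not the
engine's JSObject layout.

    #include <cassert>
    #include <cstdint>

    struct FakeObject { uint64_t header; int *slots; };

    // One iteration of the restoreInvariants() loop, in plain C++.
    void restoreOne(FakeObject **arrayStackSlot, int **temporaryStackSlot) {
        FakeObject *obj = *arrayStackSlot;  // masm.loadPayload(array, reg)
        int *slots = obj->slots;            // masm.loadPtr(..., reg)
        *temporaryStackSlot = slots;        // masm.storePtr(reg, address)
    }

    int main() {
        int storage[4] = {0, 1, 2, 3};
        FakeObject obj = { 0, storage };
        FakeObject *arraySlot = &obj;  // models frame.addressOf(entry.arraySlot)
        int *temporary = 0;            // models frame.addressOf(fe), stale after a call
        restoreOne(&arraySlot, &temporary);
        assert(temporary == storage);  // temporary holds the slots pointer again
        return 0;
    }
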
--- a/js/src/methodjit/RematInfo.h
+++ b/js/src/methodjit/RematInfo.h
@@ -255,26 +255,46 @@ struct RematInfo {
         return fpreg_;
     }
 
     void setMemory() {
         location_ = PhysLoc_Memory;
         sync_ = SYNCED;
     }
 
+#ifdef DEBUG
     void invalidate() {
         location_ = PhysLoc_Invalid;
     }
+#else
+    void invalidate() {}
+#endif
 
     void setConstant() { location_ = PhysLoc_Constant; }
 
-    bool isConstant() const { return location_ == PhysLoc_Constant; }
-    bool inRegister() const { return location_ == PhysLoc_Register; }
-    bool inFPRegister() const { return location_ == PhysLoc_FPRegister; }
-    bool inMemory() const { return location_ == PhysLoc_Memory; }
+    bool isConstant() const {
+        JS_ASSERT(location_ != PhysLoc_Invalid);
+        return location_ == PhysLoc_Constant;
+    }
+
+    bool inRegister() const {
+        JS_ASSERT(location_ != PhysLoc_Invalid);
+        return location_ == PhysLoc_Register;
+    }
+
+    bool inFPRegister() const {
+        JS_ASSERT(location_ != PhysLoc_Invalid);
+        return location_ == PhysLoc_FPRegister;
+    }
+
+    bool inMemory() const {
+        JS_ASSERT(location_ != PhysLoc_Invalid);
+        return location_ == PhysLoc_Memory;
+    }
+
     bool synced() const { return sync_ == SYNCED; }
     void sync() {
         JS_ASSERT(!synced());
         sync_ = SYNCED;
     }
     void unsync() {
         sync_ = UNSYNCED;
     }
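
The split above turns invalidate() into a pure debug poison: release builds
pay nothing, while debug builds assert on any read of an invalidated location
(which is why the FrameState changes replace invalidate() with setMemory() on
paths that are read later). A standalone model of the pattern follows, keyed
off NDEBUG rather than the engine's DEBUG macro.

    #include <cassert>

    class Loc {
        enum State { Memory, Register, Invalid };
        State state_;
      public:
        Loc() : state_(Memory) {}

    #ifndef NDEBUG
        void invalidate() { state_ = Invalid; }  // poison, debug builds only
    #else
        void invalidate() {}                     // free in release builds
    #endif

        void setMemory() { state_ = Memory; }

        bool inMemory() const {
            assert(state_ != Invalid);  // catches reads of stale locations
            return state_ == Memory;
        }
    };

    int main() {
        Loc type;
        assert(type.inMemory());
        type.invalidate();
        type.setMemory();  // must re-establish a real location before reading
        assert(type.inMemory());
        return 0;
    }
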
--- a/js/src/methodjit/StubCompiler.cpp
+++ b/js/src/methodjit/StubCompiler.cpp
@@ -179,16 +179,23 @@ JSC::MacroAssembler::Call
 StubCompiler::emitStubCall(void *ptr, int32 slots)
 {
     JaegerSpew(JSpew_Insns, " ---- BEGIN SLOW CALL CODE ---- \n");
     DataLabelPtr inlinePatch;
     Call cl = masm.fallibleVMCall(cx->typeInferenceEnabled(),
                                   ptr, cc.outerPC(), &inlinePatch, slots);
     JaegerSpew(JSpew_Insns, " ---- END SLOW CALL CODE ---- \n");
 
+    /* Add a hook for restoring loop invariants if necessary. */
+    if (cc.loop && cc.loop->generatingInvariants()) {
+        Jump j = masm.jump();
+        Label l = masm.label();
+        cc.loop->addInvariantCall(j, l, true);
+    }
+
     /* Add the call site for debugging and recompilation. */
     Compiler::InternalCallSite site(masm.callReturnOffset(cl),
                                     cc.inlineIndex(), cc.inlinePC(),
                                     (size_t)ptr, true, true);
     site.inlinePatch = inlinePatch;
     cc.addCallSite(site);
     return cl;
 }
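
The hook records a jump/label pair per slow call; nothing is resolved until
LoopState::flushLoop(), which either routes each jump through the
invariant-restoring code or links it straight to its label when the loop
turned out to have no invariants. A standalone model of that deferred
patching follows, with integer offsets standing in for assembler jumps and
labels; the names are illustrative.

    #include <cstdio>
    #include <vector>

    struct CallSite { int jumpFrom; int resumeAt; };

    // Model of flushLoop(): resolve every recorded site one way or the other,
    // then forget them, mirroring restoreInvariantCalls.clear().
    void flushLoop(std::vector<CallSite> &sites, bool hasInvariants) {
        for (const CallSite &s : sites) {
            if (hasInvariants)
                std::printf("link %d -> [restore invariants] -> %d\n",
                            s.jumpFrom, s.resumeAt);
            else
                std::printf("link %d -> %d (detour collapses)\n",
                            s.jumpFrom, s.resumeAt);
        }
        sites.clear();
    }

    int main() {
        std::vector<CallSite> sites;
        sites.push_back(CallSite{100, 104});  // recorded by addInvariantCall()
        sites.push_back(CallSite{220, 224});
        flushLoop(sites, true);
        return 0;
    }
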