[INFER] Retest hoisted bounds checks after stub calls, bug 649693.
authorBrian Hackett <bhackett1024@gmail.com>
Thu, 14 Apr 2011 21:59:32 -0700
changeset 74948 cb06710a8eb7a21aba2453138c2497f4316bce24
parent 74947 5838535af4c7e275c22f8d7f3ca62e626769ac63
child 74949 50d7a9b2ecc5b373bf9bc25db6e8d3993b2d0302
push id2
push userbsmedberg@mozilla.com
push dateFri, 19 Aug 2011 14:38:13 +0000
bugs649693
milestone6.0a1
[INFER] Retest hoisted bounds checks after stub calls, bug 649693.
js/src/jit-test/tests/jaeger/loops/hoist-06.js
js/src/jit-test/tests/jaeger/loops/hoist-07.js
js/src/jsarray.cpp
js/src/jscntxt.h
js/src/jsinfer.cpp
js/src/jsinfer.h
js/src/jsinferinlines.h
js/src/methodjit/Compiler.cpp
js/src/methodjit/Compiler.h
js/src/methodjit/LoopState.cpp
js/src/methodjit/LoopState.h
js/src/methodjit/StubCalls.cpp
js/src/methodjit/StubCalls.h
js/src/methodjit/StubCompiler.cpp
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/jaeger/loops/hoist-06.js
@@ -0,0 +1,14 @@
+
+function foo(x, n, y) {
+  var q = 0;
+  for (var j = 0; j < n; j++) {
+    if (x[j] < y)
+      q++;
+  }
+  assertEq(q, 1);
+}
+
+var x = [1,2,3,4,5];
+var y = { valueOf: function() { x.length = 0; return 6; } };
+
+var a = foo(x, 5, y);
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/jaeger/loops/hoist-07.js
@@ -0,0 +1,17 @@
+
+var res = 0;
+
+function foo(x, n, y) {
+  for (var j = 0; j < n; j++) {
+    x[j];
+    y.f;
+  }
+}
+
+var x = [1,2,3,4,5];
+var y = {};
+Object.defineProperty(y, 'f', {get:function() { res++; x.length = 2; }});
+
+var a = foo(x, 5, y);
+
+assertEq(res, 5);
--- a/js/src/jsarray.cpp
+++ b/js/src/jsarray.cpp
@@ -665,19 +665,16 @@ array_length_setter(JSContext *cx, JSObj
 
     if (oldlen == newlen)
         return true;
 
     vp->setNumber(newlen);
     if (oldlen < newlen)
         return obj->setArrayLength(cx, newlen);
 
-    if (!cx->markTypeArrayShrank(obj->getType()))
-        return false;
-
     if (obj->isDenseArray()) {
         /*
          * Don't reallocate if we're not actually shrinking our slots. If we do
          * shrink slots here, shrink the initialized length too.  This permits us
          * us to disregard length when reading from arrays as long we are within
          * the initialized capacity.
          */
         jsuint oldcap = obj->getDenseArrayCapacity();
@@ -2352,19 +2349,16 @@ array_pop_dense(JSContext *cx, JSObject*
     if (!GetElement(cx, obj, index, &hole, vp))
         return JS_FALSE;
     if (hole && !cx->markTypeCallerUnexpected(TYPE_UNDEFINED))
         return JS_FALSE;
     if (!hole && DeleteArrayElement(cx, obj, index, true) < 0)
         return JS_FALSE;
 
     obj->setDenseArrayLength(index);
-    if (!cx->markTypeArrayShrank(obj->getType()))
-        return JS_FALSE;
-
     return JS_TRUE;
 }
 
 static JSBool
 array_pop(JSContext *cx, uintN argc, Value *vp)
 {
     JSObject *obj = ToObject(cx, &vp[1]);
     if (!obj)
@@ -2398,23 +2392,20 @@ array_shift(JSContext *cx, uintN argc, V
             *vp = obj->getDenseArrayElement(0);
             if (vp->isMagic(JS_ARRAY_HOLE)) {
                 vp->setUndefined();
                 if (!cx->markTypeCallerUnexpected(TYPE_UNDEFINED))
                     return JS_FALSE;
             }
             Value *elems = obj->getDenseArrayElements();
             memmove(elems, elems + 1, length * sizeof(jsval));
-            if (cx->typeInferenceEnabled()) {
+            if (cx->typeInferenceEnabled())
                 obj->setDenseArrayInitializedLength(obj->getDenseArrayInitializedLength() - 1);
-                if (!cx->markTypeArrayShrank(obj->getType()))
-                    return JS_FALSE;
-            } else {
+            else
                 obj->setDenseArrayElement(length, MagicValue(JS_ARRAY_HOLE));
-            }
             JS_ALWAYS_TRUE(obj->setArrayLength(cx, length));
             if (!js_SuppressDeletedIndexProperties(cx, obj, length, length + 1))
                 return JS_FALSE;
             return JS_TRUE;
         }
 
         /* Get the to-be-deleted property's value into vp ASAP. */
         JSBool hole;
--- a/js/src/jscntxt.h
+++ b/js/src/jscntxt.h
@@ -2150,19 +2150,16 @@ struct JSContext
     inline js::types::TypeObject *getTypeEmpty();
 
     /* Alias two properties in the type information for obj. */
     inline bool aliasTypeProperties(js::types::TypeObject *obj, jsid first, jsid second);
 
     /* Mark an array type as being not packed and, possibly, not dense. */
     inline bool markTypeArrayNotPacked(js::types::TypeObject *obj, bool notDense);
 
-    /* Mark an array type as having had its length shrink dynamically. */
-    inline bool markTypeArrayShrank(js::types::TypeObject *obj);
-
     /* Mark a function as being uninlineable (its .arguments property has been accessed). */
     inline bool markTypeFunctionUninlineable(js::types::TypeObject *obj);
 
     /* Monitor all properties of a type object as unknown. */
     inline bool markTypeObjectUnknownProperties(js::types::TypeObject *obj);
 
     /* Mark a type as possibly having special equality hooks. */
     inline bool markTypeObjectHasSpecialEquality(js::types::TypeObject *obj);
--- a/js/src/jsinfer.cpp
+++ b/js/src/jsinfer.cpp
@@ -2818,18 +2818,16 @@ TypeObject::print(JSContext *cx)
 
     if (unknownProperties()) {
         printf(" unknown");
     } else {
         if (!hasFlags(OBJECT_FLAG_NON_PACKED_ARRAY))
             printf(" packed");
         if (!hasFlags(OBJECT_FLAG_NON_DENSE_ARRAY))
             printf(" dense");
-        if (hasFlags(OBJECT_FLAG_ARRAY_SHRANK))
-            printf(" arrayShrank");
         if (hasFlags(OBJECT_FLAG_UNINLINEABLE))
             printf(" uninlineable");
         if (hasFlags(OBJECT_FLAG_SPECIAL_EQUALITY))
             printf(" specialEquality");
     }
 
     if (propertyCount == 0) {
         printf(" {}\n");
--- a/js/src/jsinfer.h
+++ b/js/src/jsinfer.h
@@ -219,17 +219,17 @@ public:
  *     NO_SPECIAL_EQUALITY
  *              |
  *           UNKNOWN
  */
 enum ObjectKind {
     OBJECT_NONE,
     OBJECT_UNKNOWN,
     OBJECT_PACKED_ARRAY,
-    OBJECT_DENSE_ARRAY,         /* Excludes arrays whose length has shrunk. */
+    OBJECT_DENSE_ARRAY,
     OBJECT_INLINEABLE_FUNCTION,
     OBJECT_SCRIPTED_FUNCTION,
     OBJECT_NO_SPECIAL_EQUALITY
 };
 
 /* Coarse flags for the contents of a type set. */
 enum {
     TYPE_FLAG_UNDEFINED = 1 << TYPE_UNDEFINED,
@@ -461,27 +461,21 @@ enum {
     OBJECT_FLAG_UNKNOWN_MASK = uint32(-1),
 
     /* Whether any objects this represents are not dense arrays. */
     OBJECT_FLAG_NON_DENSE_ARRAY = 1 << 0,
 
     /* Whether any objects this represents are not packed arrays. */
     OBJECT_FLAG_NON_PACKED_ARRAY = 1 << 1,
 
-    /*
-     * Whether any objects this represents are arrays whose length has shrunk
-     * due to explicit assignments to .length.
-     */
-    OBJECT_FLAG_ARRAY_SHRANK = 1 << 2,
-
     /* Whether any objects this represents have had their .arguments accessed. */
-    OBJECT_FLAG_UNINLINEABLE = 1 << 3,
+    OBJECT_FLAG_UNINLINEABLE = 1 << 2,
 
     /* Whether any objects this represents have an equality hook. */
-    OBJECT_FLAG_SPECIAL_EQUALITY = 1 << 4
+    OBJECT_FLAG_SPECIAL_EQUALITY = 1 << 3
 };
 typedef uint32 TypeObjectFlags;
 
 /* Type information about an object accessed by a script. */
 struct TypeObject
 {
 #ifdef DEBUG
     /* Name of this object. */
--- a/js/src/jsinferinlines.h
+++ b/js/src/jsinferinlines.h
@@ -292,16 +292,18 @@ JSContext::markTypeCallerUnexpected(js::
     }
 
     return script->typeMonitorResult(this, pc, type);
 }
 
 inline bool
 JSContext::markTypeCallerUnexpected(const js::Value &value)
 {
+    if (!typeInferenceEnabled())
+        return true;
     return markTypeCallerUnexpected(js::types::GetValueType(this, value));
 }
 
 inline bool
 JSContext::markTypeCallerOverflow()
 {
     return markTypeCallerUnexpected(js::types::TYPE_DOUBLE);
 }
@@ -423,29 +425,16 @@ JSContext::addTypeFlags(js::types::TypeO
 inline bool
 JSContext::markTypeArrayNotPacked(js::types::TypeObject *obj, bool notDense)
 {
     return addTypeFlags(obj, js::types::OBJECT_FLAG_NON_PACKED_ARRAY |
                         (notDense ? js::types::OBJECT_FLAG_NON_DENSE_ARRAY : 0));
 }
 
 inline bool
-JSContext::markTypeArrayShrank(js::types::TypeObject *obj)
-{
-    /*
-     * For simplicity in determining whether to hoist array bounds checks,
-     * we mark types with arrays that have shrunk (a rare operation) as
-     * possibly non-dense.
-     */
-    return addTypeFlags(obj, js::types::OBJECT_FLAG_ARRAY_SHRANK |
-                        js::types::OBJECT_FLAG_NON_PACKED_ARRAY |
-                        js::types::OBJECT_FLAG_NON_DENSE_ARRAY);
-}
-
-inline bool
 JSContext::markTypeFunctionUninlineable(js::types::TypeObject *obj)
 {
     return addTypeFlags(obj, js::types::OBJECT_FLAG_UNINLINEABLE);
 }
 
 inline bool
 JSContext::markTypeObjectHasSpecialEquality(js::types::TypeObject *obj)
 {
--- a/js/src/methodjit/Compiler.cpp
+++ b/js/src/methodjit/Compiler.cpp
@@ -882,28 +882,44 @@ mjit::Compiler::finishThisUp(JITScript *
         }
 
         JSScript *script =
             (from.inlineIndex == uint32(-1)) ? outerScript : inlineFrames[from.inlineIndex]->script;
         uint32 codeOffset = from.ool
                             ? masm.size() + from.returnOffset
                             : from.returnOffset;
         to.initialize(codeOffset, from.inlineIndex, from.inlinepc - script->code, from.id);
+
+        /*
+         * Patch stores of the base call's return address for InvariantFailure
+         * calls. InvariantFailure will patch its own return address to this
+         * pointer before triggering recompilation.
+         */
+        if (from.loopPatch.hasPatch)
+            stubCode.patch(from.loopPatch.codePatch, result + codeOffset);
     }
 
     /* Build the table of rejoin sites. */
     RejoinSite *jitRejoinSites = (RejoinSite *)cursor;
     jit->nRejoinSites = rejoinSites.length();
     cursor += sizeof(RejoinSite) * jit->nRejoinSites;
     for (size_t i = 0; i < jit->nRejoinSites; i++) {
         RejoinSite &to = jitRejoinSites[i];
         InternalRejoinSite &from = rejoinSites[i];
 
         uint32 codeOffset = (uint8 *) stubCode.locationOf(from.label).executableAddress() - result;
         to.initialize(codeOffset, from.pc - outerScript->code, from.id);
+
+        /*
+         * Patch stores of the rejoin site's return address for InvariantFailure
+         * calls. We need to preserve the rejoin site we were at in case of
+         * cascading recompilations and loop invariant failures.
+         */
+        if (from.loopPatch.hasPatch)
+            stubCode.patch(from.loopPatch.codePatch, result + codeOffset);
     }
 
 #if defined JS_MONOIC
     JS_INIT_CLIST(&jit->callers);
 
     if (script->fun && cx->typeInferenceEnabled()) {
         jit->argsCheckStub = stubCode.locationOf(argsCheckStub);
         jit->argsCheckFallthrough = stubCode.locationOf(argsCheckFallthrough);
@@ -3195,21 +3211,16 @@ mjit::Compiler::prepareStubCall(Uses use
 }
 
 JSC::MacroAssembler::Call
 mjit::Compiler::emitStubCall(void *ptr, DataLabelPtr *pinline)
 {
     JaegerSpew(JSpew_Insns, " ---- CALLING STUB ---- \n");
     Call cl = masm.fallibleVMCall(cx->typeInferenceEnabled(),
                                   ptr, outerPC(), pinline, frame.totalDepth());
-    if (loop && loop->generatingInvariants()) {
-        Jump j = masm.jump();
-        Label l = masm.label();
-        loop->addInvariantCall(j, l, false);
-    }
     JaegerSpew(JSpew_Insns, " ---- END STUB CALL ---- \n");
     return cl;
 }
 
 void
 mjit::Compiler::interruptCheckHelper()
 {
     REJOIN_SITE(stubs::Interrupt);
@@ -4110,16 +4121,21 @@ mjit::Compiler::addCallSite(const Intern
 void
 mjit::Compiler::inlineStubCall(void *stub, bool needsRejoin)
 {
     DataLabelPtr inlinePatch;
     Call cl = emitStubCall(stub, &inlinePatch);
     InternalCallSite site(masm.callReturnOffset(cl), a->inlineIndex, PC,
                           (size_t)stub, false, needsRejoin);
     site.inlinePatch = inlinePatch;
+    if (loop && loop->generatingInvariants()) {
+        Jump j = masm.jump();
+        Label l = masm.label();
+        loop->addInvariantCall(j, l, false, callSites.length(), true);
+    }
     addCallSite(site);
 }
 
 #ifdef DEBUG
 void
 mjit::Compiler::checkRejoinSite(uint32 nCallSites, uint32 nRejoinSites, void *stub)
 {
     JS_ASSERT(!variadicRejoin);
@@ -4166,17 +4182,17 @@ mjit::Compiler::addRejoinSite(void *stub
      * unknown in this compilation.
      */
     frame.ensureInMemoryDoubles(stubcc.masm);
 
     /* Regenerate any loop invariants. */
     if (loop && loop->generatingInvariants()) {
         Jump j = stubcc.masm.jump();
         Label l = stubcc.masm.label();
-        loop->addInvariantCall(j, l, true);
+        loop->addInvariantCall(j, l, true, rejoinSites.length() - 1, false);
     }
 
     if (ool) {
         /* Jump to the specified label, without syncing. */
         stubcc.masm.jump().linkTo(oolLabel, &stubcc.masm);
     } else {
         /* Rejoin as from an out of line stub call. */
         stubcc.rejoin(Changes(0));
@@ -6539,17 +6555,16 @@ mjit::Compiler::finishLoop(jsbytecode *h
         return true;
 
     /*
      * We're done processing the current loop. Every loop has exactly one backedge
      * at the end ('continue' statements are forward jumps to the loop test),
      * and after jumpAndTrace'ing on that edge we can pop it from the frame.
      */
     JS_ASSERT(loop && loop->headOffset() == uint32(head - script->code));
-    loop->flushLoop(stubcc);
 
     jsbytecode *entryTarget = script->code + loop->entryOffset();
 
     /*
      * Fix up the jump entering the loop. We are doing this after all code has
      * been emitted for the backedge, so that we are now in the loop's fallthrough
      * (where we will emit the entry code).
      */
@@ -6558,61 +6573,69 @@ mjit::Compiler::finishLoop(jsbytecode *h
 #ifdef DEBUG
     if (IsJaegerSpewChannelActive(JSpew_Regalloc)) {
         RegisterAllocation *alloc = a->liveness.getCode(head).allocation;
         JaegerSpew(JSpew_Regalloc, "loop allocation at %u:", head - script->code);
         frame.dumpAllocation(alloc);
     }
 #endif
 
-    Vector<Jump> hoistJumps(cx);
-
     loop->entryJump().linkTo(masm.label(), &masm);
 
-        if (!loop->checkHoistedBounds(entryTarget, masm, &hoistJumps))
-            return false;
-        for (unsigned i = 0; i < hoistJumps.length(); i++)
-            stubcc.linkExitDirect(hoistJumps[i], stubcc.masm.label());
-
     {
         REJOIN_SITE(stubs::MissedBoundsCheckEntry);
         OOL_STUBCALL(stubs::MissedBoundsCheckEntry);
+
+        if (loop->generatingInvariants()) {
+            /*
+             * To do the initial load of the invariants, jump to the invariant
+             * restore point after the call just emitted. :XXX: fix hackiness.
+             */
+            if (oomInVector)
+                return false;
+            Label label = callSites[callSites.length() - 1].loopJumpLabel;
+            stubcc.linkExitDirect(masm.jump(), label);
+        }
         stubcc.crossJump(stubcc.masm.jump(), masm.label());
-        hoistJumps.clear();
     }
 
     frame.prepareForJump(entryTarget, masm, true);
 
     if (!jumpInScript(masm.jump(), entryTarget))
         return false;
 
     if (!a->analysis.getCode(head).safePoint) {
         /*
          * Emit a stub into the OOL path which loads registers from a synced state
          * and jumps to the loop head, for rejoining from the interpreter.
          */
         LoopEntry entry;
         entry.pcOffset = head - script->code;
-        entry.label = stubcc.masm.label();
-        loopEntries.append(entry);
-
-        REJOIN_SITE(stubs::MissedBoundsCheckHead);
-        if (!loop->checkHoistedBounds(head, stubcc.masm, &hoistJumps))
-            return false;
-        Jump skipCall = stubcc.masm.jump();
-        for (unsigned i = 0; i < hoistJumps.length(); i++)
-            hoistJumps[i].linkTo(stubcc.masm.label(), &stubcc.masm);
+
+        AutoRejoinSite autoRejoinHead(this, JS_FUNC_TO_DATA_PTR(void *, stubs::MissedBoundsCheckHead));
         OOL_STUBCALL(stubs::MissedBoundsCheckHead);
-        skipCall.linkTo(stubcc.masm.label(), &stubcc.masm);
-        hoistJumps.clear();
-
+
+        if (loop->generatingInvariants()) {
+            if (oomInVector)
+                return false;
+            entry.label = callSites[callSites.length() - 1].loopJumpLabel;
+        } else {
+            entry.label = stubcc.masm.label();
+        }
+
+        autoRejoinHead.oolRejoin(stubcc.masm.label());
         frame.prepareForJump(head, stubcc.masm, true);
         if (!stubcc.jumpInScript(stubcc.masm.jump(), head))
             return false;
-    }
+
+        loopEntries.append(entry);
+    }
+
+    /* Write out loads and tests of loop invariants at all calls in the loop body. */
+    loop->flushLoop(stubcc);
 
     LoopState *nloop = loop->outer;
     cx->delete_(loop);
     loop = nloop;
     frame.setLoop(loop);
 
     fallthrough.linkTo(masm.label(), &masm);
 
--- a/js/src/methodjit/Compiler.h
+++ b/js/src/methodjit/Compiler.h
@@ -54,16 +54,26 @@ namespace js {
 namespace mjit {
 
 struct PatchableFrame {
     JSStackFrame *fp;
     jsbytecode *pc;
     bool scriptedCall;
 };
 
+/*
+ * Patch for storing call site and rejoin site return addresses at, for
+ * redirecting the return address in InvariantFailure.
+ */
+struct InvariantCodePatch {
+    bool hasPatch;
+    JSC::MacroAssembler::DataLabelPtr codePatch;
+    InvariantCodePatch() : hasPatch(false) {}
+};
+
 class Compiler : public BaseCompiler
 {
     friend class StubCompiler;
 
     struct BranchPatch {
         BranchPatch(const Jump &j, jsbytecode *pc, uint32 inlineIndex)
           : jump(j), pc(pc), inlineIndex(inlineIndex)
         { }
@@ -320,33 +330,36 @@ class Compiler : public BaseCompiler
 
     struct InternalCallSite {
         uint32 returnOffset;
         DataLabelPtr inlinePatch;
         uint32 inlineIndex;
         jsbytecode *inlinepc;
         size_t id;
         bool ool;
+        Label loopJumpLabel;
+        InvariantCodePatch loopPatch;
 
         // An AutoRejoinSite needs to capture this call site.
         bool needsRejoin;
 
         InternalCallSite(uint32 returnOffset,
                          uint32 inlineIndex, jsbytecode *inlinepc, size_t id,
                          bool ool, bool needsRejoin)
-          : returnOffset(returnOffset),
+          : returnOffset(returnOffset),
             inlineIndex(inlineIndex), inlinepc(inlinepc), id(id),
             ool(ool), needsRejoin(needsRejoin)
         { }
     };
 
     struct InternalRejoinSite {
         Label label;
         jsbytecode *pc;
         size_t id;
+        InvariantCodePatch loopPatch;
 
         InternalRejoinSite(Label label, jsbytecode *pc, size_t id)
             : label(label), pc(pc), id(id)
         { }
     };
 
     struct AutoRejoinSite {
         Compiler *cc;
@@ -580,16 +593,23 @@ class Compiler : public BaseCompiler
     jsbytecode *inlinePC() { return PC; }
     uint32 inlineIndex() { return a->inlineIndex; }
 
     types::TypeSet *getTypeSet(uint32 slot);
     types::TypeSet *getTypeSet(const FrameEntry *fe) { return getTypeSet(frame.indexOfFe(fe)); }
 
     Assembler &getAssembler(bool ool) { return ool ? stubcc.masm : masm; }
 
+    InvariantCodePatch *getInvariantPatch(unsigned index, bool call) {
+        return call ? &callSites[index].loopPatch : &rejoinSites[index].loopPatch;
+    }
+    jsbytecode *getInvariantPC(unsigned index, bool call) {
+        return call ? callSites[index].inlinepc : rejoinSites[index].pc;
+    }
+
   private:
     CompileStatus performCompilation(JITScript **jitp);
     CompileStatus generatePrologue();
     CompileStatus generateMethod();
     CompileStatus generateEpilogue();
     CompileStatus finishThisUp(JITScript **jitp);
     CompileStatus pushActiveFrame(JSScript *script, uint32 argc);
     void popActiveFrame();
--- a/js/src/methodjit/LoopState.cpp
+++ b/js/src/methodjit/LoopState.cpp
@@ -34,16 +34,17 @@
  * the provisions above, a recipient may use your version of this file under
  * the terms of any one of the MPL, the GPL or the LGPL.
  *
  * ***** END LICENSE BLOCK ***** */
 
 #include "methodjit/Compiler.h"
 #include "methodjit/LoopState.h"
 #include "methodjit/FrameState-inl.h"
+#include "methodjit/StubCalls.h"
 
 using namespace js;
 using namespace js::mjit;
 using namespace js::analyze;
 
 LoopState::LoopState(JSContext *cx, JSScript *script,
                      mjit::Compiler *cc, FrameState *frame,
                      Script *analysis, LifetimeScript *liveness)
@@ -141,22 +142,24 @@ LoopState::addJoin(unsigned index, bool 
 {
     StubJoin r;
     r.index = index;
     r.script = script;
     loopJoins.append(r);
 }
 
 void
-LoopState::addInvariantCall(Jump jump, Label label, bool ool)
+LoopState::addInvariantCall(Jump jump, Label label, bool ool, unsigned patchIndex, bool patchCall)
 {
     RestoreInvariantCall call;
     call.jump = jump;
     call.label = label;
     call.ool = ool;
+    call.patchIndex = patchIndex;
+    call.patchCall = patchCall;
     restoreInvariantCalls.append(call);
 }
 
 void
 LoopState::flushLoop(StubCompiler &stubcc)
 {
     clearLoopRegisters();
 
@@ -170,25 +173,46 @@ LoopState::flushLoop(StubCompiler &stubc
     }
     loopJoins.clear();
     loopPatches.clear();
 
     if (hasInvariants()) {
         for (unsigned i = 0; i < restoreInvariantCalls.length(); i++) {
             RestoreInvariantCall &call = restoreInvariantCalls[i];
             Assembler &masm = cc.getAssembler(true);
+            Vector<Jump> failureJumps(cx);
+
             if (call.ool) {
                 call.jump.linkTo(masm.label(), &masm);
-                restoreInvariants(masm);
+                restoreInvariants(masm, &failureJumps);
                 masm.jump().linkTo(call.label, &masm);
             } else {
                 stubcc.linkExitDirect(call.jump, masm.label());
-                restoreInvariants(masm);
+                restoreInvariants(masm, &failureJumps);
                 stubcc.crossJump(masm.jump(), call.label);
             }
+
+            if (!failureJumps.empty()) {
+                for (unsigned i = 0; i < failureJumps.length(); i++)
+                    failureJumps[i].linkTo(masm.label(), &masm);
+
+                /*
+                 * Call InvariantFailure, setting up the return address to
+                 * patch and any value for the call to return.
+                 */
+                InvariantCodePatch *patch = cc.getInvariantPatch(call.patchIndex, call.patchCall);
+                patch->hasPatch = true;
+                patch->codePatch = masm.storePtrWithPatch(ImmPtr(NULL),
+                                                          FrameAddress(offsetof(VMFrame, scratch)));
+                JS_STATIC_ASSERT(Registers::ReturnReg != Registers::ArgReg1);
+                masm.move(Registers::ReturnReg, Registers::ArgReg1);
+                jsbytecode *pc = cc.getInvariantPC(call.patchIndex, call.patchCall);
+                masm.fallibleVMCall(true, JS_FUNC_TO_DATA_PTR(void *, stubs::InvariantFailure),
+                                    pc, NULL, 0);
+            }
         }
     } else {
         for (unsigned i = 0; i < restoreInvariantCalls.length(); i++) {
             RestoreInvariantCall &call = restoreInvariantCalls[i];
             Assembler &masm = cc.getAssembler(call.ool);
             call.jump.linkTo(call.label, &masm);
         }
     }
@@ -428,58 +452,16 @@ LoopState::hoistArrayLengthCheck(const F
 
         return addHoistedCheck(frame.indexOfFe(obj), rhs, constant);
     }
 
     JaegerSpew(JSpew_Analysis, "No match found\n");
     return false;
 }
 
-bool
-LoopState::checkHoistedBounds(jsbytecode *PC, Assembler &masm, Vector<Jump> *jumps)
-{
-    restoreInvariants(masm);
-
-    /*
-     * Emit code to validate all hoisted bounds checks, filling jumps with all
-     * failure paths. This is done from a fully synced state, and all registers
-     * can be used as temporaries. Note: we assume that no modifications to the
-     * terms in the hoisted checks occur between PC and the head of the loop.
-     */
-
-    for (unsigned i = 0; i < hoistedBoundsChecks.length(); i++) {
-        /* Testing: initializedLength(array) > value + constant; */
-        const HoistedBoundsCheck &check = hoistedBoundsChecks[i];
-
-        RegisterID initlen = Registers::ArgReg0;
-        masm.loadPayload(frame.addressOf(check.arraySlot), initlen);
-        masm.load32(Address(initlen, offsetof(JSObject, initializedLength)), initlen);
-
-        if (check.valueSlot != uint32(-1)) {
-            RegisterID value = Registers::ArgReg1;
-            masm.loadPayload(frame.addressOf(check.valueSlot), value);
-            if (check.constant != 0) {
-                Jump overflow = masm.branchAdd32(Assembler::Overflow,
-                                                 Imm32(check.constant), value);
-                if (!jumps->append(overflow))
-                    return false;
-            }
-            Jump j = masm.branch32(Assembler::BelowOrEqual, initlen, value);
-            if (!jumps->append(j))
-                return false;
-        } else {
-            Jump j = masm.branch32(Assembler::BelowOrEqual, initlen, Imm32(check.constant));
-            if (!jumps->append(j))
-                return false;
-        }
-    }
-
-    return true;
-}
-
 FrameEntry *
 LoopState::invariantSlots(const FrameEntry *obj)
 {
     obj = obj->backing();
     uint32 slot = frame.indexOfFe(obj);
 
     for (unsigned i = 0; i < invariantArraySlots.length(); i++) {
         if (invariantArraySlots[i].arraySlot == slot)
@@ -487,33 +469,61 @@ LoopState::invariantSlots(const FrameEnt
     }
 
     /* addHoistedCheck should have ensured there is an entry for the slots. */
     JS_NOT_REACHED("Missing invariant slots");
     return NULL;
 }
 
 void
-LoopState::restoreInvariants(Assembler &masm)
+LoopState::restoreInvariants(Assembler &masm, Vector<Jump> *jumps)
 {
     /*
      * Restore all invariants in memory when entering the loop or after any
-     * scripted or C++ call. Care should be taken not to clobber the return
-     * register, which may still be live after some calls.
+     * scripted or C++ call, and check that all hoisted conditions hold. Care should
+     * be taken not to clobber the return register or callee-saved registers,
+     * which may still be live after some calls.
      */
 
-    Registers regs(Registers::AvailRegs);
+    Registers regs(Registers::TempRegs);
     regs.takeReg(Registers::ReturnReg);
 
     for (unsigned i = 0; i < invariantArraySlots.length(); i++) {
         const InvariantArraySlots &entry = invariantArraySlots[i];
         FrameEntry *fe = frame.getTemporary(entry.temporary);
 
         Address array = frame.addressOf(entry.arraySlot);
         Address address = frame.addressOf(fe);
 
         RegisterID reg = regs.takeAnyReg().reg();
         masm.loadPayload(array, reg);
         masm.loadPtr(Address(reg, JSObject::offsetOfSlots()), reg);
         masm.storePtr(reg, address);
         regs.putReg(reg);
     }
+
+    for (unsigned i = 0; i < hoistedBoundsChecks.length(); i++) {
+        /* Testing: initializedLength(array) > value + constant; */
+        const HoistedBoundsCheck &check = hoistedBoundsChecks[i];
+
+        RegisterID initlen = regs.takeAnyReg().reg();
+        masm.loadPayload(frame.addressOf(check.arraySlot), initlen);
+        masm.load32(Address(initlen, offsetof(JSObject, initializedLength)), initlen);
+
+        if (check.valueSlot != uint32(-1)) {
+            RegisterID value = regs.takeAnyReg().reg();
+            masm.loadPayload(frame.addressOf(check.valueSlot), value);
+            if (check.constant != 0) {
+                Jump overflow = masm.branchAdd32(Assembler::Overflow,
+                                                 Imm32(check.constant), value);
+                jumps->append(overflow);
+            }
+            Jump j = masm.branch32(Assembler::BelowOrEqual, initlen, value);
+            jumps->append(j);
+            regs.putReg(value);
+        } else {
+            Jump j = masm.branch32(Assembler::BelowOrEqual, initlen, Imm32(check.constant));
+            jumps->append(j);
+        }
+
+        regs.putReg(initlen);
+    }
 }
--- a/js/src/methodjit/LoopState.h
+++ b/js/src/methodjit/LoopState.h
@@ -35,17 +35,17 @@
  * the terms of any one of the MPL, the GPL or the LGPL.
  *
  * ***** END LICENSE BLOCK ***** */
 
 #if !defined jsjaeger_loopstate_h__ && defined JS_METHODJIT
 #define jsjaeger_loopstate_h__
 
 #include "jsanalyze.h"
-#include "methodjit/BaseCompiler.h"
+#include "methodjit/Compiler.h"
 
 namespace js {
 namespace mjit {
 
 /*
  * The LoopState keeps track of register and analysis state within the loop
  * currently being processed by the Compiler.
  *
@@ -63,22 +63,18 @@ namespace mjit {
  * (a) the register has not been touched since the loop head, (b) the slot
  * has not been modified or separately assigned a different register, and (c)
  * all prior slow path rejoins in the loop are patched with reloads of the
  * register. The register allocation at the loop head must have all entries
  * synced, so that prior slow path syncs do not also need patching.
  *
  * Bounds check hoisting. If we can determine a loop invariant test which
  * implies the bounds check at one or more array accesses, we hoist that and
- * only check it when initially entering the loop (from JIT code or the
- * interpreter). This condition never needs to be checked again within the
- * loop, but can be invalidated if the script's arguments are indirectly
- * written via the 'arguments' property/local (which loop analysis assumes
- * does not happen) or if the involved arrays shrink dynamically through
- * assignments to the length property.
+ * check it when initially entering the loop (from JIT code or the
+ * interpreter) and after every stub or C++ call.
  *
  * Loop invariant code motion. If we can determine a computation (arithmetic,
  * array slot pointer or property access) is loop invariant, we give it a slot
  * on the stack and preserve its value throughout the loop. We can allocate
  * and carry registers for loop invariant slots as for normal slots. These
  * slots sit above the frame's normal slots, and are transient --- they are
  * clobbered whenever a new frame is pushed. We thus regenerate the loop
  * invariant slots after every C++ and scripted call, and avoid doing LICM on
@@ -135,16 +131,20 @@ class LoopState : public MacroAssemblerT
     /*
      * Pair of a jump/label immediately after each call in the loop, to patch
      * with restores of the loop invariant stack values.
      */
     struct RestoreInvariantCall {
         Jump jump;
         Label label;
         bool ool;
+
+        /* Index into Compiler's callSites or rejoinSites */
+        unsigned patchIndex;
+        bool patchCall;
     };
     Vector<RestoreInvariantCall> restoreInvariantCalls;
 
     /*
      * Array bounds check hoisted out of the loop. This is a check that needs
      * to be performed, expressed in terms of the state at the loop head.
      */
     struct HoistedBoundsCheck
@@ -165,18 +165,18 @@ class LoopState : public MacroAssemblerT
      */
     struct InvariantArraySlots
     {
         uint32 arraySlot;
         uint32 temporary;
     };
     Vector<InvariantArraySlots, 4, CompilerAllocPolicy> invariantArraySlots;
 
-    bool hasInvariants() { return !invariantArraySlots.empty(); }
-    void restoreInvariants(Assembler &masm);
+    bool hasInvariants() { return !hoistedBoundsChecks.empty() || !invariantArraySlots.empty(); }
+    void restoreInvariants(Assembler &masm, Vector<Jump> *jumps);
 
   public:
 
     /* Outer loop to this one, in case of loop nesting. */
     LoopState *outer;
 
     /* Current bytecode for compilation. */
     jsbytecode *PC;
@@ -184,17 +184,17 @@ class LoopState : public MacroAssemblerT
     LoopState(JSContext *cx, JSScript *script,
               Compiler *cc, FrameState *frame,
               analyze::Script *analysis, analyze::LifetimeScript *liveness);
     bool init(jsbytecode *head, Jump entry, jsbytecode *entryTarget);
 
     bool generatingInvariants() { return !skipAnalysis; }
 
     /* Add a call with trailing jump/label, after which invariants need to be restored. */
-    void addInvariantCall(Jump jump, Label label, bool ool);
+    void addInvariantCall(Jump jump, Label label, bool ool, unsigned patchIndex, bool patchCall);
 
     uint32 headOffset() { return lifetime->head; }
     uint32 getLoopRegs() { return loopRegs.freeMask; }
 
     Jump entryJump() { return entry; }
     uint32 entryOffset() { return lifetime->entry; }
     uint32 backedgeOffset() { return lifetime->backedge; }
 
@@ -219,16 +219,14 @@ class LoopState : public MacroAssemblerT
 
     void addJoin(unsigned index, bool script);
     void clearLoopRegisters();
 
     void flushLoop(StubCompiler &stubcc);
 
     bool hoistArrayLengthCheck(const FrameEntry *obj, const FrameEntry *id);
     FrameEntry *invariantSlots(const FrameEntry *obj);
-
-    bool checkHoistedBounds(jsbytecode *PC, Assembler &masm, Vector<Jump> *jumps);
 };
 
 } /* namespace mjit */
 } /* namespace js */
 
 #endif /* jsjaeger_loopstate_h__ */
--- a/js/src/methodjit/StubCalls.cpp
+++ b/js/src/methodjit/StubCalls.cpp
@@ -2881,37 +2881,49 @@ stubs::AssertArgumentTypes(VMFrame &f)
     if (!f.fp()->isConstructing())
         JS_ASSERT(script->thisTypes()->hasType(types::GetValueType(f.cx, fp->thisValue())));
 
     for (unsigned i = 0; i < fun->nargs; i++)
         JS_ASSERT(script->argTypes(i)->hasType(types::GetValueType(f.cx, fp->formalArg(i))));
 }
 #endif
 
-void JS_FASTCALL
-stubs::MissedBoundsCheckEntry(VMFrame &f)
+/*
+ * These two are never actually called, they just give us a place to rejoin if
+ * there is an invariant failure when initially entering a loop.
+ */
+void JS_FASTCALL stubs::MissedBoundsCheckEntry(VMFrame &f) {}
+void JS_FASTCALL stubs::MissedBoundsCheckHead(VMFrame &f) {}
+
+void * JS_FASTCALL
+stubs::InvariantFailure(VMFrame &f, void *rval)
 {
+    /*
+     * Patch this call to the return site of the call triggering the invariant
+     * failure (or a MissedBoundsCheck* function if the failure occurred on
+     * initial loop entry), and trigger a recompilation which will then
+     * redirect to the rejoin point for that call. We want to make things look
+     * to the recompiler like we are still inside that call, and that after
+     * recompilation we will return to the call's rejoin point.
+     */
+    void *repatchCode = f.scratch;
+    JS_ASSERT(repatchCode);
+    void **frameAddr = f.returnAddressLocation();
+    *frameAddr = repatchCode;
+
     /* Recompile the script, and don't hoist any bounds checks. */
     JS_ASSERT(!f.script()->failedBoundsCheck);
     f.script()->failedBoundsCheck = true;
 
     Recompiler recompiler(f.cx, f.script());
     if (!recompiler.recompile())
-        THROW();
-}
-
-void JS_FASTCALL
-stubs::MissedBoundsCheckHead(VMFrame &f)
-{
-    /*
-     * This stub is needed as we can emit bounds checks in two places when
-     * finishing a loop (for entry from JIT code, and entry from the
-     * interpreter), and need to rejoin at the right one.
-     */
-    stubs::MissedBoundsCheckEntry(f);
+        THROWV(NULL);
+
+    /* Return the same value (if any) as the call triggering the invariant failure. */
+    return rval;
 }
 
 void JS_FASTCALL
 stubs::Exception(VMFrame &f)
 {
     f.regs.sp[0] = f.cx->getPendingException();
     f.cx->clearPendingException();
 }
--- a/js/src/methodjit/StubCalls.h
+++ b/js/src/methodjit/StubCalls.h
@@ -224,16 +224,17 @@ void JS_FASTCALL CallPropSwap(VMFrame &f
 void JS_FASTCALL CheckArgumentTypes(VMFrame &f);
 
 #ifdef DEBUG
 void JS_FASTCALL AssertArgumentTypes(VMFrame &f);
 #endif
 
 void JS_FASTCALL MissedBoundsCheckEntry(VMFrame &f);
 void JS_FASTCALL MissedBoundsCheckHead(VMFrame &f);
+void * JS_FASTCALL InvariantFailure(VMFrame &f, void *rval);
 
 template <bool strict> int32 JS_FASTCALL ConvertToTypedInt(JSContext *cx, Value *vp);
 void JS_FASTCALL ConvertToTypedFloat(JSContext *cx, Value *vp);
 
 void JS_FASTCALL Exception(VMFrame &f);
 
 JSObject * JS_FASTCALL
 NewDenseUnallocatedArray(VMFrame &f, uint32 length);
--- a/js/src/methodjit/StubCompiler.cpp
+++ b/js/src/methodjit/StubCompiler.cpp
@@ -179,28 +179,30 @@ JSC::MacroAssembler::Call
 StubCompiler::emitStubCall(void *ptr, bool needsRejoin, int32 slots)
 {
     JaegerSpew(JSpew_Insns, " ---- BEGIN SLOW CALL CODE ---- \n");
     DataLabelPtr inlinePatch;
     Call cl = masm.fallibleVMCall(cx->typeInferenceEnabled(),
                                   ptr, cc.outerPC(), &inlinePatch, slots);
     JaegerSpew(JSpew_Insns, " ---- END SLOW CALL CODE ---- \n");
 
-    /* Add a hook for restoring loop invariants if necessary. */
-    if (cc.loop && cc.loop->generatingInvariants()) {
-        Jump j = masm.jump();
-        Label l = masm.label();
-        cc.loop->addInvariantCall(j, l, true);
-    }
-
     /* Add the call site for debugging and recompilation. */
     Compiler::InternalCallSite site(masm.callReturnOffset(cl),
                                     cc.inlineIndex(), cc.inlinePC(),
                                     (size_t)ptr, true, needsRejoin);
     site.inlinePatch = inlinePatch;
+
+    /* Add a hook for restoring loop invariants if necessary. */
+    if (cc.loop && cc.loop->generatingInvariants()) {
+        site.loopJumpLabel = masm.label();
+        Jump j = masm.jump();
+        Label l = masm.label();
+        cc.loop->addInvariantCall(j, l, true, cc.callSites.length(), true);
+    }
+
     cc.addCallSite(site);
     return cl;
 }
 
 void
 StubCompiler::fixCrossJumps(uint8 *ncode, size_t offset, size_t total)
 {
     JSC::LinkBuffer fast(ncode, total);