[INFER] Restore stock JM behavior with inference off, bug 647048.
author      Brian Hackett <bhackett1024@gmail.com>
date        Thu, 31 Mar 2011 18:11:41 -0700
changeset   74883 0b1dd5e20bb95d0550d3002ab5f36599c21fba3a
parent      74882 bde17df8b4b60dc36c9410a4c2a479d85c3dfbc1
child       74884 baccdc943514c3cd483304476627009ae85f4196
push id     2
push user   bsmedberg@mozilla.com
push date   Fri, 19 Aug 2011 14:38:13 +0000
bugs        647048
milestone   2.0b13pre
js/src/jit-test/tests/basic/bug641525.js
js/src/jsarray.cpp
js/src/jsarray.h
js/src/jsobj.h
js/src/jsobjinlines.h
js/src/methodjit/BaseAssembler.h
js/src/methodjit/Compiler.cpp
js/src/methodjit/Compiler.h
js/src/methodjit/FastBuiltins.cpp
js/src/methodjit/FastOps.cpp
js/src/methodjit/FrameState-inl.h
js/src/methodjit/FrameState.cpp
js/src/methodjit/MethodJIT.cpp
js/src/methodjit/MonoIC.cpp
js/src/methodjit/StubCompiler.cpp
js/src/methodjit/TrampolineCompiler.cpp
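
All of the method JIT changes below follow one pattern: work that only makes sense with type inference data (cross-branch register allocation and liveness, dense-array initialized lengths, the regs.inlined bookkeeping for inlined frames) is gated on cx->typeInferenceEnabled(), and the pre-inference code path is restored on the other branch. A minimal sketch of the idiom; doInferencePath/doStockJMPath are illustrative stand-ins, not functions from this patch:

    if (cx->typeInferenceEnabled()) {
        /* May rely on liveness info, initialized lengths and regs.inlined. */
        doInferencePath();
    } else {
        /* Behave exactly as the method JIT did before inference landed. */
        doStockJMPath();
    }
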
--- a/js/src/jit-test/tests/basic/bug641525.js
+++ b/js/src/jit-test/tests/basic/bug641525.js
@@ -7,18 +7,19 @@ function f4(o) {
     o4 = o[key];
     o.prototype = {};
 }
 f4(f1);
 f4(f1);
 f4(f2);
 new f2(o2);
 
-assertEq(shapeOf(f1) == shapeOf(f2), false);
-assertEq(shapeOf(f1) == shapeOf(f4), false);
+// These assertions hold only if type inference is enabled.
+//assertEq(shapeOf(f1) == shapeOf(f2), false);
+//assertEq(shapeOf(f1) == shapeOf(f4), false);
 
 function factory() {
   function foo() {}
   foo.x = 0;
   return foo;
 }
 
 var fobjs = [];
--- a/js/src/jsarray.cpp
+++ b/js/src/jsarray.cpp
@@ -3491,18 +3491,22 @@ NewArray(JSContext *cx, jsuint length, J
     gc::FinalizeKind kind = GuessObjectGCKind(length, true);
     JSObject *obj = detail::NewObject<WithProto::Class, false>(cx, &js_ArrayClass, proto, NULL, kind);
     if (!obj)
         return NULL;
 
     if (!obj->setArrayLength(cx, length))
         return NULL;
 
-    if (allocateCapacity && !obj->ensureSlots(cx, length))
-        return NULL;
+    if (allocateCapacity) {
+        if (!obj->ensureSlots(cx, length))
+            return NULL;
+        if (!cx->typeInferenceEnabled())
+            obj->backfillDenseArrayHoles();
+    }
 
     return obj;
 }
 
 JSObject * JS_FASTCALL
 NewDenseEmptyArray(JSContext *cx, JSObject *proto)
 {
     return NewArray<false>(cx, 0, proto);
--- a/js/src/jsarray.h
+++ b/js/src/jsarray.h
@@ -136,22 +136,26 @@ JSObject::ensureDenseArrayElements(JSCon
      */
     if (requiredCapacity > MIN_SPARSE_INDEX &&
         willBeSparseDenseArray(requiredCapacity, extra)) {
         return ED_SPARSE;
     }
     if (!growSlots(cx, requiredCapacity))
         return ED_FAILED;
 
-    if (index > initLength) {
-        if (!setDenseArrayNotPacked(cx))
-            return ED_FAILED;
-        ClearValueRange(getSlots() + initLength, index - initLength, true);
+    if (cx->typeInferenceEnabled()) {
+        if (index > initLength) {
+            if (!setDenseArrayNotPacked(cx))
+                return ED_FAILED;
+            ClearValueRange(getSlots() + initLength, index - initLength, true);
+        }
+        setDenseArrayInitializedLength(requiredCapacity);
+    } else {
+        backfillDenseArrayHoles();
     }
-    setDenseArrayInitializedLength(requiredCapacity);
 
     return ED_OK;
 }
 
 extern bool
 js_StringIsIndex(JSLinearString *str, jsuint *indexp);
 
 inline JSBool
--- a/js/src/jsobj.h
+++ b/js/src/jsobj.h
@@ -812,17 +812,19 @@ struct JSObject : js::gc::Cell {
     inline uint32 getDenseArrayInitializedLength();
     inline void setDenseArrayLength(uint32 length);
     inline void setDenseArrayInitializedLength(uint32 length);
     inline js::Value* getDenseArrayElements();
     inline const js::Value &getDenseArrayElement(uintN idx);
     inline js::Value* addressOfDenseArrayElement(uintN idx);
     inline void setDenseArrayElement(uintN idx, const js::Value &val);
     inline void shrinkDenseArrayElements(JSContext *cx, uintN cap);
+    inline void backfillDenseArrayHoles();
 
+    /* Packed status for this array. May be incorrect if !cx->typeInferenceEnabled(). */
     inline bool isPackedDenseArray();
     inline bool setDenseArrayNotPacked(JSContext *cx);
 
     /*
      * ensureDenseArrayElements ensures that the dense array can hold at least
      * index + extra elements. It returns ED_OK on success, ED_FAILED on
      * failure to grow the array, ED_SPARSE when the array is too sparse to
      * grow (this includes the case of index + extra overflow). In the last
--- a/js/src/jsobjinlines.h
+++ b/js/src/jsobjinlines.h
@@ -469,16 +469,25 @@ JSObject::setDenseArrayElement(uintN idx
 inline void
 JSObject::shrinkDenseArrayElements(JSContext *cx, uintN cap)
 {
     JS_ASSERT(isDenseArray());
     shrinkSlots(cx, cap);
 }
 
 inline void
+JSObject::backfillDenseArrayHoles()
+{
+    /* Only call this if !cx->typeInferenceEnabled(). */
+    JS_ASSERT(isDenseArray());
+    ClearValueRange(slots + initializedLength, capacity - initializedLength, true);
+    initializedLength = capacity;
+}
+
+inline void
 JSObject::setArgsLength(uint32 argc)
 {
     JS_ASSERT(isArguments());
     JS_ASSERT(argc <= JS_ARGS_LENGTH_MAX);
     JS_ASSERT(UINT32_MAX > (uint64(argc) << ARGS_PACKED_BITS_COUNT));
     getSlotRef(JSSLOT_ARGS_LENGTH).setInt32(argc << ARGS_PACKED_BITS_COUNT);
     JS_ASSERT(!isArgsLengthOverridden());
 }
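
The dense-array changes above preserve the stock JM invariant when inference is off: after any growth, the initialized length equals the capacity and the tail slots hold hole values, so jitted code that only checks the capacity stays correct. A minimal restatement of that invariant, assuming a getDenseArrayCapacity() accessor alongside the getDenseArrayInitializedLength() declared in jsobj.h above:

    /* Sketch only: the invariant maintained by backfillDenseArrayHoles(). */
    static void
    CheckStockDenseArrayInvariant(JSObject *obj)
    {
        JS_ASSERT(obj->isDenseArray());
        JS_ASSERT(obj->getDenseArrayInitializedLength() == obj->getDenseArrayCapacity());
    }
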
--- a/js/src/methodjit/BaseAssembler.h
+++ b/js/src/methodjit/BaseAssembler.h
@@ -576,18 +576,20 @@ static const JSC::MacroAssembler::Regist
 
     // Wrap AbstractMacroAssembler::getLinkerCallReturnOffset which is protected.
     unsigned callReturnOffset(Call call) {
         return getLinkerCallReturnOffset(call);
     }
 
 
 #define STUB_CALL_TYPE(type)                                                             \
-    Call callWithVMFrame(type stub, jsbytecode *pc, DataLabelPtr *pinlined, uint32 fd) { \
-        return fallibleVMCall(JS_FUNC_TO_DATA_PTR(void *, stub), pc, pinlined, fd);      \
+    Call callWithVMFrame(bool inlining, type stub, jsbytecode *pc,                       \
+                         DataLabelPtr *pinlined, uint32 fd) {                            \
+        return fallibleVMCall(inlining, JS_FUNC_TO_DATA_PTR(void *, stub),               \
+                              pc, pinlined, fd);                                         \
     }
 
     STUB_CALL_TYPE(JSObjStub);
     STUB_CALL_TYPE(VoidPtrStubUInt32);
     STUB_CALL_TYPE(VoidStubUInt32);
     STUB_CALL_TYPE(VoidStub);
 
 #undef STUB_CALL_TYPE
@@ -605,50 +607,56 @@ static const JSC::MacroAssembler::Regist
         }
 
         // The JIT has moved Arg1 already, and we've guaranteed to not clobber
         // it. Move ArgReg0 into place now. setupFallibleVMFrame will not
         // clobber it either.
         move(MacroAssembler::stackPointerRegister, Registers::ArgReg0);
     }
 
-    void setupFallibleVMFrame(jsbytecode *pc, DataLabelPtr *pinlined, int32 frameDepth) {
+    void setupFallibleVMFrame(bool inlining, jsbytecode *pc,
+                              DataLabelPtr *pinlined, int32 frameDepth) {
         setupInfallibleVMFrame(frameDepth);
 
         /* regs->fp = fp */
         storePtr(JSFrameReg, FrameAddress(offsetof(VMFrame, regs.fp)));
 
         /* PC -> regs->pc :( */
         storePtr(ImmPtr(pc), FrameAddress(offsetof(VMFrame, regs.pc)));
 
-        /* inlined -> regs->inlined :( */
-        DataLabelPtr ptr = storePtrWithPatch(ImmPtr(NULL),
-                                             FrameAddress(offsetof(VMFrame, regs.inlined)));
-        if (pinlined)
-            *pinlined = ptr;
+        if (inlining) {
+            /* inlined -> regs->inlined :( */
+            DataLabelPtr ptr = storePtrWithPatch(ImmPtr(NULL),
+                                                 FrameAddress(offsetof(VMFrame, regs.inlined)));
+            if (pinlined)
+                *pinlined = ptr;
+        }
     }
 
     // An infallible VM call is a stub call (taking a VMFrame & and one
     // optional parameter) that does not need |pc| and |fp| updated, since
     // the call is guaranteed to not fail. However, |sp| is always coherent.
     Call infallibleVMCall(void *ptr, int32 frameDepth) {
         setupInfallibleVMFrame(frameDepth);
         return wrapVMCall(ptr);
     }
 
     // A fallible VM call is a stub call (taking a VMFrame & and one optional
     // parameter) that needs the entire VMFrame to be coherent, meaning that
     // |pc|, |inlined| and |fp| are guaranteed to be up-to-date.
-    Call fallibleVMCall(void *ptr, jsbytecode *pc, DataLabelPtr *pinlined, int32 frameDepth) {
-        setupFallibleVMFrame(pc, pinlined, frameDepth);
+    Call fallibleVMCall(bool inlining, void *ptr, jsbytecode *pc,
+                        DataLabelPtr *pinlined, int32 frameDepth) {
+        setupFallibleVMFrame(inlining, pc, pinlined, frameDepth);
         Call call = wrapVMCall(ptr);
 
-        // Restore the frame pointer from the VM, in case it pushed/popped
-        // some frames or expanded any inline frames.
-        loadPtr(FrameAddress(offsetof(VMFrame, regs.fp)), JSFrameReg);
+        if (inlining) {
+            // Restore the frame pointer from the VM, in case it pushed/popped
+            // some frames or expanded any inline frames.
+            loadPtr(FrameAddress(offsetof(VMFrame, regs.fp)), JSFrameReg);
+        }
 
         return call;
     }
 
     Call wrapVMCall(void *ptr) {
         JS_ASSERT(!callIsAligned);
 
         // Every stub call has at most two arguments.
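
Callers thread the new flag through as cx->typeInferenceEnabled(), so regs.inlined is only maintained, and JSFrameReg only reloaded, when inlined frames are actually possible; Compiler::emitStubCall and StubCompiler::emitStubCall below do exactly this. A caller-side sketch (the wrapper name is illustrative):

    /* Sketch mirroring mjit::Compiler::emitStubCall further down in this patch. */
    Call
    CallStubSketch(Assembler &masm, JSContext *cx, void *stub,
                   jsbytecode *pc, DataLabelPtr *pinline, int32 frameDepth)
    {
        return masm.fallibleVMCall(cx->typeInferenceEnabled(), stub,
                                   pc, pinline, frameDepth);
    }
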
--- a/js/src/methodjit/Compiler.cpp
+++ b/js/src/methodjit/Compiler.cpp
@@ -200,23 +200,23 @@ mjit::Compiler::pushActiveFrame(JSScript
 
     if (newa->analysis.OOM())
         return Compile_Error;
     if (newa->analysis.failed()) {
         JaegerSpew(JSpew_Abort, "couldn't analyze bytecode; probably switchX or OOM\n");
         return Compile_Abort;
     }
 
-    if (!newa->liveness.analyze(cx, &newa->analysis, script)) {
+    if (cx->typeInferenceEnabled() && !newa->liveness.analyze(cx, &newa->analysis, script)) {
         js_ReportOutOfMemory(cx);
         return Compile_Error;
     }
 
 #ifdef JS_METHODJIT_SPEW
-    if (IsJaegerSpewChannelActive(JSpew_Regalloc)) {
+    if (cx->typeInferenceEnabled() && IsJaegerSpewChannelActive(JSpew_Regalloc)) {
         for (unsigned i = 0; i < script->nfixed; i++) {
             if (!newa->analysis.localEscapes(i)) {
                 JaegerSpew(JSpew_Regalloc, "Local %u:", i);
                 newa->liveness.dumpLocal(i);
             }
         }
         for (unsigned i = 0; script->fun && i < script->fun->nargs; i++) {
             if (!newa->analysis.argEscapes(i)) {
@@ -693,18 +693,21 @@ mjit::Compiler::finishThisUp(JITScript *
     stubcc.masm.executableCopy(result + masm.size());
     
     JSC::LinkBuffer fullCode(result, totalSize);
     JSC::LinkBuffer stubCode(result + masm.size(), stubcc.size());
 
     size_t nNmapLive = loopEntries.length();
     for (size_t i = 0; i < script->length; i++) {
         analyze::Bytecode *opinfo = a->analysis.maybeCode(i);
-        if (opinfo && opinfo->safePoint && !a->liveness.getCode(i).loopBackedge)
-            nNmapLive++;
+        if (opinfo && opinfo->safePoint) {
+            /* loopEntries cover any safe points which are at loop heads. */
+            if (!cx->typeInferenceEnabled() || !a->liveness.getCode(i).loopBackedge)
+                nNmapLive++;
+        }
     }
 
     size_t nUnsyncedEntries = 0;
     for (size_t i = 0; i < inlineFrames.length(); i++)
         nUnsyncedEntries += inlineFrames[i]->unsyncedEntries.length();
 
     /* Please keep in sync with JITScript::scriptDataSize! */
     size_t totalBytes = sizeof(JITScript) +
@@ -814,17 +817,18 @@ mjit::Compiler::finishThisUp(JITScript *
     CallSite *jitCallSites = (CallSite *)cursor;
     jit->nCallSites = callSites.length();
     cursor += sizeof(CallSite) * jit->nCallSites;
     for (size_t i = 0; i < jit->nCallSites; i++) {
         CallSite &to = jitCallSites[i];
         InternalCallSite &from = callSites[i];
 
         /* Patch stores of f.regs.inlined for stubs called from within inline frames. */
-        if (from.id != CallSite::NCODE_RETURN_ID &&
+        if (cx->typeInferenceEnabled() &&
+            from.id != CallSite::NCODE_RETURN_ID &&
             from.id != CallSite::MAGIC_TRAP_ID &&
             from.inlineIndex != uint32(-1)) {
             if (from.ool)
                 stubCode.patch(from.inlinePatch, &to);
             else
                 fullCode.patch(from.inlinePatch, &to);
         }
 
@@ -1269,32 +1273,37 @@ mjit::Compiler::generateMethod()
                 fixDoubleTypes(Uses(0));
 
                 /*
                  * Watch for fallthrough to the head of a 'do while' loop.
                  * We don't know what register state we will be using at the head
                  * of the loop so sync, branch, and fix it up after the loop
                  * has been processed.
                  */
-                if (a->liveness.getCode(PC).loopBackedge) {
+                if (cx->typeInferenceEnabled() && a->liveness.getCode(PC).loopBackedge) {
                     frame.syncAndForgetEverything();
                     Jump j = masm.jump();
                     if (!frame.pushLoop(PC, j, PC))
                         return Compile_Error;
                 } else {
                     if (!frame.syncForBranch(PC, Uses(0)))
                         return Compile_Error;
                     JS_ASSERT(frame.consistentRegisters(PC));
                 }
             }
 
             if (!frame.discardForJoin(PC, opinfo->stackDepth))
                 return Compile_Error;
             restoreAnalysisTypes(opinfo->stackDepth);
             fallthrough = true;
+
+            if (!cx->typeInferenceEnabled()) {
+                /* All join points have synced state if we aren't doing cross-branch regalloc. */
+                opinfo->safePoint = true;
+            }
         }
 
         a->jumpMap[uint32(PC - script->code)] = masm.label();
 
         SPEW_OPCODE();
         JS_ASSERT(frame.stackDepth() == opinfo->stackDepth);
 
         if (trap) {
@@ -1434,17 +1443,18 @@ mjit::Compiler::generateMethod()
                 }
             }
 
             /*
              * Watch for gotos which are entering a 'for' or 'while' loop. These jump
              * to the loop condition test and are immediately followed by the head of the loop.
              */
             jsbytecode *next = PC + JSOP_GOTO_LENGTH;
-            if (a->analysis.maybeCode(next) && a->liveness.getCode(next).loopBackedge) {
+            if (cx->typeInferenceEnabled() && a->analysis.maybeCode(next) &&
+                a->liveness.getCode(next).loopBackedge) {
                 frame.syncAndForgetEverything();
                 Jump j = masm.jump();
                 if (!frame.pushLoop(next, j, target))
                     return Compile_Error;
             } else {
                 if (!frame.syncForBranch(target, Uses(0)))
                     return Compile_Error;
                 Jump j = masm.jump();
@@ -2692,17 +2702,18 @@ mjit::Compiler::jsop_getglobal(uint32 in
     Address address = masm.objSlotRef(globalObj, reg, slot);
     frame.push(address, knownPushedType(0));
     frame.freeReg(reg);
 
     /*
      * If the global is currently undefined, it might still be undefined at the point
      * of this access, which type inference will not account for. Insert a check.
      */
-    if (globalObj->getSlot(slot).isUndefined() &&
+    if (cx->typeInferenceEnabled() &&
+        globalObj->getSlot(slot).isUndefined() &&
         (JSOp(*PC) == JSOP_CALLGLOBAL || PC[JSOP_GETGLOBAL_LENGTH] != JSOP_POP)) {
         Jump jump = masm.testUndefined(Assembler::Equal, address);
         stubcc.linkExit(jump, Uses(0));
         stubcc.leave();
         OOL_STUBCALL(stubs::UndefinedHelper);
         stubcc.rejoin(Changes(1));
     }
 }
@@ -2967,17 +2978,18 @@ mjit::Compiler::prepareStubCall(Uses use
     frame.syncAndKill(Registers(Registers::TempAnyRegs), uses);
     JaegerSpew(JSpew_Insns, " ---- FRAME SYNCING DONE ---- \n");
 }
 
 JSC::MacroAssembler::Call
 mjit::Compiler::emitStubCall(void *ptr, DataLabelPtr *pinline)
 {
     JaegerSpew(JSpew_Insns, " ---- CALLING STUB ---- \n");
-    Call cl = masm.fallibleVMCall(ptr, outerPC(), pinline, frame.totalDepth());
+    Call cl = masm.fallibleVMCall(cx->typeInferenceEnabled(),
+                                  ptr, outerPC(), pinline, frame.totalDepth());
     JaegerSpew(JSpew_Insns, " ---- END STUB CALL ---- \n");
     return cl;
 }
 
 void
 mjit::Compiler::interruptCheckHelper()
 {
     /*
@@ -3069,16 +3081,19 @@ mjit::Compiler::emitUncachedCall(uint32 
     if (recompiling) {
         /* In case we recompiled this call to an uncached call. */
         OOL_STUBCALL(JS_FUNC_TO_DATA_PTR(void *, callingNew ? ic::New : ic::Call));
         stubcc.crossJump(stubcc.masm.jump(), masm.label());
     }
 
     Jump notCompiled = masm.branchTestPtr(Assembler::Zero, r0, r0);
 
+    if (!cx->typeInferenceEnabled())
+        masm.loadPtr(FrameAddress(offsetof(VMFrame, regs.fp)), JSFrameReg);
+
     callPatch.hasFastNcode = true;
     callPatch.fastNcodePatch =
         masm.storePtrWithPatch(ImmPtr(NULL),
                                Address(JSFrameReg, JSStackFrame::offsetOfncode()));
 
     masm.jump(r0);
     callPatch.joinPoint = masm.label();
     addReturnSite(callPatch.joinPoint);
@@ -3157,16 +3172,19 @@ mjit::Compiler::checkCallApplySpeculatio
         JaegerSpew(JSpew_Insns, " ---- BEGIN SLOW CALL CODE ---- \n");
         OOL_STUBCALL_LOCAL_SLOTS(JS_FUNC_TO_DATA_PTR(void *, stubs::UncachedCall),
                                  frame.totalDepth() + frameDepthAdjust);
         JaegerSpew(JSpew_Insns, " ---- END SLOW CALL CODE ---- \n");
 
         RegisterID r0 = Registers::ReturnReg;
         Jump notCompiled = stubcc.masm.branchTestPtr(Assembler::Zero, r0, r0);
 
+        if (!cx->typeInferenceEnabled())
+            stubcc.masm.loadPtr(FrameAddress(offsetof(VMFrame, regs.fp)), JSFrameReg);
+
         Address ncodeAddr(JSFrameReg, JSStackFrame::offsetOfncode());
         uncachedCallPatch->hasSlowNcode = true;
         uncachedCallPatch->slowNcodePatch = stubcc.masm.storePtrWithPatch(ImmPtr(NULL), ncodeAddr);
 
         stubcc.masm.jump(r0);
         notCompiled.linkTo(stubcc.masm.label(), &stubcc.masm);
 
         /*
@@ -3458,16 +3476,18 @@ mjit::Compiler::inlineCallHelper(uint32 
          * function pointer to jump to.
          */
         rejoin1 = stubcc.masm.branchTestPtr(Assembler::Zero, Registers::ReturnReg,
                                             Registers::ReturnReg);
         if (callIC.frameSize.isStatic())
             stubcc.masm.move(Imm32(callIC.frameSize.staticArgc()), JSParamReg_Argc);
         else
             stubcc.masm.load32(FrameAddress(offsetof(VMFrame, u.call.dynamicArgc)), JSParamReg_Argc);
+        if (!cx->typeInferenceEnabled())
+            stubcc.masm.loadPtr(FrameAddress(offsetof(VMFrame, regs.fp)), JSFrameReg);
         callPatch.hasSlowNcode = true;
         callPatch.slowNcodePatch =
             stubcc.masm.storePtrWithPatch(ImmPtr(NULL),
                                           Address(JSFrameReg, JSStackFrame::offsetOfncode()));
         stubcc.masm.jump(Registers::ReturnReg);
 
         /*
          * This ool path is the catch-all for everything but scripted function
@@ -4736,18 +4756,21 @@ mjit::Compiler::jsop_name(JSAtom *atom, 
         CHECK_OOL_SPACE();
     }
     pic.fastPathRejoin = masm.label();
 
     /* Initialize op labels. */
     ScopeNameLabels &labels = pic.scopeNameLabels();
     labels.setInlineJump(masm, pic.fastPathStart, inlineJump);
 
-    /* Always test for undefined. */
-    Jump undefinedGuard = masm.testUndefined(Assembler::Equal, pic.shapeReg);
+    MaybeJump undefinedGuard;
+    if (cx->typeInferenceEnabled()) {
+        /* Always test for undefined. */
+        undefinedGuard = masm.testUndefined(Assembler::Equal, pic.shapeReg);
+    }
 
     /*
      * We can't optimize away the PIC for the NAME access itself, but if we've
      * only seen a single value pushed by this access, mark it as such and
      * recompile if a different value becomes possible.
      */
     JSObject *singleton = pushedSingleton(0);
     if (singleton) {
@@ -4755,20 +4778,22 @@ mjit::Compiler::jsop_name(JSAtom *atom, 
         frame.freeReg(pic.shapeReg);
         frame.freeReg(pic.objReg);
     } else {
         frame.pushRegs(pic.shapeReg, pic.objReg, type);
     }
 
     stubcc.rejoin(Changes(1));
 
-    stubcc.linkExit(undefinedGuard, Uses(0));
-    stubcc.leave();
-    OOL_STUBCALL(stubs::UndefinedHelper);
-    stubcc.rejoin(Changes(1));
+    if (cx->typeInferenceEnabled()) {
+        stubcc.linkExit(undefinedGuard.get(), Uses(0));
+        stubcc.leave();
+        OOL_STUBCALL(stubs::UndefinedHelper);
+        stubcc.rejoin(Changes(1));
+    }
 
     pics.append(pic);
 }
 
 bool
 mjit::Compiler::jsop_xname(JSAtom *atom)
 {
     PICGenInfo pic(ic::PICInfo::XNAME, JSOp(*PC), true);
@@ -4812,25 +4837,30 @@ mjit::Compiler::jsop_xname(JSAtom *atom)
 
     /* Initialize op labels. */
     ScopeNameLabels &labels = pic.scopeNameLabels();
     labels.setInlineJumpOffset(masm.differenceBetween(pic.fastPathStart, inlineJump));
 
     frame.pop();
     frame.pushRegs(pic.shapeReg, pic.objReg, knownPushedType(0));
 
-    /* Always test for undefined. */
-    Jump undefinedGuard = masm.testUndefined(Assembler::Equal, pic.shapeReg);
+    MaybeJump undefinedGuard;
+    if (cx->typeInferenceEnabled()) {
+        /* Always test for undefined. */
+        undefinedGuard = masm.testUndefined(Assembler::Equal, pic.shapeReg);
+    }
 
     stubcc.rejoin(Changes(1));
 
-    stubcc.linkExit(undefinedGuard, Uses(0));
-    stubcc.leave();
-    OOL_STUBCALL(stubs::UndefinedHelper);
-    stubcc.rejoin(Changes(1));
+    if (cx->typeInferenceEnabled()) {
+        stubcc.linkExit(undefinedGuard.get(), Uses(0));
+        stubcc.leave();
+        OOL_STUBCALL(stubs::UndefinedHelper);
+        stubcc.rejoin(Changes(1));
+    }
 
     pics.append(pic);
     return true;
 }
 
 void
 mjit::Compiler::jsop_bindname(JSAtom *atom, bool usePropCache)
 {
@@ -6021,16 +6051,19 @@ mjit::Compiler::jsop_newinit()
     frame.pushInitializerObject(Registers::ReturnReg, *PC == JSOP_NEWARRAY, baseobj);
 
     return true;
 }
 
 bool
 mjit::Compiler::finishLoop(jsbytecode *head)
 {
+    if (!cx->typeInferenceEnabled())
+        return true;
+
     /*
      * We're done processing the current loop. Every loop has exactly one backedge
      * at the end ('continue' statements are forward jumps to the loop test),
      * and after jumpAndTrace'ing on that edge we can pop it from the frame.
      */
 
     /*
      * Fix up the jump entering the loop. We are doing this after all code has
@@ -6086,33 +6119,37 @@ mjit::Compiler::jumpAndTrace(Jump j, jsb
 {
     if (trampoline)
         *trampoline = false;
 
     /*
      * Unless we are coming from a branch which synced everything, syncForBranch
      * must have been called and ensured an allocation at the target.
      */
-    RegisterAllocation *&lvtarget = a->liveness.getCode(target).allocation;
-    if (!lvtarget) {
-        lvtarget = ArenaNew<RegisterAllocation>(a->liveness.pool, false);
-        if (!lvtarget)
-            return false;
-    }
-
-    bool consistent = frame.consistentRegisters(target);
+    RegisterAllocation *lvtarget = NULL;
+    bool consistent = true;
+    if (cx->typeInferenceEnabled()) {
+        RegisterAllocation *&alloc = a->liveness.getCode(target).allocation;
+        if (!alloc) {
+            alloc = ArenaNew<RegisterAllocation>(a->liveness.pool, false);
+            if (!alloc)
+                return false;
+        }
+        lvtarget = alloc;
+        consistent = frame.consistentRegisters(target);
+    }
 
     if (!addTraceHints || target >= PC ||
         (JSOp(*target) != JSOP_TRACE && JSOp(*target) != JSOP_NOTRACE)
 #ifdef JS_MONOIC
         || GET_UINT16(target) == BAD_TRACEIC_INDEX
 #endif
         )
     {
-        if (lvtarget->synced()) {
+        if (!lvtarget || lvtarget->synced()) {
             JS_ASSERT(consistent);
             if (!jumpInScript(j, target))
                 return false;
             if (slow && !stubcc.jumpInScript(*slow, target))
                 return false;
         } else {
             if (consistent) {
                 if (!jumpInScript(j, target))
@@ -6186,16 +6223,19 @@ mjit::Compiler::jumpAndTrace(Jump j, jsb
         PC = pc;
     }
 
     Jump no = stubcc.masm.branchTestPtr(Assembler::Zero, Registers::ReturnReg,
                                         Registers::ReturnReg);
     stubcc.masm.jump(Registers::ReturnReg);
     no.linkTo(stubcc.masm.label(), &stubcc.masm);
 
+    if (!cx->typeInferenceEnabled())
+        stubcc.masm.loadPtr(FrameAddress(offsetof(VMFrame, regs.fp)), JSFrameReg);
+
 #ifdef JS_MONOIC
     ic.jumpTarget = target;
     ic.fastTrampoline = !consistent;
     ic.trampolineStart = stubcc.masm.label();
 
     traceICs[index] = ic;
 #endif
 
--- a/js/src/methodjit/Compiler.h
+++ b/js/src/methodjit/Compiler.h
@@ -510,17 +510,16 @@ class Compiler : public BaseCompiler
     /* Non-emitting helpers. */
     void pushSyncedEntry(uint32 pushed);
     uint32 fullAtomIndex(jsbytecode *pc);
     bool jumpInScript(Jump j, jsbytecode *pc);
     bool compareTwoValues(JSContext *cx, JSOp op, const Value &lhs, const Value &rhs);
     bool canUseApplyTricks();
 
     /* Emitting helpers. */
-    void restoreFrameRegs(Assembler &masm);
     bool emitStubCmpOp(BoolStub stub, jsbytecode *target, JSOp fused);
     bool iter(uintN flags);
     void iterNext();
     bool iterMore();
     void iterEnd();
     MaybeJump loadDouble(FrameEntry *fe, FPRegisterID *fpReg, bool *allocated);
 #ifdef JS_POLYIC
     void passICAddress(BaseICInfo *ic);
--- a/js/src/methodjit/FastBuiltins.cpp
+++ b/js/src/methodjit/FastBuiltins.cpp
@@ -316,16 +316,19 @@ mjit::Compiler::compileGetChar(FrameEntr
 }
 
 
 CompileStatus
 mjit::Compiler::inlineNativeFunction(uint32 argc, bool callingNew)
 {
     JS_ASSERT(!callingNew);
 
+    if (cx->typeInferenceEnabled())
+        return Compile_InlineAbort;
+
     if (applyTricks == LazyArgsObj)
         return Compile_InlineAbort;
 
     FrameEntry *origCallee = frame.peek(-(argc + 2));
     FrameEntry *thisValue = frame.peek(-(argc + 1));
 
     if (!origCallee->isConstant() || !origCallee->isType(JSVAL_TYPE_OBJECT))
         return Compile_InlineAbort;
--- a/js/src/methodjit/FastOps.cpp
+++ b/js/src/methodjit/FastOps.cpp
@@ -99,27 +99,30 @@ mjit::Compiler::ensureInteger(FrameEntry
         frame.freeReg(fptemp);
         frame.learnType(fe, JSVAL_TYPE_INT32, data);
     } else if (!fe->isType(JSVAL_TYPE_INT32)) {
         RegisterID typeReg = frame.tempRegForType(fe);
         frame.pinReg(typeReg);
         RegisterID dataReg = frame.copyDataIntoReg(fe);
         frame.unpinReg(typeReg);
 
-        Jump intGuard = masm.testInt32(Assembler::Equal, typeReg);
-        Jump doubleGuard = masm.testDouble(Assembler::NotEqual, typeReg);
-        stubcc.linkExit(doubleGuard, uses);
+        Jump intGuard = masm.testInt32(Assembler::NotEqual, typeReg);
+
+        Label syncPath = stubcc.syncExitAndJump(uses);
+        stubcc.linkExitDirect(intGuard, stubcc.masm.label());
 
-        FPRegisterID fpreg = frame.allocFPReg();
-        frame.loadDouble(fe, fpreg, masm);
-        Jump truncateGuard = masm.branchTruncateDoubleToInt32(fpreg, dataReg);
-        stubcc.linkExit(truncateGuard, uses);
-        intGuard.linkTo(masm.label(), &masm);
+        /* Try an OOL path to truncate doubles representing int32s. */
+        Jump doubleGuard = stubcc.masm.testDouble(Assembler::NotEqual, typeReg);
+        doubleGuard.linkTo(syncPath, &stubcc.masm);
 
-        frame.freeReg(fpreg);
+        frame.loadDouble(fe, Registers::FPConversionTemp, stubcc.masm);
+        Jump truncateGuard = stubcc.masm.branchTruncateDoubleToInt32(Registers::FPConversionTemp, dataReg);
+        truncateGuard.linkTo(syncPath, &stubcc.masm);
+        stubcc.crossJump(stubcc.masm.jump(), masm.label());
+
         frame.learnType(fe, JSVAL_TYPE_INT32, dataReg);
     }
 }
 
 void
 mjit::Compiler::jsop_bitnot()
 {
     FrameEntry *top = frame.peek(-1);
@@ -555,17 +558,18 @@ mjit::Compiler::jsop_relational(JSOp op,
     }
 
     if (frame.haveSameBacking(lhs, rhs)) {
         return jsop_relational_self(op, stub, target, fused);
     } else if (lhs->isType(JSVAL_TYPE_STRING) || rhs->isType(JSVAL_TYPE_STRING)) {
         return emitStubCmpOp(stub, target, fused);
     } else if (lhs->isType(JSVAL_TYPE_DOUBLE) || rhs->isType(JSVAL_TYPE_DOUBLE)) {
         return jsop_relational_double(op, stub, target, fused);
-    } else if (lhs->isType(JSVAL_TYPE_INT32) && rhs->isType(JSVAL_TYPE_INT32)) {
+    } else if (cx->typeInferenceEnabled() &&
+               lhs->isType(JSVAL_TYPE_INT32) && rhs->isType(JSVAL_TYPE_INT32)) {
         return jsop_relational_int(op, target, fused);
     } else {
         return jsop_relational_full(op, stub, target, fused);
     }
 }
 
 void
 mjit::Compiler::jsop_not()
@@ -2009,16 +2013,18 @@ mjit::Compiler::jsop_initelem()
         INLINE_STUBCALL(stubs::InitElem);
         return;
     }
 
     int32 idx = id->getValue().toInt32();
 
     RegisterID objReg = frame.copyDataIntoReg(obj);
 
-    /* Update the initialized length. */
-    masm.store32(Imm32(idx + 1), Address(objReg, offsetof(JSObject, initializedLength)));
+    if (cx->typeInferenceEnabled()) {
+        /* Update the initialized length. */
+        masm.store32(Imm32(idx + 1), Address(objReg, offsetof(JSObject, initializedLength)));
+    }
 
     /* Perform the store. */
     masm.loadPtr(Address(objReg, offsetof(JSObject, slots)), objReg);
     frame.storeTo(fe, Address(objReg, idx * sizeof(Value)));
     frame.freeReg(objReg);
 }
--- a/js/src/methodjit/FrameState-inl.h
+++ b/js/src/methodjit/FrameState-inl.h
@@ -1302,17 +1302,17 @@ FrameState::tryFastDoubleLoad(FrameEntry
     return false;
 }
 
 inline void
 FrameState::loadDouble(FrameEntry *fe, FPRegisterID fpReg, Assembler &masm) const
 {
     if (fe->isCopy()) {
         FrameEntry *backing = fe->copyOf();
-        if (tryFastDoubleLoad(fe, fpReg, masm))
+        if (tryFastDoubleLoad(fe, fpReg, masm)) 
             return;
         fe = backing;
     }
 
     if (tryFastDoubleLoad(fe, fpReg, masm))
         return;
 
     ensureFeSynced(fe, masm);
--- a/js/src/methodjit/FrameState.cpp
+++ b/js/src/methodjit/FrameState.cpp
@@ -237,17 +237,16 @@ FrameState::syncInlinedEntry(FrameEntry 
         associateReg(fe, RematInfo::DATA, parent->data.reg());
     if (parent->data.inFPRegister())
         associateReg(fe, RematInfo::DATA, parent->data.fpreg());
 }
 
 void
 FrameState::associateReg(FrameEntry *fe, RematInfo::RematType type, AnyRegisterID reg)
 {
-    /* :XXX: handle args/this copying each other. */
     a->freeRegs.takeReg(reg);
 
     if (type == RematInfo::TYPE)
         fe->type.setRegister(reg.reg());
     else if (reg.isReg())
         fe->data.setRegister(reg.reg());
     else
         fe->data.setFPRegister(reg.fpreg());
@@ -290,16 +289,17 @@ FrameState::discardLocalRegisters()
 {
     /* Discard all local registers, without syncing. Must be followed by a discardFrame. */
     a->freeRegs = Registers::AvailAnyRegs;
 }
 
 void
 FrameState::evictInlineModifiedRegisters(Registers regs)
 {
+    JS_ASSERT(cx->typeInferenceEnabled());
     a->parentRegs.freeMask &= ~regs.freeMask;
 
     while (!regs.empty()) {
         AnyRegisterID reg = regs.takeAnyReg();
         if (a->freeRegs.hasReg(reg))
             continue;
 
         FrameEntry *fe = regstate(reg).fe();
@@ -319,16 +319,17 @@ FrameState::evictInlineModifiedRegisters
         regstate(reg).forget();
         a->freeRegs.putReg(reg);
     }
 }
 
 void
 FrameState::tryCopyRegister(FrameEntry *fe, FrameEntry *callStart)
 {
+    JS_ASSERT(cx->typeInferenceEnabled());
     JS_ASSERT(!fe->isCopied() || !isEntryCopied(fe));
 
     if (!fe->isCopy())
         return;
 
     /*
      * Uncopy the entry if it shares a backing with any other entry used
      * in the impending call. We want to ensure that within inline calls each
@@ -373,16 +374,18 @@ FrameState::tryCopyRegister(FrameEntry *
         else
             tempRegForData(fe);
     }
 }
 
 Registers
 FrameState::getTemporaryCallRegisters(FrameEntry *callStart) const
 {
+    JS_ASSERT(cx->typeInferenceEnabled());
+
     /*
      * Get the registers in use for entries which will be popped once the
      * call at callStart finishes.
      */
     Registers regs(Registers::AvailAnyRegs & ~a->freeRegs.freeMask);
     Registers result = 0;
     while (!regs.empty()) {
         AnyRegisterID reg = regs.takeAnyReg();
@@ -448,16 +451,18 @@ FrameState::evictReg(AnyRegisterID reg)
         syncFe(fe);
         fe->data.setMemory();
     }
 }
 
 inline Lifetime *
 FrameState::variableLive(FrameEntry *fe, jsbytecode *pc) const
 {
+    JS_ASSERT(cx->typeInferenceEnabled());
+
     uint32 offset = pc - script->code;
     if (fe == this_)
         return a->liveness->thisLive(offset);
     if (isArg(fe)) {
         JS_ASSERT(!a->analysis->argEscapes(fe - args));
         return a->liveness->argLive(fe - args, offset);
     }
     if (isLocal(fe)) {
@@ -486,16 +491,18 @@ FrameState::isEntryCopied(FrameEntry *fe
     }
 
     return false;
 }
 
 AnyRegisterID
 FrameState::bestEvictReg(uint32 mask, bool includePinned) const
 {
+    JS_ASSERT(cx->typeInferenceEnabled());
+
     /* Must be looking for a specific type of register. */
     JS_ASSERT((mask & Registers::AvailRegs) != (mask & Registers::AvailFPRegs));
 
     AnyRegisterID fallback;
     uint32 fallbackOffset = uint32(-1);
 
     JaegerSpew(JSpew_Regalloc, "picking best register to evict:\n");
 
@@ -600,19 +607,54 @@ FrameState::bestEvictReg(uint32 mask, bo
 
     JaegerSpew(JSpew_Regalloc, "result %s\n", fallback.name());
     return fallback;
 }
 
 AnyRegisterID
 FrameState::evictSomeReg(uint32 mask)
 {
-    AnyRegisterID reg = bestEvictReg(mask, false);
-    evictReg(reg);
-    return reg;
+    if (cx->typeInferenceEnabled()) {
+        AnyRegisterID reg = bestEvictReg(mask, false);
+        evictReg(reg);
+        return reg;
+    }
+
+    /* With inference disabled, only general purpose registers are managed. */
+    JS_ASSERT((mask & ~Registers::AvailRegs) == 0);
+
+    MaybeRegisterID fallback;
+
+    for (uint32 i = 0; i < JSC::MacroAssembler::TotalRegisters; i++) {
+        RegisterID reg = RegisterID(i);
+
+        /* Register is not allocatable; don't bother. */
+        if (!(Registers::maskReg(reg) & mask))
+            continue;
+
+        /* Register is not owned by the FrameState. */
+        FrameEntry *fe = regstate(reg).fe();
+        if (!fe)
+            continue;
+
+        /* Try to find a candidate... that doesn't need spilling. */
+        fallback = reg;
+
+        if (regstate(reg).type() == RematInfo::TYPE && fe->type.synced()) {
+            fe->type.setMemory();
+            return fallback.reg();
+        }
+        if (regstate(reg).type() == RematInfo::DATA && fe->data.synced()) {
+            fe->data.setMemory();
+            return fallback.reg();
+        }
+    }
+
+    evictReg(fallback.reg());
+    return fallback.reg();
 }
 
 void
 FrameState::resetInternalState()
 {
     for (uint32 i = 0; i < a->tracker.nentries; i++)
         a->tracker[i]->untrack();
 
@@ -638,27 +680,29 @@ FrameState::forgetEverything()
         JS_ASSERT(!regstate(reg).usedBy());
     }
 #endif
 }
 
 void
 FrameState::flushLoopJoins()
 {
+    JS_ASSERT(cx->typeInferenceEnabled());
     for (unsigned i = 0; i < loopPatches.length(); i++) {
         const StubJoinPatch &p = loopPatches[i];
         stubcc.patchJoin(p.join.index, p.join.script, p.address, p.reg);
     }
     loopJoins.clear();
     loopPatches.clear();
 }
 
 bool
 FrameState::pushLoop(jsbytecode *head, Jump entry, jsbytecode *entryTarget)
 {
+    JS_ASSERT(cx->typeInferenceEnabled());
     if (activeLoop) {
         /*
          * Convert all loop registers in the outer loop into unassigned registers.
          * We don't keep track of which registers the inner loop uses, so the only
          * registers that can be carried in the outer loop must be mentioned before
          * the inner loop starts.
          */
         activeLoop->alloc->clearLoops();
@@ -685,16 +729,17 @@ FrameState::pushLoop(jsbytecode *head, J
     loop->alloc = alloc;
     loopRegs = Registers::AvailAnyRegs;
     return true;
 }
 
 void
 FrameState::popLoop(jsbytecode *head, Jump *pjump, jsbytecode **ppc)
 {
+    JS_ASSERT(cx->typeInferenceEnabled());
     JS_ASSERT(activeLoop && activeLoop->head == head && activeLoop->alloc);
     activeLoop->alloc->clearLoops();
 
 #ifdef DEBUG
     if (IsJaegerSpewChannelActive(JSpew_Regalloc)) {
         JaegerSpew(JSpew_Regalloc, "loop allocation at %u:", head - script->code);
         dumpAllocation(activeLoop->alloc);
     }
@@ -714,16 +759,17 @@ FrameState::popLoop(jsbytecode *head, Ju
     activeLoop = loop;
 
     loopRegs = 0;
 }
 
 void
 FrameState::setLoopReg(AnyRegisterID reg, FrameEntry *fe)
 {
+    JS_ASSERT(cx->typeInferenceEnabled());
     JS_ASSERT(activeLoop && activeLoop->alloc->loop(reg));
     loopRegs.takeReg(reg);
 
     fe->lastLoop = activeLoop->head;
 
     uint32 slot = indexOfFe(fe);
     regstate(reg).associate(fe, RematInfo::DATA);
 
@@ -756,16 +802,17 @@ FrameState::setLoopReg(AnyRegisterID reg
         entry->set(reg, slot, true);
     }
 }
 
 #ifdef DEBUG
 void
 FrameState::dumpAllocation(RegisterAllocation *alloc)
 {
+    JS_ASSERT(cx->typeInferenceEnabled());
     for (unsigned i = 0; i < Registers::TotalAnyRegisters; i++) {
         AnyRegisterID reg = AnyRegisterID::fromRaw(i);
         if (alloc->assigned(reg)) {
             printf(" (%s: %s%s)", reg.name(), entryName(entries + alloc->slot(reg)),
                    alloc->synced(reg) ? "" : " unsynced");
         }
     }
     Registers regs = alloc->getParentRegs();
@@ -775,16 +822,17 @@ FrameState::dumpAllocation(RegisterAlloc
     }
     printf("\n");
 }
 #endif
 
 RegisterAllocation *
 FrameState::computeAllocation(jsbytecode *target)
 {
+    JS_ASSERT(cx->typeInferenceEnabled());
     RegisterAllocation *alloc = ArenaNew<RegisterAllocation>(a->liveness->pool, false);
     if (!alloc)
         return NULL;
 
     if (a->analysis->getCode(target).exceptionEntry || a->analysis->getCode(target).switchTarget ||
         JSOp(*target) == JSOP_TRAP) {
         /* State must be synced at exception and switch targets, and at traps. */
 #ifdef DEBUG
@@ -821,16 +869,18 @@ FrameState::computeAllocation(jsbytecode
 #endif
 
     return alloc;
 }
 
 void
 FrameState::relocateReg(AnyRegisterID reg, RegisterAllocation *alloc, Uses uses)
 {
+    JS_ASSERT(cx->typeInferenceEnabled());
+
     /*
      * The reg needs to be freed to make room for a variable carried across
      * a branch. Either evict its entry, or try to move it to a different
      * register if it is needed to test the branch condition. :XXX: could also
      * watch for variables which are carried across the branch but are in
      * the register for a different carried entry; we just spill these for now.
      */
     JS_ASSERT(!a->freeRegs.hasReg(reg));
@@ -868,16 +918,21 @@ FrameState::syncForBranch(jsbytecode *ta
 #ifdef DEBUG
     Registers checkRegs(Registers::AvailAnyRegs);
     while (!checkRegs.empty()) {
         AnyRegisterID reg = checkRegs.takeAnyReg();
         JS_ASSERT_IF(!a->freeRegs.hasReg(reg), regstate(reg).fe());
     }
 #endif
 
+    if (!cx->typeInferenceEnabled()) {
+        syncAndForgetEverything();
+        return true;
+    }
+
     Registers regs = 0;
 
     RegisterAllocation *&alloc = a->liveness->getCode(target).allocation;
     if (!alloc) {
         alloc = computeAllocation(target);
         if (!alloc)
             return false;
     }
@@ -997,16 +1052,23 @@ FrameState::syncForBranch(jsbytecode *ta
     }
 
     return true;
 }
 
 bool
 FrameState::discardForJoin(jsbytecode *target, uint32 stackDepth)
 {
+    if (!cx->typeInferenceEnabled()) {
+        resetInternalState();
+        PodArrayZero(a->regstate_);
+        sp = spBase + stackDepth;
+        return true;
+    }
+
     RegisterAllocation *&alloc = a->liveness->getCode(target).allocation;
 
     if (!alloc) {
         /*
          * This shows up for loop entries which are not reachable from the
          * loop head, and for exception, switch target and trap safe points.
          */
         alloc = ArenaNew<RegisterAllocation>(a->liveness->pool, false);
@@ -1049,16 +1111,21 @@ FrameState::discardForJoin(jsbytecode *t
     PodZero(a->typeSets, stackDepth);
 
     return true;
 }
 
 bool
 FrameState::consistentRegisters(jsbytecode *target)
 {
+    if (!cx->typeInferenceEnabled()) {
+        JS_ASSERT(a->freeRegs.freeMask == Registers::AvailAnyRegs);
+        return true;
+    }
+
     /*
      * Before calling this, either the entire state should have been synced or
      * syncForBranch should have been called. These will ensure that any FE
      * which is not consistent with the target's register state has already
      * been synced, and no stores will need to be issued by prepareForJump.
      */
     RegisterAllocation *alloc = a->liveness->getCode(target).allocation;
     JS_ASSERT(alloc);
@@ -1082,16 +1149,19 @@ FrameState::consistentRegisters(jsbyteco
         return false;
 
     return true;
 }
 
 void
 FrameState::prepareForJump(jsbytecode *target, Assembler &masm, bool synced)
 {
+    if (!cx->typeInferenceEnabled())
+        return;
+
     JS_ASSERT_IF(!synced, !consistentRegisters(target));
 
     RegisterAllocation *alloc = a->liveness->getCode(target).allocation;
     JS_ASSERT(alloc);
 
     Registers regs = 0;
 
     regs = Registers(Registers::AvailAnyRegs);
@@ -1505,17 +1575,19 @@ FrameState::sync(Assembler &masm, Uses u
 
     /*
      * Keep track of free registers using a bitmask. If we have to drop into
      * syncFancy(), then this mask will help avoid eviction.
      */
     Registers avail(a->freeRegs.freeMask & Registers::AvailRegs);
     Registers temp(Registers::TempAnyRegs);
 
-    for (FrameEntry *fe = sp - 1; fe >= entries; fe--) {
+    FrameEntry *bottom = cx->typeInferenceEnabled() ? entries : sp - uses.nuses;
+
+    for (FrameEntry *fe = sp - 1; fe >= bottom; fe--) {
         if (!fe->isTracked())
             continue;
 
         if (fe->isType(JSVAL_TYPE_DOUBLE)) {
             /* Copies of in-memory doubles can be synced without spilling. */
             ensureFeSynced(fe, masm);
             continue;
         }
@@ -1554,17 +1626,17 @@ FrameState::sync(Assembler &masm, Uses u
 
                 masm.storeValue(syncReg, addressOf(fe));
                 continue;
             }
 #elif defined JS_NUNBOX32
             /* Fall back to a slower sync algorithm if load required. */
             if ((!fe->type.synced() && backing->type.inMemory()) ||
                 (!fe->data.synced() && backing->data.inMemory())) {
-                syncFancy(masm, avail, fe, entries);
+                syncFancy(masm, avail, fe, bottom);
                 return;
             }
 #endif
         }
 
         /* If a part still needs syncing, it is either a copy or constant. */
 #if defined JS_PUNBOX64
         /* All register-backed FEs have been entirely synced up-front. */
@@ -1638,18 +1710,19 @@ FrameState::syncAndKill(Registers kill, 
         } else {
             JS_ASSERT(fe->type.reg() == reg.reg());
             syncType(fe);
         }
 #endif
     }
 
     uint32 maxvisits = a->tracker.nentries;
-
-    for (FrameEntry *fe = sp - 1; fe >= entries && maxvisits; fe--) {
+    FrameEntry *bottom = cx->typeInferenceEnabled() ? entries : sp - uses.nuses;
+
+    for (FrameEntry *fe = sp - 1; fe >= bottom && maxvisits; fe--) {
         if (!fe->isTracked())
             continue;
 
         maxvisits--;
 
         if (fe >= spStop)
             continue;
 
@@ -2765,16 +2838,17 @@ inline bool
 FrameState::binaryEntryLive(FrameEntry *fe) const
 {
     /*
      * Compute whether fe is live after the binary operation performed at the current
      * bytecode. This is similar to variableLive except that it returns false for the
      * top two stack entries and special cases LOCALINC/ARGINC and friends, which fuse
      * a binary operation before writing over the local/arg.
      */
+    JS_ASSERT(cx->typeInferenceEnabled());
 
     if (fe >= sp - 2)
         return false;
 
     switch (JSOp(*PC)) {
       case JSOP_INCLOCAL:
       case JSOP_DECLOCAL:
       case JSOP_LOCALINC:
@@ -2909,17 +2983,18 @@ FrameState::allocForBinary(FrameEntry *l
      * this point, if for some reason either must be in a register, that has
      * already been guaranteed at this point.
      */
 
     /*
      * Try to reuse operand registers without syncing for ADD and constant SUB,
      * so long as the backing for the operand is dead.
      */
-    if (backingLeft->data.inRegister() && !binaryEntryLive(backingLeft) &&
+    if (cx->typeInferenceEnabled() &&
+        backingLeft->data.inRegister() && !binaryEntryLive(backingLeft) &&
         (op == JSOP_ADD || (op == JSOP_SUB && backingRight->isConstant())) &&
         (lhs == backingLeft || hasOnlyCopy(backingLeft, lhs))) {
         alloc.result = backingLeft->data.reg();
         alloc.undoResult = true;
         alloc.resultHasRhs = false;
         goto skip;
     }
 
@@ -2930,17 +3005,17 @@ FrameState::allocForBinary(FrameEntry *l
             JS_ASSERT(alloc.rhsData.isSet());
             JS_ASSERT(commu);
             masm.move(alloc.rhsData.reg(), alloc.result);
             alloc.resultHasRhs = true;
         } else {
             masm.move(alloc.lhsData.reg(), alloc.result);
             alloc.resultHasRhs = false;
         }
-    } else {
+    } else if (cx->typeInferenceEnabled()) {
         /* No free regs. Evict a register or reuse one of the operands. */
         bool leftInReg = backingLeft->data.inRegister();
         bool rightInReg = backingRight->data.inRegister();
 
         /* If the LHS/RHS types are in registers, don't use them for the result. */
         uint32 mask = Registers::AvailRegs;
         if (backingLeft->type.inRegister())
             mask &= ~Registers::maskReg(backingLeft->type.reg());
@@ -2969,16 +3044,49 @@ FrameState::allocForBinary(FrameEntry *l
                 if (leftInReg) {
                     masm.move(alloc.lhsData.reg(), result);
                 } else {
                     masm.move(alloc.rhsData.reg(), result);
                     alloc.resultHasRhs = true;
                 }
             }
         }
+    } else {
+        /*
+         * No free regs. Find a good candidate to re-use. Best candidates don't
+         * require syncs on the inline path.
+         */
+        bool leftInReg = backingLeft->data.inRegister();
+        bool rightInReg = backingRight->data.inRegister();
+        bool leftSynced = backingLeft->data.synced();
+        bool rightSynced = backingRight->data.synced();
+        if (!commu || (leftInReg && (leftSynced || (!rightInReg || !rightSynced)))) {
+            JS_ASSERT(backingLeft->data.inRegister() || !commu);
+            JS_ASSERT_IF(backingLeft->data.inRegister(),
+                         backingLeft->data.reg() == alloc.lhsData.reg());
+            if (backingLeft->data.inRegister()) {
+                alloc.result = backingLeft->data.reg();
+                unpinReg(alloc.result);
+                takeReg(alloc.result);
+                alloc.lhsNeedsRemat = true;
+            } else {
+                /* For now, just spill... */
+                alloc.result = allocReg();
+                masm.move(alloc.lhsData.reg(), alloc.result);
+            }
+            alloc.resultHasRhs = false;
+        } else {
+            JS_ASSERT(commu);
+            JS_ASSERT(!leftInReg || (rightInReg && rightSynced));
+            alloc.result = backingRight->data.reg();
+            unpinReg(alloc.result);
+            takeReg(alloc.result);
+            alloc.resultHasRhs = true;
+            alloc.rhsNeedsRemat = true;
+        }
     }
 
   skip:
     /* Unpin everything that was pinned. */
     if (backingLeft->type.inRegister())
         unpinReg(backingLeft->type.reg());
     if (backingRight->type.inRegister())
         unpinReg(backingRight->type.reg());
--- a/js/src/methodjit/MethodJIT.cpp
+++ b/js/src/methodjit/MethodJIT.cpp
@@ -119,16 +119,17 @@ static const size_t STUB_CALLS_FOR_OP_CO
 static uint32 StubCallsForOp[STUB_CALLS_FOR_OP_COUNT];
 #endif
 
 extern "C" void JS_FASTCALL
 PushActiveVMFrame(VMFrame &f)
 {
     f.entryfp->script()->compartment->jaegerCompartment->pushActiveFrame(&f);
     f.regs.fp->setNativeReturnAddress(JS_FUNC_TO_DATA_PTR(void*, JaegerTrampolineReturn));
+    f.regs.inlined = NULL;
 }
 
 extern "C" void JS_FASTCALL
 PopActiveVMFrame(VMFrame &f)
 {
     f.entryfp->script()->compartment->jaegerCompartment->popActiveFrame();
 }
 
--- a/js/src/methodjit/MonoIC.cpp
+++ b/js/src/methodjit/MonoIC.cpp
@@ -650,21 +650,25 @@ class CallCompiler : public BaseCompiler
                         : offsetof(JSScript, jitArityCheckNormal);
         masm.loadPtr(Address(t0, offset), t0);
         Jump hasCode = masm.branchPtr(Assembler::Above, t0, ImmPtr(JS_UNJITTABLE_SCRIPT));
 
         /* Try and compile. On success we get back the nmap pointer. */
         void *compilePtr = JS_FUNC_TO_DATA_PTR(void *, stubs::CompileFunction);
         if (ic.frameSize.isStatic()) {
             masm.move(Imm32(ic.frameSize.staticArgc()), Registers::ArgReg1);
-            masm.fallibleVMCall(compilePtr, NULL, NULL, ic.frameSize.staticLocalSlots());
+            masm.fallibleVMCall(cx->typeInferenceEnabled(),
+                                compilePtr, NULL, NULL, ic.frameSize.staticLocalSlots());
         } else {
             masm.load32(FrameAddress(offsetof(VMFrame, u.call.dynamicArgc)), Registers::ArgReg1);
-            masm.fallibleVMCall(compilePtr, NULL, NULL, -1);
+            masm.fallibleVMCall(cx->typeInferenceEnabled(),
+                                compilePtr, NULL, NULL, -1);
         }
+        if (!cx->typeInferenceEnabled())
+            masm.loadPtr(FrameAddress(offsetof(VMFrame, regs.fp)), JSFrameReg);
 
         Jump notCompiled = masm.branchTestPtr(Assembler::Zero, Registers::ReturnReg,
                                               Registers::ReturnReg);
 
         masm.jump(Registers::ReturnReg);
 
         hasCode.linkTo(masm.label(), &masm);
 
@@ -831,17 +835,18 @@ class CallCompiler : public BaseCompiler
         /* Generate fast-path for calling this native. */
         Assembler masm;
 
         /* Guard on the function object identity, for now. */
         Jump funGuard = masm.branchPtr(Assembler::NotEqual, ic.funObjReg, ImmPtr(obj));
 
         /* N.B. After this call, the frame will have a dynamic frame size. */
         if (ic.frameSize.isDynamic()) {
-            masm.fallibleVMCall(JS_FUNC_TO_DATA_PTR(void *, ic::SplatApplyArgs),
+            masm.fallibleVMCall(cx->typeInferenceEnabled(),
+                                JS_FUNC_TO_DATA_PTR(void *, ic::SplatApplyArgs),
                                 f.regs.pc, NULL, initialFrameDepth);
         }
 
         Registers tempRegs(Registers::AvailRegs);
 #ifndef JS_CPU_X86
         tempRegs.takeReg(Registers::ArgReg0);
         tempRegs.takeReg(Registers::ArgReg1);
         tempRegs.takeReg(Registers::ArgReg2);
--- a/js/src/methodjit/StubCompiler.cpp
+++ b/js/src/methodjit/StubCompiler.cpp
@@ -174,17 +174,18 @@ StubCompiler::emitStubCall(void *ptr)
     return emitStubCall(ptr, frame.totalDepth());
 }
 
 JSC::MacroAssembler::Call
 StubCompiler::emitStubCall(void *ptr, int32 slots)
 {
     JaegerSpew(JSpew_Insns, " ---- BEGIN SLOW CALL CODE ---- \n");
     DataLabelPtr inlinePatch;
-    Call cl = masm.fallibleVMCall(ptr, cc.outerPC(), &inlinePatch, slots);
+    Call cl = masm.fallibleVMCall(cx->typeInferenceEnabled(),
+                                  ptr, cc.outerPC(), &inlinePatch, slots);
     JaegerSpew(JSpew_Insns, " ---- END SLOW CALL CODE ---- \n");
 
     /* Add the call site for debugging and recompilation. */
     Compiler::InternalCallSite site(masm.callReturnOffset(cl),
                                     cc.inlineIndex(), cc.inlinePC(),
                                     (size_t)ptr, true, true);
     site.inlinePatch = inlinePatch;
     cc.addCallSite(site);
--- a/js/src/methodjit/TrampolineCompiler.cpp
+++ b/js/src/methodjit/TrampolineCompiler.cpp
@@ -114,17 +114,17 @@ TrampolineCompiler::compileTrampoline(Tr
  * - There is no stub buffer.
  */
 bool
 TrampolineCompiler::generateForceReturn(Assembler &masm)
 {
     /* if (hasArgsObj() || hasCallObj()) stubs::PutActivationObjects() */
     Jump noActObjs = masm.branchTest32(Assembler::Zero, FrameFlagsAddress(),
                                        Imm32(JSFRAME_HAS_CALL_OBJ | JSFRAME_HAS_ARGS_OBJ));
-    masm.fallibleVMCall(JS_FUNC_TO_DATA_PTR(void *, stubs::PutActivationObjects), NULL, NULL, 0);
+    masm.fallibleVMCall(true, JS_FUNC_TO_DATA_PTR(void *, stubs::PutActivationObjects), NULL, NULL, 0);
     noActObjs.linkTo(masm.label(), &masm);
 
     /* Store any known return value */
     masm.loadValueAsComponents(UndefinedValue(), JSReturnReg_Type, JSReturnReg_Data);
     Jump rvalClear = masm.branchTest32(Assembler::Zero,
                                        FrameFlagsAddress(), Imm32(JSFRAME_HAS_RVAL));
     Address rvalAddress(JSFrameReg, JSStackFrame::offsetOfReturnValue());
     masm.loadValueAsComponents(rvalAddress, JSReturnReg_Type, JSReturnReg_Data);