[INFER] Allow call inlining and LICM to work together, bug 653962.
author Brian Hackett <bhackett1024@gmail.com>
Thu, 05 May 2011 13:59:29 -0700
changeset 75003 e5d548c514276ca21245459b431a1c499a9dfe96
parent 74994 3062ff7fef8309febe7904d04a94c96193ed1f47
child 75004 c963b24694cd723f423c8024c8e3adfc96476a3c
push id 2
push user bsmedberg@mozilla.com
push date Fri, 19 Aug 2011 14:38:13 +0000
bugs 653962
milestone 6.0a1
[INFER] Allow call inlining and LICM to work together, bug 653962.
js/src/jsanalyze.cpp
js/src/jsanalyze.h
js/src/jsinfer.cpp
js/src/jsinferinlines.h
js/src/methodjit/BaseAssembler.h
js/src/methodjit/Compiler.cpp
js/src/methodjit/Compiler.h
js/src/methodjit/FastArithmetic.cpp
js/src/methodjit/FastOps.cpp
js/src/methodjit/FrameEntry.h
js/src/methodjit/FrameState-inl.h
js/src/methodjit/FrameState.cpp
js/src/methodjit/FrameState.h
js/src/methodjit/ImmutableSync.cpp
js/src/methodjit/LoopState.cpp
js/src/methodjit/LoopState.h
js/src/methodjit/MethodJIT.cpp
js/src/methodjit/MethodJIT.h
js/src/methodjit/MonoIC.cpp
js/src/methodjit/Retcon.cpp
js/src/methodjit/StubCalls.cpp
js/src/methodjit/StubCompiler.cpp
js/src/methodjit/TrampolineMasmX64.asm
js/src/methodjit/TrampolineMingwX64.s
js/src/methodjit/TrampolineSUNWX64.s
js/src/methodjit/TrampolineSUNWX86.s
js/src/methodjit/TrampolineSparc.s
--- a/js/src/jsanalyze.cpp
+++ b/js/src/jsanalyze.cpp
@@ -431,16 +431,21 @@ ScriptAnalysis::analyzeBytecode(JSContex
         unsigned ndefs = GetDefCount(script, offset);
 
         JS_ASSERT(stackDepth >= nuses);
         stackDepth -= nuses;
         stackDepth += ndefs;
 
         switch (op) {
 
+          case JSOP_RETURN:
+          case JSOP_STOP:
+            numReturnSites_++;
+            break;
+
           case JSOP_SETRVAL:
           case JSOP_POPV:
             usesRval = true;
             isInlineable = false;
             break;
 
           case JSOP_NAME:
           case JSOP_CALLNAME:
@@ -1403,39 +1408,46 @@ ScriptAnalysis::analyzeSSA(JSContext *cx
                 values[slot].initWritten(slot, offset);
             }
             if (op == JSOP_FORARG || op == JSOP_FORLOCAL)
                 stack[stackDepth - 1] = code->poppedValues[0];
             break;
           }
 
           case JSOP_GETARG:
-          case JSOP_CALLARG:
-          case JSOP_GETLOCAL:
-          case JSOP_CALLLOCAL: {
+          case JSOP_GETLOCAL: {
             uint32 slot = GetBytecodeSlot(script, pc);
             if (trackSlot(slot)) {
                 /*
                  * Propagate the current value of the local to the pushed value,
                  * and remember it with an extended use on the opcode.
                  */
                 stack[stackDepth - 1] = code->poppedValues[0] = values[slot];
             }
             break;
           }
 
+          case JSOP_CALLARG:
+          case JSOP_CALLLOCAL: {
+            uint32 slot = GetBytecodeSlot(script, pc);
+            if (trackSlot(slot))
+                stack[stackDepth - 2] = code->poppedValues[0] = values[slot];
+            break;
+          }
+
           /* Short circuit ops which push back one of their operands. */
 
           case JSOP_MOREITER:
           case JSOP_FORELEM:
             stack[stackDepth - 2] = code->poppedValues[0];
             break;
 
           case JSOP_FORNAME:
           case JSOP_FORGNAME:
+          case JSOP_CALLPROP:
             stack[stackDepth - 1] = code->poppedValues[0];
             break;
 
           case JSOP_FORPROP:
           case JSOP_INITPROP:
           case JSOP_INITMETHOD:
             stack[stackDepth - 1] = code->poppedValues[1];
             break;
@@ -1748,16 +1760,84 @@ ScriptAnalysis::freezeNewValues(JSContex
     for (unsigned i = 0; i < count; i++)
         code.newValues[i] = (*pending)[i];
     code.newValues[count].slot = 0;
     code.newValues[count].value.clear();
 
     cx->delete_(pending);
 }
 
+CrossSSAValue
+CrossScriptSSA::foldValue(const CrossSSAValue &cv)
+{
+    const Frame &frame = getFrame(cv.frame);
+    const SSAValue &v = cv.v;
+
+    JSScript *parentScript = NULL;
+    ScriptAnalysis *parentAnalysis = NULL;
+    if (frame.parent != INVALID_FRAME) {
+        parentScript = getFrame(frame.parent).script;
+        parentAnalysis = parentScript->analysis(cx);
+    }
+
+    if (v.kind() == SSAValue::VAR && v.varInitial() && parentScript) {
+        uint32 slot = v.varSlot();
+        if (slot >= ArgSlot(0) && slot < LocalSlot(frame.script, 0)) {
+            uint32 argc = GET_ARGC(frame.parentpc);
+            SSAValue argv = parentAnalysis->poppedValue(frame.parentpc, argc - 1 - (slot - ArgSlot(0)));
+            return foldValue(CrossSSAValue(frame.parent, argv));
+        }
+    }
+
+    if (v.kind() == SSAValue::PUSHED) {
+        jsbytecode *pc = frame.script->code + v.pushedOffset();
+        switch (JSOp(*pc)) {
+          case JSOP_THIS:
+            if (parentScript) {
+                uint32 argc = GET_ARGC(frame.parentpc);
+                SSAValue thisv = parentAnalysis->poppedValue(frame.parentpc, argc);
+                return foldValue(CrossSSAValue(frame.parent, thisv));
+            }
+            break;
+
+          case JSOP_CALL: {
+            /*
+             * If there is a single inline callee with a single return site,
+             * propagate back to that.
+             */
+            JSScript *callee = NULL;
+            uint32 calleeFrame = INVALID_FRAME;
+            for (unsigned i = 0; i < numFrames(); i++) {
+                if (iterFrame(i).parent == cv.frame && iterFrame(i).parentpc == pc) {
+                    if (callee)
+                        return cv;  /* Multiple callees */
+                    callee = iterFrame(i).script;
+                    calleeFrame = iterFrame(i).index;
+                }
+            }
+            if (callee && callee->analysis(cx)->numReturnSites() == 1) {
+                ScriptAnalysis *analysis = callee->analysis(cx);
+                uint32 offset = 0;
+                while (offset < callee->length) {
+                    jsbytecode *pc = callee->code + offset;
+                    if (analysis->maybeCode(pc) && JSOp(*pc) == JSOP_RETURN)
+                        return foldValue(CrossSSAValue(calleeFrame, analysis->poppedValue(pc, 0)));
+                    offset += GetBytecodeLength(pc);
+                }
+            }
+            break;
+          }
+
+          default:;
+        }
+    }
+
+    return cv;
+}
+
 #ifdef DEBUG
 
 void
 ScriptAnalysis::printSSA(JSContext *cx)
 {
     printf("\n");
 
     for (unsigned offset = 0; offset < script->length; offset++) {
--- a/js/src/jsanalyze.h
+++ b/js/src/jsanalyze.h
@@ -688,16 +688,22 @@ class SSAValue
 
     void initPushed(uint32 offset, uint32 index) {
         clear();
         u.pushed.kind = PUSHED;
         u.pushed.offset = offset;
         u.pushed.index = index;
     }
 
+    static SSAValue PushedValue(uint32 offset, uint32 index) {
+        SSAValue v;
+        v.initPushed(offset, index);
+        return v;
+    }
+
     void initInitial(uint32 slot) {
         clear();
         u.var.kind = VAR;
         u.var.initial = true;
         u.var.slot = slot;
     }
 
     void initWritten(uint32 slot, uint32 offset) {
@@ -824,16 +830,17 @@ class ScriptAnalysis
     /* --------- Bytecode analysis --------- */
 
     bool usesRval;
     bool usesScope;
     bool usesThis;
     bool hasCalls;
     bool canTrackVars;
     bool isInlineable;
+    uint32 numReturnSites_;
 
     /* Offsets at which each local becomes unconditionally defined, or a value below. */
     uint32 *definedLocals;
 
     static const uint32 LOCAL_USE_BEFORE_DEF = uint32(-1);
     static const uint32 LOCAL_CONDITIONALLY_DEFINED = uint32(-2);
 
     /* --------- Lifetime analysis --------- */
@@ -864,16 +871,17 @@ class ScriptAnalysis
     /* Whether there are POPV/SETRVAL bytecodes which can write to the frame's rval. */
     bool usesReturnValue() const { return usesRval; }
 
     /* Whether there are NAME bytecodes which can access the frame's scope chain. */
     bool usesScopeChain() const { return usesScope; }
 
     bool usesThisValue() const { return usesThis; }
     bool hasFunctionCalls() const { return hasCalls; }
+    uint32 numReturnSites() const { return numReturnSites_; }
 
     /* Accessors for bytecode information. */
 
     Bytecode& getCode(uint32 offset) {
         JS_ASSERT(script->compartment->activeAnalysis);
         JS_ASSERT(offset < script->length);
         JS_ASSERT(codeArray[offset]);
         return *codeArray[offset];
@@ -1108,16 +1116,86 @@ struct AutoEnterAnalysis
     }
 
     ~AutoEnterAnalysis()
     {
         cx->compartment->activeAnalysis = oldActiveAnalysis;
     }
 };
 
+/* SSA value as used by CrossScriptSSA, identifies the frame it came from. */
+struct CrossSSAValue
+{
+    unsigned frame;
+    SSAValue v;
+    CrossSSAValue(unsigned frame, const SSAValue &v) : frame(frame), v(v) {}
+};
+
+/*
+ * Analysis for managing SSA values from multiple call stack frames. These are
+ * created by the backend compiler when inlining functions, and allow for
+ * values to be tracked as they flow into or out of the inlined frames.
+ */
+class CrossScriptSSA
+{
+  public:
+
+    static const uint32 OUTER_FRAME = uint32(-1);
+    static const unsigned INVALID_FRAME = uint32(-2);
+
+    struct Frame {
+        uint32 index;
+        JSScript *script;
+        uint32 depth;  /* Distance from outer frame to this frame, in sizeof(Value) */
+        uint32 parent;
+        jsbytecode *parentpc;
+
+        Frame(uint32 index, JSScript *script, uint32 depth, uint32 parent, jsbytecode *parentpc)
+            : index(index), script(script), depth(depth), parent(parent), parentpc(parentpc)
+        {}
+    };
+
+    const Frame &getFrame(uint32 index) {
+        if (index == OUTER_FRAME)
+            return outerFrame;
+        return inlineFrames[index];
+    }
+
+    unsigned numFrames() { return 1 + inlineFrames.length(); }
+    const Frame &iterFrame(unsigned i) {
+        if (i == 0)
+            return outerFrame;
+        return inlineFrames[i - 1];
+    }
+
+    JSScript *outerScript() { return outerFrame.script; }
+
+    types::TypeSet *getValueTypes(const CrossSSAValue &cv) {
+        return getFrame(cv.frame).script->analysis(cx)->getValueTypes(cv.v);
+    }
+
+    bool addInlineFrame(JSScript *script, uint32 depth, uint32 parent, jsbytecode *parentpc)
+    {
+        uint32 index = inlineFrames.length();
+        return inlineFrames.append(Frame(index, script, depth, parent, parentpc));
+    }
+
+    CrossScriptSSA(JSContext *cx, JSScript *outer)
+        : cx(cx), outerFrame(OUTER_FRAME, outer, 0, INVALID_FRAME, NULL), inlineFrames(cx)
+    {}
+
+    CrossSSAValue foldValue(const CrossSSAValue &cv);
+
+  private:
+    JSContext *cx;
+
+    Frame outerFrame;
+    Vector<Frame> inlineFrames;
+};
+
 #ifdef DEBUG
 void PrintBytecode(JSContext *cx, JSScript *script, jsbytecode *pc);
 #endif
 
 } /* namespace analyze */
 } /* namespace js */
 
 #endif // jsanalyze_h___
--- a/js/src/jsinfer.cpp
+++ b/js/src/jsinfer.cpp
@@ -1026,16 +1026,20 @@ TypeConstraintArith::newType(JSContext *
      */
     if (other) {
         /*
          * Addition operation, consider these cases:
          *   {int,bool} x {int,bool} -> int
          *   double x {int,bool,double} -> double
          *   string x any -> string
          */
+        if (other->unknown()) {
+            target->addType(cx, TYPE_UNKNOWN);
+            return;
+        }
         switch (type) {
           case TYPE_DOUBLE:
             if (other->hasAnyFlag(TYPE_FLAG_UNDEFINED | TYPE_FLAG_NULL |
                                   TYPE_FLAG_INT32 | TYPE_FLAG_DOUBLE | TYPE_FLAG_BOOLEAN) ||
                 other->getObjectCount() != 0) {
                 target->addType(cx, TYPE_DOUBLE);
             }
             break;
@@ -1460,16 +1464,19 @@ public:
     }
 };
 
 ObjectKind
 TypeSet::getKnownObjectKind(JSContext *cx)
 {
     ObjectKind kind = OBJECT_NONE;
 
+    if (unknown())
+        return OBJECT_UNKNOWN;
+
     unsigned count = getObjectCount();
     for (unsigned i = 0; i < count; i++) {
         TypeObject *object = getObject(i);
         if (object)
             kind = CombineObjectKind(object, kind);
     }
 
     if (kind == OBJECT_NONE)
@@ -3921,16 +3928,18 @@ AnalyzeScriptProperties(JSContext *cx, J
           case JSOP_NEWINIT:
           case JSOP_NEWARRAY:
           case JSOP_NEWOBJECT:
           case JSOP_ENDINIT:
           case JSOP_INITELEM:
           case JSOP_HOLE:
           case JSOP_INITPROP:
           case JSOP_INITMETHOD:
+          case JSOP_CALL:
+          case JSOP_NEW:
             break;
 
           default:
             return baseobj;
         }
 
         offset += analyze::GetBytecodeLength(pc);
     }
@@ -3960,33 +3969,38 @@ analyze::ScriptAnalysis::printTypes(JSCo
 
         unsigned defCount = analyze::GetDefCount(script, offset);
         if (!defCount)
             continue;
 
         for (unsigned i = 0; i < defCount; i++) {
             TypeSet *types = pushedTypes(offset, i);
 
+            if (types->unknown()) {
+                compartment->typeCountOver++;
+                continue;
+            }
+
             unsigned typeCount = types->getObjectCount() ? 1 : 0;
             for (jstype type = TYPE_UNDEFINED; type <= TYPE_STRING; type++) {
                 if (types->hasAnyFlag(1 << type))
                     typeCount++;
             }
 
             /*
              * Adjust the type counts for floats: values marked as floats
              * are also marked as ints by the inference, but for counting
              * we don't consider these to be separate types.
              */
             if (types->hasAnyFlag(TYPE_FLAG_DOUBLE)) {
                 JS_ASSERT(types->hasAnyFlag(TYPE_FLAG_INT32));
                 typeCount--;
             }
 
-            if (types->unknown() || typeCount > TypeCompartment::TYPE_COUNT_LIMIT) {
+            if (typeCount > TypeCompartment::TYPE_COUNT_LIMIT) {
                 compartment->typeCountOver++;
             } else if (typeCount == 0) {
                 /* Ignore values without types, this may be unreached code. */
             } else {
                 compartment->typeCounts[typeCount-1]++;
             }
         }
     }
--- a/js/src/jsinferinlines.h
+++ b/js/src/jsinferinlines.h
@@ -1189,16 +1189,17 @@ TypeSet::setOwnProperty(JSContext *cx, b
         constraint->newPropertyState(cx, this);
         constraint = constraint->next;
     }
 }
 
 inline unsigned
 TypeSet::getObjectCount()
 {
+    JS_ASSERT(!unknown());
     if (objectCount > SET_ARRAY_SIZE)
         return HashSetCapacity(objectCount);
     return objectCount;
 }
 
 inline TypeObject *
 TypeSet::getObject(unsigned i)
 {
--- a/js/src/methodjit/BaseAssembler.h
+++ b/js/src/methodjit/BaseAssembler.h
@@ -168,26 +168,26 @@ class Assembler : public ValueAssembler
         , callIsAligned(false)
 #endif
     {
         startLabel = label();
     }
 
     /* Register pair storing returned type/data for calls. */
 #if defined(JS_CPU_X86) || defined(JS_CPU_X64)
-static const JSC::MacroAssembler::RegisterID JSReturnReg_Type  = JSC::X86Registers::ecx;
-static const JSC::MacroAssembler::RegisterID JSReturnReg_Data  = JSC::X86Registers::edx;
+static const JSC::MacroAssembler::RegisterID JSReturnReg_Type  = JSC::X86Registers::edi;
+static const JSC::MacroAssembler::RegisterID JSReturnReg_Data  = JSC::X86Registers::esi;
 static const JSC::MacroAssembler::RegisterID JSParamReg_Argc   = JSC::X86Registers::ecx;
 #elif defined(JS_CPU_ARM)
-static const JSC::MacroAssembler::RegisterID JSReturnReg_Type  = JSC::ARMRegisters::r2;
-static const JSC::MacroAssembler::RegisterID JSReturnReg_Data  = JSC::ARMRegisters::r1;
+static const JSC::MacroAssembler::RegisterID JSReturnReg_Type  = JSC::ARMRegisters::r4;
+static const JSC::MacroAssembler::RegisterID JSReturnReg_Data  = JSC::ARMRegisters::r5;
 static const JSC::MacroAssembler::RegisterID JSParamReg_Argc   = JSC::ARMRegisters::r1;
 #elif defined(JS_CPU_SPARC)
-static const JSC::MacroAssembler::RegisterID JSReturnReg_Type = JSC::SparcRegisters::i0;
-static const JSC::MacroAssembler::RegisterID JSReturnReg_Data = JSC::SparcRegisters::i1;
+static const JSC::MacroAssembler::RegisterID JSReturnReg_Type = JSC::SparcRegisters::l2;
+static const JSC::MacroAssembler::RegisterID JSReturnReg_Data = JSC::SparcRegisters::l3;
 static const JSC::MacroAssembler::RegisterID JSParamReg_Argc  = JSC::SparcRegisters::i2;
 #endif
 
     size_t distanceOf(Label l) {
         return differenceBetween(startLabel, l);
     }
 
     void load32FromImm(void *ptr, RegisterID reg) {
--- a/js/src/methodjit/Compiler.cpp
+++ b/js/src/methodjit/Compiler.cpp
@@ -63,16 +63,17 @@
 
 #include "jsautooplen.h"
 
 using namespace js;
 using namespace js::mjit;
 #if defined(JS_POLYIC) || defined(JS_MONOIC)
 using namespace js::mjit::ic;
 #endif
+using namespace js::analyze;
 
 #define RETURN_IF_OOM(retval)                                   \
     JS_BEGIN_MACRO                                              \
         if (oomInVector || masm.oom() || stubcc.masm.oom())     \
             return retval;                                      \
     JS_END_MACRO
 
 #if defined(JS_METHODJIT_SPEW)
@@ -89,16 +90,17 @@ static const char *OpcodeNames[] = {
  */
 static const size_t CALLS_BACKEDGES_BEFORE_INLINING = 10000;
 
 mjit::Compiler::Compiler(JSContext *cx, JSScript *outerScript, bool isConstructing,
                          const Vector<PatchableFrame> *patchFrames)
   : BaseCompiler(cx),
     outerScript(outerScript),
     isConstructing(isConstructing),
+    ssa(cx, outerScript),
     globalObj(outerScript->global),
     globalSlots((globalObj && globalObj->isGlobal()) ? globalObj->getRawSlots() : NULL),
     patchFrames(patchFrames),
     savedTraps(NULL),
     frame(cx, *thisFromCtor(), masm, stubcc),
     a(NULL), outer(NULL), script(NULL), PC(NULL), loop(NULL),
     inlineFrames(CompilerAllocPolicy(cx, *thisFromCtor())),
     branchPatches(CompilerAllocPolicy(cx, *thisFromCtor())),
@@ -124,36 +126,37 @@ mjit::Compiler::Compiler(JSContext *cx, 
     loopEntries(CompilerAllocPolicy(cx, *thisFromCtor())),
     stubcc(cx, *thisFromCtor(), frame),
     debugMode_(cx->compartment->debugMode),
 #if defined JS_TRACER
     addTraceHints(cx->traceJitEnabled),
 #else
     addTraceHints(false),
 #endif
-    inlining(false),
+    inlining_(false),
     hasGlobalReallocation(false),
     oomInVector(false),
     applyTricks(NoApplyTricks)
 {
     JS_ASSERT(!outerScript->isUncachedEval);
 
     /* :FIXME: bug 637856 disabling traceJit if inference is enabled */
     if (cx->typeInferenceEnabled())
         addTraceHints = false;
 
     /*
      * Note: we use callCount_ to count both calls and backedges in scripts
      * after they have been compiled and we are checking to recompile a version
      * with inline calls. :FIXME: should remove compartment->incBackEdgeCount
      * and do the same when deciding to initially compile.
      */
-    if (outerScript->callCount() >= CALLS_BACKEDGES_BEFORE_INLINING ||
-        cx->hasRunOption(JSOPTION_METHODJIT_ALWAYS)) {
-        inlining = true;
+    if (!debugMode() && cx->typeInferenceEnabled() &&
+        (outerScript->callCount() >= CALLS_BACKEDGES_BEFORE_INLINING ||
+         cx->hasRunOption(JSOPTION_METHODJIT_ALWAYS))) {
+        inlining_ = true;
     }
 }
 
 CompileStatus
 mjit::Compiler::compile()
 {
     JS_ASSERT_IF(isConstructing, !outerScript->jitCtor);
     JS_ASSERT_IF(!isConstructing, !outerScript->jitNormal);
@@ -177,82 +180,259 @@ mjit::Compiler::compile()
         if (outerScript->fun && !cx->markTypeFunctionUninlineable(outerScript->fun->getType()))
             return Compile_Error;
     }
 
     return status;
 }
 
 CompileStatus
+mjit::Compiler::checkAnalysis(JSScript *script)
+{
+    ScriptAnalysis *analysis = script->analysis(cx);
+
+    if (!analysis)
+        return Compile_Error;
+    if (!analysis->failed() && !analysis->ranBytecode())
+        analysis->analyzeBytecode(cx);
+
+    if (analysis->OOM())
+        return Compile_Error;
+    if (analysis->failed()) {
+        JaegerSpew(JSpew_Abort, "couldn't analyze bytecode; probably switchX or OOM\n");
+        return Compile_Abort;
+    }
+
+    if (cx->typeInferenceEnabled()) {
+        if (!analysis->ranSSA())
+            analysis->analyzeSSA(cx);
+        if (!analysis->failed() && !analysis->ranLifetimes())
+            analysis->analyzeLifetimes(cx);
+        if (!analysis->failed() && !analysis->ranInference())
+            analysis->analyzeTypes(cx);
+        if (analysis->failed()) {
+            js_ReportOutOfMemory(cx);
+            return Compile_Error;
+        }
+    }
+
+    return Compile_Okay;
+}
+
+CompileStatus
+mjit::Compiler::addInlineFrame(JSScript *script, uint32 depth,
+                               uint32 parent, jsbytecode *parentpc)
+{
+    JS_ASSERT(inlining());
+
+    CompileStatus status = checkAnalysis(script);
+    if (status != Compile_Okay)
+        return status;
+
+    if (!ssa.addInlineFrame(script, depth, parent, parentpc))
+        return Compile_Error;
+
+    uint32 index = ssa.iterFrame(ssa.numFrames() - 1).index;
+    return scanInlineCalls(index, depth);
+}
+
+CompileStatus
+mjit::Compiler::scanInlineCalls(uint32 index, uint32 depth)
+{
+    /* Maximum number of calls we will inline at the same site. */
+    static const uint32 INLINE_SITE_LIMIT = 5;
+
+    JS_ASSERT(inlining());
+
+    /* Not inlining yet from 'new' scripts. */
+    if (isConstructing)
+        return Compile_Okay;
+
+    JSScript *script = ssa.getFrame(index).script;
+    ScriptAnalysis *analysis = script->analysis(cx);
+
+    /* Don't inline from functions which could have a non-global scope object. */
+    if (!script->compileAndGo ||
+        (script->fun && script->fun->getParent() != globalObj) ||
+        (script->fun && script->fun->isHeavyweight()) ||
+        script->isActiveEval) {
+        return Compile_Okay;
+    }
+
+    uint32 nextOffset = 0;
+    while (nextOffset < script->length) {
+        uint32 offset = nextOffset;
+        jsbytecode *pc = script->code + offset;
+        nextOffset = offset + GetBytecodeLength(pc);
+
+        Bytecode *code = analysis->maybeCode(pc);
+        if (!code)
+            continue;
+
+        /* :XXX: Not yet inlining 'new' calls. */
+        if (JSOp(*pc) != JSOP_CALL)
+            continue;
+
+        uint32 argc = GET_ARGC(pc);
+        types::TypeSet *calleeTypes = analysis->poppedTypes(pc, argc + 1);
+
+        if (calleeTypes->getKnownTypeTag(cx) != JSVAL_TYPE_OBJECT)
+            continue;
+
+        /*
+         * Make sure no callees have had their .arguments accessed, and trigger
+         * recompilation if they ever are accessed.
+         */
+        types::ObjectKind kind = calleeTypes->getKnownObjectKind(cx);
+        if (kind != types::OBJECT_INLINEABLE_FUNCTION)
+            continue;
+
+        if (calleeTypes->getObjectCount() >= INLINE_SITE_LIMIT)
+            continue;
+
+        /*
+         * Compute the maximum height we can grow the stack for inlined frames.
+         * We always reserve space for loop temporaries and for an extra stack
+         * frame pushed when making a call from the deepest inlined frame.
+         */
+        uint32 stackLimit = outerScript->nslots + StackSpace::STACK_EXTRA
+            - VALUES_PER_STACK_FRAME - FrameState::TEMPORARY_LIMIT;
+
+        /* Compute the depth of any frames inlined at this site. */
+        uint32 nextDepth = depth + VALUES_PER_STACK_FRAME + script->nfixed + code->stackDepth;
+
+        /*
+         * Scan each of the possible callees for other conditions precluding
+         * inlining. We only inline at a call site if all callees are inlineable.
+         */
+        unsigned count = calleeTypes->getObjectCount();
+        bool okay = true;
+        for (unsigned i = 0; i < count; i++) {
+            types::TypeObject *object = calleeTypes->getObject(i);
+            if (!object)
+                continue;
+
+            if (!object->singleton || !object->singleton->isFunction()) {
+                okay = false;
+                break;
+            }
+
+            JSFunction *fun = object->singleton->getFunctionPrivate();
+            if (!fun->isInterpreted()) {
+                okay = false;
+                break;
+            }
+            JSScript *script = fun->script();
+
+            /*
+             * The outer and inner scripts must have the same scope. This only
+             * allows us to inline calls between non-inner functions. Also
+             * check for consistent strictness between the functions.
+             */
+            if (!script->compileAndGo ||
+                fun->getParent() != globalObj ||
+                outerScript->strictModeCode != script->strictModeCode) {
+                okay = false;
+                break;
+            }
+
+            /* We can't cope with inlining recursive functions yet. */
+            uint32 nindex = index;
+            while (nindex != CrossScriptSSA::INVALID_FRAME) {
+                if (ssa.getFrame(nindex).script == script)
+                    okay = false;
+                nindex = ssa.getFrame(nindex).parent;
+            }
+            if (!okay)
+                break;
+
+            /* Watch for excessively deep nesting of inlined frames. */
+            if (nextDepth + script->nslots >= stackLimit) {
+                okay = false;
+                break;
+            }
+
+            CompileStatus status = checkAnalysis(script);
+            if (status != Compile_Okay)
+                return status;
+
+            if (!script->analysis(cx)->inlineable(argc)) {
+                okay = false;
+                break;
+            }
+        }
+        if (!okay)
+            continue;
+
+        calleeTypes->addFreeze(cx);
+
+        /*
+         * Add the inline frames to the cross script SSA. We will pick these
+         * back up when compiling the call site.
+         */
+        for (unsigned i = 0; i < count; i++) {
+            types::TypeObject *object = calleeTypes->getObject(i);
+            if (!object)
+                continue;
+
+            JSFunction *fun = object->singleton->getFunctionPrivate();
+            JSScript *script = fun->script();
+
+            CompileStatus status = addInlineFrame(script, nextDepth, index, pc);
+            if (status != Compile_Okay)
+                return status;
+        }
+    }
+
+    return Compile_Okay;
+}
+
+CompileStatus
 mjit::Compiler::pushActiveFrame(JSScript *script, uint32 argc)
 {
     ActiveFrame *newa = cx->new_<ActiveFrame>(cx);
     if (!newa)
         return Compile_Error;
 
     newa->parent = a;
     if (a)
         newa->parentPC = PC;
     newa->script = script;
 
     if (outer) {
         newa->inlineIndex = uint32(inlineFrames.length());
         inlineFrames.append(newa);
     } else {
-        newa->inlineIndex = uint32(-1);
+        newa->inlineIndex = CrossScriptSSA::OUTER_FRAME;
         outer = newa;
     }
-
-    analyze::ScriptAnalysis *newAnalysis = script->analysis(cx);
-    if (!newAnalysis)
-        return Compile_Error;
-    if (!newAnalysis->failed() && !newAnalysis->ranBytecode())
-        newAnalysis->analyzeBytecode(cx);
-
-    if (newAnalysis->OOM())
-        return Compile_Error;
-    if (newAnalysis->failed()) {
-        JaegerSpew(JSpew_Abort, "couldn't analyze bytecode; probably switchX or OOM\n");
-        return Compile_Abort;
-    }
-
-    if (cx->typeInferenceEnabled()) {
-        if (!newAnalysis->ranSSA())
-            newAnalysis->analyzeSSA(cx);
-        if (!newAnalysis->failed() && !newAnalysis->ranLifetimes())
-            newAnalysis->analyzeLifetimes(cx);
-        if (newAnalysis->failed()) {
-            js_ReportOutOfMemory(cx);
-            return Compile_Error;
-        }
-    }
+    JS_ASSERT(ssa.getFrame(newa->inlineIndex).script == script);
+
+    ScriptAnalysis *newAnalysis = script->analysis(cx);
 
 #ifdef JS_METHODJIT_SPEW
     if (cx->typeInferenceEnabled() && IsJaegerSpewChannelActive(JSpew_Regalloc)) {
         unsigned nargs = script->fun ? script->fun->nargs : 0;
         for (unsigned i = 0; i < nargs; i++) {
-            uint32 slot = analyze::ArgSlot(i);
+            uint32 slot = ArgSlot(i);
             if (!newAnalysis->slotEscapes(slot)) {
                 JaegerSpew(JSpew_Regalloc, "Argument %u:", i);
                 newAnalysis->liveness(slot).print();
             }
         }
         for (unsigned i = 0; i < script->nfixed; i++) {
-            uint32 slot = analyze::LocalSlot(script, i);
+            uint32 slot = LocalSlot(script, i);
             if (!newAnalysis->slotEscapes(slot)) {
                 JaegerSpew(JSpew_Regalloc, "Local %u:", i);
                 newAnalysis->liveness(slot).print();
             }
         }
     }
 #endif
 
-    if (a)
-        frame.getUnsyncedEntries(&newa->depth, &newa->unsyncedEntries);
-
     if (!frame.pushActiveFrame(script, argc)) {
         js_ReportOutOfMemory(cx);
         return Compile_Error;
     }
 
     newa->jumpMap = (Label *)cx->malloc_(sizeof(Label) * script->length);
     if (!newa->jumpMap) {
         js_ReportOutOfMemory(cx);
@@ -302,17 +482,17 @@ mjit::Compiler::popActiveFrame()
     JS_END_MACRO
 
 CompileStatus
 mjit::Compiler::performCompilation(JITScript **jitp)
 {
     JaegerSpew(JSpew_Scripts, "compiling script (file \"%s\") (line \"%d\") (length \"%d\")\n",
                outerScript->filename, outerScript->lineno, outerScript->length);
 
-    if (inlining) {
+    if (inlining()) {
         JaegerSpew(JSpew_Inlining, "inlining calls in script (file \"%s\") (line \"%d\")\n",
                    outerScript->filename, outerScript->lineno);
     }
 
 #ifdef JS_METHODJIT_SPEW
     Profiler prof;
     prof.start();
 #endif
@@ -321,16 +501,19 @@ mjit::Compiler::performCompilation(JITSc
     outerScript->debugMode = debugMode();
 #endif
 
     JS_ASSERT(cx->compartment->activeInference);
 
     {
         types::AutoEnterCompilation enter(cx, outerScript);
 
+        CHECK_STATUS(checkAnalysis(outerScript));
+        if (inlining())
+            CHECK_STATUS(scanInlineCalls(CrossScriptSSA::OUTER_FRAME, 0));
         CHECK_STATUS(pushActiveFrame(outerScript, 0));
         CHECK_STATUS(generatePrologue());
         CHECK_STATUS(generateMethod());
         CHECK_STATUS(generateEpilogue());
         CHECK_STATUS(finishThisUp(jitp));
     }
 
 #ifdef JS_METHODJIT_SPEW
@@ -383,21 +566,20 @@ mjit::Compiler::performCompilation(JITSc
     }
 
     return Compile_Okay;
 }
 
 #undef CHECK_STATUS
 
 mjit::Compiler::ActiveFrame::ActiveFrame(JSContext *cx)
-    : parent(NULL), parentPC(NULL), script(NULL), inlineIndex(uint32(-1)),
-      jumpMap(NULL), unsyncedEntries(cx),
-      needReturnValue(false), syncReturnValue(false),
-      returnValueDouble(false), returnSet(false), returnParentRegs(0),
-      temporaryParentRegs(0), returnJumps(NULL)
+    : parent(NULL), parentPC(NULL), script(NULL), jumpMap(NULL),
+      inlineIndex(uint32(-1)), needReturnValue(false), syncReturnValue(false),
+      returnValueDouble(false), returnSet(false), returnEntry(NULL),
+      returnJumps(NULL), exitState(NULL)
 {}
 
 mjit::Compiler::ActiveFrame::~ActiveFrame()
 {
     js::Foreground::free_(jumpMap);
 }
 
 mjit::Compiler::~Compiler()
@@ -408,24 +590,16 @@ mjit::Compiler::~Compiler()
         cx->delete_(inlineFrames[i]);
 
     cx->free_(savedTraps);
 }
 
 CompileStatus
 mjit::Compiler::prepareInferenceTypes(JSScript *script, ActiveFrame *a)
 {
-    /* Analyze the script if we have not already done so. */
-    analyze::ScriptAnalysis *analysis = script->analysis(cx);
-    if (!analysis->ranInference()) {
-        analysis->analyzeTypes(cx);
-        if (!analysis->ranInference())
-            return Compile_Error;
-    }
-
     /*
      * During our walk of the script, we need to preserve the invariant that at
      * join points the in memory type tag is always in sync with the known type
      * tag of the variable's SSA value at that join point. In particular, SSA
      * values inferred as (int|double) must in fact be doubles, stored either
      * in floating point registers or in memory. (There is an exception for
      * locals with a dead value at the current point, whose type may or may not
      * be synced).
@@ -440,21 +614,21 @@ mjit::Compiler::prepareInferenceTypes(JS
      *
      * When we get to a branch and need to know a variable's value at the
      * branch target, we know it will either be a phi node at the target or
      * the variable's current value, as no phi node is created at the target
      * only if a variable has the same value on all incoming edges.
      */
 
     a->varTypes = (VarType *)
-        cx->calloc_(analyze::TotalSlots(script) * sizeof(VarType));
+        cx->calloc_(TotalSlots(script) * sizeof(VarType));
     if (!a->varTypes)
         return Compile_Error;
 
-    for (uint32 slot = analyze::ArgSlot(0); slot < analyze::TotalSlots(script); slot++) {
+    for (uint32 slot = ArgSlot(0); slot < TotalSlots(script); slot++) {
         VarType &vt = a->varTypes[slot];
         vt.types = script->slotTypes(slot);
         vt.type = vt.types->getKnownTypeTag(cx);
     }
 
     return Compile_Okay;
 }
 
@@ -632,17 +806,17 @@ mjit::Compiler::generatePrologue()
     if (debugMode() || Probes::callTrackingActive(cx)) {
         REJOIN_SITE(stubs::ScriptDebugPrologue);
         INLINE_STUBCALL(stubs::ScriptDebugPrologue);
     }
 
     if (cx->typeInferenceEnabled()) {
         /* Convert integer arguments which were inferred as (int|double) to doubles. */
         for (uint32 i = 0; script->fun && i < script->fun->nargs; i++) {
-            uint32 slot = analyze::ArgSlot(i);
+            uint32 slot = ArgSlot(i);
             if (a->varTypes[slot].type == JSVAL_TYPE_DOUBLE && analysis->trackSlot(slot))
                 frame.ensureDouble(frame.getArg(i));
         }
     }
 
     recompileCheckHelper();
 
     return Compile_Okay;
@@ -695,28 +869,24 @@ mjit::Compiler::finishThisUp(JITScript *
     masm.executableCopy(result);
     stubcc.masm.executableCopy(result + masm.size());
     
     JSC::LinkBuffer fullCode(result, totalSize);
     JSC::LinkBuffer stubCode(result + masm.size(), stubcc.size());
 
     size_t nNmapLive = loopEntries.length();
     for (size_t i = 0; i < script->length; i++) {
-        analyze::Bytecode *opinfo = analysis->maybeCode(i);
+        Bytecode *opinfo = analysis->maybeCode(i);
         if (opinfo && opinfo->safePoint) {
             /* loopEntries cover any safe points which are at loop heads. */
             if (!cx->typeInferenceEnabled() || !opinfo->loopHead)
                 nNmapLive++;
         }
     }
 
-    size_t nUnsyncedEntries = 0;
-    for (size_t i = 0; i < inlineFrames.length(); i++)
-        nUnsyncedEntries += inlineFrames[i]->unsyncedEntries.length();
-
     /* Please keep in sync with JITScript::scriptDataSize! */
     size_t totalBytes = sizeof(JITScript) +
                         sizeof(NativeMapEntry) * nNmapLive +
                         sizeof(InlineFrame) * inlineFrames.length() +
                         sizeof(CallSite) * callSites.length() +
                         sizeof(RejoinSite) * rejoinSites.length() +
 #if defined JS_MONOIC
                         sizeof(ic::GetGlobalNameIC) * getGlobalNames.length() +
@@ -725,17 +895,17 @@ mjit::Compiler::finishThisUp(JITScript *
                         sizeof(ic::EqualityICInfo) * equalityICs.length() +
                         sizeof(ic::TraceICInfo) * traceICs.length() +
 #endif
 #if defined JS_POLYIC
                         sizeof(ic::PICInfo) * pics.length() +
                         sizeof(ic::GetElementIC) * getElemICs.length() +
                         sizeof(ic::SetElementIC) * setElemICs.length() +
 #endif
-                        sizeof(UnsyncedEntry) * nUnsyncedEntries;
+                        0;
 
     uint8 *cursor = (uint8 *)cx->calloc_(totalBytes);
     if (!cursor) {
         execPool->release();
         js_ReportOutOfMemory(cx);
         return Compile_Error;
     }
 
@@ -764,17 +934,17 @@ mjit::Compiler::finishThisUp(JITScript *
 
     /* Build the pc -> ncode mapping. */
     NativeMapEntry *jitNmap = (NativeMapEntry *)cursor;
     jit->nNmapPairs = nNmapLive;
     cursor += sizeof(NativeMapEntry) * jit->nNmapPairs;
     size_t ix = 0;
     if (jit->nNmapPairs > 0) {
         for (size_t i = 0; i < script->length; i++) {
-            analyze::Bytecode *opinfo = analysis->maybeCode(i);
+            Bytecode *opinfo = analysis->maybeCode(i);
             if (opinfo && opinfo->safePoint) {
                 Label L = jumpMap[i];
                 JS_ASSERT(L.isValid());
                 jitNmap[ix].bcOff = i;
                 jitNmap[ix].ncode = (uint8 *)(result + masm.distanceOf(L));
                 ix++;
             }
         }
@@ -803,17 +973,17 @@ mjit::Compiler::finishThisUp(JITScript *
         InlineFrame &to = jitInlineFrames[i];
         ActiveFrame *from = inlineFrames[i];
         if (from->parent != outer)
             to.parent = &jitInlineFrames[from->parent->inlineIndex];
         else
             to.parent = NULL;
         to.parentpc = from->parentPC;
         to.fun = from->script->fun;
-        to.depth = from->depth;
+        to.depth = ssa.getFrame(from->inlineIndex).depth;
     }
 
     /* Build the table of call sites. */
     CallSite *jitCallSites = (CallSite *)cursor;
     jit->nCallSites = callSites.length();
     cursor += sizeof(CallSite) * jit->nCallSites;
     for (size_t i = 0; i < jit->nCallSites; i++) {
         CallSite &to = jitCallSites[i];
@@ -1163,26 +1333,16 @@ mjit::Compiler::finishThisUp(JITScript *
                 JS_ASSERT(distance <= 0);
                 jitPics[i].u.get.typeCheckOffset = distance;
             }
         }
         stubCode.patch(pics[i].paramAddr, &jitPics[i]);
     }
 #endif
 
-    for (size_t i = 0; i < jit->nInlineFrames; i++) {
-        InlineFrame &to = jitInlineFrames[i];
-        ActiveFrame *from = inlineFrames[i];
-        to.nUnsyncedEntries = from->unsyncedEntries.length();
-        to.unsyncedEntries = (UnsyncedEntry *) cursor;
-        cursor += sizeof(UnsyncedEntry) * to.nUnsyncedEntries;
-        for (size_t j = 0; j < to.nUnsyncedEntries; j++)
-            to.unsyncedEntries[j] = from->unsyncedEntries[j];
-    }
-
     JS_ASSERT(size_t(cursor - (uint8*)jit) == totalBytes);
 
     /* Link fast and slow paths together. */
     stubcc.fixCrossJumps(result, masm.size(), masm.size() + stubcc.size());
 
     size_t doubleOffset = masm.size() + stubcc.size();
     double *inlineDoubles = (double *) (result + doubleOffset);
     double *oolDoubles = (double*) (result + doubleOffset +
@@ -1293,56 +1453,55 @@ mjit::Compiler::generateMethod()
                 return Compile_Error;
             op = JSOp(*PC);
             trap |= stubs::JSTRAP_TRAP;
         }
         if (script->singleStepMode && scanner.firstOpInLine(PC - script->code))
             trap |= stubs::JSTRAP_SINGLESTEP;
         variadicRejoin = false;
 
-        analyze::Bytecode *opinfo = analysis->maybeCode(PC);
+        Bytecode *opinfo = analysis->maybeCode(PC);
 
         if (!opinfo) {
             if (op == JSOP_STOP)
                 break;
             if (js_CodeSpec[op].length != -1)
                 PC += js_CodeSpec[op].length;
             else
                 PC += js_GetVariableBytecodeLength(PC);
             continue;
         }
 
-        if (loop)
-            loop->PC = PC;
+        if (loop && !a->parent)
+            loop->setOuterPC(PC);
 
         frame.setPC(PC);
         frame.setInTryBlock(opinfo->inTryBlock);
 
         if (fallthrough) {
             /*
              * If there is fallthrough from the previous opcode and we changed
              * any entries into doubles for a branch at that previous op,
              * revert those entries into integers. Maintain an invariant that
              * for any variables inferred to be integers, the compiler
-             * maintains them as integers slots, both for faster code inside
+             * maintains them as integers, both for faster code inside
              * basic blocks and for fewer conversions needed when branching.
-             * :XXX: this code is hacky and slow, but doesn't run that much.
              */
             for (unsigned i = 0; i < fixedDoubleEntries.length(); i++) {
-                FrameEntry *fe = frame.getOrTrack(fixedDoubleEntries[i]);
+                FrameEntry *fe = frame.getSlotEntry(fixedDoubleEntries[i]);
                 frame.ensureInteger(fe);
             }
         }
         fixedDoubleEntries.clear();
 
 #ifdef DEBUG
         if (fallthrough && cx->typeInferenceEnabled()) {
-            for (uint32 slot = analyze::ArgSlot(0); slot < analyze::TotalSlots(script); slot++) {
+            for (uint32 slot = ArgSlot(0); slot < TotalSlots(script); slot++) {
                 if (a->varTypes[slot].type == JSVAL_TYPE_INT32) {
-                    FrameEntry *fe = frame.getOrTrack(slot);
+                    FrameEntry *fe = frame.getSlotEntry(slot);
                     JS_ASSERT(!fe->isType(JSVAL_TYPE_DOUBLE));
                 }
             }
         }
 #endif
 
         if (opinfo->jumpTarget || trap) {
             if (fallthrough) {
@@ -1362,17 +1521,17 @@ mjit::Compiler::generateMethod()
                         return Compile_Error;
                 } else {
                     if (!frame.syncForBranch(PC, Uses(0)))
                         return Compile_Error;
                     JS_ASSERT(frame.consistentRegisters(PC));
                 }
             }
 
-            if (!frame.discardForJoin(PC, opinfo->stackDepth))
+            if (!frame.discardForJoin(analysis->getAllocation(PC), opinfo->stackDepth))
                 return Compile_Error;
             restoreAnalysisTypes();
             fallthrough = true;
 
             if (!cx->typeInferenceEnabled()) {
                 /* All join points have synced state if we aren't doing cross-branch regalloc. */
                 opinfo->safePoint = true;
             }
@@ -1464,17 +1623,17 @@ mjit::Compiler::generateMethod()
           BEGIN_CASE(JSOP_RETURN)
             emitReturn(frame.peek(-1));
             fallthrough = false;
           END_CASE(JSOP_RETURN)
 
           BEGIN_CASE(JSOP_GOTO)
           BEGIN_CASE(JSOP_DEFAULT)
           {
-            unsigned targetOffset = analyze::FollowBranch(script, PC - script->code);
+            unsigned targetOffset = FollowBranch(script, PC - script->code);
             jsbytecode *target = script->code + targetOffset;
 
             fixDoubleTypes(target);
 
             /*
              * Watch for gotos which are entering a 'for' or 'while' loop.
              * These jump to the loop condition test and are immediately
              * followed by the head of the loop.
@@ -1978,17 +2137,17 @@ mjit::Compiler::generateMethod()
             bool inlined = false;
             if (op == JSOP_CALL) {
                 CompileStatus status = inlineNativeFunction(GET_ARGC(PC), callingNew);
                 if (status == Compile_Okay)
                     done = true;
                 else if (status != Compile_InlineAbort)
                     return status;
             }
-            if (!done && inlining) {
+            if (!done && inlining()) {
                 CompileStatus status = inlineScriptedFunction(GET_ARGC(PC), callingNew);
                 if (status == Compile_Okay) {
                     done = true;
                     inlined = true;
                 }
                 else if (status != Compile_InlineAbort)
                     return status;
             }
@@ -2238,17 +2397,17 @@ mjit::Compiler::generateMethod()
             frame.storeArg(GET_SLOTNO(PC), pop);
 
             /*
              * Types of variables inferred as doubles need to be maintained as
              * doubles. We might forget the type of the variable by the next
              * call to fixDoubleTypes.
              */
             if (cx->typeInferenceEnabled()) {
-                uint32 slot = analyze::ArgSlot(GET_SLOTNO(PC));
+                uint32 slot = ArgSlot(GET_SLOTNO(PC));
                 if (a->varTypes[slot].type == JSVAL_TYPE_DOUBLE && fixDoubleSlot(slot))
                     frame.ensureDouble(frame.getArg(GET_SLOTNO(PC)));
             }
 
             if (pop) {
                 frame.pop();
                 PC += JSOP_SETARG_LENGTH + JSOP_POP_LENGTH;
                 break;
@@ -2266,17 +2425,17 @@ mjit::Compiler::generateMethod()
           BEGIN_CASE(JSOP_SETLOCAL)
           {
             updateVarType();
             jsbytecode *next = &PC[JSOP_SETLOCAL_LENGTH];
             bool pop = JSOp(*next) == JSOP_POP && !analysis->jumpTarget(next);
             frame.storeLocal(GET_SLOTNO(PC), pop, true);
 
             if (cx->typeInferenceEnabled()) {
-                uint32 slot = analyze::LocalSlot(script, GET_SLOTNO(PC));
+                uint32 slot = LocalSlot(script, GET_SLOTNO(PC));
                 if (a->varTypes[slot].type == JSVAL_TYPE_DOUBLE && fixDoubleSlot(slot))
                     frame.ensureDouble(frame.getLocal(GET_SLOTNO(PC)));
             }
 
             if (pop) {
                 frame.pop();
                 PC += JSOP_SETLOCAL_LENGTH + JSOP_POP_LENGTH;
                 break;
@@ -2810,24 +2969,24 @@ mjit::Compiler::generateMethod()
 #endif
             return Compile_Abort;
         }
 
     /**********************
      *  END COMPILER OPS  *
      **********************/ 
 
-        if (cx->typeInferenceEnabled() && PC == oldPC + analyze::GetBytecodeLength(oldPC)) {
+        if (cx->typeInferenceEnabled() && PC == oldPC + GetBytecodeLength(oldPC)) {
             /*
              * Inform the frame of the type sets for values just pushed. Skip
              * this if we did any opcode fusions, we don't keep track of the
              * associated type sets in such cases.
              */
-            unsigned nuses = analyze::GetUseCount(script, oldPC - script->code);
-            unsigned ndefs = analyze::GetDefCount(script, oldPC - script->code);
+            unsigned nuses = GetUseCount(script, oldPC - script->code);
+            unsigned ndefs = GetDefCount(script, oldPC - script->code);
             for (unsigned i = 0; i < ndefs; i++) {
                 FrameEntry *fe = frame.getStack(opinfo->stackDepth - nuses + i);
                 if (fe) {
                     /* fe may be NULL for conditionally pushed entries, e.g. JSOP_AND */
                     frame.extra(fe).types = analysis->pushedTypes(oldPC - script->code, i);
                 }
             }
         }
@@ -3049,46 +3208,64 @@ mjit::Compiler::emitInlineReturnValue(Fr
         Address address = frame.addressForInlineReturn();
         if (fe)
             frame.storeTo(fe, address);
         else
             masm.storeValue(UndefinedValue(), address);
         return;
     }
 
+    /*
+     * For inlined functions that simply return an entry present in the outer
+     * script (e.g. a loop invariant term), mark the copy and propagate it
+     * after popping the frame.
+     */
+    if (!a->exitState && fe && fe->isCopy() && frame.isOuterSlot(fe->backing())) {
+        a->returnEntry = fe->backing();
+        return;
+    }
+
     if (a->returnValueDouble) {
         JS_ASSERT(fe);
         frame.ensureDouble(fe);
         Registers mask(a->returnSet
                        ? Registers::maskReg(a->returnRegister)
                        : Registers::AvailFPRegs);
         FPRegisterID fpreg;
         if (!fe->isConstant()) {
             fpreg = frame.tempRegInMaskForData(fe, mask.freeMask).fpreg();
+            frame.syncAndForgetFe(fe, true);
+            frame.takeReg(fpreg);
         } else {
             fpreg = frame.allocReg(mask.freeMask).fpreg();
             masm.slowLoadConstantDouble(fe->getValue().toDouble(), fpreg);
         }
         JS_ASSERT_IF(a->returnSet, fpreg == a->returnRegister.fpreg());
         a->returnRegister = fpreg;
     } else {
         Registers mask(a->returnSet
                        ? Registers::maskReg(a->returnRegister)
                        : Registers::AvailRegs);
         RegisterID reg;
         if (fe && !fe->isConstant()) {
             reg = frame.tempRegInMaskForData(fe, mask.freeMask).reg();
+            frame.syncAndForgetFe(fe, true);
+            frame.takeReg(reg);
         } else {
             reg = frame.allocReg(mask.freeMask).reg();
             Value val = fe ? fe->getValue() : UndefinedValue();
             masm.loadValuePayload(val, reg);
         }
         JS_ASSERT_IF(a->returnSet, reg == a->returnRegister.reg());
         a->returnRegister = reg;
     }
+
+    a->returnSet = true;
+    if (a->exitState)
+        a->exitState->setUnassigned(a->returnRegister);
 }
 
 void
 mjit::Compiler::emitReturn(FrameEntry *fe)
 {
     JS_ASSERT_IF(!script->fun, JSOp(*PC) == JSOP_STOP);
 
     /* Only the top of the stack can be returned. */
@@ -3105,47 +3282,39 @@ mjit::Compiler::emitReturn(FrameEntry *f
          * Returning from an inlined script. The checks we do for inlineability
          * and recompilation triggered by args object construction ensure that
          * there can't be an arguments or call object.
          */
 
         if (a->needReturnValue)
             emitInlineReturnValue(fe);
 
-        /* Make sure the parent entries still in registers are consistent between return sites. */
-        if (!a->returnSet) {
-            a->returnParentRegs = frame.getParentRegs().freeMask & ~a->temporaryParentRegs.freeMask;
-            if (a->needReturnValue && !a->syncReturnValue &&
-                a->returnParentRegs.hasReg(a->returnRegister)) {
-                a->returnParentRegs.takeReg(a->returnRegister);
-            }
+        if (a->exitState) {
+            /*
+             * Restore the register state to reflect that at the original call,
+             * modulo entries which will be popped once the call finishes and any
+             * entry which will be clobbered by the return value register.
+             */
+            frame.syncForAllocation(a->exitState, true, Uses(0));
         }
 
-        frame.discardLocalRegisters();
-        frame.syncParentRegistersInMask(masm,
-            frame.getParentRegs().freeMask & ~a->returnParentRegs.freeMask &
-            ~a->temporaryParentRegs.freeMask, true);
-        frame.restoreParentRegistersInMask(masm,
-            a->returnParentRegs.freeMask & ~frame.getParentRegs().freeMask, true);
-
-        a->returnSet = true;
-
         /*
          * Simple tests to see if we are at the end of the script and will
          * fallthrough after the script body finishes, thus won't need to jump.
          */
         bool endOfScript =
             (JSOp(*PC) == JSOP_STOP) ||
             (JSOp(*PC) == JSOP_RETURN &&
              (JSOp(*(PC + JSOP_RETURN_LENGTH)) == JSOP_STOP &&
               !analysis->maybeCode(PC + JSOP_RETURN_LENGTH)));
         if (!endOfScript)
             a->returnJumps->append(masm.jump());
 
-        frame.discardFrame();
+        if (a->returnSet)
+            frame.freeReg(a->returnRegister);
         return;
     }
 
     /*
      * Outside the mjit, activation objects are put by StackSpace::pop*
      * members. For JSOP_RETURN, the interpreter only calls popInlineFrame if
      * fp != entryFrame since the VM protocol is that Invoke/Execute are
      * responsible for pushing/popping the initial frame. The mjit does not
@@ -3238,20 +3407,18 @@ mjit::Compiler::interruptCheckHelper()
     stubcc.rejoin(Changes(0));
 }
 
 void
 mjit::Compiler::recompileCheckHelper()
 {
     REJOIN_SITE(stubs::RecompileForInline);
 
-    if (!analysis->hasFunctionCalls() || !cx->typeInferenceEnabled() ||
-        script->callCount() >= CALLS_BACKEDGES_BEFORE_INLINING) {
+    if (inlining() || debugMode() || !analysis->hasFunctionCalls() || !cx->typeInferenceEnabled())
         return;
-    }
 
     size_t *addr = script->addressOfCallCount();
     masm.add32(Imm32(1), AbsoluteAddress(addr));
 #if defined(JS_CPU_X86) || defined(JS_CPU_ARM)
     Jump jump = masm.branch32(Assembler::GreaterThanOrEqual, AbsoluteAddress(addr),
                               Imm32(CALLS_BACKEDGES_BEFORE_INLINING));
 #else
     /* Handle processors that can't load from absolute addresses. */
@@ -3858,235 +4025,160 @@ mjit::Compiler::callArrayBuiltin(uint32 
 }
 
 /* Maximum number of calls we will inline at the same site. */
 static const uint32 INLINE_SITE_LIMIT = 5;
 
 CompileStatus
 mjit::Compiler::inlineScriptedFunction(uint32 argc, bool callingNew)
 {
-    JS_ASSERT(inlining);
-
-    if (!cx->typeInferenceEnabled())
-        return Compile_InlineAbort;
-
-    /* :XXX: Not doing inlining yet when calling 'new' or calling from 'new'. */
-    if (isConstructing || callingNew)
-        return Compile_InlineAbort;
-
-    if (applyTricks == LazyArgsObj)
-        return Compile_InlineAbort;
-
-    /* Don't inline from functions which could have a non-global scope object. */
-    if (!outerScript->compileAndGo ||
-        (outerScript->fun && outerScript->fun->getParent() != globalObj) ||
-        (outerScript->fun && outerScript->fun->isHeavyweight()) ||
-        outerScript->isActiveEval) {
-        return Compile_InlineAbort;
-    }
-
-    FrameEntry *origCallee = frame.peek(-((int)argc + 2));
-    FrameEntry *origThis = frame.peek(-((int)argc + 1));
-
-    types::TypeSet *types = frame.extra(origCallee).types;
-    if (!types || types->getKnownTypeTag(cx) != JSVAL_TYPE_OBJECT)
-        return Compile_InlineAbort;
-
-    /*
-     * Make sure no callees have had their .arguments accessed, and trigger
-     * recompilation if they ever are accessed.
-     */
-    types::ObjectKind kind = types->getKnownObjectKind(cx);
-    if (kind != types::OBJECT_INLINEABLE_FUNCTION)
-        return Compile_InlineAbort;
-
-    if (types->getObjectCount() >= INLINE_SITE_LIMIT)
+    JS_ASSERT(inlining());
+
+    /* We already know which frames we are inlining at each PC, so scan the list of inline frames. */
+    bool calleeMultipleReturns = false;
+    Vector<JSScript *> inlineCallees(CompilerAllocPolicy(cx, *this));
+    for (unsigned i = 0; i < ssa.numFrames(); i++) {
+        if (ssa.iterFrame(i).parent == a->inlineIndex && ssa.iterFrame(i).parentpc == PC) {
+            JSScript *script = ssa.iterFrame(i).script;
+            inlineCallees.append(script);
+            if (script->analysis(cx)->numReturnSites() > 1)
+                calleeMultipleReturns = true;
+        }
+    }
+
+    if (inlineCallees.empty())
         return Compile_InlineAbort;
 
     /*
-     * Compute the maximum height we can grow the stack for inlined frames.
-     * We always reserve space for an extra stack frame pushed when making
-     * a call from the deepest inlined frame.
-     */
-    uint32 stackLimit = outerScript->nslots + StackSpace::STACK_EXTRA - VALUES_PER_STACK_FRAME;
-
-    /*
-     * Scan each of the possible callees for other conditions precluding
-     * inlining. We only inline at a call site if all callees are inlineable.
+     * Remove all dead entries from the frame's tracker. We will not recognize
+     * them as dead after pushing the new frame.
      */
-    unsigned count = types->getObjectCount();
-    for (unsigned i = 0; i < count; i++) {
-        types::TypeObject *object = types->getObject(i);
-        if (!object)
-            continue;
-
-        if (!object->singleton || !object->singleton->isFunction())
-            return Compile_InlineAbort;
-
-        JSFunction *fun = object->singleton->getFunctionPrivate();
-        if (!fun->isInterpreted())
-            return Compile_InlineAbort;
-        JSScript *script = fun->script();
-
+    frame.pruneDeadEntries();
+
+    RegisterAllocation *exitState = NULL;
+    if (inlineCallees.length() > 1 || calleeMultipleReturns) {
         /*
-         * The outer and inner scripts must have the same scope. This only
-         * allows us to inline calls between non-inner functions. Also check
-         * for consistent strictness between the functions.
+         * Multiple paths through the callees, get a register allocation for
+         * the various incoming edges.
          */
-        if (!script->compileAndGo ||
-            fun->getParent() != globalObj ||
-            outerScript->strictModeCode != script->strictModeCode) {
-            return Compile_InlineAbort;
-        }
-
-        /* We can't cope with inlining recursive functions yet. */
-        ActiveFrame *checka = a;
-        while (checka) {
-            if (checka->script == script)
-                return Compile_InlineAbort;
-            checka = checka->parent;
-        }
-
-        /* Watch for excessively deep nesting of inlined frames. */
-        if (frame.totalDepth() + VALUES_PER_STACK_FRAME + fun->script()->nslots >= stackLimit)
-            return Compile_InlineAbort;
-
-        analyze::ScriptAnalysis *analysis = script->analysis(cx);
-        if (analysis && !analysis->failed() && !analysis->ranBytecode())
-            analysis->analyzeBytecode(cx);
-        if (!analysis || analysis->OOM())
-            return Compile_Error;
-        if (analysis->failed())
-            return Compile_Abort;
-
-        if (!analysis->inlineable(argc))
-            return Compile_InlineAbort;
-
-        if (analysis->usesThisValue() && origThis->isNotType(JSVAL_TYPE_OBJECT))
-            return Compile_InlineAbort;
-    }
-
-    types->addFreeze(cx);
-
-    /*
-     * For 'this' and arguments which are copies of other entries still in
-     * memory, try to get registers now. This will let us carry these entries
-     * around loops if possible. (Entries first accessed within the inlined
-     * call can't be loop carried).
-     */
-    frame.tryCopyRegister(origThis, origCallee);
-    for (unsigned i = 0; i < argc; i++)
-        frame.tryCopyRegister(frame.peek(-((int)i + 1)), origCallee);
+        exitState = frame.computeAllocation(PC + JSOP_CALL_LENGTH);
+    }
 
     /*
      * If this is a polymorphic callsite, get a register for the callee too.
      * After this, do not touch the register state in the current frame until
      * stubs for all callees have been generated.
      */
+    FrameEntry *origCallee = frame.peek(-((int)argc + 2));
+    FrameEntry *entrySnapshot = NULL;
     MaybeRegisterID calleeReg;
-    if (count > 1) {
+    if (inlineCallees.length() > 1) {
         frame.forgetMismatchedObject(origCallee);
         calleeReg = frame.tempRegForData(origCallee);
+
+        entrySnapshot = frame.snapshotState();
+        if (!entrySnapshot)
+            return Compile_Error;
     }
     MaybeJump calleePrevious;
 
-    /*
-     * Registers for entries which will be popped after the call finishes do
-     * not need to be preserved by the inline frames.
-     */
-    Registers temporaryParentRegs = frame.getTemporaryCallRegisters(origCallee);
-
     JSValueType returnType = knownPushedType(0);
 
     bool needReturnValue = JSOP_POP != (JSOp)*(PC + JSOP_CALL_LENGTH);
     bool syncReturnValue = needReturnValue && returnType == JSVAL_TYPE_UNKNOWN;
 
     /* Track register state after the call. */
     bool returnSet = false;
     AnyRegisterID returnRegister;
-    Registers returnParentRegs = 0;
+    const FrameEntry *returnEntry = NULL;
 
     Vector<Jump, 4, CompilerAllocPolicy> returnJumps(CompilerAllocPolicy(cx, *this));
 
-    for (unsigned i = 0; i < count; i++) {
-        types::TypeObject *object = types->getObject(i);
-        if (!object)
-            continue;
-
-        JSFunction *fun = object->singleton->getFunctionPrivate();
-
+    for (unsigned i = 0; i < inlineCallees.length(); i++) {
+        if (entrySnapshot)
+            frame.restoreFromSnapshot(entrySnapshot);
+
+        JSScript *script = inlineCallees[i];
         CompileStatus status;
 
-        status = pushActiveFrame(fun->script(), argc);
+        status = pushActiveFrame(script, argc);
         if (status != Compile_Okay)
             return status;
 
+        a->exitState = exitState;
+
         JaegerSpew(JSpew_Inlining, "inlining call to script (file \"%s\") (line \"%d\")\n",
                    script->filename, script->lineno);
 
         if (calleePrevious.isSet()) {
             calleePrevious.get().linkTo(masm.label(), &masm);
             calleePrevious = MaybeJump();
         }
 
-        if (i + 1 != count) {
+        if (i + 1 != inlineCallees.length()) {
             /* Guard on the callee, except when this object must be the callee. */
             JS_ASSERT(calleeReg.isSet());
-            calleePrevious = masm.branchPtr(Assembler::NotEqual, calleeReg.reg(), ImmPtr(fun));
+            calleePrevious = masm.branchPtr(Assembler::NotEqual, calleeReg.reg(), ImmPtr(script->fun));
         }
 
         a->returnJumps = &returnJumps;
         a->needReturnValue = needReturnValue;
         a->syncReturnValue = syncReturnValue;
         a->returnValueDouble = returnType == JSVAL_TYPE_DOUBLE;
         if (returnSet) {
             a->returnSet = true;
             a->returnRegister = returnRegister;
-            a->returnParentRegs = returnParentRegs;
         }
-        a->temporaryParentRegs = temporaryParentRegs;
 
         status = generateMethod();
         if (status != Compile_Okay) {
             popActiveFrame();
             if (status == Compile_Abort) {
                 /* The callee is uncompileable, mark it as uninlineable and retry. */
-                if (!cx->markTypeFunctionUninlineable(fun->getType()))
+                if (!cx->markTypeFunctionUninlineable(script->fun->getType()))
                     return Compile_Error;
                 return Compile_Retry;
             }
             return status;
         }
 
-        if (!returnSet) {
-            JS_ASSERT(a->returnSet);
-            returnSet = true;
-            returnRegister = a->returnRegister;
-            returnParentRegs = a->returnParentRegs;
+        if (needReturnValue && !returnSet) {
+            if (a->returnSet) {
+                returnSet = true;
+                returnRegister = a->returnRegister;
+            } else {
+                returnEntry = a->returnEntry;
+            }
         }
 
         popActiveFrame();
 
-        if (i + 1 != count)
+        if (i + 1 != inlineCallees.length())
             returnJumps.append(masm.jump());
     }
 
     for (unsigned i = 0; i < returnJumps.length(); i++)
         returnJumps[i].linkTo(masm.label(), &masm);
 
-    Registers evictedRegisters = Registers(Registers::AvailAnyRegs & ~returnParentRegs.freeMask);
-    frame.evictInlineModifiedRegisters(evictedRegisters);
-
     frame.popn(argc + 2);
-    if (needReturnValue && !syncReturnValue) {
+
+    if (entrySnapshot)
+        cx->array_delete(entrySnapshot);
+
+    if (exitState)
+        frame.discardForJoin(exitState, analysis->getCode(PC).stackDepth - (argc + 2));
+
+    if (returnSet) {
         frame.takeReg(returnRegister);
         if (returnRegister.isReg())
             frame.pushTypedPayload(returnType, returnRegister.reg());
         else
             frame.pushDouble(returnRegister.fpreg());
+    } else if (returnEntry) {
+        frame.pushCopyOf((FrameEntry *) returnEntry);
     } else {
         frame.pushSynced(JSVAL_TYPE_UNKNOWN);
     }
 
     JaegerSpew(JSpew_Inlining, "finished inlining call to script (file \"%s\") (line \"%d\")\n",
                script->filename, script->lineno);
 
     return Compile_Okay;
@@ -4109,17 +4201,17 @@ mjit::Compiler::inlineStubCall(void *stu
     DataLabelPtr inlinePatch;
     Call cl = emitStubCall(stub, &inlinePatch);
     InternalCallSite site(masm.callReturnOffset(cl), a->inlineIndex, PC,
                           (size_t)stub, false, needsRejoin);
     site.inlinePatch = inlinePatch;
     if (loop && loop->generatingInvariants()) {
         Jump j = masm.jump();
         Label l = masm.label();
-        loop->addInvariantCall(j, l, false, callSites.length(), true);
+        loop->addInvariantCall(j, l, false, false, callSites.length(), true);
     }
     addCallSite(site);
 }
 
 #ifdef DEBUG
 void
 mjit::Compiler::checkRejoinSite(uint32 nCallSites, uint32 nRejoinSites, void *stub)
 {
@@ -4167,17 +4259,17 @@ mjit::Compiler::addRejoinSite(void *stub
      * unknown in this compilation.
      */
     frame.ensureInMemoryDoubles(stubcc.masm);
 
     /* Regenerate any loop invariants. */
     if (loop && loop->generatingInvariants()) {
         Jump j = stubcc.masm.jump();
         Label l = stubcc.masm.label();
-        loop->addInvariantCall(j, l, true, rejoinSites.length() - 1, false);
+        loop->addInvariantCall(j, l, true, false, rejoinSites.length() - 1, false);
     }
 
     if (ool) {
         /* Jump to the specified label, without syncing. */
         stubcc.masm.jump().linkTo(oolLabel, &stubcc.masm);
     } else {
         /* Rejoin as from an out of line stub call. */
         stubcc.rejoin(Changes(0));
@@ -4370,21 +4462,22 @@ mjit::Compiler::jsop_getprop(JSAtom *ato
     frame.forgetMismatchedObject(top);
 
     if (JSOp(*PC) == JSOP_LENGTH) {
         /*
          * Check if this is an array we can make a loop invariant entry for.
          * This will fail for objects which are not definitely dense arrays.
          */
         if (loop && loop->generatingInvariants()) {
-            FrameEntry *fe = loop->invariantLength(top, frame.extra(top).types);
+            CrossSSAValue topv(a->inlineIndex, analysis->poppedValue(PC, 0));
+            FrameEntry *fe = loop->invariantLength(topv);
             if (fe) {
                 frame.learnType(fe, JSVAL_TYPE_INT32, false);
                 frame.pop();
-                frame.pushTemporary(fe);
+                frame.pushCopyOf(fe);
                 return true;
             }
         }
 
         /*
          * Check if we are accessing the 'length' property of a known dense array.
          * Note that if the types are known to indicate dense arrays, their lengths
          * must fit in an int32.
@@ -4405,21 +4498,23 @@ mjit::Compiler::jsop_getprop(JSAtom *ato
             if (!isObject)
                 stubcc.rejoin(Changes(1));
             return true;
         }
     }
 
     /* Check if this is a property access we can make a loop invariant entry for. */
     if (loop && loop->generatingInvariants()) {
-        FrameEntry *fe = loop->invariantProperty(top, frame.extra(top).types, ATOM_TO_JSID(atom));
+        CrossSSAValue topv(a->inlineIndex, analysis->poppedValue(PC, 0));
+        FrameEntry *fe = loop->invariantProperty(topv, ATOM_TO_JSID(atom));
         if (fe) {
-            frame.learnType(fe, knownType, false);
+            if (knownType != JSVAL_TYPE_UNKNOWN && knownType != JSVAL_TYPE_DOUBLE)
+                frame.learnType(fe, knownType, false);
             frame.pop();
-            frame.pushTemporary(fe);
+            frame.pushCopyOf(fe);
             return true;
         }
     }
 
     /*
      * Check if we are accessing a known type which always has the property
      * in a particular inline slot. Get the property directly in this case,
      * without using an IC.
@@ -5389,22 +5484,24 @@ mjit::Compiler::jsop_this()
 
     /* 
      * In strict mode code, we don't wrap 'this'.
      * In direct-call eval code, we wrapped 'this' before entering the eval.
      * In global code, 'this' is always an object.
      */
     if (script->fun && !script->strictModeCode) {
         FrameEntry *thisFe = frame.peek(-1);
-        if (!thisFe->isTypeKnown()) {
+        if (!thisFe->isType(JSVAL_TYPE_OBJECT)) {
             JSValueType type = cx->typeInferenceEnabled()
                 ? script->thisTypes()->getKnownTypeTag(cx)
                 : JSVAL_TYPE_UNKNOWN;
             if (type != JSVAL_TYPE_OBJECT) {
-                Jump notObj = frame.testObject(Assembler::NotEqual, thisFe);
+                Jump notObj = thisFe->isTypeKnown()
+                    ? masm.jump()
+                    : frame.testObject(Assembler::NotEqual, thisFe);
                 stubcc.linkExit(notObj, Uses(1));
                 stubcc.leave();
                 OOL_STUBCALL(stubs::This);
                 stubcc.rejoin(Changes(1));
             }
 
             // Now we know that |this| is an object.
             frame.pop();
@@ -6531,17 +6628,17 @@ mjit::Compiler::startLoop(jsbytecode *he
          * Convert all loop registers in the outer loop into unassigned registers.
          * We don't keep track of which registers the inner loop uses, so the only
          * registers that can be carried in the outer loop must be mentioned before
          * the inner loop starts.
          */
         loop->clearLoopRegisters();
     }
 
-    LoopState *nloop = cx->new_<LoopState>(cx, script, this, &frame);
+    LoopState *nloop = cx->new_<LoopState>(cx, &ssa, this, &frame);
     if (!nloop || !nloop->init(head, entry, entryTarget))
         return false;
 
     nloop->outer = loop;
     loop = nloop;
     frame.setLoop(loop);
 
     return true;
@@ -6627,19 +6724,19 @@ mjit::Compiler::finishLoop(jsbytecode *h
 
         /*
          * The interpreter may store integers in slots we assume are doubles,
          * make sure state is consistent before joining. Note that we don't
          * need any handling for other safe points the interpreter can enter
          * from, i.e. from switch and try blocks, as we don't assume double
          * variables are coherent in such cases.
          */
-        for (uint32 slot = analyze::ArgSlot(0); slot < analyze::TotalSlots(script); slot++) {
+        for (uint32 slot = ArgSlot(0); slot < TotalSlots(script); slot++) {
             if (a->varTypes[slot].type == JSVAL_TYPE_DOUBLE) {
-                FrameEntry *fe = frame.getOrTrack(slot);
+                FrameEntry *fe = frame.getSlotEntry(slot);
                 stubcc.masm.ensureInMemoryDouble(frame.addressOf(fe));
             }
         }
 
         autoRejoinHead.oolRejoin(stubcc.masm.label());
         frame.prepareForJump(head, stubcc.masm, true);
         if (!stubcc.jumpInScript(stubcc.masm.jump(), head))
             return false;
@@ -7097,17 +7194,17 @@ mjit::Compiler::fixDoubleSlot(uint32 slo
 {
     if (!analysis->trackSlot(slot))
         return false;
 
     /*
      * Don't preserve double arguments in inline calls across branches, as we
      * can't mutate them when inlining. :XXX: could be more precise here.
      */
-    if (slot < analyze::LocalSlot(script, 0) && a->parent)
+    if (slot < LocalSlot(script, 0) && a->parent)
         return false;
 
     return true;
 }
 
 void
 mjit::Compiler::fixDoubleTypes(jsbytecode *target)
 {
@@ -7115,30 +7212,30 @@ mjit::Compiler::fixDoubleTypes(jsbytecod
         return;
 
     /*
      * Fill fixedDoubleEntries with all variables that are known to be an int
      * here and a double at the branch target. Per prepareInferenceTypes, the
      * target state consists of the current state plus any phi nodes or other
      * new values introduced at the target.
      */
-    const analyze::SlotValue *newv = analysis->newValues(target);
+    const SlotValue *newv = analysis->newValues(target);
     if (newv) {
         while (newv->slot) {
-            if (newv->value.kind() != analyze::SSAValue::PHI ||
+            if (newv->value.kind() != SSAValue::PHI ||
                 newv->value.phiOffset() != uint32(target - script->code)) {
                 newv++;
                 continue;
             }
-            if (newv->slot < analyze::TotalSlots(script)) {
+            if (newv->slot < TotalSlots(script)) {
                 types::TypeSet *targetTypes = analysis->getValueTypes(newv->value);
                 VarType &vt = a->varTypes[newv->slot];
                 if (targetTypes->getKnownTypeTag(cx) == JSVAL_TYPE_DOUBLE &&
                     fixDoubleSlot(newv->slot)) {
-                    FrameEntry *fe = frame.getOrTrack(newv->slot);
+                    FrameEntry *fe = frame.getSlotEntry(newv->slot);
                     if (vt.type == JSVAL_TYPE_INT32) {
                         fixedDoubleEntries.append(newv->slot);
                         frame.ensureDouble(fe);
                     } else if (vt.type == JSVAL_TYPE_UNKNOWN) {
                         /*
                          * Unknown here but a double at the target. The type
                          * set for the existing value must be empty, so this
                          * code is doomed and we can just mark the value as
@@ -7157,33 +7254,33 @@ mjit::Compiler::fixDoubleTypes(jsbytecod
 
 void
 mjit::Compiler::restoreAnalysisTypes()
 {
     if (!cx->typeInferenceEnabled())
         return;
 
     /* Update variable types for all new values at this bytecode. */
-    const analyze::SlotValue *newv = analysis->newValues(PC);
+    const SlotValue *newv = analysis->newValues(PC);
     if (newv) {
         while (newv->slot) {
-            if (newv->slot < analyze::TotalSlots(script)) {
+            if (newv->slot < TotalSlots(script)) {
                 VarType &vt = a->varTypes[newv->slot];
                 vt.types = analysis->getValueTypes(newv->value);
                 vt.type = vt.types->getKnownTypeTag(cx);
             }
             newv++;
         }
     }
 
     /* Restore known types of locals/args. */
-    for (uint32 slot = analyze::ArgSlot(0); slot < analyze::TotalSlots(script); slot++) {
+    for (uint32 slot = ArgSlot(0); slot < TotalSlots(script); slot++) {
         JSValueType type = a->varTypes[slot].type;
         if (type != JSVAL_TYPE_UNKNOWN && (type != JSVAL_TYPE_DOUBLE || fixDoubleSlot(slot))) {
-            FrameEntry *fe = frame.getOrTrack(slot);
+            FrameEntry *fe = frame.getSlotEntry(slot);
             JS_ASSERT_IF(fe->isTypeKnown(), fe->isType(type));
             if (!fe->isTypeKnown())
                 frame.learnType(fe, type, false);
         }
     }
 }
 
 void
@@ -7229,17 +7326,17 @@ mjit::Compiler::updateVarType()
       case JSOP_FORARG:
       case JSOP_FORLOCAL:
         types = pushedTypeSet(1);
         break;
       default:
         JS_NOT_REACHED("Bad op");
     }
 
-    uint32 slot = analyze::GetBytecodeSlot(script, PC);
+    uint32 slot = GetBytecodeSlot(script, PC);
 
     if (analysis->trackSlot(slot)) {
         VarType &vt = a->varTypes[slot];
         vt.types = types;
         vt.type = types->getKnownTypeTag(cx);
     }
 }
 
--- a/js/src/methodjit/Compiler.h
+++ b/js/src/methodjit/Compiler.h
@@ -447,57 +447,64 @@ class Compiler : public BaseCompiler
         uint32 slot;
         VarType vt;
         SlotType(uint32 slot, VarType vt) : slot(slot), vt(vt) {}
     };
 
     JSScript *outerScript;
     bool isConstructing;
 
+    /* SSA information for the outer script and all frames we will be inlining. */
+    analyze::CrossScriptSSA ssa;
+
     JSObject *globalObj;
     Value *globalSlots;
 
     /* Existing frames on the stack whose slots may need to be updated. */
     const Vector<PatchableFrame> *patchFrames;
 
     bool *savedTraps;
     Assembler masm;
     FrameState frame;
 
     /*
-     * State for the current stack frame.
-     *
-     * When inlining function calls, we keep track of the state of each inline
-     * frame. The state of parent frames is not modified while analyzing an
-     * inner frame, though registers used by those parents can be spilled in
-     * the frame (reflected by the frame's active register state).
+     * State for the current stack frame, and links to its parents going up to
+     * the outermost script.
      */
 
     struct ActiveFrame {
         ActiveFrame *parent;
         jsbytecode *parentPC;
         JSScript *script;
+        Label *jumpMap;
+
+        /*
+         * Index into inlineFrames or OUTER_FRAME, matches this frame's index
+         * in the cross script SSA.
+         */
         uint32 inlineIndex;
-        Label *jumpMap;
-        uint32 depth;
-        Vector<UnsyncedEntry> unsyncedEntries; // :XXX: handle OOM
 
         /* Current types for non-escaping vars in the script. */
         VarType *varTypes;
 
         /* State for managing return from inlined frames. */
-        bool needReturnValue;
-        bool syncReturnValue;
-        bool returnValueDouble;
-        bool returnSet;
-        AnyRegisterID returnRegister;
-        Registers returnParentRegs;
-        Registers temporaryParentRegs;
+        bool needReturnValue;          /* Return value will be used. */
+        bool syncReturnValue;          /* Return value should be fully synced. */
+        bool returnValueDouble;        /* Return value should be a double. */
+        bool returnSet;                /* Whether returnRegister is valid. */
+        AnyRegisterID returnRegister;  /* Register holding return value. */
+        const FrameEntry *returnEntry; /* Entry copied by return value. */
         Vector<Jump, 4, CompilerAllocPolicy> *returnJumps;
 
+        /*
+         * Snapshot of the heap state to use after the call, in case
+         * there are multiple return paths the inlined frame could take.
+         */
+        RegisterAllocation *exitState;
+
         ActiveFrame(JSContext *cx);
         ~ActiveFrame();
     };
     ActiveFrame *a;
     ActiveFrame *outer;
 
     JSScript *script;
     analyze::ScriptAnalysis *analysis;
@@ -535,17 +542,17 @@ class Compiler : public BaseCompiler
     Label arityLabel;
 #ifdef JS_MONOIC
     Label argsCheckStub;
     Label argsCheckFallthrough;
     Jump argsCheckJump;
 #endif
     bool debugMode_;
     bool addTraceHints;
-    bool inlining;
+    bool inlining_;
     bool hasGlobalReallocation;
     bool oomInVector;       // True if we have OOM'd appending to a vector. 
     enum { NoApplyTricks, LazyArgsObj } applyTricks;
 
     Compiler *thisFromCtor() { return this; }
 
     friend class CompilerAllocPolicy;
   public:
@@ -559,16 +566,17 @@ class Compiler : public BaseCompiler
     bool knownJump(jsbytecode *pc);
     Label labelOf(jsbytecode *target, uint32 inlineIndex);
     void addCallSite(const InternalCallSite &callSite);
     void addReturnSite(bool ool);
     void inlineStubCall(void *stub, bool needsRejoin);
     bool loadOldTraps(const Vector<CallSite> &site);
 
     bool debugMode() { return debugMode_; }
+    bool inlining() { return inlining_; }
 
 #ifdef DEBUG
     void checkRejoinSite(uint32 nCallSites, uint32 nRejoinSites, void *stub);
 #endif
     void addRejoinSite(void *stub, bool ool, Label oolLabel);
 
     bool needRejoins(jsbytecode *pc)
     {
@@ -606,16 +614,26 @@ class Compiler : public BaseCompiler
         return call ? &callSites[index].loopPatch : &rejoinSites[index].loopPatch;
     }
     jsbytecode *getInvariantPC(unsigned index, bool call) {
         return call ? callSites[index].inlinepc : rejoinSites[index].pc;
     }
 
     bool arrayPrototypeHasIndexedProperty();
 
+    bool activeFrameHasMultipleExits() {
+        ActiveFrame *na = a;
+        while (na->parent) {
+            if (na->exitState)
+                return true;
+            na = na->parent;
+        }
+        return false;
+    }
+
   private:
     CompileStatus performCompilation(JITScript **jitp);
     CompileStatus generatePrologue();
     CompileStatus generateMethod();
     CompileStatus generateEpilogue();
     CompileStatus finishThisUp(JITScript **jitp);
     CompileStatus pushActiveFrame(JSScript *script, uint32 argc);
     void popActiveFrame();
@@ -628,16 +646,19 @@ class Compiler : public BaseCompiler
     void watchGlobalReallocation();
     void updateVarType();
     JSValueType knownPushedType(uint32 pushed);
     bool mayPushUndefined(uint32 pushed);
     types::TypeSet *pushedTypeSet(uint32 which);
     bool monitored(jsbytecode *pc);
     bool testSingletonProperty(JSObject *obj, jsid id);
     bool testSingletonPropertyTypes(FrameEntry *top, jsid id, bool *testObject);
+    CompileStatus addInlineFrame(JSScript *script, uint32 depth, uint32 parent, jsbytecode *parentpc);
+    CompileStatus scanInlineCalls(uint32 index, uint32 depth);
+    CompileStatus checkAnalysis(JSScript *script);
 
     /* Non-emitting helpers. */
     void pushSyncedEntry(uint32 pushed);
     uint32 fullAtomIndex(jsbytecode *pc);
     bool jumpInScript(Jump j, jsbytecode *pc);
     bool compareTwoValues(JSContext *cx, JSOp op, const Value &lhs, const Value &rhs);
     bool canUseApplyTricks();
 
--- a/js/src/methodjit/FastArithmetic.cpp
+++ b/js/src/methodjit/FastArithmetic.cpp
@@ -43,16 +43,17 @@
 #include "jsnum.h"
 #include "methodjit/MethodJIT.h"
 #include "methodjit/Compiler.h"
 #include "methodjit/StubCalls.h"
 #include "methodjit/FrameState-inl.h"
 
 using namespace js;
 using namespace js::mjit;
+using namespace js::analyze;
 using namespace JSC;
 
 typedef JSC::MacroAssembler::FPRegisterID FPRegisterID;
 
 bool
 mjit::Compiler::tryBinaryConstantFold(JSContext *cx, FrameState &frame, JSOp op,
                                       FrameEntry *lhs, FrameEntry *rhs, Value *vp)
 {
@@ -239,18 +240,19 @@ mjit::Compiler::jsop_binary(JSOp op, Voi
     /*
      * If this is an operation on which integer overflows can be ignored, treat
      * the result as an integer even if it has been marked as overflowing by
      * the interpreter. Doing this changes the values we maintain on the stack
      * from those the interpreter would maintain; this is OK as values derived
      * from ignored overflows are not live across points where the interpreter
      * can join into JIT code (loop heads and safe points).
      */
-    bool cannotOverflow = loop && loop->cannotIntegerOverflow();
-    bool ignoreOverflow = loop && loop->ignoreIntegerOverflow();
+    CrossSSAValue pushv(a->inlineIndex, SSAValue::PushedValue(PC - script->code, 0));
+    bool cannotOverflow = loop && loop->cannotIntegerOverflow(pushv);
+    bool ignoreOverflow = loop && loop->ignoreIntegerOverflow(pushv);
 
     if (rhs->isType(JSVAL_TYPE_INT32) && lhs->isType(JSVAL_TYPE_INT32) &&
         op == JSOP_ADD && ignoreOverflow) {
         type = JSVAL_TYPE_INT32;
     }
 
     /* Can do int math iff there is no double constant and the op is not division. */
     bool canDoIntMath = op != JSOP_DIV && type != JSVAL_TYPE_DOUBLE &&
@@ -1508,17 +1510,17 @@ mjit::Compiler::jsop_relational_int(JSOp
     FrameEntry *lhs = frame.peek(-2);
 
     /* Reverse N cmp A comparisons.  The left side must be in a register. */
     if (lhs->isConstant()) {
         JS_ASSERT(!rhs->isConstant());
         FrameEntry *tmp = lhs;
         lhs = rhs;
         rhs = tmp;
-        op = analyze::ReverseCompareOp(op);
+        op = ReverseCompareOp(op);
     }
 
     JS_ASSERT_IF(!target, fused != JSOP_IFEQ);
     Assembler::Condition cond = GetCompareCondition(op, fused);
 
     if (target) {
         fixDoubleTypes(target);
         if (!frame.syncForBranch(target, Uses(2)))
@@ -1599,17 +1601,17 @@ mjit::Compiler::jsop_relational_full(JSO
         cmpReg = regs.lhsData.reg();
         if (!regs.rhsData.isSet())
             value = rhs->getValue().toInt32();
         else
             reg = regs.rhsData.reg();
     } else {
         cmpReg = regs.rhsData.reg();
         value = lhs->getValue().toInt32();
-        cmpOp = analyze::ReverseCompareOp(op);
+        cmpOp = ReverseCompareOp(op);
     }
 
     /*
      * Emit the actual comparisons. When a fusion is in play, it's faster to
      * combine the comparison with the jump, so these two cases are implemented
      * separately.
      */
 
--- a/js/src/methodjit/FastOps.cpp
+++ b/js/src/methodjit/FastOps.cpp
@@ -1111,21 +1111,22 @@ mjit::Compiler::jsop_setelem_dense()
     bool pinKey = !key.isConstant() && !frame.haveSameBacking(id, value);
     if (pinKey)
         frame.pinReg(key.reg());
 
     // Register to hold the computed slots pointer for the object. If we can
     // hoist the initialized length check, we make the slots pointer loop
     // invariant and never access the object itself.
     RegisterID slotsReg;
-    bool hoisted = loop && !a->parent &&
-        loop->hoistArrayLengthCheck(obj, frame.extra(obj).types, 1);
+    analyze::CrossSSAValue objv(a->inlineIndex, analysis->poppedValue(PC, 2));
+    analyze::CrossSSAValue indexv(a->inlineIndex, analysis->poppedValue(PC, 1));
+    bool hoisted = loop && loop->hoistArrayLengthCheck(objv, indexv);
 
     if (hoisted) {
-        FrameEntry *slotsFe = loop->invariantSlots(obj);
+        FrameEntry *slotsFe = loop->invariantSlots(objv);
         slotsReg = frame.tempRegForData(slotsFe);
 
         frame.unpinEntry(vr);
         if (pinKey)
             frame.unpinReg(key.reg());
     } else {
         // Get a register for the object which we can clobber.
         RegisterID objReg;
@@ -1436,24 +1437,25 @@ mjit::Compiler::jsop_getelem_dense(bool 
 
     // Allocate registers.
 
     // If we know the result of the GETELEM may be undefined, then misses on the
     // initialized length or hole checks can just produce an undefined value.
     // We checked in the caller that prototypes do not have indexed properties.
     bool allowUndefined = mayPushUndefined(0);
 
-    bool hoisted = loop && !a->parent &&
-        loop->hoistArrayLengthCheck(obj, frame.extra(obj).types, 0);
+    analyze::CrossSSAValue objv(a->inlineIndex, analysis->poppedValue(PC, 1));
+    analyze::CrossSSAValue indexv(a->inlineIndex, analysis->poppedValue(PC, 0));
+    bool hoisted = loop && loop->hoistArrayLengthCheck(objv, indexv);
 
     // Get a register with either the object or its slots, depending on whether
     // we are hoisting the bounds check.
     RegisterID baseReg;
     if (hoisted) {
-        FrameEntry *slotsFe = loop->invariantSlots(obj);
+        FrameEntry *slotsFe = loop->invariantSlots(objv);
         baseReg = frame.tempRegForData(slotsFe);
     } else {
         baseReg = frame.tempRegForData(obj);
     }
     frame.pinReg(baseReg);
 
     Int32Key key = id->isConstant()
                  ? Int32Key::FromConstant(id->getValue().toInt32())
--- a/js/src/methodjit/FrameEntry.h
+++ b/js/src/methodjit/FrameEntry.h
@@ -153,17 +153,17 @@ class FrameEntry
     }
 
     bool hasSameBacking(const FrameEntry *other) const {
         return backing() == other->backing();
     }
 
   private:
     void setType(JSValueType type_) {
-        JS_ASSERT(!isCopy());
+        JS_ASSERT(!isCopy() && type_ != JSVAL_TYPE_UNKNOWN);
         type.setConstant();
 #if defined JS_NUNBOX32
         v_.s.tag = JSVAL_TYPE_TO_TAG(type_);
 #elif defined JS_PUNBOX64
         v_.asBits &= JSVAL_PAYLOAD_MASK;
         v_.asBits |= JSVAL_TYPE_TO_SHIFTED_TAG(type_);
 #endif
         knownType = type_;
@@ -267,17 +267,16 @@ class FrameEntry
     JSValueType knownType;
     jsval_layout v_;
     RematInfo  type;
     RematInfo  data;
     uint32     index_;
     FrameEntry *copy;
     bool       copied;
     bool       tracked;
-    bool       inlined;
     bool       temporary;
 
     /*
      * Offset of the last loop in which this entry was written or had a loop
      * register assigned.
      */
     uint32     lastLoop;
 
--- a/js/src/methodjit/FrameState-inl.h
+++ b/js/src/methodjit/FrameState-inl.h
@@ -44,27 +44,26 @@
 
 namespace js {
 namespace mjit {
 
 inline void
 FrameState::addToTracker(FrameEntry *fe)
 {
     JS_ASSERT(!fe->isTracked());
-    fe->track(a->tracker.nentries);
-    a->tracker.add(fe);
-    JS_ASSERT(a->tracker.nentries <= feLimit(script));
+    fe->track(tracker.nentries);
+    tracker.add(fe);
 }
 
 inline FrameEntry *
 FrameState::peek(int32 depth)
 {
     JS_ASSERT(depth < 0);
-    JS_ASSERT(sp + depth >= spBase);
-    FrameEntry *fe = &sp[depth];
+    JS_ASSERT(a->sp + depth >= a->spBase);
+    FrameEntry *fe = a->sp + depth;
     if (!fe->isTracked()) {
         addToTracker(fe);
         fe->resetSynced();
     }
     return fe;
 }
 
 inline void
@@ -87,24 +86,24 @@ FrameState::haveSameBacking(FrameEntry *
 inline FrameEntry *
 FrameState::getTemporary(uint32 which)
 {
     JS_ASSERT(which < TEMPORARY_LIMIT);
 
     FrameEntry *fe = temporaries + which;
     JS_ASSERT(fe < temporariesTop);
 
-    return getOrTrack(indexOfFe(fe));
+    return getOrTrack(uint32(fe - entries));
 }
 
 inline AnyRegisterID
 FrameState::allocReg(uint32 mask)
 {
-    if (a->freeRegs.hasRegInMask(mask)) {
-        AnyRegisterID reg = a->freeRegs.takeAnyReg(mask);
+    if (freeRegs.hasRegInMask(mask)) {
+        AnyRegisterID reg = freeRegs.takeAnyReg(mask);
         modifyReg(reg);
         return reg;
     }
 
     AnyRegisterID reg = evictSomeReg(mask);
     regstate(reg).forget();
     modifyReg(reg);
     return reg;
@@ -126,34 +125,33 @@ inline AnyRegisterID
 FrameState::allocAndLoadReg(FrameEntry *fe, bool fp, RematInfo::RematType type)
 {
     AnyRegisterID reg;
     uint32 mask = fp ? (uint32) Registers::AvailFPRegs : (uint32) Registers::AvailRegs;
 
     /*
      * Decide whether to retroactively mark a register as holding the entry
      * at the start of the current loop. We can do this if (a) the register has
-     * not been touched since the start of the loop (it is in loopRegs), and (b)
+     * not been touched since the start of the loop (it is in loopRegs), (b)
      * the entry has also not been written to or already had a loop register
-     * assigned.
+     * assigned, and (c) we are not in an inline call with multiple callees or
+     * exit points --- we won't pick up the new loop register when restoring.
      */
-    if (loop && a->freeRegs.hasRegInMask(loop->getLoopRegs() & mask) &&
-        type == RematInfo::DATA &&
-        (fe == this_ || isArg(fe) || isLocal(fe) || isTemporary(fe)) &&
-        fe->lastLoop < loop->headOffset() &&
-        !a->parent) {
-        reg = a->freeRegs.takeAnyReg(loop->getLoopRegs() & mask);
+    if (loop && freeRegs.hasRegInMask(loop->getLoopRegs() & mask) &&
+        type == RematInfo::DATA && isOuterSlot(fe) && !cc.activeFrameHasMultipleExits() &&
+        fe->lastLoop < loop->headOffset()) {
+        reg = freeRegs.takeAnyReg(loop->getLoopRegs() & mask);
         regstate(reg).associate(fe, RematInfo::DATA);
         fe->lastLoop = loop->headOffset();
         loop->setLoopReg(reg, fe);
         return reg;
     }
 
-    if (!a->freeRegs.empty(mask)) {
-        reg = a->freeRegs.takeAnyReg(mask);
+    if (!freeRegs.empty(mask)) {
+        reg = freeRegs.takeAnyReg(mask);
     } else {
         reg = evictSomeReg(mask);
         regstate(reg).forget();
     }
     modifyReg(reg);
 
     if (fp)
         masm.loadDouble(addressOf(fe), reg.fpreg());
@@ -164,20 +162,16 @@ FrameState::allocAndLoadReg(FrameEntry *
 
     regstate(reg).associate(fe, type);
     return reg;
 }
 
 inline void
 FrameState::modifyReg(AnyRegisterID reg)
 {
-    if (a->parentRegs.hasReg(reg)) {
-        a->parentRegs.takeReg(reg);
-        syncParentRegister(masm, reg);
-    }
     if (loop)
         loop->clearLoopReg(reg);
 }
 
 inline void
 FrameState::convertInt32ToDouble(Assembler &masm, FrameEntry *fe, FPRegisterID fpreg) const
 {
     JS_ASSERT(!fe->isConstant());
@@ -197,67 +191,68 @@ FrameState::peekTypeInRegister(FrameEntr
     if (fe->isCopy())
         fe = fe->copyOf();
     return fe->type.inRegister();
 }
 
 inline void
 FrameState::pop()
 {
-    JS_ASSERT(sp > spBase);
+    JS_ASSERT(a->sp > a->spBase);
 
-    FrameEntry *fe = --sp;
+    FrameEntry *fe = --a->sp;
     if (!fe->isTracked())
         return;
 
     forgetAllRegs(fe);
     fe->type.invalidate();
     fe->data.invalidate();
     fe->clear();
 
-    a->extraArray[fe - spBase].reset();
+    extraArray[fe - entries].reset();
 }
 
 inline void
 FrameState::freeReg(AnyRegisterID reg)
 {
     JS_ASSERT(!regstate(reg).usedBy());
 
-    a->freeRegs.putReg(reg);
+    freeRegs.putReg(reg);
 }
 
 inline void
 FrameState::forgetReg(AnyRegisterID reg)
 {
     /*
      * Important: Do not touch the fe here. We can peephole optimize away
      * loads and stores by re-using the contents of old FEs.
      */
     JS_ASSERT_IF(regstate(reg).fe(), !regstate(reg).fe()->isCopy());
 
     if (!regstate(reg).isPinned()) {
         regstate(reg).forget();
-        a->freeRegs.putReg(reg);
+        freeRegs.putReg(reg);
     }
 }
 
 inline FrameEntry *
 FrameState::rawPush()
 {
-    JS_ASSERT(sp < temporaries);
+    JS_ASSERT(a->sp < temporaries);
+    FrameEntry *fe = a->sp++;
 
-    if (!sp->isTracked())
-        addToTracker(sp);
-    sp->type.invalidate();
-    sp->data.invalidate();
-    sp->clear();
+    if (!fe->isTracked())
+        addToTracker(fe);
+    fe->type.invalidate();
+    fe->data.invalidate();
+    fe->clear();
 
-    a->extraArray[sp - spBase].reset();
+    extraArray[fe - entries].reset();
 
-    return sp++;
+    return fe;
 }
 
 inline void
 FrameState::push(const Value &v)
 {
     FrameEntry *fe = rawPush();
     fe->setConstant(Jsvalify(v));
 }
@@ -314,61 +309,63 @@ FrameState::push(Address address, JSValu
     if (knownType != JSVAL_TYPE_UNKNOWN) {
         RegisterID dataReg = reuseBase ? address.base : allocReg();
         masm.loadPayload(address, dataReg);
         pushTypedPayload(knownType, dataReg);
         return;
     }
 
     // Prevent us from clobbering this reg.
-    bool free = a->freeRegs.hasReg(address.base);
+    bool free = freeRegs.hasReg(address.base);
     bool needsPin = !free && regstate(address.base).fe();
     if (free)
-        a->freeRegs.takeReg(address.base);
+        freeRegs.takeReg(address.base);
     if (needsPin)
         pinReg(address.base);
 
     RegisterID typeReg = allocReg();
 
     masm.loadTypeTag(address, typeReg);
 
     // Allow re-use of the base register. This could avoid a spill, and
     // is safe because the following allocReg() won't actually emit any
     // writes to the register.
     if (free)
-        a->freeRegs.putReg(address.base);
+        freeRegs.putReg(address.base);
     if (needsPin)
         unpinReg(address.base);
 
     RegisterID dataReg = reuseBase ? address.base : allocReg();
     masm.loadPayload(address, dataReg);
 
 #endif
 
     pushRegs(typeReg, dataReg, knownType);
 }
 
 inline JSC::MacroAssembler::FPRegisterID
 FrameState::pushRegs(RegisterID type, RegisterID data, JSValueType knownType)
 {
-    JS_ASSERT(!a->freeRegs.hasReg(type) && !a->freeRegs.hasReg(data));
+    JS_ASSERT(!freeRegs.hasReg(type) && !freeRegs.hasReg(data));
 
     if (knownType == JSVAL_TYPE_UNKNOWN) {
         FrameEntry *fe = rawPush();
         fe->resetUnsynced();
         fe->type.setRegister(type);
         fe->data.setRegister(data);
         regstate(type).associate(fe, RematInfo::TYPE);
         regstate(data).associate(fe, RematInfo::DATA);
         return Registers::FPConversionTemp;
     }
 
     if (knownType == JSVAL_TYPE_DOUBLE) {
         FPRegisterID fpreg = allocFPReg();
-        masm.moveInt32OrDouble(data, type, addressOf(sp), fpreg);
+        pushSynced(JSVAL_TYPE_UNKNOWN);
+        masm.moveInt32OrDouble(data, type, addressOf(a->sp - 1), fpreg);
+        pop();
         pushDouble(fpreg);
         freeReg(type);
         freeReg(data);
         return fpreg;
     }
 
     freeReg(type);
     pushTypedPayload(knownType, data);
@@ -390,30 +387,30 @@ FrameState::reloadEntry(Assembler &masm,
         masm.moveInt32OrDouble(address, fe->data.fpreg());
     }
 }
 
 inline void
 FrameState::pushTypedPayload(JSValueType type, RegisterID payload)
 {
     JS_ASSERT(type != JSVAL_TYPE_DOUBLE);
-    JS_ASSERT(!a->freeRegs.hasReg(payload));
+    JS_ASSERT(!freeRegs.hasReg(payload));
 
     FrameEntry *fe = rawPush();
 
     fe->resetUnsynced();
     fe->setType(type);
     fe->data.setRegister(payload);
     regstate(payload).associate(fe, RematInfo::DATA);
 }
 
 inline void
 FrameState::pushNumber(RegisterID payload, bool asInt32)
 {
-    JS_ASSERT(!a->freeRegs.hasReg(payload));
+    JS_ASSERT(!freeRegs.hasReg(payload));
 
     FrameEntry *fe = rawPush();
     fe->clear();
 
     if (asInt32) {
         if (!fe->type.synced())
             masm.storeTypeTag(ImmType(JSVAL_TYPE_INT32), addressOf(fe));
         fe->type.setMemory();
@@ -438,17 +435,17 @@ FrameState::pushInt32(RegisterID payload
     fe->data.unsync();
     fe->data.setRegister(payload);
     regstate(payload).associate(fe, RematInfo::DATA);
 }
 
 inline void
 FrameState::pushUntypedPayload(JSValueType type, RegisterID payload)
 {
-    JS_ASSERT(!a->freeRegs.hasReg(payload));
+    JS_ASSERT(!freeRegs.hasReg(payload));
 
     FrameEntry *fe = rawPush();
 
     fe->clear();
 
     masm.storeTypeTag(ImmType(type), addressOf(fe));
 
     /* The forceful type sync will assert otherwise. */
@@ -826,18 +823,25 @@ FrameState::syncFe(FrameEntry *fe)
     if (!fe->type.synced())
         fe->type.sync();
     if (!fe->data.synced())
         fe->data.sync();
 #endif
 }
 
 inline void
-FrameState::syncAndForgetFe(FrameEntry *fe)
+FrameState::syncAndForgetFe(FrameEntry *fe, bool markSynced)
 {
+    if (markSynced) {
+        if (!fe->type.synced())
+            fe->type.sync();
+        if (!fe->data.synced())
+            fe->data.sync();
+    }
+
     syncFe(fe);
     forgetAllRegs(fe);
     fe->type.setMemory();
     fe->data.setMemory();
 }
 
 inline void
 FrameState::syncType(FrameEntry *fe)
@@ -936,62 +940,73 @@ FrameState::frameOffset(const FrameEntry
 {
     /*
      * The stored frame offsets for analysis temporaries are immediately above
      * the script's normal slots (and will thus be clobbered should a C++ or
      * scripted call push another frame). There must be enough room in the
      * reserved stack space.
      */
     JS_STATIC_ASSERT(StackSpace::STACK_EXTRA >= TEMPORARY_LIMIT);
-    JS_ASSERT(uint32(fe - a->entries) < feLimit(a->script));
+    JS_ASSERT(fe >= a->callee_ && fe < a->sp);
 
     if (fe >= a->locals)
         return StackFrame::offsetOfFixed(uint32(fe - a->locals));
     if (fe >= a->args)
         return StackFrame::offsetOfFormalArg(a->script->fun, uint32(fe - a->args));
     if (fe == a->this_)
         return StackFrame::offsetOfThis(a->script->fun);
     if (fe == a->callee_)
         return StackFrame::offsetOfCallee(a->script->fun);
     JS_NOT_REACHED("Bad fe");
     return 0;
 }
 
 inline JSC::MacroAssembler::Address
-FrameState::addressOf(const FrameEntry *fe, ActiveFrame *a) const
+FrameState::addressOf(const FrameEntry *fe) const
 {
-    if (fe->inlined) {
+    if (isTemporary(fe)) {
         /*
-         * For arguments/this to inlined frames, we should only be using the
-         * backing store in the parent. The address of the argument/this may
-         * not be synced (even if it is marked as synced). This inlined address
-         * will only be used for loads (arguments can't yet be mutated by
-         * inlined calls), and the caller must ensure the parent's entry is
-         * definitely synced.
+         * Temporary addresses are common to the outermost loop, and are shared
+         * by all active frames.
          */
-        JS_ASSERT(a->parent);
-        const FrameEntry *parentFE;
-        if (fe == callee_)
-            parentFE = a->parentSP - (a->parentArgc + 2);
-        else if (fe == this_)
-            parentFE = a->parentSP - (a->parentArgc + 1);
-        else
-            parentFE = a->parentSP - (a->parentArgc - (fe - a->args));
-
-        return addressOf(parentFE->backing(), a->parent);
+        return Address(JSFrameReg, (loop->temporariesStart + fe - temporaries) * sizeof(Value));
     }
 
-    int32 offset = frameOffset(fe, a);
-    return Address(JSFrameReg, offset + (a->depth * sizeof(Value)));
+    ActiveFrame *na = a;
+    while (fe < na->callee_)
+        na = na->parent;
+
+    int32 offset = frameOffset(fe, na);
+    return Address(JSFrameReg, offset + (na->depth * sizeof(Value)));
+}
+
+inline uint32
+FrameState::frameSlot(ActiveFrame *a, const FrameEntry *fe) const
+{
+    if (isTemporary(fe))
+        return fe - entries;
+
+    JS_ASSERT(fe >= a->callee_ && fe < a->sp);
+
+    if (fe >= a->locals)
+        return analyze::LocalSlot(a->script, fe - a->locals);
+    if (fe >= a->args)
+        return analyze::ArgSlot(fe - a->args);
+    if (fe == a->this_)
+        return analyze::ThisSlot();
+    if (fe == a->callee_)
+        return analyze::CalleeSlot();
+    JS_NOT_REACHED("Bad fe");
+    return 0;
 }
 
 inline JSC::MacroAssembler::Address
 FrameState::addressForInlineReturn() const
 {
-    return addressOf(callee_);
+    return addressOf(a->callee_);
 }
 
 inline JSC::MacroAssembler::Address
 FrameState::addressForDataRemat(const FrameEntry *fe) const
 {
     if (fe->isCopy() && !fe->data.synced())
         fe = fe->copyOf();
     JS_ASSERT(fe->data.synced());
@@ -1079,59 +1094,67 @@ FrameState::getOrTrack(uint32 index)
         fe->resetSynced();
     }
     return fe;
 }
 
 inline FrameEntry *
 FrameState::getStack(uint32 slot)
 {
-    if (slot >= uint32(sp - spBase))
+    if (slot >= uint32(a->sp - a->spBase))
         return NULL;
-    return getOrTrack(uint32(&spBase[slot] - entries));
+    return getOrTrack(uint32(a->spBase + slot - entries));
 }
 
 inline FrameEntry *
 FrameState::getLocal(uint32 slot)
 {
-    JS_ASSERT(slot < script->nslots);
-    return getOrTrack(uint32(&locals[slot] - entries));
+    JS_ASSERT(slot < a->script->nslots);
+    return getOrTrack(uint32(a->locals + slot - entries));
 }
 
 inline FrameEntry *
 FrameState::getArg(uint32 slot)
 {
-    JS_ASSERT(script->fun && slot < script->fun->nargs);
-    return getOrTrack(uint32(&args[slot] - entries));
+    JS_ASSERT(a->script->fun && slot < a->script->fun->nargs);
+    return getOrTrack(uint32(a->args + slot - entries));
 }
 
 inline FrameEntry *
 FrameState::getThis()
 {
-    return getOrTrack(uint32(this_ - entries));
+    return getOrTrack(uint32(a->this_ - entries));
+}
+
+inline FrameEntry *
+FrameState::getSlotEntry(uint32 slot)
+{
+    JS_ASSERT(slot < analyze::TotalSlots(a->script));
+    return getOrTrack(uint32(a->callee_ + slot - entries));
 }
 
 inline FrameEntry *
 FrameState::getCallee()
 {
     // Callee can only be used in function code, and it's always an object.
-    JS_ASSERT(script->fun);
-    if (!callee_->isTracked()) {
-        addToTracker(callee_);
-        callee_->resetSynced();
-        callee_->setType(JSVAL_TYPE_OBJECT);
+    JS_ASSERT(a->script->fun);
+    FrameEntry *fe = a->callee_;
+    if (!fe->isTracked()) {
+        addToTracker(fe);
+        fe->resetSynced();
+        fe->setType(JSVAL_TYPE_OBJECT);
     }
-    return callee_;
+    return fe;
 }
 
 inline void
 FrameState::unpinKilledReg(RegisterID reg)
 {
     regstate(reg).unpinUnsafe();
-    a->freeRegs.putReg(reg);
+    freeRegs.putReg(reg);
 }
 
 inline void
 FrameState::forgetAllRegs(FrameEntry *fe)
 {
     if (fe->isCopy())
         return;
     if (fe->type.inRegister())
@@ -1142,136 +1165,132 @@ FrameState::forgetAllRegs(FrameEntry *fe
         forgetReg(fe->data.fpreg());
 }
 
 inline void
 FrameState::swapInTracker(FrameEntry *lhs, FrameEntry *rhs)
 {
     uint32 li = lhs->trackerIndex();
     uint32 ri = rhs->trackerIndex();
-    JS_ASSERT(a->tracker[li] == lhs);
-    JS_ASSERT(a->tracker[ri] == rhs);
-    a->tracker.entries[ri] = lhs;
-    a->tracker.entries[li] = rhs;
+    JS_ASSERT(tracker[li] == lhs);
+    JS_ASSERT(tracker[ri] == rhs);
+    tracker.entries[ri] = lhs;
+    tracker.entries[li] = rhs;
     lhs->index_ = ri;
     rhs->index_ = li;
 }
 
 inline void
 FrameState::dup()
 {
     dupAt(-1);
 }
 
 inline void
 FrameState::dup2()
 {
     FrameEntry *lhs = peek(-2);
     FrameEntry *rhs = peek(-1);
-    pushCopyOf(indexOfFe(lhs));
-    pushCopyOf(indexOfFe(rhs));
+    pushCopyOf(lhs);
+    pushCopyOf(rhs);
 }
 
 inline void
 FrameState::dupAt(int32 n)
 {
     JS_ASSERT(n < 0);
     FrameEntry *fe = peek(n);
-    pushCopyOf(indexOfFe(fe));
+    pushCopyOf(fe);
 }
 
 inline void
 FrameState::syncAt(int32 n)
 {
     JS_ASSERT(n < 0);
     FrameEntry *fe = peek(n);
     syncFe(fe);
 }
 
 inline void
 FrameState::pushLocal(uint32 n)
 {
     FrameEntry *fe = getLocal(n);
-    if (!analysis->slotEscapes(analyze::LocalSlot(script, n))) {
-        pushCopyOf(indexOfFe(fe));
+    if (!a->analysis->slotEscapes(analyze::LocalSlot(a->script, n))) {
+        pushCopyOf(fe);
     } else {
 #ifdef DEBUG
         /*
          * We really want to assert on local variables, but in the presence of
          * SETLOCAL equivocation of stack slots, and let expressions, just
          * weakly assert on the fixed local vars.
          */
-        if (fe->isTracked() && n < script->nfixed)
+        if (fe->isTracked() && n < a->script->nfixed)
             JS_ASSERT(fe->data.inMemory());
 #endif
         JSValueType type = fe->isTypeKnown() ? fe->getKnownType() : JSVAL_TYPE_UNKNOWN;
         push(addressOf(fe), type);
     }
 }
 
 inline void
 FrameState::pushArg(uint32 n)
 {
     FrameEntry *fe = getArg(n);
-    if (!analysis->slotEscapes(analyze::ArgSlot(n))) {
-        pushCopyOf(indexOfFe(fe));
+    if (!a->analysis->slotEscapes(analyze::ArgSlot(n))) {
+        pushCopyOf(fe);
     } else {
 #ifdef DEBUG
         if (fe->isTracked())
             JS_ASSERT(fe->data.inMemory());
 #endif
         JSValueType type = fe->isTypeKnown() ? fe->getKnownType() : JSVAL_TYPE_UNKNOWN;
         push(addressOf(fe), type);
     }
 }
 
 inline void
 FrameState::pushCallee()
 {
     FrameEntry *fe = getCallee();
-    pushCopyOf(indexOfFe(fe));
+    pushCopyOf(fe);
 }
 
 inline void
 FrameState::pushThis()
 {
     FrameEntry *fe = getThis();
-    pushCopyOf(indexOfFe(fe));
-}
-
-inline void
-FrameState::pushTemporary(FrameEntry *fe)
-{
-    JS_ASSERT(isTemporary(fe));
-    pushCopyOf(indexOfFe(fe));
+    pushCopyOf(fe);
 }
 
 void
 FrameState::learnThisIsObject(bool unsync)
 {
-    // This is safe, albeit hacky. This is only called from the compiler,
-    // and only on the first use of |this| inside a basic block. Thus,
-    // there are no copies of |this| anywhere.
-    learnType(this_, JSVAL_TYPE_OBJECT, unsync);
+    // If the 'this' object is a copy, this must be an inline frame, in which
+    // case we will trigger recompilation if the 'this' entry isn't actually
+    // an object (thus, it is OK to modify the backing directly).
+    FrameEntry *fe = a->this_;
+    if (fe->isCopy())
+        fe = fe->copyOf();
+    learnType(fe, JSVAL_TYPE_OBJECT, unsync);
 }
 
 inline void
 FrameState::leaveBlock(uint32 n)
 {
     popn(n);
 }
 
 inline void
 FrameState::enterBlock(uint32 n)
 {
     /* expect that tracker has 0 entries, for now. */
-    JS_ASSERT(!a->tracker.nentries);
-    JS_ASSERT(uint32(sp + n - locals) <= script->nslots);
+    JS_ASSERT(!tracker.nentries);
+    JS_ASSERT(uint32(a->sp + n - a->locals) <= a->script->nslots);
 
-    sp += n;
+    a->sp += n;
 }
 
 inline void
 FrameState::eviscerate(FrameEntry *fe)
 {
     forgetAllRegs(fe);
     fe->type.invalidate();
     fe->data.invalidate();
--- a/js/src/methodjit/FrameState.cpp
+++ b/js/src/methodjit/FrameState.cpp
@@ -47,398 +47,194 @@ using namespace js::analyze;
 
 /* Because of Value alignment */
 JS_STATIC_ASSERT(sizeof(FrameEntry) % 8 == 0);
 
 FrameState::FrameState(JSContext *cx, mjit::Compiler &cc,
                        Assembler &masm, StubCompiler &stubcc)
   : cx(cx),
     masm(masm), cc(cc), stubcc(stubcc),
-    a(NULL), script(NULL), entries(NULL),
-    callee_(NULL), this_(NULL), args(NULL), locals(NULL),
-    spBase(NULL), sp(NULL), PC(NULL),
+    a(NULL), entries(NULL), nentries(0), freeRegs(Registers::AvailAnyRegs),
     loop(NULL), inTryBlock(false)
 {
 }
 
 FrameState::~FrameState()
 {
     while (a) {
         ActiveFrame *parent = a->parent;
         a->script->analysis(cx)->clearAllocations();
-#if defined JS_NUNBOX32
-        a->reifier.~ImmutableSync();
-#endif
         cx->free_(a);
         a = parent;
     }
+    cx->free_(entries);
 }
 
 void
-FrameState::getUnsyncedEntries(uint32 *pdepth, Vector<UnsyncedEntry> *unsyncedEntries)
+FrameState::pruneDeadEntries()
 {
-    *pdepth = totalDepth() + VALUES_PER_STACK_FRAME;
-
-    /* Mark all unsynced entries in the frame. */
-    for (uint32 i = 0; i < a->tracker.nentries; i++) {
-        FrameEntry *fe = a->tracker[i];
-        if (fe >= sp)
-            continue;
-        if (fe->type.synced() && fe->data.synced())
-            continue;
-        if (fe->inlined)
-            continue;
-
-        UnsyncedEntry entry;
-        PodZero(&entry);
-
-        entry.offset = frameOffset(fe);
-
-        if (fe->isCopy()) {
-            FrameEntry *nfe = fe->copyOf();
-            entry.copy = true;
-            entry.u.copiedOffset = frameOffset(nfe);
-        } else if (fe->isConstant()) {
-            entry.constant = true;
-            entry.u.value = fe->getValue();
-        } else if (fe->isTypeKnown() && !fe->isType(JSVAL_TYPE_DOUBLE) && !fe->type.synced()) {
-            entry.knownType = true;
-            entry.u.type = fe->getKnownType();
-        } else {
-            /*
-             * All the unsynced portions of this entry are in registers. When
-             * making a call from within an inline frame, these will be synced
-             * beforehand.
-             */
-            continue;
+    unsigned shift = 0;
+    for (unsigned i = 0; i < tracker.nentries; i++) {
+        FrameEntry *fe = tracker[i];
+        if (deadEntry(fe)) {
+            fe->untrack();
+            shift++;
+        } else if (shift) {
+            fe->index_ -= shift;
+            tracker.entries[fe->index_] = fe;
         }
-
-        unsyncedEntries->append(entry);
     }
+    tracker.nentries -= shift;
 }
 
 bool
 FrameState::pushActiveFrame(JSScript *script, uint32 argc)
 {
-    uint32 depth = a ? totalDepth() : 0;
-    uint32 nentries = feLimit(script);
-
-    size_t totalBytes = sizeof(ActiveFrame) +
-                        sizeof(FrameEntry) * nentries +              // entries[]
-                        sizeof(FrameEntry *) * nentries +            // tracker.entries
-                        sizeof(StackEntryExtra) * script->nslots;    // extraArray
-
-    uint8 *cursor = (uint8 *)cx->calloc_(totalBytes);
-    if (!cursor)
-        return false;
-
-    ActiveFrame *newa = (ActiveFrame *) cursor;
-    cursor += sizeof(ActiveFrame);
+    if (!a) {
+        this->nentries = analyze::TotalSlots(script) + (script->nslots - script->nfixed) +
+            StackSpace::STACK_EXTRA - VALUES_PER_STACK_FRAME;
+        size_t totalBytes = sizeof(FrameEntry) * nentries +       // entries[]
+                            sizeof(FrameEntry *) * nentries +     // tracker.entries
+                            sizeof(StackEntryExtra) * nentries;   // extraArray
+        uint8 *cursor = (uint8 *)cx->calloc_(totalBytes);
+        if (!cursor)
+            return false;
+
+        this->entries = (FrameEntry *) cursor;
+        cursor += sizeof(FrameEntry) * nentries;
+
+        this->tracker.entries = (FrameEntry **)cursor;
+        cursor += sizeof(FrameEntry *) * nentries;
+
+        this->extraArray = (StackEntryExtra *)cursor;
+        cursor += sizeof(StackEntryExtra) * nentries;
+
+        JS_ASSERT(reinterpret_cast<uint8 *>(this->entries) + totalBytes == cursor);
 
 #if defined JS_NUNBOX32
-    if (!newa->reifier.init(cx, *this, nentries)) {
-        cx->free_(newa);
-        return false;
-    }
+        if (!reifier.init(cx, *this, nentries)) {
+            cx->free_(this->entries);
+            return false;
+        }
 #endif
 
+        this->temporaries = this->temporariesTop = this->entries + nentries - TEMPORARY_LIMIT;
+    }
+
+    /* We should have already checked that argc == nargs */
+    JS_ASSERT_IF(a, argc == script->fun->nargs);
+
+    ActiveFrame *newa = cx->new_<ActiveFrame>();
     newa->parent = a;
-    newa->parentPC = PC;
-    newa->parentSP = sp;
-    newa->parentArgc = argc;
+    newa->depth = a ? (totalDepth() + VALUES_PER_STACK_FRAME) : 0;
+
     newa->script = script;
-    newa->freeRegs = Registers(Registers::AvailAnyRegs);
-
-    newa->entries = (FrameEntry *)cursor;
-    cursor += sizeof(FrameEntry) * nentries;
-
-    newa->callee_ = newa->entries;
-    newa->this_ = newa->entries + 1;
-    newa->args = newa->entries + 2;
-    newa->locals = newa->args + (script->fun ? script->fun->nargs : 0);
-
-    newa->tracker.entries = (FrameEntry **)cursor;
-    cursor += sizeof(FrameEntry *) * nentries;
-
-    newa->extraArray = (StackEntryExtra *)cursor;
-    cursor += sizeof(StackEntryExtra) * script->nslots;
-
-    JS_ASSERT(reinterpret_cast<uint8 *>(newa) + totalBytes == cursor);
+    newa->PC = script->code;
+    newa->analysis = script->analysis(cx);
+
+    /*
+     * The callee/this/args in the new frame reuse the same entries as are on
+     * the stack in the old frame.
+     */
+    FrameEntry *entriesStart = a ? a->sp - (argc + 2) : entries;
+    newa->callee_ = entriesStart + analyze::CalleeSlot();
+    newa->this_   = entriesStart + analyze::ThisSlot();
+    newa->args    = entriesStart + analyze::ArgSlot(0);
+    newa->locals  = entriesStart + analyze::LocalSlot(script, 0);
+    newa->spBase  = entriesStart + analyze::TotalSlots(script);
+    newa->sp      = newa->spBase;
 
     this->a = newa;
-    updateActiveFrame();
-
-    if (a->parent && script->analysis(cx)->inlineable(argc)) {
-        a->depth = depth + VALUES_PER_STACK_FRAME;
-
-        /* Mark all registers which are in use by the parent or its own parent. */
-        a->parentRegs = 0;
-        Registers regs(Registers::AvailAnyRegs);
-        while (!regs.empty()) {
-            AnyRegisterID reg = regs.takeAnyReg();
-            if (a->parent->parentRegs.hasReg(reg) || !a->parent->freeRegs.hasReg(reg))
-                a->parentRegs.putReg(reg);
-        }
-
-        JS_ASSERT(argc == script->fun->nargs);
-
-        syncInlinedEntry(getCallee(), a->parentSP - (argc + 2));
-        syncInlinedEntry(getThis(), a->parentSP - (argc + 1));
-        for (unsigned i = 0; i < argc; i++)
-            syncInlinedEntry(getArg(i), a->parentSP - (argc - i));
-    }
 
     return true;
 }
 
 void
-FrameState::syncInlinedEntry(FrameEntry *fe, const FrameEntry *parent)
-{
-    /*
-     * Fill in the initial state of an entry in this inlined frame that
-     * corresponds to an entry in the caller's frame.
-     */
-
-    /*
-     * Make sure the initial sync state of the inlined entries matches the
-     * parent. These inlined entries will never unsync (since they are never
-     * modified) and will be marked as synced as necessary. Note that this
-     * follows any copies in the parent to get the eventual backing of the
-     * argument --- the slot we compute using getAddress. Syncing of the
-     * argument slots themselves is handled by the parent's unsyncedSlots.
-     */
-    JS_ASSERT(fe->type.synced() && fe->data.synced());
-    parent = parent->backing();
-    if (!parent->type.synced())
-        fe->type.unsync();
-    if (!parent->data.synced())
-        fe->data.unsync();
-
-    fe->inlined = true;
-
-    if (parent->isConstant()) {
-        fe->setConstant(Jsvalify(parent->getValue()));
-        return;
-    }
-
-    if (parent->isCopy())
-        parent = parent->copyOf();
-
-    if (parent->isTypeKnown())
-        fe->setType(parent->getKnownType());
-
-    if (parent->type.inRegister())
-        associateReg(fe, RematInfo::TYPE, parent->type.reg());
-    if (parent->data.inRegister())
-        associateReg(fe, RematInfo::DATA, parent->data.reg());
-    if (parent->data.inFPRegister())
-        associateReg(fe, RematInfo::DATA, parent->data.fpreg());
-}
-
-void
 FrameState::associateReg(FrameEntry *fe, RematInfo::RematType type, AnyRegisterID reg)
 {
-    a->freeRegs.takeReg(reg);
+    freeRegs.takeReg(reg);
 
     if (type == RematInfo::TYPE)
         fe->type.setRegister(reg.reg());
     else if (reg.isReg())
         fe->data.setRegister(reg.reg());
     else
         fe->data.setFPRegister(reg.fpreg());
     regstate(reg).associate(fe, type);
 }
 
 void
 FrameState::popActiveFrame()
 {
-    jsbytecode *parentPC = a->parentPC;
-    FrameEntry *parentSP = a->parentSP;
-    ActiveFrame *parent = a->parent;
-
-    analysis->clearAllocations();
-
-#if defined JS_NUNBOX32
-    a->reifier.~ImmutableSync();
-#endif
-    cx->free_(a);
-
-    a = parent;
-    updateActiveFrame();
-    PC = parentPC;
-    sp = parentSP;
-}
-
-void
-FrameState::updateActiveFrame()
-{
-    script = a->script;
-    analysis = script->analysis(cx);
-    entries = a->entries;
-    callee_ = a->callee_;
-    this_ = a->this_;
-    args = a->args;
-    locals = a->locals;
-    spBase = locals + script->nfixed;
-    sp = spBase;
-    temporaries = locals + script->nslots;
-    temporariesTop = temporaries;
-}
-
-void
-FrameState::discardLocalRegisters()
-{
-    /* Discard all local registers, without syncing. Must be followed by a discardFrame. */
-    a->freeRegs = Registers::AvailAnyRegs;
-}
-
-void
-FrameState::evictInlineModifiedRegisters(Registers regs)
-{
-    JS_ASSERT(cx->typeInferenceEnabled());
-    a->parentRegs.freeMask &= ~regs.freeMask;
-
-    while (!regs.empty()) {
-        AnyRegisterID reg = regs.takeAnyReg();
-        if (a->freeRegs.hasReg(reg))
-            continue;
-
-        FrameEntry *fe = regstate(reg).fe();
-        JS_ASSERT(fe);
-        if (regstate(reg).type() == RematInfo::TYPE) {
-            if (!fe->type.synced())
-                fe->type.sync();
-            fe->type.setMemory();
-        } else {
-            if (!fe->data.synced())
-                fe->data.sync();
-            if (fe->isType(JSVAL_TYPE_DOUBLE) && !fe->type.synced())
-                fe->type.sync();
-            fe->data.setMemory();
+    a->analysis->clearAllocations();
+
+    if (a->parent) {
+        /* Free registers associated with local variables. */
+        Registers regs(Registers::AvailAnyRegs);
+        while (!regs.empty()) {
+            AnyRegisterID reg = regs.takeAnyReg();
+            if (!freeRegs.hasReg(reg)) {
+                FrameEntry *fe = regstate(reg).usedBy();
+                if (fe >= a->locals && !isTemporary(fe)) {
+                    syncAndForgetFe(fe);
+                    fe->clear();
+                }
+            }
         }
-
-        regstate(reg).forget();
-        a->freeRegs.putReg(reg);
     }
-}
-
-void
-FrameState::tryCopyRegister(FrameEntry *fe, FrameEntry *callStart)
-{
-    JS_ASSERT(cx->typeInferenceEnabled());
-    JS_ASSERT(!fe->isCopied() || !isEntryCopied(fe));
-
-    if (!fe->isCopy())
-        return;
-
-    /*
-     * Uncopy the entry if it shares a backing with any other entry used
-     * in the impending call. We want to ensure that within inline calls each
-     * entry has its own set of registers.
-     */
-
-    FrameEntry *uncopyfe = NULL;
-    for (FrameEntry *nfe = callStart; !uncopyfe && nfe < fe; nfe++) {
-        if (!nfe->isTracked())
-            continue;
-        if (nfe->backing() == fe->copyOf())
-            uncopyfe = nfe;
-    }
-
-    if (uncopyfe) {
-        JSValueType type = fe->isTypeKnown() ? fe->getKnownType() : JSVAL_TYPE_UNKNOWN;
-        if (type == JSVAL_TYPE_UNKNOWN)
-            syncType(fe);
-        fe->resetUnsynced();
-        if (type == JSVAL_TYPE_UNKNOWN) {
-            fe->type.sync();
-            fe->type.setMemory();
-        } else {
-            fe->setType(type);
-        }
-        if (type == JSVAL_TYPE_DOUBLE) {
-            FPRegisterID fpreg = allocFPReg();
-            masm.moveDouble(tempFPRegForData(uncopyfe), fpreg);
-            fe->data.setFPRegister(fpreg);
-            regstate(fpreg).associate(fe, RematInfo::DATA);
-        } else {
-            RegisterID reg = allocReg();
-            masm.move(tempRegForData(uncopyfe), reg);
-            fe->data.setRegister(reg);
-            regstate(reg).associate(fe, RematInfo::DATA);
-        }
-    } else {
-        /* Try to put the entry in a register. */
-        fe = fe->copyOf();
-        if (fe->isType(JSVAL_TYPE_DOUBLE))
-            tempFPRegForData(fe);
-        else
-            tempRegForData(fe);
-    }
-}
-
-Registers
-FrameState::getTemporaryCallRegisters(FrameEntry *callStart) const
-{
-    JS_ASSERT(cx->typeInferenceEnabled());
-
-    /*
-     * Get the registers in use for entries which will be popped once the
-     * call at callStart finishes.
-     */
-    Registers regs(Registers::AvailAnyRegs & ~a->freeRegs.freeMask);
-    Registers result = 0;
-    while (!regs.empty()) {
-        AnyRegisterID reg = regs.takeAnyReg();
-        FrameEntry *fe = regstate(reg).usedBy();
-        JS_ASSERT(fe);
-
-        if (fe >= callStart)
-            result.putReg(reg);
-    }
-
-    return result;
+
+    ActiveFrame *parent = a->parent;
+    cx->delete_(a);
+    a = parent;
 }
 
 void
 FrameState::takeReg(AnyRegisterID reg)
 {
     modifyReg(reg);
-    if (a->freeRegs.hasReg(reg)) {
-        a->freeRegs.takeReg(reg);
+    if (freeRegs.hasReg(reg)) {
+        freeRegs.takeReg(reg);
         JS_ASSERT(!regstate(reg).usedBy());
     } else {
         JS_ASSERT(regstate(reg).fe());
         evictReg(reg);
         regstate(reg).forget();
     }
 }
 
 #ifdef DEBUG
 const char *
 FrameState::entryName(const FrameEntry *fe) const
 {
-    if (fe == this_)
-        return "'this'";
-    if (fe == callee_)
-        return "callee";
-
     static char bufs[4][50];
     static unsigned which = 0;
     which = (which + 1) & 3;
     char *buf = bufs[which];
 
+    if (isTemporary(fe)) {
+        JS_snprintf(buf, 50, "temp%d", fe - temporaries);
+        return buf;
+    }
+
+    if (fe < a->callee_)
+        return "parent";
+
+    JS_ASSERT(fe >= a->callee_ && fe < a->sp);
+
+    if (fe == a->callee_)
+        return "callee";
+    if (fe == a->this_)
+        return "'this'";
+
     if (isArg(fe))
-        JS_snprintf(buf, 50, "arg%d", fe - args);
+        JS_snprintf(buf, 50, "arg%d", fe - a->args);
     else if (isLocal(fe))
-        JS_snprintf(buf, 50, "local%d", fe - locals);
-    else if (isTemporary(fe))
-        JS_snprintf(buf, 50, "temp%d", fe - temporaries);
+        JS_snprintf(buf, 50, "local%d", fe - a->locals);
     else
-        JS_snprintf(buf, 50, "slot%d", fe - spBase);
+        JS_snprintf(buf, 50, "slot%d", fe - a->spBase);
     return buf;
 }
 #endif
 
 void
 FrameState::evictReg(AnyRegisterID reg)
 {
     FrameEntry *fe = regstate(reg).fe();
@@ -456,33 +252,33 @@ FrameState::evictReg(AnyRegisterID reg)
         fe->data.setMemory();
     }
 }
 
 inline Lifetime *
 FrameState::variableLive(FrameEntry *fe, jsbytecode *pc) const
 {
     JS_ASSERT(cx->typeInferenceEnabled());
-    JS_ASSERT(fe < spBase && fe != callee_);
-
-    uint32 offset = pc - script->code;
-    return analysis->liveness(indexOfFe(fe)).live(offset);
+    JS_ASSERT(fe > a->callee_ && fe < a->spBase);
+
+    uint32 offset = pc - a->script->code;
+    return a->analysis->liveness(entrySlot(fe)).live(offset);
 }
 
 bool
 FrameState::isEntryCopied(FrameEntry *fe) const
 {
     /*
      * :TODO: It would be better for fe->isCopied() to mean 'is actually copied'
      * rather than 'might have copies', removing the need for this walk.
      */
     JS_ASSERT(fe->isCopied());
 
-    for (uint32 i = fe->trackerIndex() + 1; i < a->tracker.nentries; i++) {
-        FrameEntry *nfe = a->tracker[i];
+    for (uint32 i = fe->trackerIndex() + 1; i < tracker.nentries; i++) {
+        FrameEntry *nfe = tracker[i];
         if (!deadEntry(nfe) && nfe->isCopy() && nfe->copyOf() == fe)
             return true;
     }
 
     return false;
 }
 
 AnyRegisterID
@@ -512,23 +308,22 @@ FrameState::bestEvictReg(uint32 mask, bo
 
         /*
          * Liveness is not tracked for the callee or for stack slot frame entries.
          * The callee is evicted as early as needed, stack slots are evicted as
          * late as possible. :XXX: This is unfortunate if the stack slot lives
          * a long time (especially if it gets spilled anyways when we hit a branch).
          */
 
-        if (fe == callee_) {
-            JS_ASSERT(fe->inlined || (fe->data.synced() && fe->type.synced()));
+        if (fe == a->callee_) {
             JaegerSpew(JSpew_Regalloc, "result: %s is callee\n", reg.name());
             return reg;
         }
 
-        if (fe >= spBase && !isTemporary(fe)) {
+        if (fe >= a->spBase && !isTemporary(fe)) {
             if (!fallback.isSet()) {
                 fallback = reg;
                 fallbackOffset = 0;
             }
             JaegerSpew(JSpew_Regalloc, "    %s is on stack\n", reg.name());
             continue;
         }
 
@@ -541,38 +336,34 @@ FrameState::bestEvictReg(uint32 mask, bo
             if (!fallback.isSet()) {
                 fallback = reg;
                 fallbackOffset = 0;
             }
             JaegerSpew(JSpew_Regalloc, "    %s has copies\n", reg.name());
             continue;
         }
 
-        if (isTemporary(fe)) {
+        if (isTemporary(fe) || fe < a->callee_) {
             /*
              * All temporaries we currently generate are for loop invariants,
              * which we treat as being live everywhere within the loop.
+             * Additionally, if this is an inlined frame then any entries
+             * belonging to parents are treated as live everywhere in the call.
              */
-            JS_ASSERT(loop);
-            if (!fallback.isSet() || loop->backedgeOffset() > fallbackOffset) {
+            uint32 offset = a->parent ? a->script->length : loop->backedgeOffset();
+            if (!fallback.isSet() || offset > fallbackOffset) {
                 fallback = reg;
-                fallbackOffset = loop->backedgeOffset();
+                fallbackOffset = offset;
             }
             JaegerSpew(JSpew_Regalloc, "    %s is a loop temporary\n", reg.name());
             continue;
         }
 
-        /*
-         * Any register for an entry dead at this bytecode is fine to evict.
-         * We require an entry to be live at the bytecode which kills it.
-         * This ensures that if multiple registers are used for the entry
-         * (i.e. type and payload), we do not haphazardly evict the first
-         * one when allocating the second one.
-         */
-        Lifetime *lifetime = variableLive(fe, PC);
+        /* Any register for an entry dead at this bytecode is fine to evict. */
+        Lifetime *lifetime = variableLive(fe, a->PC);
         if (!lifetime) {
             /*
              * Mark the entry as synced to avoid emitting a store, we don't need
              * to keep this value around.
              */
             if (!fe->data.synced())
                 fe->data.sync();
             if (!fe->type.synced())
@@ -656,28 +447,68 @@ FrameState::evictSomeReg(uint32 mask)
 
     evictReg(fallback.reg());
     return fallback.reg();
 }
 
 void
 FrameState::resetInternalState()
 {
-    for (uint32 i = 0; i < a->tracker.nentries; i++)
-        a->tracker[i]->untrack();
-
-    a->tracker.reset();
-    a->freeRegs = Registers(Registers::AvailAnyRegs);
+    for (uint32 i = 0; i < tracker.nentries; i++)
+        tracker[i]->untrack();
+
+    tracker.reset();
+    freeRegs = Registers(Registers::AvailAnyRegs);
 }
 
 void
 FrameState::discardFrame()
 {
     resetInternalState();
-    PodArrayZero(a->regstate_);
+    PodArrayZero(regstate_);
+}
+
+FrameEntry *
+FrameState::snapshotState()
+{
+    /* Everything can be recovered from a copy of the frame entries. */
+    FrameEntry *snapshot = cx->array_new<FrameEntry>(nentries);
+    if (!snapshot)
+        return NULL;
+    PodCopy(snapshot, entries, nentries);
+    return snapshot;
+}
+
+void
+FrameState::restoreFromSnapshot(FrameEntry *snapshot)
+{
+    discardFrame();
+    PodCopy(entries, snapshot, nentries);
+
+    for (unsigned i = 0; i < nentries; i++) {
+        FrameEntry *fe = entries + i;
+        if (!fe->isTracked())
+            continue;
+        tracker.entries[fe->index_] = fe;
+        tracker.nentries = Max(tracker.nentries, fe->index_ + 1);
+        if (fe->isCopy())
+            continue;
+        if (fe->type.inRegister()) {
+            freeRegs.takeReg(fe->type.reg());
+            regstate(fe->type.reg()).associate(fe, RematInfo::TYPE);
+        }
+        if (fe->data.inRegister()) {
+            freeRegs.takeReg(fe->data.reg());
+            regstate(fe->data.reg()).associate(fe, RematInfo::DATA);
+        }
+        if (fe->data.inFPRegister()) {
+            freeRegs.takeReg(fe->data.fpreg());
+            regstate(fe->data.fpreg()).associate(fe, RematInfo::DATA);
+        }
+    }
 }
 
 void
 FrameState::forgetEverything()
 {
     resetInternalState();
 
 #ifdef DEBUG
@@ -691,75 +522,64 @@ FrameState::forgetEverything()
 #ifdef DEBUG
 void
 FrameState::dumpAllocation(RegisterAllocation *alloc)
 {
     JS_ASSERT(cx->typeInferenceEnabled());
     for (unsigned i = 0; i < Registers::TotalAnyRegisters; i++) {
         AnyRegisterID reg = AnyRegisterID::fromRaw(i);
         if (alloc->assigned(reg)) {
-            printf(" (%s: %s%s)", reg.name(), entryName(entries + alloc->slot(reg)),
+            printf(" (%s: %s%s)", reg.name(), entryName(entries + alloc->index(reg)),
                    alloc->synced(reg) ? "" : " unsynced");
         }
     }
-    Registers regs = alloc->getParentRegs();
-    while (!regs.empty()) {
-        AnyRegisterID reg = regs.takeAnyReg();
-        printf(" (%s: parent)", reg.name());
-    }
     printf("\n");
 }
 #endif
 
 RegisterAllocation *
 FrameState::computeAllocation(jsbytecode *target)
 {
     JS_ASSERT(cx->typeInferenceEnabled());
     RegisterAllocation *alloc = ArenaNew<RegisterAllocation>(cx->compartment->pool, false);
     if (!alloc)
         return NULL;
 
-    if (analysis->getCode(target).exceptionEntry || analysis->getCode(target).switchTarget ||
+    if (a->analysis->getCode(target).exceptionEntry || a->analysis->getCode(target).switchTarget ||
         JSOp(*target) == JSOP_TRAP) {
         /* State must be synced at exception and switch targets, and at traps. */
 #ifdef DEBUG
         if (IsJaegerSpewChannelActive(JSpew_Regalloc)) {
-            JaegerSpew(JSpew_Regalloc, "allocation at %u:", target - script->code);
+            JaegerSpew(JSpew_Regalloc, "allocation at %u:", target - a->script->code);
             dumpAllocation(alloc);
         }
 #endif
         return alloc;
     }
 
-    alloc->setParentRegs(a->parentRegs);
-
     /*
-     * The allocation to use at the target consists of all non-stack entries
-     * currently in registers which are live at the target.
+     * The allocation to use at the target consists of all parent and non-stack
+     * entries currently in registers which are live at the target.
      */
-    Registers regs = Registers::AvailRegs;
+    Registers regs = Registers::AvailAnyRegs;
     while (!regs.empty()) {
         AnyRegisterID reg = regs.takeAnyReg();
-        if (a->freeRegs.hasReg(reg) || regstate(reg).type() == RematInfo::TYPE)
+        if (freeRegs.hasReg(reg) || regstate(reg).type() == RematInfo::TYPE)
             continue;
         FrameEntry *fe = regstate(reg).fe();
-        if (fe == callee_)
-            continue;
-        if (fe < spBase && !variableLive(fe, target))
-            continue;
-        if (fe >= spBase && !isTemporary(fe))
-            continue;
-        if (isTemporary(fe) && uint32(target - script->code) > loop->backedgeOffset())
-            continue;
-        alloc->set(reg, indexOfFe(fe), fe->data.synced());
+        if (fe < a->callee_ ||
+            (fe > a->callee_ && fe < a->spBase && variableLive(fe, target)) ||
+            (isTemporary(fe) && (a->parent || uint32(target - a->script->code) <= loop->backedgeOffset()))) {
+            alloc->set(reg, fe - entries, fe->data.synced());
+        }
     }
 
 #ifdef DEBUG
     if (IsJaegerSpewChannelActive(JSpew_Regalloc)) {
-        JaegerSpew(JSpew_Regalloc, "allocation at %u:", target - script->code);
+        JaegerSpew(JSpew_Regalloc, "allocation at %u:", target - a->script->code);
         dumpAllocation(alloc);
     }
 #endif
 
     return alloc;
 }
 
 void
@@ -769,233 +589,226 @@ FrameState::relocateReg(AnyRegisterID re
 
     /*
      * The reg needs to be freed to make room for a variable carried across
      * a branch. Either evict its entry, or try to move it to a different
      * register if it is needed to test the branch condition. :XXX: could also
      * watch for variables which are carried across the branch but are in
      * the register for a different carried entry; we just spill these for now.
      */
-    JS_ASSERT(!a->freeRegs.hasReg(reg));
+    JS_ASSERT(!freeRegs.hasReg(reg));
 
     for (unsigned i = 0; i < uses.nuses; i++) {
         FrameEntry *fe = peek(-1 - i);
         if (fe->isCopy())
             fe = fe->copyOf();
         if (reg.isReg() && fe->data.inRegister() && fe->data.reg() == reg.reg()) {
             pinReg(reg);
             RegisterID nreg = allocReg();
             unpinReg(reg);
 
             JaegerSpew(JSpew_Regalloc, "relocating %s\n", reg.name());
 
             masm.move(reg.reg(), nreg);
             regstate(reg).forget();
             regstate(nreg).associate(fe, RematInfo::DATA);
             fe->data.setRegister(nreg);
-            a->freeRegs.putReg(reg);
+            freeRegs.putReg(reg);
             return;
         }
     }
 
     JaegerSpew(JSpew_Regalloc, "could not relocate %s\n", reg.name());
 
     takeReg(reg);
-    a->freeRegs.putReg(reg);
+    freeRegs.putReg(reg);
 }
 
 bool
 FrameState::syncForBranch(jsbytecode *target, Uses uses)
 {
     /* There should be no unowned or pinned registers. */
 #ifdef DEBUG
     Registers checkRegs(Registers::AvailAnyRegs);
     while (!checkRegs.empty()) {
         AnyRegisterID reg = checkRegs.takeAnyReg();
-        JS_ASSERT_IF(!a->freeRegs.hasReg(reg), regstate(reg).fe());
+        JS_ASSERT_IF(!freeRegs.hasReg(reg), regstate(reg).fe());
     }
 #endif
 
     if (!cx->typeInferenceEnabled()) {
         syncAndForgetEverything();
         return true;
     }
 
-    Registers regs = 0;
-
-    RegisterAllocation *&alloc = analysis->getAllocation(target);
+    RegisterAllocation *&alloc = a->analysis->getAllocation(target);
     if (!alloc) {
         alloc = computeAllocation(target);
         if (!alloc)
             return false;
     }
 
+    syncForAllocation(alloc, false, uses);
+
+    return true;
+}
+
+void
+FrameState::syncForAllocation(RegisterAllocation *alloc, bool inlineReturn, Uses uses)
+{
     /*
      * First pass. Sync all entries which will not be carried in a register,
-     * and uncopy everything except values used in the branch.
+     * and uncopy everything except values popped by the branch or before the
+     * call returns.
      */
 
-    for (uint32 i = a->tracker.nentries - 1; i < a->tracker.nentries; i--) {
-        FrameEntry *fe = a->tracker[i];
-
-        if (deadEntry(fe, uses.nuses)) {
+    FrameEntry *topEntry;
+    if (inlineReturn)
+        topEntry = a->parent->sp - (GET_ARGC(a->parent->PC) + 2);
+    else
+        topEntry = a->sp - uses.nuses;
+
+    for (uint32 i = tracker.nentries - 1; i < tracker.nentries; i--) {
+        FrameEntry *fe = tracker[i];
+
+        if (deadEntry(fe))
+            continue;
+        if (!isTemporary(fe) && fe >= topEntry) {
             /* No need to sync, this will get popped before branching or the inline return. */
             continue;
         }
 
         /* Force syncs for locals which are dead at the current PC. */
-        if (isLocal(fe) && !analysis->slotEscapes(indexOfFe(fe))) {
-            Lifetime *lifetime = variableLive(fe, PC);
+        if (isLocal(fe) && !a->analysis->slotEscapes(entrySlot(fe))) {
+            Lifetime *lifetime = variableLive(fe, a->PC);
             if (!lifetime) {
                 if (!fe->data.synced())
                     fe->data.sync();
                 if (!fe->type.synced())
                     fe->type.sync();
             }
         }
 
-        unsigned index = indexOfFe(fe);
-        if (!fe->isCopy() && alloc->hasAnyReg(index)) {
+        if (!fe->isCopy() && alloc->hasAnyReg(fe - entries)) {
             /* Types are always synced, except for known doubles. */
             if (!fe->isType(JSVAL_TYPE_DOUBLE))
                 syncType(fe);
         } else {
             syncFe(fe);
             if (fe->isCopy())
                 fe->resetSynced();
         }
     }
 
-    syncParentRegistersInMask(masm, a->parentRegs.freeMask & ~alloc->getParentRegs().freeMask, true);
-
     /*
      * Second pass. Move entries carried in registers to the right register
      * provided no value used in the branch is evicted. After this pass,
      * everything will either be in the right register or will be in memory.
      */
 
-    regs = Registers(Registers::AvailAnyRegs);
+    Registers regs = Registers(Registers::AvailAnyRegs);
     while (!regs.empty()) {
         AnyRegisterID reg = regs.takeAnyReg();
         if (!alloc->assigned(reg))
             continue;
-        FrameEntry *fe = getOrTrack(alloc->slot(reg));
+        FrameEntry *fe = getOrTrack(alloc->index(reg));
         JS_ASSERT(!fe->isCopy());
 
         JS_ASSERT_IF(!fe->isType(JSVAL_TYPE_DOUBLE), fe->type.synced());
         if (!fe->data.synced() && alloc->synced(reg))
             syncFe(fe);
 
         if (fe->dataInRegister(reg))
             continue;
 
-        if (!a->freeRegs.hasReg(reg))
+        if (!freeRegs.hasReg(reg))
             relocateReg(reg, alloc, uses);
 
         /*
          * It is possible that the fe is known to be a double currently but is not
          * known to be a double at the join point (it may have non-double values
          * assigned elsewhere in the script). It is *not* possible for the fe to
          * be a non-double currently but a double at the join point --- the Compiler
          * must have called fixDoubleTypes before branching.
          */
         if (reg.isReg() && fe->isType(JSVAL_TYPE_DOUBLE)) {
             syncFe(fe);
             forgetAllRegs(fe);
             fe->resetSynced();
         }
-        JS_ASSERT_IF(!reg.isReg(), fe->isType(JSVAL_TYPE_DOUBLE));
+        if (!reg.isReg()) {
+            JS_ASSERT(!fe->isNotType(JSVAL_TYPE_DOUBLE));
+            if (!fe->isTypeKnown())
+                learnType(fe, JSVAL_TYPE_DOUBLE, false);
+        }
 
         if (reg.isReg()) {
             RegisterID nreg = reg.reg();
             if (fe->data.inMemory()) {
                 masm.loadPayload(addressOf(fe), nreg);
             } else if (fe->isConstant()) {
                 masm.loadValuePayload(fe->getValue(), nreg);
             } else {
                 JS_ASSERT(fe->data.inRegister() && fe->data.reg() != nreg);
                 masm.move(fe->data.reg(), nreg);
-                a->freeRegs.putReg(fe->data.reg());
+                freeRegs.putReg(fe->data.reg());
                 regstate(fe->data.reg()).forget();
             }
             fe->data.setRegister(nreg);
         } else {
             FPRegisterID nreg = reg.fpreg();
             if (fe->data.inMemory()) {
                 masm.loadDouble(addressOf(fe), nreg);
             } else if (fe->isConstant()) {
                 masm.slowLoadConstantDouble(fe->getValue().toDouble(), nreg);
             } else {
                 JS_ASSERT(fe->data.inFPRegister() && fe->data.fpreg() != nreg);
                 masm.moveDouble(fe->data.fpreg(), nreg);
-                a->freeRegs.putReg(fe->data.fpreg());
+                freeRegs.putReg(fe->data.fpreg());
                 regstate(fe->data.fpreg()).forget();
             }
             fe->data.setFPRegister(nreg);
         }
 
-        a->freeRegs.takeReg(reg);
+        freeRegs.takeReg(reg);
         regstate(reg).associate(fe, RematInfo::DATA);
-
-        /*
-         * If this register is also a parent register at the branch target,
-         * we are restoring a parent register we previously evicted.
-         */
-        if (alloc->getParentRegs().hasReg(reg))
-            a->parentRegs.putReg(reg);
     }
-
-    /* Restore any parent registers needed at the branch, evicting those still in use. */
-    Registers parents(alloc->getParentRegs().freeMask & ~a->parentRegs.freeMask);
-    while (!parents.empty()) {
-        AnyRegisterID reg = parents.takeAnyReg();
-        if (!a->freeRegs.hasReg(reg))
-            relocateReg(reg, alloc, uses);
-        a->parentRegs.putReg(reg);
-        restoreParentRegister(masm, reg);
-    }
-
-    return true;
 }
 
 bool
-FrameState::discardForJoin(jsbytecode *target, uint32 stackDepth)
+FrameState::discardForJoin(RegisterAllocation *&alloc, uint32 stackDepth)
 {
     if (!cx->typeInferenceEnabled()) {
         resetInternalState();
-        PodArrayZero(a->regstate_);
-        sp = spBase + stackDepth;
+        PodArrayZero(regstate_);
+        a->sp = a->spBase + stackDepth;
         return true;
     }
 
-    RegisterAllocation *&alloc = analysis->getAllocation(target);
-
     if (!alloc) {
         /*
          * This shows up for loop entries which are not reachable from the
          * loop head, and for exception, switch target and trap safe points.
          */
         alloc = ArenaNew<RegisterAllocation>(cx->compartment->pool, false);
         if (!alloc)
             return false;
     }
 
     resetInternalState();
-    PodArrayZero(a->regstate_);
-
-    a->parentRegs = alloc->getParentRegs();
+    PodArrayZero(regstate_);
 
     Registers regs(Registers::AvailAnyRegs);
     while (!regs.empty()) {
         AnyRegisterID reg = regs.takeAnyReg();
         if (!alloc->assigned(reg))
             continue;
-        FrameEntry *fe = getOrTrack(alloc->slot(reg));
-
-        a->freeRegs.takeReg(reg);
+        FrameEntry *fe = getOrTrack(alloc->index(reg));
+
+        freeRegs.takeReg(reg);
 
         /*
          * We can't look at the type of the fe as we haven't restored analysis types yet,
          * but if this is an FP reg it will be set to double type.
          */
         if (reg.isReg()) {
             fe->data.setRegister(reg.reg());
         } else {
@@ -1003,111 +816,101 @@ FrameState::discardForJoin(jsbytecode *t
             fe->data.setFPRegister(reg.fpreg());
         }
 
         regstate(reg).associate(fe, RematInfo::DATA);
         if (!alloc->synced(reg))
             fe->data.unsync();
     }
 
-    sp = spBase + stackDepth;
+    a->sp = a->spBase + stackDepth;
 
     for (unsigned i = 0; i < stackDepth; i++)
-        a->extraArray[i].reset();
+        extraArray[a->spBase + i - entries].reset();
 
     return true;
 }
 
 bool
 FrameState::consistentRegisters(jsbytecode *target)
 {
     if (!cx->typeInferenceEnabled()) {
-        JS_ASSERT(a->freeRegs.freeMask == Registers::AvailAnyRegs);
+        JS_ASSERT(freeRegs.freeMask == Registers::AvailAnyRegs);
         return true;
     }
 
     /*
      * Before calling this, either the entire state should have been synced or
      * syncForBranch should have been called. These will ensure that any FE
      * which is not consistent with the target's register state has already
      * been synced, and no stores will need to be issued by prepareForJump.
      */
-    RegisterAllocation *alloc = analysis->getAllocation(target);
+    RegisterAllocation *alloc = a->analysis->getAllocation(target);
     JS_ASSERT(alloc);
 
     Registers regs(Registers::AvailAnyRegs);
     while (!regs.empty()) {
         AnyRegisterID reg = regs.takeAnyReg();
         if (alloc->assigned(reg)) {
-            FrameEntry *needed = getOrTrack(alloc->slot(reg));
-            if (!a->freeRegs.hasReg(reg)) {
+            FrameEntry *needed = getOrTrack(alloc->index(reg));
+            if (!freeRegs.hasReg(reg)) {
                 FrameEntry *fe = regstate(reg).fe();
                 if (fe != needed)
                     return false;
             } else {
                 return false;
             }
         }
     }
 
-    if (!a->parentRegs.hasAllRegs(alloc->getParentRegs().freeMask))
-        return false;
-
     return true;
 }
 
 void
 FrameState::prepareForJump(jsbytecode *target, Assembler &masm, bool synced)
 {
     if (!cx->typeInferenceEnabled())
         return;
 
     JS_ASSERT_IF(!synced, !consistentRegisters(target));
 
-    RegisterAllocation *alloc = analysis->getAllocation(target);
+    RegisterAllocation *alloc = a->analysis->getAllocation(target);
     JS_ASSERT(alloc);
 
     Registers regs = 0;
 
     regs = Registers(Registers::AvailAnyRegs);
     while (!regs.empty()) {
         AnyRegisterID reg = regs.takeAnyReg();
         if (!alloc->assigned(reg))
             continue;
 
-        const FrameEntry *fe = getOrTrack(alloc->slot(reg));
+        const FrameEntry *fe = getOrTrack(alloc->index(reg));
         if (synced || !fe->backing()->dataInRegister(reg)) {
             JS_ASSERT_IF(!synced, fe->data.synced());
             if (reg.isReg())
                 masm.loadPayload(addressOf(fe), reg.reg());
             else
                 masm.loadDouble(addressOf(fe), reg.fpreg());
         }
     }
-
-    regs = Registers(alloc->getParentRegs());
-    while (!regs.empty()) {
-        AnyRegisterID reg = regs.takeAnyReg();
-        if (synced || !a->parentRegs.hasReg(reg))
-            restoreParentRegister(masm, reg);
-    }
 }
 
 void
 FrameState::storeTo(FrameEntry *fe, Address address, bool popped)
 {
     if (fe->isConstant()) {
         masm.storeValue(fe->getValue(), address);
         return;
     }
 
     if (fe->isCopy())
         fe = fe->copyOf();
 
-    JS_ASSERT(!a->freeRegs.hasReg(address.base));
+    JS_ASSERT(!freeRegs.hasReg(address.base));
 
     /* If loading from memory, ensure destination differs. */
     JS_ASSERT_IF((fe->type.inMemory() || fe->data.inMemory()),
                  addressOf(fe).base != address.base ||
                  addressOf(fe).offset != address.offset);
 
     if (fe->data.inFPRegister()) {
         masm.storeDouble(fe->data.fpreg(), address);
@@ -1331,18 +1134,18 @@ void FrameState::loadForReturn(FrameEntr
 }
 
 #ifdef DEBUG
 void
 FrameState::assertValidRegisterState() const
 {
     Registers checkedFreeRegs(Registers::AvailAnyRegs);
 
-    for (uint32 i = 0; i < a->tracker.nentries; i++) {
-        FrameEntry *fe = a->tracker[i];
+    for (uint32 i = 0; i < tracker.nentries; i++) {
+        FrameEntry *fe = tracker[i];
         if (deadEntry(fe))
             continue;
 
         JS_ASSERT(i == fe->trackerIndex());
 
         if (fe->isCopy()) {
             JS_ASSERT_IF(!fe->copyOf()->temporary, fe > fe->copyOf());
             JS_ASSERT(fe->trackerIndex() > fe->copyOf()->trackerIndex());
@@ -1363,94 +1166,54 @@ FrameState::assertValidRegisterState() c
         }
         if (fe->data.inFPRegister()) {
             JS_ASSERT(fe->isType(JSVAL_TYPE_DOUBLE));
             checkedFreeRegs.takeReg(fe->data.fpreg());
             JS_ASSERT(regstate(fe->data.fpreg()).fe() == fe);
         }
     }
 
-    JS_ASSERT(checkedFreeRegs == a->freeRegs);
+    JS_ASSERT(checkedFreeRegs == freeRegs);
 
     for (uint32 i = 0; i < Registers::TotalRegisters; i++) {
         AnyRegisterID reg = (RegisterID) i;
         JS_ASSERT(!regstate(reg).isPinned());
-        JS_ASSERT_IF(regstate(reg).fe(), !a->freeRegs.hasReg(reg));
+        JS_ASSERT_IF(regstate(reg).fe(), !freeRegs.hasReg(reg));
         JS_ASSERT_IF(regstate(reg).fe(), regstate(reg).fe()->isTracked());
     }
 
     for (uint32 i = 0; i < Registers::TotalFPRegisters; i++) {
         AnyRegisterID reg = (FPRegisterID) i;
         JS_ASSERT(!regstate(reg).isPinned());
-        JS_ASSERT_IF(regstate(reg).fe(), !a->freeRegs.hasReg(reg));
+        JS_ASSERT_IF(regstate(reg).fe(), !freeRegs.hasReg(reg));
         JS_ASSERT_IF(regstate(reg).fe(), regstate(reg).fe()->isTracked());
         JS_ASSERT_IF(regstate(reg).fe(), regstate(reg).type() == RematInfo::DATA);
     }
 }
 #endif
 
 #if defined JS_NUNBOX32
 void
 FrameState::syncFancy(Assembler &masm, Registers avail, FrameEntry *resumeAt,
                       FrameEntry *bottom) const
 {
-    a->reifier.reset(&masm, avail, resumeAt, bottom);
+    reifier.reset(&masm, avail, resumeAt, bottom);
 
     for (FrameEntry *fe = resumeAt; fe >= bottom; fe--) {
         if (!fe->isTracked())
             continue;
 
-        a->reifier.sync(fe);
+        reifier.sync(fe);
     }
 }
+
 #endif
-
-void
-FrameState::syncParentRegister(Assembler &masm, AnyRegisterID reg) const
-{
-    ActiveFrame *which = a->parent;
-    while (which->freeRegs.hasReg(reg))
-        which = which->parent;
-
-    FrameEntry *fe = which->regstate(reg).usedBy();
-    Address address = addressOf(fe, which);
-
-    if (reg.isReg() && fe->type.inRegister() && fe->type.reg() == reg.reg()) {
-        if (!fe->type.synced())
-            masm.storeTypeTag(reg.reg(), address);
-    } else if (reg.isReg()) {
-        JS_ASSERT(fe->data.inRegister() && fe->data.reg() == reg.reg());
-        if (!fe->data.synced())
-            masm.storePayload(reg.reg(), address);
-    } else {
-        JS_ASSERT(fe->data.inFPRegister() && fe->data.fpreg() == reg.fpreg());
-        if (!fe->data.synced())
-            masm.storeDouble(reg.fpreg(), address);
-    }
-}
-
-void
-FrameState::syncParentRegistersInMask(Assembler &masm, uint32 mask, bool update) const
-{
-    JS_ASSERT((a->parentRegs.freeMask & mask) == mask);
-
-    Registers parents(mask);
-    while (!parents.empty()) {
-        AnyRegisterID reg = parents.takeAnyReg();
-        if (update)
-            a->parentRegs.takeReg(reg);
-        syncParentRegister(masm, reg);
-    }
-}
-
 void
 FrameState::sync(Assembler &masm, Uses uses) const
 {
-    syncParentRegistersInMask(masm, a->parentRegs.freeMask, false);
-
     if (!entries)
         return;
 
     /* Sync all registers up-front. */
     Registers allRegs(Registers::AvailAnyRegs);
     while (!allRegs.empty()) {
         AnyRegisterID reg = allRegs.takeAnyReg();
         FrameEntry *fe = regstate(reg).usedBy();
@@ -1481,22 +1244,22 @@ FrameState::sync(Assembler &masm, Uses u
         }
 #endif
     }
 
     /*
      * Keep track of free registers using a bitmask. If we have to drop into
      * syncFancy(), then this mask will help avoid eviction.
      */
-    Registers avail(a->freeRegs.freeMask & Registers::AvailRegs);
+    Registers avail(freeRegs.freeMask & Registers::AvailRegs);
     Registers temp(Registers::TempAnyRegs);
 
-    FrameEntry *bottom = cx->typeInferenceEnabled() ? entries : sp - uses.nuses;
-
-    for (FrameEntry *fe = sp - 1; fe >= bottom; fe--) {
+    FrameEntry *bottom = cx->typeInferenceEnabled() ? entries : a->sp - uses.nuses;
+
+    for (FrameEntry *fe = a->sp - 1; fe >= bottom; fe--) {
         if (!fe->isTracked())
             continue;
 
         if (fe->isType(JSVAL_TYPE_DOUBLE)) {
             /* Copies of in-memory doubles can be synced without spilling. */
             ensureFeSynced(fe, masm);
             continue;
         }
@@ -1561,29 +1324,26 @@ FrameState::sync(Assembler &masm, Uses u
             ensureTypeSynced(fe, masm);
 #endif
     }
 }
 
 void
 FrameState::syncAndKill(Registers kill, Uses uses, Uses ignore)
 {
-    syncParentRegistersInMask(masm, a->parentRegs.freeMask, true);
-    JS_ASSERT(a->parentRegs.empty());
-
     if (loop) {
         /*
          * Drop any remaining loop registers so we don't do any more after-the-fact
          * allocation of the initial register state.
          */
         loop->clearLoopRegisters();
     }
 
     /* Sync all kill-registers up-front. */
-    Registers search(kill.freeMask & ~a->freeRegs.freeMask);
+    Registers search(kill.freeMask & ~freeRegs.freeMask);
     while (!search.empty()) {
         AnyRegisterID reg = search.takeAnyReg();
         FrameEntry *fe = regstate(reg).usedBy();
         if (!fe || deadEntry(fe, ignore.nuses))
             continue;
 
         JS_ASSERT(fe->isTracked());
 
@@ -1617,20 +1377,20 @@ FrameState::syncAndKill(Registers kill, 
             syncData(fe);
         } else {
             JS_ASSERT(fe->type.reg() == reg.reg());
             syncType(fe);
         }
 #endif
     }
 
-    uint32 maxvisits = a->tracker.nentries;
-    FrameEntry *bottom = cx->typeInferenceEnabled() ? entries : sp - uses.nuses;
-
-    for (FrameEntry *fe = sp - 1; fe >= bottom && maxvisits; fe--) {
+    uint32 maxvisits = tracker.nentries;
+    FrameEntry *bottom = cx->typeInferenceEnabled() ? entries : a->sp - uses.nuses;
+
+    for (FrameEntry *fe = a->sp - 1; fe >= bottom && maxvisits; fe--) {
         if (!fe->isTracked())
             continue;
 
         maxvisits--;
 
         if (deadEntry(fe, ignore.nuses))
             continue;
 
@@ -1653,17 +1413,17 @@ FrameState::syncAndKill(Registers kill, 
             fe->type.setMemory();
         }
     }
 
     /*
      * Anything still alive at this point is guaranteed to be synced. However,
      * it is necessary to evict temporary registers.
      */
-    search = Registers(kill.freeMask & ~a->freeRegs.freeMask);
+    search = Registers(kill.freeMask & ~freeRegs.freeMask);
     while (!search.empty()) {
         AnyRegisterID reg = search.takeAnyReg();
         FrameEntry *fe = regstate(reg).usedBy();
         if (!fe || deadEntry(fe, ignore.nuses))
             continue;
 
         JS_ASSERT(fe->isTracked() && !fe->isType(JSVAL_TYPE_DOUBLE));
 
@@ -1677,76 +1437,37 @@ FrameState::syncAndKill(Registers kill, 
             fe->type.setMemory();
         }
 
         forgetReg(reg);
     }
 }
 
 void
-FrameState::restoreParentRegister(Assembler &masm, AnyRegisterID reg) const
-{
-    ActiveFrame *which = a->parent;
-    while (which->freeRegs.hasReg(reg))
-        which = which->parent;
-
-    FrameEntry *fe = which->regstate(reg).usedBy();
-    Address address = addressOf(fe, which);
-
-    if (reg.isReg() && fe->type.inRegister() && fe->type.reg() == reg.reg()) {
-        masm.loadTypeTag(address, reg.reg());
-    } else if (reg.isReg()) {
-        JS_ASSERT(fe->data.inRegister() && fe->data.reg() == reg.reg());
-        masm.loadPayload(address, reg.reg());
-    } else {
-        JS_ASSERT(fe->data.inFPRegister() && fe->data.fpreg() == reg.fpreg());
-        masm.loadDouble(address, reg.fpreg());
-    }
-}
-
-void
-FrameState::restoreParentRegistersInMask(Assembler &masm, uint32 mask, bool update) const
-{
-    JS_ASSERT_IF(update, (a->parentRegs.freeMask & mask) == 0);
-
-    Registers parents(mask);
-    while (!parents.empty()) {
-        AnyRegisterID reg = parents.takeAnyReg();
-        if (update) {
-            JS_ASSERT(a->freeRegs.hasReg(reg));
-            a->parentRegs.putReg(reg);
-        }
-        restoreParentRegister(masm, reg);
-    }
-}
-
-void
 FrameState::merge(Assembler &masm, Changes changes) const
 {
     /*
      * Note: this should only be called by StubCompiler::rejoin, which will notify
      * this FrameState about the jump to patch up in case a new loop register is
      * allocated later.
      */
 
-    restoreParentRegistersInMask(masm, a->parentRegs.freeMask, false);
-
     /*
      * For any changed values we are merging back which we consider to be doubles,
      * ensure they actually are doubles.  They must be doubles or ints, but we
      * do not require stub paths to always generate a double when needed.
      * :FIXME: we check this on OOL stub calls, but not inline stub calls.
      */
     for (unsigned i = 0; i < changes.nchanges; i++) {
-        FrameEntry *fe = sp - 1 - i;
+        FrameEntry *fe = a->sp - 1 - i;
         if (fe->isTracked() && fe->isType(JSVAL_TYPE_DOUBLE))
             masm.ensureInMemoryDouble(addressOf(fe));
     }
 
-    uint32 mask = Registers::AvailAnyRegs & ~a->freeRegs.freeMask;
+    uint32 mask = Registers::AvailAnyRegs & ~freeRegs.freeMask;
     Registers search(mask);
 
     while (!search.empty(mask)) {
         AnyRegisterID reg = search.peekReg(mask);
         FrameEntry *fe = regstate(reg).usedBy();
 
         if (!fe) {
             search.takeReg(reg);
@@ -1789,17 +1510,17 @@ FrameState::copyDataIntoReg(FrameEntry *
     if (fe->isCopy())
         fe = fe->copyOf();
 
     if (!fe->data.inRegister())
         tempRegForData(fe);
 
     RegisterID reg = fe->data.reg();
     if (reg == hint) {
-        if (a->freeRegs.empty(Registers::AvailRegs)) {
+        if (freeRegs.empty(Registers::AvailRegs)) {
             ensureDataSynced(fe, masm);
             fe->data.setMemory();
         } else {
             reg = allocReg();
             masm.move(hint, reg);
             fe->data.setRegister(reg);
             regstate(reg).associate(regstate(hint).fe(), RematInfo::DATA);
         }
@@ -1819,32 +1540,32 @@ FrameState::copyDataIntoReg(Assembler &m
 {
     JS_ASSERT(!fe->isConstant());
 
     if (fe->isCopy())
         fe = fe->copyOf();
 
     if (fe->data.inRegister()) {
         RegisterID reg = fe->data.reg();
-        if (a->freeRegs.empty(Registers::AvailRegs)) {
+        if (freeRegs.empty(Registers::AvailRegs)) {
             ensureDataSynced(fe, masm);
             fe->data.setMemory();
             regstate(reg).forget();
             modifyReg(reg);
         } else {
             RegisterID newReg = allocReg();
             masm.move(reg, newReg);
             reg = newReg;
         }
         return reg;
     }
 
     RegisterID reg = allocReg();
 
-    if (!a->freeRegs.empty(Registers::AvailRegs))
+    if (!freeRegs.empty(Registers::AvailRegs))
         masm.move(tempRegForData(fe), reg);
     else
         masm.loadPayload(addressOf(fe),reg);
 
     return reg;
 }
 
 JSC::MacroAssembler::RegisterID
@@ -1852,32 +1573,32 @@ FrameState::copyTypeIntoReg(FrameEntry *
 {
     if (fe->isCopy())
         fe = fe->copyOf();
 
     JS_ASSERT(!fe->type.isConstant());
 
     if (fe->type.inRegister()) {
         RegisterID reg = fe->type.reg();
-        if (a->freeRegs.empty(Registers::AvailRegs)) {
+        if (freeRegs.empty(Registers::AvailRegs)) {
             ensureTypeSynced(fe, masm);
             fe->type.setMemory();
             regstate(reg).forget();
             modifyReg(reg);
         } else {
             RegisterID newReg = allocReg();
             masm.move(reg, newReg);
             reg = newReg;
         }
         return reg;
     }
 
     RegisterID reg = allocReg();
 
-    if (!a->freeRegs.empty(Registers::AvailRegs))
+    if (!freeRegs.empty(Registers::AvailRegs))
         masm.move(tempRegForType(fe), reg);
     else
         masm.loadTypeTag(addressOf(fe), reg);
 
     return reg;
 }
 
 JSC::MacroAssembler::RegisterID
@@ -1908,17 +1629,17 @@ FrameState::ownRegForType(FrameEntry *fe
     if (fe->isCopy()) {
         /* For now, just do an extra move. The reg must be mutable. */
         FrameEntry *backing = fe->copyOf();
         if (!backing->type.inRegister()) {
             JS_ASSERT(backing->type.inMemory());
             tempRegForType(backing);
         }
 
-        if (a->freeRegs.empty(Registers::AvailRegs)) {
+        if (freeRegs.empty(Registers::AvailRegs)) {
             /* For now... just steal the register that already exists. */
             ensureTypeSynced(backing, masm);
             reg = backing->type.reg();
             backing->type.setMemory();
             regstate(reg).forget();
             modifyReg(reg);
         } else {
             reg = allocReg();
@@ -1954,17 +1675,17 @@ FrameState::ownRegForData(FrameEntry *fe
     if (fe->isCopy()) {
         /* For now, just do an extra move. The reg must be mutable. */
         FrameEntry *backing = fe->copyOf();
         if (!backing->data.inRegister()) {
             JS_ASSERT(backing->data.inMemory());
             tempRegForData(backing);
         }
 
-        if (a->freeRegs.empty(Registers::AvailRegs)) {
+        if (freeRegs.empty(Registers::AvailRegs)) {
             /* For now... just steal the register that already exists. */
             ensureDataSynced(backing, masm);
             reg = backing->data.reg();
             backing->data.setMemory();
             regstate(reg).forget();
             modifyReg(reg);
         } else {
             reg = allocReg();
@@ -2039,18 +1760,18 @@ FrameState::ensureDouble(FrameEntry *fe)
 
     FrameEntry *backing = fe;
     if (fe->isCopy()) {
         /* Forget this entry is a copy.  We are converting this entry, not the backing. */
         backing = fe->copyOf();
         fe->clear();
     } else if (fe->isCopied()) {
         /* Sync and forget any copies of this entry. */
-        for (uint32 i = fe->trackerIndex() + 1; i < a->tracker.nentries; i++) {
-            FrameEntry *nfe = a->tracker[i];
+        for (uint32 i = fe->trackerIndex() + 1; i < tracker.nentries; i++) {
+            FrameEntry *nfe = tracker[i];
             if (!deadEntry(nfe) && nfe->isCopy() && nfe->copyOf() == fe) {
                 syncFe(nfe);
                 nfe->resetSynced();
             }
         }
     }
 
     FPRegisterID fpreg = allocFPReg();
@@ -2107,29 +1828,29 @@ FrameState::ensureInteger(FrameEntry *fe
     fe->data.unsync();
     fe->type.unsync();
 }
 
 void
 FrameState::ensureInMemoryDoubles(Assembler &masm)
 {
     JS_ASSERT(!a->parent);
-    for (uint32 i = 0; i < a->tracker.nentries; i++) {
-        FrameEntry *fe = a->tracker[i];
+    for (uint32 i = 0; i < tracker.nentries; i++) {
+        FrameEntry *fe = tracker[i];
         if (!deadEntry(fe) && fe->isType(JSVAL_TYPE_DOUBLE) &&
             !fe->isCopy() && !fe->isConstant()) {
             masm.ensureInMemoryDouble(addressOf(fe));
         }
     }
 }
 
 void
-FrameState::pushCopyOf(uint32 index)
+FrameState::pushCopyOf(FrameEntry *backing)
 {
-    FrameEntry *backing = entryFor(index);
+    JS_ASSERT(backing->isTracked());
     FrameEntry *fe = rawPush();
     fe->resetUnsynced();
     if (backing->isConstant()) {
         fe->setConstant(Jsvalify(backing->getValue()));
     } else {
         fe->type.invalidate();
         fe->data.invalidate();
         if (backing->isCopy()) {
@@ -2151,18 +1872,18 @@ FrameEntry *
 FrameState::walkTrackerForUncopy(FrameEntry *original)
 {
     /* Temporary entries are immutable and should never be uncopied. */
     JS_ASSERT(!isTemporary(original));
 
     uint32 firstCopy = InvalidIndex;
     FrameEntry *bestFe = NULL;
     uint32 ncopies = 0;
-    for (uint32 i = original->trackerIndex() + 1; i < a->tracker.nentries; i++) {
-        FrameEntry *fe = a->tracker[i];
+    for (uint32 i = original->trackerIndex() + 1; i < tracker.nentries; i++) {
+        FrameEntry *fe = tracker[i];
         if (deadEntry(fe))
             continue;
         if (fe->isCopy() && fe->copyOf() == original) {
             if (firstCopy == InvalidIndex) {
                 firstCopy = i;
                 bestFe = fe;
             } else if (fe < bestFe) {
                 bestFe = fe;
@@ -2180,19 +1901,19 @@ FrameState::walkTrackerForUncopy(FrameEn
     JS_ASSERT(firstCopy != InvalidIndex);
     JS_ASSERT(bestFe);
     JS_ASSERT(bestFe > original);
 
     /* Mark all extra copies as copies of the new backing index. */
     bestFe->setCopyOf(NULL);
     if (ncopies > 1) {
         bestFe->setCopied();
-        for (uint32 i = firstCopy; i < a->tracker.nentries; i++) {
-            FrameEntry *other = a->tracker[i];
-            if (other >= sp || other == bestFe)
+        for (uint32 i = firstCopy; i < tracker.nentries; i++) {
+            FrameEntry *other = tracker[i];
+            if (deadEntry(other) || other == bestFe)
                 continue;
 
             /* The original must be tracked before copies. */
             JS_ASSERT(other != original);
 
             if (!other->isCopy() || other->copyOf() != original)
                 continue;
 
@@ -2217,19 +1938,19 @@ FrameState::walkTrackerForUncopy(FrameEn
 
 FrameEntry *
 FrameState::walkFrameForUncopy(FrameEntry *original)
 {
     FrameEntry *bestFe = NULL;
     uint32 ncopies = 0;
 
     /* It's only necessary to visit as many FEs are being tracked. */
-    uint32 maxvisits = a->tracker.nentries;
-
-    for (FrameEntry *fe = original + 1; fe < sp && maxvisits; fe++) {
+    uint32 maxvisits = tracker.nentries;
+
+    for (FrameEntry *fe = original + 1; fe < a->sp && maxvisits; fe++) {
         if (!fe->isTracked())
             continue;
 
         maxvisits--;
 
         if (fe->isCopy() && fe->copyOf() == original) {
             if (!bestFe) {
                 bestFe = fe;
@@ -2273,17 +1994,17 @@ FrameState::uncopy(FrameEntry *original)
      * and select B, not D (see bug 583684).
      *
      * Note: |tracker.nentries <= (nslots + nargs)|. However, this walk is
      * sub-optimal if |tracker.nentries - original->trackerIndex() > sp - original|.
      * With large scripts this may be a problem worth investigating. Note that
      * the tracker is walked twice, so we multiply by 2 for pessimism.
      */
     FrameEntry *fe;
-    if ((a->tracker.nentries - original->trackerIndex()) * 2 > uint32(sp - original))
+    if ((tracker.nentries - original->trackerIndex()) * 2 > uint32(a->sp - original))
         fe = walkFrameForUncopy(original);
     else
         fe = walkTrackerForUncopy(original);
     if (!fe) {
         original->setNotCopied();
         return NULL;
     }
 
@@ -2327,42 +2048,42 @@ FrameState::uncopy(FrameEntry *original)
     return fe;
 }
 
 bool
 FrameState::hasOnlyCopy(FrameEntry *backing, FrameEntry *fe)
 {
     JS_ASSERT(backing->isCopied() && fe->copyOf() == backing);
 
-    for (uint32 i = backing->trackerIndex() + 1; i < a->tracker.nentries; i++) {
-        FrameEntry *nfe = a->tracker[i];
+    for (uint32 i = backing->trackerIndex() + 1; i < tracker.nentries; i++) {
+        FrameEntry *nfe = tracker[i];
         if (nfe != fe && !deadEntry(nfe) && nfe->isCopy() && nfe->copyOf() == backing)
             return false;
     }
 
     return true;
 }
 
 void
 FrameState::separateBinaryEntries(FrameEntry *lhs, FrameEntry *rhs)
 {
-    JS_ASSERT(lhs == sp - 2 && rhs == sp - 1);
+    JS_ASSERT(lhs == a->sp - 2 && rhs == a->sp - 1);
     if (rhs->isCopy() && rhs->copyOf() == lhs) {
         syncAndForgetFe(rhs);
         syncAndForgetFe(lhs);
         uncopy(lhs);
     }
 }
 
 void
 FrameState::storeLocal(uint32 n, bool popGuaranteed, bool fixedType)
 {
     FrameEntry *local = getLocal(n);
 
-    if (analysis->slotEscapes(indexOfFe(local))) {
+    if (a->analysis->slotEscapes(entrySlot(local))) {
         JS_ASSERT(local->data.inMemory());
         storeTo(peek(-1), addressOf(local), popGuaranteed);
         return;
     }
 
     storeTop(local, popGuaranteed);
 
     if (loop)
@@ -2374,17 +2095,17 @@ FrameState::storeLocal(uint32 n, bool po
 
 void
 FrameState::storeArg(uint32 n, bool popGuaranteed)
 {
     // Note that args are always immediately synced, because they can be
     // aliased (but not written to) via f.arguments.
     FrameEntry *arg = getArg(n);
 
-    if (analysis->slotEscapes(indexOfFe(arg))) {
+    if (a->analysis->slotEscapes(entrySlot(arg))) {
         JS_ASSERT(arg->data.inMemory());
         storeTo(peek(-1), addressOf(arg), popGuaranteed);
         return;
     }
 
     storeTop(arg, popGuaranteed);
 
     if (loop)
@@ -2399,18 +2120,17 @@ FrameState::forgetEntry(FrameEntry *fe)
     if (fe->isCopied()) {
         uncopy(fe);
         if (!fe->isCopied())
             forgetAllRegs(fe);
     } else {
         forgetAllRegs(fe);
     }
 
-    if (fe >= spBase && fe < sp)
-        a->extraArray[fe - spBase].reset();
+    extraArray[fe - entries].reset();
 }
 
 void
 FrameState::storeTop(FrameEntry *target, bool popGuaranteed)
 {
     JS_ASSERT(!isTemporary(target));
 
     /* Detect something like (x = x) which is a no-op. */
@@ -2486,18 +2206,18 @@ FrameState::storeTop(FrameEntry *target,
          * 
          * Because of |let| expressions, it's kind of hard to really know
          * whether a region on the stack will be popped all at once. Bleh!
          *
          * This should be rare except in browser code (and maybe even then),
          * but even so there's a quick workaround. We take all copies of the
          * backing fe, and redirect them to be copies of the destination.
          */
-        for (uint32 i = backing->trackerIndex() + 1; i < a->tracker.nentries; i++) {
-            FrameEntry *fe = a->tracker[i];
+        for (uint32 i = backing->trackerIndex() + 1; i < tracker.nentries; i++) {
+            FrameEntry *fe = tracker[i];
             if (deadEntry(fe))
                 continue;
             if (fe->isCopy() && fe->copyOf() == backing) {
                 fe->setCopyOf(target);
                 copied = true;
             }
         }
     }
@@ -2559,27 +2279,27 @@ FrameState::storeTop(FrameEntry *target,
      */
     if (copied || !popGuaranteed)
         target->setCopied();
 }
 
 void
 FrameState::shimmy(uint32 n)
 {
-    JS_ASSERT(sp - n >= spBase);
+    JS_ASSERT(a->sp - n >= a->spBase);
     int32 depth = 0 - int32(n);
     storeTop(peek(depth - 1), true);
     popn(n);
 }
 
 void
 FrameState::shift(int32 n)
 {
     JS_ASSERT(n < 0);
-    JS_ASSERT(sp + n - 1 >= spBase);
+    JS_ASSERT(a->sp + n - 1 >= a->spBase);
     storeTop(peek(n - 1), true);
     pop();
 }
 
 void
 FrameState::forgetKnownDouble(FrameEntry *fe)
 {
     /*
@@ -2690,17 +2410,17 @@ FrameState::allocForSameBinary(FrameEntr
 
     if (!fe->isTypeKnown()) {
         alloc.lhsType = tempRegForType(fe);
         pinReg(alloc.lhsType.reg());
     }
 
     alloc.lhsData = tempRegForData(fe);
 
-    if (!a->freeRegs.empty(Registers::AvailRegs)) {
+    if (!freeRegs.empty(Registers::AvailRegs)) {
         alloc.result = allocReg();
         masm.move(alloc.lhsData.reg(), alloc.result);
         alloc.lhsNeedsRemat = false;
     } else {
         alloc.result = alloc.lhsData.reg();
         takeReg(alloc.result);
         alloc.lhsNeedsRemat = true;
     }
@@ -2762,36 +2482,36 @@ FrameState::binaryEntryLive(FrameEntry *
      * top two stack entries and special cases LOCALINC/ARGINC and friends, which fuse
      * a binary operation before writing over the local/arg.
      */
     JS_ASSERT(cx->typeInferenceEnabled());
 
     if (deadEntry(fe, 2))
         return false;
 
-    switch (JSOp(*PC)) {
+    switch (JSOp(*a->PC)) {
       case JSOP_INCLOCAL:
       case JSOP_DECLOCAL:
       case JSOP_LOCALINC:
       case JSOP_LOCALDEC:
-        if (fe - locals == (int) GET_SLOTNO(PC))
+        if (fe - a->locals == (int) GET_SLOTNO(a->PC))
             return false;
       case JSOP_INCARG:
       case JSOP_DECARG:
       case JSOP_ARGINC:
       case JSOP_ARGDEC:
-        if (fe - args == (int) GET_SLOTNO(PC))
+        if (fe - a->args == (int) GET_SLOTNO(a->PC))
             return false;
       default:;
     }
 
-    JS_ASSERT(fe != callee_);
+    JS_ASSERT(fe != a->callee_);
 
     /* Caller must check that no copies are invalidated by rewriting the entry. */
-    return fe >= spBase || variableLive(fe, PC);
+    return fe >= a->spBase || fe < a->callee_ || variableLive(fe, a->PC);
 }
 
 void
 FrameState::allocForBinary(FrameEntry *lhs, FrameEntry *rhs, JSOp op, BinaryAlloc &alloc,
                            bool needsResult)
 {
     FrameEntry *backingLeft = lhs;
     FrameEntry *backingRight = rhs;
@@ -2912,17 +2632,17 @@ FrameState::allocForBinary(FrameEntry *l
         (op == JSOP_ADD || (op == JSOP_SUB && backingRight->isConstant())) &&
         (lhs == backingLeft || hasOnlyCopy(backingLeft, lhs))) {
         alloc.result = backingLeft->data.reg();
         alloc.undoResult = true;
         alloc.resultHasRhs = false;
         goto skip;
     }
 
-    if (!a->freeRegs.empty(Registers::AvailRegs)) {
+    if (!freeRegs.empty(Registers::AvailRegs)) {
         /* Free reg - just grab it. */
         alloc.result = allocReg();
         if (!alloc.lhsData.isSet()) {
             JS_ASSERT(alloc.rhsData.isSet());
             JS_ASSERT(commu);
             masm.move(alloc.rhsData.reg(), alloc.result);
             alloc.resultHasRhs = true;
         } else {
@@ -3062,40 +2782,43 @@ FrameState::allocTemporary()
     fe->lastLoop = 0;
     fe->temporary = true;
     return fe - temporaries;
 }
 
 void
 FrameState::clearTemporaries()
 {
+    JS_ASSERT(!a->parent);
+
     for (FrameEntry *fe = temporaries; fe < temporariesTop; fe++) {
         if (!fe->isTracked())
             continue;
         forgetAllRegs(fe);
         fe->resetSynced();
     }
 
     temporariesTop = temporaries;
 }
 
 Vector<TemporaryCopy> *
 FrameState::getTemporaryCopies()
 {
+    /* :XXX: handle OOM */
     Vector<TemporaryCopy> *res = NULL;
 
     for (FrameEntry *fe = temporaries; fe < temporariesTop; fe++) {
         if (!fe->isTracked())
             continue;
         if (fe->isCopied()) {
-            for (uint32 i = fe->trackerIndex() + 1; i < a->tracker.nentries; i++) {
-                FrameEntry *nfe = a->tracker[i];
+            for (uint32 i = fe->trackerIndex() + 1; i < tracker.nentries; i++) {
+                FrameEntry *nfe = tracker[i];
                 if (!deadEntry(nfe) && nfe->isCopy() && nfe->copyOf() == fe) {
                     if (!res)
                         res = cx->new_< Vector<TemporaryCopy> >(cx);
-                    res->append(TemporaryCopy(addressOf(nfe), addressOf(fe)));  /* :XXX: handle OOM */
+                    res->append(TemporaryCopy(addressOf(nfe), addressOf(fe)));
                 }
             }
         }
     }
 
     return res;
 }
--- a/js/src/methodjit/FrameState.h
+++ b/js/src/methodjit/FrameState.h
@@ -378,24 +378,23 @@ class FrameState
     inline void leaveBlock(uint32 n);
 
     // Pushes a copy of a slot (formal argument, local variable, or stack slot)
     // onto the operation stack.
     void pushLocal(uint32 n);
     void pushArg(uint32 n);
     void pushCallee();
     void pushThis();
-    void pushTemporary(FrameEntry *fe);
+    void pushCopyOf(FrameEntry *fe);
     inline void learnThisIsObject(bool unsync = true);
 
     inline FrameEntry *getStack(uint32 slot);
     inline FrameEntry *getLocal(uint32 slot);
     inline FrameEntry *getArg(uint32 slot);
-
-    inline FrameEntry *getOrTrack(uint32 index);
+    inline FrameEntry *getSlotEntry(uint32 slot);
 
     /*
      * Allocates a temporary register for a FrameEntry's type. The register
      * can be spilled or clobbered by the frame. The compiler may only operate
      * on it temporarily, and must take care not to clobber it.
      */
     inline RegisterID tempRegForType(FrameEntry *fe);
 
@@ -655,25 +654,35 @@ class FrameState
     }
 
     /*
      * Discard the entire framestate forcefully.
      */
     void discardFrame();
 
     /*
-     * Tries to syncs and shuffle registers in accordance with the register state
+     * Make a copy of the current frame state, and restore from that snapshot.
+     * The stack depth must match between the snapshot and restore points.
+     */
+    FrameEntry *snapshotState();
+    void restoreFromSnapshot(FrameEntry *snapshot);
+
+    /*
+     * Tries to sync and shuffle registers in accordance with the register state
      * at target, constructing that state if necessary. Forgets all constants and
      * copies, and nothing can be pinned. Keeps the top Uses in registers; if Uses
      * is non-zero the state may not actually be consistent with target.
      */
     bool syncForBranch(jsbytecode *target, Uses uses);
+    void syncForAllocation(RegisterAllocation *alloc, bool inlineReturn, Uses uses);
 
-    /* Discards the current frame state and updates to the register state at target. */
-    bool discardForJoin(jsbytecode *target, uint32 stackDepth);
+    /* Discards the current frame state and updates to a new register allocation. */
+    bool discardForJoin(RegisterAllocation *&alloc, uint32 stackDepth);
+
+    RegisterAllocation * computeAllocation(jsbytecode *target);
 
     /* Return whether the register state is consistent with that at target. */
     bool consistentRegisters(jsbytecode *target);
 
     /*
      * Load all registers to update from either the current register state (if synced
      * is unset) or a synced state (if synced is set) to target.
      */
@@ -700,18 +709,18 @@ class FrameState
     struct StackEntryExtra {
         bool initArray;
         JSObject *initObject;
         types::TypeSet *types;
         JSAtom *name;
         void reset() { PodZero(this); }
     };
     StackEntryExtra& extra(const FrameEntry *fe) {
-        JS_ASSERT(fe >= spBase && fe < sp);
-        return a->extraArray[fe - spBase];
+        JS_ASSERT(fe >= a->spBase && fe < a->sp);
+        return extraArray[fe - entries];
     }
     StackEntryExtra& extra(uint32 slot) { return extra(entries + slot); }
 
     /*
      * Helper function. Tests if a slot's type is null. Condition must
      * be Equal or NotEqual.
      */
     inline Jump testNull(Assembler::Condition cond, FrameEntry *fe);
@@ -802,45 +811,42 @@ class FrameState
     inline void syncAt(int32 n);
 
     /*
      * If the frameentry is a copy, give it its own registers.
      * This may only be called on the topmost fe.
      */
     inline void giveOwnRegs(FrameEntry *fe);
 
-    uint32 stackDepth() const { return sp - spBase; }
+    uint32 stackDepth() const { return a->sp - a->spBase; }
 
     /*
      * The stack depth of the current frame plus any locals and space
      * for inlined frames, i.e. the difference between the end of the
      * current fp and sp.
      */
-    uint32 totalDepth() const { return a->depth + script->nfixed + stackDepth(); }
+    uint32 totalDepth() const { return a->depth + a->script->nfixed + stackDepth(); }
 
     // Returns the number of entries in the frame, that is:
     //   2 for callee, this +
     //   nargs +
     //   nfixed +
     //   currently pushed stack slots
-    uint32 frameSlots() const { return uint32(sp - entries); }
+    uint32 frameSlots() const { return uint32(a->sp - a->callee_); }
 
 #ifdef DEBUG
     void assertValidRegisterState() const;
 #endif
 
     // Return an address, relative to the StackFrame, that represents where
     // this FrameEntry is stored in memory. Note that this is its canonical
     // address, not its backing store. There is no guarantee that the memory
     // is coherent.
-    Address addressOf(const FrameEntry *fe) const { return addressOf(fe, a); }
-    Address addressOf(uint32 slot) const { return addressOf(entries + slot); }
-    int32 frameOffset(const FrameEntry *fe) const {
-        return frameOffset(fe, a) + (a->depth * sizeof(Value));
-    }
+    Address addressOf(const FrameEntry *fe) const;
+    Address addressOf(uint32 slot) const { return addressOf(a->callee_ + slot); }
 
     // Returns an address, relative to the StackFrame, that represents where
     // this FrameEntry is backed in memory. This is not necessarily its
     // canonical address, but the address for which the payload has been synced
     // to memory. The caller guarantees that the payload has been synced.
     Address addressForDataRemat(const FrameEntry *fe) const;
 
     // Inside an inline frame, the address for the return value in the caller.
@@ -868,38 +874,42 @@ class FrameState
      * call shift(-2).
      */
     void shift(int32 n);
 
     inline void setInTryBlock(bool inTryBlock) {
         this->inTryBlock = inTryBlock;
     }
 
-    inline uint32 regsInUse() const { return Registers::AvailRegs & ~a->freeRegs.freeMask; }
+    inline uint32 regsInUse() const { return Registers::AvailRegs & ~freeRegs.freeMask; }
 
-    void setPC(jsbytecode *PC) { this->PC = PC; }
+    void setPC(jsbytecode *PC) { a->PC = PC; }
     void setLoop(LoopState *loop) { this->loop = loop; }
 
-    void getUnsyncedEntries(uint32 *pdepth, Vector<UnsyncedEntry> *unsyncedEntries);
+    void pruneDeadEntries();
 
     bool pushActiveFrame(JSScript *script, uint32 argc);
     void popActiveFrame();
 
-    void discardLocalRegisters();
-    void evictInlineModifiedRegisters(Registers regs);
-    void syncParentRegistersInMask(Assembler &masm, uint32 mask, bool update) const;
-    void restoreParentRegistersInMask(Assembler &masm, uint32 mask, bool update) const;
-    Registers getParentRegs() const { return a->parentRegs; }
+    uint32 entrySlot(const FrameEntry *fe) const {
+        return frameSlot(a, fe);
+    }
 
-    void tryCopyRegister(FrameEntry *fe, FrameEntry *callStart);
-    Registers getTemporaryCallRegisters(FrameEntry *callStart) const;
+    uint32 outerSlot(const FrameEntry *fe) const {
+        ActiveFrame *na = a;
+        while (na->parent) { na = na->parent; }
+        return frameSlot(na, fe);
+    }
 
-    uint32 indexOfFe(const FrameEntry *fe) const {
-        JS_ASSERT(uint32(fe - entries) < feLimit(script));
-        return uint32(fe - entries);
+    bool isOuterSlot(const FrameEntry *fe) const {
+        if (isTemporary(fe))
+            return true;
+        ActiveFrame *na = a;
+        while (na->parent) { na = na->parent; }
+        return fe < na->spBase && fe != na->callee_;
     }
 
 #ifdef DEBUG
     const char * entryName(const FrameEntry *fe) const;
     void dumpAllocation(RegisterAllocation *alloc);
 #else
     const char * entryName(const FrameEntry *fe) const { return NULL; }
 #endif
@@ -910,16 +920,18 @@ class FrameState
 
     uint32 allocTemporary();  /* -1 if limit reached. */
     void clearTemporaries();
     inline FrameEntry *getTemporary(uint32 which);
 
     /* Return NULL or a new vector with all current copies of temporaries. */
     Vector<TemporaryCopy> *getTemporaryCopies();
 
+    inline void syncAndForgetFe(FrameEntry *fe, bool markSynced = false);
+
   private:
     inline AnyRegisterID allocAndLoadReg(FrameEntry *fe, bool fp, RematInfo::RematType type);
     inline void forgetReg(AnyRegisterID reg);
     AnyRegisterID evictSomeReg(uint32 mask);
     void evictReg(AnyRegisterID reg);
     inline FrameEntry *rawPush();
     inline void addToTracker(FrameEntry *fe);
 
@@ -927,23 +939,23 @@ class FrameState
     inline void ensureFeSynced(const FrameEntry *fe, Assembler &masm) const;
     inline void ensureTypeSynced(const FrameEntry *fe, Assembler &masm) const;
     inline void ensureDataSynced(const FrameEntry *fe, Assembler &masm) const;
 
     /* Guarantee sync, even if register allocation is required, and set sync. */
     inline void syncFe(FrameEntry *fe);
     inline void syncType(FrameEntry *fe);
     inline void syncData(FrameEntry *fe);
-    inline void syncAndForgetFe(FrameEntry *fe);
 
     inline FrameEntry *getCallee();
     inline FrameEntry *getThis();
+    inline FrameEntry *getOrTrack(uint32 index);
+
     inline void forgetAllRegs(FrameEntry *fe);
     inline void swapInTracker(FrameEntry *lhs, FrameEntry *rhs);
-    void pushCopyOf(uint32 index);
 #if defined JS_NUNBOX32
     void syncFancy(Assembler &masm, Registers avail, FrameEntry *resumeAt,
                    FrameEntry *bottom) const;
 #endif
     inline bool tryFastDoubleLoad(FrameEntry *fe, FPRegisterID fpReg, Assembler &masm) const;
     void resetInternalState();
 
     /*
@@ -965,170 +977,127 @@ class FrameState
     bool isEntryCopied(FrameEntry *fe) const;
 
     /*
      * All registers in the FE are forgotten. If it is copied, it is uncopied
      * beforehand.
      */
     void forgetEntry(FrameEntry *fe);
 
-    FrameEntry *entryFor(uint32 index) const {
-        JS_ASSERT(entries[index].isTracked());
-        return &entries[index];
-    }
-
-    uint32 indexOf(int32 depth) const {
-        JS_ASSERT(uint32((sp + depth) - entries) < feLimit(script));
-        return uint32((sp + depth) - entries);
-    }
-
     /* Stack and temporary entries whose contents should be disregarded. */
     bool deadEntry(const FrameEntry *fe, unsigned uses = 0) const {
-        return (fe >= (sp - uses) && fe < temporaries) || fe >= temporariesTop;
-    }
-
-    static uint32 feLimit(JSScript *script) {
-        return script->nslots + 2 + (script->fun ? script->fun->nargs : 0) + TEMPORARY_LIMIT;
+        return (fe >= (a->sp - uses) && fe < temporaries) || fe >= temporariesTop;
     }
 
     RegisterState & regstate(AnyRegisterID reg) {
         JS_ASSERT(reg.reg_ < Registers::TotalAnyRegisters);
-        return a->regstate_[reg.reg_];
+        return regstate_[reg.reg_];
     }
 
     const RegisterState & regstate(AnyRegisterID reg) const {
         JS_ASSERT(reg.reg_ < Registers::TotalAnyRegisters);
-        return a->regstate_[reg.reg_];
+        return regstate_[reg.reg_];
     }
 
     AnyRegisterID bestEvictReg(uint32 mask, bool includePinned) const;
 
     inline analyze::Lifetime * variableLive(FrameEntry *fe, jsbytecode *pc) const;
     inline bool binaryEntryLive(FrameEntry *fe) const;
-    RegisterAllocation * computeAllocation(jsbytecode *target);
     void relocateReg(AnyRegisterID reg, RegisterAllocation *alloc, Uses uses);
 
+    bool isThis(const FrameEntry *fe) const {
+        return fe == a->this_;
+    }
+
     bool isArg(const FrameEntry *fe) const {
-        return script->fun && fe >= args && fe - args < script->fun->nargs;
+        return a->script->fun && fe >= a->args && fe - a->args < a->script->fun->nargs;
     }
 
     bool isLocal(const FrameEntry *fe) const {
-        return fe >= locals && fe - locals < script->nfixed;
+        return fe >= a->locals && fe - a->locals < a->script->nfixed;
     }
 
     bool isTemporary(const FrameEntry *fe) const {
         JS_ASSERT_IF(fe >= temporaries, fe < temporariesTop);
         return fe >= temporaries;
     }
 
     int32 frameOffset(const FrameEntry *fe, ActiveFrame *a) const;
     Address addressOf(const FrameEntry *fe, ActiveFrame *a) const;
+    uint32 frameSlot(ActiveFrame *a, const FrameEntry *fe) const;
 
-    void updateActiveFrame();
-    void syncInlinedEntry(FrameEntry *fe, const FrameEntry *parent);
     void associateReg(FrameEntry *fe, RematInfo::RematType type, AnyRegisterID reg);
 
     inline void modifyReg(AnyRegisterID reg);
 
     MaybeJump guardArrayLengthBase(FrameEntry *obj, Int32Key key);
 
-    void syncParentRegister(Assembler &masm, AnyRegisterID reg) const;
-    void restoreParentRegister(Assembler &masm, AnyRegisterID reg) const;
-
   private:
     JSContext *cx;
     Assembler &masm;
     Compiler &cc;
     StubCompiler &stubcc;
 
     /* State for the active stack frame. */
 
     struct ActiveFrame {
+        ActiveFrame() { PodZero(this); }
+
         ActiveFrame *parent;
-        jsbytecode *parentPC;
-        FrameEntry *parentSP;
-        uint32 parentArgc;
 
-        JSScript *script;
+        /* Number of values between the start of the outer frame and the start of this frame. */
         uint32 depth;
 
-        /* All unallocated registers. */
-        Registers freeRegs;
+        JSScript *script;
+        jsbytecode *PC;
+        analyze::ScriptAnalysis *analysis;
 
-        /*
-         * Registers which are in use by parents and still hold their original value.
-         * These may or may not be in freeRegs: a parent register is allocated to
-         * an fe if that fe copies an entry in the parent (i.e. an argument or this).
-         */
-        Registers parentRegs;
-
-        /* Cache of FrameEntry objects. */
-        FrameEntry *entries;
+        /* Indexes into the main FrameEntry buffer of entries for this frame. */
         FrameEntry *callee_;
         FrameEntry *this_;
         FrameEntry *args;
         FrameEntry *locals;
-
-        /* Vector of tracked slot indexes. */
-        Tracker tracker;
-
-        /* Compiler-owned metadata for the stack contents. */
-        StackEntryExtra *extraArray;
-
-        /*
-         * Register ownership state. This can't be used alone; to find whether an
-         * entry is active, you must check the allocated registers.
-         */
-        RegisterState regstate_[Registers::TotalAnyRegisters];
-
-        const RegisterState & regstate(AnyRegisterID reg) const {
-            JS_ASSERT(reg.reg_ < Registers::TotalAnyRegisters);
-            return regstate_[reg.reg_];
-        }
-
-#if defined JS_NUNBOX32
-        mutable ImmutableSync reifier;
-#endif
+        FrameEntry *spBase;
+        FrameEntry *sp;
     };
     ActiveFrame *a;
 
-    /* State derived/copied from the active frame. :XXX: remove? */
+    /* Common buffer of frame entries. */
+    FrameEntry *entries;
+    uint32 nentries;
 
-    JSScript *script;
-    analyze::ScriptAnalysis *analysis;
+    /* Compiler-owned metadata for stack contents. */
+    StackEntryExtra *extraArray;
 
-    FrameEntry *entries;
-    FrameEntry *callee_;
-    FrameEntry *this_;
+    /* Vector of tracked slot indexes. */
+    Tracker tracker;
 
-    /* Base pointer for arguments. */
-    FrameEntry *args;
-
-    /* Base pointer for local variables. */
-    FrameEntry *locals;
+#if defined JS_NUNBOX32
+    mutable ImmutableSync reifier;
+#endif
 
-    /* Base pointer for the stack. */
-    FrameEntry *spBase;
+    /*
+     * Register ownership state. This can't be used alone; to find whether an
+     * entry is active, you must check the allocated registers.
+     */
+    RegisterState regstate_[Registers::TotalAnyRegisters];
 
-    /* Dynamic stack pointer. */
-    FrameEntry *sp;
+    /* All unallocated registers. */
+    Registers freeRegs;
+
+    /* Stack of active loops. */
+    LoopState *loop;
 
     /*
      * Track state for analysis temporaries. The meaning of these temporaries
      * is opaque to the frame state, which just tracks where they are stored.
      */
     FrameEntry *temporaries;
     FrameEntry *temporariesTop;
 
-    /* Current PC, for managing register allocation. */
-    jsbytecode *PC;
-
-    /* Stack of active loops. */
-    LoopState *loop;
-
     bool inTryBlock;
 };
 
 /*
  * Register allocation overview. We want to allocate registers at the same time
  * as we emit code, in a single forward pass over the script. This is good both
  * for compilation speed and for design simplicity; we allocate registers for
  * variables and temporaries as the compiler needs them. To get a good allocation,
@@ -1159,34 +1128,31 @@ struct RegisterAllocation {
      * at the start of a loop body until after generating code for the entire loop,
      * so we can decide on which variables to carry around the loop after seeing
      * them accessed early on in the body.
      */
     static const uint32 LOOP_REGISTER = uint32(-2);
 
     /*
      * Assignment of registers to payloads. Type tags are always in memory,
-     * except for known doubles in FP registers.
+     * except for known doubles in FP registers. These are indexes into the
+     * frame's entries[] buffer, not slots.
      */
     uint32 regstate_[Registers::TotalAnyRegisters];
 
     /* Mask for regstate entries indicating if the slot is synced. */
     static const uint32 SYNCED = 0x80000000;
 
     uint32 & regstate(AnyRegisterID reg) {
         JS_ASSERT(reg.reg_ < Registers::TotalAnyRegisters);
         return regstate_[reg.reg_];
     }
 
-    /* Registers used for entries in parent frames which still hold their original value. */
-    Registers parentRegs;
-
   public:
     RegisterAllocation(bool forLoop)
-        : parentRegs(0)
     {
         uint32 entry = forLoop ? (uint32) LOOP_REGISTER : (uint32) UNASSIGNED_REGISTER;
         for (unsigned i = 0; i < Registers::TotalAnyRegisters; i++) {
             AnyRegisterID reg = AnyRegisterID::fromRaw(i);
             bool avail = Registers::maskReg(reg) & Registers::AvailAnyRegs;
             regstate_[i] = avail ? entry : UNASSIGNED_REGISTER;
         }
     }
@@ -1199,39 +1165,31 @@ struct RegisterAllocation {
         return regstate(reg) == LOOP_REGISTER;
     }
 
     bool synced(AnyRegisterID reg) {
         JS_ASSERT(assigned(reg));
         return regstate(reg) & SYNCED;
     }
 
-    uint32 slot(AnyRegisterID reg) {
+    uint32 index(AnyRegisterID reg) {
         JS_ASSERT(assigned(reg));
         return regstate(reg) & ~SYNCED;
     }
 
-    void set(AnyRegisterID reg, uint32 slot, bool synced) {
-        JS_ASSERT(slot != LOOP_REGISTER && slot != UNASSIGNED_REGISTER);
-        regstate(reg) = slot | (synced ? SYNCED : 0);
+    void set(AnyRegisterID reg, uint32 index, bool synced) {
+        JS_ASSERT(index != LOOP_REGISTER && index != UNASSIGNED_REGISTER);
+        regstate(reg) = index | (synced ? SYNCED : 0);
     }
 
     void setUnassigned(AnyRegisterID reg) {
         regstate(reg) = UNASSIGNED_REGISTER;
     }
 
-    void setParentRegs(Registers regs) {
-        parentRegs = regs;
-    }
-
-    Registers getParentRegs() { return parentRegs; }
-
     bool synced() {
-        if (parentRegs.freeMask != 0)
-            return false;
         for (unsigned i = 0; i < Registers::TotalAnyRegisters; i++) {
             if (assigned(AnyRegisterID::fromRaw(i)))
                 return false;
         }
         return true;
     }
 
     void clearLoops() {
@@ -1240,17 +1198,17 @@ struct RegisterAllocation {
             if (loop(reg))
                 setUnassigned(reg);
         }
     }
 
     bool hasAnyReg(uint32 n) {
         for (unsigned i = 0; i < Registers::TotalAnyRegisters; i++) {
             AnyRegisterID reg = AnyRegisterID::fromRaw(i);
-            if (assigned(reg) && slot(reg) == n)
+            if (assigned(reg) && index(reg) == n)
                 return true;
         }
         return false;
     }
 };
 
 class AutoPreserveAcrossSyncAndKill;
 
--- a/js/src/methodjit/ImmutableSync.cpp
+++ b/js/src/methodjit/ImmutableSync.cpp
@@ -141,17 +141,17 @@ ImmutableSync::allocReg()
 
     return reg;
 }
 
 inline ImmutableSync::SyncEntry &
 ImmutableSync::entryFor(FrameEntry *fe)
 {
     JS_ASSERT(fe <= top || frame->isTemporary(fe));
-    SyncEntry &e = entries[frame->indexOfFe(fe)];
+    SyncEntry &e = entries[fe - frame->entries];
     if (e.generation != generation)
         e.reset(generation);
     return e;
 }
 
 void
 ImmutableSync::sync(FrameEntry *fe)
 {
--- a/js/src/methodjit/LoopState.cpp
+++ b/js/src/methodjit/LoopState.cpp
@@ -73,54 +73,69 @@ SafeMul(int32 one, int32 two, int32 *res
     *res = one * two;
     int64 ores = (int64)one * (int64)two;
     if (ores == (int64)*res)
         return true;
     JaegerSpew(JSpew_Analysis, "Overflow computing %d * %d\n", one, two);
     return false;
 }
 
-LoopState::LoopState(JSContext *cx, JSScript *script,
+LoopState::LoopState(JSContext *cx, analyze::CrossScriptSSA *ssa,
                      mjit::Compiler *cc, FrameState *frame)
-    : cx(cx), script(script), analysis(script->analysis(cx)), cc(*cc), frame(*frame),
-      lifetime(NULL), alloc(NULL), loopRegs(0), skipAnalysis(false),
+    : cx(cx), ssa(ssa),
+      outerScript(ssa->outerScript()), outerAnalysis(outerScript->analysis(cx)),
+      cc(*cc), frame(*frame),
+      lifetime(NULL), alloc(NULL), reachedEntryPoint(false), loopRegs(0), skipAnalysis(false),
       loopJoins(CompilerAllocPolicy(cx, *cc)),
       loopPatches(CompilerAllocPolicy(cx, *cc)),
       restoreInvariantCalls(CompilerAllocPolicy(cx, *cc)),
       invariantEntries(CompilerAllocPolicy(cx, *cc)),
-      outer(NULL), PC(NULL),
+      outer(NULL), temporariesStart(0),
       testLHS(UNASSIGNED), testRHS(UNASSIGNED),
-      testConstant(0), testLessEqual(false), testLength(false),
+      testConstant(0), testLessEqual(false),
       increments(CompilerAllocPolicy(cx, *cc)), unknownModset(false),
       growArrays(CompilerAllocPolicy(cx, *cc)),
       modifiedProperties(CompilerAllocPolicy(cx, *cc)),
       constrainedLoop(true)
 {
     JS_ASSERT(cx->typeInferenceEnabled());
 }
 
 bool
 LoopState::init(jsbytecode *head, Jump entry, jsbytecode *entryTarget)
 {
-    this->lifetime = analysis->getLoop(head);
+    this->lifetime = outerAnalysis->getLoop(head);
     JS_ASSERT(lifetime &&
-              lifetime->head == uint32(head - script->code) &&
-              lifetime->entry == uint32(entryTarget - script->code));
+              lifetime->head == uint32(head - outerScript->code) &&
+              lifetime->entry == uint32(entryTarget - outerScript->code));
 
     this->entry = entry;
 
     analyzeLoopTest();
     analyzeLoopIncrements();
-    analyzeLoopBody();
+    for (unsigned i = 0; i < ssa->numFrames(); i++) {
+        /* Only analyze this frame if it is nested within the loop itself. */
+        uint32 index = ssa->iterFrame(i).index;
+        if (index != CrossScriptSSA::OUTER_FRAME) {
+            unsigned pframe = index;
+            while (ssa->getFrame(pframe).parent != CrossScriptSSA::OUTER_FRAME)
+                pframe = ssa->getFrame(pframe).parent;
+            uint32 offset = ssa->getFrame(pframe).parentpc - outerScript->code;
+            JS_ASSERT(offset < outerScript->length);
+            if (offset < lifetime->head || offset > lifetime->backedge)
+                continue;
+        }
+        if (!analyzeLoopBody(index))
+            return false;
+    }
 
     if (testLHS != UNASSIGNED) {
-        JaegerSpew(JSpew_Analysis, "loop test at %u: %s %s%s %s + %d\n", lifetime->head,
+        JaegerSpew(JSpew_Analysis, "loop test at %u: %s %s %s + %d\n", lifetime->head,
                    frame.entryName(testLHS),
                    testLessEqual ? "<=" : ">=",
-                   testLength ? " length" : "",
                    (testRHS == UNASSIGNED) ? "" : frame.entryName(testRHS),
                    testConstant);
     }
 
     for (unsigned i = 0; i < increments.length(); i++) {
         JaegerSpew(JSpew_Analysis, "loop increment at %u for %s: %u\n", lifetime->head,
                    frame.entryName(increments[i].slot),
                    increments[i].offset);
@@ -132,72 +147,65 @@ LoopState::init(jsbytecode *head, Jump e
     }
 
     for (unsigned i = 0; i < modifiedProperties.length(); i++) {
         JaegerSpew(JSpew_Analysis, "loop modified property at %u: %s %s\n", lifetime->head,
                    modifiedProperties[i].object->name(),
                    types::TypeIdString(modifiedProperties[i].id));
     }
 
-    RegisterAllocation *&alloc = analysis->getAllocation(head);
+    RegisterAllocation *&alloc = outerAnalysis->getAllocation(head);
     JS_ASSERT(!alloc);
 
     alloc = ArenaNew<RegisterAllocation>(cx->compartment->pool, true);
     if (!alloc)
         return false;
 
     this->alloc = alloc;
     this->loopRegs = Registers::AvailAnyRegs;
-    this->PC = head;
 
     /*
      * Don't hoist bounds checks or loop invariant code in scripts that have
      * had indirect modification of their arguments.
      */
-    if (script->fun) {
-        types::ObjectKind kind = types::TypeSet::GetObjectKind(cx, script->fun->getType());
+    if (outerScript->fun) {
+        types::ObjectKind kind = types::TypeSet::GetObjectKind(cx, outerScript->fun->getType());
         if (kind != types::OBJECT_INLINEABLE_FUNCTION && kind != types::OBJECT_SCRIPTED_FUNCTION)
             this->skipAnalysis = true;
     }
 
     /*
      * Don't hoist bounds checks or loop invariant code in loops with safe
      * points in the middle, which the interpreter can join at directly without
      * performing hoisted bounds checks or doing initial computation of loop
      * invariant terms.
      */
     if (lifetime->hasSafePoints)
         this->skipAnalysis = true;
 
-    /*
-     * Don't do hoisting in loops with inner loops or calls. This is way too
-     * pessimistic and needs to get fixed.
-     */
-    if (lifetime->hasCallsLoops)
-        this->skipAnalysis = true;
-
     return true;
 }
 
 void
 LoopState::addJoin(unsigned index, bool script)
 {
     StubJoin r;
     r.index = index;
     r.script = script;
     loopJoins.append(r);
 }
 
 void
-LoopState::addInvariantCall(Jump jump, Label label, bool ool, unsigned patchIndex, bool patchCall)
+LoopState::addInvariantCall(Jump jump, Label label, bool ool, bool entry, unsigned patchIndex, bool patchCall)
 {
     RestoreInvariantCall call;
     call.jump = jump;
     call.label = label;
     call.ool = ool;
+    call.entry = entry;
     call.patchIndex = patchIndex;
     call.patchCall = patchCall;
     call.temporaryCopies = frame.getTemporaryCopies();
 
     restoreInvariantCalls.append(call);
 }
 
 void
@@ -243,18 +251,24 @@ LoopState::flushLoop(StubCompiler &stubc
                  * patch and any value for the call to return.
                  */
                 InvariantCodePatch *patch = cc.getInvariantPatch(call.patchIndex, call.patchCall);
                 patch->hasPatch = true;
                 patch->codePatch = masm.storePtrWithPatch(ImmPtr(NULL),
                                                           FrameAddress(offsetof(VMFrame, scratch)));
                 JS_STATIC_ASSERT(Registers::ReturnReg != Registers::ArgReg1);
                 masm.move(Registers::ReturnReg, Registers::ArgReg1);
-                masm.fallibleVMCall(true, JS_FUNC_TO_DATA_PTR(void *, stubs::InvariantFailure),
-                                    pc, NULL, 0);
+
+                if (call.entry) {
+                    masm.fallibleVMCall(true, JS_FUNC_TO_DATA_PTR(void *, stubs::InvariantFailure),
+                                        pc, NULL, 0);
+                } else {
+                    /* f.regs are already coherent, don't write new values to them. */
+                    masm.infallibleVMCall(JS_FUNC_TO_DATA_PTR(void *, stubs::InvariantFailure), -1);
+                }
             }
         }
     } else {
         for (unsigned i = 0; i < restoreInvariantCalls.length(); i++) {
             RestoreInvariantCall &call = restoreInvariantCalls[i];
             Assembler &masm = cc.getAssembler(call.ool);
             call.jump.linkTo(call.label, &masm);
         }
@@ -267,19 +281,23 @@ LoopState::clearLoopRegisters()
 {
     alloc->clearLoops();
     loopRegs = 0;
 }
 
 bool
 LoopState::loopInvariantEntry(uint32 slot)
 {
-    if (slot == analyze::CalleeSlot() || analysis->slotEscapes(slot))
+    /* Watch for loop temporaries. :XXX: this is really gross. */
+    if (slot - analyze::LocalSlot(outerScript, 0) >= outerScript->nslots)
+        return true;
+
+    if (slot == analyze::CalleeSlot() || outerAnalysis->slotEscapes(slot))
         return false;
-    return analysis->liveness(slot).firstWrite(lifetime) == uint32(-1);
+    return outerAnalysis->liveness(slot).firstWrite(lifetime) == uint32(-1);
 }
 
 inline bool
 LoopState::entryRedundant(const InvariantEntry &e0, const InvariantEntry &e1)
 {
     JS_ASSERT(e0.isCheck() && e1.isCheck());
 
     uint32 array0 = e0.u.check.arraySlot;
@@ -442,17 +460,17 @@ LoopState::addRangeCheck(uint32 valueSlo
 }
 
 void
 LoopState::setLoopReg(AnyRegisterID reg, FrameEntry *fe)
 {
     JS_ASSERT(alloc->loop(reg));
     loopRegs.takeReg(reg);
 
-    uint32 slot = frame.indexOfFe(fe);
+    uint32 slot = frame.outerSlot(fe);
     JaegerSpew(JSpew_Regalloc, "allocating loop register %s for %s\n",
                reg.name(), frame.entryName(fe));
 
     alloc->set(reg, slot, true);
 
     /*
      * Mark pending rejoins to patch up with the load. We don't do this now as that would
      * cause us to emit into the slow path, which may be in progress.
@@ -460,52 +478,54 @@ LoopState::setLoopReg(AnyRegisterID reg,
     for (unsigned i = 0; i < loopJoins.length(); i++) {
         StubJoinPatch p;
         p.join = loopJoins[i];
         p.address = frame.addressOf(fe);
         p.reg = reg;
         loopPatches.append(p);
     }
 
-    if (lifetime->entry != lifetime->head && PC >= script->code + lifetime->entry) {
+    if (reachedEntryPoint) {
         /*
          * We've advanced past the entry point of the loop (we're analyzing the condition),
          * so need to update the register state at that entry point so that the right
          * things get loaded when we enter the loop.
          */
-        RegisterAllocation *entry = analysis->getAllocation(lifetime->entry);
-        JS_ASSERT(entry && !entry->assigned(reg));
-        entry->set(reg, slot, true);
+        RegisterAllocation *alloc = outerAnalysis->getAllocation(lifetime->entry);
+        JS_ASSERT(alloc && !alloc->assigned(reg));
+        alloc->set(reg, slot, true);
     }
 }
 
 bool
-LoopState::hoistArrayLengthCheck(const FrameEntry *obj, types::TypeSet *objTypes,
-                                 unsigned indexPopped)
+LoopState::hoistArrayLengthCheck(const CrossSSAValue &obj, const CrossSSAValue &index)
 {
-    if (skipAnalysis || script->failedBoundsCheck)
+    if (skipAnalysis)
         return false;
 
-    obj = obj->backing();
+    uint32 objSlot;
+    int32 objConstant;
+    if (!getEntryValue(obj, &objSlot, &objConstant) || objConstant != 0)
+        return false;
 
     JaegerSpew(JSpew_Analysis, "Trying to hoist bounds check on %s\n",
-               frame.entryName(obj));
+               frame.entryName(objSlot));
 
-    if (!loopInvariantEntry(frame.indexOfFe(obj))) {
+    if (!loopInvariantEntry(objSlot)) {
         JaegerSpew(JSpew_Analysis, "Object is not loop invariant\n");
         return false;
     }
 
     /*
      * Check for an overlap with the arrays we think might grow in this loop.
      * This information is only a guess; if we don't think the array can grow
      * but it actually can, we will probably recompile after the hoisted
      * bounds check fails.
      */
-    JS_ASSERT(objTypes && !objTypes->unknown());
+    types::TypeSet *objTypes = ssa->getValueTypes(obj);
     if (!growArrays.empty()) {
         unsigned count = objTypes->getObjectCount();
         for (unsigned i = 0; i < count; i++) {
             types::TypeObject *object = objTypes->getObject(i);
             if (object) {
                 for (unsigned j = 0; j < growArrays.length(); j++) {
                     if (object == growArrays[j]) {
                         JaegerSpew(JSpew_Analysis, "Object might grow inside loop\n");
@@ -515,188 +535,177 @@ LoopState::hoistArrayLengthCheck(const F
             }
         }
     }
 
     /*
      * Get an expression for the index 'index + indexConstant', where index
      * is the value of a slot at loop entry.
      */
-    uint32 index;
+    uint32 indexSlot;
     int32 indexConstant;
-    if (!getEntryValue(analysis->poppedValue(PC - script->code, indexPopped), &index, &indexConstant)) {
+    if (!getEntryValue(index, &indexSlot, &indexConstant)) {
         JaegerSpew(JSpew_Analysis, "Could not compute index in terms of loop entry state\n");
         return false;
     }
 
-    if (index == UNASSIGNED) {
+    if (indexSlot == UNASSIGNED) {
         /* Hoist checks on x[n] accesses for constant n. */
-        return addHoistedCheck(frame.indexOfFe(obj), UNASSIGNED, UNASSIGNED, indexConstant);
+        return addHoistedCheck(objSlot, UNASSIGNED, UNASSIGNED, indexConstant);
     }
 
-    if (loopInvariantEntry(index)) {
+    if (loopInvariantEntry(indexSlot)) {
         /* Hoist checks on x[y] accesses when y is loop invariant. */
-        return addHoistedCheck(frame.indexOfFe(obj), index, UNASSIGNED, indexConstant);
+        return addHoistedCheck(objSlot, indexSlot, UNASSIGNED, indexConstant);
     }
 
     /*
      * If the LHS can decrease in the loop, it could become negative and
      * underflow the array. We currently only hoist bounds checks for loops
      * which walk arrays going forward.
      */
-    if (!analysis->liveness(index).nonDecreasing(script, lifetime)) {
+    if (!outerAnalysis->liveness(indexSlot).nonDecreasing(outerScript, lifetime)) {
         JaegerSpew(JSpew_Analysis, "Index may decrease in future iterations\n");
         return false;
     }
 
     /*
      * If the access is of the form x[y + a] where we know that y <= z + b
      * (both in terms of state at the head of the loop), hoist as follows:
      *
      * y + a < initlen(x)
      * y < initlen(x) - a
      * z + b < initlen(x) - a
      * z + b + a < initlen(x)
      */
-    if (index == testLHS && testLessEqual) {
-        uint32 rhs = testRHS;
-
-        if (testLength) {
-            FrameEntry *rhsFE = frame.getOrTrack(rhs);
-            FrameEntry *lengthEntry = invariantLength(rhsFE, NULL);
-
-            /*
-             * An entry for the length should have been constructed while
-             * processing the test.
-             */
-            JS_ASSERT(lengthEntry);
-
-            rhs = frame.indexOfFe(lengthEntry);
-        }
-
+    if (indexSlot == testLHS && testLessEqual) {
         int32 constant;
         if (!SafeAdd(testConstant, indexConstant, &constant))
             return false;
 
         /*
          * Check that the LHS is nonnegative every time we rejoin the loop.
          * This is only really necessary on initial loop entry. Note that this
          * test is not sensitive to changes to the LHS between when we make
          * the test and the start of the next iteration, as we've ensured the
          * LHS is nondecreasing within the body of the loop.
          */
-        addNegativeCheck(index, indexConstant);
+        addNegativeCheck(indexSlot, indexConstant);
 
-        return addHoistedCheck(frame.indexOfFe(obj), rhs, UNASSIGNED, constant);
+        return addHoistedCheck(objSlot, testRHS, UNASSIGNED, constant);
     }
 
     /*
      * If the access is of the form x[y + a] where we know that z >= b at the
      * head of the loop and y has a linear relationship with z such that
      * (y + z) always has the same value at the head of the loop, hoist as
      * follows:
      *
      * y + a < initlen(x)
      * y + z < initlen(x) + z - a
      * y + z < initlen(x) + b - a
      * y + z + a - b < initlen(x)
      */
-    if (hasTestLinearRelationship(index)) {
+    if (hasTestLinearRelationship(indexSlot)) {
         int32 constant;
         if (!SafeSub(indexConstant, testConstant, &constant))
             return false;
 
-        addNegativeCheck(index, indexConstant);
-        return addHoistedCheck(frame.indexOfFe(obj), index, testLHS, constant);
+        addNegativeCheck(indexSlot, indexConstant);
+        return addHoistedCheck(objSlot, indexSlot, testLHS, constant);
     }
 
     JaegerSpew(JSpew_Analysis, "No match found\n");
     return false;
 }
 
 bool
 LoopState::hasTestLinearRelationship(uint32 slot)
 {
     /*
      * Determine whether slot has a linear relationship with the loop test
      * variable 'test', such that (slot + test) always has the same value at
      * the head of the loop.
      */
 
-    if (testLHS == UNASSIGNED || testRHS != UNASSIGNED || testLessEqual || testLength)
+    if (testLHS == UNASSIGNED || testRHS != UNASSIGNED || testLessEqual)
         return false;
 
     uint32 incrementOffset = getIncrement(slot);
     if (incrementOffset == uint32(-1)) {
         /*
          * Variable is not always incremented in the loop, or is incremented
          * multiple times. Note that the nonDecreasing test done earlier
          * ensures that if there is a single write, it is an increment.
          */
         return false;
     }
 
     uint32 decrementOffset = getIncrement(testLHS);
     if (decrementOffset == uint32(-1))
         return false;
 
-    JSOp op = JSOp(script->code[decrementOffset]);
+    JSOp op = JSOp(outerScript->code[decrementOffset]);
     switch (op) {
       case JSOP_DECLOCAL:
       case JSOP_LOCALDEC:
       case JSOP_DECARG:
       case JSOP_ARGDEC:
         return true;
       default:
         return false;
     }
 }
 
 FrameEntry *
-LoopState::invariantSlots(const FrameEntry *obj)
+LoopState::invariantSlots(const CrossSSAValue &obj)
 {
-    obj = obj->backing();
-    uint32 slot = frame.indexOfFe(obj);
+    uint32 objSlot;
+    int32 objConstant;
+    if (!getEntryValue(obj, &objSlot, &objConstant) || objConstant != 0) {
+        JS_NOT_REACHED("Bad value");
+        return NULL;
+    }
 
     for (unsigned i = 0; i < invariantEntries.length(); i++) {
         InvariantEntry &entry = invariantEntries[i];
         if (entry.kind == InvariantEntry::INVARIANT_SLOTS &&
-            entry.u.array.arraySlot == slot) {
+            entry.u.array.arraySlot == objSlot) {
             return frame.getTemporary(entry.u.array.temporary);
         }
     }
 
     /* addHoistedCheck should have ensured there is an entry for the slots. */
     JS_NOT_REACHED("Missing invariant slots");
     return NULL;
 }
 
 FrameEntry *
-LoopState::invariantLength(const FrameEntry *obj, types::TypeSet *objTypes)
+LoopState::invariantLength(const CrossSSAValue &obj)
 {
-    if (skipAnalysis || script->failedBoundsCheck)
+    if (skipAnalysis)
         return NULL;
 
-    obj = obj->backing();
-    uint32 slot = frame.indexOfFe(obj);
+    uint32 objSlot;
+    int32 objConstant;
+    if (!getEntryValue(obj, &objSlot, &objConstant) || objConstant != 0)
+        return NULL;
 
     for (unsigned i = 0; i < invariantEntries.length(); i++) {
         InvariantEntry &entry = invariantEntries[i];
         if (entry.kind == InvariantEntry::INVARIANT_LENGTH &&
-            entry.u.array.arraySlot == slot) {
+            entry.u.array.arraySlot == objSlot) {
             return frame.getTemporary(entry.u.array.temporary);
         }
     }
 
-    if (!objTypes)
+    if (!loopInvariantEntry(objSlot))
         return NULL;
 
-    if (!loopInvariantEntry(frame.indexOfFe(obj)))
-        return NULL;
-
+    types::TypeSet *objTypes = ssa->getValueTypes(obj);
     types::ObjectKind kind = objTypes->getKnownObjectKind(cx);
     if (kind != types::OBJECT_DENSE_ARRAY && kind != types::OBJECT_PACKED_ARRAY)
         return NULL;
 
     /*
      * Don't make 'length' loop invariant if the loop might directly write
      * to the elements of any of the accessed arrays. This could invoke an
      * inline path which updates the length. There is no need to check the
@@ -713,58 +722,56 @@ LoopState::invariantLength(const FrameEn
     objTypes->addFreeze(cx);
 
     uint32 which = frame.allocTemporary();
     if (which == uint32(-1))
         return NULL;
     FrameEntry *fe = frame.getTemporary(which);
 
     JaegerSpew(JSpew_Analysis, "Using %s for loop invariant length of %s\n",
-               frame.entryName(fe), frame.entryName(slot));
+               frame.entryName(fe), frame.entryName(objSlot));
 
     InvariantEntry entry;
     entry.kind = InvariantEntry::INVARIANT_LENGTH;
-    entry.u.array.arraySlot = slot;
+    entry.u.array.arraySlot = objSlot;
     entry.u.array.temporary = which;
     invariantEntries.append(entry);
 
     return fe;
 }
 
 FrameEntry *
-LoopState::invariantProperty(const FrameEntry *obj, types::TypeSet *objTypes, jsid id)
+LoopState::invariantProperty(const CrossSSAValue &obj, jsid id)
 {
-    if (skipAnalysis || script->failedBoundsCheck)
+    if (skipAnalysis)
         return NULL;
 
     if (id == ATOM_TO_JSID(cx->runtime->atomState.lengthAtom))
         return NULL;
 
-    obj = obj->backing();
-    uint32 slot = frame.indexOfFe(obj);
+    uint32 objSlot;
+    int32 objConstant;
+    if (!getEntryValue(obj, &objSlot, &objConstant) || objConstant != 0)
+        return NULL;
 
     for (unsigned i = 0; i < invariantEntries.length(); i++) {
         InvariantEntry &entry = invariantEntries[i];
         if (entry.kind == InvariantEntry::INVARIANT_PROPERTY &&
-            entry.u.property.objectSlot == slot &&
+            entry.u.property.objectSlot == objSlot &&
             entry.u.property.id == id) {
-            FrameEntry *fe = frame.getTemporary(entry.u.property.temporary);
-            frame.learnType(fe, JSVAL_TYPE_INT32, false);
-            return fe;
+            return frame.getTemporary(entry.u.property.temporary);
         }
     }
 
-    if (!objTypes)
-        return NULL;
-
-    if (!loopInvariantEntry(frame.indexOfFe(obj)))
+    if (!loopInvariantEntry(objSlot))
         return NULL;
 
     /* Check that the property is definite and not written anywhere in the loop. */
-    if (objTypes->getObjectCount() != 1)
+    types::TypeSet *objTypes = ssa->getValueTypes(obj);
+    if (objTypes->unknown() || objTypes->getObjectCount() != 1)
         return NULL;
     types::TypeObject *object = objTypes->getObject(0);
     if (object->unknownProperties() || hasModifiedProperty(object, id))
         return NULL;
     types::TypeSet *propertyTypes = object->getProperty(cx, id, false);
     if (!propertyTypes)
         return NULL;
     if (!propertyTypes->isDefiniteProperty() || propertyTypes->isOwnProperty(cx, true))
@@ -772,82 +779,88 @@ LoopState::invariantProperty(const Frame
     objTypes->addFreeze(cx);
 
     uint32 which = frame.allocTemporary();
     if (which == uint32(-1))
         return NULL;
     FrameEntry *fe = frame.getTemporary(which);
 
     JaegerSpew(JSpew_Analysis, "Using %s for loop invariant property of %s\n",
-               frame.entryName(fe), frame.entryName(slot));
+               frame.entryName(fe), frame.entryName(objSlot));
 
     InvariantEntry entry;
     entry.kind = InvariantEntry::INVARIANT_PROPERTY;
-    entry.u.property.objectSlot = slot;
+    entry.u.property.objectSlot = objSlot;
     entry.u.property.propertySlot = propertyTypes->definiteSlot();
     entry.u.property.temporary = which;
     entry.u.property.id = id;
     invariantEntries.append(entry);
 
     return fe;
 }
 
 bool
-LoopState::cannotIntegerOverflow()
+LoopState::cannotIntegerOverflow(const CrossSSAValue &pushed)
 {
-    if (skipAnalysis || script->failedBoundsCheck)
+    if (skipAnalysis)
         return false;
 
-    /* If the result of the operation fits in an integer, it can't overflow. */
-    SSAValue pushed;
-    pushed.initPushed(PC - script->code, 0);
-
     int32 min, max;
     if (computeInterval(pushed, &min, &max)) {
         JaegerSpew(JSpew_Analysis, "Integer operation fits in range [%d, %d]\n", min, max);
         return true;
     }
 
     /*
      * Compute a slot and constant such that the result of the binary op is
      * 'slot + constant', where slot is expressed in terms of its value at
      * the head of the loop.
      */
+    JS_ASSERT(pushed.v.kind() == SSAValue::PUSHED);
+    jsbytecode *PC = ssa->getFrame(pushed.frame).script->code + pushed.v.pushedOffset();
+    ScriptAnalysis *analysis = ssa->getFrame(pushed.frame).script->analysis(cx);
+
     uint32 baseSlot = UNASSIGNED;
     int32 baseConstant = 0;
     JSOp op = JSOp(*PC);
     switch (op) {
 
       case JSOP_INCLOCAL:
       case JSOP_LOCALINC:
       case JSOP_INCARG:
-      case JSOP_ARGINC:
-        if (!getEntryValue(analysis->poppedValue(PC - script->code, 0), &baseSlot, &baseConstant))
+      case JSOP_ARGINC: {
+        CrossSSAValue cv(pushed.frame, analysis->poppedValue(PC, 0));
+        if (!getEntryValue(cv, &baseSlot, &baseConstant))
             return false;
         if (!SafeAdd(baseConstant, 1, &baseConstant))
             return false;
         break;
+      }
 
       case JSOP_DECLOCAL:
       case JSOP_LOCALDEC:
       case JSOP_DECARG:
-      case JSOP_ARGDEC:
-          if (!getEntryValue(analysis->poppedValue(PC - script->code, 0), &baseSlot, &baseConstant))
+      case JSOP_ARGDEC: {
+        CrossSSAValue cv(pushed.frame, analysis->poppedValue(PC, 0));
+        if (!getEntryValue(cv, &baseSlot, &baseConstant))
             return false;
         if (!SafeSub(baseConstant, 1, &baseConstant))
             return false;
         break;
+      }
 
       case JSOP_ADD:
       case JSOP_SUB: {
         uint32 lhs = UNASSIGNED, rhs = UNASSIGNED;
         int32 lhsconstant = 0, rhsconstant = 0;
-        if (!getEntryValue(analysis->poppedValue(PC - script->code, 1), &lhs, &lhsconstant))
+        CrossSSAValue lcv(pushed.frame, analysis->poppedValue(PC, 1));
+        CrossSSAValue rcv(pushed.frame, analysis->poppedValue(PC, 0));
+        if (!getEntryValue(lcv, &lhs, &lhsconstant))
             return false;
-        if (!getEntryValue(analysis->poppedValue(PC - script->code, 0), &rhs, &rhsconstant))
+        if (!getEntryValue(rcv, &rhs, &rhsconstant))
             return false;
         if (op == JSOP_ADD) {
             if (!SafeAdd(lhsconstant, rhsconstant, &baseConstant))
                 return false;
             if (lhs != UNASSIGNED && rhs != UNASSIGNED)
                 return false;
             baseSlot = (lhs == UNASSIGNED) ? rhs : lhs;
         } else {
@@ -878,17 +891,17 @@ LoopState::cannotIntegerOverflow()
     if (baseConstant < 0) {
         /*
          * If the access is of the form 'y + a' where a is negative and we know
          * that y >= b at the head of the loop, we can eliminate as follows:
          *
          * y + a >= INT_MIN
          * b + a >= INT_MIN
          */
-        if (baseSlot == testLHS && !testLessEqual && !testLength && testRHS == UNASSIGNED) {
+        if (baseSlot == testLHS && !testLessEqual && testRHS == UNASSIGNED) {
             int32 constant;
             if (!SafeAdd(testConstant, baseConstant, &constant))
                 return false;
 
             JaegerSpew(JSpew_Analysis, "Loop test comparison must hold\n");
             return true;
         }
 
@@ -900,17 +913,17 @@ LoopState::cannotIntegerOverflow()
      * If the access is of the form 'y + a' where we know that y <= z + b
      * (both in terms of state at the head of the loop), hoist as follows:
      *
      * y + a <= INT_MAX
      * y <= INT_MAX - a
      * z + b <= INT_MAX - a
      * z <= INT_MAX - (a + b)
      */
-    if (baseSlot == testLHS && testLessEqual && !testLength) {
+    if (baseSlot == testLHS && testLessEqual) {
         int32 constant;
         if (!SafeAdd(testConstant, baseConstant, &constant))
             return false;
 
         if (testRHS == UNASSIGNED || constant <= 0) {
             /*
              * Reduces to '(a + b) <= INT_MAX', which SafeAdd ensures,
              * or 'z <= INT_MAX', which integer checks on z ensure.
@@ -948,19 +961,19 @@ LoopState::cannotIntegerOverflow()
         return true;
     }
 
     JaegerSpew(JSpew_Analysis, "No match found\n");
     return false;
 }
 
 bool
-LoopState::ignoreIntegerOverflow()
+LoopState::ignoreIntegerOverflow(const CrossSSAValue &pushed)
 {
-    if (skipAnalysis || script->failedBoundsCheck || unknownModset || !constrainedLoop)
+    if (skipAnalysis || unknownModset || !constrainedLoop)
         return false;
 
     /*
      * Under certain circumstances, we can ignore arithmetic overflow in adds
      * and multiplies. As long as the result of the add/mul is either only used
      * in bitwise arithmetic or is only used in additions whose result is only
      * used in bitwise arithmetic, then the conversion to integer performed by
      * the bitop will undo the effect of the earlier overflow. There are two
@@ -977,51 +990,55 @@ LoopState::ignoreIntegerOverflow()
      * results may produce different values (e.g. '(x + "e3") & y'). We must
      * restrict the loop body in such a way that no string operand is possible
      * or becomes possible due to dynamic type changes for such additions.
      * constrainedLoop indicates whether the only operations which can happen
      * in the loop body are int/double arithmetic and bitops, and reads/writes
      * from known dense arrays which can only produce ints and doubles.
      */
 
+    /* This value must be in the outer loop: loops with inline calls are not constrained. */
+    JS_ASSERT(pushed.frame == CrossScriptSSA::OUTER_FRAME);
+
+    JS_ASSERT(pushed.v.kind() == SSAValue::PUSHED);
+    jsbytecode *PC = outerScript->code + pushed.v.pushedOffset();
+
     JSOp op = JSOp(*PC);
     if (op != JSOP_MUL && op != JSOP_ADD)
         return false;
 
-    SSAValue v;
-    v.initPushed(PC - script->code, 0);
-    if (valueFlowsToBitops(v)) {
+    if (valueFlowsToBitops(pushed.v)) {
         JaegerSpew(JSpew_Analysis, "Integer result flows to bitops\n");
         return true;
     }
 
     if (op == JSOP_MUL) {
         /*
          * If the multiply will only be used in an addition, negative zero can
          * be ignored as long as the other operand in the addition cannot be
          * negative zero.
          */
-        if (!analysis->trackUseChain(v))
+        if (!outerAnalysis->trackUseChain(pushed.v))
             return false;
 
-        SSAUseChain *use = analysis->useChain(v);
-        if (!use || use->next || !use->popped || script->code[use->offset] != JSOP_ADD)
+        SSAUseChain *use = outerAnalysis->useChain(pushed.v);
+        if (!use || use->next || !use->popped || outerScript->code[use->offset] != JSOP_ADD)
             return false;
 
         if (use->u.which == 1) {
             /*
              * Only ignore negative zero if this is the RHS of an addition.
              * Otherwise the result of the other side could change to a double
              * after the first LHS has been computed, and be affected by a
              * negative zero LHS.
              */
             return false;
         }
 
-        types::TypeSet *lhsTypes = analysis->poppedTypes(use->offset, 1);
+        types::TypeSet *lhsTypes = outerAnalysis->poppedTypes(use->offset, 1);
         if (lhsTypes->getKnownTypeTag(cx) != JSVAL_TYPE_INT32)
             return false;
 
         JaegerSpew(JSpew_Analysis, "Integer result is RHS in integer addition\n");
         return true;
     }
 
     return false;
@@ -1030,55 +1047,55 @@ LoopState::ignoreIntegerOverflow()
 bool
 LoopState::valueFlowsToBitops(const analyze::SSAValue &v)
 {
     /*
      * Determine whether v can only be used in a bitop later in the same
      * iteration of this loop, or in additions whose result is also only
      * used in such a bitop.
      */
-    if (!analysis->trackUseChain(v))
+    if (!outerAnalysis->trackUseChain(v))
         return false;
 
-    SSAUseChain *use = analysis->useChain(v);
+    SSAUseChain *use = outerAnalysis->useChain(v);
     while (use) {
         if (!use->popped) {
             /*
              * Ignore variables used in phi nodes, so long as the variable is
              * dead at the phi. We don't track live variables across back edges
              * or complex control flow.
              */
             if (v.kind() == SSAValue::VAR) {
-                analyze::Lifetime *lifetime = analysis->liveness(v.varSlot()).live(use->offset);
+                analyze::Lifetime *lifetime = outerAnalysis->liveness(v.varSlot()).live(use->offset);
                 if (!lifetime) {
                     use = use->next;
                     continue;
                 }
             }
             return false;
         }
 
         if (use->offset > lifetime->backedge)
             return false;
 
-        jsbytecode *pc = script->code + use->offset;
+        jsbytecode *pc = outerScript->code + use->offset;
         JSOp op = JSOp(*pc);
         switch (op) {
           case JSOP_ADD:
           case JSOP_GETLOCAL: {
             SSAValue pushv;
             pushv.initPushed(use->offset, 0);
             if (!valueFlowsToBitops(pushv))
                 return false;
             break;
           }
 
           case JSOP_SETLOCAL: {
-            uint32 slot = GetBytecodeSlot(script, pc);
-            if (!analysis->trackSlot(slot))
+            uint32 slot = GetBytecodeSlot(outerScript, pc);
+            if (!outerAnalysis->trackSlot(slot))
                 return false;
             SSAValue writev;
             writev.initWritten(slot, use->offset);
             if (!valueFlowsToBitops(writev))
                 return false;
             break;
           }
 
@@ -1109,16 +1126,18 @@ LoopState::restoreInvariants(jsbytecode 
      * Restore all invariants in memory when entering the loop or after any
      * scripted or C++ call, and check that all hoisted conditions still hold.
      * Care should be taken not to clobber the return register or callee-saved
      * registers, which may still be live after some calls.
      */
 
     Registers regs(Registers::TempRegs);
     regs.takeReg(Registers::ReturnReg);
+    JS_ASSERT(!regs.hasReg(JSReturnReg_Data));
+    JS_ASSERT(!regs.hasReg(JSReturnReg_Type));
 
     RegisterID T0 = regs.takeAnyReg().reg();
     RegisterID T1 = regs.takeAnyReg().reg();
 
     for (unsigned i = 0; i < invariantEntries.length(); i++) {
         const InvariantEntry &entry = invariantEntries[i];
         switch (entry.kind) {
 
@@ -1264,26 +1283,26 @@ LoopState::getLoopTestAccess(const SSAVa
         uint32 offset;
         if (v.kind() == SSAValue::PHI) {
             slot = v.phiSlot();
             offset = v.phiOffset();
         } else {
             slot = v.varSlot();
             offset = v.varInitial() ? 0 : v.varOffset();
         }
-        if (analysis->slotEscapes(slot))
+        if (outerAnalysis->slotEscapes(slot))
             return false;
-        if (analysis->liveness(slot).firstWrite(offset + 1, lifetime->backedge) != uint32(-1))
+        if (outerAnalysis->liveness(slot).firstWrite(offset + 1, lifetime->backedge) != uint32(-1))
             return false;
         *pslot = slot;
         *pconstant = 0;
         return true;
     }
 
-    jsbytecode *pc = script->code + v.pushedOffset();
+    jsbytecode *pc = outerScript->code + v.pushedOffset();
 
     JSOp op = JSOp(*pc);
     const JSCodeSpec *cs = &js_CodeSpec[op];
 
     /*
      * If the pc is modifying a variable and the value tested is its earlier value
      * (e.g. 'x++ < n'), we need to account for the modification --- at the start
      * of the next iteration, the value compared will have been 'x - 1'.
@@ -1297,22 +1316,22 @@ LoopState::getLoopTestAccess(const SSAVa
       case JSOP_INCLOCAL:
       case JSOP_DECLOCAL:
       case JSOP_LOCALINC:
       case JSOP_LOCALDEC:
       case JSOP_INCARG:
       case JSOP_DECARG:
       case JSOP_ARGINC:
       case JSOP_ARGDEC: {
-        uint32 slot = GetBytecodeSlot(script, pc);
-        if (analysis->slotEscapes(slot))
+        uint32 slot = GetBytecodeSlot(outerScript, pc);
+        if (outerAnalysis->slotEscapes(slot))
             return false;
 
         /* Only consider tests on known integers. */
-        types::TypeSet *types = analysis->pushedTypes(pc, 0);
+        types::TypeSet *types = outerAnalysis->pushedTypes(pc, 0);
         if (types->getKnownTypeTag(cx) != JSVAL_TYPE_INT32)
             return false;
 
         *pslot = slot;
         if (cs->format & JOF_POST) {
             if (cs->format & JOF_INC)
                 *pconstant = -1;
             else
@@ -1342,77 +1361,60 @@ LoopState::analyzeLoopTest()
     if (lifetime->entry == lifetime->head)
         return;
 
     /* Don't handle loops with branching inside their condition. */
     if (lifetime->entry < lifetime->lastBlock)
         return;
 
     /* Get the test performed before branching. */
-    jsbytecode *backedge = script->code + lifetime->backedge;
+    jsbytecode *backedge = outerScript->code + lifetime->backedge;
     if (JSOp(*backedge) != JSOP_IFNE)
         return;
-    const SSAValue &test = analysis->poppedValue(backedge, 0);
+    const SSAValue &test = outerAnalysis->poppedValue(backedge, 0);
     if (test.kind() != SSAValue::PUSHED)
         return;
-    JSOp cmpop = JSOp(script->code[test.pushedOffset()]);
+    JSOp cmpop = JSOp(outerScript->code[test.pushedOffset()]);
     switch (cmpop) {
       case JSOP_GT:
       case JSOP_GE:
       case JSOP_LT:
       case JSOP_LE:
         break;
       default:
         return;
     }
 
-    SSAValue one = analysis->poppedValue(test.pushedOffset(), 1);
-    SSAValue two = analysis->poppedValue(test.pushedOffset(), 0);
+    SSAValue one = outerAnalysis->poppedValue(test.pushedOffset(), 1);
+    SSAValue two = outerAnalysis->poppedValue(test.pushedOffset(), 0);
 
     /* Reverse the condition if the RHS is modified by the loop. */
     uint32 swapRHS;
     int32 swapConstant;
     if (getLoopTestAccess(two, &swapRHS, &swapConstant)) {
-        if (swapRHS != UNASSIGNED && analysis->liveness(swapRHS).firstWrite(lifetime) != uint32(-1)) {
+        if (swapRHS != UNASSIGNED && outerAnalysis->liveness(swapRHS).firstWrite(lifetime) != uint32(-1)) {
             SSAValue tmp = one;
             one = two;
             two = tmp;
             cmpop = ReverseCompareOp(cmpop);
         }
     }
 
     uint32 lhs;
     int32 lhsConstant;
     if (!getLoopTestAccess(one, &lhs, &lhsConstant))
         return;
 
     uint32 rhs = UNASSIGNED;
     int32 rhsConstant = 0;
-    bool rhsLength = false;
-
-    if (two.kind() == SSAValue::PUSHED &&
-        JSOp(script->code[two.pushedOffset()] == JSOP_LENGTH)) {
-        /* Handle 'this.length' or 'x.length' for loop invariant 'x'. */
-        const SSAValue &array = analysis->poppedValue(two.pushedOffset(), 0);
-        if (!getLoopTestAccess(array, &rhs, &rhsConstant))
-            return;
-        if (rhsConstant != 0 || analysis->liveness(rhs).firstWrite(lifetime) != uint32(-1)) {
-            return;
-        }
-        if (!invariantLength(frame.getOrTrack(rhs), analysis->getValueTypes(array)))
-            return;
-        rhsLength = true;
-    } else {
-        if (!getLoopTestAccess(two, &rhs, &rhsConstant))
-            return;
-
-        /* Don't handle comparisons where both the LHS and RHS are modified in the loop. */
-        if (rhs != UNASSIGNED && analysis->liveness(rhs).firstWrite(lifetime) != uint32(-1))
-            return;
-    }
+    CrossSSAValue rhsv(CrossScriptSSA::OUTER_FRAME, two);
+    if (!getEntryValue(rhsv, &rhs, &rhsConstant))
+        return;
+    if (!loopInvariantEntry(rhs))
+        return;
 
     if (lhs == UNASSIGNED)
         return;
 
     int32 constant;
     if (!SafeSub(rhsConstant, lhsConstant, &constant))
         return;
 
@@ -1424,41 +1426,40 @@ LoopState::analyzeLoopTest()
     if (cmpop == JSOP_LT && !SafeSub(constant, 1, &constant))
         return;
 
     /* Passed all filters, this is a loop test we can capture. */
 
     this->testLHS = lhs;
     this->testRHS = rhs;
     this->testConstant = constant;
-    this->testLength = rhsLength;
     this->testLessEqual = (cmpop == JSOP_LT || cmpop == JSOP_LE);
 }
 
 void
 LoopState::analyzeLoopIncrements()
 {
     /*
      * Find locals and arguments which are used in exactly one inc/dec operation in every
      * iteration of the loop (we only match against the last basic block, but could
      * also handle the first basic block).
      */
 
-    for (uint32 slot = ArgSlot(0); slot < LocalSlot(script, script->nfixed); slot++) {
-        if (analysis->slotEscapes(slot))
+    for (uint32 slot = ArgSlot(0); slot < LocalSlot(outerScript, outerScript->nfixed); slot++) {
+        if (outerAnalysis->slotEscapes(slot))
             continue;
 
-        uint32 offset = analysis->liveness(slot).onlyWrite(lifetime);
+        uint32 offset = outerAnalysis->liveness(slot).onlyWrite(lifetime);
         if (offset == uint32(-1) || offset < lifetime->lastBlock)
             continue;
 
-        JSOp op = JSOp(script->code[offset]);
+        JSOp op = JSOp(outerScript->code[offset]);
         const JSCodeSpec *cs = &js_CodeSpec[op];
         if (cs->format & (JOF_INC | JOF_DEC)) {
-            types::TypeSet *types = analysis->pushedTypes(offset);
+            types::TypeSet *types = outerAnalysis->pushedTypes(offset);
             if (types->getKnownTypeTag(cx) != JSVAL_TYPE_INT32)
                 continue;
 
             Increment inc;
             inc.slot = slot;
             inc.offset = offset;
             increments.append(inc);
         }
@@ -1476,110 +1477,158 @@ LoopState::definiteArrayAccess(const SSA
      * integer.
      *
      * This is used to determine if we can ignore possible integer overflow in
      * an operation; if this site could read a non-integer element out of the
      * array or invoke a scripted getter/setter, it could produce a string or
      * other value by which the overflow could be observed.
      */
 
-    types::TypeSet *objTypes = analysis->getValueTypes(obj);
-    types::TypeSet *elemTypes = analysis->getValueTypes(index);
+    types::TypeSet *objTypes = outerAnalysis->getValueTypes(obj);
+    types::TypeSet *elemTypes = outerAnalysis->getValueTypes(index);
 
     if (objTypes->getKnownTypeTag(cx) != JSVAL_TYPE_OBJECT ||
         elemTypes->getKnownTypeTag(cx) != JSVAL_TYPE_INT32) {
         return false;
     }
 
     types::ObjectKind kind = objTypes->getKnownObjectKind(cx);
     if (kind != types::OBJECT_DENSE_ARRAY && kind != types::OBJECT_PACKED_ARRAY)
         return false;
 
     if (cc.arrayPrototypeHasIndexedProperty())
         return false;
 
     uint32 objSlot;
     int32 objConstant;
-    if (!getEntryValue(obj, &objSlot, &objConstant) || objConstant != 0)
+    CrossSSAValue objv(CrossScriptSSA::OUTER_FRAME, obj);
+    if (!getEntryValue(objv, &objSlot, &objConstant) || objConstant != 0)
         return false;
     if (!loopInvariantEntry(objSlot))
         return false;
 
     /* Bitops must produce integers. */
     if (index.kind() == SSAValue::PUSHED) {
-        JSOp op = JSOp(script->code[index.pushedOffset()]);
+        JSOp op = JSOp(outerScript->code[index.pushedOffset()]);
         switch (op) {
           case JSOP_BITAND:
           case JSOP_BITOR:
           case JSOP_BITXOR:
           case JSOP_BITNOT:
           case JSOP_RSH:
           case JSOP_LSH:
           case JSOP_URSH:
             return true;
           default:;
         }
     }
 
     uint32 indexSlot;
     int32 indexConstant;
-    if (!getEntryValue(index, &indexSlot, &indexConstant))
+    CrossSSAValue indexv(CrossScriptSSA::OUTER_FRAME, index);
+    if (!getEntryValue(indexv, &indexSlot, &indexConstant))
         return false;
 
     /*
      * The index is determined from a variable's value at loop entry. We don't
      * carry values with ignored overflows around loop back edges, so will know
      * the index is a non-integer before such overflows are ignored.
      */
     return true;
 }
 
-void
-LoopState::analyzeLoopBody()
+bool
+LoopState::analyzeLoopBody(unsigned frame)
 {
-    unsigned offset = lifetime->head;
-    while (offset < lifetime->backedge) {
+    JSScript *script = ssa->getFrame(frame).script;
+    analyze::ScriptAnalysis *analysis = script->analysis(cx);
+    JS_ASSERT(analysis && !analysis->failed() && analysis->ranInference());
+
+    /*
+     * The temporaries need to be positioned after all values in the deepest
+     * inlined frame plus another stack frame pushed by, e.g. ic::Call.
+     * This new frame will have been partially initialized by the call, and
+     * we don't want to scribble on that frame when restoring invariants.
+     */
+    temporariesStart = Max((unsigned long) temporariesStart,
+                           ssa->getFrame(frame).depth + VALUES_PER_STACK_FRAME * 2 + script->nslots);
+
+    if (script->failedBoundsCheck)
+        skipAnalysis = true;
+
+    /* Analyze the entire script for frames inlined in the loop body. */
+    unsigned start = (frame == CrossScriptSSA::OUTER_FRAME) ? lifetime->head + JSOP_TRACE_LENGTH : 0;
+    unsigned end = (frame == CrossScriptSSA::OUTER_FRAME) ? lifetime->backedge : script->length;
+
+    unsigned offset = start;
+    while (offset < end) {
         jsbytecode *pc = script->code + offset;
         uint32 successorOffset = offset + GetBytecodeLength(pc);
 
         analyze::Bytecode *opinfo = analysis->maybeCode(offset);
         if (!opinfo) {
             offset = successorOffset;
             continue;
         }
 
+        /* Don't do any hoisting for outer loops in case of nesting. */
+        if (opinfo->loopHead)
+            skipAnalysis = true;
+
         JSOp op = JSOp(*pc);
         switch (op) {
 
+          case JSOP_CALL: {
+            /*
+             * Don't hoist within this loop unless calls at this site are inlined.
+             * :XXX: also recognize native calls which will be inlined.
+             */
+            bool foundInline = false;
+            for (unsigned i = 0; !foundInline && i < ssa->numFrames(); i++) {
+                if (ssa->iterFrame(i).parent == frame && ssa->iterFrame(i).parentpc == pc)
+                    foundInline = true;
+            }
+            if (!foundInline)
+                skipAnalysis = true;
+            break;
+          }
+
+          case JSOP_EVAL:
+          case JSOP_FUNCALL:
+          case JSOP_FUNAPPLY:
+          case JSOP_NEW:
+            skipAnalysis = true;
+            break;
+
           case JSOP_SETHOLE:
           case JSOP_SETELEM: {
             SSAValue objValue = analysis->poppedValue(pc, 2);
             SSAValue elemValue = analysis->poppedValue(pc, 1);
 
             types::TypeSet *objTypes = analysis->getValueTypes(objValue);
             types::TypeSet *elemTypes = analysis->getValueTypes(elemValue);
 
             /*
              * Mark the modset as unknown if the index might be non-integer,
              * we don't want to consider the SETELEM PIC here.
              */
             if (objTypes->unknown() || elemTypes->getKnownTypeTag(cx) != JSVAL_TYPE_INT32) {
                 unknownModset = true;
-                return;
+                break;
             }
 
             objTypes->addFreeze(cx);
             for (unsigned i = 0; i < objTypes->getObjectCount(); i++) {
                 types::TypeObject *object = objTypes->getObject(i);
                 if (!object)
                     continue;
                 if (!addModifiedProperty(object, JSID_VOID))
-                    return;
+                    return false;
                 if (op == JSOP_SETHOLE && !addGrowArray(object))
-                    return;
+                    return false;
             }
 
             if (constrainedLoop && !definiteArrayAccess(objValue, elemValue))
                 constrainedLoop = false;
             break;
           }
 
           case JSOP_GETELEM: {
@@ -1594,17 +1643,17 @@ LoopState::analyzeLoopBody()
           case JSOP_SETPROP:
           case JSOP_SETMETHOD: {
             JSAtom *atom = script->getAtom(js_GetIndexFromBytecode(cx, script, pc, 0));
             jsid id = types::MakeTypeId(cx, ATOM_TO_JSID(atom));
 
             types::TypeSet *objTypes = analysis->poppedTypes(pc, 1);
             if (objTypes->unknown()) {
                 unknownModset = true;
-                return;
+                break;
             }
 
             objTypes->addFreeze(cx);
             for (unsigned i = 0; i < objTypes->getObjectCount(); i++) {
                 types::TypeObject *object = objTypes->getObject(i);
                 if (!object)
                     continue;
                 if (!addModifiedProperty(object, id))
@@ -1622,17 +1671,17 @@ LoopState::analyzeLoopBody()
           case JSOP_ENUMELEM:
           case JSOP_ENUMCONSTELEM:
           case JSOP_INCPROP:
           case JSOP_DECPROP:
           case JSOP_PROPINC:
           case JSOP_PROPDEC:
           case JSOP_FORPROP:
             unknownModset = true;
-            return;
+            break;
 
           case JSOP_TRACE:
           case JSOP_NOTRACE:
           case JSOP_POP:
           case JSOP_ZERO:
           case JSOP_ONE:
           case JSOP_INT8:
           case JSOP_INT32:
@@ -1702,16 +1751,18 @@ LoopState::analyzeLoopBody()
 
           default:
             constrainedLoop = false;
             break;
         }
 
         offset = successorOffset;
     }
+
+    return true;
 }
 
 bool
 LoopState::addGrowArray(types::TypeObject *object)
 {
     static const uint32 MAX_SIZE = 10;
     for (unsigned i = 0; i < growArrays.length(); i++) {
         if (growArrays[i] == object)
@@ -1759,16 +1810,17 @@ LoopState::hasGrowArray(types::TypeObjec
     return false;
 }
 
 bool
 LoopState::hasModifiedProperty(types::TypeObject *object, jsid id)
 {
     if (unknownModset)
         return true;
+    id = types::MakeTypeId(cx, id);
     for (unsigned i = 0; i < modifiedProperties.length(); i++) {
         if (modifiedProperties[i].object == object && modifiedProperties[i].id == id)
             return true;
     }
     return false;
 }
 
 uint32
@@ -1797,20 +1849,20 @@ LoopState::adjustConstantForIncrement(js
     /*
      * Note the '<' here. If this PC is at one of the increment opcodes, then
      * behave as if the increment has not happened yet. This is needed for loop
      * entry points, which can be directly at an increment. We won't rejoin
      * after the increment, as we only take stub calls in such situations on
      * integer overflow, which will disable hoisted conditions involving the
      * variable anyways.
      */
-    if (offset == uint32(-1) || offset < uint32(pc - script->code))
+    if (offset == uint32(-1) || offset < uint32(pc - outerScript->code))
         return 0;
 
-    switch (JSOp(script->code[offset])) {
+    switch (JSOp(outerScript->code[offset])) {
       case JSOP_INCLOCAL:
       case JSOP_LOCALINC:
       case JSOP_INCARG:
       case JSOP_ARGINC:
         return 1;
       case JSOP_DECLOCAL:
       case JSOP_LOCALDEC:
       case JSOP_DECARG:
@@ -1818,35 +1870,47 @@ LoopState::adjustConstantForIncrement(js
         return -1;
       default:
         JS_NOT_REACHED("Bad op");
         return 0;
     }
 }
 
 bool
-LoopState::getEntryValue(const SSAValue &v, uint32 *pslot, int32 *pconstant)
+LoopState::getEntryValue(const CrossSSAValue &iv, uint32 *pslot, int32 *pconstant)
 {
+    CrossSSAValue cv = ssa->foldValue(iv);
+
+    JSScript *script = ssa->getFrame(cv.frame).script;
+    ScriptAnalysis *analysis = script->analysis(cx);
+    const SSAValue &v = cv.v;
+
     /*
      * For a stack value popped by the bytecode at offset, try to get an
      * expression 'slot + constant' with the same value as the stack value
      * and expressed in terms of the state at loop entry.
      */
 
-    if (v.kind() == SSAValue::PHI && v.phiSlot() < TotalSlots(script)) {
+    if (v.kind() == SSAValue::PHI) {
+        if (cv.frame != CrossScriptSSA::OUTER_FRAME)
+            return false;
+        if (v.phiSlot() >= TotalSlots(script))
+            return false;
         if (v.phiOffset() > lifetime->head &&
-            analysis->liveness(v.phiSlot()).firstWrite(lifetime) < v.phiOffset()) {
+            outerAnalysis->liveness(v.phiSlot()).firstWrite(lifetime) < v.phiOffset()) {
             return false;
         }
         *pslot = v.phiSlot();
         *pconstant = 0;
         return true;
     }
 
     if (v.kind() == SSAValue::VAR) {
+        if (cv.frame != CrossScriptSSA::OUTER_FRAME)
+            return false;
         if (v.varInitial() || v.varOffset() < lifetime->head) {
             *pslot = v.varSlot();
             *pconstant = 0;
             return true;
         }
     }
 
     if (v.kind() != SSAValue::PUSHED)
@@ -1858,53 +1922,90 @@ LoopState::getEntryValue(const SSAValue 
     switch (op) {
 
       case JSOP_GETLOCAL:
       case JSOP_LOCALINC:
       case JSOP_INCLOCAL:
       case JSOP_GETARG:
       case JSOP_ARGINC:
       case JSOP_INCARG: {
-        uint32 slot = GetBytecodeSlot(script, pc);
-        if (analysis->slotEscapes(slot))
+        if (cv.frame != CrossScriptSSA::OUTER_FRAME)
             return false;
-        uint32 write = analysis->liveness(slot).firstWrite(lifetime);
+        uint32 slot = GetBytecodeSlot(outerScript, pc);
+        if (outerAnalysis->slotEscapes(slot))
+            return false;
+        uint32 write = outerAnalysis->liveness(slot).firstWrite(lifetime);
         if (write != uint32(-1) && write < v.pushedOffset()) {
             /* Variable has been modified since the start of the loop. */
             return false;
         }
         *pslot = slot;
         *pconstant = (op == JSOP_INCLOCAL || op == JSOP_INCARG) ? 1 : 0;
         return true;
       }
 
+      case JSOP_THIS:
+        if (cv.frame != CrossScriptSSA::OUTER_FRAME)
+            return false;
+        *pslot = ThisSlot();
+        *pconstant = 0;
+        return true;
+
       case JSOP_ZERO:
       case JSOP_ONE:
       case JSOP_UINT16:
       case JSOP_UINT24:
       case JSOP_INT8:
       case JSOP_INT32:
         *pslot = UNASSIGNED;
         *pconstant = GetBytecodeInteger(pc);
         return true;
 
+      case JSOP_LENGTH: {
+        CrossSSAValue lengthcv(cv.frame, analysis->poppedValue(v.pushedOffset(), 0));
+        FrameEntry *tmp = invariantLength(lengthcv);
+        if (!tmp)
+            return false;
+        *pslot = frame.outerSlot(tmp);
+        *pconstant = 0;
+        return true;
+      }
+
+      case JSOP_GETPROP: {
+        JSAtom *atom = script->getAtom(js_GetIndexFromBytecode(cx, script, pc, 0));
+        jsid id = ATOM_TO_JSID(atom);
+        CrossSSAValue objcv(cv.frame, analysis->poppedValue(v.pushedOffset(), 0));
+        FrameEntry *tmp = invariantProperty(objcv, id);
+        if (!tmp)
+            return false;
+        *pslot = frame.outerSlot(tmp);
+        *pconstant = 0;
+        return true;
+      }
+
       default:
         return false;
     }
 }
 
 bool
-LoopState::computeInterval(const analyze::SSAValue &v, int32 *pmin, int32 *pmax)
+LoopState::computeInterval(const CrossSSAValue &cv, int32 *pmin, int32 *pmax)
 {
+    JSScript *script = ssa->getFrame(cv.frame).script;
+    ScriptAnalysis *analysis = script->analysis(cx);
+    const SSAValue &v = cv.v;
+
     if (v.kind() == SSAValue::VAR && !v.varInitial()) {
         jsbytecode *pc = script->code + v.varOffset();
         switch (JSOp(*pc)) {
           case JSOP_SETLOCAL:
-          case JSOP_SETARG:
-            return computeInterval(analysis->poppedValue(pc, 0), pmin, pmax);
+          case JSOP_SETARG: {
+            CrossSSAValue ncv(cv.frame, analysis->poppedValue(pc, 0));
+            return computeInterval(ncv, pmin, pmax);
+          }
 
           default:
             return false;
         }
     }
 
     if (v.kind() != SSAValue::PUSHED)
         return false;
@@ -1924,18 +2025,20 @@ LoopState::computeInterval(const analyze
         int32 constant = GetBytecodeInteger(pc);
         *pmin = constant;
         *pmax = constant;
         return true;
       }
 
       case JSOP_BITAND: {
         int32 lhsmin, lhsmax, rhsmin, rhsmax;
-        bool haslhs = computeInterval(analysis->poppedValue(pc, 1), &lhsmin, &lhsmax);
-        bool hasrhs = computeInterval(analysis->poppedValue(pc, 0), &rhsmin, &rhsmax);
+        CrossSSAValue lhsv(cv.frame, analysis->poppedValue(pc, 1));
+        CrossSSAValue rhsv(cv.frame, analysis->poppedValue(pc, 0));
+        bool haslhs = computeInterval(lhsv, &lhsmin, &lhsmax);
+        bool hasrhs = computeInterval(rhsv, &rhsmin, &rhsmax);
 
         /* Only handle bitand with a constant operand. */
         haslhs = haslhs && lhsmin == lhsmax && lhsmin >= 0;
         hasrhs = hasrhs && rhsmin == rhsmax && rhsmin >= 0;
 
         if (haslhs && hasrhs) {
             *pmin = 0;
             *pmax = Min(lhsmax, rhsmax);
@@ -1948,75 +2051,78 @@ LoopState::computeInterval(const analyze
         } else {
             return false;
         }
         return true;
       }
 
       case JSOP_RSH: {
         int32 rhsmin, rhsmax;
-        if (!computeInterval(analysis->poppedValue(pc, 0), &rhsmin, &rhsmax) || rhsmin != rhsmax)
+        CrossSSAValue rhsv(cv.frame, analysis->poppedValue(pc, 0));
+        if (!computeInterval(rhsv, &rhsmin, &rhsmax) || rhsmin != rhsmax)
             return false;
 
         /* Only use the bottom 5 bits. */
         int32 shift = rhsmin & 0x1f;
         *pmin = -(1 << (31 - shift));
         *pmax = (1 << (31 - shift)) - 1;
         return true;
       }
 
       case JSOP_URSH: {
         int32 rhsmin, rhsmax;
-        if (!computeInterval(analysis->poppedValue(pc, 0), &rhsmin, &rhsmax) || rhsmin != rhsmax)
+        CrossSSAValue rhsv(cv.frame, analysis->poppedValue(pc, 0));
+        if (!computeInterval(rhsv, &rhsmin, &rhsmax) || rhsmin != rhsmax)
             return false;
 
         /* Only use the bottom 5 bits. */
         int32 shift = rhsmin & 0x1f;
         if (shift == 0)
             return false;
         *pmin = 0;
         *pmax = (1 << (31 - shift)) - 1;
         return true;
       }
 
       case JSOP_MOD: {
         int32 rhsmin, rhsmax;
-        if (!computeInterval(analysis->poppedValue(pc, 0), &rhsmin, &rhsmax) || rhsmin != rhsmax)
+        CrossSSAValue rhsv(cv.frame, analysis->poppedValue(pc, 0));
+        if (!computeInterval(rhsv, &rhsmin, &rhsmax) || rhsmin != rhsmax)
             return false;
 
         int32 rhs = abs(rhsmax);
         *pmin = -(rhs - 1);
         *pmax = rhs - 1;
         return true;
       }
 
       case JSOP_ADD: {
         int32 lhsmin, lhsmax, rhsmin, rhsmax;
-        if (!computeInterval(analysis->poppedValue(pc, 1), &lhsmin, &lhsmax) ||
-            !computeInterval(analysis->poppedValue(pc, 0), &rhsmin, &rhsmax)) {
+        CrossSSAValue lhsv(cv.frame, analysis->poppedValue(pc, 1));
+        CrossSSAValue rhsv(cv.frame, analysis->poppedValue(pc, 0));
+        if (!computeInterval(lhsv, &lhsmin, &lhsmax) || !computeInterval(rhsv, &rhsmin, &rhsmax))
             return false;
-        }
         return SafeAdd(lhsmin, rhsmin, pmin) && SafeAdd(lhsmax, rhsmax, pmax);
       }
 
       case JSOP_SUB: {
         int32 lhsmin, lhsmax, rhsmin, rhsmax;
-        if (!computeInterval(analysis->poppedValue(pc, 1), &lhsmin, &lhsmax) ||
-            !computeInterval(analysis->poppedValue(pc, 0), &rhsmin, &rhsmax)) {
+        CrossSSAValue lhsv(cv.frame, analysis->poppedValue(pc, 1));
+        CrossSSAValue rhsv(cv.frame, analysis->poppedValue(pc, 0));
+        if (!computeInterval(lhsv, &lhsmin, &lhsmax) || !computeInterval(rhsv, &rhsmin, &rhsmax))
             return false;
-        }
         return SafeSub(lhsmin, rhsmax, pmin) && SafeSub(lhsmax, rhsmin, pmax);
       }
 
       case JSOP_MUL: {
         int32 lhsmin, lhsmax, rhsmin, rhsmax;
-        if (!computeInterval(analysis->poppedValue(pc, 1), &lhsmin, &lhsmax) ||
-            !computeInterval(analysis->poppedValue(pc, 0), &rhsmin, &rhsmax)) {
+        CrossSSAValue lhsv(cv.frame, analysis->poppedValue(pc, 1));
+        CrossSSAValue rhsv(cv.frame, analysis->poppedValue(pc, 0));
+        if (!computeInterval(lhsv, &lhsmin, &lhsmax) || !computeInterval(rhsv, &rhsmin, &rhsmax))
             return false;
-        }
         int32 nlhs = Max(abs(lhsmin), abs(lhsmax));
         int32 nrhs = Max(abs(rhsmin), abs(rhsmax));
 
         if (!SafeMul(nlhs, nrhs, pmax))
             return false;
 
         if (lhsmin < 0 || rhsmin < 0) {
             /* pmax is nonnegative, so can be negated without overflow. */
--- a/js/src/methodjit/LoopState.h
+++ b/js/src/methodjit/LoopState.h
@@ -85,28 +85,36 @@ namespace mjit {
  * after the call finishes.
  */
 
 struct TemporaryCopy;
 
 class LoopState : public MacroAssemblerTypedefs
 {
     JSContext *cx;
-    JSScript *script;
-    analyze::ScriptAnalysis *analysis;
+    analyze::CrossScriptSSA *ssa;
+    JSScript *outerScript;
+    analyze::ScriptAnalysis *outerAnalysis;
+
     Compiler &cc;
     FrameState &frame;
 
     /* Basic information about this loop. */
     analyze::LoopAnalysis *lifetime;
 
     /* Allocation at the head of the loop, has all loop carried variables. */
     RegisterAllocation *alloc;
 
     /*
+     * Set if this is not a do-while loop and the compiler has advanced past
+     * the loop's entry point.
+     */
+    bool reachedEntryPoint;
+
+    /*
      * Jump which initially enters the loop. The state is synced when this jump
      * occurs, and needs a trampoline generated to load the right registers
      * before going to entryTarget.
      */
     Jump entry;
 
     /* Registers available for loop variables. */
     Registers loopRegs;
@@ -132,16 +140,17 @@ class LoopState : public MacroAssemblerT
     /*
      * Pair of a jump/label immediately after each call in the loop, to patch
      * with restores of the loop invariant stack values.
      */
     struct RestoreInvariantCall {
         Jump jump;
         Label label;
         bool ool;
+        bool entry;
 
         /* Index into Compiler's callSites or rejoinSites */
         unsigned patchIndex;
         bool patchCall;
 
         /* Any copies of temporaries on the stack */
         Vector<TemporaryCopy> *temporaryCopies;
     };
@@ -210,37 +219,43 @@ class LoopState : public MacroAssemblerT
     void restoreInvariants(jsbytecode *pc, Assembler &masm,
                            Vector<TemporaryCopy> *temporaryCopies, Vector<Jump> *jumps);
 
   public:
 
     /* Outer loop to this one, in case of loop nesting. */
     LoopState *outer;
 
-    /* Current bytecode for compilation. */
-    jsbytecode *PC;
+    /* Offset from the outermost frame at which temporaries should be allocated. */
+    uint32 temporariesStart;
 
-    LoopState(JSContext *cx, JSScript *script,
+    LoopState(JSContext *cx, analyze::CrossScriptSSA *ssa,
               Compiler *cc, FrameState *frame);
     bool init(jsbytecode *head, Jump entry, jsbytecode *entryTarget);
 
+    void setOuterPC(jsbytecode *pc)
+    {
+        if (uint32(pc - outerScript->code) == lifetime->entry && lifetime->entry != lifetime->head)
+            reachedEntryPoint = true;
+    }
+
     bool generatingInvariants() { return !skipAnalysis; }
 
     /* Add a call with trailing jump/label, after which invariants need to be restored. */
-    void addInvariantCall(Jump jump, Label label, bool ool, unsigned patchIndex, bool patchCall);
+    void addInvariantCall(Jump jump, Label label, bool ool, bool entry, unsigned patchIndex, bool patchCall);
 
     uint32 headOffset() { return lifetime->head; }
     uint32 getLoopRegs() { return loopRegs.freeMask; }
 
     Jump entryJump() { return entry; }
     uint32 entryOffset() { return lifetime->entry; }
     uint32 backedgeOffset() { return lifetime->backedge; }
 
     /* Whether the payload of slot is carried around the loop in a register. */
-    bool carriesLoopReg(FrameEntry *fe) { return alloc->hasAnyReg(frame.indexOfFe(fe)); }
+    bool carriesLoopReg(FrameEntry *fe) { return alloc->hasAnyReg(frame.entrySlot(fe)); }
 
     void setLoopReg(AnyRegisterID reg, FrameEntry *fe);
 
     void clearLoopReg(AnyRegisterID reg)
     {
         /*
          * Mark reg as having been modified since the start of the loop; it
          * cannot subsequently be marked to carry a register around the loop.
@@ -257,30 +272,30 @@ class LoopState : public MacroAssemblerT
     void clearLoopRegisters();
 
     void flushLoop(StubCompiler &stubcc);
 
     /*
      * These should only be used for entries which are known to be dense arrays
      * (if they are objects at all).
      */
-    bool hoistArrayLengthCheck(const FrameEntry *obj, types::TypeSet *objTypes,
-                               unsigned indexPopped);
-    FrameEntry *invariantSlots(const FrameEntry *obj);
-    FrameEntry *invariantLength(const FrameEntry *obj, types::TypeSet *objTypes);
-    FrameEntry *invariantProperty(const FrameEntry *obj, types::TypeSet *objTypes, jsid id);
+    bool hoistArrayLengthCheck(const analyze::CrossSSAValue &obj,
+                               const analyze::CrossSSAValue &index);
+    FrameEntry *invariantSlots(const analyze::CrossSSAValue &obj);
+    FrameEntry *invariantLength(const analyze::CrossSSAValue &obj);
+    FrameEntry *invariantProperty(const analyze::CrossSSAValue &obj, jsid id);
 
-    /* Whether the current PC's binary op cannot overflow. */
-    bool cannotIntegerOverflow();
+    /* Whether a binary or inc/dec op's result cannot overflow. */
+    bool cannotIntegerOverflow(const analyze::CrossSSAValue &pushed);
 
     /*
      * Whether integer overflow in addition or negative zeros in multiplication
-     * at the current PC can be safely ignored.
+     * at a binary op can be safely ignored.
      */
-    bool ignoreIntegerOverflow();
+    bool ignoreIntegerOverflow(const analyze::CrossSSAValue &pushed);
 
   private:
     /* Analysis information for the loop. */
 
     /*
      * Any inequality known to hold at the head of the loop. This has the
      * form 'lhs <= rhs + constant' or 'lhs >= rhs + constant', depending on
      * lessEqual. The lhs may be modified within the loop body (the test is
@@ -289,23 +304,16 @@ class LoopState : public MacroAssemblerT
      */
     enum { UNASSIGNED = uint32(-1) };
     uint32 testLHS;
     uint32 testRHS;
     int32 testConstant;
     bool testLessEqual;
 
     /*
-     * The rhs in the test is testRHS.length; for the test to be valid, the
-     * length must not be directly modified within the loop.
-     */
-    bool testLength;
-    bool testLengthKnownObject;
-
-    /*
      * A variable which will be incremented or decremented exactly once in each
      * iteration of the loop. The offset of the operation is indicated, which
      * may or may not run after the initial entry into the loop.
      */
     struct Increment {
         uint32 slot;
         uint32 offset;
     };
@@ -331,32 +339,31 @@ class LoopState : public MacroAssemblerT
      * Whether this loop only performs integer and double arithmetic and dense
      * array accesses. Integer overflows in this loop which only flow to bitops
      * can be ignored.
      */
     bool constrainedLoop;
 
     void analyzeLoopTest();
     void analyzeLoopIncrements();
-    void analyzeLoopBody();
+    bool analyzeLoopBody(unsigned frame);
+
     bool definiteArrayAccess(const analyze::SSAValue &obj, const analyze::SSAValue &index);
-    void markBitwiseOperand(const analyze::SSAValue &v);
-
     bool getLoopTestAccess(const analyze::SSAValue &v, uint32 *pslot, int32 *pconstant);
 
     bool addGrowArray(types::TypeObject *object);
     bool addModifiedProperty(types::TypeObject *object, jsid id);
 
     bool hasGrowArray(types::TypeObject *object);
     bool hasModifiedProperty(types::TypeObject *object, jsid id);
 
     uint32 getIncrement(uint32 slot);
     int32 adjustConstantForIncrement(jsbytecode *pc, uint32 slot);
 
-    bool getEntryValue(const analyze::SSAValue &v, uint32 *pslot, int32 *pconstant);
-    bool computeInterval(const analyze::SSAValue &v, int32 *pmin, int32 *pmax);
+    bool getEntryValue(const analyze::CrossSSAValue &v, uint32 *pslot, int32 *pconstant);
+    bool computeInterval(const analyze::CrossSSAValue &v, int32 *pmin, int32 *pmax);
     bool valueFlowsToBitops(const analyze::SSAValue &v);
 };
 
 } /* namespace mjit */
 } /* namespace js */
 
 #endif /* jsjaeger_loopstate_h__ */
--- a/js/src/methodjit/MethodJIT.cpp
+++ b/js/src/methodjit/MethodJIT.cpp
@@ -232,17 +232,17 @@ SYMBOL_STRING(JaegerTrampoline) ":"     
     /* Jump into the JIT'd code. */
     "jmp *0(%rsp)"                      "\n"
 );
 
 asm (
 ".text\n"
 ".globl " SYMBOL_STRING(JaegerTrampolineReturn) "\n"
 SYMBOL_STRING(JaegerTrampolineReturn) ":"       "\n"
-    "or   %rdx, %rcx"                    "\n"
-    "movq %rcx, 0x30(%rbx)"              "\n"
+    "or   %rdi, %rsi"                    "\n"
+    "movq %rsi, 0x30(%rbx)"              "\n"
     "movq %rsp, %rdi"                    "\n"
     "call " SYMBOL_STRING_VMFRAME(PopActiveVMFrame) "\n"
 
     "addq $0x58, %rsp"                   "\n"
     "popq %rbx"                          "\n"
     "popq %r15"                          "\n"
     "popq %r14"                          "\n"
@@ -318,18 +318,18 @@ SYMBOL_STRING(JaegerTrampoline) ":"     
     "movl 28(%esp), %ebp"                "\n"   /* load fp for JIT code */
     "jmp *72(%esp)"                      "\n"
 );
 
 asm (
 ".text\n"
 ".globl " SYMBOL_STRING(JaegerTrampolineReturn) "\n"
 SYMBOL_STRING(JaegerTrampolineReturn) ":" "\n"
-    "movl  %edx, 0x18(%ebp)"             "\n"
-    "movl  %ecx, 0x1C(%ebp)"             "\n"
+    "movl  %esi, 0x18(%ebp)"             "\n"
+    "movl  %edi, 0x1C(%ebp)"             "\n"
     "movl  %esp, %ebp"                   "\n"
     "addl  $0x38, %ebp"                  "\n" /* Restore stack at STACK_BASE_DIFFERENCE */
     "movl  %esp, %ecx"                   "\n"
     "call " SYMBOL_STRING_VMFRAME(PopActiveVMFrame) "\n"
 
     "addl $0x2C, %esp"                   "\n"
     "popl %ebx"                          "\n"
     "popl %edi"                          "\n"
@@ -451,18 +451,18 @@ SYMBOL_STRING(JaegerTrampoline) ":"     
 "   bx     r4"                                  "\n"
 );
 
 asm (
 ".text\n"
 FUNCTION_HEADER_EXTRA
 ".globl " SYMBOL_STRING(JaegerTrampolineReturn)   "\n"
 SYMBOL_STRING(JaegerTrampolineReturn) ":"         "\n"
-"   str r1, [r11, #24]"                    "\n" /* fp->rval data */
-"   str r2, [r11, #28]"                    "\n" /* fp->rval type */
+"   str r5, [r11, #24]"                    "\n" /* fp->rval data */
+"   str r4, [r11, #28]"                    "\n" /* fp->rval type */
 
     /* Tidy up. */
 "   mov     r0, sp"                             "\n"
 "   blx  " SYMBOL_STRING_VMFRAME(PopActiveVMFrame) "\n"
 
     /* Skip past the parameters we pushed (such as cx and the like). */
 "   add     sp, sp, #(4*7 + 4*4)"               "\n"
 
@@ -559,18 +559,18 @@ extern "C" {
             mov ebp, [esp + 28];  /* load fp for JIT code */
             jmp dword ptr [esp + 72];
         }
     }
 
     __declspec(naked) void JaegerTrampolineReturn()
     {
         __asm {
-            mov [ebp + 0x18], edx;
-            mov [ebp + 0x1C], ecx;
+            mov [ebp + 0x18], esi;
+            mov [ebp + 0x1C], edi;
             mov  ebp, esp;
             add  ebp, 0x38; /* Restore stack at STACK_BASE_DIFFERENCE */
             mov  ecx, esp;
             call PopActiveVMFrame;
 
             add esp, 0x2C;
 
             pop ebx;
--- a/js/src/methodjit/MethodJIT.h
+++ b/js/src/methodjit/MethodJIT.h
@@ -532,52 +532,26 @@ TryCompile(JSContext *cx, StackFrame *fp
 
 void
 ReleaseScriptCode(JSContext *cx, JSScript *script, bool normal);
 
 // Expand either the topmost stack frame or all stack frames inlined by the JIT.
 void
 ExpandInlineFrames(JSContext *cx, bool all);
 
-// Information about an unsynced slot within a frame.
-struct UnsyncedEntry
-{
-    // Slot being updated, in bytes from the start of the outer JSStackFrame.
-    int32 offset;
-
-    bool copy : 1;
-    bool constant : 1;
-    bool knownType : 1;
-    union {
-        int32 copiedOffset;
-        Value value;
-        JSValueType type;
-    } u;
-};
-
 // Information about a frame inlined during compilation.
 struct InlineFrame
 {
     InlineFrame *parent;
     jsbytecode *parentpc;
     JSFunction *fun;
 
     // Total distance between the start of the outer JSStackFrame and the start
     // of this frame, in multiples of sizeof(Value).
     uint32 depth;
-
-    // When making a call from an inline frame, only the slots owned by that
-    // frame are guaranteed to be synced. Slots owned by parents (including the
-    // this/callee/args of the call) may not be synced, and if they are
-    // unsynced the entries here describe how to remat them in case of
-    // recompilation. Note that since the arguments cannot be modified within
-    // the call without triggering recompilation, the contents of these parent
-    // slots are invariant within the call.
-    uint32 nUnsyncedEntries;
-    UnsyncedEntry *unsyncedEntries;
 };
 
 struct CallSite
 {
     uint32 codeOffset;
     uint32 inlineIndex;
     uint32 pcOffset;
     size_t id;
--- a/js/src/methodjit/MonoIC.cpp
+++ b/js/src/methodjit/MonoIC.cpp
@@ -1110,17 +1110,21 @@ BumpStackFull(VMFrame &f, uintN inc)
      * effectively starts a fresh stackLimit. Here, we bump f.stackLimit,
      * if necessary, to allow for this 'apply' call, and a reasonable number of
      * subsequent calls, to succeed without hitting the stackLimit. In theory,
     * this allows a recursive chain containing apply to circumvent the stackLimit.
      * However, since each apply call must consume at least MANY_ARGS slots,
      * this sequence will quickly reach the end of the stack and OOM.
      */
     StackSpace &space = f.cx->stack.space();
-    return space.bumpLimit(f.cx, f.entryfp, f.regs.sp, inc, &f.stackLimit);
+    if (!space.bumpLimit(f.cx, f.entryfp, f.regs.sp, inc, &f.stackLimit)) {
+        js_ReportOutOfScriptQuota(f.cx);
+        return false;
+    }
+    return true;
 }
 
 static JS_ALWAYS_INLINE bool
 BumpStack(VMFrame &f, uintN inc)
 {
     /* Fast path BumpStackFull. */
     if (inc < MANY_ARGS && f.regs.sp + inc < f.stackLimit)
         return true;
--- a/js/src/methodjit/Retcon.cpp
+++ b/js/src/methodjit/Retcon.cpp
@@ -79,17 +79,17 @@ AutoScriptRetrapper::untrap(jsbytecode *
 Recompiler::PatchableAddress
 Recompiler::findPatch(JITScript *jit, void **location)
 {
     uint8* codeStart = (uint8 *)jit->code.m_code.executableAddress();
 
     CallSite *callSites_ = jit->callSites();
     for (uint32 i = 0; i < jit->nCallSites; i++) {
         if (callSites_[i].codeOffset + codeStart == *location) {
-            JS_ASSERT(callSites_[i].inlineIndex == uint32(-1));
+            JS_ASSERT(callSites_[i].inlineIndex == analyze::CrossScriptSSA::OUTER_FRAME);
             PatchableAddress result;
             result.location = location;
             result.callSite = callSites_[i];
             return result;
         }
     }
 
     RejoinSite *rejoinSites_ = jit->rejoinSites();
@@ -240,37 +240,17 @@ StackFrame *
 Recompiler::expandInlineFrameChain(JSContext *cx, StackFrame *outer, InlineFrame *inner)
 {
     StackFrame *parent;
     if (inner->parent)
         parent = expandInlineFrameChain(cx, outer, inner->parent);
     else
         parent = outer;
 
-    JaegerSpew(JSpew_Recompile, "Expanding inline frame, %u unsynced entries\n",
-               inner->nUnsyncedEntries);
-
-    /*
-     * Remat any slots in the parent frame which may not be fully synced.
-     * Note that we need to do this *after* fixing the slots in parent frames,
-     * as the parent's own parents may need to be coherent for, e.g. copies
-     * of arguments to get the correct value.
-     */
-    for (unsigned i = 0; i < inner->nUnsyncedEntries; i++) {
-        const UnsyncedEntry &e = inner->unsyncedEntries[i];
-        Value *slot = (Value *) ((uint8 *)outer + e.offset);
-        if (e.copy) {
-            Value *copied = (Value *) ((uint8 *)outer + e.u.copiedOffset);
-            *slot = *copied;
-        } else if (e.constant) {
-            *slot = e.u.value;
-        } else if (e.knownType) {
-            slot->boxNonDoubleFrom(e.u.type, (uint64 *) slot);
-        }
-    }
+    JaegerSpew(JSpew_Recompile, "Expanding inline frame\n");
 
     StackFrame *fp = (StackFrame *) ((uint8 *)outer + sizeof(Value) * inner->depth);
     fp->initInlineFrame(inner->fun, parent, inner->parentpc);
     uint32 pcOffset = inner->parentpc - parent->script()->code;
 
     /*
      * We should have ensured during compilation that the erased frame has JIT
      * code with rejoin points added. We don't try to compile such code on
--- a/js/src/methodjit/StubCalls.cpp
+++ b/js/src/methodjit/StubCalls.cpp
@@ -2908,21 +2908,24 @@ stubs::InvariantFailure(VMFrame &f, void
      * to the recompiler like we are still inside that call, and that after
      * recompilation we will return to the call's rejoin point.
      */
     void *repatchCode = f.scratch;
     JS_ASSERT(repatchCode);
     void **frameAddr = f.returnAddressLocation();
     *frameAddr = repatchCode;
 
-    /* Recompile the script, and don't hoist any bounds checks. */
-    JS_ASSERT(!f.script()->failedBoundsCheck);
-    f.script()->failedBoundsCheck = true;
-
-    Recompiler recompiler(f.cx, f.script());
+    /* Recompile the outermost script, and don't hoist any bounds checks. */
+    JSScript *script = f.fp()->script();
+    JS_ASSERT(!script->failedBoundsCheck);
+    script->failedBoundsCheck = true;
+
+    ExpandInlineFrames(f.cx, true);
+
+    Recompiler recompiler(f.cx, script);
     if (!recompiler.recompile())
         THROWV(NULL);
 
     /* Return the same value (if any) as the call triggering the invariant failure. */
     return rval;
 }
 
 void JS_FASTCALL
--- a/js/src/methodjit/StubCompiler.cpp
+++ b/js/src/methodjit/StubCompiler.cpp
@@ -190,17 +190,20 @@ StubCompiler::emitStubCall(void *ptr, bo
                                     (size_t)ptr, true, needsRejoin);
     site.inlinePatch = inlinePatch;
 
     /* Add a hook for restoring loop invariants if necessary. */
     if (cc.loop && cc.loop->generatingInvariants()) {
         site.loopJumpLabel = masm.label();
         Jump j = masm.jump();
         Label l = masm.label();
-        cc.loop->addInvariantCall(j, l, true, cc.callSites.length(), true);
+        /* MissedBoundsCheck* are not actually called, so f.regs need to be written before InvariantFailure. */
+        bool entry = (ptr == JS_FUNC_TO_DATA_PTR(void *, stubs::MissedBoundsCheckEntry))
+                  || (ptr == JS_FUNC_TO_DATA_PTR(void *, stubs::MissedBoundsCheckHead));
+        cc.loop->addInvariantCall(j, l, true, entry, cc.callSites.length(), true);
     }
 
     cc.addCallSite(site);
     return cl;
 }
 
 void
 StubCompiler::fixCrossJumps(uint8 *ncode, size_t offset, size_t total)
--- a/js/src/methodjit/TrampolineMasmX64.asm
+++ b/js/src/methodjit/TrampolineMasmX64.asm
@@ -99,17 +99,17 @@ JaegerTrampoline PROC FRAME
 
     ; Jump into the JIT code.
     jmp     qword ptr [rsp]
 JaegerTrampoline ENDP
 
 ; void JaegerTrampolineReturn();
 JaegerTrampolineReturn PROC FRAME
     .ENDPROLOG
-    or      rcx, rdx
-    mov     qword ptr [rbx + 30h], rcx
+    or      rsi, rdi
+    mov     qword ptr [rbx + 30h], rsi
     sub     rsp, 20h
     lea     rcx, [rsp+20h]
     call    PopActiveVMFrame
 
     add     rsp, 58h+20h
     pop     rbx
     pop     rsi
--- a/js/src/methodjit/TrampolineMingwX64.s
+++ b/js/src/methodjit/TrampolineMingwX64.s
@@ -109,17 +109,17 @@ JaegerTrampoline:
 # void JaegerTrampolineReturn()#
 .globl JaegerTrampolineReturn
 .def JaegerTrampolineReturn
    .scl 3
    .type 46
 .endef
 JaegerTrampolineReturn:
     # .ENDPROLOG
-    or      rcx, rdx
-    mov     qword ptr [rbx + 0x30], rcx
+    or      rsi, rdi
+    mov     qword ptr [rbx + 0x30], rsi
     sub     rsp, 0x20
     lea     rcx, [rsp+0x20]
     call    PopActiveVMFrame
 
     add     rsp, 0x58+0x20
     pop     rbx
     pop     rsi
--- a/js/src/methodjit/TrampolineSUNWX64.s
+++ b/js/src/methodjit/TrampolineSUNWX64.s
@@ -84,17 +84,17 @@ JaegerTrampoline:
     /* Jump into into the JIT'd code. */
     jmp *0(%rsp)
 .size   JaegerTrampoline, . - JaegerTrampoline
 
 / void JaegerTrampolineReturn()
 .global JaegerTrampolineReturn
 .type   JaegerTrampolineReturn, @function
 JaegerTrampolineReturn:
-    or   %rdx, %rcx
-    movq %rcx, 0x30(%rbx)
+    or   %rdi, %rsi
+    movq %rsi, 0x30(%rbx)
     movq %rsp, %rdi
     call PopActiveVMFrame
 
     addq $0x58, %rsp
     popq %rbx
     popq %r15
     popq %r14
--- a/js/src/methodjit/TrampolineSUNWX86.s
+++ b/js/src/methodjit/TrampolineSUNWX86.s
@@ -69,18 +69,18 @@ JaegerTrampoline:
     movl 28(%esp), %ebp
     jmp  *72(%esp)
 .size   JaegerTrampoline, . - JaegerTrampoline
 
 / void JaegerTrampolineReturn()
 .global JaegerTrampolineReturn
 .type   JaegerTrampolineReturn, @function
 JaegerTrampolineReturn:
-    movl  %edx, 0x18(%ebp)
-    movl  %ecx, 0x1C(%ebp)
+    movl  %esi, 0x18(%ebp)
+    movl  %edi, 0x1C(%ebp)
     movl  %esp, %ebp
     addl  $0x38, %ebp
     pushl %esp
     call PopActiveVMFrame
 
     addl $0x30, %esp
     popl %ebx
     popl %edi
--- a/js/src/methodjit/TrampolineSparc.s
+++ b/js/src/methodjit/TrampolineSparc.s
@@ -57,18 +57,18 @@ JaegerTrampoline:
     jmp     %i2
     st      %i7, [%fp - 12]         ! return address
 .size   JaegerTrampoline, . - JaegerTrampoline
 
 ! void JaegerTrampolineReturn()
 .global JaegerTrampolineReturn
 .type   JaegerTrampolineReturn, #function
 JaegerTrampolineReturn:
-    st      %i0, [%l0 + 0x18]                        /* fp->rval type */
-    st      %i1, [%l0 + 0x1c]                        /* fp->rval data */
+    st      %l2, [%l0 + 0x18]                        /* fp->rval type */
+    st      %l3, [%l0 + 0x1c]                        /* fp->rval data */
     call    PopActiveVMFrame
     mov     %sp, %o0
     ld      [%fp - 12], %i7         ! return address
     mov     1, %i0
     ret
     restore		
 .size   JaegerTrampolineReturn, . - JaegerTrampolineReturn