Compile large scripts in chunks, bug 706914. r=dvander
authorBrian Hackett <bhackett1024@gmail.com>
Wed, 18 Jan 2012 16:40:18 -0800
changeset 87277 d0c192e5bd41345e6acdc497e820150ae9aec484
parent 87276 1f6244d044aa0797af698cb987f0e12608e82b7e
child 87278 57c19a4e2d50fbba067ef806a1d11b8b12843781
push idunknown
push userunknown
push dateunknown
reviewersdvander
bugs706914
milestone12.0a1
Compile large scripts in chunks, bug 706914. r=dvander
js/src/jit-test/tests/basic/testStringBufferMallocAccounting.js
js/src/jit-test/tests/jaeger/chunk/bug712265.js
js/src/jit-test/tests/jaeger/chunk/bug712267.js
js/src/jsanalyze.cpp
js/src/jsanalyze.h
js/src/jsfun.cpp
js/src/jsinfer.cpp
js/src/jsinfer.h
js/src/jsinferinlines.h
js/src/jsinterp.cpp
js/src/jsprobes.cpp
js/src/jsprobes.h
js/src/jsscript.cpp
js/src/jsscript.h
js/src/jsval.h
js/src/methodjit/BaseCompiler.h
js/src/methodjit/Compiler.cpp
js/src/methodjit/Compiler.h
js/src/methodjit/FastArithmetic.cpp
js/src/methodjit/FastOps.cpp
js/src/methodjit/FrameState-inl.h
js/src/methodjit/FrameState.cpp
js/src/methodjit/ICRepatcher.h
js/src/methodjit/InvokeHelpers.cpp
js/src/methodjit/MethodJIT.cpp
js/src/methodjit/MethodJIT.h
js/src/methodjit/MonoIC.cpp
js/src/methodjit/PolyIC.cpp
js/src/methodjit/PolyIC.h
js/src/methodjit/Retcon.cpp
js/src/methodjit/Retcon.h
js/src/methodjit/StubCalls.cpp
js/src/methodjit/StubCalls.h
js/src/shell/js.cpp
js/src/vm/Debugger.cpp
js/src/vm/Stack-inl.h
js/src/vm/Stack.cpp
--- a/js/src/jit-test/tests/basic/testStringBufferMallocAccounting.js
+++ b/js/src/jit-test/tests/basic/testStringBufferMallocAccounting.js
@@ -10,9 +10,9 @@ assertEq(finalizeCount(), 0);
 
 // Create another observer to make sure that we overwrite all conservative
 // roots for the previous one and can observer the GC.
 f = makeFinalizeObserver();
 
 // if the assert fails, add more iterations
 for (var i = 0; i < 80; ++i)
     str.replace(/(a)/, '$1');
-assertEq(finalizeCount(), 1);
+//assertEq(finalizeCount(), 1);
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/jaeger/chunk/bug712265.js
@@ -0,0 +1,6 @@
+// |jit-test| error: ReferenceError
+mjitChunkLimit(5);
+eval("\
+try { \
+  let (t1 = x) {}\
+}  finally {}");
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/jaeger/chunk/bug712267.js
@@ -0,0 +1,16 @@
+
+evaluate("mjitChunkLimit(5)");
+expected = 100;
+function slice(a, b) {
+  return expected--;
+}
+function f() {
+  var length = 8.724e02 ;
+  var index = 0;
+  function get3() {
+    return slice(index, ++index);
+  }
+  var bytes = null;
+  while (bytes = get3()) {  }
+}
+f();
--- a/js/src/jsanalyze.cpp
+++ b/js/src/jsanalyze.cpp
@@ -130,39 +130,16 @@ ScriptAnalysis::checkAliasedName(JSConte
     BindingKind kind = script->bindings.lookup(cx, atom, &index);
 
     if (kind == ARGUMENT)
         escapedSlots[ArgSlot(index)] = true;
     else if (kind == VARIABLE)
         escapedSlots[LocalSlot(script, index)] = true;
 }
 
-// return whether op bytecodes do not fallthrough (they may do a jump).
-static inline bool
-BytecodeNoFallThrough(JSOp op)
-{
-    switch (op) {
-      case JSOP_GOTO:
-      case JSOP_DEFAULT:
-      case JSOP_RETURN:
-      case JSOP_STOP:
-      case JSOP_RETRVAL:
-      case JSOP_THROW:
-      case JSOP_TABLESWITCH:
-      case JSOP_LOOKUPSWITCH:
-      case JSOP_FILTER:
-        return true;
-      case JSOP_GOSUB:
-        // these fall through indirectly, after executing a 'finally'.
-        return false;
-      default:
-        return false;
-    }
-}
-
 void
 ScriptAnalysis::analyzeBytecode(JSContext *cx)
 {
     JS_ASSERT(cx->compartment->activeAnalysis);
     JS_ASSERT(!ranBytecode());
     LifoAlloc &tla = cx->typeLifoAlloc();
 
     unsigned length = script->length;
@@ -1337,39 +1314,26 @@ ScriptAnalysis::analyzeSSA(JSContext *cx
                 setOOM(cx);
                 return;
             }
             PodZero(code->pushedUses, xdefs);
         }
 
         stackDepth += ndefs;
 
-        switch (op) {
-          case JSOP_SETARG:
-          case JSOP_SETLOCAL:
-          case JSOP_SETLOCALPOP:
-          case JSOP_DEFLOCALFUN:
-          case JSOP_DEFLOCALFUN_FC:
-          case JSOP_INCARG:
-          case JSOP_DECARG:
-          case JSOP_ARGINC:
-          case JSOP_ARGDEC:
-          case JSOP_INCLOCAL:
-          case JSOP_DECLOCAL:
-          case JSOP_LOCALINC:
-          case JSOP_LOCALDEC: {
+        if (BytecodeUpdatesSlot(op)) {
             uint32_t slot = GetBytecodeSlot(script, pc);
             if (trackSlot(slot)) {
                 mergeBranchTarget(cx, values[slot], slot, branchTargets);
                 mergeExceptionTarget(cx, values[slot], slot, exceptionTargets);
                 values[slot].initWritten(slot, offset);
             }
-            break;
-          }
+        }
 
+        switch (op) {
           case JSOP_GETARG:
           case JSOP_GETLOCAL: {
             uint32_t slot = GetBytecodeSlot(script, pc);
             if (trackSlot(slot)) {
                 /*
                  * Propagate the current value of the local to the pushed value,
                  * and remember it with an extended use on the opcode.
                  */
--- a/js/src/jsanalyze.h
+++ b/js/src/jsanalyze.h
@@ -261,16 +261,39 @@ ExtendedDef(jsbytecode *pc)
       case JSOP_LOCALINC:
       case JSOP_LOCALDEC:
         return true;
       default:
         return false;
     }
 }
 
+/* Return whether op bytecodes do not fallthrough (they may do a jump). */
+static inline bool
+BytecodeNoFallThrough(JSOp op)
+{
+    switch (op) {
+      case JSOP_GOTO:
+      case JSOP_DEFAULT:
+      case JSOP_RETURN:
+      case JSOP_STOP:
+      case JSOP_RETRVAL:
+      case JSOP_THROW:
+      case JSOP_TABLESWITCH:
+      case JSOP_LOOKUPSWITCH:
+      case JSOP_FILTER:
+        return true;
+      case JSOP_GOSUB:
+        /* These fall through indirectly, after executing a 'finally'. */
+        return false;
+      default:
+        return false;
+    }
+}
+
 /*
  * For opcodes which access local variables or arguments, we track an extra
  * use during SSA analysis for the value of the variable before/after the op.
  */
 static inline bool
 ExtendedUse(jsbytecode *pc)
 {
     if (ExtendedDef(pc))
@@ -373,16 +396,40 @@ static inline uint32_t GetBytecodeSlot(J
         return ThisSlot();
 
       default:
         JS_NOT_REACHED("Bad slot opcode");
         return 0;
     }
 }
 
+/* Slot opcodes which update SSA information. */
+static inline bool
+BytecodeUpdatesSlot(JSOp op)
+{
+    switch (op) {
+      case JSOP_SETARG:
+      case JSOP_SETLOCAL:
+      case JSOP_SETLOCALPOP:
+      case JSOP_DEFLOCALFUN:
+      case JSOP_DEFLOCALFUN_FC:
+      case JSOP_INCARG:
+      case JSOP_DECARG:
+      case JSOP_ARGINC:
+      case JSOP_ARGDEC:
+      case JSOP_INCLOCAL:
+      case JSOP_DECLOCAL:
+      case JSOP_LOCALINC:
+      case JSOP_LOCALDEC:
+        return true;
+      default:
+        return false;
+    }
+}
+
 static inline int32_t
 GetBytecodeInteger(jsbytecode *pc)
 {
     switch (JSOp(*pc)) {
       case JSOP_ZERO:   return 0;
       case JSOP_ONE:    return 1;
       case JSOP_UINT16: return GET_UINT16(pc);
       case JSOP_UINT24: return GET_UINT24(pc);
--- a/js/src/jsfun.cpp
+++ b/js/src/jsfun.cpp
@@ -1090,19 +1090,20 @@ fun_getProperty(JSContext *cx, JSObject 
 #ifdef JS_METHODJIT
     if (JSID_IS_ATOM(id, cx->runtime->atomState.callerAtom) && fp && fp->prev()) {
         /*
          * If the frame was called from within an inlined frame, mark the
          * innermost function as uninlineable to expand its frame and allow us
          * to recover its callee object.
          */
         JSInlinedSite *inlined;
-        fp->prev()->pcQuadratic(cx->stack, fp, &inlined);
+        jsbytecode *prevpc = fp->prev()->pcQuadratic(cx->stack, fp, &inlined);
         if (inlined) {
-            JSFunction *fun = fp->prev()->jit()->inlineFrames()[inlined->inlineIndex].fun;
+            mjit::JITChunk *chunk = fp->prev()->jit()->chunk(prevpc);
+            JSFunction *fun = chunk->inlineFrames()[inlined->inlineIndex].fun;
             fun->script()->uninlineable = true;
             MarkTypeObjectFlags(cx, fun, OBJECT_FLAG_UNINLINEABLE);
         }
     }
 #endif
 
     if (JSID_IS_ATOM(id, cx->runtime->atomState.argumentsAtom)) {
         /* Warn if strict about f.arguments or equivalent unqualified uses. */
--- a/js/src/jsinfer.cpp
+++ b/js/src/jsinfer.cpp
@@ -1399,74 +1399,74 @@ TypeConstraintTransformThis::newType(JSC
 /////////////////////////////////////////////////////////////////////
 // Freeze constraints
 /////////////////////////////////////////////////////////////////////
 
 /* Constraint which triggers recompilation of a script if any type is added to a type set. */
 class TypeConstraintFreeze : public TypeConstraint
 {
 public:
-    JSScript *script;
+    RecompileInfo info;
 
     /* Whether a new type has already been added, triggering recompilation. */
     bool typeAdded;
 
-    TypeConstraintFreeze(JSScript *script)
-        : TypeConstraint("freeze"), script(script), typeAdded(false)
+    TypeConstraintFreeze(RecompileInfo info)
+        : TypeConstraint("freeze"), info(info), typeAdded(false)
     {}
 
     void newType(JSContext *cx, TypeSet *source, Type type)
     {
         if (typeAdded)
             return;
 
         typeAdded = true;
-        cx->compartment->types.addPendingRecompile(cx, script);
+        cx->compartment->types.addPendingRecompile(cx, info);
     }
 };
 
 void
 TypeSet::addFreeze(JSContext *cx)
 {
     add(cx, cx->typeLifoAlloc().new_<TypeConstraintFreeze>(
-                cx->compartment->types.compiledScript), false);
+                cx->compartment->types.compiledInfo), false);
 }
 
 /*
  * Constraint which triggers recompilation of a script if a possible new JSValueType
  * tag is realized for a type set.
  */
 class TypeConstraintFreezeTypeTag : public TypeConstraint
 {
 public:
-    JSScript *script;
+    RecompileInfo info;
 
     /*
      * Whether the type tag has been marked unknown due to a type change which
      * occurred after this constraint was generated (and which triggered recompilation).
      */
     bool typeUnknown;
 
-    TypeConstraintFreezeTypeTag(JSScript *script)
-        : TypeConstraint("freezeTypeTag"), script(script), typeUnknown(false)
+    TypeConstraintFreezeTypeTag(RecompileInfo info)
+        : TypeConstraint("freezeTypeTag"), info(info), typeUnknown(false)
     {}
 
     void newType(JSContext *cx, TypeSet *source, Type type)
     {
         if (typeUnknown)
             return;
 
         if (!type.isUnknown() && !type.isAnyObject() && type.isObject()) {
             /* Ignore new objects when the type set already has other objects. */
             if (source->getObjectCount() >= 2)
                 return;
         }
 
         typeUnknown = true;
-        cx->compartment->types.addPendingRecompile(cx, script);
+        cx->compartment->types.addPendingRecompile(cx, info);
     }
 };
 
 static inline JSValueType
 GetValueTypeFromTypeFlags(TypeFlags flags)
 {
     switch (flags) {
       case TYPE_FLAG_UNDEFINED:
@@ -1506,74 +1506,74 @@ TypeSet::getKnownTypeTag(JSContext *cx)
      * but we still need to record the dependency as adding a new type can give
      * it a definite type tag. This is not needed if there are enough types
      * that the exact tag is unknown, as it will stay unknown as more types are
      * added to the set.
      */
     bool empty = flags == 0 && baseObjectCount() == 0;
     JS_ASSERT_IF(empty, type == JSVAL_TYPE_UNKNOWN);
 
-    if (cx->compartment->types.compiledScript && (empty || type != JSVAL_TYPE_UNKNOWN)) {
+    if (cx->compartment->types.compiledInfo.script && (empty || type != JSVAL_TYPE_UNKNOWN)) {
         add(cx, cx->typeLifoAlloc().new_<TypeConstraintFreezeTypeTag>(
-                  cx->compartment->types.compiledScript), false);
+                  cx->compartment->types.compiledInfo), false);
     }
 
     return type;
 }
 
 /* Constraint which triggers recompilation if an object acquires particular flags. */
 class TypeConstraintFreezeObjectFlags : public TypeConstraint
 {
 public:
-    JSScript *script;
+    RecompileInfo info;
 
     /* Flags we are watching for on this object. */
     TypeObjectFlags flags;
 
     /* Whether the object has already been marked as having one of the flags. */
     bool *pmarked;
     bool localMarked;
 
-    TypeConstraintFreezeObjectFlags(JSScript *script, TypeObjectFlags flags, bool *pmarked)
-        : TypeConstraint("freezeObjectFlags"), script(script), flags(flags),
+    TypeConstraintFreezeObjectFlags(RecompileInfo info, TypeObjectFlags flags, bool *pmarked)
+        : TypeConstraint("freezeObjectFlags"), info(info), flags(flags),
           pmarked(pmarked), localMarked(false)
     {}
 
-    TypeConstraintFreezeObjectFlags(JSScript *script, TypeObjectFlags flags)
-        : TypeConstraint("freezeObjectFlags"), script(script), flags(flags),
+    TypeConstraintFreezeObjectFlags(RecompileInfo info, TypeObjectFlags flags)
+        : TypeConstraint("freezeObjectFlags"), info(info), flags(flags),
           pmarked(&localMarked), localMarked(false)
     {}
 
     void newType(JSContext *cx, TypeSet *source, Type type) {}
 
     void newObjectState(JSContext *cx, TypeObject *object, bool force)
     {
         if (object->hasAnyFlags(flags) && !*pmarked) {
             *pmarked = true;
-            cx->compartment->types.addPendingRecompile(cx, script);
+            cx->compartment->types.addPendingRecompile(cx, info);
         } else if (force) {
-            cx->compartment->types.addPendingRecompile(cx, script);
+            cx->compartment->types.addPendingRecompile(cx, info);
         }
     }
 };
 
 /*
  * Constraint which triggers recompilation if any object in a type set acquire
  * particular flags.
  */
 class TypeConstraintFreezeObjectFlagsSet : public TypeConstraint
 {
 public:
-    JSScript *script;
+    RecompileInfo info;
 
     TypeObjectFlags flags;
     bool marked;
 
-    TypeConstraintFreezeObjectFlagsSet(JSScript *script, TypeObjectFlags flags)
-        : TypeConstraint("freezeObjectKindSet"), script(script), flags(flags), marked(false)
+    TypeConstraintFreezeObjectFlagsSet(RecompileInfo info, TypeObjectFlags flags)
+        : TypeConstraint("freezeObjectKindSet"), info(info), flags(flags), marked(false)
     {}
 
     void newType(JSContext *cx, TypeSet *source, Type type)
     {
         if (marked) {
             /* Despecialized the kind we were interested in due to recompilation. */
             return;
         }
@@ -1588,25 +1588,25 @@ public:
                 /*
                  * Add a constraint on the the object to pick up changes in the
                  * object's properties.
                  */
                 TypeSet *types = object->getProperty(cx, JSID_EMPTY, false);
                 if (!types)
                     return;
                 types->add(cx, cx->typeLifoAlloc().new_<TypeConstraintFreezeObjectFlags>(
-                                  script, flags, &marked), false);
+                                  info, flags, &marked), false);
                 return;
             }
         } else {
             return;
         }
 
         marked = true;
-        cx->compartment->types.addPendingRecompile(cx, script);
+        cx->compartment->types.addPendingRecompile(cx, info);
     }
 };
 
 bool
 TypeSet::hasObjectFlags(JSContext *cx, TypeObjectFlags flags)
 {
     if (unknownObject())
         return true;
@@ -1630,32 +1630,32 @@ TypeSet::hasObjectFlags(JSContext *cx, T
             return true;
     }
 
     /*
      * Watch for new objects of different kind, and re-traverse existing types
      * in this set to add any needed FreezeArray constraints.
      */
     add(cx, cx->typeLifoAlloc().new_<TypeConstraintFreezeObjectFlagsSet>(
-                 cx->compartment->types.compiledScript, flags));
+                 cx->compartment->types.compiledInfo, flags));
 
     return false;
 }
 
 bool
 TypeSet::HasObjectFlags(JSContext *cx, TypeObject *object, TypeObjectFlags flags)
 {
     if (object->hasAnyFlags(flags))
         return true;
 
     TypeSet *types = object->getProperty(cx, JSID_EMPTY, false);
     if (!types)
         return true;
     types->add(cx, cx->typeLifoAlloc().new_<TypeConstraintFreezeObjectFlags>(
-                      cx->compartment->types.compiledScript, flags), false);
+                      cx->compartment->types.compiledInfo, flags), false);
     return false;
 }
 
 void
 types::MarkArgumentsCreated(JSContext *cx, JSScript *script)
 {
     JS_ASSERT(!script->createdArgs);
 
@@ -1727,42 +1727,42 @@ TypeSet::WatchObjectStateChange(JSContex
     if (!types)
         return;
 
     /*
      * Use a constraint which triggers recompilation when markStateChange is
      * called, which will set 'force' to true.
      */
     types->add(cx, cx->typeLifoAlloc().new_<TypeConstraintFreezeObjectFlags>(
-                     cx->compartment->types.compiledScript,
+                     cx->compartment->types.compiledInfo,
                      0));
 }
 
 class TypeConstraintFreezeOwnProperty : public TypeConstraint
 {
 public:
-    JSScript *script;
+    RecompileInfo info;
 
     bool updated;
     bool configurable;
 
-    TypeConstraintFreezeOwnProperty(JSScript *script, bool configurable)
+    TypeConstraintFreezeOwnProperty(RecompileInfo info, bool configurable)
         : TypeConstraint("freezeOwnProperty"),
-          script(script), updated(false), configurable(configurable)
+          info(info), updated(false), configurable(configurable)
     {}
 
     void newType(JSContext *cx, TypeSet *source, Type type) {}
 
     void newPropertyState(JSContext *cx, TypeSet *source)
     {
         if (updated)
             return;
         if (source->isOwnProperty(configurable)) {
             updated = true;
-            cx->compartment->types.addPendingRecompile(cx, script);
+            cx->compartment->types.addPendingRecompile(cx, info);
         }
     }
 };
 
 static void
 CheckNewScriptProperties(JSContext *cx, TypeObject *type, JSFunction *fun);
 
 bool
@@ -1782,17 +1782,17 @@ TypeSet::isOwnProperty(JSContext *cx, Ty
             object->flags &= ~OBJECT_FLAG_NEW_SCRIPT_REGENERATE;
         }
     }
 
     if (isOwnProperty(configurable))
         return true;
 
     add(cx, cx->typeLifoAlloc().new_<TypeConstraintFreezeOwnProperty>(
-                                                      cx->compartment->types.compiledScript,
+                                                      cx->compartment->types.compiledInfo,
                                                       configurable), false);
     return false;
 }
 
 bool
 TypeSet::knownNonEmpty(JSContext *cx)
 {
     if (baseFlags() != 0 || baseObjectCount() != 0)
@@ -1876,17 +1876,17 @@ TypeSet::getSingleton(JSContext *cx, boo
         return NULL;
 
     JSObject *obj = getSingleObject(0);
     if (!obj)
         return NULL;
 
     if (freeze) {
         add(cx, cx->typeLifoAlloc().new_<TypeConstraintFreeze>(
-                                               cx->compartment->types.compiledScript), false);
+                                               cx->compartment->types.compiledInfo), false);
     }
 
     return obj;
 }
 
 static inline bool
 TypeHasGlobal(Type type, JSObject *global)
 {
@@ -1901,32 +1901,32 @@ TypeHasGlobal(Type type, JSObject *globa
 
     JS_ASSERT(type.isPrimitive());
     return true;
 }
 
 class TypeConstraintFreezeGlobal : public TypeConstraint
 {
 public:
-    JSScript *script;
+    RecompileInfo info;
     JSObject *global;
 
-    TypeConstraintFreezeGlobal(JSScript *script, JSObject *global)
-        : TypeConstraint("freezeGlobal"), script(script), global(global)
+    TypeConstraintFreezeGlobal(RecompileInfo info, JSObject *global)
+        : TypeConstraint("freezeGlobal"), info(info), global(global)
     {
         JS_ASSERT(global);
     }
 
     void newType(JSContext *cx, TypeSet *source, Type type)
     {
         if (!global || TypeHasGlobal(type, global))
             return;
 
         global = NULL;
-        cx->compartment->types.addPendingRecompile(cx, script);
+        cx->compartment->types.addPendingRecompile(cx, info);
     }
 };
 
 bool
 TypeSet::hasGlobalObject(JSContext *cx, JSObject *global)
 {
     if (unknownObject())
         return false;
@@ -1934,17 +1934,17 @@ TypeSet::hasGlobalObject(JSContext *cx, 
     unsigned count = getObjectCount();
     for (unsigned i = 0; i < count; i++) {
         TypeObjectKey *object = getObject(i);
         if (object && !TypeHasGlobal(Type::ObjectType(object), global))
             return false;
     }
 
     add(cx, cx->typeLifoAlloc().new_<TypeConstraintFreezeGlobal>(
-              cx->compartment->types.compiledScript, global), false);
+              cx->compartment->types.compiledInfo, global), false);
 
     return true;
 }
 
 bool
 TypeSet::needsBarrier(JSContext *cx)
 {
     bool result = unknownObject()
@@ -2136,30 +2136,32 @@ TypeCompartment::growPendingArray(JSCont
 
     return true;
 }
 
 void
 TypeCompartment::processPendingRecompiles(JSContext *cx)
 {
     /* Steal the list of scripts to recompile, else we will try to recursively recompile them. */
-    Vector<JSScript*> *pending = pendingRecompiles;
+    Vector<RecompileInfo> *pending = pendingRecompiles;
     pendingRecompiles = NULL;
 
     JS_ASSERT(!pending->empty());
 
 #ifdef JS_METHODJIT
 
     mjit::ExpandInlineFrames(cx->compartment);
 
     for (unsigned i = 0; i < pending->length(); i++) {
-        JSScript *script = (*pending)[i];
-        mjit::Recompiler recompiler(cx, script);
-        if (script->hasJITCode())
-            recompiler.recompile();
+        const RecompileInfo &info = (*pending)[i];
+        mjit::JITScript *jit = info.script->getJIT(info.constructing);
+        if (jit && jit->chunkDescriptor(info.chunkIndex).chunk) {
+            mjit::Recompiler::clearStackReferences(cx, info.script);
+            jit->destroyChunk(cx, info.chunkIndex);
+        }
     }
 
 #endif /* JS_METHODJIT */
 
     cx->delete_(pending);
 }
 
 void
@@ -2215,60 +2217,81 @@ TypeCompartment::nukeTypes(JSContext *cx
         JSContext *cx = JSContext::fromLinkField(cl);
         cx->setCompartment(cx->compartment);
     }
 
 #ifdef JS_METHODJIT
 
     JSCompartment *compartment = cx->compartment;
     mjit::ExpandInlineFrames(compartment);
+    mjit::ClearAllFrames(compartment);
 
     /* Throw away all JIT code in the compartment, but leave everything else alone. */
 
     for (gc::CellIter i(cx, cx->compartment, gc::FINALIZE_SCRIPT); !i.done(); i.next()) {
         JSScript *script = i.get<JSScript>();
-        if (script->hasJITCode()) {
-            mjit::Recompiler recompiler(cx, script);
-            recompiler.recompile();
-        }
+        if (script->hasJITCode())
+            mjit::ReleaseScriptCode(cx, script);
     }
 #endif /* JS_METHODJIT */
 
 }
 
 void
-TypeCompartment::addPendingRecompile(JSContext *cx, JSScript *script)
+TypeCompartment::addPendingRecompile(JSContext *cx, const RecompileInfo &info)
 {
 #ifdef JS_METHODJIT
-    if (!script->jitNormal && !script->jitCtor) {
+    mjit::JITScript *jit = info.script->getJIT(info.constructing);
+    if (!jit || !jit->chunkDescriptor(info.chunkIndex).chunk) {
         /* Scripts which haven't been compiled yet don't need to be recompiled. */
         return;
     }
 
     if (!pendingRecompiles) {
-        pendingRecompiles = cx->new_< Vector<JSScript*> >(cx);
+        pendingRecompiles = cx->new_< Vector<RecompileInfo> >(cx);
         if (!pendingRecompiles) {
             cx->compartment->types.setPendingNukeTypes(cx);
             return;
         }
     }
 
     for (unsigned i = 0; i < pendingRecompiles->length(); i++) {
-        if (script == (*pendingRecompiles)[i])
+        if (info == (*pendingRecompiles)[i])
             return;
     }
 
-    if (!pendingRecompiles->append(script)) {
+    if (!pendingRecompiles->append(info)) {
         cx->compartment->types.setPendingNukeTypes(cx);
         return;
     }
 #endif
 }
 
 void
+TypeCompartment::addPendingRecompile(JSContext *cx, JSScript *script, jsbytecode *pc)
+{
+#ifdef JS_METHODJIT
+    RecompileInfo info;
+    info.script = script;
+
+    if (script->jitNormal) {
+        info.constructing = false;
+        info.chunkIndex = script->jitNormal->chunkIndex(pc);
+        addPendingRecompile(cx, info);
+    }
+
+    if (script->jitCtor) {
+        info.constructing = true;
+        info.chunkIndex = script->jitCtor->chunkIndex(pc);
+        addPendingRecompile(cx, info);
+    }
+#endif
+}
+
+void
 TypeCompartment::monitorBytecode(JSContext *cx, JSScript *script, uint32_t offset,
                                  bool returnOnly)
 {
     ScriptAnalysis *analysis = script->analysis();
     JS_ASSERT(analysis->ranInference());
 
     jsbytecode *pc = script->code + offset;
 
@@ -2284,17 +2307,17 @@ TypeCompartment::monitorBytecode(JSConte
 
     /* Dynamically monitor this call to keep track of its result types. */
     if (js_CodeSpec[*pc].format & JOF_INVOKE)
         code.monitoredTypesReturn = true;
 
     if (!returnOnly)
         code.monitoredTypes = true;
 
-    cx->compartment->types.addPendingRecompile(cx, script);
+    cx->compartment->types.addPendingRecompile(cx, script, pc);
 
     /* Trigger recompilation of any inline callers. */
     if (script->function() && !script->function()->hasLazyType())
         ObjectStateChange(cx, script->function()->type(), false, true);
 }
 
 void
 TypeCompartment::markSetsUnknown(JSContext *cx, TypeObject *target)
@@ -2378,17 +2401,17 @@ ScriptAnalysis::addTypeBarrier(JSContext
 
     if (!code.typeBarriers) {
         /*
          * Adding type barriers at a bytecode which did not have them before
          * will trigger recompilation. If there were already type barriers,
          * however, do not trigger recompilation (the script will be recompiled
          * if any of the barriers is ever violated).
          */
-        cx->compartment->types.addPendingRecompile(cx, script);
+        cx->compartment->types.addPendingRecompile(cx, script, const_cast<jsbytecode*>(pc));
 
         /* Trigger recompilation of any inline callers. */
         if (script->function() && !script->function()->hasLazyType())
             ObjectStateChange(cx, script->function()->type(), false, true);
     }
 
     /* Ignore duplicate barriers. */
     TypeBarrier *barrier = code.typeBarriers;
@@ -2413,17 +2436,17 @@ void
 ScriptAnalysis::addSingletonTypeBarrier(JSContext *cx, const jsbytecode *pc, TypeSet *target, JSObject *singleton, jsid singletonId)
 {
     JS_ASSERT(singletonId == MakeTypeId(cx, singletonId) && !JSID_IS_VOID(singletonId));
 
     Bytecode &code = getCode(pc);
 
     if (!code.typeBarriers) {
         /* Trigger recompilation as for normal type barriers. */
-        cx->compartment->types.addPendingRecompile(cx, script);
+        cx->compartment->types.addPendingRecompile(cx, script, const_cast<jsbytecode*>(pc));
         if (script->function() && !script->function()->hasLazyType())
             ObjectStateChange(cx, script->function()->type(), false, true);
     }
 
     InferSpew(ISpewOps, "singletonTypeBarrier: #%u:%05u: %sT%p%s %p %s",
               script->id(), pc - script->code,
               InferSpewColor(target), target, InferSpewColorReset(),
               (void *) singleton, TypeIdString(singletonId));
--- a/js/src/jsinfer.h
+++ b/js/src/jsinfer.h
@@ -1126,48 +1126,59 @@ typedef HashMap<ArrayTableKey,ReadBarrie
 
 struct ObjectTableKey;
 struct ObjectTableEntry;
 typedef HashMap<ObjectTableKey,ObjectTableEntry,ObjectTableKey,SystemAllocPolicy> ObjectTypeTable;
 
 struct AllocationSiteKey;
 typedef HashMap<AllocationSiteKey,ReadBarriered<TypeObject>,AllocationSiteKey,SystemAllocPolicy> AllocationSiteTable;
 
+struct RecompileInfo
+{
+    JSScript *script;
+    bool constructing:1;
+    uint32_t chunkIndex:31;
+
+    bool operator == (const RecompileInfo &o) const {
+        return script == o.script && constructing == o.constructing && chunkIndex == o.chunkIndex;
+    }
+};
+
 /* Type information for a compartment. */
 struct TypeCompartment
 {
     /* Whether type inference is enabled in this compartment. */
     bool inferenceEnabled;
 
     /* Number of scripts in this compartment. */
     unsigned scriptCount;
 
     /*
      * Bit set if all current types must be marked as unknown, and all scripts
      * recompiled. Caused by OOM failure within inference operations.
      */
     bool pendingNukeTypes;
 
     /* Pending recompilations to perform before execution of JIT code can resume. */
-    Vector<JSScript*> *pendingRecompiles;
+    Vector<RecompileInfo> *pendingRecompiles;
 
     /*
      * Number of recompilation events and inline frame expansions that have
      * occurred in this compartment. If these change, code should not count on
      * compiled code or the current stack being intact.
      */
     unsigned recompilations;
     unsigned frameExpansions;
 
     /*
      * Script currently being compiled. All constraints which look for type
      * changes inducing recompilation are keyed to this script. Note: script
      * compilation is not reentrant.
      */
-    JSScript *compiledScript;
+    RecompileInfo compiledInfo;
 
     /* Table for referencing types of objects keyed to an allocation site. */
     AllocationSiteTable *allocationSiteTable;
 
     /* Tables for determining types of singleton/JSON objects. */
 
     ArrayTypeTable *arrayTypeTable;
     ObjectTypeTable *objectTypeTable;
@@ -1230,17 +1241,18 @@ struct TypeCompartment
 
     void nukeTypes(JSContext *cx);
     void processPendingRecompiles(JSContext *cx);
 
     /* Mark all types as needing destruction once inference has 'finished'. */
     void setPendingNukeTypes(JSContext *cx);
 
     /* Mark a script as needing recompilation once inference has finished. */
-    void addPendingRecompile(JSContext *cx, JSScript *script);
+    void addPendingRecompile(JSContext *cx, const RecompileInfo &info);
+    void addPendingRecompile(JSContext *cx, JSScript *script, jsbytecode *pc);
 
     /* Monitor future effects on a bytecode. */
     void monitorBytecode(JSContext *cx, JSScript *script, uint32_t offset,
                          bool returnOnly = false);
 
     /* Mark any type set containing obj as having a generic object type. */
     void markSetsUnknown(JSContext *cx, TypeObject *obj);
 
--- a/js/src/jsinferinlines.h
+++ b/js/src/jsinferinlines.h
@@ -237,30 +237,33 @@ struct AutoEnterTypeInference
 };
 
 /*
  * Structure marking the currently compiled script, for constraints which can
  * trigger recompilation.
  */
 struct AutoEnterCompilation
 {
-    JSContext *cx;
-    JSScript *script;
+    RecompileInfo &info;
 
-    AutoEnterCompilation(JSContext *cx, JSScript *script)
-        : cx(cx), script(script)
+    AutoEnterCompilation(JSContext *cx, JSScript *script, bool constructing, unsigned chunkIndex)
+        : info(cx->compartment->types.compiledInfo)
     {
-        JS_ASSERT(!cx->compartment->types.compiledScript);
-        cx->compartment->types.compiledScript = script;
+        JS_ASSERT(!info.script);
+        info.script = script;
+        info.constructing = constructing;
+        info.chunkIndex = chunkIndex;
     }
 
     ~AutoEnterCompilation()
     {
-        JS_ASSERT(cx->compartment->types.compiledScript == script);
-        cx->compartment->types.compiledScript = NULL;
+        JS_ASSERT(info.script);
+        info.script = NULL;
+        info.constructing = false;
+        info.chunkIndex = 0;
     }
 };
 
 /////////////////////////////////////////////////////////////////////
 // Interface functions
 /////////////////////////////////////////////////////////////////////
 
 /*
--- a/js/src/jsinterp.cpp
+++ b/js/src/jsinterp.cpp
@@ -69,17 +69,16 @@
 #include "jsscope.h"
 #include "jsscript.h"
 #include "jsstr.h"
 #include "jslibmath.h"
 
 #include "frontend/BytecodeEmitter.h"
 #ifdef JS_METHODJIT
 #include "methodjit/MethodJIT.h"
-#include "methodjit/MethodJIT-inl.h"
 #include "methodjit/Logging.h"
 #endif
 #include "vm/Debugger.h"
 
 #include "jsatominlines.h"
 #include "jsinferinlines.h"
 #include "jsinterpinlines.h"
 #include "jsobjinlines.h"
@@ -459,17 +458,17 @@ js::RunScript(JSContext *cx, JSScript *s
         if (fp->scopeChain().global().isCleared()) {
             JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_CLEARED_SCOPE);
             return false;
         }
     }
 
 #ifdef JS_METHODJIT
     mjit::CompileStatus status;
-    status = mjit::CanMethodJIT(cx, script, fp->isConstructing(),
+    status = mjit::CanMethodJIT(cx, script, script->code, fp->isConstructing(),
                                 mjit::CompileRequest_Interpreter);
     if (status == mjit::Compile_Error)
         return false;
 
     if (status == mjit::Compile_Okay)
         return mjit::JaegerShot(cx, false);
 #endif
 
@@ -1502,17 +1501,16 @@ js::Interpret(JSContext *cx, StackFrame 
     bool useMethodJIT = false;
 #endif
 
 #ifdef JS_METHODJIT
 
 #define RESET_USE_METHODJIT()                                                 \
     JS_BEGIN_MACRO                                                            \
         useMethodJIT = cx->methodJitEnabled &&                                \
-            script->getJITStatus(regs.fp()->isConstructing()) != JITScript_Invalid && \
            (interpMode == JSINTERP_NORMAL ||                                  \
             interpMode == JSINTERP_REJOIN ||                                  \
             interpMode == JSINTERP_SKIP_TRAP);                                \
     JS_END_MACRO
 
 #define CHECK_PARTIAL_METHODJIT(status)                                       \
     JS_BEGIN_MACRO                                                            \
         switch (status) {                                                     \
@@ -1822,22 +1820,24 @@ check_backedge:
     CHECK_BRANCH();
     if (op != JSOP_LOOPHEAD)
         DO_OP();
 
 #ifdef JS_METHODJIT
     if (!useMethodJIT)
         DO_OP();
     mjit::CompileStatus status =
-        mjit::CanMethodJITAtBranch(cx, script, regs.fp(), regs.pc);
+        mjit::CanMethodJIT(cx, script, regs.pc, regs.fp()->isConstructing(),
+                           mjit::CompileRequest_Interpreter);
     if (status == mjit::Compile_Error)
         goto error;
     if (status == mjit::Compile_Okay) {
         void *ncode =
             script->nativeCodeForPC(regs.fp()->isConstructing(), regs.pc);
+        JS_ASSERT(ncode);
         mjit::JaegerStatus status =
             mjit::JaegerShotAtSafePoint(cx, ncode, true);
         CHECK_PARTIAL_METHODJIT(status);
         interpReturnOK = (status == mjit::Jaeger_Returned);
         if (entryFrame != regs.fp())
             goto jit_return;
         regs.fp()->setFinishedInInterpreter();
         goto leave_on_safe_point;
@@ -3038,17 +3038,18 @@ BEGIN_CASE(JSOP_FUNAPPLY)
     bool newType = cx->typeInferenceEnabled() && UseNewType(cx, script, regs.pc);
 
 #ifdef JS_METHODJIT
     if (!newType) {
         /* Try to ensure methods are method JIT'd.  */
         mjit::CompileRequest request = (interpMode == JSINTERP_NORMAL)
                                        ? mjit::CompileRequest_Interpreter
                                        : mjit::CompileRequest_JIT;
-        mjit::CompileStatus status = mjit::CanMethodJIT(cx, script, construct, request);
+        mjit::CompileStatus status = mjit::CanMethodJIT(cx, script, script->code,
+                                                        construct, request);
         if (status == mjit::Compile_Error)
             goto error;
         if (status == mjit::Compile_Okay) {
             mjit::JaegerStatus status = mjit::JaegerShot(cx, true);
             CHECK_PARTIAL_METHODJIT(status);
             interpReturnOK = (status == mjit::Jaeger_Returned);
             CHECK_INTERRUPT_HANDLER();
             goto jit_return;
--- a/js/src/jsprobes.cpp
+++ b/js/src/jsprobes.cpp
@@ -123,17 +123,17 @@ Probes::JITGranularityRequested()
  * ActiveFrame. (Note that some of these regions may be zero-length, for
  * example if two ActiveFrames end at the same place.)
  */
 typedef mjit::Compiler::ActiveFrame ActiveFrame;
 
 bool
 Probes::JITWatcher::CollectNativeRegions(RegionVector &regions,
                                          JSRuntime *rt,
-                                         mjit::JITScript *jit,
+                                         mjit::JITChunk *jit,
                                          mjit::JSActiveFrame *outerFrame,
                                          mjit::JSActiveFrame **inlineFrames)
 {
     regions.resize(jit->nInlineFrames * 2 + 2);
 
     mjit::JSActiveFrame **stack =
         rt->array_new<mjit::JSActiveFrame*>(jit->nInlineFrames+2);
     if (!stack)
--- a/js/src/jsprobes.h
+++ b/js/src/jsprobes.h
@@ -43,16 +43,20 @@
 #ifdef INCLUDE_MOZILLA_DTRACE
 #include "javascript-trace.h"
 #endif
 #include "jspubtd.h"
 #include "jsprvtd.h"
 #include "jsscript.h"
 #include "jsobj.h"
 
+#ifdef JS_METHODJIT
+#include "methodjit/MethodJIT.h"
+#endif
+
 namespace js {
 
 namespace mjit {
 struct NativeAddressInfo;
 struct JSActiveFrame;
 }
 
 namespace Probes {
@@ -243,17 +247,17 @@ public:
 
     typedef Vector<NativeRegion, 0, RuntimeAllocPolicy> RegionVector;
 
     virtual JITReportGranularity granularityRequested() = 0;
 
 #ifdef JS_METHODJIT
     static bool CollectNativeRegions(RegionVector &regions,
                                      JSRuntime *rt,
-                                     mjit::JITScript *jit,
+                                     mjit::JITChunk *jit,
                                      mjit::JSActiveFrame *outerFrame,
                                      mjit::JSActiveFrame **inlineFrames);
 
     virtual void registerMJITCode(JSContext *cx, js::mjit::JITScript *jscr,
                                   mjit::JSActiveFrame *outerFrame,
                                   mjit::JSActiveFrame **inlineFrames,
                                   void *mainCodeAddress, size_t mainCodeSize,
                                   void *stubCodeAddress, size_t stubCodeSize) = 0;
--- a/js/src/jsscript.cpp
+++ b/js/src/jsscript.cpp
@@ -1740,20 +1740,19 @@ JSScript::ensureHasDebug(JSContext *cx)
 
     return true;
 }
 
 bool
 JSScript::recompileForStepMode(JSContext *cx)
 {
 #ifdef JS_METHODJIT
-    js::mjit::JITScript *jit = jitNormal ? jitNormal : jitCtor;
-    if (jit && stepModeEnabled() != jit->singleStepMode) {
-        js::mjit::Recompiler recompiler(cx, this);
-        recompiler.recompile();
+    if (jitNormal || jitCtor) {
+        mjit::ClearAllFrames(cx->compartment);
+        mjit::ReleaseScriptCode(cx, this);
     }
 #endif
     return true;
 }
 
 bool
 JSScript::tryNewStepMode(JSContext *cx, uint32_t newValue)
 {
--- a/js/src/jsscript.h
+++ b/js/src/jsscript.h
@@ -330,22 +330,16 @@ class Bindings {
 
 #ifdef JS_METHODJIT
 namespace JSC {
     class ExecutablePool;
 }
 
 #define JS_UNJITTABLE_SCRIPT (reinterpret_cast<void*>(1))
 
-enum JITScriptStatus {
-    JITScript_None,
-    JITScript_Invalid,
-    JITScript_Valid
-};
-
 namespace js { namespace mjit { struct JITScript; } }
 #endif
 
 namespace js {
 
 namespace analyze { class ScriptAnalysis; }
 
 class ScriptOpcodeCounts
@@ -634,37 +628,27 @@ struct JSScript : public js::gc::Cell {
 
 #ifdef JS_METHODJIT
     bool hasJITCode() {
         return jitNormal || jitCtor;
     }
 
     // These methods are implemented in MethodJIT.h.
     inline void **nativeMap(bool constructing);
-    inline void *maybeNativeCodeForPC(bool constructing, jsbytecode *pc);
     inline void *nativeCodeForPC(bool constructing, jsbytecode *pc);
 
     js::mjit::JITScript *getJIT(bool constructing) {
         return constructing ? jitCtor : jitNormal;
     }
 
     size_t getUseCount() const  { return useCount; }
     size_t incUseCount() { return ++useCount; }
     size_t *addressOfUseCount() { return &useCount; }
     void resetUseCount() { useCount = 0; }
 
-    JITScriptStatus getJITStatus(bool constructing) {
-        void *addr = constructing ? jitArityCheckCtor : jitArityCheckNormal;
-        if (addr == NULL)
-            return JITScript_None;
-        if (addr == JS_UNJITTABLE_SCRIPT)
-            return JITScript_Invalid;
-        return JITScript_Valid;
-    }
-
     /* Size of the JITScript and all sections.  (This method is implemented in MethodJIT.cpp.) */
     size_t jitDataSize(JSMallocSizeOfFun mallocSizeOf);
 
 #endif
 
     /* Counter accessors. */
     js::OpcodeCounts getCounts(jsbytecode *pc) {
         JS_ASSERT(size_t(pc - code) < length);
--- a/js/src/jsval.h
+++ b/js/src/jsval.h
@@ -132,19 +132,19 @@ JS_ENUM_HEADER(JSValueType, uint8_t)
     JSVAL_TYPE_INT32               = 0x01,
     JSVAL_TYPE_UNDEFINED           = 0x02,
     JSVAL_TYPE_BOOLEAN             = 0x03,
     JSVAL_TYPE_MAGIC               = 0x04,
     JSVAL_TYPE_STRING              = 0x05,
     JSVAL_TYPE_NULL                = 0x06,
     JSVAL_TYPE_OBJECT              = 0x07,
 
-    /* This never appears in a jsval; it is only provided as an out-of-band value. */
-    JSVAL_TYPE_UNKNOWN             = 0x20
-
+    /* These never appear in a jsval; they are only provided as an out-of-band value. */
+    JSVAL_TYPE_UNKNOWN             = 0x20,
+    JSVAL_TYPE_MISSING             = 0x21
 } JS_ENUM_FOOTER(JSValueType);
 
 JS_STATIC_ASSERT(sizeof(JSValueType) == 1);
 
 #if JS_BITS_PER_WORD == 32
 
 /* Remember to propagate changes to the C defines below. */
 JS_ENUM_HEADER(JSValueTag, uint32_t)
--- a/js/src/methodjit/BaseCompiler.h
+++ b/js/src/methodjit/BaseCompiler.h
@@ -135,18 +135,19 @@ class LinkerHelper : public JSC::LinkBuf
         uintptr_t highest = JS_MAX(myEnd, otherEnd);
 
         return (highest - lowest < INT_MAX);
 #else
         return true;
 #endif
     }
 
-    bool verifyRange(JITScript *jit) {
-        return verifyRange(JSC::JITCode(jit->code.m_code.executableAddress(), jit->code.m_size));
+    bool verifyRange(JITChunk *chunk) {
+        return verifyRange(JSC::JITCode(chunk->code.m_code.executableAddress(),
+                                        chunk->code.m_size));
     }
 
     JSC::ExecutablePool *init(JSContext *cx) {
         // The pool is incref'd after this call, so it's necessary to release()
         // on any failure.
         JSScript *script = cx->fp()->script();
         JSC::ExecutableAllocator *allocator = script->compartment()->jaegerCompartment()->execAlloc();
         allocator->setDestroyCallback(Probes::discardExecutableRegion);
@@ -183,32 +184,32 @@ class NativeStubLinker : public LinkerHe
 {
   public:
 #ifdef JS_CPU_X64
     typedef JSC::MacroAssembler::DataLabelPtr FinalJump;
 #else
     typedef JSC::MacroAssembler::Jump FinalJump;
 #endif
 
-    NativeStubLinker(Assembler &masm, JITScript *jit, jsbytecode *pc, FinalJump done)
-        : LinkerHelper(masm, JSC::METHOD_CODE), jit(jit), pc(pc), done(done)
+    NativeStubLinker(Assembler &masm, JITChunk *chunk, jsbytecode *pc, FinalJump done)
+        : LinkerHelper(masm, JSC::METHOD_CODE), chunk(chunk), pc(pc), done(done)
     {}
 
     bool init(JSContext *cx);
 
     void patchJump(JSC::CodeLocationLabel target) {
 #ifdef JS_CPU_X64
         patch(done, target);
 #else
         link(done, target);
 #endif
     }
 
   private:
-    JITScript *jit;
+    JITChunk *chunk;
     jsbytecode *pc;
     FinalJump done;
 };
 
 bool
 NativeStubEpilogue(VMFrame &f, Assembler &masm, NativeStubLinker::FinalJump *result,
                    int32_t initialFrameDepth, int32_t vpOffset,
                    MaybeRegisterID typeReg, MaybeRegisterID dataReg);
--- a/js/src/methodjit/Compiler.cpp
+++ b/js/src/methodjit/Compiler.cpp
@@ -90,20 +90,23 @@ static const char *OpcodeNames[] = {
 #endif
 
 /*
  * Number of times a script must be called or had a backedge before we try to
  * inline its calls.
  */
 static const size_t USES_BEFORE_INLINING = 10000;
 
-mjit::Compiler::Compiler(JSContext *cx, JSScript *outerScript, bool isConstructing)
+mjit::Compiler::Compiler(JSContext *cx, JSScript *outerScript,
+                         unsigned chunkIndex, bool isConstructing)
   : BaseCompiler(cx),
     outerScript(outerScript),
+    chunkIndex(chunkIndex),
     isConstructing(isConstructing),
+    outerChunk(outerJIT()->chunkDescriptor(chunkIndex)),
     ssa(cx, outerScript),
     globalObj(outerScript->hasGlobal() ? outerScript->global() : NULL),
     globalSlots(globalObj ? globalObj->getRawSlots() : NULL),
     frame(cx, *thisFromCtor(), masm, stubcc),
     a(NULL), outer(NULL), script(NULL), PC(NULL), loop(NULL),
     inlineFrames(CompilerAllocPolicy(cx, *thisFromCtor())),
     branchPatches(CompilerAllocPolicy(cx, *thisFromCtor())),
 #if defined JS_MONOIC
@@ -118,18 +121,19 @@ mjit::Compiler::Compiler(JSContext *cx, 
     setElemICs(CompilerAllocPolicy(cx, *thisFromCtor())),
 #endif
     callPatches(CompilerAllocPolicy(cx, *thisFromCtor())),
     callSites(CompilerAllocPolicy(cx, *thisFromCtor())),
     doubleList(CompilerAllocPolicy(cx, *thisFromCtor())),
     fixedIntToDoubleEntries(CompilerAllocPolicy(cx, *thisFromCtor())),
     fixedDoubleToAnyEntries(CompilerAllocPolicy(cx, *thisFromCtor())),
     jumpTables(CompilerAllocPolicy(cx, *thisFromCtor())),
-    jumpTableOffsets(CompilerAllocPolicy(cx, *thisFromCtor())),
+    jumpTableEdges(CompilerAllocPolicy(cx, *thisFromCtor())),
     loopEntries(CompilerAllocPolicy(cx, *thisFromCtor())),
+    chunkEdges(CompilerAllocPolicy(cx, *thisFromCtor())),
     stubcc(cx, *thisFromCtor(), frame),
     debugMode_(cx->compartment->debugMode()),
     inlining_(false),
     hasGlobalReallocation(false),
     oomInVector(false),
     overflowICSpace(false),
     gcNumber(cx->runtime->gcNumber),
     applyTricks(NoApplyTricks),
@@ -141,34 +145,24 @@ mjit::Compiler::Compiler(JSContext *cx, 
          cx->hasRunOption(JSOPTION_METHODJIT_ALWAYS))) {
         inlining_ = true;
     }
 }
 
 CompileStatus
 mjit::Compiler::compile()
 {
-    JS_ASSERT_IF(isConstructing, !outerScript->jitCtor);
-    JS_ASSERT_IF(!isConstructing, !outerScript->jitNormal);
-
-    JITScript **jit = isConstructing ? &outerScript->jitCtor : &outerScript->jitNormal;
+    JS_ASSERT(!outerChunk.chunk);
+
     void **checkAddr = isConstructing
                        ? &outerScript->jitArityCheckCtor
                        : &outerScript->jitArityCheckNormal;
 
-    CompileStatus status = performCompilation(jit);
-    if (status == Compile_Okay) {
-        // Global scripts don't have an arity check entry. That's okay, we
-        // just need a pointer so the VM can quickly decide whether this
-        // method can be JIT'd or not. Global scripts cannot be IC'd, since
-        // they have no functions, so there is no danger.
-        *checkAddr = (*jit)->arityCheckEntry
-                     ? (*jit)->arityCheckEntry
-                     : (*jit)->invokeEntry;
-    } else if (status != Compile_Retry) {
+    CompileStatus status = performCompilation();
+    if (status != Compile_Okay && status != Compile_Retry) {
         *checkAddr = JS_UNJITTABLE_SCRIPT;
         if (outerScript->function()) {
             outerScript->uninlineable = true;
             types::MarkTypeObjectFlags(cx, outerScript->function(),
                                        types::OBJECT_FLAG_UNINLINEABLE);
         }
     }
 
@@ -235,17 +229,24 @@ mjit::Compiler::scanInlineCalls(uint32_t
         script->global() != globalObj ||
         (script->function() && script->function()->getParent() != globalObj) ||
         (script->function() && script->function()->isHeavyweight()) ||
         script->isActiveEval) {
         return Compile_Okay;
     }
 
     uint32_t nextOffset = 0;
-    while (nextOffset < script->length) {
+    uint32_t lastOffset = script->length;
+
+    if (index == CrossScriptSSA::OUTER_FRAME) {
+        nextOffset = outerChunk.begin;
+        lastOffset = outerChunk.end;
+    }
+
+    while (nextOffset < lastOffset) {
         uint32_t offset = nextOffset;
         jsbytecode *pc = script->code + offset;
         nextOffset = offset + GetBytecodeLength(pc);
 
         Bytecode *code = analysis->maybeCode(pc);
         if (!code)
             continue;
 
@@ -502,57 +503,63 @@ mjit::Compiler::popActiveFrame()
         if (status_ != Compile_Okay) {                               \
             if (oomInVector || masm.oom() || stubcc.masm.oom())      \
                 js_ReportOutOfMemory(cx);                            \
             return status_;                                          \
         }                                                            \
     JS_END_MACRO
 
 CompileStatus
-mjit::Compiler::performCompilation(JITScript **jitp)
+mjit::Compiler::performCompilation()
 {
-    JaegerSpew(JSpew_Scripts, "compiling script (file \"%s\") (line \"%d\") (length \"%d\")\n",
-               outerScript->filename, outerScript->lineno, outerScript->length);
+    JaegerSpew(JSpew_Scripts,
+               "compiling script (file \"%s\") (line \"%d\") (length \"%d\") (chunk \"%d\")\n",
+               outerScript->filename, outerScript->lineno, outerScript->length, chunkIndex);
 
     if (inlining()) {
-        JaegerSpew(JSpew_Inlining, "inlining calls in script (file \"%s\") (line \"%d\")\n",
+        JaegerSpew(JSpew_Inlining,
+                   "inlining calls in script (file \"%s\") (line \"%d\")\n",
                    outerScript->filename, outerScript->lineno);
     }
 
 #ifdef JS_METHODJIT_SPEW
     Profiler prof;
     prof.start();
 #endif
 
 #ifdef JS_METHODJIT
     outerScript->debugMode = debugMode();
 #endif
 
     JS_ASSERT(cx->compartment->activeInference);
 
     {
-        types::AutoEnterCompilation enter(cx, outerScript);
+        types::AutoEnterCompilation enter(cx, outerScript, isConstructing, chunkIndex);
 
         CHECK_STATUS(checkAnalysis(outerScript));
         if (inlining())
             CHECK_STATUS(scanInlineCalls(CrossScriptSSA::OUTER_FRAME, 0));
         CHECK_STATUS(pushActiveFrame(outerScript, 0));
-        CHECK_STATUS(generatePrologue());
+        if (chunkIndex == 0)
+            CHECK_STATUS(generatePrologue());
         CHECK_STATUS(generateMethod());
-        CHECK_STATUS(generateEpilogue());
-        CHECK_STATUS(finishThisUp(jitp));
+        if (outerJIT() && chunkIndex == outerJIT()->nchunks - 1)
+            CHECK_STATUS(generateEpilogue());
+        CHECK_STATUS(finishThisUp());
     }
 
 #ifdef JS_METHODJIT_SPEW
     prof.stop();
     JaegerSpew(JSpew_Prof, "compilation took %d us\n", prof.time_us());
 #endif
 
     JaegerSpew(JSpew_Scripts, "successfully compiled (code \"%p\") (size \"%u\")\n",
-               (*jitp)->code.m_code.executableAddress(), unsigned((*jitp)->code.m_size));
+               outerChunk.chunk->code.m_code.executableAddress(),
+               unsigned(outerChunk.chunk->code.m_size));
+
     return Compile_Okay;
 }
 
 #undef CHECK_STATUS
 
 mjit::JSActiveFrame::JSActiveFrame()
     : parent(NULL), parentPC(NULL), script(NULL), inlineIndex(UINT32_MAX)
 {
@@ -612,55 +619,378 @@ mjit::Compiler::prepareInferenceTypes(JS
 
     a->varTypes = (VarType *)
         cx->calloc_(TotalSlots(script) * sizeof(VarType));
     if (!a->varTypes)
         return Compile_Error;
 
     for (uint32_t slot = ArgSlot(0); slot < TotalSlots(script); slot++) {
         VarType &vt = a->varTypes[slot];
-        vt.types = types::TypeScript::SlotTypes(script, slot);
-        vt.type = vt.types->getKnownTypeTag(cx);
+        vt.setTypes(types::TypeScript::SlotTypes(script, slot));
     }
 
     return Compile_Okay;
 }
 
-CompileStatus JS_NEVER_INLINE
-mjit::TryCompile(JSContext *cx, JSScript *script, bool construct)
+/*
+ * Number of times a script must be called or have back edges taken before we
+ * run it in the methodjit. We wait longer if type inference is enabled, to
+ * allow more gathering of type information and less recompilation.
+ */
+static const size_t USES_BEFORE_COMPILE       = 16;
+static const size_t INFER_USES_BEFORE_COMPILE = 40;
+
+/* Target maximum size, in bytecode length, for a compiled chunk of a script. */
+static uint32_t CHUNK_LIMIT = 1500;
+
+void
+mjit::SetChunkLimit(uint32_t limit)
+{
+    if (limit)
+        CHUNK_LIMIT = limit;
+}
+
+JITScript *
+MakeJITScript(JSContext *cx, JSScript *script, bool construct)
 {
+    if (!script->ensureRanAnalysis(cx, NULL))
+        return NULL;
+
+    ScriptAnalysis *analysis = script->analysis();
+
+    JITScript *&location = construct ? script->jitCtor : script->jitNormal;
+
+    Vector<ChunkDescriptor> chunks(cx);
+    Vector<CrossChunkEdge> edges(cx);
+
+    /*
+     * Chunk compilation is not supported on x64, since there is no guarantee
+     * that cross chunk jumps will be patchable even to go to the default shim.
+     */
+#ifndef JS_CPU_X64
+    if (script->length < CHUNK_LIMIT || !cx->typeInferenceEnabled()) {
+#endif
+        ChunkDescriptor desc;
+        desc.begin = 0;
+        desc.end = script->length;
+        if (!chunks.append(desc))
+            return NULL;
+#ifndef JS_CPU_X64
+    } else {
+        if (!script->ensureRanInference(cx))
+            return NULL;
+
+        /* Outgoing edges within the current chunk. */
+        Vector<CrossChunkEdge> currentEdges(cx);
+        uint32_t chunkStart = 0;
+
+        unsigned offset, nextOffset = 0;
+        while (nextOffset < script->length) {
+            offset = nextOffset;
+
+            jsbytecode *pc = script->code + offset;
+            JSOp op = JSOp(*pc);
+
+            nextOffset = offset + GetBytecodeLength(pc);
+
+            Bytecode *code = analysis->maybeCode(offset);
+            if (!code)
+                continue;
+
+            /* Whether this should be the last opcode in the chunk. */
+            bool finishChunk = false;
+
+            /* Keep going, override finishChunk. */
+            bool preserveChunk = false;
+
+            /*
+             * Add an edge for opcodes which perform a branch. Skip LABEL ops,
+             * which do not actually branch. XXX LABEL should not be JOF_JUMP.
+             */
+            uint32_t type = JOF_TYPE(js_CodeSpec[op].format);
+            if (type == JOF_JUMP && op != JSOP_LABEL) {
+                CrossChunkEdge edge;
+                edge.source = offset;
+                edge.target = FollowBranch(cx, script, pc - script->code);
+                if (edge.target < offset) {
+                    /* Always end chunks after loop back edges. */
+                    finishChunk = true;
+                    if (edge.target < chunkStart) {
+                        analysis->getCode(edge.target).safePoint = true;
+                        if (!edges.append(edge))
+                            return NULL;
+                    }
+                } else if (edge.target == nextOffset) {
+                    /*
+                     * Override finishChunk for bytecodes which directly
+                     * jump to their fallthrough opcode ('if (x) {}'). This
+                     * creates two CFG edges with the same source/target, which
+                     * will confuse the compiler's edge patching code.
+                     */
+                    preserveChunk = true;
+                } else {
+                    if (!currentEdges.append(edge))
+                        return NULL;
+                }
+            }
+
+            /*
+             * Watch for cross-chunk edges in a table switch. Don't handle
+             * lookup switches, as these are always stubbed.
+             */
+            if (op == JSOP_TABLESWITCH) {
+                jsbytecode *pc2 = pc;
+                unsigned defaultOffset = offset + GET_JUMP_OFFSET(pc);
+                pc2 += JUMP_OFFSET_LEN;
+                jsint low = GET_JUMP_OFFSET(pc2);
+                pc2 += JUMP_OFFSET_LEN;
+                jsint high = GET_JUMP_OFFSET(pc2);
+                pc2 += JUMP_OFFSET_LEN;
+
+                CrossChunkEdge edge;
+                edge.source = offset;
+                edge.target = defaultOffset;
+                if (!currentEdges.append(edge))
+                    return NULL;
+
+                for (jsint i = low; i <= high; i++) {
+                    unsigned targetOffset = offset + GET_JUMP_OFFSET(pc2);
+                    if (targetOffset != offset) {
+                        /*
+                         * This can end up inserting duplicate edges, all but
+                         * the first of which will be ignored.
+                         */
+                        CrossChunkEdge edge;
+                        edge.source = offset;
+                        edge.target = targetOffset;
+                        if (!currentEdges.append(edge))
+                            return NULL;
+                    }
+                    pc2 += JUMP_OFFSET_LEN;
+                }
+            }
+
+            if (unsigned(offset - chunkStart) > CHUNK_LIMIT)
+                finishChunk = true;
+
+            if (nextOffset >= script->length || !analysis->maybeCode(nextOffset)) {
+                /* Ensure that chunks do not start on unreachable opcodes. */
+                preserveChunk = true;
+            } else {
+                /*
+                 * Start new chunks at the opcode before each loop head.
+                 * This ensures that the initial goto for loops is included in
+                 * the same chunk as the loop itself.
+                 */
+                jsbytecode *nextpc = script->code + nextOffset;
+
+                /*
+                 * Don't insert a chunk boundary in the middle of two opcodes
+                 * which may be fused together.
+                 */
+                switch (JSOp(*nextpc)) {
+                  case JSOP_POP:
+                  case JSOP_IFNE:
+                  case JSOP_IFEQ:
+                    preserveChunk = true;
+                    break;
+                  default:
+                    break;
+                }
+
+                uint32_t afterOffset = nextOffset + GetBytecodeLength(nextpc);
+                if (afterOffset < script->length) {
+                    if (analysis->maybeCode(afterOffset) &&
+                        JSOp(script->code[afterOffset]) == JSOP_LOOPHEAD &&
+                        analysis->getLoop(afterOffset))
+                    {
+                        finishChunk = true;
+                    }
+                }
+            }
+
+            if (finishChunk && !preserveChunk) {
+                ChunkDescriptor desc;
+                desc.begin = chunkStart;
+                desc.end = nextOffset;
+                if (!chunks.append(desc))
+                    return NULL;
+
+                /* Add an edge for fallthrough from this chunk to the next one. */
+                if (!BytecodeNoFallThrough(op)) {
+                    CrossChunkEdge edge;
+                    edge.source = offset;
+                    edge.target = nextOffset;
+                    analysis->getCode(edge.target).safePoint = true;
+                    if (!edges.append(edge))
+                        return NULL;
+                }
+
+                chunkStart = nextOffset;
+                for (unsigned i = 0; i < currentEdges.length(); i++) {
+                    const CrossChunkEdge &edge = currentEdges[i];
+                    if (edge.target >= nextOffset) {
+                        analysis->getCode(edge.target).safePoint = true;
+                        if (!edges.append(edge))
+                            return NULL;
+                    }
+                }
+                currentEdges.clear();
+            }
+        }
+
+        if (chunkStart != script->length) {
+            ChunkDescriptor desc;
+            desc.begin = chunkStart;
+            desc.end = script->length;
+            if (!chunks.append(desc))
+                return NULL;
+        }
+    }
+#endif /* !JS_CPU_X64 */
+
+    size_t dataSize = sizeof(JITScript)
+        + (chunks.length() * sizeof(ChunkDescriptor))
+        + (edges.length() * sizeof(CrossChunkEdge));
+    uint8_t *cursor = (uint8_t *) cx->calloc_(dataSize);
+    if (!cursor)
+        return NULL;
+
+    JITScript *jit = (JITScript *) cursor;
+    cursor += sizeof(JITScript);
+
+    jit->script = script;
+    JS_INIT_CLIST(&jit->callers);
+
+    jit->nchunks = chunks.length();
+    for (unsigned i = 0; i < chunks.length(); i++) {
+        const ChunkDescriptor &a = chunks[i];
+        ChunkDescriptor &b = jit->chunkDescriptor(i);
+        b.begin = a.begin;
+        b.end = a.end;
+
+        if (chunks.length() == 1) {
+            /* Seed the chunk's count so it is immediately compiled. */
+            b.counter = INFER_USES_BEFORE_COMPILE;
+        }
+    }
+
+    if (edges.empty()) {
+        location = jit;
+        return jit;
+    }
+
+    jit->nedges = edges.length();
+    CrossChunkEdge *jitEdges = jit->edges();
+    for (unsigned i = 0; i < edges.length(); i++) {
+        const CrossChunkEdge &a = edges[i];
+        CrossChunkEdge &b = jitEdges[i];
+        b.source = a.source;
+        b.target = a.target;
+    }
+
+    /* Generate a pool with all cross chunk shims, and set shimLabel for each edge. */
+    Assembler masm;
+    for (unsigned i = 0; i < jit->nedges; i++) {
+        jsbytecode *pc = script->code + jitEdges[i].target;
+        jitEdges[i].shimLabel = (void *) masm.distanceOf(masm.label());
+        masm.move(JSC::MacroAssembler::ImmPtr(&jitEdges[i]), Registers::ArgReg1);
+        masm.fallibleVMCall(true, JS_FUNC_TO_DATA_PTR(void *, stubs::CrossChunkShim),
+                            pc, NULL, script->nfixed + analysis->getCode(pc).stackDepth);
+    }
+    LinkerHelper linker(masm, JSC::METHOD_CODE);
+    JSC::ExecutablePool *ep = linker.init(cx);
+    if (!ep)
+        return NULL;
+    jit->shimPool = ep;
+
+    masm.finalize(linker);
+    uint8_t *shimCode = (uint8_t *) linker.finalizeCodeAddendum().executableAddress();
+
+    JS_ALWAYS_TRUE(linker.verifyRange(JSC::JITCode(shimCode, masm.size())));
+
+    JaegerSpew(JSpew_PICs, "generated SHIM POOL stub %p (%lu bytes)\n",
+               shimCode, (unsigned long)masm.size());
+
+    for (unsigned i = 0; i < jit->nedges; i++) {
+        CrossChunkEdge &edge = jitEdges[i];
+        edge.shimLabel = shimCode + (size_t) edge.shimLabel;
+    }
+
+    location = jit;
+    return jit;
+}
+
+CompileStatus
+mjit::CanMethodJIT(JSContext *cx, JSScript *script, jsbytecode *pc,
+                   bool construct, CompileRequest request)
+{
+  restart:
+    if (!cx->methodJitEnabled)
+        return Compile_Abort;
+
+    void *addr = construct ? script->jitArityCheckCtor : script->jitArityCheckNormal;
+    if (addr == JS_UNJITTABLE_SCRIPT)
+        return Compile_Abort;
+
+    JITScript *jit = script->getJIT(construct);
+
+    if (request == CompileRequest_Interpreter &&
+        !cx->hasRunOption(JSOPTION_METHODJIT_ALWAYS) &&
+        (cx->typeInferenceEnabled()
+         ? script->incUseCount() <= INFER_USES_BEFORE_COMPILE
+         : script->incUseCount() <= USES_BEFORE_COMPILE))
+    {
+        return Compile_Skipped;
+    }
+
 #if JS_HAS_SHARP_VARS
     if (script->hasSharps)
         return Compile_Abort;
 #endif
-    bool ok = cx->compartment->ensureJaegerCompartmentExists(cx);
-    if (!ok)
-        return Compile_Abort;
+
+    if (!cx->compartment->ensureJaegerCompartmentExists(cx))
+        return Compile_Error;
 
     // Ensure that constructors have at least one slot.
     if (construct && !script->nslots)
         script->nslots++;
 
+    if (!jit) {
+        jit = MakeJITScript(cx, script, construct);
+        if (!jit)
+            return Compile_Error;
+    }
+    unsigned chunkIndex = jit->chunkIndex(pc);
+    ChunkDescriptor &desc = jit->chunkDescriptor(chunkIndex);
+
+    if (desc.chunk)
+        return Compile_Okay;
+
+    if (request == CompileRequest_Interpreter &&
+        !cx->hasRunOption(JSOPTION_METHODJIT_ALWAYS) &&
+        ++desc.counter <= INFER_USES_BEFORE_COMPILE)
+    {
+        return Compile_Skipped;
+    }
+
     CompileStatus status;
     {
         types::AutoEnterTypeInference enter(cx, true);
 
-        Compiler cc(cx, script, construct);
+        Compiler cc(cx, script, chunkIndex, construct);
         status = cc.compile();
     }
 
     if (status == Compile_Okay) {
         /*
-         * Compiling a script can occasionally trigger its own recompilation.
-         * Treat this the same way as a static overflow and wait for another
-         * attempt to compile the script.
+         * Compiling a script can occasionally trigger its own recompilation,
+         * so go back through the compilation logic.
          */
-        JITScriptStatus status = script->getJITStatus(construct);
-        JS_ASSERT(status != JITScript_Invalid);
-        return (status == JITScript_Valid) ? Compile_Okay : Compile_Retry;
+        goto restart;
     }
 
     /* Non-OOM errors should have an associated exception. */
     JS_ASSERT_IF(status == Compile_Error,
                  cx->isExceptionPending() || cx->runtime->hadOutOfMemory);
 
     return status;
 }
@@ -867,17 +1197,17 @@ mjit::Compiler::generatePrologue()
 }
 
 void
 mjit::Compiler::ensureDoubleArguments()
 {
     /* Convert integer arguments which were inferred as (int|double) to doubles. */
     for (uint32_t i = 0; script->function() && i < script->function()->nargs; i++) {
         uint32_t slot = ArgSlot(i);
-        if (a->varTypes[slot].type == JSVAL_TYPE_DOUBLE && analysis->trackSlot(slot))
+        if (a->varTypes[slot].getTypeTag(cx) == JSVAL_TYPE_DOUBLE && analysis->trackSlot(slot))
             frame.ensureDouble(frame.getArg(i));
     }
 }
 
 void
 mjit::Compiler::markUndefinedLocals()
 {
     uint32_t depth = ssa.getFrame(a->inlineIndex).depth;
@@ -901,17 +1231,17 @@ mjit::Compiler::markUndefinedLocals()
 
 CompileStatus
 mjit::Compiler::generateEpilogue()
 {
     return Compile_Okay;
 }
 
 CompileStatus
-mjit::Compiler::finishThisUp(JITScript **jitp)
+mjit::Compiler::finishThisUp()
 {
     RETURN_IF_OOM(Compile_Error);
 
     /*
      * Watch for reallocation of the global slots while we were in the middle
      * of compiling due to, e.g. standard class initialization.
      */
     if (globalSlots && globalObj->getRawSlots() != globalSlots)
@@ -919,16 +1249,20 @@ mjit::Compiler::finishThisUp(JITScript *
 
     /*
      * Watch for GCs which occurred during compilation. These may have
      * renumbered shapes baked into the jitcode.
      */
     if (cx->runtime->gcNumber != gcNumber)
         return Compile_Retry;
 
+    /* The JIT will not have been cleared if no GC has occurred. */
+    JITScript *jit = outerJIT();
+    JS_ASSERT(jit != NULL);
+
     if (overflowICSpace) {
         JaegerSpew(JSpew_Scripts, "dumped a constant pool while generating an IC\n");
         return Compile_Abort;
     }
 
     a->mainCodeEnd = masm.size();
     a->stubCodeEnd = stubcc.size();
 
@@ -949,42 +1283,48 @@ mjit::Compiler::finishThisUp(JITScript *
     size_t codeSize = masm.size() +
 #if defined(JS_CPU_MIPS) 
                       stubcc.size() + sizeof(double) +
 #else
                       stubcc.size() +
 #endif
                       (masm.numDoubles() * sizeof(double)) +
                       (stubcc.masm.numDoubles() * sizeof(double)) +
-                      jumpTableOffsets.length() * sizeof(void *);
+                      jumpTableEdges.length() * sizeof(void *);
+
+    Vector<ChunkJumpTableEdge> chunkJumps(cx);
+    if (!chunkJumps.reserve(jumpTableEdges.length()))
+        return Compile_Error;
 
     JSC::ExecutablePool *execPool;
     uint8_t *result = (uint8_t *)script->compartment()->jaegerCompartment()->execAlloc()->
                     alloc(codeSize, &execPool, JSC::METHOD_CODE);
     if (!result) {
         js_ReportOutOfMemory(cx);
         return Compile_Error;
     }
     JS_ASSERT(execPool);
     JSC::ExecutableAllocator::makeWritable(result, codeSize);
     masm.executableCopy(result);
     stubcc.masm.executableCopy(result + masm.size());
 
     JSC::LinkBuffer fullCode(result, codeSize, JSC::METHOD_CODE);
     JSC::LinkBuffer stubCode(result + masm.size(), stubcc.size(), JSC::METHOD_CODE);
 
+    JS_ASSERT(!loop);
+
     size_t nNmapLive = loopEntries.length();
-    for (size_t i = 0; i < script->length; i++) {
+    for (size_t i = outerChunk.begin; i < outerChunk.end; i++) {
         Bytecode *opinfo = analysis->maybeCode(i);
         if (opinfo && opinfo->safePoint)
             nNmapLive++;
     }
 
-    /* Please keep in sync with JITScript::scriptDataSize! */
-    size_t dataSize = sizeof(JITScript) +
+    /* Please keep in sync with JITChunk::scriptDataSize! */
+    size_t dataSize = sizeof(JITChunk) +
                       sizeof(NativeMapEntry) * nNmapLive +
                       sizeof(InlineFrame) * inlineFrames.length() +
                       sizeof(CallSite) * callSites.length() +
 #if defined JS_MONOIC
                       sizeof(ic::GetGlobalNameIC) * getGlobalNames.length() +
                       sizeof(ic::SetGlobalNameIC) * setGlobalNames.length() +
                       sizeof(ic::CallICInfo) * callICs.length() +
                       sizeof(ic::EqualityICInfo) * equalityICs.length() +
@@ -998,47 +1338,50 @@ mjit::Compiler::finishThisUp(JITScript *
 
     uint8_t *cursor = (uint8_t *)cx->calloc_(dataSize);
     if (!cursor) {
         execPool->release();
         js_ReportOutOfMemory(cx);
         return Compile_Error;
     }
 
-    JITScript *jit = new(cursor) JITScript;
-    cursor += sizeof(JITScript);
+    JITChunk *chunk = new(cursor) JITChunk;
+    cursor += sizeof(JITChunk);
 
     JS_ASSERT(outerScript == script);
 
-    jit->script = script;
-    jit->code = JSC::MacroAssemblerCodeRef(result, execPool, masm.size() + stubcc.size());
-    jit->invokeEntry = result;
-    jit->singleStepMode = script->stepModeEnabled();
-    if (script->function()) {
-        jit->arityCheckEntry = stubCode.locationOf(arityLabel).executableAddress();
-        jit->argsCheckEntry = stubCode.locationOf(argsCheckLabel).executableAddress();
-        jit->fastEntry = fullCode.locationOf(invokeLabel).executableAddress();
-    }
-    jit->pcLengths = pcLengths;
+    chunk->code = JSC::MacroAssemblerCodeRef(result, execPool, masm.size() + stubcc.size());
+    chunk->pcLengths = pcLengths;
+
+    if (chunkIndex == 0) {
+        jit->invokeEntry = result;
+        if (script->function()) {
+            jit->arityCheckEntry = stubCode.locationOf(arityLabel).executableAddress();
+            jit->argsCheckEntry = stubCode.locationOf(argsCheckLabel).executableAddress();
+            jit->fastEntry = fullCode.locationOf(invokeLabel).executableAddress();
+            void *&addr = isConstructing ? script->jitArityCheckCtor : script->jitArityCheckNormal;
+            addr = jit->arityCheckEntry;
+        }
+    }
 
     /*
      * WARNING: mics(), callICs() et al depend on the ordering of these
-     * variable-length sections.  See JITScript's declaration for details.
+     * variable-length sections.  See JITChunk's declaration for details.
      */
 
     /* ICs can only refer to bytecodes in the outermost script, not inlined calls. */
     Label *jumpMap = a->jumpMap;
 
     /* Build the pc -> ncode mapping. */
     NativeMapEntry *jitNmap = (NativeMapEntry *)cursor;
-    jit->nNmapPairs = nNmapLive;
-    cursor += sizeof(NativeMapEntry) * jit->nNmapPairs;
+    chunk->nNmapPairs = nNmapLive;
+    cursor += sizeof(NativeMapEntry) * chunk->nNmapPairs;
     size_t ix = 0;
-    if (jit->nNmapPairs > 0) {
-        for (size_t i = 0; i < script->length; i++) {
+    if (chunk->nNmapPairs > 0) {
+        for (size_t i = outerChunk.begin; i < outerChunk.end; i++) {
             Bytecode *opinfo = analysis->maybeCode(i);
             if (opinfo && opinfo->safePoint) {
                 Label L = jumpMap[i];
                 JS_ASSERT(L.isSet());
                 jitNmap[ix].bcOff = i;
                 jitNmap[ix].ncode = (uint8_t *)(result + masm.distanceOf(L));
                 ix++;
             }
@@ -1053,39 +1396,39 @@ mjit::Compiler::finishThisUp(JITScript *
                     break;
                 }
             }
             jitNmap[j].bcOff = entry.pcOffset;
             jitNmap[j].ncode = (uint8_t *) stubCode.locationOf(entry.label).executableAddress();
             ix++;
         }
     }
-    JS_ASSERT(ix == jit->nNmapPairs);
+    JS_ASSERT(ix == chunk->nNmapPairs);
 
     /* Build the table of inlined frames. */
     InlineFrame *jitInlineFrames = (InlineFrame *)cursor;
-    jit->nInlineFrames = inlineFrames.length();
-    cursor += sizeof(InlineFrame) * jit->nInlineFrames;
-    for (size_t i = 0; i < jit->nInlineFrames; i++) {
+    chunk->nInlineFrames = inlineFrames.length();
+    cursor += sizeof(InlineFrame) * chunk->nInlineFrames;
+    for (size_t i = 0; i < chunk->nInlineFrames; i++) {
         InlineFrame &to = jitInlineFrames[i];
         ActiveFrame *from = inlineFrames[i];
         if (from->parent != outer)
             to.parent = &jitInlineFrames[from->parent->inlineIndex];
         else
             to.parent = NULL;
         to.parentpc = from->parentPC;
         to.fun = from->script->function();
         to.depth = ssa.getFrame(from->inlineIndex).depth;
     }
 
     /* Build the table of call sites. */
     CallSite *jitCallSites = (CallSite *)cursor;
-    jit->nCallSites = callSites.length();
-    cursor += sizeof(CallSite) * jit->nCallSites;
-    for (size_t i = 0; i < jit->nCallSites; i++) {
+    chunk->nCallSites = callSites.length();
+    cursor += sizeof(CallSite) * chunk->nCallSites;
+    for (size_t i = 0; i < chunk->nCallSites; i++) {
         CallSite &to = jitCallSites[i];
         InternalCallSite &from = callSites[i];
 
         /* Patch stores of f.regs.inlined for stubs called from within inline frames. */
         if (cx->typeInferenceEnabled() &&
             from.rejoin != REJOIN_TRAP &&
             from.rejoin != REJOIN_SCRIPTED &&
             from.inlineIndex != UINT32_MAX) {
@@ -1107,44 +1450,44 @@ mjit::Compiler::finishThisUp(JITScript *
          * calls. InvariantFailure will patch its own return address to this
          * pointer before triggering recompilation.
          */
         if (from.loopPatch.hasPatch)
             stubCode.patch(from.loopPatch.codePatch, result + codeOffset);
     }
 
 #if defined JS_MONOIC
-    JS_INIT_CLIST(&jit->callers);
-
-    if (script->function() && cx->typeInferenceEnabled()) {
-        jit->argsCheckStub = stubCode.locationOf(argsCheckStub);
-        jit->argsCheckFallthrough = stubCode.locationOf(argsCheckFallthrough);
-        jit->argsCheckJump = stubCode.locationOf(argsCheckJump);
-        jit->argsCheckPool = NULL;
+    if (chunkIndex == 0 && script->function()) {
+        JS_ASSERT(jit->argsCheckPool == NULL);
+        if (cx->typeInferenceEnabled()) {
+            jit->argsCheckStub = stubCode.locationOf(argsCheckStub);
+            jit->argsCheckFallthrough = stubCode.locationOf(argsCheckFallthrough);
+            jit->argsCheckJump = stubCode.locationOf(argsCheckJump);
+        }
     }
 
     ic::GetGlobalNameIC *getGlobalNames_ = (ic::GetGlobalNameIC *)cursor;
-    jit->nGetGlobalNames = getGlobalNames.length();
-    cursor += sizeof(ic::GetGlobalNameIC) * jit->nGetGlobalNames;
-    for (size_t i = 0; i < jit->nGetGlobalNames; i++) {
+    chunk->nGetGlobalNames = getGlobalNames.length();
+    cursor += sizeof(ic::GetGlobalNameIC) * chunk->nGetGlobalNames;
+    for (size_t i = 0; i < chunk->nGetGlobalNames; i++) {
         ic::GetGlobalNameIC &to = getGlobalNames_[i];
         GetGlobalNameICInfo &from = getGlobalNames[i];
         from.copyTo(to, fullCode, stubCode);
 
         int offset = fullCode.locationOf(from.load) - to.fastPathStart;
         to.loadStoreOffset = offset;
         JS_ASSERT(to.loadStoreOffset == offset);
 
         stubCode.patch(from.addrLabel, &to);
     }
 
     ic::SetGlobalNameIC *setGlobalNames_ = (ic::SetGlobalNameIC *)cursor;
-    jit->nSetGlobalNames = setGlobalNames.length();
-    cursor += sizeof(ic::SetGlobalNameIC) * jit->nSetGlobalNames;
-    for (size_t i = 0; i < jit->nSetGlobalNames; i++) {
+    chunk->nSetGlobalNames = setGlobalNames.length();
+    cursor += sizeof(ic::SetGlobalNameIC) * chunk->nSetGlobalNames;
+    for (size_t i = 0; i < chunk->nSetGlobalNames; i++) {
         ic::SetGlobalNameIC &to = setGlobalNames_[i];
         SetGlobalNameICInfo &from = setGlobalNames[i];
         from.copyTo(to, fullCode, stubCode);
         to.slowPathStart = stubCode.locationOf(from.slowPathStart);
 
         int offset = fullCode.locationOf(from.store).labelAtOffset(0) -
                      to.fastPathStart;
         to.loadStoreOffset = offset;
@@ -1165,19 +1508,19 @@ mjit::Compiler::finishThisUp(JITScript *
                  to.fastPathStart;
         to.fastRejoinOffset = offset;
         JS_ASSERT(to.fastRejoinOffset == offset);
 
         stubCode.patch(from.addrLabel, &to);
     }
 
     ic::CallICInfo *jitCallICs = (ic::CallICInfo *)cursor;
-    jit->nCallICs = callICs.length();
-    cursor += sizeof(ic::CallICInfo) * jit->nCallICs;
-    for (size_t i = 0; i < jit->nCallICs; i++) {
+    chunk->nCallICs = callICs.length();
+    cursor += sizeof(ic::CallICInfo) * chunk->nCallICs;
+    for (size_t i = 0; i < chunk->nCallICs; i++) {
         jitCallICs[i].reset();
         jitCallICs[i].funGuard = fullCode.locationOf(callICs[i].funGuard);
         jitCallICs[i].funJump = fullCode.locationOf(callICs[i].funJump);
         jitCallICs[i].slowPathStart = stubCode.locationOf(callICs[i].slowPathStart);
         jitCallICs[i].typeMonitored = callICs[i].typeMonitored;
 
         /* Compute the hot call offset. */
         uint32_t offset = fullCode.locationOf(callICs[i].hotJump) -
@@ -1224,19 +1567,19 @@ mjit::Compiler::finishThisUp(JITScript *
         jitCallICs[i].call = &jitCallSites[callICs[i].callIndex];
         jitCallICs[i].frameSize = callICs[i].frameSize;
         jitCallICs[i].funObjReg = callICs[i].funObjReg;
         stubCode.patch(callICs[i].addrLabel1, &jitCallICs[i]);
         stubCode.patch(callICs[i].addrLabel2, &jitCallICs[i]);
     }
 
     ic::EqualityICInfo *jitEqualityICs = (ic::EqualityICInfo *)cursor;
-    jit->nEqualityICs = equalityICs.length();
-    cursor += sizeof(ic::EqualityICInfo) * jit->nEqualityICs;
-    for (size_t i = 0; i < jit->nEqualityICs; i++) {
+    chunk->nEqualityICs = equalityICs.length();
+    cursor += sizeof(ic::EqualityICInfo) * chunk->nEqualityICs;
+    for (size_t i = 0; i < chunk->nEqualityICs; i++) {
         if (equalityICs[i].trampoline) {
             jitEqualityICs[i].target = stubCode.locationOf(equalityICs[i].trampolineStart);
         } else {
             uint32_t offs = uint32_t(equalityICs[i].jumpTarget - script->code);
             JS_ASSERT(jumpMap[offs].isSet());
             jitEqualityICs[i].target = fullCode.locationOf(jumpMap[offs]);
         }
         jitEqualityICs[i].stubEntry = stubCode.locationOf(equalityICs[i].stubEntry);
@@ -1264,19 +1607,19 @@ mjit::Compiler::finishThisUp(JITScript *
         if (patch.hasFastNcode)
             fullCode.patch(patch.fastNcodePatch, joinPoint);
         if (patch.hasSlowNcode)
             stubCode.patch(patch.slowNcodePatch, joinPoint);
     }
 
 #ifdef JS_POLYIC
     ic::GetElementIC *jitGetElems = (ic::GetElementIC *)cursor;
-    jit->nGetElems = getElemICs.length();
-    cursor += sizeof(ic::GetElementIC) * jit->nGetElems;
-    for (size_t i = 0; i < jit->nGetElems; i++) {
+    chunk->nGetElems = getElemICs.length();
+    cursor += sizeof(ic::GetElementIC) * chunk->nGetElems;
+    for (size_t i = 0; i < chunk->nGetElems; i++) {
         ic::GetElementIC &to = jitGetElems[i];
         GetElementICInfo &from = getElemICs[i];
 
         new (&to) ic::GetElementIC();
         from.copyTo(to, fullCode, stubCode);
 
         to.typeReg = from.typeReg;
         to.objReg = from.objReg;
@@ -1292,19 +1635,19 @@ mjit::Compiler::finishThisUp(JITScript *
                                fullCode.locationOf(from.fastPathStart);
         to.inlineShapeGuard = inlineShapeGuard;
         JS_ASSERT(to.inlineShapeGuard == inlineShapeGuard);
 
         stubCode.patch(from.paramAddr, &to);
     }
 
     ic::SetElementIC *jitSetElems = (ic::SetElementIC *)cursor;
-    jit->nSetElems = setElemICs.length();
-    cursor += sizeof(ic::SetElementIC) * jit->nSetElems;
-    for (size_t i = 0; i < jit->nSetElems; i++) {
+    chunk->nSetElems = setElemICs.length();
+    cursor += sizeof(ic::SetElementIC) * chunk->nSetElems;
+    for (size_t i = 0; i < chunk->nSetElems; i++) {
         ic::SetElementIC &to = jitSetElems[i];
         SetElementICInfo &from = setElemICs[i];
 
         new (&to) ic::SetElementIC();
         from.copyTo(to, fullCode, stubCode);
 
         to.strictMode = script->strictModeCode;
         to.vr = from.vr;
@@ -1332,19 +1675,19 @@ mjit::Compiler::finishThisUp(JITScript *
 
         to.volatileMask = from.volatileMask;
         JS_ASSERT(to.volatileMask == from.volatileMask);
 
         stubCode.patch(from.paramAddr, &to);
     }
 
     ic::PICInfo *jitPics = (ic::PICInfo *)cursor;
-    jit->nPICs = pics.length();
-    cursor += sizeof(ic::PICInfo) * jit->nPICs;
-    for (size_t i = 0; i < jit->nPICs; i++) {
+    chunk->nPICs = pics.length();
+    cursor += sizeof(ic::PICInfo) * chunk->nPICs;
+    for (size_t i = 0; i < chunk->nPICs; i++) {
         new (&jitPics[i]) ic::PICInfo();
         pics[i].copyTo(jitPics[i], fullCode, stubCode);
         pics[i].copySimpleMembersTo(jitPics[i]);
 
         jitPics[i].shapeGuard = masm.distanceOf(pics[i].shapeGuard) -
                                 masm.distanceOf(pics[i].fastPathStart);
         JS_ASSERT(jitPics[i].shapeGuard == masm.distanceOf(pics[i].shapeGuard) -
                                            masm.distanceOf(pics[i].fastPathStart));
@@ -1361,19 +1704,19 @@ mjit::Compiler::finishThisUp(JITScript *
                 JS_ASSERT(distance <= 0);
                 jitPics[i].u.get.typeCheckOffset = distance;
             }
         }
         stubCode.patch(pics[i].paramAddr, &jitPics[i]);
     }
 #endif
 
-    JS_ASSERT(size_t(cursor - (uint8_t*)jit) == dataSize);
+    JS_ASSERT(size_t(cursor - (uint8_t*)chunk) == dataSize);
     /* Pass in NULL here -- we don't want slop bytes to be counted. */
-    JS_ASSERT(jit->scriptDataSize(NULL) == dataSize);
+    JS_ASSERT(chunk->scriptDataSize(NULL) == dataSize);
 
     /* Link fast and slow paths together. */
     stubcc.fixCrossJumps(result, masm.size(), masm.size() + stubcc.size());
 
 #if defined(JS_CPU_MIPS)
     /* Make sure doubleOffset is aligned to sizeof(double) bytes.  */ 
     size_t doubleOffset = (((size_t)result + masm.size() + stubcc.size() +
                             sizeof(double) - 1) & (~(sizeof(double) - 1))) -
@@ -1385,20 +1728,28 @@ mjit::Compiler::finishThisUp(JITScript *
 
     double *inlineDoubles = (double *) (result + doubleOffset);
     double *oolDoubles = (double*) (result + doubleOffset +
                                     masm.numDoubles() * sizeof(double));
 
     /* Generate jump tables. */
     void **jumpVec = (void **)(oolDoubles + stubcc.masm.numDoubles());
 
-    for (size_t i = 0; i < jumpTableOffsets.length(); i++) {
-        uint32_t offset = jumpTableOffsets[i];
-        JS_ASSERT(jumpMap[offset].isSet());
-        jumpVec[i] = (void *)(result + masm.distanceOf(jumpMap[offset]));
+    for (size_t i = 0; i < jumpTableEdges.length(); i++) {
+        JumpTableEdge edge = jumpTableEdges[i];
+        if (bytecodeInChunk(script->code + edge.target)) {
+            JS_ASSERT(jumpMap[edge.target].isSet());
+            jumpVec[i] = (void *)(result + masm.distanceOf(jumpMap[edge.target]));
+        } else {
+            ChunkJumpTableEdge nedge;
+            nedge.edge = edge;
+            nedge.jumpTableEntry = &jumpVec[i];
+            chunkJumps.infallibleAppend(nedge);
+            jumpVec[i] = NULL;
+        }
     }
 
     /* Patch jump table references. */
     for (size_t i = 0; i < jumpTables.length(); i++) {
         JumpTable &jumpTable = jumpTables[i];
         fullCode.patch(jumpTable.label, &jumpVec[jumpTable.offsetIndex]);
     }
 
@@ -1410,17 +1761,82 @@ mjit::Compiler::finishThisUp(JITScript *
     JSC::ExecutableAllocator::cacheFlush(result, masm.size() + stubcc.size());
 
     Probes::registerMJITCode(cx, jit,
                              a,
                              (JSActiveFrame**) inlineFrames.begin(),
                              result, masm.size(),
                              result + masm.size(), stubcc.size());
 
-    *jitp = jit;
+    outerChunk.chunk = chunk;
+
+    Repatcher repatch(chunk);
+
+    /* Patch all incoming and outgoing cross-chunk jumps. */
+    CrossChunkEdge *crossEdges = jit->edges();
+    for (unsigned i = 0; i < jit->nedges; i++) {
+        CrossChunkEdge &edge = crossEdges[i];
+        if (bytecodeInChunk(outerScript->code + edge.source)) {
+            JS_ASSERT(!edge.sourceJump1 && !edge.sourceJump2);
+            void *label = edge.targetLabel ? edge.targetLabel : edge.shimLabel;
+            CodeLocationLabel targetLabel(label);
+            JSOp op = JSOp(script->code[edge.source]);
+            if (op == JSOP_TABLESWITCH) {
+                if (edge.jumpTableEntries)
+                    cx->free_(edge.jumpTableEntries);
+                CrossChunkEdge::JumpTableEntryVector *jumpTableEntries = NULL;
+                bool failed = false;
+                for (unsigned j = 0; j < chunkJumps.length(); j++) {
+                    ChunkJumpTableEdge nedge = chunkJumps[j];
+                    if (nedge.edge.source == edge.source && nedge.edge.target == edge.target) {
+                        if (!jumpTableEntries) {
+                            jumpTableEntries = cx->new_<CrossChunkEdge::JumpTableEntryVector>();
+                            if (!jumpTableEntries)
+                                failed = true;
+                        }
+                        if (!jumpTableEntries->append(nedge.jumpTableEntry))
+                            failed = true;
+                        *nedge.jumpTableEntry = label;
+                    }
+                }
+                if (failed) {
+                    execPool->release();
+                    cx->free_(chunk);
+                    js_ReportOutOfMemory(cx);
+                    return Compile_Error;
+                }
+                edge.jumpTableEntries = jumpTableEntries;
+            }
+            for (unsigned j = 0; j < chunkEdges.length(); j++) {
+                const OutgoingChunkEdge &oedge = chunkEdges[j];
+                if (oedge.source == edge.source && oedge.target == edge.target) {
+                    /*
+                     * Only a single edge needs to be patched; we ensured while
+                     * generating chunks that no two cross chunk edges can have
+                     * the same source and target. Note that there may not be
+                     * an edge to patch, if constant folding determined the
+                     * jump is never taken.
+                     */
+                    edge.sourceJump1 = fullCode.locationOf(oedge.fastJump).executableAddress();
+                    repatch.relink(CodeLocationJump(edge.sourceJump1), targetLabel);
+                    if (oedge.slowJump.isSet()) {
+                        edge.sourceJump2 =
+                            stubCode.locationOf(oedge.slowJump.get()).executableAddress();
+                        repatch.relink(CodeLocationJump(edge.sourceJump2), targetLabel);
+                    }
+                    break;
+                }
+            }
+        } else if (bytecodeInChunk(outerScript->code + edge.target)) {
+            JS_ASSERT(!edge.targetLabel);
+            JS_ASSERT(jumpMap[edge.target].isSet());
+            edge.targetLabel = fullCode.locationOf(jumpMap[edge.target]).executableAddress();
+            jit->patchEdge(edge, edge.targetLabel);
+        }
+    }
 
     return Compile_Okay;
 }
 
 #ifdef DEBUG
 #define SPEW_OPCODE()                                                         \
     JS_BEGIN_MACRO                                                            \
         if (IsJaegerSpewChannelActive(JSpew_JSOps)) {                         \
@@ -1446,27 +1862,88 @@ mjit::Compiler::finishThisUp(JITScript *
 
 static inline void
 FixDouble(Value &val)
 {
     if (val.isInt32())
         val.setDouble((double)val.toInt32());
 }
 
+inline bool
+mjit::Compiler::shouldStartLoop(jsbytecode *head)
+{
+    /*
+     * Don't do loop based optimizations or register allocation for loops which
+     * span multiple chunks.
+     */
+    if (*head == JSOP_LOOPHEAD && analysis->getLoop(head)) {
+        uint32_t backedge = analysis->getLoop(head)->backedge;
+        if (!bytecodeInChunk(script->code + backedge))
+            return false;
+        return true;
+    }
+    return false;
+}
+
 CompileStatus
 mjit::Compiler::generateMethod()
 {
     SrcNoteLineScanner scanner(script->notes(), script->lineno);
 
     /* For join points, whether there was fallthrough from the previous opcode. */
     bool fallthrough = true;
 
     /* Last bytecode processed. */
     jsbytecode *lastPC = NULL;
 
+    if (!outerJIT())
+        return Compile_Retry;
+
+    uint32_t chunkBegin = 0, chunkEnd = script->length;
+    if (!a->parent) {
+        const ChunkDescriptor &desc =
+            outerJIT()->chunkDescriptor(chunkIndex);
+        chunkBegin = desc.begin;
+        chunkEnd = desc.end;
+
+        while (PC != script->code + chunkBegin) {
+            Bytecode *opinfo = analysis->maybeCode(PC);
+            if (opinfo) {
+                if (opinfo->jumpTarget) {
+                    /* Update variable types for all new values at this bytecode. */
+                    const SlotValue *newv = analysis->newValues(PC);
+                    if (newv) {
+                        while (newv->slot) {
+                            if (newv->slot < TotalSlots(script)) {
+                                VarType &vt = a->varTypes[newv->slot];
+                                vt.setTypes(analysis->getValueTypes(newv->value));
+                            }
+                            newv++;
+                        }
+                    }
+                }
+                if (analyze::BytecodeUpdatesSlot(JSOp(*PC))) {
+                    uint32_t slot = GetBytecodeSlot(script, PC);
+                    if (analysis->trackSlot(slot)) {
+                        VarType &vt = a->varTypes[slot];
+                        vt.setTypes(analysis->pushedTypes(PC, 0));
+                    }
+                }
+            }
+
+            PC += GetBytecodeLength(PC);
+        }
+
+        if (chunkIndex != 0) {
+            uint32_t depth = analysis->getCode(PC).stackDepth;
+            for (uint32_t i = 0; i < depth; i++)
+                frame.pushSynced(JSVAL_TYPE_UNKNOWN);
+        }
+    }
+
     for (;;) {
         JSOp op = JSOp(*PC);
         int trap = stubs::JSTRAP_NONE;
 
         if (script->hasBreakpointsAt(PC))
             trap |= stubs::JSTRAP_TRAP;
 
         Bytecode *opinfo = analysis->maybeCode(PC);
@@ -1476,16 +1953,19 @@ mjit::Compiler::generateMethod()
                 break;
             if (js_CodeSpec[op].length != -1)
                 PC += js_CodeSpec[op].length;
             else
                 PC += js_GetVariableBytecodeLength(PC);
             continue;
         }
 
+        if (PC >= script->code + script->length)
+            break;
+
         scanner.advanceTo(PC - script->code);
         if (script->stepModeEnabled() &&
             (scanner.isLineHeader() || opinfo->jumpTarget))
         {
             trap |= stubs::JSTRAP_SINGLESTEP;
         }
 
         frame.setPC(PC);
@@ -1506,29 +1986,43 @@ mjit::Compiler::generateMethod()
             for (unsigned i = 0; i < fixedDoubleToAnyEntries.length(); i++) {
                 FrameEntry *fe = frame.getSlotEntry(fixedDoubleToAnyEntries[i]);
                 frame.syncAndForgetFe(fe);
             }
         }
         fixedIntToDoubleEntries.clear();
         fixedDoubleToAnyEntries.clear();
 
+        if (PC >= script->code + chunkEnd) {
+            if (fallthrough) {
+                frame.syncAndForgetEverything();
+                jsbytecode *curPC = PC;
+                do {
+                    PC--;
+                } while (!analysis->maybeCode(PC));
+                if (!jumpAndRun(masm.jump(), curPC, NULL, NULL, /* fallthrough = */ true))
+                    return Compile_Error;
+                PC = curPC;
+            }
+            break;
+        }
+
         if (opinfo->jumpTarget || trap) {
             if (fallthrough) {
                 fixDoubleTypes(PC);
                 fixedIntToDoubleEntries.clear();
                 fixedDoubleToAnyEntries.clear();
 
                 /*
                  * Watch for fallthrough to the head of a 'do while' loop.
                  * We don't know what register state we will be using at the head
                  * of the loop so sync, branch, and fix it up after the loop
                  * has been processed.
                  */
-                if (cx->typeInferenceEnabled() && op == JSOP_LOOPHEAD && analysis->getLoop(PC)) {
+                if (cx->typeInferenceEnabled() && shouldStartLoop(PC)) {
                     frame.syncAndForgetEverything();
                     Jump j = masm.jump();
                     if (!startLoop(PC, j, PC))
                         return Compile_Error;
                 } else {
                     Label start = masm.label();
                     if (!frame.syncForBranch(PC, Uses(0)))
                         return Compile_Error;
@@ -1546,31 +2040,43 @@ mjit::Compiler::generateMethod()
                 return Compile_Error;
             updateJoinVarTypes();
             fallthrough = true;
 
             if (!cx->typeInferenceEnabled()) {
                 /* All join points have synced state if we aren't doing cross-branch regalloc. */
                 opinfo->safePoint = true;
             }
-        } else if (opinfo->safePoint && !cx->typeInferenceEnabled()) {
+        } else if (opinfo->safePoint) {
             frame.syncAndForgetEverything();
         }
         frame.assertValidRegisterState();
         a->jumpMap[uint32_t(PC - script->code)] = masm.label();
 
         // Now that we have the PC's register allocation, make sure it gets
         // explicitly updated if this is the loop entry and new loop registers
         // are allocated later on.
         if (loop && !a->parent)
             loop->setOuterPC(PC);
 
         SPEW_OPCODE();
         JS_ASSERT(frame.stackDepth() == opinfo->stackDepth);
 
+        if (op == JSOP_LOOPHEAD && analysis->getLoop(PC)) {
+            jsbytecode *backedge = script->code + analysis->getLoop(PC)->backedge;
+            if (!bytecodeInChunk(backedge)){
+                for (uint32_t slot = ArgSlot(0); slot < TotalSlots(script); slot++) {
+                    if (a->varTypes[slot].getTypeTag(cx) == JSVAL_TYPE_DOUBLE) {
+                        FrameEntry *fe = frame.getSlotEntry(slot);
+                        masm.ensureInMemoryDouble(frame.addressOf(fe));
+                    }
+                }
+            }
+        }
+
         // If this is an exception entry point, then jsl_InternalThrow has set
         // VMFrame::fp to the correct fp for the entry point. We need to copy
         // that value here to FpReg so that FpReg also has the correct sp.
         // Otherwise, we would simply be using a stale FpReg value.
         if (op == JSOP_ENTERBLOCK && analysis->getCode(PC).exceptionEntry)
             masm.loadPtr(FrameAddress(VMFrame::offsetOfFp), JSFrameReg);
 
         if (trap) {
@@ -1669,18 +2175,20 @@ mjit::Compiler::generateMethod()
             fixDoubleTypes(target);
 
             /*
              * Watch for gotos which are entering a 'for' or 'while' loop.
              * These jump to the loop condition test and are immediately
              * followed by the head of the loop.
              */
             jsbytecode *next = PC + js_CodeSpec[op].length;
-            if (cx->typeInferenceEnabled() && analysis->maybeCode(next) &&
-                JSOp(*next) == JSOP_LOOPHEAD) {
+            if (cx->typeInferenceEnabled() &&
+                analysis->maybeCode(next) &&
+                shouldStartLoop(next))
+            {
                 frame.syncAndForgetEverything();
                 Jump j = masm.jump();
                 if (!startLoop(next, j, target))
                     return Compile_Error;
             } else {
                 if (!frame.syncForBranch(target, Uses(0)))
                     return Compile_Error;
                 Jump j = masm.jump();
@@ -2169,17 +2677,17 @@ mjit::Compiler::generateMethod()
              */
             if (script->pcCounters)
                 updatePCCounters(PC, &codeStart, &countersUpdated);
 #if defined JS_CPU_ARM /* Need to implement jump(BaseIndex) for ARM */
             frame.syncAndKillEverything();
             masm.move(ImmPtr(PC), Registers::ArgReg1);
 
             /* prepareStubCall() is not needed due to syncAndForgetEverything() */
-            INLINE_STUBCALL(stubs::TableSwitch, REJOIN_NONE);
+            INLINE_STUBCALL(stubs::TableSwitch, REJOIN_JUMP);
             frame.pop();
 
             masm.jump(Registers::ReturnReg);
 #else
             if (!jsop_tableswitch(PC))
                 return Compile_Error;
 #endif
             PC += js_GetVariableBytecodeLength(PC);
@@ -2188,17 +2696,17 @@ mjit::Compiler::generateMethod()
 
           BEGIN_CASE(JSOP_LOOKUPSWITCH)
             if (script->pcCounters)
                 updatePCCounters(PC, &codeStart, &countersUpdated);
             frame.syncAndForgetEverything();
             masm.move(ImmPtr(PC), Registers::ArgReg1);
 
             /* prepareStubCall() is not needed due to syncAndForgetEverything() */
-            INLINE_STUBCALL(stubs::LookupSwitch, REJOIN_NONE);
+            INLINE_STUBCALL(stubs::LookupSwitch, REJOIN_JUMP);
             frame.pop();
 
             masm.jump(Registers::ReturnReg);
             PC += js_GetVariableBytecodeLength(PC);
             break;
           END_CASE(JSOP_LOOKUPSWITCH)
 
           BEGIN_CASE(JSOP_CASE)
@@ -3608,17 +4116,19 @@ mjit::Compiler::checkCallApplySpeculatio
 bool
 mjit::Compiler::canUseApplyTricks()
 {
     JS_ASSERT(*PC == JSOP_ARGUMENTS);
     jsbytecode *nextpc = PC + JSOP_ARGUMENTS_LENGTH;
     return *nextpc == JSOP_FUNAPPLY &&
            IsLowerableFunCallOrApply(nextpc) &&
            !analysis->jumpTarget(nextpc) &&
-           !debugMode() && !a->parent;
+           !debugMode() &&
+           !a->parent &&
+           bytecodeInChunk(nextpc);
 }
 
 /* See MonoIC.cpp, CallCompiler for more information on call ICs. */
 bool
 mjit::Compiler::inlineCallHelper(uint32_t callImmArgc, bool callingNew, FrameSize &callFrameSize)
 {
     int32_t speculatedArgc;
     if (applyTricks == LazyArgsObj) {
@@ -6450,16 +6960,17 @@ mjit::Compiler::jsop_regexp()
     stubcc.rejoin(Changes(1));
     return true;
 }
 
 bool
 mjit::Compiler::startLoop(jsbytecode *head, Jump entry, jsbytecode *entryTarget)
 {
     JS_ASSERT(cx->typeInferenceEnabled() && script == outerScript);
+    JS_ASSERT(shouldStartLoop(head));
 
     if (loop) {
         /*
          * Convert all loop registers in the outer loop into unassigned registers.
          * We don't keep track of which registers the inner loop uses, so the only
          * registers that can be carried in the outer loop must be mentioned before
          * the inner loop starts.
          */
@@ -6475,17 +6986,17 @@ mjit::Compiler::startLoop(jsbytecode *he
     frame.setLoop(loop);
 
     return true;
 }
 
 bool
 mjit::Compiler::finishLoop(jsbytecode *head)
 {
-    if (!cx->typeInferenceEnabled())
+    if (!cx->typeInferenceEnabled() || !bytecodeInChunk(head))
         return true;
 
     /*
      * We're done processing the current loop. Every loop has exactly one backedge
      * at the end ('continue' statements are forward jumps to the loop test),
      * and after jumpAndRun'ing on that edge we can pop it from the frame.
      */
     JS_ASSERT(loop && loop->headOffset() == uint32_t(head - script->code));
@@ -6556,17 +7067,17 @@ mjit::Compiler::finishLoop(jsbytecode *h
         /*
          * The interpreter may store integers in slots we assume are doubles,
          * make sure state is consistent before joining. Note that we don't
          * need any handling for other safe points the interpreter can enter
          * from, i.e. from switch and try blocks, as we don't assume double
          * variables are coherent in such cases.
          */
         for (uint32_t slot = ArgSlot(0); slot < TotalSlots(script); slot++) {
-            if (a->varTypes[slot].type == JSVAL_TYPE_DOUBLE) {
+            if (a->varTypes[slot].getTypeTag(cx) == JSVAL_TYPE_DOUBLE) {
                 FrameEntry *fe = frame.getSlotEntry(slot);
                 stubcc.masm.ensureInMemoryDouble(frame.addressOf(fe));
             }
         }
 
         frame.prepareForJump(head, stubcc.masm, true);
         if (!stubcc.jumpInScript(stubcc.masm.jump(), head))
             return false;
@@ -6598,23 +7109,47 @@ mjit::Compiler::finishLoop(jsbytecode *h
  * The state at the fast jump must reflect the frame's current state. If specified
  * the state at the slow jump must be fully synced.
  *
  * The 'trampoline' argument indicates whether a trampoline was emitted into
  * the OOL path loading some registers for the target. If this is the case,
  * the fast path jump was redirected to the stub code's initial label, and the
  * same must happen for any other fast paths for the target (i.e. paths from
  * inline caches).
+ *
+ * The 'fallthrough' argument indicates this is a jump emitted for a fallthrough
+ * at the end of the compiled chunk. In this case the opcode may not be a
+ * JOF_JUMP opcode, and the compiler should not watch for fusions.
  */
 bool
-mjit::Compiler::jumpAndRun(Jump j, jsbytecode *target, Jump *slow, bool *trampoline)
+mjit::Compiler::jumpAndRun(Jump j, jsbytecode *target, Jump *slow, bool *trampoline,
+                           bool fallthrough)
 {
     if (trampoline)
         *trampoline = false;
 
+    if (!a->parent && !bytecodeInChunk(target)) {
+        /*
+         * syncForBranch() must have ensured the stack is synced. Figure out
+         * the source of the jump, which may be the opcode after PC if two ops
+         * were fused for a branch.
+         */
+        OutgoingChunkEdge edge;
+        edge.source = PC - outerScript->code;
+        JSOp op = JSOp(*PC);
+        if (!fallthrough && !(js_CodeSpec[op].format & JOF_JUMP) && op != JSOP_TABLESWITCH)
+            edge.source += GetBytecodeLength(PC);
+        edge.target = target - outerScript->code;
+        edge.fastJump = j;
+        if (slow)
+            edge.slowJump = *slow;
+        chunkEdges.append(edge);
+        return true;
+    }
+
     /*
      * Unless we are coming from a branch which synced everything, syncForBranch
      * must have been called and ensured an allocation at the target.
      */
     RegisterAllocation *lvtarget = NULL;
     bool consistent = true;
     if (cx->typeInferenceEnabled()) {
         RegisterAllocation *&alloc = analysis->getAllocation(target);
@@ -6799,45 +7334,36 @@ mjit::Compiler::constructThis()
 bool
 mjit::Compiler::jsop_tableswitch(jsbytecode *pc)
 {
 #if defined JS_CPU_ARM
     JS_NOT_REACHED("Implement jump(BaseIndex) for ARM");
     return true;
 #else
     jsbytecode *originalPC = pc;
-    JSOp op = JSOp(*originalPC);
+    DebugOnly<JSOp> op = JSOp(*originalPC);
     JS_ASSERT(op == JSOP_TABLESWITCH);
 
     uint32_t defaultTarget = GET_JUMP_OFFSET(pc);
     pc += JUMP_OFFSET_LEN;
 
     jsint low = GET_JUMP_OFFSET(pc);
     pc += JUMP_OFFSET_LEN;
     jsint high = GET_JUMP_OFFSET(pc);
     pc += JUMP_OFFSET_LEN;
     int numJumps = high + 1 - low;
     JS_ASSERT(numJumps >= 0);
 
-    /*
-     * If there are no cases, this is a no-op. The default case immediately
-     * follows in the bytecode and is always taken.
-     */
-    if (numJumps == 0) {
-        frame.pop();
-        return true;
-    }
-
     FrameEntry *fe = frame.peek(-1);
     if (fe->isNotType(JSVAL_TYPE_INT32) || numJumps > 256) {
         frame.syncAndForgetEverything();
         masm.move(ImmPtr(originalPC), Registers::ArgReg1);
 
         /* prepareStubCall() is not needed due to forgetEverything() */
-        INLINE_STUBCALL(stubs::TableSwitch, REJOIN_NONE);
+        INLINE_STUBCALL(stubs::TableSwitch, REJOIN_JUMP);
         frame.pop();
         masm.jump(Registers::ReturnReg);
         return true;
     }
 
     RegisterID dataReg;
     if (fe->isConstant()) {
         JS_ASSERT(fe->isType(JSVAL_TYPE_INT32));
@@ -6850,39 +7376,41 @@ mjit::Compiler::jsop_tableswitch(jsbytec
     RegisterID reg = frame.allocReg();
     frame.syncAndForgetEverything();
 
     MaybeJump notInt;
     if (!fe->isType(JSVAL_TYPE_INT32))
         notInt = masm.testInt32(Assembler::NotEqual, frame.addressOf(fe));
 
     JumpTable jt;
-    jt.offsetIndex = jumpTableOffsets.length();
+    jt.offsetIndex = jumpTableEdges.length();
     jt.label = masm.moveWithPatch(ImmPtr(NULL), reg);
     jumpTables.append(jt);
 
     for (int i = 0; i < numJumps; i++) {
         uint32_t target = GET_JUMP_OFFSET(pc);
         if (!target)
             target = defaultTarget;
-        uint32_t offset = (originalPC + target) - script->code;
-        jumpTableOffsets.append(offset);
+        JumpTableEdge edge;
+        edge.source = originalPC - script->code;
+        edge.target = (originalPC + target) - script->code;
+        jumpTableEdges.append(edge);
         pc += JUMP_OFFSET_LEN;
     }
     if (low != 0)
         masm.sub32(Imm32(low), dataReg);
     Jump defaultCase = masm.branch32(Assembler::AboveOrEqual, dataReg, Imm32(numJumps));
     BaseIndex jumpTarget(reg, dataReg, Assembler::ScalePtr);
     masm.jump(jumpTarget);
 
     if (notInt.isSet()) {
         stubcc.linkExitDirect(notInt.get(), stubcc.masm.label());
         stubcc.leave();
         stubcc.masm.move(ImmPtr(originalPC), Registers::ArgReg1);
-        OOL_STUBCALL(stubs::TableSwitch, REJOIN_NONE);
+        OOL_STUBCALL(stubs::TableSwitch, REJOIN_JUMP);
         stubcc.masm.jump(Registers::ReturnReg);
     }
     frame.pop();
     return jumpAndRun(defaultCase, originalPC + defaultTarget);
 #endif
 }
 
 void
@@ -6953,33 +7481,34 @@ mjit::Compiler::fixDoubleTypes(jsbytecod
                 !analysis->trackSlot(newv->slot)) {
                 newv++;
                 continue;
             }
             JS_ASSERT(newv->slot < TotalSlots(script));
             types::TypeSet *targetTypes = analysis->getValueTypes(newv->value);
             FrameEntry *fe = frame.getSlotEntry(newv->slot);
             VarType &vt = a->varTypes[newv->slot];
+            JSValueType type = vt.getTypeTag(cx);
             if (targetTypes->getKnownTypeTag(cx) == JSVAL_TYPE_DOUBLE) {
-                if (vt.type == JSVAL_TYPE_INT32) {
+                if (type == JSVAL_TYPE_INT32) {
                     fixedIntToDoubleEntries.append(newv->slot);
                     frame.ensureDouble(fe);
                     frame.forgetLoopReg(fe);
-                } else if (vt.type == JSVAL_TYPE_UNKNOWN) {
+                } else if (type == JSVAL_TYPE_UNKNOWN) {
                     /*
                      * Unknown here but a double at the target. The type
                      * set for the existing value must be empty, so this
                      * code is doomed and we can just mark the value as
                      * a double.
                      */
                     frame.ensureDouble(fe);
                 } else {
-                    JS_ASSERT(vt.type == JSVAL_TYPE_DOUBLE);
+                    JS_ASSERT(type == JSVAL_TYPE_DOUBLE);
                 }
-            } else if (vt.type == JSVAL_TYPE_DOUBLE) {
+            } else if (type == JSVAL_TYPE_DOUBLE) {
                 fixedDoubleToAnyEntries.append(newv->slot);
                 frame.syncAndForgetFe(fe);
                 frame.forgetLoopReg(fe);
             }
             newv++;
         }
     }
 }
@@ -7007,48 +7536,51 @@ mjit::Compiler::updateVarType()
      * (see prepareInferenceTypes).
      */
 
     types::TypeSet *types = pushedTypeSet(0);
     uint32_t slot = GetBytecodeSlot(script, PC);
 
     if (analysis->trackSlot(slot)) {
         VarType &vt = a->varTypes[slot];
-        vt.types = types;
-        vt.type = types->getKnownTypeTag(cx);
+        vt.setTypes(types);
 
         /*
          * Variables whose type has been inferred as a double need to be
          * maintained by the frame as a double. We might forget the exact
          * representation used by the next call to fixDoubleTypes, fix it now.
          */
-        if (vt.type == JSVAL_TYPE_DOUBLE)
+        if (vt.getTypeTag(cx) == JSVAL_TYPE_DOUBLE)
             frame.ensureDouble(frame.getSlotEntry(slot));
     }
 }
 
 void
 mjit::Compiler::updateJoinVarTypes()
 {
     if (!cx->typeInferenceEnabled())
         return;
 
     /* Update variable types for all new values at this bytecode. */
     const SlotValue *newv = analysis->newValues(PC);
     if (newv) {
         while (newv->slot) {
             if (newv->slot < TotalSlots(script)) {
                 VarType &vt = a->varTypes[newv->slot];
-                vt.types = analysis->getValueTypes(newv->value);
-                JSValueType newType = vt.types->getKnownTypeTag(cx);
-                if (newType != vt.type) {
+                JSValueType type = vt.getTypeTag(cx);
+                vt.setTypes(analysis->getValueTypes(newv->value));
+                if (vt.getTypeTag(cx) != type) {
+                    /*
+                     * If the known type of a variable changes (even if the
+                     * variable itself has not been reassigned) then we can't
+                     * carry a loop register for the var.
+                     */
                     FrameEntry *fe = frame.getSlotEntry(newv->slot);
                     frame.forgetLoopReg(fe);
                 }
-                vt.type = newType;
             }
             newv++;
         }
     }
 }
 
 void
 mjit::Compiler::restoreVarType()
@@ -7061,17 +7593,17 @@ mjit::Compiler::restoreVarType()
     if (slot >= analyze::TotalSlots(script))
         return;
 
     /*
      * Restore the known type of a live local or argument. We ensure that types
      * of tracked variables match their inferred type (as tracked in varTypes),
      * but may have forgotten it due to a branch or syncAndForgetEverything.
      */
-    JSValueType type = a->varTypes[slot].type;
+    JSValueType type = a->varTypes[slot].getTypeTag(cx);
     if (type != JSVAL_TYPE_UNKNOWN &&
         (type != JSVAL_TYPE_DOUBLE || analysis->trackSlot(slot))) {
         FrameEntry *fe = frame.getSlotEntry(slot);
         JS_ASSERT_IF(fe->isTypeKnown(), fe->isType(type));
         if (!fe->isTypeKnown())
             frame.learnType(fe, type, false);
     }
 }
--- a/js/src/methodjit/Compiler.h
+++ b/js/src/methodjit/Compiler.h
@@ -332,35 +332,74 @@ class Compiler : public BaseCompiler
         bool ool;
     };
 
     struct JumpTable {
         DataLabelPtr label;
         size_t offsetIndex;
     };
 
+    struct JumpTableEdge {
+        uint32_t source;
+        uint32_t target;
+    };
+
+    struct ChunkJumpTableEdge {
+        JumpTableEdge edge;
+        void **jumpTableEntry;
+    };
+
     struct LoopEntry {
         uint32_t pcOffset;
         Label label;
     };
 
-    struct VarType {
+    /*
+     * Information about the current type of an argument or local in the
+     * script. The known type tag of these types is cached when possible to
+     * avoid generating duplicate dependency constraints.
+     */
+    class VarType {
         JSValueType type;
         types::TypeSet *types;
+
+      public:
+        void setTypes(types::TypeSet *types) {
+            this->types = types;
+            this->type = JSVAL_TYPE_MISSING;
+        }
+
+        types::TypeSet *getTypes() { return types; }
+
+        JSValueType getTypeTag(JSContext *cx) {
+            if (type == JSVAL_TYPE_MISSING)
+                type = types ? types->getKnownTypeTag(cx) : JSVAL_TYPE_UNKNOWN;
+            return type;
+        }
+    };
+
+    struct OutgoingChunkEdge {
+        uint32_t source;
+        uint32_t target;
+
+        Jump fastJump;
+        MaybeJump slowJump;
     };
 
     struct SlotType
     {
         uint32_t slot;
         VarType vt;
         SlotType(uint32_t slot, VarType vt) : slot(slot), vt(vt) {}
     };
 
     JSScript *outerScript;
+    unsigned chunkIndex;
     bool isConstructing;
+    ChunkDescriptor &outerChunk;
 
     /* SSA information for the outer script and all frames we will be inlining. */
     analyze::CrossScriptSSA ssa;
 
     GlobalObject *globalObj;
     const HeapValue *globalSlots;  /* Original slots pointer. */
 
     Assembler masm;
@@ -423,18 +462,19 @@ private:
     js::Vector<SetElementICInfo, 16, CompilerAllocPolicy> setElemICs;
 #endif
     js::Vector<CallPatchInfo, 64, CompilerAllocPolicy> callPatches;
     js::Vector<InternalCallSite, 64, CompilerAllocPolicy> callSites;
     js::Vector<DoublePatch, 16, CompilerAllocPolicy> doubleList;
     js::Vector<uint32_t> fixedIntToDoubleEntries;
     js::Vector<uint32_t> fixedDoubleToAnyEntries;
     js::Vector<JumpTable, 16> jumpTables;
-    js::Vector<uint32_t, 16> jumpTableOffsets;
+    js::Vector<JumpTableEdge, 16> jumpTableEdges;
     js::Vector<LoopEntry, 16> loopEntries;
+    js::Vector<OutgoingChunkEdge, 16> chunkEdges;
     StubCompiler stubcc;
     Label invokeLabel;
     Label arityLabel;
     Label argsCheckLabel;
 #ifdef JS_MONOIC
     Label argsCheckStub;
     Label argsCheckFallthrough;
     Jump argsCheckJump;
@@ -447,17 +487,17 @@ private:
     uint32_t gcNumber;
     enum { NoApplyTricks, LazyArgsObj } applyTricks;
     PCLengthEntry *pcLengths;
 
     Compiler *thisFromCtor() { return this; }
 
     friend class CompilerAllocPolicy;
   public:
-    Compiler(JSContext *cx, JSScript *outerScript, bool isConstructing);
+    Compiler(JSContext *cx, JSScript *outerScript, unsigned chunkIndex, bool isConstructing);
     ~Compiler();
 
     CompileStatus compile();
 
     Label getLabel() { return masm.label(); }
     bool knownJump(jsbytecode *pc);
     Label labelOf(jsbytecode *target, uint32_t inlineIndex);
     void addCallSite(const InternalCallSite &callSite);
@@ -472,16 +512,25 @@ private:
         if (a == outer)
             return PC;
         ActiveFrame *scan = a;
         while (scan && scan->parent != outer)
             scan = static_cast<ActiveFrame *>(scan->parent);
         return scan->parentPC;
     }
 
+    JITScript *outerJIT() {
+        return outerScript->getJIT(isConstructing);
+    }
+
+    bool bytecodeInChunk(jsbytecode *pc) {
+        return (unsigned(pc - outerScript->code) >= outerChunk.begin)
+            && (unsigned(pc - outerScript->code) < outerChunk.end);
+    }
+
     jsbytecode *inlinePC() { return PC; }
     uint32_t inlineIndex() { return a->inlineIndex; }
 
     Assembler &getAssembler(bool ool) { return ool ? stubcc.masm : masm; }
 
     InvariantCodePatch *getInvariantPatch(unsigned index) {
         return &callSites[index].loopPatch;
     }
@@ -495,21 +544,21 @@ private:
             if (na->exitState)
                 return true;
             na = static_cast<ActiveFrame *>(na->parent);
         }
         return false;
     }
 
   private:
-    CompileStatus performCompilation(JITScript **jitp);
+    CompileStatus performCompilation();
     CompileStatus generatePrologue();
     CompileStatus generateMethod();
     CompileStatus generateEpilogue();
-    CompileStatus finishThisUp(JITScript **jitp);
+    CompileStatus finishThisUp();
     CompileStatus pushActiveFrame(JSScript *script, uint32_t argc);
     void popActiveFrame();
     void updatePCCounters(jsbytecode *pc, Label *start, bool *updated);
     void updatePCTypes(jsbytecode *pc, FrameEntry *fe);
     void updateArithCounters(jsbytecode *pc, FrameEntry *fe,
                              JSValueType firstUseType, JSValueType secondUseType);
     void updateElemCounters(jsbytecode *pc, FrameEntry *obj, FrameEntry *id);
     void bumpPropCounter(jsbytecode *pc, int counter);
@@ -586,19 +635,22 @@ private:
 
     /*
      * Try to convert a double fe to an integer, with no truncation performed,
      * or jump to the slow path per uses.
      */
     void tryConvertInteger(FrameEntry *fe, Uses uses);
 
     /* Opcode handlers. */
-    bool jumpAndRun(Jump j, jsbytecode *target, Jump *slow = NULL, bool *trampoline = NULL);
+    bool jumpAndRun(Jump j, jsbytecode *target,
+                    Jump *slow = NULL, bool *trampoline = NULL,
+                    bool fallthrough = false);
     bool startLoop(jsbytecode *head, Jump entry, jsbytecode *entryTarget);
     bool finishLoop(jsbytecode *head);
+    inline bool shouldStartLoop(jsbytecode *head);
     void jsop_bindname(PropertyName *name);
     void jsop_setglobal(uint32_t index);
     void jsop_getprop_slow(PropertyName *name, bool forPrototype = false);
     void jsop_getarg(uint32_t slot);
     void jsop_setarg(uint32_t slot, bool popped);
     void jsop_this();
     void emitReturn(FrameEntry *fe);
     void emitFinalReturn(Assembler &masm);
--- a/js/src/methodjit/FastArithmetic.cpp
+++ b/js/src/methodjit/FastArithmetic.cpp
@@ -1169,17 +1169,17 @@ mjit::Compiler::jsop_equality_int_string
 
         ic.cond = cond;
         ic.tempReg = tempReg;
         ic.lvr = lvr;
         ic.rvr = rvr;
         ic.stubEntry = stubEntry;
         ic.stub = stub;
 
-        bool useIC = !a->parent;
+        bool useIC = !a->parent && bytecodeInChunk(target);
 
         /* Call the IC stub, which may generate a fast path. */
         if (useIC) {
             /* Adjust for the two values just pushed. */
             ic.addrLabel = stubcc.masm.moveWithPatch(ImmPtr(NULL), Registers::ArgReg1);
             ic.stubCall = OOL_STUBCALL_LOCAL_SLOTS(ic::Equality, REJOIN_BRANCH,
                                                    frame.totalDepth() + 2);
             needStub = false;
--- a/js/src/methodjit/FastOps.cpp
+++ b/js/src/methodjit/FastOps.cpp
@@ -763,19 +763,20 @@ mjit::Compiler::jsop_typeof()
                 type = JSVAL_TYPE_BOOLEAN;
             } else if (atom == rt->atomState.typeAtoms[JSTYPE_NUMBER]) {
                 type = JSVAL_TYPE_INT32;
 
                 /* JSVAL_TYPE_DOUBLE is 0x0 and JSVAL_TYPE_INT32 is 0x1, use <= or > to match both */
                 cond = (cond == Assembler::Equal) ? Assembler::BelowOrEqual : Assembler::Above;
             }
 
-            if (type != JSVAL_TYPE_UNKNOWN) {
-                PC += JSOP_STRING_LENGTH;;
-                PC += JSOP_EQ_LENGTH;
+            jsbytecode *afterPC = PC + JSOP_STRING_LENGTH + JSOP_EQ_LENGTH;
+
+            if (type != JSVAL_TYPE_UNKNOWN && bytecodeInChunk(afterPC)) {
+                PC = afterPC;
 
                 RegisterID result = frame.allocReg(Registers::SingleByteRegs).reg();
 
 #if defined JS_NUNBOX32
                 if (frame.shouldAvoidTypeRemat(fe))
                     masm.set32(cond, masm.tagOf(frame.addressOf(fe)), ImmType(type), result);
                 else
                     masm.set32(cond, frame.tempRegForType(fe), ImmType(type), result);
--- a/js/src/methodjit/FrameState-inl.h
+++ b/js/src/methodjit/FrameState-inl.h
@@ -1356,16 +1356,18 @@ FrameState::pushLocal(uint32_t n)
         /*
          * We really want to assert on local variables, but in the presence of
          * SETLOCAL equivocation of stack slots, and let expressions, just
          * weakly assert on the fixed local vars.
          */
         if (fe->isTracked() && n < a->script->nfixed)
             JS_ASSERT(fe->data.inMemory());
 #endif
+        if (n >= a->script->nfixed)
+            syncFe(fe);
         JSValueType type = fe->isTypeKnown() ? fe->getKnownType() : JSVAL_TYPE_UNKNOWN;
         push(addressOf(fe), type);
     }
 }
 
 inline void
 FrameState::pushArg(uint32_t n)
 {
--- a/js/src/methodjit/FrameState.cpp
+++ b/js/src/methodjit/FrameState.cpp
@@ -350,18 +350,17 @@ FrameState::bestEvictReg(uint32_t mask, 
             JaegerSpew(JSpew_Regalloc, "    %s is 'this' in a constructor\n", reg.name());
             continue;
         }
 
         /*
          * Evict variables which are only live in future loop iterations, and are
          * not carried around the loop in a register.
          */
-        JS_ASSERT_IF(lifetime->loopTail, loop);
-        if (lifetime->loopTail && !loop->carriesLoopReg(fe)) {
+        if (lifetime->loopTail && (!loop || !loop->carriesLoopReg(fe))) {
             JaegerSpew(JSpew_Regalloc, "result: %s (%s) only live in later iterations\n",
                        entryName(fe), reg.name());
             return reg;
         }
 
         JaegerSpew(JSpew_Regalloc, "    %s (%s): %u\n", entryName(fe), reg.name(), lifetime->end);
 
         /*
@@ -574,19 +573,22 @@ FrameState::dumpAllocation(RegisterAlloc
 RegisterAllocation *
 FrameState::computeAllocation(jsbytecode *target)
 {
     JS_ASSERT(cx->typeInferenceEnabled());
     RegisterAllocation *alloc = cx->typeLifoAlloc().new_<RegisterAllocation>(false);
     if (!alloc)
         return NULL;
 
-    if (a->analysis->getCode(target).exceptionEntry || a->analysis->getCode(target).switchTarget ||
-        a->script->hasBreakpointsAt(target)) {
-        /* State must be synced at exception and switch targets, and at traps. */
+    /*
+     * State must be synced at exception and switch targets, at traps and when
+     * crossing between compilation chunks.
+     */
+    if (a->analysis->getCode(target).safePoint ||
+        (!a->parent && !cc.bytecodeInChunk(target))) {
 #ifdef DEBUG
         if (IsJaegerSpewChannelActive(JSpew_Regalloc)) {
             JaegerSpew(JSpew_Regalloc, "allocation at %u:", unsigned(target - a->script->code));
             dumpAllocation(alloc);
         }
 #endif
         return alloc;
     }
--- a/js/src/methodjit/ICRepatcher.h
+++ b/js/src/methodjit/ICRepatcher.h
@@ -55,17 +55,17 @@ class Repatcher : public JSC::RepatchBuf
 {
     typedef JSC::CodeLocationLabel  CodeLocationLabel;
     typedef JSC::CodeLocationCall   CodeLocationCall;
     typedef JSC::FunctionPtr        FunctionPtr;
 
     CodeLocationLabel label;
 
   public:
-    explicit Repatcher(JITScript *js)
+    explicit Repatcher(JITChunk *js)
       : JSC::RepatchBuffer(js->code), label(js->code.m_code.executableAddress())
     { }
 
     explicit Repatcher(const JSC::JITCode &code)
       : JSC::RepatchBuffer(code), label(code.start())
     { }
 
     using JSC::RepatchBuffer::relink;
--- a/js/src/methodjit/InvokeHelpers.cpp
+++ b/js/src/methodjit/InvokeHelpers.cpp
@@ -59,17 +59,16 @@
 
 #include "jsinterpinlines.h"
 #include "jsscopeinlines.h"
 #include "jsscriptinlines.h"
 #include "jsobjinlines.h"
 #include "jscntxtinlines.h"
 #include "jsatominlines.h"
 #include "StubCalls-inl.h"
-#include "MethodJIT-inl.h"
 
 #include "jsautooplen.h"
 
 using namespace js;
 using namespace js::mjit;
 using namespace JSC;
 
 using ic::Repatcher;
@@ -316,25 +315,23 @@ UncachedInlineCall(VMFrame &f, InitialFr
     bool construct = InitialFrameFlagsAreConstructing(initial);
 
     bool newType = construct && cx->typeInferenceEnabled() &&
         types::UseNewType(cx, f.script(), f.pc());
 
     types::TypeMonitorCall(cx, args, construct);
 
     /* Try to compile if not already compiled. */
-    if (newscript->getJITStatus(construct) == JITScript_None) {
-        CompileStatus status = CanMethodJIT(cx, newscript, construct, CompileRequest_Interpreter);
-        if (status == Compile_Error) {
-            /* A runtime exception was thrown, get out. */
-            return false;
-        }
-        if (status == Compile_Abort)
-            *unjittable = true;
+    CompileStatus status = CanMethodJIT(cx, newscript, newscript->code, construct, CompileRequest_Interpreter);
+    if (status == Compile_Error) {
+        /* A runtime exception was thrown, get out. */
+        return false;
     }
+    if (status == Compile_Abort)
+        *unjittable = true;
 
     /*
      * Make sure we are not calling from an inline frame if we need to make a
      * call object for the callee, as doing so could trigger GC and cause
      * jitcode discarding / frame expansion.
      */
     if (f.regs.inlined() && newfun->isHeavyweight()) {
         ExpandInlineFrames(cx->compartment);
@@ -362,21 +359,23 @@ UncachedInlineCall(VMFrame &f, InitialFr
         return false;
 
     /*
      * If newscript was successfully compiled, run it. Skip for calls which
      * will be constructing a new type object for 'this'.
      */
     if (!newType) {
         if (JITScript *jit = newscript->getJIT(regs.fp()->isConstructing())) {
-            *pret = jit->invokeEntry;
+            if (jit->invokeEntry) {
+                *pret = jit->invokeEntry;
 
-            /* Restore the old fp around and let the JIT code repush the new fp. */
-            regs.popFrame((Value *) regs.fp());
-            return true;
+                /* Restore the old fp around and let the JIT code repush the new fp. */
+                regs.popFrame((Value *) regs.fp());
+                return true;
+            }
         }
     }
 
     /*
      * Otherwise, run newscript in the interpreter. Expand any inlined frame we
      * are calling from, as the new frame is not associated with the VMFrame
      * and will not have its prevpc info updated if frame expansion is
      * triggered while interpreting.
@@ -587,21 +586,21 @@ js_InternalThrow(VMFrame &f)
         ScriptEpilogue(f.cx, f.fp(), false);
 
         // Don't remove the last frame, this is the responsibility of
         // JaegerShot()'s caller. We only guarantee that ScriptEpilogue()
         // has been run.
         if (f.entryfp == f.fp())
             break;
 
-        JS_ASSERT(f.regs.sp == cx->regs().sp);
+        JS_ASSERT(&cx->regs() == &f.regs);
         InlineReturn(f);
     }
 
-    JS_ASSERT(f.regs.sp == cx->regs().sp);
+    JS_ASSERT(&cx->regs() == &f.regs);
 
     if (!pc)
         return NULL;
 
     StackFrame *fp = cx->fp();
     JSScript *script = fp->script();
 
     /*
@@ -615,19 +614,16 @@ js_InternalThrow(VMFrame &f)
 
     if (!script->ensureRanAnalysis(cx, NULL)) {
         js_ReportOutOfMemory(cx);
         return NULL;
     }
 
     analyze::AutoEnterAnalysis enter(cx);
 
-    cx->regs().pc = pc;
-    cx->regs().sp = fp->base() + script->analysis()->getCode(pc).stackDepth;
-
     /*
      * Interpret the ENTERBLOCK and EXCEPTION opcodes, so that we don't go
      * back into the interpreter with a pending exception. This will cause
      * it to immediately rethrow.
      */
     if (cx->isExceptionPending()) {
         JS_ASSERT(JSOp(*pc) == JSOP_ENTERBLOCK);
         StaticBlockObject &blockObj = script->getObject(GET_SLOTNO(pc))->asStaticBlock();
@@ -693,16 +689,38 @@ stubs::ScriptProbeOnlyPrologue(VMFrame &
 }
 
 void JS_FASTCALL
 stubs::ScriptProbeOnlyEpilogue(VMFrame &f)
 {
     Probes::exitJSFun(f.cx, f.fp()->fun(), f.fp()->script());
 }
 
+void JS_FASTCALL
+stubs::CrossChunkShim(VMFrame &f, void *edge_)
+{
+    CrossChunkEdge *edge = (CrossChunkEdge *) edge_;
+
+    mjit::ExpandInlineFrames(f.cx->compartment);
+
+    JSScript *script = f.script();
+    JS_ASSERT(edge->target < script->length);
+    JS_ASSERT(script->code + edge->target == f.pc());
+
+    CompileStatus status = CanMethodJIT(f.cx, script, f.pc(), f.fp()->isConstructing(),
+                                        CompileRequest_Interpreter);
+    if (status == Compile_Error)
+        THROW();
+
+    void **addr = f.returnAddressLocation();
+    *addr = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpoline);
+
+    f.fp()->setRejoin(StubRejoin(REJOIN_RESUME));
+}
+
 JS_STATIC_ASSERT(JSOP_NOP == 0);
 
 /* :XXX: common out with identical copy in Compiler.cpp */
 #if defined(JS_METHODJIT_SPEW)
 static const char *OpcodeNames[] = {
 # define OPDEF(op,val,name,token,length,nuses,ndefs,prec,format) #name,
 # include "jsopcode.tbl"
 # undef OPDEF
@@ -849,16 +867,21 @@ js_InternalInterpret(void *returnData, v
         if (script->hasBreakpointsAt(pc))
             skipTrap = true;
         break;
 
       case REJOIN_FALLTHROUGH:
         f.regs.pc = nextpc;
         break;
 
+      case REJOIN_JUMP:
+        f.regs.pc = (jsbytecode *) returnReg;
+        JS_ASSERT(unsigned(f.regs.pc - script->code) < script->length);
+        break;
+
       case REJOIN_NATIVE:
       case REJOIN_NATIVE_LOWERED:
       case REJOIN_NATIVE_GETTER: {
         /*
          * We don't rejoin until after the native stub finishes execution, in
          * which case the return value will be in memory. For lowered natives,
          * the return value will be in the 'this' value's slot.
          */
--- a/js/src/methodjit/MethodJIT.cpp
+++ b/js/src/methodjit/MethodJIT.cpp
@@ -1097,16 +1097,17 @@ mjit::EnterMethodJIT(JSContext *cx, Stac
 }
 
 static inline JaegerStatus
 CheckStackAndEnterMethodJIT(JSContext *cx, StackFrame *fp, void *code, bool partial)
 {
     JS_CHECK_RECURSION(cx, return Jaeger_Throwing);
 
     JS_ASSERT(!cx->compartment->activeAnalysis);
+    JS_ASSERT(code);
 
     Value *stackLimit = cx->stack.space().getStackLimit(cx, REPORT_ERROR);
     if (!stackLimit)
         return Jaeger_Throwing;
 
     return EnterMethodJIT(cx, fp, code, stackLimit, partial);
 }
 
@@ -1124,117 +1125,136 @@ mjit::JaegerShot(JSContext *cx, bool par
 
 JaegerStatus
 js::mjit::JaegerShotAtSafePoint(JSContext *cx, void *safePoint, bool partial)
 {
     return CheckStackAndEnterMethodJIT(cx, cx->fp(), safePoint, partial);
 }
 
 NativeMapEntry *
-JITScript::nmap() const
+JITChunk::nmap() const
 {
-    return (NativeMapEntry *)((char*)this + sizeof(JITScript));
+    return (NativeMapEntry *)((char*)this + sizeof(*this));
 }
 
 js::mjit::InlineFrame *
-JITScript::inlineFrames() const
+JITChunk::inlineFrames() const
 {
     return (js::mjit::InlineFrame *)((char *)nmap() + sizeof(NativeMapEntry) * nNmapPairs);
 }
 
 js::mjit::CallSite *
-JITScript::callSites() const
+JITChunk::callSites() const
 {
     return (js::mjit::CallSite *)&inlineFrames()[nInlineFrames];
 }
 
 char *
-JITScript::commonSectionLimit() const
+JITChunk::commonSectionLimit() const
 {
     return (char *)&callSites()[nCallSites];
 }
 
 #ifdef JS_MONOIC
 ic::GetGlobalNameIC *
-JITScript::getGlobalNames() const
+JITChunk::getGlobalNames() const
 {
     return (ic::GetGlobalNameIC *) commonSectionLimit();
 }
 
 ic::SetGlobalNameIC *
-JITScript::setGlobalNames() const
+JITChunk::setGlobalNames() const
 {
     return (ic::SetGlobalNameIC *)((char *)getGlobalNames() +
             sizeof(ic::GetGlobalNameIC) * nGetGlobalNames);
 }
 
 ic::CallICInfo *
-JITScript::callICs() const
+JITChunk::callICs() const
 {
     return (ic::CallICInfo *)&setGlobalNames()[nSetGlobalNames];
 }
 
 ic::EqualityICInfo *
-JITScript::equalityICs() const
+JITChunk::equalityICs() const
 {
     return (ic::EqualityICInfo *)&callICs()[nCallICs];
 }
 
 char *
-JITScript::monoICSectionsLimit() const
+JITChunk::monoICSectionsLimit() const
 {
     return (char *)&equalityICs()[nEqualityICs];
 }
 #else   // JS_MONOIC
 char *
-JITScript::monoICSectionsLimit() const
+JITChunk::monoICSectionsLimit() const
 {
     return commonSectionLimit();
 }
 #endif  // JS_MONOIC
 
 #ifdef JS_POLYIC
 ic::GetElementIC *
-JITScript::getElems() const
+JITChunk::getElems() const
 {
     return (ic::GetElementIC *)monoICSectionsLimit();
 }
 
 ic::SetElementIC *
-JITScript::setElems() const
+JITChunk::setElems() const
 {
     return (ic::SetElementIC *)((char *)getElems() + sizeof(ic::GetElementIC) * nGetElems);
 }
 
 ic::PICInfo *
-JITScript::pics() const
+JITChunk::pics() const
 {
     return (ic::PICInfo *)((char *)setElems() + sizeof(ic::SetElementIC) * nSetElems);
 }
 
 char *
-JITScript::polyICSectionsLimit() const
+JITChunk::polyICSectionsLimit() const
 {
     return (char *)pics() + sizeof(ic::PICInfo) * nPICs;
 }
 #else   // JS_POLYIC
 char *
-JITScript::polyICSectionsLimit() const
+JITChunk::polyICSectionsLimit() const
 {
     return monoICSectionsLimit();
 }
 #endif  // JS_POLYIC
 
+void
+JITScript::patchEdge(const CrossChunkEdge &edge, void *label)
+{
+    if (edge.sourceJump1 || edge.sourceJump2) {
+        JITChunk *sourceChunk = chunk(script->code + edge.source);
+        JSC::CodeLocationLabel targetLabel(label);
+        ic::Repatcher repatch(sourceChunk);
+
+        if (edge.sourceJump1)
+            repatch.relink(JSC::CodeLocationJump(edge.sourceJump1), targetLabel);
+        if (edge.sourceJump2)
+            repatch.relink(JSC::CodeLocationJump(edge.sourceJump2), targetLabel);
+    }
+    if (edge.jumpTableEntries) {
+        for (unsigned i = 0; i < edge.jumpTableEntries->length(); i++)
+            *(*edge.jumpTableEntries)[i] = label;
+    }
+}
+
 template <typename T>
 static inline void Destroy(T &t)
 {
     t.~T();
 }
 
-mjit::JITScript::~JITScript()
+JITChunk::~JITChunk()
 {
     code.release();
 
     if (pcLengths)
         Foreground::free_(pcLengths);
 
 #if defined JS_POLYIC
     ic::GetElementIC *getElems_ = getElems();
@@ -1244,19 +1264,16 @@ mjit::JITScript::~JITScript()
         Destroy(getElems_[i]);
     for (uint32_t i = 0; i < nSetElems; i++)
         Destroy(setElems_[i]);
     for (uint32_t i = 0; i < nPICs; i++)
         Destroy(pics_[i]);
 #endif
 
 #if defined JS_MONOIC
-    if (argsCheckPool)
-        argsCheckPool->release();
-
     for (JSC::ExecutablePool **pExecPool = execPools.begin();
          pExecPool != execPools.end();
          ++pExecPool)
     {
         (*pExecPool)->release();
     }
 
     for (unsigned i = 0; i < nativeCallStubs.length(); i++) {
@@ -1266,49 +1283,118 @@ mjit::JITScript::~JITScript()
     }
 
     ic::CallICInfo *callICs_ = callICs();
     for (uint32_t i = 0; i < nCallICs; i++) {
         callICs_[i].releasePools();
         if (callICs_[i].fastGuardedObject)
             callICs_[i].purgeGuardedObject();
     }
+#endif
+}
 
-    // Fixup any ICs still referring to this JIT.
-    while (!JS_CLIST_IS_EMPTY(&callers)) {
-        JS_STATIC_ASSERT(offsetof(ic::CallICInfo, links) == 0);
-        ic::CallICInfo *ic = (ic::CallICInfo *) callers.next;
+void
+JITScript::destroy(JSContext *cx)
+{
+    for (unsigned i = 0; i < nchunks; i++)
+        destroyChunk(cx, i);
+}
+
+void
+JITScript::destroyChunk(JSContext *cx, unsigned chunkIndex, bool resetUses)
+{
+    ChunkDescriptor &desc = chunkDescriptor(chunkIndex);
+
+    if (desc.chunk) {
+        Probes::discardMJITCode(cx, this, script, desc.chunk->code.m_code.executableAddress());
+        cx->delete_(desc.chunk);
+        desc.chunk = NULL;
 
-        uint8_t *start = (uint8_t *)ic->funGuard.executableAddress();
-        JSC::RepatchBuffer repatch(JSC::JITCode(start - 32, 64));
+        CrossChunkEdge *edges = this->edges();
+        for (unsigned i = 0; i < nedges; i++) {
+            CrossChunkEdge &edge = edges[i];
+            if (edge.source >= desc.begin && edge.source < desc.end) {
+                edge.sourceJump1 = edge.sourceJump2 = NULL;
+                if (edge.jumpTableEntries) {
+                    cx->delete_(edge.jumpTableEntries);
+                    edge.jumpTableEntries = NULL;
+                }
+            } else if (edge.target >= desc.begin && edge.target < desc.end) {
+                edge.targetLabel = NULL;
+                patchEdge(edge, edge.shimLabel);
+            }
+        }
+    }
+
+    if (resetUses)
+        desc.counter = 0;
 
-        repatch.repatch(ic->funGuard, NULL);
-        repatch.relink(ic->funJump, ic->slowPathStart);
-        ic->purgeGuardedObject();
+    if (chunkIndex == 0) {
+        if (argsCheckPool) {
+            argsCheckPool->release();
+            argsCheckPool = NULL;
+        }
+
+        invokeEntry = NULL;
+        fastEntry = NULL;
+        arityCheckEntry = NULL;
+        argsCheckEntry = NULL;
+
+        if (script->jitNormal == this)
+            script->jitArityCheckNormal = NULL;
+        else
+            script->jitArityCheckCtor = NULL;
+
+        // Fixup any ICs still referring to this chunk.
+        while (!JS_CLIST_IS_EMPTY(&callers)) {
+            JS_STATIC_ASSERT(offsetof(ic::CallICInfo, links) == 0);
+            ic::CallICInfo *ic = (ic::CallICInfo *) callers.next;
+
+            uint8_t *start = (uint8_t *)ic->funGuard.executableAddress();
+            JSC::RepatchBuffer repatch(JSC::JITCode(start - 32, 64));
+
+            repatch.repatch(ic->funGuard, NULL);
+            repatch.relink(ic->funJump, ic->slowPathStart);
+            ic->purgeGuardedObject();
+        }
     }
-#endif
 }
 
 size_t
 JSScript::jitDataSize(JSMallocSizeOfFun mallocSizeOf)
 {
     size_t n = 0;
     if (jitNormal)
         n += jitNormal->scriptDataSize(mallocSizeOf); 
     if (jitCtor)
         n += jitCtor->scriptDataSize(mallocSizeOf); 
     return n;
 }
 
-/* Please keep in sync with Compiler::finishThisUp! */
 size_t
 mjit::JITScript::scriptDataSize(JSMallocSizeOfFun mallocSizeOf)
 {
+    size_t usable = mallocSizeOf(this,
+                                 sizeof(JITScript)
+                                 + (nchunks * sizeof(ChunkDescriptor))
+                                 + (nedges * sizeof(CrossChunkEdge)));
+    for (unsigned i = 0; i < nchunks; i++) {
+        const ChunkDescriptor &desc = chunkDescriptor(i);
+        if (desc.chunk)
+            usable += desc.chunk->scriptDataSize(mallocSizeOf);
+    }
+    return usable;
+}
+
+/* Please keep in sync with Compiler::finishThisUp! */
+size_t
+mjit::JITChunk::scriptDataSize(JSMallocSizeOfFun mallocSizeOf)
+{
     size_t computedSize =
-        sizeof(JITScript) +
+        sizeof(JITChunk) +
         sizeof(NativeMapEntry) * nNmapPairs +
         sizeof(InlineFrame) * nInlineFrames +
         sizeof(CallSite) * nCallSites +
 #if defined JS_MONOIC
         sizeof(ic::GetGlobalNameIC) * nGetGlobalNames +
         sizeof(ic::SetGlobalNameIC) * nSetGlobalNames +
         sizeof(ic::CallICInfo) * nCallICs +
         sizeof(ic::EqualityICInfo) * nEqualityICs +
@@ -1329,89 +1415,52 @@ mjit::ReleaseScriptCode(JSContext *cx, J
     // NB: The recompiler may call ReleaseScriptCode, in which case it
     // will get called again when the script is destroyed, so we
     // must protect against calling ReleaseScriptCode twice.
 
     JITScript **pjit = construct ? &script->jitCtor : &script->jitNormal;
     void **parity = construct ? &script->jitArityCheckCtor : &script->jitArityCheckNormal;
 
     if (*pjit) {
-        Probes::discardMJITCode(cx, *pjit, script, (*pjit)->code.m_code.executableAddress());
-        (*pjit)->~JITScript();
+        (*pjit)->destroy(cx);
         cx->free_(*pjit);
         *pjit = NULL;
         *parity = NULL;
     }
 }
 
 #ifdef JS_METHODJIT_PROFILE_STUBS
 void JS_FASTCALL
 mjit::ProfileStubCall(VMFrame &f)
 {
     JSOp op = JSOp(*f.regs.pc);
     StubCallsForOp[op]++;
 }
 #endif
 
-#ifdef JS_POLYIC
-static int
-PICPCComparator(const void *key, const void *entry)
+JITChunk *
+JITScript::findCodeChunk(void *addr)
 {
-    const jsbytecode *pc = (const jsbytecode *)key;
-    const ic::PICInfo *pic = (const ic::PICInfo *)entry;
-
-    /*
-     * We can't just return |pc - pic->pc| because the pointers may be
-     * far apart and an int (or even a ptrdiff_t) may not be large
-     * enough to hold the difference. C says that pointer subtraction
-     * is only guaranteed to work for two pointers into the same array.
-     */
-    if (pc < pic->pc)
-        return -1;
-    else if (pc == pic->pc)
-        return 0;
-    else
-        return 1;
+    for (unsigned i = 0; i < nchunks; i++) {
+        ChunkDescriptor &desc = chunkDescriptor(i);
+        if (desc.chunk && desc.chunk->isValidCode(addr))
+            return desc.chunk;
+    }
+    return NULL;
 }
 
-uintN
-mjit::GetCallTargetCount(JSScript *script, jsbytecode *pc)
+jsbytecode *
+JITScript::nativeToPC(void *returnAddress, CallSite **pinline)
 {
-    ic::PICInfo *pic;
-    
-    if (mjit::JITScript *jit = script->getJIT(false)) {
-        pic = (ic::PICInfo *)bsearch(pc, jit->pics(), jit->nPICs, sizeof(ic::PICInfo),
-                                     PICPCComparator);
-        if (pic)
-            return pic->stubsGenerated + 1; /* Add 1 for the inline path. */
-    }
-    
-    if (mjit::JITScript *jit = script->getJIT(true)) {
-        pic = (ic::PICInfo *)bsearch(pc, jit->pics(), jit->nPICs, sizeof(ic::PICInfo),
-                                     PICPCComparator);
-        if (pic)
-            return pic->stubsGenerated + 1; /* Add 1 for the inline path. */
-    }
+    JITChunk *chunk = findCodeChunk(returnAddress);
+    JS_ASSERT(chunk);
 
-    return 1;
-}
-#else
-uintN
-mjit::GetCallTargetCount(JSScript *script, jsbytecode *pc)
-{
-    return 1;
-}
-#endif
-
-jsbytecode *
-JITScript::nativeToPC(void *returnAddress, CallSite **pinline) const
-{
     size_t low = 0;
-    size_t high = nCallICs;
-    js::mjit::ic::CallICInfo *callICs_ = callICs();
+    size_t high = chunk->nCallICs;
+    js::mjit::ic::CallICInfo *callICs_ = chunk->callICs();
     while (high > low + 1) {
         /* Could overflow here on a script with 2 billion calls. Oh well. */
         size_t mid = (high + low) / 2;
         void *entry = callICs_[mid].funGuard.executableAddress();
 
         /*
          * Use >= here as the return address of the call is likely to be
          * the start address of the next (possibly IC'ed) operation.
@@ -1423,17 +1472,17 @@ JITScript::nativeToPC(void *returnAddres
     }
 
     js::mjit::ic::CallICInfo &ic = callICs_[low];
     JS_ASSERT((uint8_t*)ic.funGuard.executableAddress() + ic.joinPointOffset == returnAddress);
 
     if (ic.call->inlineIndex != UINT32_MAX) {
         if (pinline)
             *pinline = ic.call;
-        InlineFrame *frame = &inlineFrames()[ic.call->inlineIndex];
+        InlineFrame *frame = &chunk->inlineFrames()[ic.call->inlineIndex];
         while (frame && frame->parent)
             frame = frame->parent;
         return frame->parentpc;
     }
 
     if (pinline)
         *pinline = NULL;
     return script->code + ic.call->pcOffset;
--- a/js/src/methodjit/MethodJIT.h
+++ b/js/src/methodjit/MethodJIT.h
@@ -58,17 +58,20 @@
 #endif
 
 #if !defined(JS_NUNBOX32) && !defined(JS_PUNBOX64)
 # error "No boxing format selected."
 #endif
 
 namespace js {
 
-namespace mjit { struct JITScript; }
+namespace mjit {
+    struct JITChunk;
+    struct JITScript;
+}
 
 struct VMFrame
 {
 #if defined(JS_CPU_SPARC)
     void *savedL0;
     void *savedL1;
     void *savedL2;
     void *savedL3;
@@ -238,16 +241,19 @@ struct VMFrame
      * Get the current frame and JIT. Note that these are NOT stable in case
      * of recompilations; all code which expects these to be stable should
      * check that cx->recompilations() has not changed across a call that could
      * trigger recompilation (pretty much any time the VM is called into).
      */
     StackFrame *fp() { return regs.fp(); }
     mjit::JITScript *jit() { return fp()->jit(); }
 
+    inline mjit::JITChunk *chunk();
+    inline unsigned chunkIndex();
+
     /* Get the inner script/PC in case of inlining. */
     inline JSScript *script();
     inline jsbytecode *pc();
 
 #if defined(JS_CPU_SPARC)
     static const size_t offsetOfFp = 30 * sizeof(void *) + FrameRegs::offsetOfFp;
     static const size_t offsetOfInlined = 30 * sizeof(void *) + FrameRegs::offsetOfInlined;
 #elif defined(JS_CPU_MIPS)
@@ -298,16 +304,19 @@ enum RejoinState {
      * State is coherent for the start of the current bytecode, which is a TRAP
      * that has already been invoked and should not be invoked again.
      */
     REJOIN_TRAP,
 
     /* State is coherent for the start of the next (fallthrough) bytecode. */
     REJOIN_FALLTHROUGH,
 
+    /* State is coherent for the start of the bytecode returned by the call. */
+    REJOIN_JUMP,
+
     /*
      * As for REJOIN_FALLTHROUGH, but holds a reference on the compartment's
      * orphaned native pools which needs to be reclaimed by InternalInterpret.
      * The return value needs to be adjusted if REJOIN_NATIVE_LOWERED, and
      * REJOIN_NATIVE_GETTER is for ABI calls made for property accesses.
      */
     REJOIN_NATIVE,
     REJOIN_NATIVE_LOWERED,
@@ -363,16 +372,30 @@ enum RejoinState {
 
     /*
      * For an opcode fused with IFEQ/IFNE, call returns a boolean indicating
      * the result of the comparison and whether to take or not take the branch.
      */
     REJOIN_BRANCH
 };
 
+/* Get the rejoin state for a StackFrame after returning from a scripted call. */
+static inline JSRejoinState
+ScriptedRejoin(uint32_t pcOffset)
+{
+    return REJOIN_SCRIPTED | (pcOffset << 1);
+}
+
+/* Get the rejoin state for a StackFrame after returning from a stub call. */
+static inline JSRejoinState
+StubRejoin(RejoinState rejoin)
+{
+    return rejoin << 1;
+}
+
 /* Helper to watch for recompilation and frame expansion activity on a compartment. */
 struct RecompilationMonitor
 {
     JSContext *cx;
 
     /*
      * If either inline frame expansion or recompilation occurs, then ICs and
      * stubs should not depend on the frame or JITs being intact. The two are
@@ -631,67 +654,48 @@ struct NativeCallStub {
      */
 #ifdef JS_CPU_X64
     JSC::CodeLocationDataLabelPtr jump;
 #else
     JSC::CodeLocationJump jump;
 #endif
 };
 
-struct JITScript {
+struct JITChunk
+{
     typedef JSC::MacroAssemblerCodeRef CodeRef;
     CodeRef         code;       /* pool & code addresses */
 
-    JSScript        *script;
-
-    void            *invokeEntry;       /* invoke address */
-    void            *fastEntry;         /* cached entry, fastest */
-    void            *arityCheckEntry;   /* arity check address */
-    void            *argsCheckEntry;    /* arguments check address */
-
     PCLengthEntry   *pcLengths;         /* lengths for outer and inline frames */
 
     /*
      * This struct has several variable-length sections that are allocated on
      * the end:  nmaps, MICs, callICs, etc.  To save space -- worthwhile
      * because JITScripts are common -- we only record their lengths.  We can
      * find any of the sections from the lengths because we know their order.
      * Therefore, do not change the section ordering in finishThisUp() without
      * changing nMICs() et al as well.
      */
-    uint32_t        nNmapPairs:31;      /* The NativeMapEntrys are sorted by .bcOff.
+    uint32_t        nNmapPairs;         /* The NativeMapEntrys are sorted by .bcOff.
                                            .ncode values may not be NULL. */
-    bool            singleStepMode:1;   /* compiled in "single step mode" */
     uint32_t        nInlineFrames;
     uint32_t        nCallSites;
 #ifdef JS_MONOIC
     uint32_t        nGetGlobalNames;
     uint32_t        nSetGlobalNames;
     uint32_t        nCallICs;
     uint32_t        nEqualityICs;
 #endif
 #ifdef JS_POLYIC
     uint32_t        nGetElems;
     uint32_t        nSetElems;
     uint32_t        nPICs;
 #endif
 
 #ifdef JS_MONOIC
-    /* Inline cache at function entry for checking this/argument types. */
-    JSC::CodeLocationLabel argsCheckStub;
-    JSC::CodeLocationLabel argsCheckFallthrough;
-    JSC::CodeLocationJump  argsCheckJump;
-    JSC::ExecutablePool *argsCheckPool;
-    void resetArgsCheck();
-#endif
-
-    /* List of inline caches jumping to the fastEntry. */
-    JSCList          callers;
-
-#ifdef JS_MONOIC
     // Additional ExecutablePools that IC stubs were generated into.
     typedef Vector<JSC::ExecutablePool *, 0, SystemAllocPolicy> ExecPoolVector;
     ExecPoolVector execPools;
 #endif
 
     // Additional ExecutablePools for native call and getter stubs.
     Vector<NativeCallStub, 0, SystemAllocPolicy> nativeCallStubs;
 
@@ -705,38 +709,149 @@ struct JITScript {
     ic::EqualityICInfo *equalityICs() const;
 #endif
 #ifdef JS_POLYIC
     ic::GetElementIC *getElems() const;
     ic::SetElementIC *setElems() const;
     ic::PICInfo     *pics() const;
 #endif
 
-    ~JITScript();
-
     bool isValidCode(void *ptr) {
         char *jitcode = (char *)code.m_code.executableAddress();
         char *jcheck = (char *)ptr;
         return jcheck >= jitcode && jcheck < jitcode + code.m_size;
     }
 
     void nukeScriptDependentICs();
 
     /* |mallocSizeOf| can be NULL here, in which case the fallback size computation will be used. */
     size_t scriptDataSize(JSMallocSizeOfFun mallocSizeOf);
 
-    jsbytecode *nativeToPC(void *returnAddress, CallSite **pinline) const;
+    ~JITChunk();
 
   private:
     /* Helpers used to navigate the variable-length sections. */
     char *commonSectionLimit() const;
     char *monoICSectionsLimit() const;
     char *polyICSectionsLimit() const;
 };
 
+void
+SetChunkLimit(uint32_t limit);
+
+/* Information about a compilation chunk within a script. */
+struct ChunkDescriptor
+{
+    /* Bytecode range of the chunk: [begin,end) */
+    uint32_t begin;
+    uint32_t end;
+
+    /* Use counter for the chunk. */
+    uint32_t counter;
+
+    /* Optional compiled code for the chunk. */
+    JITChunk *chunk;
+};
+
+/* Jump or fallthrough edge in the bytecode which crosses a chunk boundary. */
+struct CrossChunkEdge
+{
+    /* Bytecode offsets of the source and target of the edge. */
+    uint32_t source;
+    uint32_t target;
+
+    /* Locations of the jump(s) for the source, NULL if not compiled. */
+    void *sourceJump1;
+    void *sourceJump2;
+
+    /* Any jump table entries along this edge. */
+    typedef Vector<void**,4,SystemAllocPolicy> JumpTableEntryVector;
+    JumpTableEntryVector *jumpTableEntries;
+
+    /* Location of the label for the target, NULL if not compiled. */
+    void *targetLabel;
+
+    /*
+     * Location of a shim which will transfer control to the interpreter at the
+     * target bytecode. The source jumps are patched to jump to this label if
+     * the source is compiled but not the target.
+     */
+    void *shimLabel;
+};
+
+struct JITScript
+{
+    JSScript        *script;
+
+    void            *invokeEntry;       /* invoke address */
+    void            *fastEntry;         /* cached entry, fastest */
+    void            *arityCheckEntry;   /* arity check address */
+    void            *argsCheckEntry;    /* arguments check address */
+
+    /* List of inline caches jumping to the fastEntry. */
+    JSCList         callers;
+
+    uint32_t        nchunks;
+    uint32_t        nedges;
+
+    /*
+     * Pool for shims which transfer control to the interpreter on cross chunk
+     * edges to chunks which do not have compiled code.
+     */
+    JSC::ExecutablePool *shimPool;
+
+#ifdef JS_MONOIC
+    /* Inline cache at function entry for checking this/argument types. */
+    JSC::CodeLocationLabel argsCheckStub;
+    JSC::CodeLocationLabel argsCheckFallthrough;
+    JSC::CodeLocationJump  argsCheckJump;
+    JSC::ExecutablePool *argsCheckPool;
+    void resetArgsCheck();
+#endif
+
+    ChunkDescriptor &chunkDescriptor(unsigned i) {
+        JS_ASSERT(i < nchunks);
+        ChunkDescriptor *descs = (ChunkDescriptor *) ((char *) this + sizeof(JITScript));
+        return descs[i];
+    }
+
+    unsigned chunkIndex(jsbytecode *pc) {
+        unsigned offset = pc - script->code;
+        JS_ASSERT(offset < script->length);
+        for (unsigned i = 0; i < nchunks; i++) {
+            const ChunkDescriptor &desc = chunkDescriptor(i);
+            JS_ASSERT(desc.begin <= offset);
+            if (offset < desc.end)
+                return i;
+        }
+        JS_NOT_REACHED("Bad chunk layout");
+        return 0;
+    }
+
+    JITChunk *chunk(jsbytecode *pc) {
+        return chunkDescriptor(chunkIndex(pc)).chunk;
+    }
+
+    JITChunk *findCodeChunk(void *addr);
+
+    CrossChunkEdge *edges() {
+        return (CrossChunkEdge *) (&chunkDescriptor(0) + nchunks);
+    }
+
+    /* Patch any compiled sources in edge to jump to label. */
+    void patchEdge(const CrossChunkEdge &edge, void *label);
+
+    jsbytecode *nativeToPC(void *returnAddress, CallSite **pinline);
+
+    size_t scriptDataSize(JSMallocSizeOfFun mallocSizeOf);
+
+    void destroy(JSContext *cx);
+    void destroyChunk(JSContext *cx, unsigned chunkIndex, bool resetUses = true);
+};
+
 /*
  * Execute the given mjit code. This is a low-level call and callers must
  * provide the same guarantees as JaegerShot/CheckStackAndEnterMethodJIT.
  */
 JaegerStatus EnterMethodJIT(JSContext *cx, StackFrame *fp, void *code, Value *stackLimit,
                             bool partial);
 
 /* Execute a method that has been JIT compiled. */
@@ -753,18 +868,25 @@ enum CompileStatus
     Compile_Retry,        // static overflow or failed inline, try to recompile
     Compile_Error,        // OOM
     Compile_Skipped
 };
 
 void JS_FASTCALL
 ProfileStubCall(VMFrame &f);
 
-CompileStatus JS_NEVER_INLINE
-TryCompile(JSContext *cx, JSScript *script, bool construct);
+enum CompileRequest
+{
+    CompileRequest_Interpreter,
+    CompileRequest_JIT
+};
+
+CompileStatus
+CanMethodJIT(JSContext *cx, JSScript *script, jsbytecode *pc,
+             bool construct, CompileRequest request);
 
 void
 ReleaseScriptCode(JSContext *cx, JSScript *script, bool construct);
 
 inline void
 ReleaseScriptCode(JSContext *cx, JSScript *script)
 {
     if (script->jitCtor)
@@ -809,19 +931,16 @@ struct CallSite
         this->rejoin = rejoin;
     }
 
     bool isTrap() const {
         return rejoin == REJOIN_TRAP;
     }
 };
 
-uintN
-GetCallTargetCount(JSScript *script, jsbytecode *pc);
-
 void
 DumpAllProfiles(JSContext *cx);
 
 inline void * bsearch_nmap(NativeMapEntry *nmap, size_t nPairs, size_t bcOff)
 {
     size_t lo = 1, hi = nPairs;
     while (1) {
         /* current unsearched space is from lo-1 to hi-1, inclusive. */
@@ -838,52 +957,56 @@ inline void * bsearch_nmap(NativeMapEntr
             continue;
         }
         return nmap[mid-1].ncode;
     }
 }
 
 } /* namespace mjit */
 
+inline mjit::JITChunk *
+VMFrame::chunk()
+{
+    return jit()->chunk(regs.pc);
+}
+
+inline unsigned
+VMFrame::chunkIndex()
+{
+    return jit()->chunkIndex(regs.pc);
+}
+
 inline JSScript *
 VMFrame::script()
 {
     if (regs.inlined())
-        return jit()->inlineFrames()[regs.inlined()->inlineIndex].fun->script();
+        return chunk()->inlineFrames()[regs.inlined()->inlineIndex].fun->script();
     return fp()->script();
 }
 
 inline jsbytecode *
 VMFrame::pc()
 {
     if (regs.inlined())
         return script()->code + regs.inlined()->pcOffset;
     return regs.pc;
 }
 
 } /* namespace js */
 
 inline void *
-JSScript::maybeNativeCodeForPC(bool constructing, jsbytecode *pc)
+JSScript::nativeCodeForPC(bool constructing, jsbytecode *pc)
 {
     js::mjit::JITScript *jit = getJIT(constructing);
     if (!jit)
         return NULL;
-    JS_ASSERT(pc >= code && pc < code + length);
-    return bsearch_nmap(jit->nmap(), jit->nNmapPairs, (size_t)(pc - code));
-}
-
-inline void *
-JSScript::nativeCodeForPC(bool constructing, jsbytecode *pc)
-{
-    js::mjit::JITScript *jit = getJIT(constructing);
-    JS_ASSERT(pc >= code && pc < code + length);
-    void* native = bsearch_nmap(jit->nmap(), jit->nNmapPairs, (size_t)(pc - code));
-    JS_ASSERT(native);
-    return native;
+    js::mjit::JITChunk *chunk = jit->chunk(pc);
+    if (!chunk)
+        return NULL;
+    return bsearch_nmap(chunk->nmap(), chunk->nNmapPairs, (size_t)(pc - code));
 }
 
 extern "C" void JaegerTrampolineReturn();
 extern "C" void JaegerInterpoline();
 extern "C" void JaegerInterpolineScripted();
 
 #if defined(_MSC_VER) || defined(_WIN64)
 extern "C" void *JaegerThrowpoline(js::VMFrame *vmFrame);
--- a/js/src/methodjit/MonoIC.cpp
+++ b/js/src/methodjit/MonoIC.cpp
@@ -73,17 +73,17 @@ typedef JSC::MacroAssembler::Label Label
 typedef JSC::MacroAssembler::DataLabel32 DataLabel32;
 typedef JSC::MacroAssembler::DataLabelPtr DataLabelPtr;
 
 #if defined JS_MONOIC
 
 static void
 PatchGetFallback(VMFrame &f, ic::GetGlobalNameIC *ic)
 {
-    Repatcher repatch(f.jit());
+    Repatcher repatch(f.chunk());
     JSC::FunctionPtr fptr(JS_FUNC_TO_DATA_PTR(void *, stubs::Name));
     repatch.relink(ic->slowPathCall, fptr);
 }
 
 void JS_FASTCALL
 ic::GetGlobalName(VMFrame &f, ic::GetGlobalNameIC *ic)
 {
     JSObject &obj = f.fp()->scopeChain().global();
@@ -105,17 +105,17 @@ ic::GetGlobalName(VMFrame &f, ic::GetGlo
         if (shape)
             PatchGetFallback(f, ic);
         stubs::Name(f);
         return;
     }
     uint32_t slot = shape->slot();
 
     /* Patch shape guard. */
-    Repatcher repatcher(f.jit());
+    Repatcher repatcher(f.chunk());
     repatcher.repatch(ic->fastPathStart.dataLabelPtrAtOffset(ic->shapeOffset), obj.lastProperty());
 
     /* Patch loads. */
     uint32_t index = obj.dynamicSlotIndex(slot);
     JSC::CodeLocationLabel label = ic->fastPathStart.labelAtOffset(ic->loadStoreOffset);
     repatcher.patchAddressOffsetForValueLoad(label, index * sizeof(Value));
 
     /* Do load anyway... this time. */
@@ -131,17 +131,17 @@ DisabledSetGlobal(VMFrame &f, ic::SetGlo
 
 template void JS_FASTCALL DisabledSetGlobal<true>(VMFrame &f, ic::SetGlobalNameIC *ic);
 template void JS_FASTCALL DisabledSetGlobal<false>(VMFrame &f, ic::SetGlobalNameIC *ic);
 
 static void
 PatchSetFallback(VMFrame &f, ic::SetGlobalNameIC *ic)
 {
     JSScript *script = f.script();
-    Repatcher repatch(f.jit());
+    Repatcher repatch(f.chunk());
     VoidStubSetGlobal stub = STRICT_VARIANT(DisabledSetGlobal);
     JSC::FunctionPtr fptr(JS_FUNC_TO_DATA_PTR(void *, stub));
     repatch.relink(ic->slowPathCall, fptr);
 }
 
 void
 SetGlobalNameIC::patchExtraShapeGuard(Repatcher &repatcher, const Shape *shape)
 {
@@ -172,17 +172,17 @@ UpdateSetGlobalName(VMFrame &f, ic::SetG
         obj->watched())
     {
         /* Disable the IC for weird shape attributes and watchpoints. */
         PatchSetFallback(f, ic);
         return Lookup_Uncacheable;
     }
 
     /* Object is not branded, so we can use the inline path. */
-    Repatcher repatcher(f.jit());
+    Repatcher repatcher(f.chunk());
     ic->patchInlineShapeGuard(repatcher, obj->lastProperty());
 
     uint32_t index = obj->dynamicSlotIndex(shape->slot());
     JSC::CodeLocationLabel label = ic->fastPathStart.labelAtOffset(ic->loadStoreOffset);
     repatcher.patchAddressOffsetForValueStore(label, index * sizeof(Value),
                                               ic->vr.isTypeKnown());
 
     return Lookup_Cacheable;
@@ -217,19 +217,17 @@ class EqualityICLinker : public LinkerHe
         : LinkerHelper(masm, JSC::METHOD_CODE), f(f)
     { }
 
     bool init(JSContext *cx) {
         JSC::ExecutablePool *pool = LinkerHelper::init(cx);
         if (!pool)
             return false;
         JS_ASSERT(!f.regs.inlined());
-        JSScript *script = f.fp()->script();
-        JITScript *jit = script->getJIT(f.fp()->isConstructing());
-        if (!jit->execPools.append(pool)) {
+        if (!f.chunk()->execPools.append(pool)) {
             pool->release();
             js_ReportOutOfMemory(cx);
             return false;
         }
         return true;
     }
 };
 
@@ -349,24 +347,24 @@ class EqualityCompiler : public BaseComp
     }
 
     bool linkForIC(Assembler &masm)
     {
         EqualityICLinker buffer(masm, f);
         if (!buffer.init(cx))
             return false;
 
-        Repatcher repatcher(f.jit());
+        Repatcher repatcher(f.chunk());
 
         /* Overwrite the call to the IC with a call to the stub. */
         JSC::FunctionPtr fptr(JS_FUNC_TO_DATA_PTR(void *, ic.stub));
         repatcher.relink(ic.stubCall, fptr);
 
         // Silently fail, the IC is disabled now.
-        if (!buffer.verifyRange(f.jit()))
+        if (!buffer.verifyRange(f.chunk()))
             return true;
 
         /* Set the targets of all type test failures to go to the stub. */
         for (size_t i = 0; i < jumpList.length(); i++)
             buffer.link(jumpList[i], ic.stubEntry);
         jumpList.clear();
 
         /* Set the targets for the the success and failure of the actual equality test. */
@@ -435,17 +433,17 @@ NativeStubLinker::init(JSContext *cx)
     JSC::ExecutablePool *pool = LinkerHelper::init(cx);
     if (!pool)
         return false;
 
     NativeCallStub stub;
     stub.pc = pc;
     stub.pool = pool;
     stub.jump = locationOf(done);
-    if (!jit->nativeCallStubs.append(stub)) {
+    if (!chunk->nativeCallStubs.append(stub)) {
         pool->release();
         return false;
     }
 
     return true;
 }
 
 /*
@@ -575,27 +573,27 @@ class CallCompiler : public BaseCompiler
         JSC::ExecutablePool *ep = linker.init(f.cx);
         if (!ep)
             return NULL;
         JS_ASSERT(!ic.pools[index]);
         ic.pools[index] = ep;
         return ep;
     }
 
-    void disable(JITScript *jit)
+    void disable()
     {
         JSC::CodeLocationCall oolCall = ic.slowPathStart.callAtOffset(ic.oolCallOffset);
-        Repatcher repatch(jit);
+        Repatcher repatch(f.chunk());
         JSC::FunctionPtr fptr = callingNew
                                 ? JSC::FunctionPtr(JS_FUNC_TO_DATA_PTR(void *, SlowNewFromIC))
                                 : JSC::FunctionPtr(JS_FUNC_TO_DATA_PTR(void *, SlowCallFromIC));
         repatch.relink(oolCall, fptr);
     }
 
-    bool generateFullCallStub(JITScript *from, JSScript *script, uint32_t flags)
+    bool generateFullCallStub(JSScript *script, uint32_t flags)
     {
         /*
          * Create a stub that works with arity mismatches. Like the fast-path,
          * this allocates a frame on the caller side, but also performs extra
          * checks for compilability. Perhaps this should be a separate, shared
          * trampoline, but for now we generate it dynamically.
          */
         Assembler masm;
@@ -644,17 +642,17 @@ class CallCompiler : public BaseCompiler
                                 compilePtr, f.regs.pc, &inlined, -1);
         }
 
         Jump notCompiled = masm.branchTestPtr(Assembler::Zero, Registers::ReturnReg,
                                               Registers::ReturnReg);
         masm.loadPtr(FrameAddress(VMFrame::offsetOfRegsSp()), JSFrameReg);
 
         /* Compute the value of ncode to use at this call site. */
-        ncode = (uint8_t *) f.jit()->code.m_code.executableAddress() + ic.call->codeOffset;
+        ncode = (uint8_t *) f.chunk()->code.m_code.executableAddress() + ic.call->codeOffset;
         masm.storePtr(ImmPtr(ncode), Address(JSFrameReg, StackFrame::offsetOfNcode()));
 
         masm.jump(Registers::ReturnReg);
 
         hasCode.linkTo(masm.label(), &masm);
 
         /* Get nmap[ARITY], set argc, call. */
         if (ic.frameSize.isStatic())
@@ -663,46 +661,46 @@ class CallCompiler : public BaseCompiler
             masm.load32(FrameAddress(VMFrame::offsetOfDynamicArgc()), JSParamReg_Argc);
         masm.jump(t0);
 
         LinkerHelper linker(masm, JSC::METHOD_CODE);
         JSC::ExecutablePool *ep = poolForSize(linker, CallICInfo::Pool_ScriptStub);
         if (!ep)
             return false;
 
-        if (!linker.verifyRange(from)) {
-            disable(from);
+        if (!linker.verifyRange(f.chunk())) {
+            disable();
             return true;
         }
 
         linker.link(notCompiled, ic.slowPathStart.labelAtOffset(ic.slowJoinOffset));
         JSC::CodeLocationLabel cs = linker.finalize(f);
 
         JaegerSpew(JSpew_PICs, "generated CALL stub %p (%lu bytes)\n", cs.executableAddress(),
                    (unsigned long) masm.size());
 
         if (f.regs.inlined()) {
             JSC::LinkBuffer code((uint8_t *) cs.executableAddress(), masm.size(), JSC::METHOD_CODE);
             code.patch(inlined, f.regs.inlined());
         }
 
-        Repatcher repatch(from);
+        Repatcher repatch(f.chunk());
         JSC::CodeLocationJump oolJump = ic.slowPathStart.jumpAtOffset(ic.oolJumpOffset);
         repatch.relink(oolJump, cs);
 
         return true;
     }
 
-    bool patchInlinePath(JITScript *from, JSScript *script, JSObject *obj)
+    bool patchInlinePath(JSScript *script, JSObject *obj)
     {
         JS_ASSERT(ic.frameSize.isStatic());
         JITScript *jit = script->getJIT(callingNew);
 
         /* Very fast path. */
-        Repatcher repatch(from);
+        Repatcher repatch(f.chunk());
 
         /*
          * Use the arguments check entry if this is a monitored call, we might
          * not have accounted for all possible argument types.
          */
         void *entry = ic.typeMonitored ? jit->argsCheckEntry : jit->fastEntry;
 
         if (!repatch.canRelink(ic.funGuard.jumpAtOffset(ic.hotJumpOffset),
@@ -719,17 +717,17 @@ class CallCompiler : public BaseCompiler
 
         JaegerSpew(JSpew_PICs, "patched CALL path %p (obj: %p)\n",
                    ic.funGuard.executableAddress(),
                    static_cast<void*>(ic.fastGuardedObject));
 
         return true;
     }
 
-    bool generateStubForClosures(JITScript *from, JSObject *obj)
+    bool generateStubForClosures(JSObject *obj)
     {
         JS_ASSERT(ic.frameSize.isStatic());
 
         /* Slightly less fast path - guard on fun->script() instead. */
         Assembler masm;
 
         Registers tempRegs(Registers::AvailRegs);
         tempRegs.takeReg(ic.funObjReg);
@@ -747,39 +745,37 @@ class CallCompiler : public BaseCompiler
 
         LinkerHelper linker(masm, JSC::METHOD_CODE);
         JSC::ExecutablePool *ep = poolForSize(linker, CallICInfo::Pool_ClosureStub);
         if (!ep)
             return false;
 
         ic.hasJsFunCheck = true;
 
-        if (!linker.verifyRange(from)) {
-            disable(from);
+        if (!linker.verifyRange(f.chunk())) {
+            disable();
             return true;
         }
 
         linker.link(claspGuard, ic.slowPathStart);
         linker.link(funGuard, ic.slowPathStart);
         linker.link(done, ic.funGuard.labelAtOffset(ic.hotPathOffset));
         JSC::CodeLocationLabel cs = linker.finalize(f);
 
         JaegerSpew(JSpew_PICs, "generated CALL closure stub %p (%lu bytes)\n",
                    cs.executableAddress(), (unsigned long) masm.size());
 
-        Repatcher repatch(from);
+        Repatcher repatch(f.chunk());
         repatch.relink(ic.funJump, cs);
 
         return true;
     }
 
     bool generateNativeStub()
     {
-        JITScript *jit = f.jit();
-
         /* Snapshot the frameDepth before SplatApplyArgs modifies it. */
         uintN initialFrameDepth = f.regs.sp - f.fp()->slots();
 
         /*
          * SplatApplyArgs has not been called, so we call it here before
          * potentially touching f.u.call.dynamicArgc.
          */
         CallArgs args;
@@ -920,45 +916,43 @@ class CallCompiler : public BaseCompiler
         if (native == regexp_exec && !CallResultEscapes(f.pc()))
             native = regexp_test;
 
         masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, native), false);
 
         NativeStubLinker::FinalJump done;
         if (!NativeStubEpilogue(f, masm, &done, initialFrameDepth, vpOffset, MaybeRegisterID(), MaybeRegisterID()))
             return false;
-        NativeStubLinker linker(masm, f.jit(), f.regs.pc, done);
+        NativeStubLinker linker(masm, f.chunk(), f.regs.pc, done);
         if (!linker.init(f.cx))
             THROWV(true);
 
-        if (!linker.verifyRange(jit)) {
-            disable(jit);
+        if (!linker.verifyRange(f.chunk())) {
+            disable();
             return true;
         }
 
         linker.patchJump(ic.slowPathStart.labelAtOffset(ic.slowJoinOffset));
 
         ic.fastGuardedNative = fun;
 
         linker.link(funGuard, ic.slowPathStart);
         JSC::CodeLocationLabel start = linker.finalize(f);
 
         JaegerSpew(JSpew_PICs, "generated native CALL stub %p (%lu bytes)\n",
                    start.executableAddress(), (unsigned long) masm.size());
 
-        Repatcher repatch(jit);
+        Repatcher repatch(f.chunk());
         repatch.relink(ic.funJump, start);
 
         return true;
     }
 
     void *update()
     {
-        StackFrame *fp = f.fp();
-        JITScript *jit = fp->jit();
         RecompilationMonitor monitor(cx);
 
         bool lowered = ic.frameSize.lowered(f.pc());
         JS_ASSERT_IF(lowered, !callingNew);
 
         stubs::UncachedCallResult ucr;
         if (callingNew)
             stubs::UncachedNewHelper(f, ic.frameSize.staticArgc(), &ucr);
@@ -970,50 +964,50 @@ class CallCompiler : public BaseCompiler
         // and the compilation has a static overflow.
         if (monitor.recompiled())
             return ucr.codeAddr;
 
         // If the function cannot be jitted (generally unjittable or empty script),
         // patch this site to go to a slow path always.
         if (!ucr.codeAddr) {
             if (ucr.unjittable)
-                disable(jit);
+                disable();
             return NULL;
         }
             
         JSFunction *fun = ucr.fun;
         JS_ASSERT(fun);
         JSScript *script = fun->script();
         JS_ASSERT(script);
 
         uint32_t flags = callingNew ? StackFrame::CONSTRUCTING : 0;
 
         if (!ic.hit) {
             ic.hit = true;
             return ucr.codeAddr;
         }
 
         if (!ic.frameSize.isStatic() || ic.frameSize.staticArgc() != fun->nargs) {
-            if (!generateFullCallStub(jit, script, flags))
+            if (!generateFullCallStub(script, flags))
                 THROWV(NULL);
         } else {
-            if (!ic.fastGuardedObject && patchInlinePath(jit, script, fun)) {
+            if (!ic.fastGuardedObject && patchInlinePath(script, fun)) {
                 // Nothing, done.
             } else if (ic.fastGuardedObject &&
                        !ic.hasJsFunCheck &&
                        !ic.fastGuardedNative &&
                        ic.fastGuardedObject->toFunction()->script() == fun->script()) {
                 /*
                  * Note: Multiple "function guard" stubs are not yet
                  * supported, thus the fastGuardedNative check.
                  */
-                if (!generateStubForClosures(jit, fun))
+                if (!generateStubForClosures(fun))
                     THROWV(NULL);
             } else {
-                if (!generateFullCallStub(jit, script, flags))
+                if (!generateFullCallStub(script, flags))
                     THROWV(NULL);
             }
         }
 
         return ucr.codeAddr;
     }
 };
 
@@ -1216,38 +1210,38 @@ ic::GenerateArgumentCheckStub(VMFrame &f
     Jump done = masm.jump();
 
     LinkerHelper linker(masm, JSC::METHOD_CODE);
     JSC::ExecutablePool *ep = linker.init(f.cx);
     if (!ep)
         return;
     jit->argsCheckPool = ep;
 
-    if (!linker.verifyRange(jit)) {
+    if (!linker.verifyRange(f.chunk())) {
         jit->resetArgsCheck();
         return;
     }
 
     for (unsigned i = 0; i < mismatches.length(); i++)
         linker.link(mismatches[i], jit->argsCheckStub);
     linker.link(done, jit->argsCheckFallthrough);
 
     JSC::CodeLocationLabel cs = linker.finalize(f);
 
     JaegerSpew(JSpew_PICs, "generated ARGS CHECK stub %p (%lu bytes)\n",
                cs.executableAddress(), (unsigned long)masm.size());
 
-    Repatcher repatch(jit);
+    Repatcher repatch(f.chunk());
     repatch.relink(jit->argsCheckJump, cs);
 }
 
 void
 JITScript::resetArgsCheck()
 {
     argsCheckPool->release();
     argsCheckPool = NULL;
 
-    Repatcher repatch(this);
+    Repatcher repatch(chunk(script->code));
     repatch.relink(argsCheckJump, argsCheckStub);
 }
 
 #endif /* JS_MONOIC */
 
--- a/js/src/methodjit/PolyIC.cpp
+++ b/js/src/methodjit/PolyIC.cpp
@@ -123,17 +123,21 @@ class PICStubCompiler : public BaseCompi
         return error();
     }
 
     LookupStatus disable(const char *reason) {
         return disable(f.cx, reason);
     }
 
     LookupStatus disable(JSContext *cx, const char *reason) {
-        return pic.disable(cx, reason, stub);
+        return pic.disable(f, reason, stub);
+    }
+
+    LookupStatus disable(VMFrame &f, const char *reason) {
+        return pic.disable(f, reason, stub);
     }
 
     bool hadGC() {
         return gcNumber != f.cx->runtime->gcNumber;
     }
 
   protected:
     void spew(const char *event, const char *op) {
@@ -215,17 +219,17 @@ class SetPropCompiler : public PICStubCo
         repatcher.relink(pic.slowPathCall, target);
     }
 
     LookupStatus patchInline(const Shape *shape)
     {
         JS_ASSERT(!pic.inlinePathPatched);
         JaegerSpew(JSpew_PICs, "patch setprop inline at %p\n", pic.fastPathStart.executableAddress());
 
-        Repatcher repatcher(f.jit());
+        Repatcher repatcher(f.chunk());
         SetPropLabels &labels = pic.setPropLabels();
 
         int32_t offset;
         if (obj->isFixedSlot(shape->slot())) {
             CodeLocationInstruction istr = labels.getDslotsLoad(pic.fastPathRejoin, pic.u.vr);
             repatcher.repatchLoadPtrToLEA(istr);
 
             //
@@ -254,17 +258,17 @@ class SetPropCompiler : public PICStubCo
     }
 
     int getLastStubSecondShapeGuard() const {
         return lastStubSecondShapeGuard ? POST_INST_OFFSET(lastStubSecondShapeGuard) : 0;
     }
 
     void patchPreviousToHere(CodeLocationLabel cs)
     {
-        Repatcher repatcher(pic.lastCodeBlock(f.jit()));
+        Repatcher repatcher(pic.lastCodeBlock(f.chunk()));
         CodeLocationLabel label = pic.lastPathStart();
 
         // Patch either the inline fast path or a generated stub. The stub
         // omits the prefix of the inline fast path that loads the shape, so
         // the offsets are different.
         if (pic.stubsGenerated) {
             repatcher.relink(pic.setPropLabels().getStubShapeJump(label), cs);
         } else {
@@ -432,24 +436,24 @@ class SetPropCompiler : public PICStubCo
             for (Jump *pj = otherGuards.begin(); pj != otherGuards.end(); ++pj)
                 pj->linkTo(masm.label(), &masm);
             slowExit = masm.jump();
             pic.secondShapeGuard = masm.distanceOf(masm.label()) - masm.distanceOf(start);
         } else {
             pic.secondShapeGuard = 0;
         }
 
-        pic.updatePCCounters(cx, masm);
+        pic.updatePCCounters(f, masm);
 
         PICLinker buffer(masm, pic);
         if (!buffer.init(cx))
             return error();
 
-        if (!buffer.verifyRange(pic.lastCodeBlock(f.jit())) ||
-            !buffer.verifyRange(f.jit())) {
+        if (!buffer.verifyRange(pic.lastCodeBlock(f.chunk())) ||
+            !buffer.verifyRange(f.chunk())) {
             return disable("code memory is out of range");
         }
 
         buffer.link(shapeGuard, pic.slowPathStart);
         if (slowExit.isSet())
             buffer.link(slowExit.get(), pic.slowPathStart);
         for (Jump *pj = slowExits.begin(); pj != slowExits.end(); ++pj)
             buffer.link(*pj, pic.slowPathStart);
@@ -752,58 +756,58 @@ struct GetPropHelper {
             return ic.disable(cx, "non-native holder");
         shape = (const Shape *)prop;
         return Lookup_Cacheable;
     }
 
     LookupStatus lookup() {
         JSObject *aobj = js_GetProtoIfDenseArray(obj);
         if (!aobj->isNative())
-            return ic.disable(cx, "non-native");
+            return ic.disable(f, "non-native");
 
         RecompilationMonitor monitor(cx);
         if (!aobj->lookupProperty(cx, name, &holder, &prop))
             return ic.error(cx);
         if (monitor.recompiled())
             return Lookup_Uncacheable;
 
         if (!prop)
-            return ic.disable(cx, "lookup failed");
+            return ic.disable(f, "lookup failed");
         if (!IsCacheableProtoChain(obj, holder))
-            return ic.disable(cx, "non-native holder");
+            return ic.disable(f, "non-native holder");
         shape = (const Shape *)prop;
         return Lookup_Cacheable;
     }
 
     LookupStatus testForGet() {
         if (!shape->hasDefaultGetter()) {
             if (shape->isMethod()) {
                 if (JSOp(*f.pc()) != JSOP_CALLPROP)
-                    return ic.disable(cx, "method valued shape");
+                    return ic.disable(f, "method valued shape");
             } else {
                 if (shape->hasGetterValue())
-                    return ic.disable(cx, "getter value shape");
+                    return ic.disable(f, "getter value shape");
                 if (shape->hasSlot() && holder != obj)
-                    return ic.disable(cx, "slotful getter hook through prototype");
+                    return ic.disable(f, "slotful getter hook through prototype");
                 if (!ic.canCallHook)
-                    return ic.disable(cx, "can't call getter hook");
+                    return ic.disable(f, "can't call getter hook");
                 if (f.regs.inlined()) {
                     /*
                      * As with native stubs, getter hook stubs can't be
                      * generated for inline frames. Mark the inner function
                      * as uninlineable and recompile.
                      */
                     f.script()->uninlineable = true;
                     MarkTypeObjectFlags(cx, f.script()->function(),
                                         types::OBJECT_FLAG_UNINLINEABLE);
                     return Lookup_Uncacheable;
                 }
             }
         } else if (!shape->hasSlot()) {
-            return ic.disable(cx, "no slot");
+            return ic.disable(f, "no slot");
         }
 
         return Lookup_Cacheable;
     }
 
     LookupStatus lookupAndTest() {
         LookupStatus status = lookup();
         if (status != Lookup_Cacheable)
@@ -860,24 +864,24 @@ class GetPropCompiler : public PICStubCo
         masm.move(pic.objReg, pic.shapeReg);
         Jump overridden = masm.branchTest32(Assembler::NonZero, pic.shapeReg,
                                             Imm32(ArgumentsObject::LENGTH_OVERRIDDEN_BIT));
         masm.rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), pic.objReg);
 
         masm.move(ImmType(JSVAL_TYPE_INT32), pic.shapeReg);
         Jump done = masm.jump();
 
-        pic.updatePCCounters(cx, masm);
+        pic.updatePCCounters(f, masm);
 
         PICLinker buffer(masm, pic);
         if (!buffer.init(cx))
             return error();
 
-        if (!buffer.verifyRange(pic.lastCodeBlock(f.jit())) ||
-            !buffer.verifyRange(f.jit())) {
+        if (!buffer.verifyRange(pic.lastCodeBlock(f.chunk())) ||
+            !buffer.verifyRange(f.chunk())) {
             return disable("code memory is out of range");
         }
 
         buffer.link(notArgs, pic.slowPathStart);
         buffer.link(overridden, pic.slowPathStart);
         buffer.link(done, pic.fastPathRejoin);
 
         CodeLocationLabel start = buffer.finalize(f);
@@ -901,24 +905,24 @@ class GetPropCompiler : public PICStubCo
 
         isDense.linkTo(masm.label(), &masm);
         masm.loadPtr(Address(pic.objReg, JSObject::offsetOfElements()), pic.objReg);
         masm.load32(Address(pic.objReg, ObjectElements::offsetOfLength()), pic.objReg);
         Jump oob = masm.branch32(Assembler::Above, pic.objReg, Imm32(JSVAL_INT_MAX));
         masm.move(ImmType(JSVAL_TYPE_INT32), pic.shapeReg);
         Jump done = masm.jump();
 
-        pic.updatePCCounters(cx, masm);
+        pic.updatePCCounters(f, masm);
 
         PICLinker buffer(masm, pic);
         if (!buffer.init(cx))
             return error();
 
-        if (!buffer.verifyRange(pic.lastCodeBlock(f.jit())) ||
-            !buffer.verifyRange(f.jit())) {
+        if (!buffer.verifyRange(pic.lastCodeBlock(f.chunk())) ||
+            !buffer.verifyRange(f.chunk())) {
             return disable("code memory is out of range");
         }
 
         buffer.link(notArray, pic.slowPathStart);
         buffer.link(oob, pic.slowPathStart);
         buffer.link(done, pic.fastPathRejoin);
 
         CodeLocationLabel start = buffer.finalize(f);
@@ -939,24 +943,24 @@ class GetPropCompiler : public PICStubCo
         Jump notStringObj = masm.guardShape(pic.objReg, obj);
 
         masm.loadPayload(Address(pic.objReg, JSObject::getPrimitiveThisOffset()), pic.objReg);
         masm.loadPtr(Address(pic.objReg, JSString::offsetOfLengthAndFlags()), pic.objReg);
         masm.urshift32(Imm32(JSString::LENGTH_SHIFT), pic.objReg);
         masm.move(ImmType(JSVAL_TYPE_INT32), pic.shapeReg);
         Jump done = masm.jump();
 
-        pic.updatePCCounters(cx, masm);
+        pic.updatePCCounters(f, masm);
 
         PICLinker buffer(masm, pic);
         if (!buffer.init(cx))
             return error();
 
-        if (!buffer.verifyRange(pic.lastCodeBlock(f.jit())) ||
-            !buffer.verifyRange(f.jit())) {
+        if (!buffer.verifyRange(pic.lastCodeBlock(f.chunk())) ||
+            !buffer.verifyRange(f.chunk())) {
             return disable("code memory is out of range");
         }
 
         buffer.link(notStringObj, pic.slowPathStart);
         buffer.link(done, pic.fastPathRejoin);
 
         CodeLocationLabel start = buffer.finalize(f);
         JaegerSpew(JSpew_PICs, "generate string object length stub at %p\n",
@@ -1009,38 +1013,38 @@ class GetPropCompiler : public PICStubCo
         masm.move(ImmPtr(obj), pic.objReg);
         masm.loadShape(pic.objReg, pic.shapeReg);
         Jump shapeMismatch = masm.branchPtr(Assembler::NotEqual, pic.shapeReg,
                                             ImmPtr(obj->lastProperty()));
         masm.loadObjProp(obj, pic.objReg, getprop.shape, pic.shapeReg, pic.objReg);
 
         Jump done = masm.jump();
 
-        pic.updatePCCounters(cx, masm);
+        pic.updatePCCounters(f, masm);
 
         PICLinker buffer(masm, pic);
         if (!buffer.init(cx))
             return error();
 
-        if (!buffer.verifyRange(pic.lastCodeBlock(f.jit())) ||
-            !buffer.verifyRange(f.jit())) {
+        if (!buffer.verifyRange(pic.lastCodeBlock(f.chunk())) ||
+            !buffer.verifyRange(f.chunk())) {
             return disable("code memory is out of range");
         }
 
         buffer.link(notString, pic.getSlowTypeCheck());
         buffer.link(shapeMismatch, pic.slowPathStart);
         buffer.link(done, pic.fastPathRejoin);
 
         CodeLocationLabel cs = buffer.finalize(f);
         JaegerSpew(JSpew_PICs, "generate string call stub at %p\n",
                    cs.executableAddress());
 
         /* Patch the type check to jump here. */
         if (pic.hasTypeCheck()) {
-            Repatcher repatcher(f.jit());
+            Repatcher repatcher(f.chunk());
             repatcher.relink(pic.getPropLabels().getInlineTypeJump(pic.fastPathStart), cs);
         }
 
         /* Disable the PIC so we don't keep generating stubs on the above shape mismatch. */
         disable("generated string call stub");
         return Lookup_Cacheable;
     }
 
@@ -1052,48 +1056,48 @@ class GetPropCompiler : public PICStubCo
         Jump notString = masm.branchPtr(Assembler::NotEqual, pic.typeReg(),
                                         ImmType(JSVAL_TYPE_STRING));
         masm.loadPtr(Address(pic.objReg, JSString::offsetOfLengthAndFlags()), pic.objReg);
         // String length is guaranteed to be no more than 2**28, so the 32-bit operation is OK.
         masm.urshift32(Imm32(JSString::LENGTH_SHIFT), pic.objReg);
         masm.move(ImmType(JSVAL_TYPE_INT32), pic.shapeReg);
         Jump done = masm.jump();
 
-        pic.updatePCCounters(cx, masm);
+        pic.updatePCCounters(f, masm);
 
         PICLinker buffer(masm, pic);
         if (!buffer.init(cx))
             return error();
 
-        if (!buffer.verifyRange(pic.lastCodeBlock(f.jit())) ||
-            !buffer.verifyRange(f.jit())) {
+        if (!buffer.verifyRange(pic.lastCodeBlock(f.chunk())) ||
+            !buffer.verifyRange(f.chunk())) {
             return disable("code memory is out of range");
         }
 
         buffer.link(notString, pic.getSlowTypeCheck());
         buffer.link(done, pic.fastPathRejoin);
 
         CodeLocationLabel start = buffer.finalize(f);
         JaegerSpew(JSpew_PICs, "generate string length stub at %p\n",
                    start.executableAddress());
 
         if (pic.hasTypeCheck()) {
-            Repatcher repatcher(f.jit());
+            Repatcher repatcher(f.chunk());
             repatcher.relink(pic.getPropLabels().getInlineTypeJump(pic.fastPathStart), start);
         }
 
         disable("generated string length stub");
 
         return Lookup_Cacheable;
     }
 
     LookupStatus patchInline(JSObject *holder, const Shape *shape)
     {
         spew("patch", "inline");
-        Repatcher repatcher(f.jit());
+        Repatcher repatcher(f.chunk());
         GetPropLabels &labels = pic.getPropLabels();
 
         int32_t offset;
         if (holder->isFixedSlot(shape->slot())) {
             CodeLocationInstruction istr = labels.getDslotsLoad(pic.fastPathRejoin);
             repatcher.repatchLoadPtrToLEA(istr);
 
             //
@@ -1185,22 +1189,22 @@ class GetPropCompiler : public PICStubCo
         masm.storeArg(1, holdObjReg);
         masm.storeArg(0, cxReg);
 
         masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, getter), false);
 
         NativeStubLinker::FinalJump done;
         if (!NativeStubEpilogue(f, masm, &done, 0, vpOffset, pic.shapeReg, pic.objReg))
             return;
-        NativeStubLinker linker(masm, f.jit(), f.regs.pc, done);
+        NativeStubLinker linker(masm, f.chunk(), f.regs.pc, done);
         if (!linker.init(f.cx))
             THROW();
 
-        if (!linker.verifyRange(pic.lastCodeBlock(f.jit())) ||
-            !linker.verifyRange(f.jit())) {
+        if (!linker.verifyRange(pic.lastCodeBlock(f.chunk())) ||
+            !linker.verifyRange(f.chunk())) {
             disable("code memory is out of range");
             return;
         }
 
         linker.patchJump(pic.fastPathRejoin);
 
         linkerEpilogue(linker, start, shapeMismatches);
     }
@@ -1272,24 +1276,24 @@ class GetPropCompiler : public PICStubCo
                 pic.getPropLabels().setStubShapeJump(masm, start, stubShapeJumpLabel);
             return Lookup_Cacheable;
         }
 
         /* Load the value out of the object. */
         masm.loadObjProp(holder, holderReg, shape, pic.shapeReg, pic.objReg);
         Jump done = masm.jump();
 
-        pic.updatePCCounters(cx, masm);
+        pic.updatePCCounters(f, masm);
 
         PICLinker buffer(masm, pic);
         if (!buffer.init(cx))
             return error();
 
-        if (!buffer.verifyRange(pic.lastCodeBlock(f.jit())) ||
-            !buffer.verifyRange(f.jit())) {
+        if (!buffer.verifyRange(pic.lastCodeBlock(f.chunk())) ||
+            !buffer.verifyRange(f.chunk())) {
             return disable("code memory is out of range");
         }
 
         // The final exit jumps to the store-back in the inline stub.
         buffer.link(done, pic.fastPathRejoin);
 
         linkerEpilogue(buffer, start, shapeMismatches);
 
@@ -1315,17 +1319,17 @@ class GetPropCompiler : public PICStubCo
         if (pic.stubsGenerated == MAX_PIC_STUBS)
             disable("max stubs reached");
         if (obj->isDenseArray())
             disable("dense array");
     }
 
     void patchPreviousToHere(CodeLocationLabel cs)
     {
-        Repatcher repatcher(pic.lastCodeBlock(f.jit()));
+        Repatcher repatcher(pic.lastCodeBlock(f.chunk()));
         CodeLocationLabel label = pic.lastPathStart();
 
         // Patch either the inline fast path or a generated stub. The stub
         // omits the prefix of the inline fast path that loads the shape, so
         // the offsets are different.
         int shapeGuardJumpOffset;
         if (pic.stubsGenerated)
             shapeGuardJumpOffset = pic.getPropLabels().getStubShapeJumpOffset();
@@ -1371,17 +1375,17 @@ class ScopeNameCompiler : public PICStub
     JSObject *scopeChain;
     PropertyName *name;
     GetPropHelper<ScopeNameCompiler> getprop;
     ScopeNameCompiler *thisFromCtor() { return this; }
 
     void patchPreviousToHere(CodeLocationLabel cs)
     {
         ScopeNameLabels &       labels = pic.scopeNameLabels();
-        Repatcher               repatcher(pic.lastCodeBlock(f.jit()));
+        Repatcher               repatcher(pic.lastCodeBlock(f.chunk()));
         CodeLocationLabel       start = pic.lastPathStart();
         JSC::CodeLocationJump   jump;
 
         // Patch either the inline fast path or a generated stub.
         if (pic.stubsGenerated)
             jump = labels.getStubJump(start);
         else
             jump = labels.getInlineJump(start);
@@ -1487,24 +1491,24 @@ class ScopeNameCompiler : public PICStub
         for (Jump *pj = fails.begin(); pj != fails.end(); ++pj)
             pj->linkTo(masm.label(), &masm);
         if (finalNull.isSet())
             finalNull.get().linkTo(masm.label(), &masm);
         finalShape.linkTo(masm.label(), &masm);
         Label failLabel = masm.label();
         Jump failJump = masm.jump();
 
-        pic.updatePCCounters(cx, masm);
+        pic.updatePCCounters(f, masm);
 
         PICLinker buffer(masm, pic);
         if (!buffer.init(cx))
             return error();
 
-        if (!buffer.verifyRange(pic.lastCodeBlock(f.jit())) ||
-            !buffer.verifyRange(f.jit())) {
+        if (!buffer.verifyRange(pic.lastCodeBlock(f.chunk())) ||
+            !buffer.verifyRange(f.chunk())) {
             return disable("code memory is out of range");
         }
 
         buffer.link(failJump, pic.slowPathStart);
         buffer.link(done, pic.fastPathRejoin);
         CodeLocationLabel cs = buffer.finalize(f);
         JaegerSpew(JSpew_PICs, "generated %s global stub at %p\n", type, cs.executableAddress());
         spew("NAME stub", "global");
@@ -1599,24 +1603,24 @@ class ScopeNameCompiler : public PICStub
         for (Jump *pj = fails.begin(); pj != fails.end(); ++pj)
             pj->linkTo(masm.label(), &masm);
         if (finalNull.isSet())
             finalNull.get().linkTo(masm.label(), &masm);
         finalShape.linkTo(masm.label(), &masm);
         Label failLabel = masm.label();
         Jump failJump = masm.jump();
 
-        pic.updatePCCounters(cx, masm);
+        pic.updatePCCounters(f, masm);
 
         PICLinker buffer(masm, pic);
         if (!buffer.init(cx))
             return error();
 
-        if (!buffer.verifyRange(pic.lastCodeBlock(f.jit())) ||
-            !buffer.verifyRange(f.jit())) {
+        if (!buffer.verifyRange(pic.lastCodeBlock(f.chunk())) ||
+            !buffer.verifyRange(f.chunk())) {
             return disable("code memory is out of range");
         }
 
         buffer.link(failJump, pic.slowPathStart);
         buffer.link(done, pic.fastPathRejoin);
         CodeLocationLabel cs = buffer.finalize(f);
         JaegerSpew(JSpew_PICs, "generated %s call stub at %p\n", type, cs.executableAddress());
 
@@ -1730,17 +1734,17 @@ class BindNameCompiler : public PICStubC
         /* Link the slow path to call the IC entry point. */
         FunctionPtr target(JS_FUNC_TO_DATA_PTR(void *, ic::BindName));
         repatcher.relink(pic.slowPathCall, target);
     }
 
     void patchPreviousToHere(CodeLocationLabel cs)
     {
         BindNameLabels &labels = pic.bindNameLabels();
-        Repatcher repatcher(pic.lastCodeBlock(f.jit()));
+        Repatcher repatcher(pic.lastCodeBlock(f.chunk()));
         JSC::CodeLocationJump jump;
 
         /* Patch either the inline fast path or a generated stub. */
         if (pic.stubsGenerated)
             jump = labels.getStubJump(pic.lastPathStart());
         else
             jump = labels.getInlineJump(pic.getFastShapeGuard());
         repatcher.relink(jump, cs);
@@ -1780,24 +1784,24 @@ class BindNameCompiler : public PICStubC
 
         // All failures flow to here, so there is a common point to patch.
         for (Jump *pj = fails.begin(); pj != fails.end(); ++pj)
             pj->linkTo(masm.label(), &masm);
         firstShape.linkTo(masm.label(), &masm);
         Label failLabel = masm.label();
         Jump failJump = masm.jump();
 
-        pic.updatePCCounters(cx, masm);
+        pic.updatePCCounters(f, masm);
 
         PICLinker buffer(masm, pic);
         if (!buffer.init(cx))
             return error();
 
-        if (!buffer.verifyRange(pic.lastCodeBlock(f.jit())) ||
-            !buffer.verifyRange(f.jit())) {
+        if (!buffer.verifyRange(pic.lastCodeBlock(f.chunk())) ||
+            !buffer.verifyRange(f.chunk())) {
             return disable("code memory is out of range");
         }
 
         buffer.link(failJump, pic.slowPathStart);
         buffer.link(done, pic.fastPathRejoin);
         CodeLocationLabel cs = buffer.finalize(f);
         JaegerSpew(JSpew_PICs, "generated %s stub at %p\n", type, cs.executableAddress());
 
@@ -2056,56 +2060,56 @@ BaseIC::spew(JSContext *cx, const char *
 {
 #ifdef JS_METHODJIT_SPEW
     JaegerSpew(JSpew_PICs, "%s %s: %s (%s: %d)\n",
                js_CodeName[op], event, message, cx->fp()->script()->filename, CurrentLine(cx));
 #endif
 }
 
 /* Total length of scripts preceding a frame. */
-inline uint32_t frameCountersOffset(JSContext *cx)
+inline uint32_t frameCountersOffset(VMFrame &f)
 {
+    JSContext *cx = f.cx;
+
     uint32_t offset = 0;
     if (cx->regs().inlined()) {
         offset += cx->fp()->script()->length;
         uint32_t index = cx->regs().inlined()->inlineIndex;
-        InlineFrame *frames = cx->fp()->jit()->inlineFrames();
+        InlineFrame *frames = f.chunk()->inlineFrames();
         for (unsigned i = 0; i < index; i++)
             offset += frames[i].fun->script()->length;
     }
 
     jsbytecode *pc;
     JSScript *script = cx->stack.currentScript(&pc);
     offset += pc - script->code;
 
     return offset;
 }
 
 LookupStatus
-BaseIC::disable(JSContext *cx, const char *reason, void *stub)
+BaseIC::disable(VMFrame &f, const char *reason, void *stub)
 {
-    JITScript *jit = cx->fp()->jit();
-    if (jit->pcLengths) {
-        uint32_t offset = frameCountersOffset(cx);
-        jit->pcLengths[offset].picsLength = 0;
+    if (f.chunk()->pcLengths) {
+        uint32_t offset = frameCountersOffset(f);
+        f.chunk()->pcLengths[offset].picsLength = 0;
     }
 
-    spew(cx, "disabled", reason);
-    Repatcher repatcher(jit);
+    spew(f.cx, "disabled", reason);
+    Repatcher repatcher(f.chunk());
     repatcher.relink(slowPathCall, FunctionPtr(stub));
     return Lookup_Uncacheable;
 }
 
 void
-BaseIC::updatePCCounters(JSContext *cx, Assembler &masm)
+BaseIC::updatePCCounters(VMFrame &f, Assembler &masm)
 {
-    JITScript *jit = cx->fp()->jit();
-    if (jit->pcLengths) {
-        uint32_t offset = frameCountersOffset(cx);
-        jit->pcLengths[offset].picsLength += masm.size();
+    if (f.chunk()->pcLengths) {
+        uint32_t offset = frameCountersOffset(f);
+        f.chunk()->pcLengths[offset].picsLength += masm.size();
     }
 }
 
 bool
 BaseIC::shouldUpdate(JSContext *cx)
 {
     if (!hit) {
         hit = true;
@@ -2130,21 +2134,21 @@ GetElementIC::shouldUpdate(JSContext *cx
         spew(cx, "ignored", "first hit");
         return false;
     }
     JS_ASSERT(stubsGenerated < MAX_GETELEM_IC_STUBS);
     return true;
 }
 
 LookupStatus
-GetElementIC::disable(JSContext *cx, const char *reason)
+GetElementIC::disable(VMFrame &f, const char *reason)
 {
     slowCallPatched = true;
     void *stub = JS_FUNC_TO_DATA_PTR(void *, DisabledGetElem);
-    BaseIC::disable(cx, reason, stub);
+    BaseIC::disable(f, reason, stub);
     return Lookup_Uncacheable;
 }
 
 LookupStatus
 GetElementIC::error(JSContext *cx)
 {
     return Lookup_Error;
 }
@@ -2177,17 +2181,17 @@ GetElementIC::attachGetProp(VMFrame &f, 
     LookupStatus status = getprop.lookupAndTest();
     if (status != Lookup_Cacheable)
         return status;
 
     // With TI enabled, string property stubs can only be added to an opcode if
     // the value read will go through a type barrier afterwards. TI only
     // accounts for integer-valued properties accessed by GETELEM/CALLELEM.
     if (cx->typeInferenceEnabled() && !forcedTypeBarrier)
-        return disable(cx, "string element access may not have type barrier");
+        return disable(f, "string element access may not have type barrier");
 
     Assembler masm;
 
     // Guard on the string's type and identity.
     MaybeJump atomTypeGuard;
     if (hasInlineTypeGuard() && !inlineTypeGuardPatched) {
         // We link all string-key dependent stubs together, and store the
         // first set of guards in the IC, separately, from int-key dependent
@@ -2243,26 +2247,26 @@ GetElementIC::attachGetProp(VMFrame &f, 
     }
 
     // Load the value.
     const Shape *shape = getprop.shape;
     masm.loadObjProp(holder, holderReg, shape, typeReg, objReg);
 
     Jump done = masm.jump();
 
-    updatePCCounters(cx, masm);
+    updatePCCounters(f, masm);
 
     PICLinker buffer(masm, *this);
     if (!buffer.init(cx))
         return error(cx);
 
     if (hasLastStringStub && !buffer.verifyRange(lastStringStub))
-        return disable(cx, "code memory is out of range");
-    if (!buffer.verifyRange(cx->fp()->jit()))
-        return disable(cx, "code memory is out of range");
+        return disable(f, "code memory is out of range");
+    if (!buffer.verifyRange(f.chunk()))
+        return disable(f, "code memory is out of range");
 
     // Patch all guards.
     buffer.maybeLink(atomIdGuard, slowPathStart);
     buffer.maybeLink(atomTypeGuard, slowPathStart);
     buffer.link(shapeGuard, slowPathStart);
     buffer.maybeLink(protoGuard, slowPathStart);
     for (Jump *pj = otherGuards.begin(); pj != otherGuards.end(); ++pj)
         buffer.link(*pj, slowPathStart);
@@ -2274,17 +2278,17 @@ GetElementIC::attachGetProp(VMFrame &f, 
     JaegerSpew(JSpew_PICs, "generated %s stub at %p for atom %p (\"%s\") shape %p (%s: %d)\n",
                js_CodeName[op], cs.executableAddress(), (void*)name, chars,
                (void*)holder->lastProperty(), cx->fp()->script()->filename, CurrentLine(cx));
     cx->free_(chars);
 #endif
 
     // Update the inline guards, if needed.
     if (shouldPatchInlineTypeGuard() || shouldPatchUnconditionalShapeGuard()) {
-        Repatcher repatcher(cx->fp()->jit());
+        Repatcher repatcher(f.chunk());
 
         if (shouldPatchInlineTypeGuard()) {
             // A type guard is present in the inline path, and this is the
             // first string stub, so patch it now.
             JS_ASSERT(!inlineTypeGuardPatched);
             JS_ASSERT(atomTypeGuard.isSet());
 
             repatcher.relink(fastPathStart.jumpAtOffset(inlineTypeGuard), cs);
@@ -2333,34 +2337,34 @@ GetElementIC::attachGetProp(VMFrame &f, 
     }
     firstShapeGuard = buffer.locationOf(shapeGuard) - cs;
     JS_ASSERT(firstShapeGuard == buffer.locationOf(shapeGuard) - cs);
     JS_ASSERT(firstShapeGuard);
 
     stubsGenerated++;
 
     if (stubsGenerated == MAX_GETELEM_IC_STUBS)
-        disable(cx, "max stubs reached");
+        disable(f, "max stubs reached");
 
     // Finally, fetch the value to avoid redoing the property lookup.
     *vp = holder->getSlot(shape->slot());
 
     return Lookup_Cacheable;
 }
 
 LookupStatus
 GetElementIC::attachArguments(VMFrame &f, JSObject *obj, const Value &v, jsid id, Value *vp)
 {
     JSContext *cx = f.cx;
 
     if (!v.isInt32())
-        return disable(cx, "arguments object with non-integer key");
+        return disable(f, "arguments object with non-integer key");
 
     if (op == JSOP_CALLELEM)
-        return disable(cx, "arguments object with call");
+        return disable(f, "arguments object with call");
 
     JS_ASSERT(hasInlineTypeGuard() || idRemat.knownType() == JSVAL_TYPE_INT32);
 
     Assembler masm;
 
     Jump shapeGuard = masm.testObjClass(Assembler::NotEqual, objReg, typeReg, obj->getClass());
 
     masm.move(objReg, typeReg);
@@ -2452,68 +2456,68 @@ GetElementIC::attachArguments(VMFrame &f
     masm.addPtr(Imm32(2), objReg);
     masm.lshiftPtr(Imm32(3), objReg);
 
     masm.pop(typeReg);
     masm.subPtr(objReg, typeReg);
 
     masm.jump(loadFromStack);
 
-    updatePCCounters(cx, masm);
+    updatePCCounters(f, masm);
 
     PICLinker buffer(masm, *this);
 
     if (!buffer.init(cx))
         return error(cx);
 
-    if (!buffer.verifyRange(cx->fp()->jit()))
-        return disable(cx, "code memory is out of range");
+    if (!buffer.verifyRange(f.chunk()))
+        return disable(f, "code memory is out of range");
 
     buffer.link(shapeGuard, slowPathStart);
     buffer.link(overridden, slowPathStart);
     buffer.link(outOfBounds, slowPathStart);
     buffer.link(holeCheck, slowPathStart);
     buffer.link(done, fastPathRejoin);    
     buffer.link(done2, fastPathRejoin);
     
     CodeLocationLabel cs = buffer.finalizeCodeAddendum();
 
     JaegerSpew(JSpew_PICs, "generated getelem arguments stub at %p\n", cs.executableAddress());
 
-    Repatcher repatcher(cx->fp()->jit());
+    Repatcher repatcher(f.chunk());
     repatcher.relink(fastPathStart.jumpAtOffset(inlineShapeGuard), cs);
 
     JS_ASSERT(!shouldPatchUnconditionalShapeGuard());
     JS_ASSERT(!inlineShapeGuardPatched);
 
     inlineShapeGuardPatched = true;
     stubsGenerated++;
 
     if (stubsGenerated == MAX_GETELEM_IC_STUBS)
-        disable(cx, "max stubs reached");
-
-    disable(cx, "generated arguments stub");
+        disable(f, "max stubs reached");
+
+    disable(f, "generated arguments stub");
 
     if (!obj->getGeneric(cx, id, vp))
         return Lookup_Error;
 
     return Lookup_Cacheable;
 }
 
 #if defined JS_METHODJIT_TYPED_ARRAY
 LookupStatus
 GetElementIC::attachTypedArray(VMFrame &f, JSObject *obj, const Value &v, jsid id, Value *vp)
 {
     JSContext *cx = f.cx;
 
     if (!v.isInt32())
-        return disable(cx, "typed array with string key");
+        return disable(f, "typed array with string key");
 
     if (op == JSOP_CALLELEM)
-        return disable(cx, "typed array with call");
+        return disable(f, "typed array with call");
 
     // The fast-path guarantees that after the dense shape guard, the type is
     // known to be int32, either via type inference or the inline type check.
     JS_ASSERT(hasInlineTypeGuard() || idRemat.knownType() == JSVAL_TYPE_INT32);
 
     Assembler masm;
 
     // Guard on this typed array's shape/class.
@@ -2537,57 +2541,57 @@ GetElementIC::attachTypedArray(VMFrame &
                  : Int32Key::FromRegister(idRemat.dataReg());
 
     JSObject *tarray = js::TypedArray::getTypedArray(obj);
     if (!masm.supportsFloatingPoint() &&
         (TypedArray::getType(tarray) == js::TypedArray::TYPE_FLOAT32 ||
          TypedArray::getType(tarray) == js::TypedArray::TYPE_FLOAT64 ||
          TypedArray::getType(tarray) == js::TypedArray::TYPE_UINT32))
     {
-        return disable(cx, "fpu not supported");
+        return disable(f, "fpu not supported");
     }
 
     MaybeRegisterID tempReg;
     masm.loadFromTypedArray(TypedArray::getType(tarray), objReg, key, typeReg, objReg, tempReg);
 
     Jump done = masm.jump();
 
-    updatePCCounters(cx, masm);
+    updatePCCounters(f, masm);
 
     PICLinker buffer(masm, *this);
     if (!buffer.init(cx))
         return error(cx);
 
-    if (!buffer.verifyRange(cx->fp()->jit()))
-        return disable(cx, "code memory is out of range");
+    if (!buffer.verifyRange(f.chunk()))
+        return disable(f, "code memory is out of range");
 
     buffer.link(shapeGuard, slowPathStart);
     buffer.link(outOfBounds, slowPathStart);
     buffer.link(done, fastPathRejoin);
 
     CodeLocationLabel cs = buffer.finalizeCodeAddendum();
     JaegerSpew(JSpew_PICs, "generated getelem typed array stub at %p\n", cs.executableAddress());
 
     // If we can generate a typed array stub, the shape guard is conditional.
     // Also, we only support one typed array.
     JS_ASSERT(!shouldPatchUnconditionalShapeGuard());
     JS_ASSERT(!inlineShapeGuardPatched);
 
-    Repatcher repatcher(cx->fp()->jit());
+    Repatcher repatcher(f.chunk());
     repatcher.relink(fastPathStart.jumpAtOffset(inlineShapeGuard), cs);
     inlineShapeGuardPatched = true;
 
     stubsGenerated++;
 
     // In the future, it might make sense to attach multiple typed array stubs.
     // For simplicitly, they are currently monomorphic.
     if (stubsGenerated == MAX_GETELEM_IC_STUBS)
-        disable(cx, "max stubs reached");
-
-    disable(cx, "generated typed array stub");
+        disable(f, "max stubs reached");
+
+    disable(f, "generated typed array stub");
 
     // Fetch the value as expected of Lookup_Cacheable for GetElement.
     if (!obj->getGeneric(cx, id, vp))
         return Lookup_Error;
 
     return Lookup_Cacheable;
 }
 #endif /* JS_METHODJIT_TYPED_ARRAY */
@@ -2617,27 +2621,27 @@ GetElementIC::update(VMFrame &f, JSObjec
      * IC are not compatible with carrying entries in floating point registers.
      * Since we can use type information to generate inline paths for typed
      * arrays, just don't generate these ICs with inference enabled.
      */
     if (!f.cx->typeInferenceEnabled() && js_IsTypedArray(obj))
         return attachTypedArray(f, obj, v, id, vp);
 #endif
 
-    return disable(f.cx, "unhandled object and key type");
+    return disable(f, "unhandled object and key type");
 }
 
 void JS_FASTCALL
 ic::GetElement(VMFrame &f, ic::GetElementIC *ic)
 {
     JSContext *cx = f.cx;
 
     // Right now, we don't optimize for strings or lazy arguments.
     if (!f.regs.sp[-2].isObject()) {
-        ic->disable(cx, "non-object");
+        ic->disable(f, "non-object");
         stubs::GetElem(f);
         return;
     }
 
     Value idval = f.regs.sp[-1];
 
     RecompilationMonitor monitor(cx);
 
@@ -2671,21 +2675,21 @@ ic::GetElement(VMFrame &f, ic::GetElemen
     if (!obj->getGeneric(cx, id, &f.regs.sp[-2]))
         THROW();
 }
 
 #define APPLY_STRICTNESS(f, s)                          \
     (FunctionTemplateConditional(s, f<true>, f<false>))
 
 LookupStatus
-SetElementIC::disable(JSContext *cx, const char *reason)
+SetElementIC::disable(VMFrame &f, const char *reason)
 {
     slowCallPatched = true;
     VoidStub stub = APPLY_STRICTNESS(stubs::SetElem, strictMode);
-    BaseIC::disable(cx, reason, JS_FUNC_TO_DATA_PTR(void *, stub));
+    BaseIC::disable(f, reason, JS_FUNC_TO_DATA_PTR(void *, stub));
     return Lookup_Uncacheable;
 }
 
 LookupStatus
 SetElementIC::error(JSContext *cx)
 {
     return Lookup_Error;
 }
@@ -2708,40 +2712,40 @@ SetElementIC::purge(Repatcher &repatcher
 }
 
 LookupStatus
 SetElementIC::attachHoleStub(VMFrame &f, JSObject *obj, int32_t keyval)
 {
     JSContext *cx = f.cx;
 
     if (keyval < 0)
-        return disable(cx, "negative key index");
+        return disable(f, "negative key index");
 
     // We may have failed a capacity check instead of a dense array check.
     // However we should still build the IC in this case, since it could
     // be in a loop that is filling in the array.
 
     if (js_PrototypeHasIndexedProperties(cx, obj))
-        return disable(cx, "prototype has indexed properties");
+        return disable(f, "prototype has indexed properties");
 
     Assembler masm;
 
     Vector<Jump, 8> fails(cx);
 
     if (!GeneratePrototypeGuards(cx, fails, masm, obj, NULL, objReg, objReg))
         return error(cx);
 
     // Test for indexed properties in Array.prototype. We test each shape
     // along the proto chain. This affords us two optimizations:
     //  1) Loading the prototype can be avoided because the shape would change;
     //     instead we can bake in their identities.
     //  2) We only have to test the shape, rather than INDEXED.
     for (JSObject *pobj = obj->getProto(); pobj; pobj = pobj->getProto()) {
         if (!pobj->isNative())
-            return disable(cx, "non-native array prototype");
+            return disable(f, "non-native array prototype");
         masm.move(ImmPtr(pobj), objReg);
         Jump j = masm.guardShape(objReg, pobj);
         if (!fails.append(j))
             return error(cx);
     }
 
     // Restore |obj|.
     masm.rematPayload(StateRemat::FromInt32(objRemat), objReg);
@@ -2784,32 +2788,32 @@ SetElementIC::attachHoleStub(VMFrame &f,
     JS_ASSERT(!execPool);
     JS_ASSERT(!inlineHoleGuardPatched);
 
     LinkerHelper buffer(masm, JSC::METHOD_CODE);
     execPool = buffer.init(cx);
     if (!execPool)
         return error(cx);
 
-    if (!buffer.verifyRange(cx->fp()->jit()))
-        return disable(cx, "code memory is out of range");
+    if (!buffer.verifyRange(f.chunk()))
+        return disable(f, "code memory is out of range");
 
     // Patch all guards.
     for (size_t i = 0; i < fails.length(); i++)
         buffer.link(fails[i], slowPathStart);
     buffer.link(done, fastPathRejoin);
 
     CodeLocationLabel cs = buffer.finalize(f);
     JaegerSpew(JSpew_PICs, "generated dense array hole stub at %p\n", cs.executableAddress());
 
-    Repatcher repatcher(cx->fp()->jit());
+    Repatcher repatcher(f.chunk());
     repatcher.relink(fastPathStart.jumpAtOffset(inlineHoleGuard), cs);
     inlineHoleGuardPatched = true;
 
-    disable(cx, "generated dense array hole stub");
+    disable(f, "generated dense array hole stub");
 
     return Lookup_Cacheable;
 }
 
 #if defined JS_METHODJIT_TYPED_ARRAY
 LookupStatus
 SetElementIC::attachTypedArray(VMFrame &f, JSObject *obj, int32_t key)
 {
@@ -2836,17 +2840,17 @@ SetElementIC::attachTypedArray(VMFrame &
     // Load the array's packed data vector.
     masm.loadPtr(Address(objReg, TypedArray::dataOffset()), objReg);
 
     JSObject *tarray = js::TypedArray::getTypedArray(obj);
     if (!masm.supportsFloatingPoint() &&
         (TypedArray::getType(tarray) == js::TypedArray::TYPE_FLOAT32 ||
          TypedArray::getType(tarray) == js::TypedArray::TYPE_FLOAT64))
     {
-        return disable(cx, "fpu not supported");
+        return disable(f, "fpu not supported");
     }
 
     int shift = js::TypedArray::slotWidth(obj);
     if (hasConstantKey) {
         Address addr(objReg, keyValue * shift);
         if (!StoreToTypedArray(cx, masm, tarray, addr, vr, volatileMask))
             return error(cx);
     } else {
@@ -2873,66 +2877,66 @@ SetElementIC::attachTypedArray(VMFrame &
     // by a GC or shape regenerated GC. We let this stub live for the lifetime
     // of the script.
     JS_ASSERT(!execPool);
     LinkerHelper buffer(masm, JSC::METHOD_CODE);
     execPool = buffer.init(cx);
     if (!execPool)
         return error(cx);
 
-    if (!buffer.verifyRange(cx->fp()->jit()))
-        return disable(cx, "code memory is out of range");
+    if (!buffer.verifyRange(f.chunk()))
+        return disable(f, "code memory is out of range");
 
     // Note that the out-of-bounds path simply does nothing.
     buffer.link(shapeGuard, slowPathStart);
     buffer.link(outOfBounds, fastPathRejoin);
     buffer.link(done, fastPathRejoin);
     masm.finalize(buffer);
 
     CodeLocationLabel cs = buffer.finalizeCodeAddendum();
     JaegerSpew(JSpew_PICs, "generated setelem typed array stub at %p\n", cs.executableAddress());
 
-    Repatcher repatcher(cx->fp()->jit());
+    Repatcher repatcher(f.chunk());
     repatcher.relink(fastPathStart.jumpAtOffset(inlineShapeGuard), cs);
     inlineShapeGuardPatched = true;
 
     stubsGenerated++;
 
     // In the future, it might make sense to attach multiple typed array stubs.
     // For simplicitly, they are currently monomorphic.
     if (stubsGenerated == MAX_GETELEM_IC_STUBS)
-        disable(cx, "max stubs reached");
-
-    disable(cx, "generated typed array stub");
+        disable(f, "max stubs reached");
+
+    disable(f, "generated typed array stub");
 
     return Lookup_Cacheable;
 }
 #endif /* JS_METHODJIT_TYPED_ARRAY */
 
 LookupStatus
 SetElementIC::update(VMFrame &f, const Value &objval, const Value &idval)
 {
     if (!objval.isObject())
-        return disable(f.cx, "primitive lval");
+        return disable(f, "primitive lval");
     if (!idval.isInt32())
-        return disable(f.cx, "non-int32_t key");
+        return disable(f, "non-int32 key");
 
     JSObject *obj = &objval.toObject();
     int32_t key = idval.toInt32();
 
     if (obj->isDenseArray())
         return attachHoleStub(f, obj, key);
 
 #if defined JS_METHODJIT_TYPED_ARRAY
     /* Not attaching typed array stubs with linear scan allocator, see GetElementIC. */
     if (!f.cx->typeInferenceEnabled() && js_IsTypedArray(obj))
         return attachTypedArray(f, obj, key);
 #endif
 
-    return disable(f.cx, "unsupported object type");
+    return disable(f, "unsupported object type");
 }
 
 bool
 SetElementIC::shouldUpdate(JSContext *cx)
 {
     if (!hit) {
         hit = true;
         spew(cx, "ignored", "first hit");
--- a/js/src/methodjit/PolyIC.h
+++ b/js/src/methodjit/PolyIC.h
@@ -106,18 +106,18 @@ struct BaseIC : public MacroAssemblerTyp
         hit = false;
         slowCallPatched = false;
         forcedTypeBarrier = false;
         stubsGenerated = 0;
         secondShapeGuard = 0;
     }
     bool shouldUpdate(JSContext *cx);
     void spew(JSContext *cx, const char *event, const char *reason);
-    LookupStatus disable(JSContext *cx, const char *reason, void *stub);
-    void updatePCCounters(JSContext *cx, Assembler &masm);
+    LookupStatus disable(VMFrame &f, const char *reason, void *stub);
+    void updatePCCounters(VMFrame &f, Assembler &masm);
     bool isCallOp();
 };
 
 class BasePolyIC : public BaseIC {
     typedef Vector<JSC::ExecutablePool *, 2, SystemAllocPolicy> ExecPoolVector;
 
     // ExecutablePools that IC stubs were generated into.  Very commonly (eg.
     // 99.5% of BasePolyICs) there are 0 or 1, and there are lots of
@@ -298,17 +298,17 @@ struct GetElementIC : public BasePolyIC 
         hasLastStringStub = false;
     }
     void purge(Repatcher &repatcher);
     LookupStatus update(VMFrame &f, JSObject *obj, const Value &v, jsid id, Value *vp);
     LookupStatus attachGetProp(VMFrame &f, JSObject *obj, const Value &v, PropertyName *name,
                                Value *vp);
     LookupStatus attachArguments(VMFrame &f, JSObject *obj, const Value &v, jsid id, Value *vp);
     LookupStatus attachTypedArray(VMFrame &f, JSObject *obj, const Value &v, jsid id, Value *vp);
-    LookupStatus disable(JSContext *cx, const char *reason);
+    LookupStatus disable(VMFrame &f, const char *reason);
     LookupStatus error(JSContext *cx);
     bool shouldUpdate(JSContext *cx);
 };
 
 struct SetElementIC : public BaseIC {
     SetElementIC() : execPool(NULL) { reset(); }
     ~SetElementIC() {
         if (execPool)
@@ -364,17 +364,17 @@ struct SetElementIC : public BaseIC {
         execPool = NULL;
         inlineShapeGuardPatched = false;
         inlineHoleGuardPatched = false;
     }
     void purge(Repatcher &repatcher);
     LookupStatus attachTypedArray(VMFrame &f, JSObject *obj, int32_t key);
     LookupStatus attachHoleStub(VMFrame &f, JSObject *obj, int32_t key);
     LookupStatus update(VMFrame &f, const Value &objval, const Value &idval);
-    LookupStatus disable(JSContext *cx, const char *reason);
+    LookupStatus disable(VMFrame &f, const char *reason);
     LookupStatus error(JSContext *cx);
     bool shouldUpdate(JSContext *cx);
 };
 
 struct PICInfo : public BasePolyIC {
     PICInfo() { reset(); }
 
     // Operation this is a PIC for.
@@ -421,19 +421,19 @@ struct PICInfo : public BasePolyIC {
 
     CodeLocationLabel getSlowTypeCheck() {
         JS_ASSERT(isGet());
         return slowPathStart.labelAtOffset(u.get.typeCheckOffset);
     }
 
     // Return a JITCode block corresponding to the code memory to attach a
     // new stub to.
-    JITCode lastCodeBlock(JITScript *jit) {
+    JITCode lastCodeBlock(JITChunk *chunk) {
         if (!stubsGenerated)
-            return JITCode(jit->code.m_code.executableAddress(), jit->code.m_size);
+            return JITCode(chunk->code.m_code.executableAddress(), chunk->code.m_size);
         return lastStubStart;
     }
 
     void updateLastPath(LinkerHelper &linker, Label label) {
         CodeLocationLabel loc = linker.locationOf(label);
         lastStubStart = JITCode(loc.executableAddress(), linker.size());
     }
 
--- a/js/src/methodjit/Retcon.cpp
+++ b/js/src/methodjit/Retcon.cpp
@@ -47,34 +47,22 @@
 #include "jsdbgapi.h"
 #include "jsnum.h"
 #include "assembler/assembler/LinkBuffer.h"
 #include "assembler/assembler/RepatchBuffer.h"
 
 #include "jscntxtinlines.h"
 #include "jsinterpinlines.h"
 
-#include "MethodJIT-inl.h"
-
 using namespace js;
 using namespace js::mjit;
 
 namespace js {
 namespace mjit {
 
-static inline JSRejoinState ScriptedRejoin(uint32_t pcOffset)
-{
-    return REJOIN_SCRIPTED | (pcOffset << 1);
-}
-
-static inline JSRejoinState StubRejoin(RejoinState rejoin)
-{
-    return rejoin << 1;
-}
-
 static inline void
 SetRejoinState(StackFrame *fp, const CallSite &site, void **location)
 {
     if (site.rejoin == REJOIN_SCRIPTED) {
         fp->setRejoin(ScriptedRejoin(site.pcOffset));
         *location = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpolineScripted);
     } else {
         fp->setRejoin(StubRejoin(site.rejoin));
@@ -92,60 +80,60 @@ CallsiteMatches(uint8_t *codeStart, cons
     if (codeStart + site.codeOffset + 4 == location)
         return true;
 #endif
 
     return false;
 }
 
 void
-Recompiler::patchCall(JITScript *jit, StackFrame *fp, void **location)
+Recompiler::patchCall(JITChunk *chunk, StackFrame *fp, void **location)
 {
-    uint8_t* codeStart = (uint8_t *)jit->code.m_code.executableAddress();
+    uint8_t* codeStart = (uint8_t *)chunk->code.m_code.executableAddress();
 
-    CallSite *callSites_ = jit->callSites();
-    for (uint32_t i = 0; i < jit->nCallSites; i++) {
+    CallSite *callSites_ = chunk->callSites();
+    for (uint32_t i = 0; i < chunk->nCallSites; i++) {
         if (CallsiteMatches(codeStart, callSites_[i], *location)) {
             JS_ASSERT(callSites_[i].inlineIndex == analyze::CrossScriptSSA::OUTER_FRAME);
             SetRejoinState(fp, callSites_[i], location);
             return;
         }
     }
 
     JS_NOT_REACHED("failed to find call site");
 }
 
 void
-Recompiler::patchNative(JSCompartment *compartment, JITScript *jit, StackFrame *fp,
+Recompiler::patchNative(JSCompartment *compartment, JITChunk *chunk, StackFrame *fp,
                         jsbytecode *pc, RejoinState rejoin)
 {
     /*
      * There is a native call or getter IC at pc which triggered recompilation.
      * The recompilation could have been triggered either by the native call
      * itself, or by a SplatApplyArgs preparing for the native call. Either
      * way, we don't want to patch up the call, but will instead steal the pool
-     * for the IC so it doesn't get freed with the JITScript, and patch up the
+     * for the IC so it doesn't get freed with the JITChunk, and patch up the
      * jump at the end to go to the interpoline.
      *
-     * When doing this, we do not reset the the IC itself; the JITScript must
+     * When doing this, we do not reset the the IC itself; the JITChunk must
      * be dead and about to be released due to the recompilation (or a GC).
      */
     fp->setRejoin(StubRejoin(rejoin));
 
     /* :XXX: We might crash later if this fails. */
     compartment->jaegerCompartment()->orphanedNativeFrames.append(fp);
 
     DebugOnly<bool> found = false;
 
     /*
      * Find and patch all native call stubs attached to the given PC. There may
      * be multiple ones for getter stubs attached to e.g. a GETELEM.
      */
-    for (unsigned i = 0; i < jit->nativeCallStubs.length(); i++) {
-        NativeCallStub &stub = jit->nativeCallStubs[i];
+    for (unsigned i = 0; i < chunk->nativeCallStubs.length(); i++) {
+        NativeCallStub &stub = chunk->nativeCallStubs[i];
         if (stub.pc != pc)
             continue;
 
         found = true;
 
         /* Check for pools that were already patched. */
         if (!stub.pool)
             continue;
@@ -189,32 +177,39 @@ Recompiler::patchFrame(JSCompartment *co
     StackFrame *fp = f->fp();
     void **addr = f->returnAddressLocation();
     RejoinState rejoin = (RejoinState) f->stubRejoin;
     if (rejoin == REJOIN_NATIVE ||
         rejoin == REJOIN_NATIVE_LOWERED ||
         rejoin == REJOIN_NATIVE_GETTER) {
         /* Native call. */
         if (fp->script() == script) {
-            patchNative(compartment, fp->jit(), fp, f->regs.pc, rejoin);
+            patchNative(compartment, fp->jit()->chunk(f->regs.pc), fp, f->regs.pc, rejoin);
             f->stubRejoin = REJOIN_NATIVE_PATCHED;
         }
     } else if (rejoin == REJOIN_NATIVE_PATCHED) {
         /* Already patched, don't do anything. */
     } else if (rejoin) {
         /* Recompilation triggered by CompileFunction. */
         if (fp->script() == script) {
             fp->setRejoin(StubRejoin(rejoin));
             *addr = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpoline);
             f->stubRejoin = 0;
         }
-    } else if (script->jitCtor && script->jitCtor->isValidCode(*addr)) {
-        patchCall(script->jitCtor, fp, addr);
-    } else if (script->jitNormal && script->jitNormal->isValidCode(*addr)) {
-        patchCall(script->jitNormal, fp, addr);
+    } else {
+        if (script->jitCtor) {
+            JITChunk *chunk = script->jitCtor->findCodeChunk(*addr);
+            if (chunk)
+                patchCall(chunk, fp, addr);
+        }
+        if (script->jitNormal) {
+            JITChunk *chunk = script->jitNormal->findCodeChunk(*addr);
+            if (chunk)
+                patchCall(chunk, fp, addr);
+        }
     }
 }
 
 StackFrame *
 Recompiler::expandInlineFrameChain(StackFrame *outer, InlineFrame *inner)
 {
     StackFrame *parent;
     if (inner->parent)
@@ -263,25 +258,28 @@ Recompiler::expandInlineFrames(JSCompart
     JS_ASSERT_IF(next, next->prev() == fp && next->prevInline() == inlined);
 
     /*
      * Treat any frame expansion as a recompilation event, so that f.jit() is
      * stable if no recompilations have occurred.
      */
     compartment->types.frameExpansions++;
 
+    jsbytecode *pc = next ? next->prevpc(NULL) : f->regs.pc;
+    JITChunk *chunk = fp->jit()->chunk(pc);
+
     /*
      * Patch the VMFrame's return address if it is returning at the given inline site.
      * Note there is no worry about handling a native or CompileFunction call here,
      * as such IC stubs are not generated within inline frames.
      */
     void **frameAddr = f->returnAddressLocation();
-    uint8_t* codeStart = (uint8_t *)fp->jit()->code.m_code.executableAddress();
+    uint8_t* codeStart = (uint8_t *)chunk->code.m_code.executableAddress();
 
-    InlineFrame *inner = &fp->jit()->inlineFrames()[inlined->inlineIndex];
+    InlineFrame *inner = &chunk->inlineFrames()[inlined->inlineIndex];
     jsbytecode *innerpc = inner->fun->script()->code + inlined->pcOffset;
 
     StackFrame *innerfp = expandInlineFrameChain(fp, inner);
 
     /* Check if the VMFrame returns into the inlined frame. */
     if (f->stubRejoin && f->fp() == fp) {
         /* The VMFrame is calling CompileFunction. */
         JS_ASSERT(f->stubRejoin != REJOIN_NATIVE &&
@@ -378,21 +376,16 @@ ClearAllFrames(JSCompartment *compartmen
         // frames can confuse the recompiler, which may see the VMFrame before
         // it has resumed execution.
 
         for (StackFrame *fp = f->fp(); fp != f->entryfp; fp = fp->prev())
             fp->setNativeReturnAddress(NULL);
     }
 }
 
-Recompiler::Recompiler(JSContext *cx, JSScript *script)
-  : cx(cx), script(script)
-{    
-}
-
 /*
  * Recompilation can be triggered either by the debugger (turning debug mode on for
  * a script or setting/clearing a trap), or by dynamic changes in type information
  * from type inference. When recompiling we don't immediately recompile the JIT
  * code, but destroy the old code and remove all references to the code, including
  * those from active stack frames. Things to do:
  *
  * - Purge scripted call inline caches calling into the script.
@@ -403,17 +396,17 @@ Recompiler::Recompiler(JSContext *cx, JS
  * - For VMFrames with a stub call return address in the original script,
  *   redirect to the interpoline.
  *
  * - For VMFrames whose entryncode address (the value of entryfp->ncode before
  *   being clobbered with JaegerTrampolineReturn) is in the original script,
  *   redirect that entryncode to the interpoline.
  */
 void
-Recompiler::recompile(bool resetUses)
+Recompiler::clearStackReferences(JSContext *cx, JSScript *script)
 {
     JS_ASSERT(script->hasJITCode());
 
     JaegerSpew(JSpew_Recompile, "recompiling script (file \"%s\") (line \"%d\") (length \"%d\")\n",
                script->filename, script->lineno, script->length);
 
     types::AutoEnterTypeInference enter(cx, true);
 
@@ -444,62 +437,27 @@ Recompiler::recompile(bool resetUses)
 
             if (next) {
                 // check for a scripted call returning into the recompiled script.
                 // this misses scanning the entry fp, which cannot return directly
                 // into JIT code.
                 void **addr = next->addressOfNativeReturnAddress();
 
                 if (JITCodeReturnAddress(*addr)) {
-                    JS_ASSERT(fp->jit()->isValidCode(*addr));
-                    patchCall(fp->jit(), fp, addr);
+                    JITChunk *chunk = fp->jit()->findCodeChunk(*addr);
+                    patchCall(chunk, fp, addr);
                 }
             }
 
             next = fp;
         }
 
         patchFrame(cx->compartment, f, script);
     }
 
-    if (script->jitNormal) {
-        cleanup(script->jitNormal);
-        ReleaseScriptCode(cx, script, false);
-    }
-    if (script->jitCtor) {
-        cleanup(script->jitCtor);
-        ReleaseScriptCode(cx, script, true);
-    }
-
-    if (resetUses) {
-        /*
-         * Wait for the script to get warm again before doing another compile,
-         * unless we are recompiling *because* the script got hot.
-         */
-        script->resetUseCount();
-    }
-
     cx->compartment->types.recompilations++;
 }
 
-void
-Recompiler::cleanup(JITScript *jit)
-{
-    while (!JS_CLIST_IS_EMPTY(&jit->callers)) {
-        JaegerSpew(JSpew_Recompile, "Purging IC caller\n");
-
-        JS_STATIC_ASSERT(offsetof(ic::CallICInfo, links) == 0);
-        ic::CallICInfo *ic = (ic::CallICInfo *) jit->callers.next;
-
-        uint8_t *start = (uint8_t *)ic->funGuard.executableAddress();
-        JSC::RepatchBuffer repatch(JSC::JITCode(start - 32, 64));
-
-        repatch.repatch(ic->funGuard, NULL);
-        repatch.relink(ic->funJump, ic->slowPathStart);
-        ic->purgeGuardedObject();
-    }
-}
-
 } /* namespace mjit */
 } /* namespace js */
 
 #endif /* JS_METHODJIT */
 
--- a/js/src/methodjit/Retcon.h
+++ b/js/src/methodjit/Retcon.h
@@ -59,38 +59,33 @@ namespace mjit {
  * for it are still on the stack, removing all references in the world to it
  * and patching up those existing frames to go into the interpreter. If you
  * ever change the code associated with a JSScript, or otherwise would cause
  * existing JITed code to be incorrect, you /must/ use this to invalidate the
  * JITed code, fixing up the stack in the process.
  */
 class Recompiler {
 public:
-    Recompiler(JSContext *cx, JSScript *script);
 
-    void recompile(bool resetUses = true);
+    static void
+    clearStackReferences(JSContext *cx, JSScript *script);
 
     static void
     expandInlineFrames(JSCompartment *compartment, StackFrame *fp, mjit::CallSite *inlined,
                        StackFrame *next, VMFrame *f);
 
     static void patchFrame(JSCompartment *compartment, VMFrame *f, JSScript *script);
 
 private:
-    JSContext *cx;
-    JSScript *script;
 
-    static void patchCall(JITScript *jit, StackFrame *fp, void **location);
-    static void patchNative(JSCompartment *compartment, JITScript *jit, StackFrame *fp,
+    static void patchCall(JITChunk *chunk, StackFrame *fp, void **location);
+    static void patchNative(JSCompartment *compartment, JITChunk *chunk, StackFrame *fp,
                             jsbytecode *pc, RejoinState rejoin);
 
     static StackFrame *
     expandInlineFrameChain(StackFrame *outer, InlineFrame *inner);
-
-    /* Detach jit from any IC callers. */
-    static void cleanup(JITScript *jit);
 };
 
 } /* namespace mjit */
 } /* namespace js */
 
 #endif
 
--- a/js/src/methodjit/StubCalls.cpp
+++ b/js/src/methodjit/StubCalls.cpp
@@ -889,18 +889,33 @@ stubs::Interrupt(VMFrame &f, jsbytecode 
     if (!js_HandleExecutionInterrupt(f.cx))
         THROW();
 }
 
 void JS_FASTCALL
 stubs::RecompileForInline(VMFrame &f)
 {
     ExpandInlineFrames(f.cx->compartment);
-    Recompiler recompiler(f.cx, f.script());
-    recompiler.recompile(/* resetUses */ false);
+    Recompiler::clearStackReferences(f.cx, f.script());
+
+    bool releaseChunk = true;
+    if (f.jit()->nchunks > 1) {
+        StackFrame *fp = f.fp();
+        for (FrameRegsIter i(f.cx); !i.done(); ++i) {
+            StackFrame *xfp = i.fp();
+            if (xfp->script() == fp->script() && xfp != fp) {
+                mjit::ReleaseScriptCode(f.cx, fp->script());
+                releaseChunk = false;
+                break;
+            }
+        }
+    }
+
+    if (releaseChunk)
+        f.jit()->destroyChunk(f.cx, f.chunkIndex(), /* resetUses = */ false);
 }
 
 void JS_FASTCALL
 stubs::Trap(VMFrame &f, uint32_t trapTypes)
 {
     Value rval;
 
     /*
@@ -1472,31 +1487,43 @@ stubs::LeaveBlock(VMFrame &f)
      */
     JSObject &obj = fp->scopeChain();
     if (obj.getProto() == &blockObj)
         obj.asClonedBlock().put(cx);
 
     fp->setBlockChain(blockObj.enclosingBlock());
 }
 
+inline void *
+FindNativeCode(VMFrame &f, jsbytecode *target)
+{
+    void* native = f.fp()->script()->nativeCodeForPC(f.fp()->isConstructing(), target);
+    if (native)
+        return native;
+
+    CompileStatus status = CanMethodJIT(f.cx, f.script(), target, f.fp()->isConstructing(),
+                                        CompileRequest_Interpreter);
+    if (status == Compile_Error)
+        THROWV(NULL);
+
+    mjit::ClearAllFrames(f.cx->compartment);
+    return target;
+}
+
 void * JS_FASTCALL
 stubs::LookupSwitch(VMFrame &f, jsbytecode *pc)
 {
     jsbytecode *jpc = pc;
     JSScript *script = f.fp()->script();
-    bool ctor = f.fp()->isConstructing();
 
     /* This is correct because the compiler adjusts the stack beforehand. */
     Value lval = f.regs.sp[-1];
 
-    if (!lval.isPrimitive()) {
-        void* native = script->nativeCodeForPC(ctor, pc + GET_JUMP_OFFSET(pc));
-        JS_ASSERT(native);
-        return native;
-    }
+    if (!lval.isPrimitive())
+        return FindNativeCode(f, pc + GET_JUMP_OFFSET(pc));
 
     JS_ASSERT(pc[0] == JSOP_LOOKUPSWITCH);
 
     pc += JUMP_OFFSET_LEN;
     uint32_t npairs = GET_UINT16(pc);
     pc += UINT16_LEN;
 
     JS_ASSERT(npairs);
@@ -1505,55 +1532,41 @@ stubs::LookupSwitch(VMFrame &f, jsbyteco
         JSLinearString *str = lval.toString()->ensureLinear(f.cx);
         if (!str)
             THROWV(NULL);
         for (uint32_t i = 1; i <= npairs; i++) {
             Value rval = script->getConst(GET_INDEX(pc));
             pc += INDEX_LEN;
             if (rval.isString()) {
                 JSLinearString *rhs = &rval.toString()->asLinear();
-                if (rhs == str || EqualStrings(str, rhs)) {
-                    void* native = script->nativeCodeForPC(ctor,
-                                                           jpc + GET_JUMP_OFFSET(pc));
-                    JS_ASSERT(native);
-                    return native;
-                }
+                if (rhs == str || EqualStrings(str, rhs))
+                    return FindNativeCode(f, jpc + GET_JUMP_OFFSET(pc));
             }
             pc += JUMP_OFFSET_LEN;
         }
     } else if (lval.isNumber()) {
         double d = lval.toNumber();
         for (uint32_t i = 1; i <= npairs; i++) {
             Value rval = script->getConst(GET_INDEX(pc));
             pc += INDEX_LEN;
-            if (rval.isNumber() && d == rval.toNumber()) {
-                void* native = script->nativeCodeForPC(ctor,
-                                                       jpc + GET_JUMP_OFFSET(pc));
-                JS_ASSERT(native);
-                return native;
-            }
+            if (rval.isNumber() && d == rval.toNumber())
+                return FindNativeCode(f, jpc + GET_JUMP_OFFSET(pc));
             pc += JUMP_OFFSET_LEN;
         }
     } else {
         for (uint32_t i = 1; i <= npairs; i++) {
             Value rval = script->getConst(GET_INDEX(pc));
             pc += INDEX_LEN;
-            if (lval == rval) {
-                void* native = script->nativeCodeForPC(ctor,
-                                                       jpc + GET_JUMP_OFFSET(pc));
-                JS_ASSERT(native);
-                return native;
-            }
+            if (lval == rval)
+                return FindNativeCode(f, jpc + GET_JUMP_OFFSET(pc));
             pc += JUMP_OFFSET_LEN;
         }
     }
 
-    void* native = script->nativeCodeForPC(ctor, jpc + GET_JUMP_OFFSET(jpc));
-    JS_ASSERT(native);
-    return native;
+    return FindNativeCode(f, jpc + GET_JUMP_OFFSET(jpc));
 }
 
 void * JS_FASTCALL
 stubs::TableSwitch(VMFrame &f, jsbytecode *origPc)
 {
     jsbytecode * const originalPC = origPc;
 
     JSOp op = JSOp(*originalPC);
@@ -1591,21 +1604,17 @@ stubs::TableSwitch(VMFrame &f, jsbytecod
             pc += JUMP_OFFSET_LEN * tableIdx;
             if (uint32_t candidateOffset = GET_JUMP_OFFSET(pc))
                 jumpOffset = candidateOffset;
         }
     }
 
 finally:
     /* Provide the native address. */
-    JSScript* script = f.fp()->script();
-    void* native = script->nativeCodeForPC(f.fp()->isConstructing(),
-                                           originalPC + jumpOffset);
-    JS_ASSERT(native);
-    return native;
+    return FindNativeCode(f, originalPC + jumpOffset);
 }
 
 void JS_FASTCALL
 stubs::Pos(VMFrame &f)
 {
     if (!ToNumber(f.cx, &f.regs.sp[-1]))
         THROW();
     if (!f.regs.sp[-1].isInt32())
@@ -1887,18 +1896,18 @@ stubs::InvariantFailure(VMFrame &f, void
 
     /* Recompile the outermost script, and don't hoist any bounds checks. */
     JSScript *script = f.fp()->script();
     JS_ASSERT(!script->failedBoundsCheck);
     script->failedBoundsCheck = true;
 
     ExpandInlineFrames(f.cx->compartment);
 
-    Recompiler recompiler(f.cx, script);
-    recompiler.recompile();
+    mjit::Recompiler::clearStackReferences(f.cx, script);
+    mjit::ReleaseScriptCode(f.cx, script);
 
     /* Return the same value (if any) as the call triggering the invariant failure. */
     return rval;
 }
 
 void JS_FASTCALL
 stubs::Exception(VMFrame &f)
 {
--- a/js/src/methodjit/StubCalls.h
+++ b/js/src/methodjit/StubCalls.h
@@ -218,16 +218,18 @@ JSObject * JS_FASTCALL
 NewDenseUnallocatedArray(VMFrame &f, uint32_t length);
 
 void JS_FASTCALL ArrayConcatTwoArrays(VMFrame &f);
 void JS_FASTCALL ArrayShift(VMFrame &f);
 
 void JS_FASTCALL WriteBarrier(VMFrame &f, Value *addr);
 void JS_FASTCALL GCThingWriteBarrier(VMFrame &f, Value *addr);
 
+void JS_FASTCALL CrossChunkShim(VMFrame &f, void *edge);
+
 } /* namespace stubs */
 
 /* 
  * If COND is true, return A; otherwise, return B. This allows us to choose between
  * function template instantiations without running afoul of C++'s overload resolution
  * rules. (Try simplifying, and you'll either see the problem --- or have found a
  * better solution!)
  */
--- a/js/src/shell/js.cpp
+++ b/js/src/shell/js.cpp
@@ -3851,16 +3851,36 @@ MJitDataStats(JSContext *cx, uintN argc,
     JS_SET_RVAL(cx, vp, INT_TO_JSVAL(n));
 #else
     JS_SET_RVAL(cx, vp, JSVAL_VOID);
 #endif
     return true;
 }
 
 JSBool
+MJitChunkLimit(JSContext *cx, uintN argc, jsval *vp)
+{
+    if (argc > 1) {
+        JS_ReportError(cx, "Wrong number of arguments");
+        return JS_FALSE;
+    }
+
+    jsdouble t;
+    if (!JS_ValueToNumber(cx, JS_ARGV(cx, vp)[0], &t))
+        return JS_FALSE;
+
+#ifdef JS_METHODJIT
+    mjit::SetChunkLimit((uint32_t) t);
+#endif
+
+    vp->setUndefined();
+    return true;
+}
+
+JSBool
 StringStats(JSContext *cx, uintN argc, jsval *vp)
 {
     // XXX: should report something meaningful;  bug 625305 will probably fix
     // this.
     JS_SET_RVAL(cx, vp, INT_TO_JSVAL(0));
     return true;
 }
 
@@ -4015,16 +4035,17 @@ static JSFunctionSpec shell_functions[] 
     JS_FN("elapsed",        Elapsed,        0,0),
     JS_FN("parent",         Parent,         1,0),
     JS_FN("wrap",           Wrap,           1,0),
     JS_FN("serialize",      Serialize,      1,0),
     JS_FN("deserialize",    Deserialize,    1,0),
 #ifdef JS_METHODJIT
     JS_FN("mjitcodestats",  MJitCodeStats,  0,0),
     JS_FN("mjitdatastats",  MJitDataStats,  0,0),
+    JS_FN("mjitChunkLimit", MJitChunkLimit, 1,0),
 #endif
     JS_FN("stringstats",    StringStats,    0,0),
     JS_FN("newGlobal",      NewGlobal,      1,0),
     JS_FN("parseLegacyJSON",ParseLegacyJSON,1,0),
     JS_FN("enableStackWalkingAssertion",EnableStackWalkingAssertion,1,0),
     JS_FN("getMaxArgs",     GetMaxArgs,     0,0),
     JS_FS_END
 };
@@ -4161,16 +4182,17 @@ static const char *const shell_help_mess
 "elapsed()                Execution time elapsed for the current context.",
 "parent(obj)              Returns the parent of obj.",
 "wrap(obj)                Wrap an object into a noop wrapper.",
 "serialize(sd)            Serialize sd using JS_WriteStructuredClone. Returns a TypedArray.",
 "deserialize(a)           Deserialize data generated by serialize.",
 #ifdef JS_METHODJIT
 "mjitcodestats()          Return stats on mjit code memory usage.",
 "mjitdatastats()          Return stats on mjit data memory usage.",
+"mjitChunkLimit(N)        Specify limit on compiled chunk size during mjit compilation.",
 #endif
 "stringstats()            Return stats on string memory usage.",
 "newGlobal(kind)          Return a new global object, in the current\n"
 "                         compartment if kind === 'same-compartment' or in a\n"
 "                         new compartment if kind === 'new-compartment'",
 "parseLegacyJSON(str)     Parse str as legacy JSON, returning the result if the\n"
 "                         parse succeeded and throwing a SyntaxError if not.",
 "enableStackWalkingAssertion(enabled)\n"
--- a/js/src/vm/Debugger.cpp
+++ b/js/src/vm/Debugger.cpp
@@ -182,18 +182,18 @@ BreakpointSite::recompile(JSContext *cx,
 #ifdef JS_METHODJIT
     if (script->hasJITCode()) {
         Maybe<AutoCompartment> ac;
         if (!forTrap) {
             ac.construct(cx, ScriptGlobal(cx, script, scriptGlobal));
             if (!ac.ref().enter())
                 return false;
         }
-        mjit::Recompiler recompiler(cx, script);
-        recompiler.recompile();
+        mjit::Recompiler::clearStackReferences(cx, script);
+        mjit::ReleaseScriptCode(cx, script);
     }
 #endif
     return true;
 }
 
 bool
 BreakpointSite::inc(JSContext *cx)
 {
--- a/js/src/vm/Stack-inl.h
+++ b/js/src/vm/Stack-inl.h
@@ -652,18 +652,19 @@ ContextStack::currentScript(jsbytecode *
     while (fp && fp->isDummyFrame())
         fp = fp->prev();
     if (!fp)
         return NULL;
 
 #ifdef JS_METHODJIT
     mjit::CallSite *inlined = regs->inlined();
     if (inlined) {
-        JS_ASSERT(inlined->inlineIndex < fp->jit()->nInlineFrames);
-        mjit::InlineFrame *frame = &fp->jit()->inlineFrames()[inlined->inlineIndex];
+        mjit::JITChunk *chunk = fp->jit()->chunk(regs->pc);
+        JS_ASSERT(inlined->inlineIndex < chunk->nInlineFrames);
+        mjit::InlineFrame *frame = &chunk->inlineFrames()[inlined->inlineIndex];
         JSScript *script = frame->fun->script();
         if (script->compartment() != cx_->compartment)
             return NULL;
         if (ppc)
             *ppc = script->code + inlined->pcOffset;
         return script;
     }
 #endif
--- a/js/src/vm/Stack.cpp
+++ b/js/src/vm/Stack.cpp
@@ -564,17 +564,18 @@ ContextStack::ensureOnTop(JSContext *cx,
      * expanded (along with other inline frames in the compartment).
      * To avoid pathological behavior here, make sure to mark any topmost
      * function as uninlineable, which will expand inline frames if there are
      * any and prevent the function from being inlined in the future.
      */
     if (FrameRegs *regs = cx->maybeRegs()) {
         JSFunction *fun = NULL;
         if (JSInlinedSite *site = regs->inlined()) {
-            fun = regs->fp()->jit()->inlineFrames()[site->inlineIndex].fun;
+            mjit::JITChunk *chunk = regs->fp()->jit()->chunk(regs->pc);
+            fun = chunk->inlineFrames()[site->inlineIndex].fun;
         } else {
             StackFrame *fp = regs->fp();
             if (fp->isFunctionFrame()) {
                 JSFunction *f = fp->fun();
                 if (f->isInterpreted())
                     fun = f;
             }
         }