Bug 857845 part 1 - rm JaegerMonkey. r=bhackett, sr=luke
author: Jan de Mooij <jdemooij@mozilla.com>
Mon, 13 May 2013 16:47:57 -0700
changeset 131975 89a645d498e3
parent 131974 060dd308b234
child 131976 6dabe5db3900
push id: 28061
push user: jandemooij@gmail.com
push date: 2013-05-15 17:05 +0000
treeherder: mozilla-inbound@89a645d498e3 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: bhackett, luke
bugs: 857845
milestone: 24.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 857845 part 1 - rm JaegerMonkey. r=bhackett, sr=luke
js/src/Makefile.in
js/src/assembler/assembler/AssemblerBuffer.h
js/src/assembler/assembler/AssemblerBufferWithConstantPool.h
js/src/builtin/TestingFunctions.cpp
js/src/configure.in
js/src/gc/Marking.cpp
js/src/gc/RootMarking.cpp
js/src/gc/Zone.cpp
js/src/ion/Bailouts.cpp
js/src/ion/BaselineCompiler.cpp
js/src/ion/Ion.cpp
js/src/ion/IonBuilder.cpp
js/src/jit-test/jit_test.py
js/src/jit-test/tests/auto-regress/bug726799.js
js/src/jit-test/tests/auto-regress/bug728509.js
js/src/jit-test/tests/auto-regress/bug740654.js
js/src/jit-test/tests/auto-regress/bug743876.js
js/src/jit-test/tests/basic/testBug755916.js
js/src/jit-test/tests/jaeger/bug719918.js
js/src/jit-test/tests/jaeger/bug781859-2.js
js/src/jit-test/tests/jaeger/bug781859-3.js
js/src/jit-test/tests/jaeger/chunk/bug712267.js
js/src/jsanalyze.cpp
js/src/jsanalyze.h
js/src/jsapi-tests/testDebugger.cpp
js/src/jsapi.cpp
js/src/jsarray.cpp
js/src/jscntxt.cpp
js/src/jscntxt.h
js/src/jscompartment.cpp
js/src/jscompartment.h
js/src/jsfun.cpp
js/src/jsgc.cpp
js/src/jsinfer.cpp
js/src/jsinfer.h
js/src/jsinferinlines.h
js/src/jsinterp.cpp
js/src/jsinterpinlines.h
js/src/jsmemorymetrics.cpp
js/src/jsopcode.cpp
js/src/jsprobes.cpp
js/src/jsprobes.h
js/src/jsscript.cpp
js/src/jsscript.h
js/src/jsscriptinlines.h
js/src/methodjit/BaseAssembler.h
js/src/methodjit/BaseCompiler.h
js/src/methodjit/CodeGenIncludes.h
js/src/methodjit/Compiler.cpp
js/src/methodjit/Compiler.h
js/src/methodjit/FastArithmetic.cpp
js/src/methodjit/FastBuiltins.cpp
js/src/methodjit/FastOps.cpp
js/src/methodjit/FrameEntry.h
js/src/methodjit/FrameState-inl.h
js/src/methodjit/FrameState.cpp
js/src/methodjit/FrameState.h
js/src/methodjit/ICChecker.h
js/src/methodjit/ICLabels.h
js/src/methodjit/ICRepatcher.h
js/src/methodjit/ImmutableSync.cpp
js/src/methodjit/ImmutableSync.h
js/src/methodjit/InlineFrameAssembler.h
js/src/methodjit/InvokeHelpers.cpp
js/src/methodjit/Logging.cpp
js/src/methodjit/Logging.h
js/src/methodjit/LoopState.cpp
js/src/methodjit/LoopState.h
js/src/methodjit/MachineRegs.h
js/src/methodjit/MethodJIT.cpp
js/src/methodjit/MethodJIT.h
js/src/methodjit/MonoIC.cpp
js/src/methodjit/MonoIC.h
js/src/methodjit/NunboxAssembler.h
js/src/methodjit/PolyIC.cpp
js/src/methodjit/PolyIC.h
js/src/methodjit/PunboxAssembler.h
js/src/methodjit/RematInfo.h
js/src/methodjit/Retcon.cpp
js/src/methodjit/Retcon.h
js/src/methodjit/StubCalls-inl.h
js/src/methodjit/StubCalls.cpp
js/src/methodjit/StubCalls.h
js/src/methodjit/StubCompiler.cpp
js/src/methodjit/StubCompiler.h
js/src/methodjit/TrampolineCompiler.cpp
js/src/methodjit/TrampolineCompiler.h
js/src/methodjit/TrampolineMIPS.cpp
js/src/methodjit/TrampolineMasmX64.asm
js/src/methodjit/TrampolineMingwX64.s
js/src/methodjit/TrampolineSUNWX64.s
js/src/methodjit/TrampolineSUNWX86.s
js/src/methodjit/TrampolineSparc.s
js/src/methodjit/TypedArrayIC.h
js/src/shell/js.cpp
js/src/vm/Debugger.cpp
js/src/vm/GlobalObject.cpp
js/src/vm/RegExpObject-inl.h
js/src/vm/SPSProfiler.cpp
js/src/vm/SPSProfiler.h
js/src/vm/Stack-inl.h
js/src/vm/Stack.cpp
js/src/vm/Stack.h
--- a/js/src/Makefile.in
+++ b/js/src/Makefile.in
@@ -179,40 +179,16 @@ ifdef ENABLE_TRACE_LOGGING
 
 ###############################################
 # BEGIN include sources for trace logging
 #
 CPPSRCS += 	TraceLogging.cpp
 
 endif
 
-ifdef ENABLE_METHODJIT
-
-###############################################
-# BEGIN include sources for the method JIT
-#
-VPATH += 	$(srcdir)/methodjit
-
-CPPSRCS += 	MethodJIT.cpp \
-		StubCalls.cpp \
-		Compiler.cpp \
-		FrameState.cpp \
-		FastArithmetic.cpp \
-		FastBuiltins.cpp \
-		FastOps.cpp \
-		LoopState.cpp \
-		StubCompiler.cpp \
-		MonoIC.cpp \
-		PolyIC.cpp \
-		ImmutableSync.cpp \
-		InvokeHelpers.cpp \
-		Retcon.cpp \
-		TrampolineCompiler.cpp \
-		$(NULL)
-
 # Ion
 ifdef ENABLE_ION
 VPATH +=	$(srcdir)/ion
 VPATH +=	$(srcdir)/ion/shared
 
 CPPSRCS +=	MIR.cpp \
 		BytecodeAnalysis.cpp \
 		BaselineCompiler.cpp \
@@ -331,27 +307,16 @@ CPPSRCS +=	Lowering-arm.cpp \
 		Architecture-arm.cpp \
 		MacroAssembler-arm.cpp \
 		BaselineCompiler-arm.cpp \
 		BaselineIC-arm.cpp \
 		$(NULL)
 endif #ENABLE_ION
 endif
 endif #ENABLE_ION
-ifeq (sparc, $(findstring sparc,$(TARGET_CPU)))
-ASFILES +=	TrampolineSparc.s
-endif
-ifeq (mips, $(findstring mips,$(TARGET_CPU)))
-CPPSRCS +=	TrampolineMIPS.cpp
-endif
-#
-# END enclude sources for the method JIT
-#############################################
-
-endif
 
 ###############################################
 # BEGIN include sources for the Nitro assembler
 #
 
 VPATH += 	$(srcdir)/assembler \
 		$(srcdir)/assembler/wtf \
 		$(srcdir)/assembler/jit \
@@ -361,39 +326,34 @@ VPATH += 	$(srcdir)/assembler \
 CPPSRCS += 	ExecutableAllocator.cpp \
 		PageBlock.cpp \
 		YarrInterpreter.cpp \
 		YarrPattern.cpp \
 		YarrSyntaxChecker.cpp \
 		YarrCanonicalizeUCS2.cpp \
 		$(NONE)
 
-ifdef ENABLE_METHODJIT_SPEW
-CPPSRCS += Logging.cpp
-endif
-
 ifneq (,$(filter-out OS2 WINNT,$(OS_ARCH)))
 CPPSRCS += ExecutableAllocatorPosix.cpp \
            OSAllocatorPosix.cpp \
            $(NONE)
 endif
 ifeq ($(OS_ARCH),WINNT)
 CPPSRCS += ExecutableAllocatorWin.cpp \
            OSAllocatorWin.cpp \
            $(NONE)
 endif
 ifeq ($(OS_ARCH),OS2)
 CPPSRCS += ExecutableAllocatorOS2.cpp \
            OSAllocatorOS2.cpp \
            $(NONE)
 endif
 
-ifneq (,$(ENABLE_METHODJIT)$(ENABLE_ION)$(ENABLE_YARR_JIT))
+ifneq (,$(ENABLE_ION)$(ENABLE_YARR_JIT))
 VPATH += 	$(srcdir)/assembler/assembler \
-		$(srcdir)/methodjit \
 		$(NONE)
 
 CPPSRCS +=	ARMAssembler.cpp \
 		MacroAssemblerARM.cpp \
 		MacroAssemblerX86Common.cpp \
 		$(NONE)
 
 ifdef ENABLE_YARR_JIT
@@ -995,39 +955,17 @@ selfhosted.out.h: $(selfhosted_out_h_dep
 # BEGIN kludges for the Nitro assembler
 #
 
 # Needed to "configure" it correctly.  Unfortunately these
 # flags wind up being applied to all code in js/src, not just
 # the code in js/src/assembler.
 CXXFLAGS += -DUSE_SYSTEM_MALLOC=1 -DENABLE_ASSEMBLER=1
 
-ifneq (,$(ENABLE_YARR_JIT)$(ENABLE_METHODJIT))
+ifneq (,$(ENABLE_YARR_JIT))
 CXXFLAGS +=  -DENABLE_JIT=1
 endif
 
 INCLUDES +=	-I$(srcdir)/assembler -I$(srcdir)/yarr
 
-ifdef ENABLE_METHODJIT
-# Build a standalone test program that exercises the assembler
-# sources a bit.
-TESTMAIN_OBJS = \
-		Assertions.$(OBJ_SUFFIX) \
-		ExecutableAllocator.$(OBJ_SUFFIX) \
-		ARMAssembler.$(OBJ_SUFFIX) \
-		MacroAssemblerARM.$(OBJ_SUFFIX) \
-		TestMain.$(OBJ_SUFFIX) \
-		jsutil.$(OBJ_SUFFIX) \
-		jslog2.$(OBJ_SUFFIX)
-
-ifeq ($(OS_ARCH),WINNT)
-TESTMAIN_OBJS += ExecutableAllocatorWin.$(OBJ_SUFFIX)
-else
-TESTMAIN_OBJS += ExecutableAllocatorPosix.$(OBJ_SUFFIX)
-endif
-
-TestMain$(HOST_BIN_SUFFIX): $(TESTMAIN_OBJS)
-	$(CXX) -o TestMain$(HOST_BIN_SUFFIX) $(TESTMAIN_OBJS)
-endif
-
 #
 # END kludges for the Nitro assembler
 ###############################################
--- a/js/src/assembler/assembler/AssemblerBuffer.h
+++ b/js/src/assembler/assembler/AssemblerBuffer.h
@@ -40,17 +40,16 @@
 #include "assembler/wtf/Assertions.h"
 
 #include <stdarg.h>
 #include "jsfriendapi.h"
 #include "jsopcode.h"
 
 #include "ion/IonSpewer.h"
 #include "js/RootingAPI.h"
-#include "methodjit/Logging.h"
 
 #define PRETTY_PRINT_OFFSET(os) (((os)<0)?"-":""), (((os)<0)?-(os):(os))
 
 #define FIXME_INSN_PRINTING                                 \
     do {                                                    \
         spew("FIXME insn printing %s:%d",                   \
              __FILE__, __LINE__);                           \
     } while (0)
@@ -281,18 +280,17 @@ namespace JSC {
             printer = sp;
         }
 
         void spew(const char *fmt, ...)
 #ifdef __GNUC__
             __attribute__ ((format (printf, 2, 3)))
 #endif
         {
-            if (printer ||
-                js::IsJaegerSpewChannelActive(js::JSpew_Insns)
+            if (printer
 #ifdef JS_ION
                 || js::ion::IonSpewEnabled(js::ion::IonSpew_Codegen)
 #endif
                 )
             {
                 // Buffer to hold the formatted string. Note that this may contain
                 // '%' characters, so do not pass it directly to printf functions.
                 char buf[200];
@@ -301,56 +299,41 @@ namespace JSC {
                 va_start(va, fmt);
                 int i = vsnprintf(buf, sizeof(buf), fmt, va);
                 va_end(va);
 
                 if (i > -1) {
                     if (printer)
                         printer->printf("%s\n", buf);
 
-                    // The assembler doesn't know which compiler it is for, so if
-                    // both JM and Ion spew are on, just print via one channel
-                    // (Use JM to pick up isOOLPath).
-                    if (js::IsJaegerSpewChannelActive(js::JSpew_Insns))
-                        js::JaegerSpew(js::JSpew_Insns, "%s       %s\n", isOOLPath ? ">" : " ", buf);
 #ifdef JS_ION
-                    else
-                        js::ion::IonSpew(js::ion::IonSpew_Codegen, "%s", buf);
+                    js::ion::IonSpew(js::ion::IonSpew_Codegen, "%s", buf);
 #endif
                 }
             }
         }
 
         static void staticSpew(const char *fmt, ...)
 #ifdef __GNUC__
             __attribute__ ((format (printf, 1, 2)))
 #endif
         {
-            if (js::IsJaegerSpewChannelActive(js::JSpew_Insns)
 #ifdef JS_ION
-                || js::ion::IonSpewEnabled(js::ion::IonSpew_Codegen)
-#endif
-                )
-            {
+            if (js::ion::IonSpewEnabled(js::ion::IonSpew_Codegen)) {
                 char buf[200];
 
                 va_list va;
                 va_start(va, fmt);
                 int i = vsnprintf(buf, sizeof(buf), fmt, va);
                 va_end(va);
 
-                if (i > -1) {
-                    if (js::IsJaegerSpewChannelActive(js::JSpew_Insns))
-                        js::JaegerSpew(js::JSpew_Insns, "        %s\n", buf);
-#ifdef JS_ION
-                    else
-                        js::ion::IonSpew(js::ion::IonSpew_Codegen, "%s", buf);
+                if (i > -1)
+                    js::ion::IonSpew(js::ion::IonSpew_Codegen, "%s", buf);
+            }
 #endif
-                }
-            }
         }
     };
 
 } // namespace JSC
 
 #endif // ENABLE(ASSEMBLER)
 
 #endif // AssemblerBuffer_h
--- a/js/src/assembler/assembler/AssemblerBufferWithConstantPool.h
+++ b/js/src/assembler/assembler/AssemblerBufferWithConstantPool.h
@@ -34,17 +34,16 @@
 #include "assembler/wtf/Platform.h"
 
 #if ENABLE_ASSEMBLER
 
 #include "AssemblerBuffer.h"
 #include "assembler/wtf/SegmentedVector.h"
 #include "assembler/wtf/Assertions.h"
 
-#include "methodjit/Logging.h"
 #include "jsnum.h"
 #define ASSEMBLER_HAS_CONSTANT_POOL 1
 
 namespace JSC {
 
 /*
     On a constant pool 4 or 8 bytes data can be stored. The values can be
     constants or addresses. The addresses should be 32 or 64 bits. The constants
--- a/js/src/builtin/TestingFunctions.cpp
+++ b/js/src/builtin/TestingFunctions.cpp
@@ -11,17 +11,16 @@
 #include "jsfriendapi.h"
 #include "jsgc.h"
 #include "jsobj.h"
 #include "jsobjinlines.h"
 #include "jsprf.h"
 #include "jswrapper.h"
 
 #include "builtin/TestingFunctions.h"
-#include "methodjit/MethodJIT.h"
 #include "vm/ForkJoin.h"
 
 #include "vm/Stack-inl.h"
 
 using namespace js;
 using namespace JS;
 
 using mozilla::ArrayLength;
@@ -165,24 +164,16 @@ GetBuildConfiguration(JSContext *cx, uns
 #ifdef JS_OOM_DO_BACKTRACES
     value = BooleanValue(true);
 #else
     value = BooleanValue(false);
 #endif
     if (!JS_SetProperty(cx, info, "oom-backtraces", &value))
         return false;
 
-#ifdef JS_METHODJIT
-    value = BooleanValue(true);
-#else
-    value = BooleanValue(false);
-#endif
-    if (!JS_SetProperty(cx, info, "methodjit", &value))
-        return false;
-
 #ifdef ENABLE_PARALLEL_JS
     value = BooleanValue(true);
 #else
     value = BooleanValue(false);
 #endif
     if (!JS_SetProperty(cx, info, "parallelJS", &value))
         return false;
 
@@ -811,55 +802,16 @@ DumpHeapComplete(JSContext *cx, unsigned
     js::DumpHeapComplete(JS_GetRuntime(cx), dumpFile);
 
     fclose(dumpFile);
 
     JS_SET_RVAL(cx, vp, JSVAL_VOID);
     return true;
 }
 
-JSBool
-MJitChunkLimit(JSContext *cx, unsigned argc, jsval *vp)
-{
-    CallArgs args = CallArgsFromVp(argc, vp);
-
-    if (argc != 1) {
-        RootedObject callee(cx, &args.callee());
-        ReportUsageError(cx, callee, "Wrong number of arguments");
-        return JS_FALSE;
-    }
-
-    if (cx->runtime->alwaysPreserveCode) {
-        JS_ReportError(cx, "Can't change chunk limit after gcPreserveCode()");
-        return JS_FALSE;
-    }
-
-    for (CompartmentsIter c(cx->runtime); !c.done(); c.next()) {
-        if (c->lastAnimationTime != 0) {
-            JS_ReportError(cx, "Can't change chunk limit if code may be preserved");
-            return JS_FALSE;
-        }
-    }
-
-    double t;
-    if (!JS_ValueToNumber(cx, args[0], &t))
-        return JS_FALSE;
-
-#ifdef JS_METHODJIT
-    mjit::SetChunkLimit((uint32_t) t);
-#endif
-
-    // Clear out analysis information which might refer to code compiled with
-    // the previous chunk limit.
-    JS_GC(cx->runtime);
-
-    vp->setUndefined();
-    return true;
-}
-
 static JSBool
 Terminate(JSContext *cx, unsigned arg, jsval *vp)
 {
     JS_ClearPendingException(cx);
     return JS_FALSE;
 }
 
 static JSBool
@@ -1051,20 +1003,16 @@ static JSFunctionSpecWithHelp TestingFun
     JS_FN_HELP("isProxy", IsProxy, 1, 0,
 "isProxy(obj)",
 "  If true, obj is a proxy of some sort"),
 
     JS_FN_HELP("dumpHeapComplete", DumpHeapComplete, 1, 0,
 "dumpHeapComplete([filename])",
 "  Dump reachable and unreachable objects to a file."),
 
-    JS_FN_HELP("mjitChunkLimit", MJitChunkLimit, 1, 0,
-"mjitChunkLimit(N)",
-"  Specify limit on compiled chunk size during mjit compilation."),
-
     JS_FN_HELP("terminate", Terminate, 0, 0,
 "terminate()",
 "  Terminate JavaScript execution, as if we had run out of\n"
 "  memory or been terminated by the slow script dialog."),
 
     JS_FN_HELP("enableSPSProfilingAssertions", EnableSPSProfilingAssertions, 1, 0,
 "enableSPSProfilingAssertions(slow)",
 "  Enables SPS instrumentation and corresponding assertions. If 'slow' is\n"
--- a/js/src/configure.in
+++ b/js/src/configure.in
@@ -2148,33 +2148,16 @@ mips*-*)
     AC_DEFINE(JS_NUNBOX32)
     ;;
 esac
 
 MOZ_ARG_DISABLE_BOOL(ion,
 [  --disable-ion      Disable use of the IonMonkey JIT],
   ENABLE_ION= )
 
-MOZ_ARG_DISABLE_BOOL(methodjit,
-[  --disable-methodjit           Disable method JIT support],
-  ENABLE_METHODJIT= )
-
-MOZ_ARG_DISABLE_BOOL(monoic,
-[  --disable-monoic      Disable use of MICs by JIT compiler],
-  ENABLE_MONOIC= )
-
-MOZ_ARG_DISABLE_BOOL(polyic,
-[  --disable-polyic      Disable use of PICs by JIT compiler],
-  ENABLE_POLYIC= )
-
-MOZ_ARG_ENABLE_BOOL(methodjit-spew,
-[  --enable-methodjit-spew      Enable method JIT spew support],
-  ENABLE_METHODJIT_SPEW=1,
-  ENABLE_METHODJIT_SPEW= )
-
 MOZ_ARG_DISABLE_BOOL(yarr-jit,
 [  --disable-yarr-jit    Disable YARR JIT support],
   ENABLE_YARR_JIT= )
 
 AC_SUBST(ENABLE_METHODJIT)
 AC_SUBST(ENABLE_METHODJIT_SPEW)
 
 if test "$ENABLE_METHODJIT"; then
--- a/js/src/gc/Marking.cpp
+++ b/js/src/gc/Marking.cpp
@@ -6,17 +6,16 @@
 
 #include "mozilla/DebugOnly.h"
 
 #include "jsprf.h"
 #include "jsstr.h"
 
 #include "gc/Marking.h"
 #include "gc/Nursery-inl.h"
-#include "methodjit/MethodJIT.h"
 #include "vm/Shape.h"
 
 #include "jsobjinlines.h"
 
 #include "ion/IonCode.h"
 #include "vm/Shape-inl.h"
 #include "vm/String-inl.h"
 
--- a/js/src/gc/RootMarking.cpp
+++ b/js/src/gc/RootMarking.cpp
@@ -764,22 +764,16 @@ js::gc::MarkRuntime(JSTracer *trc, bool 
                 c->watchpointMap->markAll(trc);
         }
 
         /* Mark debug scopes, if present */
         if (c->debugScopes)
             c->debugScopes->mark(trc);
     }
 
-#ifdef JS_METHODJIT
-    /* We need to expand inline frames before stack scanning. */
-    for (ZonesIter zone(rt); !zone.done(); zone.next())
-        mjit::ExpandInlineFrames(zone);
-#endif
-
     rt->stackSpace.mark(trc);
 
 #ifdef JS_ION
     ion::MarkIonActivations(rt, trc);
 #endif
 
     for (CompartmentsIter c(rt); !c.done(); c.next())
         c->mark(trc);
--- a/js/src/gc/Zone.cpp
+++ b/js/src/gc/Zone.cpp
@@ -61,23 +61,16 @@ Zone::init(JSContext *cx)
 {
     types.init(cx);
     return true;
 }
 
 void
 Zone::setNeedsBarrier(bool needs, ShouldUpdateIon updateIon)
 {
-#ifdef JS_METHODJIT
-    /* ClearAllFrames calls compileBarriers() and needs the old value. */
-    bool old = compileBarriers();
-    if (compileBarriers(needs) != old)
-        mjit::ClearAllFrames(this);
-#endif
-
 #ifdef JS_ION
     if (updateIon == UpdateIon && needs != ionUsingBarriers_) {
         ion::ToggleBarriers(this, needs);
         ionUsingBarriers_ = needs;
     }
 #endif
 
     needsBarrier_ = needs;
@@ -151,72 +144,55 @@ Zone::sweep(FreeOp *fop, bool releaseTyp
     }
 
     active = false;
 }
 
 void
 Zone::discardJitCode(FreeOp *fop, bool discardConstraints)
 {
-#ifdef JS_METHODJIT
-    /*
-     * Kick all frames on the stack into the interpreter, and release all JIT
-     * code in the compartment unless code is being preserved, in which case
-     * purge all caches in the JIT scripts. Even if we are not releasing all
-     * JIT code, we still need to release code for scripts which are in the
-     * middle of a native or getter stub call, as these stubs will have been
-     * redirected to the interpoline.
-     */
-    mjit::ClearAllFrames(this);
-
+#ifdef JS_ION
     if (isPreservingCode()) {
         PurgeJITCaches(this);
     } else {
-# ifdef JS_ION
 
-#  ifdef DEBUG
+# ifdef DEBUG
         /* Assert no baseline scripts are marked as active. */
         for (CellIterUnderGC i(this, FINALIZE_SCRIPT); !i.done(); i.next()) {
             JSScript *script = i.get<JSScript>();
             JS_ASSERT_IF(script->hasBaselineScript(), !script->baselineScript()->active());
         }
-#  endif
+# endif
 
         /* Mark baseline scripts on the stack as active. */
         ion::MarkActiveBaselineScripts(this);
 
         /* Only mark OSI points if code is being discarded. */
         ion::InvalidateAll(fop, this);
-# endif
+
         for (CellIterUnderGC i(this, FINALIZE_SCRIPT); !i.done(); i.next()) {
             JSScript *script = i.get<JSScript>();
-
-            mjit::ReleaseScriptCode(fop, script);
-# ifdef JS_ION
             ion::FinishInvalidation(fop, script);
 
             /*
              * Discard baseline script if it's not marked as active. Note that
              * this also resets the active flag.
              */
             ion::FinishDiscardBaselineScript(fop, script);
-# endif
 
             /*
              * Use counts for scripts are reset on GC. After discarding code we
              * need to let it warm back up to get information such as which
              * opcodes are setting array holes or accessing getter properties.
              */
             script->resetUseCount();
         }
 
         for (CompartmentsInZoneIter comp(this); !comp.done(); comp.next()) {
-#ifdef JS_ION
             /* Free optimized baseline stubs. */
             if (comp->ionCompartment())
                 comp->ionCompartment()->optimizedStubSpace()->free();
-#endif
 
             comp->types.sweepCompilerOutputs(fop, discardConstraints);
         }
     }
-#endif /* JS_METHODJIT */
+#endif
 }
--- a/js/src/ion/Bailouts.cpp
+++ b/js/src/ion/Bailouts.cpp
@@ -567,21 +567,16 @@ ion::CachedShapeGuardFailure()
 {
     JSContext *cx = GetIonContext()->cx;
     JSScript *script = GetBailedJSScript(cx);
 
     JS_ASSERT(!script->ionScript()->invalidated());
 
     script->failedShapeGuard = true;
 
-    // Purge JM caches in the script and all inlined script, to avoid baking in
-    // the same shape guard next time.
-    for (size_t i = 0; i < script->ionScript()->scriptEntries(); i++)
-        mjit::PurgeCaches(script->ionScript()->getScript(i));
-
     IonSpew(IonSpew_Invalidate, "Invalidating due to shape guard failure");
 
     return Invalidate(cx, script);
 }
 
 uint32_t
 ion::ThunkToInterpreter(Value *vp)
 {
--- a/js/src/ion/BaselineCompiler.cpp
+++ b/js/src/ion/BaselineCompiler.cpp
@@ -185,33 +185,16 @@ BaselineCompiler::compile()
 
     // All SPS instrumentation is emitted toggled off.  Toggle them on if needed.
     if (cx->runtime->spsProfiler.enabled())
         baselineScript->toggleSPS(true);
 
     return Method_Compiled;
 }
 
-#ifdef DEBUG
-#define SPEW_OPCODE()                                                         \
-    JS_BEGIN_MACRO                                                            \
-        if (IsJaegerSpewChannelActive(JSpew_JSOps)) {                         \
-            Sprinter sprinter(cx);                                            \
-            sprinter.init();                                                  \
-            RootedScript script_(cx, script);                                 \
-            js_Disassemble1(cx, script_, pc, pc - script_->code,              \
-                            JS_TRUE, &sprinter);                              \
-            JaegerSpew(JSpew_JSOps, "    %2u %s",                             \
-                       (unsigned)frame.stackDepth(), sprinter.string());      \
-        }                                                                     \
-    JS_END_MACRO;
-#else
-#define SPEW_OPCODE()
-#endif /* DEBUG */
-
 bool
 BaselineCompiler::emitPrologue()
 {
     masm.push(BaselineFrameReg);
     masm.mov(BaselineStackReg, BaselineFrameReg);
 
     masm.subPtr(Imm32(BaselineFrame::Size()), BaselineStackReg);
     masm.checkStackAlignment();
@@ -534,17 +517,16 @@ MethodStatus
 BaselineCompiler::emitBody()
 {
     JS_ASSERT(pc == script->code);
 
     bool lastOpUnreachable = false;
     uint32_t emittedOps = 0;
 
     while (true) {
-        SPEW_OPCODE();
         JSOp op = JSOp(*pc);
         IonSpew(IonSpew_BaselineOp, "Compiling op @ %d: %s",
                 int(pc - script->code), js_CodeName[op]);
 
         BytecodeInfo *info = analysis_.maybeInfo(pc);
 
         // Skip unreachable ops.
         if (!info) {
--- a/js/src/ion/Ion.cpp
+++ b/js/src/ion/Ion.cpp
@@ -40,17 +40,16 @@
 #endif
 #include "gc/Marking.h"
 #include "jsgcinlines.h"
 #include "jsinferinlines.h"
 #include "jsobjinlines.h"
 #include "vm/Stack-inl.h"
 #include "ion/IonFrames-inl.h"
 #include "ion/CompilerRoot.h"
-#include "methodjit/Retcon.h"
 #include "ExecutionModeInlines.h"
 
 #if JS_TRACE_LOGGING
 #include "TraceLogging.h"
 #endif
 
 using namespace js;
 using namespace js::ion;
@@ -1289,20 +1288,17 @@ AttachFinishedCompilations(JSContext *cx
             {
                 // Release the worker thread lock and root the compiler for GC.
                 AutoTempAllocatorRooter root(cx, &builder->temp());
                 AutoUnlockWorkerThreadState unlock(cx->runtime);
                 AutoFlushCache afc("AttachFinishedCompilations");
                 success = codegen->link();
             }
 
-            if (success) {
-                if (script->hasIonScript())
-                    mjit::DisableScriptCodeForIon(script, script->ionScript()->osrPc());
-            } else {
+            if (!success) {
                 // Silently ignore OOM during code generation, we're at an
                 // operation callback and can't propagate failures.
                 cx->clearPendingException();
             }
         }
 
         FinishOffThreadBuilder(builder);
     }
@@ -1585,18 +1581,18 @@ Compile(JSContext *cx, HandleScript scri
     IonScript *scriptIon = GetIonScript(script, executionMode);
     if (scriptIon) {
         if (!scriptIon->method())
             return Method_CantCompile;
         return Method_Compiled;
     }
 
     if (executionMode == SequentialExecution) {
-        if (cx->methodJitEnabled || IsBaselineEnabled(cx)) {
-            // If JM is enabled we use getUseCount instead of incUseCount to avoid
+        if (IsBaselineEnabled(cx)) {
+            // If Baseline is enabled we use getUseCount instead of incUseCount to avoid
             // bumping the use count twice.
             if (script->getUseCount() < js_IonOptions.usesBeforeCompile)
                 return Method_Skipped;
         } else {
             if (script->incUseCount() < js_IonOptions.usesBeforeCompileNoJaeger)
                 return Method_Skipped;
         }
     }
@@ -2292,18 +2288,16 @@ ion::Invalidate(types::TypeCompartment &
     AutoFlushCache afc ("Invalidate");
 
     // Add an invalidation reference to all invalidated IonScripts to indicate
     // to the traversal which frames have been invalidated.
     bool anyInvalidation = false;
     for (size_t i = 0; i < invalid.length(); i++) {
         const types::CompilerOutput &co = *invalid[i].compilerOutput(types);
         switch (co.kind()) {
-          case types::CompilerOutput::MethodJIT:
-            break;
           case types::CompilerOutput::Ion:
           case types::CompilerOutput::ParallelIon:
             JS_ASSERT(co.isValid());
             IonSpew(IonSpew_Invalidate, " Invalidate %s:%u, IonScript %p",
                     co.script->filename(), co.script->lineno, co.ion());
 
             // Keep the ion script alive during the invalidation and flag this
             // ionScript as being invalidated.  This increment is removed by the
@@ -2323,18 +2317,16 @@ ion::Invalidate(types::TypeCompartment &
 
     // Drop the references added above. If a script was never active, its
     // IonScript will be immediately destroyed. Otherwise, it will be held live
     // until its last invalidated frame is destroyed.
     for (size_t i = 0; i < invalid.length(); i++) {
         types::CompilerOutput &co = *invalid[i].compilerOutput(types);
         ExecutionMode executionMode = SequentialExecution;
         switch (co.kind()) {
-          case types::CompilerOutput::MethodJIT:
-            continue;
           case types::CompilerOutput::Ion:
             break;
           case types::CompilerOutput::ParallelIon:
             executionMode = ParallelExecution;
             break;
         }
         JS_ASSERT(co.isValid());
         JSScript *script = co.script;
--- a/js/src/ion/IonBuilder.cpp
+++ b/js/src/ion/IonBuilder.cpp
@@ -7475,23 +7475,18 @@ bool
 IonBuilder::getPropTryInlineAccess(bool *emitted, HandlePropertyName name, HandleId id,
                                    bool barrier, types::StackTypeSet *types)
 {
     JS_ASSERT(*emitted == false);
     if (current->peek(-1)->type() != MIRType_Object)
         return true;
 
     Vector<Shape *> shapes(cx);
-    if (Shape *objShape = mjit::GetPICSingleShape(cx, script(), pc, info().constructing())) {
-        if (!shapes.append(objShape))
-            return false;
-    } else {
-        if (!inspector->maybeShapesForPropertyOp(pc, shapes))
-            return false;
-    }
+    if (!inspector->maybeShapesForPropertyOp(pc, shapes))
+        return false;
 
     if (shapes.empty() || !CanInlinePropertyOpShapes(shapes))
         return true;
 
     MIRType rvalType = MIRTypeFromValueType(types->getKnownTypeTag());
     if (barrier || IsNullOrUndefined(rvalType))
         rvalType = MIRType_Value;
 
@@ -7677,23 +7672,18 @@ IonBuilder::jsop_setprop(HandlePropertyN
         current->add(fixed);
         current->push(value);
         if (propTypes->needsBarrier(cx))
             fixed->setNeedsBarrier();
         return resumeAfter(fixed);
     }
 
     Vector<Shape *> shapes(cx);
-    if (Shape *objShape = mjit::GetPICSingleShape(cx, script(), pc, info().constructing())) {
-        if (!shapes.append(objShape))
-            return false;
-    } else {
-        if (!inspector->maybeShapesForPropertyOp(pc, shapes))
-            return false;
-    }
+    if (!inspector->maybeShapesForPropertyOp(pc, shapes))
+        return false;
 
     if (!shapes.empty() && CanInlinePropertyOpShapes(shapes)) {
         if (shapes.length() == 1) {
             spew("Inlining monomorphic SETPROP");
 
             // The JM IC was monomorphic, so we inline the property access as
             // long as the shape is not in dictionary mode. We cannot be sure
             // that the shape is still a lastProperty, and calling Shape::search
--- a/js/src/jit-test/jit_test.py
+++ b/js/src/jit-test/jit_test.py
@@ -137,31 +137,23 @@ def main(argv):
     if not options.run_slow:
         test_list = [ _ for _ in test_list if not _.slow ]
 
     # The full test list is ready. Now create copies for each JIT configuration.
     job_list = []
     if options.tbpl:
         # Running all bits would take forever. Instead, we test a few interesting combinations.
         flags = [
-                      ['--no-baseline', '--no-jm'],
                       ['--ion-eager'], # implies --baseline-eager
-                      ['--no-baseline'],
-                      ['--no-baseline', '--ion-eager'],
                       ['--baseline-eager'],
                       ['--baseline-eager', '--no-ti', '--no-fpu'],
-                      # Below, equivalents the old shell flags: ,m,am,amd,n,mn,amn,amdn,mdn
-                      ['--no-baseline', '--no-ion', '--no-jm', '--no-ti'],
+                      ['--no-baseline'],
+                      ['--no-baseline', '--ion-eager'],
+                      ['--no-baseline', '--no-ion'],
                       ['--no-baseline', '--no-ion', '--no-ti'],
-                      ['--no-baseline', '--no-ion', '--no-ti', '--always-mjit', '--debugjit'],
-                      ['--no-baseline', '--no-ion', '--no-jm'],
-                      ['--no-baseline', '--no-ion'],
-                      ['--no-baseline', '--no-ion', '--always-mjit'],
-                      ['--no-baseline', '--no-ion', '--always-mjit', '--debugjit'],
-                      ['--no-baseline', '--no-ion', '--debugjit']
                     ]
         for test in test_list:
             for variant in flags:
                 new_test = test.copy()
                 new_test.jitflags.extend(variant)
                 job_list.append(new_test)
     elif options.ion:
         flags = [['--no-jm'], ['--ion-eager']]
--- a/js/src/jit-test/tests/auto-regress/bug726799.js
+++ b/js/src/jit-test/tests/auto-regress/bug726799.js
@@ -1,16 +1,15 @@
 // Binary: cache/js-dbg-32-ebafee0cea36-linux
 // Flags: -m -n
 //
 function tryItOut(code) {
     f = eval("(function(){" + code + "})")
     for (e in f()) {}
 }
-mjitChunkLimit(25)
 tryItOut("\
     for each(x in[0,0,0,0,0,0,0]) {\
         function f(b) {\
             Object.defineProperty(b,\"\",({t:f}))\
         }\
         for each(d in[(1),String,String,String,String,(0),String,(1),String]) {\
             try{\
                 f(d);\
--- a/js/src/jit-test/tests/auto-regress/bug728509.js
+++ b/js/src/jit-test/tests/auto-regress/bug728509.js
@@ -5,10 +5,10 @@ function g(code) {
     try {
         f = eval("(function(){" + code + "})")
     } catch (r) {}
     f()
     try {
         evalcx("(function(){return" + code + "})()")
     } catch (e) {}
 }
-g("mjitChunkLimit(8)")
+g("")
 g(" function(x,[]){NaN.x::c}()")
--- a/js/src/jit-test/tests/auto-regress/bug740654.js
+++ b/js/src/jit-test/tests/auto-regress/bug740654.js
@@ -1,14 +1,13 @@
 // |jit-test| error:InternalError
 
 // Binary: cache/js-dbg-32-92fe907ddac8-linux
 // Flags: -m -n
 //
-mjitChunkLimit(31)
 o = {}
 o.valueOf = function() {
     for (var p in undefined) {
         a = new Function;
     }
     +o;
 };
 +o;
--- a/js/src/jit-test/tests/auto-regress/bug743876.js
+++ b/js/src/jit-test/tests/auto-regress/bug743876.js
@@ -1,16 +1,16 @@
 // Binary: cache/js-dbg-64-434f50e70815-linux
 // Flags: -m -n -a
 //
 
 var lfcode = new Array();
 lfcode.push("3");
 lfcode.push("\
-evaluate(\"mjitChunkLimit(5)\");\
+evaluate(\"\");\
 function slice(a, b) {\
     return slice(index, ++(ArrayBuffer));\
 }\
 ");
 lfcode.push("0");
 lfcode.push("var arr = [0, 1, 2, 3, 4];\
 function replacer() {\
   assertEq(arguments.length, 2);\
--- a/js/src/jit-test/tests/basic/testBug755916.js
+++ b/js/src/jit-test/tests/basic/testBug755916.js
@@ -3,11 +3,10 @@
 Object.defineProperty(this, "t2", {
     get: function() {
         for (p in h2) {
             t2
         }
     }
 })
 h2 = {}
-mjitChunkLimit(8)
 h2.a = function() {}
 Object(t2)
--- a/js/src/jit-test/tests/jaeger/bug719918.js
+++ b/js/src/jit-test/tests/jaeger/bug719918.js
@@ -2,18 +2,16 @@ function test(m) {
   do {
     if (m = arr[0]) break;
     m = 0;
   }
   while (0);
   arr[1] = m;
 }
 
-mjitChunkLimit(10);
-
 arr = new Float64Array(2);
 
 // run function a lot to trigger methodjit compile
 for(var i=0; i<200; i++)
   test(0);
 
 // should return 0, not NaN
 assertEq(arr[1], 0)
--- a/js/src/jit-test/tests/jaeger/bug781859-2.js
+++ b/js/src/jit-test/tests/jaeger/bug781859-2.js
@@ -1,9 +1,8 @@
-mjitChunkLimit(42);
 Function("\
     switch (/x/) {\
         case 8:\
         break;\
         t(function(){})\
     }\
     while (false)(function(){})\
 ")()
--- a/js/src/jit-test/tests/jaeger/bug781859-3.js
+++ b/js/src/jit-test/tests/jaeger/bug781859-3.js
@@ -1,9 +1,8 @@
-mjitChunkLimit(10);
 function e() {
     try {
         var t = undefined;
     } catch (e) { }
     while (t)
         continue;
 }
 for (var i = 0; i < 20; i++)
--- a/js/src/jit-test/tests/jaeger/chunk/bug712267.js
+++ b/js/src/jit-test/tests/jaeger/chunk/bug712267.js
@@ -1,10 +1,8 @@
-
-evaluate("mjitChunkLimit(5)");
 expected = 100;
 function slice(a, b) {
   return expected--;
 }
 function f() {
   var length = 8.724e02 ;
   var index = 0;
   function get3() {
--- a/js/src/jsanalyze.cpp
+++ b/js/src/jsanalyze.cpp
@@ -591,35 +591,16 @@ ScriptAnalysis::analyzeBytecode(JSContex
 
     /*
      * Always ensure that a script's arguments usage has been analyzed before
      * entering the script. This allows the functionPrologue to ensure that
      * arguments are always created eagerly which simplifies interp logic.
      */
     if (!script_->analyzedArgsUsage())
         analyzeSSA(cx);
-
-    /*
-     * If the script has JIT information (we are reanalyzing the script after
-     * a purge), add safepoints for the targets of any cross chunk edges in
-     * the script. These safepoints are normally added when the JITScript is
-     * constructed, but will have been lost during the purge.
-     */
-#ifdef JS_METHODJIT
-    mjit::JITScript *jit = NULL;
-    for (int constructing = 0; constructing <= 1 && !jit; constructing++) {
-        for (int barriers = 0; barriers <= 1 && !jit; barriers++)
-            jit = script_->getJIT((bool) constructing, (bool) barriers);
-    }
-    if (jit) {
-        mjit::CrossChunkEdge *edges = jit->edges();
-        for (size_t i = 0; i < jit->nedges; i++)
-            getCode(edges[i].target).safePoint = true;
-    }
-#endif
 }
 
 /////////////////////////////////////////////////////////////////////
 // Lifetime Analysis
 /////////////////////////////////////////////////////////////////////
 
 void
 ScriptAnalysis::analyzeLifetimes(JSContext *cx)
@@ -885,17 +866,17 @@ ScriptAnalysis::analyzeLifetimes(JSConte
         offset--;
     }
 
     js_free(saved);
 
     ranLifetimes_ = true;
 }
 
-#ifdef JS_METHODJIT_SPEW
+#ifdef DEBUG
 void
 LifetimeVariable::print() const
 {
     Lifetime *segment = lifetime ? lifetime : saved;
     while (segment) {
         printf(" (%u,%u%s)", segment->start, segment->end, segment->loopTail ? ",tail" : "");
         segment = segment->next;
     }
@@ -1101,31 +1082,16 @@ ScriptAnalysis::ensureVariable(LifetimeV
         return;
     }
 
     JS_ASSERT(until < var.lifetime->start);
     var.lifetime->start = until;
     var.ensured = true;
 }
 
-void
-ScriptAnalysis::clearAllocations()
-{
-    /*
-     * Clear out storage used for register allocations in a compilation once
-     * that compilation has finished. Register allocations are only used for
-     * a single compilation.
-     */
-    for (unsigned i = 0; i < script_->length; i++) {
-        Bytecode *code = maybeCode(i);
-        if (code)
-            code->allocation = NULL;
-    }
-}
-
 /////////////////////////////////////////////////////////////////////
 // SSA Analysis
 /////////////////////////////////////////////////////////////////////
 
 void
 ScriptAnalysis::analyzeSSA(JSContext *cx)
 {
     JS_ASSERT(cx->compartment->activeAnalysis && !ranSSA() && !failed());
@@ -1836,23 +1802,19 @@ ScriptAnalysis::needsArgsObj(JSContext *
         return needsArgsObj(cx, seen, SSAValue::PhiValue(use->offset, use->u.phi));
 
     jsbytecode *pc = script_->code + use->offset;
     JSOp op = JSOp(*pc);
 
     if (op == JSOP_POP || op == JSOP_POPN)
         return false;
 
-    /* SplatApplyArgs can read fp->canonicalActualArg(i) directly. */
-    if (op == JSOP_FUNAPPLY && GET_ARGC(pc) == 2 && use->u.which == 0) {
-#ifdef JS_METHODJIT
-        JS_ASSERT(mjit::IsLowerableFunCallOrApply(pc));
-#endif
+    /* We can read the frame's arguments directly for f.apply(x, arguments). */
+    if (op == JSOP_FUNAPPLY && GET_ARGC(pc) == 2 && use->u.which == 0)
         return false;
-    }
 
     /* arguments[i] can read fp->canonicalActualArg(i) directly. */
     if (op == JSOP_GETELEM && use->u.which == 1)
         return false;
 
     /* arguments.length length can read fp->numActualArgs() directly. */
     if (op == JSOP_LENGTH)
         return false;
--- a/js/src/jsanalyze.h
+++ b/js/src/jsanalyze.h
@@ -20,19 +20,16 @@
 #include "jsopcodeinlines.h"
 
 #include "ds/LifoAlloc.h"
 #include "js/TemplateLib.h"
 #include "vm/ScopeObject.h"
 
 class JSScript;
 
-/* Forward declaration of downstream register allocations computed for join points. */
-namespace js { namespace mjit { struct RegisterAllocation; } }
-
 namespace js {
 namespace analyze {
 
 /*
  * There are three analyses we can perform on a JSScript, outlined below.
  * The results of all three are stored in ScriptAnalysis, but the analyses
  * themselves can be performed separately. Along with type inference results,
  * per-script analysis results are tied to the per-compartment analysis pool
@@ -120,21 +117,16 @@ class Bytecode
     /* Stack depth before this opcode. */
     uint32_t stackDepth;
 
   private:
 
     /* If this is a JSOP_LOOPHEAD or JSOP_LOOPENTRY, information about the loop. */
     LoopAnalysis *loop;
 
-    /* --------- Lifetime analysis --------- */
-
-    /* Any allocation computed downstream for this bytecode. */
-    mjit::RegisterAllocation *allocation;
-
     /* --------- SSA analysis --------- */
 
     /* Generated location of each value popped by this bytecode. */
     SSAValue *poppedValues;
 
     /* Points where values pushed or written by this bytecode are popped. */
     SSAUseChain **pushedUses;
 
@@ -508,17 +500,17 @@ struct LifetimeVariable
                     return UINT32_MAX;
                 offset = segment->start;
             }
             segment = segment->next;
         }
         return offset;
     }
 
-#ifdef JS_METHODJIT_SPEW
+#ifdef DEBUG
     void print() const;
 #endif
 };
 
 struct SSAPhiNode;
 
 /*
  * Representation of values on stack or in slots at each point in the script.
@@ -990,24 +982,16 @@ class ScriptAnalysis
         JS_ASSERT(trackUseChain(v));
         if (v.kind() == SSAValue::PUSHED)
             return getCode(v.pushedOffset()).pushedUses[v.pushedIndex()];
         if (v.kind() == SSAValue::VAR)
             return getCode(v.varOffset()).pushedUses[GetDefCount(script_, v.varOffset())];
         return v.phiNode()->uses;
     }
 
-    mjit::RegisterAllocation *&getAllocation(uint32_t offset) {
-        JS_ASSERT(offset < script_->length);
-        return getCode(offset).allocation;
-    }
-    mjit::RegisterAllocation *&getAllocation(const jsbytecode *pc) {
-        return getAllocation(pc - script_->code);
-    }
-
     LoopAnalysis *getLoop(uint32_t offset) {
         JS_ASSERT(offset < script_->length);
         return getCode(offset).loop;
     }
     LoopAnalysis *getLoop(const jsbytecode *pc) { return getLoop(pc - script_->code); }
 
     /* For a JSOP_CALL* op, get the pc of the corresponding JSOP_CALL/NEW/etc. */
     jsbytecode *getCallPC(jsbytecode *pc)
@@ -1046,18 +1030,16 @@ class ScriptAnalysis
         JS_ASSERT(script_->compartment()->activeAnalysis);
         JS_ASSERT(!slotEscapes(slot));
         return lifetimes[slot];
     }
 
     void printSSA(JSContext *cx);
     void printTypes(JSContext *cx);
 
-    void clearAllocations();
-
   private:
     void setOOM(JSContext *cx) {
         if (!outOfMemory)
             js_ReportOutOfMemory(cx);
         outOfMemory = true;
         hadFailure = true;
     }
 
--- a/js/src/jsapi-tests/testDebugger.cpp
+++ b/js/src/jsapi-tests/testDebugger.cpp
@@ -126,31 +126,27 @@ ThrowHook(JSContext *cx, JSScript *, jsb
     jsval _;
     JS_EvaluateScript(cx, global, text, strlen(text), "", 0, &_);
 
     return JSTRAP_CONTINUE;
 }
 
 BEGIN_TEST(testDebugger_throwHook)
 {
-    uint32_t newopts =
-        JS_GetOptions(cx) | JSOPTION_METHODJIT | JSOPTION_METHODJIT_ALWAYS;
-    uint32_t oldopts = JS_SetOptions(cx, newopts);
-
+    CHECK(JS_SetDebugMode(cx, true));
     CHECK(JS_SetThrowHook(rt, ThrowHook, NULL));
     EXEC("function foo() { throw 3 };\n"
          "for (var i = 0; i < 10; ++i) { \n"
          "  var x = {}\n"
          "  try {\n"
          "    foo(); \n"
          "  } catch(e) {}\n"
          "}\n");
     CHECK(called);
     CHECK(JS_SetThrowHook(rt, NULL, NULL));
-    JS_SetOptions(cx, oldopts);
     return true;
 }
 END_TEST(testDebugger_throwHook)
 
 BEGIN_TEST(testDebugger_debuggerObjectVsDebugMode)
 {
     CHECK(JS_DefineDebuggerObject(cx, global));
     JS::RootedObject debuggee(cx, JS_NewGlobalObject(cx, getGlobalClass(), NULL));
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -82,20 +82,19 @@
 #include "vm/ObjectImpl-inl.h"
 #include "vm/RegExpObject-inl.h"
 #include "vm/RegExpStatics-inl.h"
 #include "vm/Shape-inl.h"
 #include "vm/String-inl.h"
 
 #if ENABLE_YARR_JIT
 #include "assembler/jit/ExecutableAllocator.h"
-#include "methodjit/Logging.h"
 #endif
 
-#ifdef JS_METHODJIT
+#ifdef JS_ION
 #include "ion/Ion.h"
 #endif
 
 using namespace js;
 using namespace js::gc;
 using namespace js::types;
 
 using mozilla::Maybe;
@@ -687,17 +686,17 @@ JS::isGCEnabled()
 JS_FRIEND_API(bool) JS::isGCEnabled() { return true; }
 #endif
 
 static const JSSecurityCallbacks NullSecurityCallbacks = { };
 
 static bool
 JitSupportsFloatingPoint()
 {
-#if defined(JS_METHODJIT) || defined(JS_ION)
+#if defined(JS_ION)
     if (!JSC::MacroAssembler().supportsFloatingPoint())
         return false;
 
 #if defined(JS_ION) && WTF_ARM_ARCH_VERSION == 6
     if (!js::ion::hasVFP())
         return false;
 #endif
 
@@ -734,19 +733,16 @@ JSRuntime::JSRuntime(JSUseHelperThreads 
     defaultLocale(NULL),
 #ifdef JS_THREADSAFE
     ownerThread_(NULL),
 #endif
     tempLifoAlloc(TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
     freeLifoAlloc(TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
     execAlloc_(NULL),
     bumpAlloc_(NULL),
-#ifdef JS_METHODJIT
-    jaegerRuntime_(NULL),
-#endif
     ionRuntime_(NULL),
     selfHostingGlobal_(NULL),
     nativeStackBase(0),
     nativeStackQuota(0),
     interpreterFrames(NULL),
     cxCallback(NULL),
     destroyCompartmentCallback(NULL),
     compartmentNameCallback(NULL),
@@ -922,20 +918,16 @@ JSRuntime::init(uint32_t maxbytes)
 
     operationCallbackLock = PR_NewLock();
     if (!operationCallbackLock)
         return false;
 #endif
 
     js::TlsPerThreadData.set(&mainThread);
 
-#ifdef JS_METHODJIT_SPEW
-    JMCheckLogging();
-#endif
-
     if (!js_InitGC(this, maxbytes))
         return false;
 
     if (!gcMarker.init())
         return false;
 
     const char *size = getenv("JSGC_MARK_STACK_LIMIT");
     if (size)
@@ -1045,23 +1037,20 @@ JSRuntime::~JSRuntime()
     js_FinishGC(this);
 #ifdef JS_THREADSAFE
     if (gcLock)
         PR_DestroyLock(gcLock);
 #endif
 
     js_delete(bumpAlloc_);
     js_delete(mathCache_);
-#ifdef JS_METHODJIT
-    js_delete(jaegerRuntime_);
-#endif
 #ifdef JS_ION
     js_delete(ionRuntime_);
 #endif
-    js_delete(execAlloc_);  /* Delete after jaegerRuntime_. */
+    js_delete(execAlloc_);  /* Delete after ionRuntime_. */
 
     if (ionPcScriptCache)
         js_delete(ionPcScriptCache);
 
 #ifdef JSGC_GENERATIONAL
     gcStoreBuffer.disable();
     gcNursery.disable();
 #endif
@@ -1161,17 +1150,17 @@ JS_NewRuntime(uint32_t maxbytes, JSUseHe
 
         js_NewRuntimeWasCalled = JS_TRUE;
     }
 
     JSRuntime *rt = js_new<JSRuntime>(useHelperThreads);
     if (!rt)
         return NULL;
 
-#if defined(JS_METHODJIT) && defined(JS_ION)
+#if defined(JS_ION)
     if (!ion::InitializeIon())
         return NULL;
 #endif
 
     if (!ForkJoinSlice::InitializeTLS())
         return NULL;
 
     if (!rt->init(maxbytes)) {
--- a/js/src/jsarray.cpp
+++ b/js/src/jsarray.cpp
@@ -19,18 +19,16 @@
 #include "jsfun.h"
 #include "jsinterp.h"
 #include "jsiter.h"
 #include "jsnum.h"
 #include "jsobj.h"
 #include "jstypes.h"
 #include "jsutil.h"
 #include "ds/Sort.h"
-#include "methodjit/MethodJIT.h"
-#include "methodjit/StubCalls-inl.h"
 #include "vm/ArgumentsObject.h"
 #include "vm/ForkJoin.h"
 #include "vm/NumericConversions.h"
 #include "vm/Shape.h"
 #include "vm/StringBuffer.h"
 
 #include "jsatominlines.h"
 #include "jscntxtinlines.h"
@@ -2001,25 +1999,16 @@ js::ArrayShiftMoveElements(JSObject *obj
      * At this point the length and initialized length have already been
      * decremented and the result fetched, so just shift the array elements
      * themselves.
      */
     uint32_t initlen = obj->getDenseInitializedLength();
     obj->moveDenseElementsUnbarriered(0, 1, initlen);
 }
 
-#ifdef JS_METHODJIT
-void JS_FASTCALL
-mjit::stubs::ArrayShift(VMFrame &f)
-{
-    JSObject *obj = &f.regs.sp[-1].toObject();
-    ArrayShiftMoveElements(obj);
-}
-#endif /* JS_METHODJIT */
-
 /* ES5 15.4.4.9 */
 JSBool
 js::array_shift(JSContext *cx, unsigned argc, Value *vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
 
     /* Step 1. */
     RootedObject obj(cx, ToObject(cx, args.thisv()));
@@ -2432,17 +2421,17 @@ array_splice(JSContext *cx, unsigned arg
     if (!SetLengthProperty(cx, obj, finalLength))
         return false;
 
     /* Step 17. */
     args.rval().setObject(*arr);
     return true;
 }
 
-#ifdef JS_METHODJIT
+#ifdef JS_ION
 bool
 js::array_concat_dense(JSContext *cx, HandleObject obj1, HandleObject obj2, HandleObject result)
 {
     JS_ASSERT(result->isArray() && obj1->isArray() && obj2->isArray());
 
     uint32_t initlen1 = obj1->getDenseInitializedLength();
     JS_ASSERT(initlen1 == obj1->getArrayLength());
 
@@ -2455,32 +2444,20 @@ js::array_concat_dense(JSContext *cx, Ha
     if (!result->ensureElements(cx, len))
         return false;
 
     JS_ASSERT(!result->getDenseInitializedLength());
     result->setDenseInitializedLength(len);
 
     result->initDenseElements(0, obj1->getDenseElements(), initlen1);
     result->initDenseElements(initlen1, obj2->getDenseElements(), initlen2);
-
     result->setArrayLengthInt32(len);
     return true;
 }
-
-void JS_FASTCALL
-mjit::stubs::ArrayConcatTwoArrays(VMFrame &f)
-{
-    RootedObject result(f.cx, &f.regs.sp[-3].toObject());
-    RootedObject obj1(f.cx, &f.regs.sp[-2].toObject());
-    RootedObject obj2(f.cx, &f.regs.sp[-1].toObject());
-
-    if (!array_concat_dense(f.cx, obj1, obj2, result))
-        THROW();
-}
-#endif /* JS_METHODJIT */
+#endif /* JS_ION */
 
 /*
  * Python-esque sequence operations.
  */
 JSBool
 js::array_concat(JSContext *cx, unsigned argc, Value *vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
@@ -2987,28 +2964,16 @@ js::NewDenseAllocatedArray(JSContext *cx
 
 JSObject * JS_FASTCALL
 js::NewDenseUnallocatedArray(JSContext *cx, uint32_t length, JSObject *proto /* = NULL */,
                              NewObjectKind newKind /* = GenericObject */)
 {
     return NewArray<false>(cx, length, proto, newKind);
 }
 
-#ifdef JS_METHODJIT
-JSObject * JS_FASTCALL
-mjit::stubs::NewDenseUnallocatedArray(VMFrame &f, uint32_t length)
-{
-    JSObject *obj = NewArray<false>(f.cx, length, (JSObject *)f.scratch);
-    if (!obj)
-        THROWV(NULL);
-
-    return obj;
-}
-#endif
-
 JSObject *
 js::NewDenseCopiedArray(JSContext *cx, uint32_t length, HandleObject src, uint32_t elementOffset,
                         JSObject *proto /* = NULL */)
 {
     JS_ASSERT(!src->isIndexed());
 
     JSObject* obj = NewArray<true>(cx, length, proto);
     if (!obj)
--- a/js/src/jscntxt.cpp
+++ b/js/src/jscntxt.cpp
@@ -39,19 +39,16 @@
 #include "jspubtd.h"
 #include "jsscript.h"
 #include "jsstr.h"
 #include "jsworkers.h"
 #ifdef JS_ION
 #include "ion/Ion.h"
 #endif
 
-#ifdef JS_METHODJIT
-# include "methodjit/MethodJIT.h"
-#endif
 #include "gc/Marking.h"
 #include "js/CharacterEncoding.h"
 #include "js/MemoryMetrics.h"
 #include "frontend/ParseMaps.h"
 #include "vm/Shape.h"
 #include "yarr/BumpPointerAllocator.h"
 
 #include "jscntxtinlines.h"
@@ -203,35 +200,16 @@ JSRuntime::createMathCache(JSContext *cx
         js_ReportOutOfMemory(cx);
         return NULL;
     }
 
     mathCache_ = newMathCache;
     return mathCache_;
 }
 
-#ifdef JS_METHODJIT
-mjit::JaegerRuntime *
-JSRuntime::createJaegerRuntime(JSContext *cx)
-{
-    JS_ASSERT(!jaegerRuntime_);
-    JS_ASSERT(cx->runtime == this);
-
-    mjit::JaegerRuntime *jr = js_new<mjit::JaegerRuntime>();
-    if (!jr || !jr->init(cx)) {
-        js_ReportOutOfMemory(cx);
-        js_delete(jr);
-        return NULL;
-    }
-
-    jaegerRuntime_ = jr;
-    return jaegerRuntime_;
-}
-#endif
-
 void
 JSCompartment::sweepCallsiteClones()
 {
     if (callsiteClones.initialized()) {
         for (CallsiteCloneTable::Enum e(callsiteClones); !e.empty(); e.popFront()) {
             CallsiteCloneKey key = e.front().key;
             JSFunction *fun = e.front().value;
             if (!IsScriptMarked(&key.script) || !IsObjectMarked(&fun))
@@ -1160,20 +1138,17 @@ JSContext::JSContext(JSRuntime *rt)
     operationCallback(NULL),
     data(NULL),
     data2(NULL),
 #ifdef JS_THREADSAFE
     outstandingRequests(0),
 #endif
     resolveFlags(0),
     iterValue(MagicValue(JS_NO_ITER_VALUE)),
-#ifdef JS_METHODJIT
-    methodJitEnabled(false),
     jitIsBroken(false),
-#endif
 #ifdef MOZ_TRACE_JSCALLS
     functionCallback(NULL),
 #endif
     innermostGenerator_(NULL),
 #ifdef DEBUG
     stackIterAssertionEnabled(true),
 #endif
     activeCompilations(0)
@@ -1387,17 +1362,16 @@ void
 JSContext::purge()
 {
     if (!activeCompilations) {
         js_delete(parseMapPool_);
         parseMapPool_ = NULL;
     }
 }
 
-#if defined(JS_METHODJIT)
 static bool
 ComputeIsJITBroken()
 {
 #if !defined(ANDROID) || defined(GONK)
     return false;
 #else  // ANDROID
     if (getenv("JS_IGNORE_JIT_BROKENNESS")) {
         return false;
@@ -1458,25 +1432,21 @@ IsJITBrokenHere()
     static bool computedIsBroken = false;
     static bool isBroken = false;
     if (!computedIsBroken) {
         isBroken = ComputeIsJITBroken();
         computedIsBroken = true;
     }
     return isBroken;
 }
-#endif
 
 void
 JSContext::updateJITEnabled()
 {
-#ifdef JS_METHODJIT
     jitIsBroken = IsJITBrokenHere();
-    methodJitEnabled = (options_ & JSOPTION_METHODJIT) && !jitIsBroken;
-#endif
 }
 
 size_t
 JSContext::sizeOfIncludingThis(JSMallocSizeOfFun mallocSizeOf) const
 {
     /*
      * There are other JSContext members that could be measured; the following
      * ones have been found by DMD to be worth measuring.  More stuff may be
--- a/js/src/jscntxt.h
+++ b/js/src/jscntxt.h
@@ -119,20 +119,16 @@ class AutoCycleDetector
 
     bool foundCycle() { return cyclic; }
 };
 
 /* Updates references in the cycle detection set if the GC moves them. */
 extern void
 TraceCycleDetectionSet(JSTracer *trc, ObjectSet &set);
 
-namespace mjit {
-class JaegerRuntime;
-}
-
 class MathCache;
 
 namespace ion {
 class IonRuntime;
 class IonActivation;
 }
 
 class WeakMapBase;
@@ -742,54 +738,38 @@ struct JSRuntime : public JS::shadow::Ru
 
   private:
     /*
      * Both of these allocators are used for regular expression code which is shared at the
      * thread-data level.
      */
     JSC::ExecutableAllocator *execAlloc_;
     WTF::BumpPointerAllocator *bumpAlloc_;
-#ifdef JS_METHODJIT
-    js::mjit::JaegerRuntime *jaegerRuntime_;
-#endif
     js::ion::IonRuntime *ionRuntime_;
 
     JSObject *selfHostingGlobal_;
 
     JSC::ExecutableAllocator *createExecutableAllocator(JSContext *cx);
     WTF::BumpPointerAllocator *createBumpPointerAllocator(JSContext *cx);
-    js::mjit::JaegerRuntime *createJaegerRuntime(JSContext *cx);
     js::ion::IonRuntime *createIonRuntime(JSContext *cx);
 
   public:
     JSC::ExecutableAllocator *getExecAlloc(JSContext *cx) {
         return execAlloc_ ? execAlloc_ : createExecutableAllocator(cx);
     }
     JSC::ExecutableAllocator &execAlloc() {
         JS_ASSERT(execAlloc_);
         return *execAlloc_;
     }
     JSC::ExecutableAllocator *maybeExecAlloc() {
         return execAlloc_;
     }
     WTF::BumpPointerAllocator *getBumpPointerAllocator(JSContext *cx) {
         return bumpAlloc_ ? bumpAlloc_ : createBumpPointerAllocator(cx);
     }
-#ifdef JS_METHODJIT
-    js::mjit::JaegerRuntime *getJaegerRuntime(JSContext *cx) {
-        return jaegerRuntime_ ? jaegerRuntime_ : createJaegerRuntime(cx);
-    }
-    bool hasJaegerRuntime() const {
-        return jaegerRuntime_;
-    }
-    js::mjit::JaegerRuntime &jaegerRuntime() {
-        JS_ASSERT(hasJaegerRuntime());
-        return *jaegerRuntime_;
-    }
-#endif
     js::ion::IonRuntime *getIonRuntime(JSContext *cx) {
         return ionRuntime_ ? ionRuntime_ : createIonRuntime(cx);
     }
     js::ion::IonRuntime *ionRuntime() {
         return ionRuntime_;
     }
     bool hasIonRuntime() const {
         return !!ionRuntime_;
@@ -1739,25 +1719,19 @@ struct JSContext : js::ContextFriendFiel
 #endif
 
     /* Stored here to avoid passing it around as a parameter. */
     unsigned               resolveFlags;
 
     /* Location to stash the iteration value between JSOP_MOREITER and JSOP_ITERNEXT. */
     js::Value           iterValue;
 
-#ifdef JS_METHODJIT
-    bool methodJitEnabled;
     bool jitIsBroken;
 
-    js::mjit::JaegerRuntime &jaegerRuntime() { return runtime->jaegerRuntime(); }
-#endif
-
     inline bool typeInferenceEnabled() const;
-    inline bool jaegerCompilationAllowed() const;
 
     void updateJITEnabled();
 
 #ifdef MOZ_TRACE_JSCALLS
     /* Function entry/exit debugging callback. */
     JSFunctionCallback    functionCallback;
 
     void doFunctionCallback(const JSFunction *fun,
@@ -2152,26 +2126,16 @@ static MOZ_ALWAYS_INLINE bool
 JS_CHECK_OPERATION_LIMIT(JSContext *cx)
 {
     JS_ASSERT_REQUEST_DEPTH(cx);
     return !cx->runtime->interrupt || js_InvokeOperationCallback(cx);
 }
 
 namespace js {
 
-#ifdef JS_METHODJIT
-namespace mjit {
-void ExpandInlineFrames(JS::Zone *zone);
-}
-#endif
-
-} /* namespace js */
-
-namespace js {
-
 /************************************************************************/
 
 static JS_ALWAYS_INLINE void
 MakeRangeGCSafe(Value *vec, size_t len)
 {
     mozilla::PodZero(vec, len);
 }
 
--- a/js/src/jscompartment.cpp
+++ b/js/src/jscompartment.cpp
@@ -655,26 +655,18 @@ JSCompartment::setDebugModeFromC(JSConte
 void
 JSCompartment::updateForDebugMode(FreeOp *fop, AutoDebugModeGC &dmgc)
 {
     for (ContextIter acx(rt); !acx.done(); acx.next()) {
         if (acx->compartment == this)
             acx->updateJITEnabled();
     }
 
-#ifdef JS_METHODJIT
-    bool enabled = debugMode();
-
-    JS_ASSERT_IF(enabled, !hasScriptsOnStack());
-
-    for (gc::CellIter i(zone(), gc::FINALIZE_SCRIPT); !i.done(); i.next()) {
-        JSScript *script = i.get<JSScript>();
-        if (script->compartment() == this)
-            script->debugMode = enabled;
-    }
+#ifdef JS_ION
+    JS_ASSERT_IF(debugMode(), !hasScriptsOnStack());
 
     // When we change a compartment's debug mode, whether we're turning it
     // on or off, we must always throw away all analyses: debug mode
     // affects various aspects of the analysis, which then get baked into
     // SSA results, which affects code generation in complicated ways. We
     // must also throw away all JIT code, as its soundness depends on the
     // analyses.
     //
--- a/js/src/jscompartment.h
+++ b/js/src/jscompartment.h
@@ -406,22 +406,16 @@ class js::AutoDebugModeGC
 };
 
 inline bool
 JSContext::typeInferenceEnabled() const
 {
     return compartment->zone()->types.inferenceEnabled;
 }
 
-inline bool
-JSContext::jaegerCompilationAllowed() const
-{
-    return compartment->zone()->types.jaegerCompilationAllowed;
-}
-
 inline js::Handle<js::GlobalObject*>
 JSContext::global() const
 {
     /*
      * It's safe to use |unsafeGet()| here because any compartment that is
      * on-stack will be marked automatically, so there's no need for a read
      * barrier on it. Once the compartment is popped, the handle is no longer
      * safe to use.
--- a/js/src/jsfun.cpp
+++ b/js/src/jsfun.cpp
@@ -28,20 +28,16 @@
 #include "builtin/Eval.h"
 #include "frontend/BytecodeCompiler.h"
 #include "frontend/TokenStream.h"
 #include "gc/Marking.h"
 #include "vm/Shape.h"
 #include "vm/StringBuffer.h"
 #include "vm/Xdr.h"
 
-#ifdef JS_METHODJIT
-#include "methodjit/MethodJIT.h"
-#endif
-
 #include "jsfuninlines.h"
 #include "jsinferinlines.h"
 #include "jsinterpinlines.h"
 #include "jsobjinlines.h"
 #include "jsscriptinlines.h"
 
 #include "vm/Stack-inl.h"
 
@@ -128,38 +124,16 @@ fun_getProperty(JSContext *cx, HandleObj
         JSScript *script = iter.script();
         ion::ForbidCompilation(cx, script);
 #endif
 
         vp.setObject(*argsobj);
         return true;
     }
 
-#ifdef JS_METHODJIT
-    StackFrame *fp = NULL;
-    if (!iter.isIon())
-        fp = iter.interpFrame();
-
-    if (JSID_IS_ATOM(id, cx->names().caller) && fp && fp->prev()) {
-        /*
-         * If the frame was called from within an inlined frame, mark the
-         * innermost function as uninlineable to expand its frame and allow us
-         * to recover its callee object.
-         */
-        InlinedSite *inlined;
-        jsbytecode *prevpc = fp->prevpc(&inlined);
-        if (inlined) {
-            mjit::JITChunk *chunk = fp->prev()->jit()->chunk(prevpc);
-            JSFunction *fun = chunk->inlineFrames()[inlined->inlineIndex].fun;
-            fun->nonLazyScript()->uninlineable = true;
-            MarkTypeObjectFlags(cx, fun, OBJECT_FLAG_UNINLINEABLE);
-        }
-    }
-#endif
-
     if (JSID_IS_ATOM(id, cx->names().caller)) {
         ++iter;
         if (iter.done() || !iter.isFunctionFrame()) {
             JS_ASSERT(vp.isNull());
             return true;
         }
 
         /* Callsite clones should never escape to script. */
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -53,17 +53,16 @@
 #include "jsscript.h"
 #include "jswatchpoint.h"
 #include "jsweakmap.h"
 
 #include "gc/FindSCCs.h"
 #include "gc/GCInternals.h"
 #include "gc/Marking.h"
 #include "gc/Memory.h"
-#include "methodjit/MethodJIT.h"
 #include "vm/Debugger.h"
 #include "vm/Shape.h"
 #include "vm/String.h"
 #include "vm/ForkJoin.h"
 #include "ion/IonCode.h"
 #ifdef JS_ION
 # include "ion/BaselineJIT.h"
 #endif
@@ -854,22 +853,16 @@ js::SetGCZeal(JSRuntime *rt, uint8_t zea
 {
     if (zeal == 0) {
         if (rt->gcVerifyPreData)
             VerifyBarriers(rt, PreBarrierVerifier);
         if (rt->gcVerifyPostData)
             VerifyBarriers(rt, PostBarrierVerifier);
     }
 
-#ifdef JS_METHODJIT
-    /* In case Zone::compileBarriers() changed... */
-    for (ZonesIter zone(rt); !zone.done(); zone.next())
-        mjit::ClearAllFrames(zone);
-#endif
-
     bool schedule = zeal >= js::gc::ZealAllocValue;
     rt->gcZeal_ = zeal;
     rt->gcZealFrequency = frequency;
     rt->gcNextScheduled = schedule ? frequency : 0;
 }
 
 static bool
 InitGCZeal(JSRuntime *rt)
@@ -4829,47 +4822,41 @@ void PreventGCDuringInteractiveDebug()
     TlsPerThreadData.get()->suppressGC++;
 }
 
 #endif
 
 void
 js::ReleaseAllJITCode(FreeOp *fop)
 {
-#ifdef JS_METHODJIT
+#ifdef JS_ION
     for (ZonesIter zone(fop->runtime()); !zone.done(); zone.next()) {
-        mjit::ClearAllFrames(zone);
-# ifdef JS_ION
-
-#  ifdef DEBUG
+
+# ifdef DEBUG
         /* Assert no baseline scripts are marked as active. */
         for (CellIter i(zone, FINALIZE_SCRIPT); !i.done(); i.next()) {
             JSScript *script = i.get<JSScript>();
             JS_ASSERT_IF(script->hasBaselineScript(), !script->baselineScript()->active());
         }
-#  endif
+# endif
 
         /* Mark baseline scripts on the stack as active. */
         ion::MarkActiveBaselineScripts(zone);
 
         ion::InvalidateAll(fop, zone);
-# endif
 
         for (CellIter i(zone, FINALIZE_SCRIPT); !i.done(); i.next()) {
             JSScript *script = i.get<JSScript>();
-            mjit::ReleaseScriptCode(fop, script);
-# ifdef JS_ION
             ion::FinishInvalidation(fop, script);
 
             /*
              * Discard baseline script if it's not marked as active. Note that
              * this also resets the active flag.
              */
             ion::FinishDiscardBaselineScript(fop, script);
-# endif
         }
     }
 #endif
 }
 
 /*
  * There are three possible PCCount profiling states:
  *
@@ -4967,29 +4954,22 @@ js::PurgePCCounts(JSContext *cx)
     JS_ASSERT(!rt->profilingScripts);
 
     ReleaseScriptCounts(rt->defaultFreeOp());
 }
 
 void
 js::PurgeJITCaches(Zone *zone)
 {
-#ifdef JS_METHODJIT
-    mjit::ClearAllFrames(zone);
-
+#ifdef JS_ION
     for (CellIterUnderGC i(zone, FINALIZE_SCRIPT); !i.done(); i.next()) {
         JSScript *script = i.get<JSScript>();
 
-        /* Discard JM caches. */
-        mjit::PurgeCaches(script);
-
-#ifdef JS_ION
         /* Discard Ion caches. */
         ion::PurgeCaches(script, zone);
-#endif
     }
 #endif
 }
 
 
 void
 ArenaLists::adoptArenas(JSRuntime *rt, ArenaLists *fromArenaLists)
 {
--- a/js/src/jsinfer.cpp
+++ b/js/src/jsinfer.cpp
@@ -21,18 +21,16 @@
 
 #ifdef JS_ION
 #include "ion/BaselineJIT.h"
 #include "ion/Ion.h"
 #include "ion/IonCompartment.h"
 #endif
 #include "gc/Marking.h"
 #include "js/MemoryMetrics.h"
-#include "methodjit/MethodJIT.h"
-#include "methodjit/Retcon.h"
 #include "vm/Shape.h"
 
 #include "jsatominlines.h"
 #include "jsgcinlines.h"
 #include "jsinferinlines.h"
 #include "jsobjinlines.h"
 #include "jsscriptinlines.h"
 
@@ -2338,42 +2336,16 @@ enum RecompileKind {
  * compilation.
  */
 static inline bool
 JITCodeHasCheck(JSScript *script, jsbytecode *pc, RecompileKind kind)
 {
     if (kind == RECOMPILE_NONE)
         return false;
 
-#ifdef JS_METHODJIT
-    for (int constructing = 0; constructing <= 1; constructing++) {
-        for (int barriers = 0; barriers <= 1; barriers++) {
-            mjit::JITScript *jit = script->getJIT((bool) constructing, (bool) barriers);
-            if (!jit)
-                continue;
-            mjit::JITChunk *chunk = jit->chunk(pc);
-            if (!chunk)
-                continue;
-            bool found = false;
-            uint32_t count = (kind == RECOMPILE_CHECK_MONITORED)
-                             ? chunk->nMonitoredBytecodes
-                             : chunk->nTypeBarrierBytecodes;
-            uint32_t *bytecodes = (kind == RECOMPILE_CHECK_MONITORED)
-                                  ? chunk->monitoredBytecodes()
-                                  : chunk->typeBarrierBytecodes();
-            for (size_t i = 0; i < count; i++) {
-                if (bytecodes[i] == uint32_t(pc - script->code))
-                    found = true;
-            }
-            if (!found)
-                return false;
-        }
-    }
-#endif
-
     if (script->hasAnyIonScript() || script->isIonCompilingOffThread())
         return false;
 
     return true;
 }
 
 /*
  * Force recompilation of any jitcode for script at pc, or of any other script
@@ -2399,18 +2371,16 @@ AddPendingRecompile(JSContext *cx, JSScr
     if (info.outputIndex != RecompileInfo::NoCompilerRunning) {
         CompilerOutput *co = info.compilerOutput(cx);
         if (!co) {
             if (script->compartment() != cx->compartment)
                 MOZ_CRASH();
             return;
         }
         switch (co->kind()) {
-          case CompilerOutput::MethodJIT:
-            break;
           case CompilerOutput::Ion:
           case CompilerOutput::ParallelIon:
             if (co->script == script)
                 co->invalidate();
             break;
         }
     }
 
@@ -2813,29 +2783,20 @@ TypeCompartment::processPendingRecompile
         return;
 
     /* Steal the list of scripts to recompile, else we will try to recursively recompile them. */
     Vector<RecompileInfo> *pending = pendingRecompiles;
     pendingRecompiles = NULL;
 
     JS_ASSERT(!pending->empty());
 
-#ifdef JS_METHODJIT
-
-    mjit::ExpandInlineFrames(compartment()->zone());
-
+#ifdef JS_ION
     for (unsigned i = 0; i < pending->length(); i++) {
         CompilerOutput &co = *(*pending)[i].compilerOutput(*this);
         switch (co.kind()) {
-          case CompilerOutput::MethodJIT:
-            JS_ASSERT(co.isValid());
-            mjit::Recompiler::clearStackReferences(fop, co.script);
-            co.mjit()->destroyChunk(fop, co.chunkIndex);
-            JS_ASSERT(co.script == NULL);
-            break;
           case CompilerOutput::Ion:
           case CompilerOutput::ParallelIon:
 # ifdef JS_THREADSAFE
             /*
              * If we are inside transitive compilation, which is a worklist
              * fixpoint algorithm, we need to be re-add invalidated scripts to
              * the worklist.
              */
@@ -2843,20 +2804,18 @@ TypeCompartment::processPendingRecompile
                 transitiveCompilationWorklist->insert(transitiveCompilationWorklist->begin(),
                                                       co.script);
             }
 # endif
             break;
         }
     }
 
-# ifdef JS_ION
     ion::Invalidate(*this, fop, *pending);
-# endif
-#endif /* JS_METHODJIT */
+#endif /* JS_ION */
 
     fop->delete_(pending);
 }
 
 void
 TypeCompartment::setPendingNukeTypes(JSContext *cx)
 {
     TypeZone *zone = &compartment()->zone()->types;
@@ -2893,33 +2852,26 @@ TypeZone::nukeTypes(FreeOp *fop)
         if (comp->types.pendingRecompiles) {
             fop->free_(comp->types.pendingRecompiles);
             comp->types.pendingRecompiles = NULL;
         }
     }
 
     inferenceEnabled = false;
 
-#ifdef JS_METHODJIT
-    mjit::ExpandInlineFrames(zone());
-    mjit::ClearAllFrames(zone());
-# ifdef JS_ION
+#ifdef JS_ION
     ion::InvalidateAll(fop, zone());
-# endif
 
     /* Throw away all JIT code in the compartment, but leave everything else alone. */
 
     for (gc::CellIter i(zone(), gc::FINALIZE_SCRIPT); !i.done(); i.next()) {
         JSScript *script = i.get<JSScript>();
-        mjit::ReleaseScriptCode(fop, script);
-# ifdef JS_ION
         ion::FinishInvalidation(fop, script);
-# endif
-    }
-#endif /* JS_METHODJIT */
+    }
+#endif /* JS_ION */
 
     pendingNukeTypes = false;
 }
 
 void
 TypeCompartment::addPendingRecompile(JSContext *cx, const RecompileInfo &info)
 {
     CompilerOutput *co = info.compilerOutput(cx);
@@ -2932,25 +2884,18 @@ TypeCompartment::addPendingRecompile(JSC
     if (co->isValid())
         CancelOffThreadIonCompile(cx->compartment, co->script);
 
     if (!co->isValid()) {
         JS_ASSERT(co->script == NULL);
         return;
     }
 
-#ifdef JS_METHODJIT
-    mjit::JITScript *jit = co->script->getJIT(co->constructing, co->barriers);
-    bool hasJITCode = jit && jit->chunkDescriptor(co->chunkIndex).chunk;
-
-# if defined(JS_ION)
-    hasJITCode |= !!co->script->hasAnyIonScript();
-# endif
-
-    if (!hasJITCode) {
+#if defined(JS_ION)
+    if (!co->script->hasAnyIonScript()) {
         /* Scripts which haven't been compiled yet don't need to be recompiled. */
         return;
     }
 #endif
 
     if (!pendingRecompiles) {
         pendingRecompiles = cx->new_< Vector<RecompileInfo> >(cx);
         if (!pendingRecompiles) {
@@ -2976,51 +2921,28 @@ TypeCompartment::addPendingRecompile(JSC
 
 void
 TypeCompartment::addPendingRecompile(JSContext *cx, JSScript *script, jsbytecode *pc)
 {
     JS_ASSERT(script);
     if (!constrainedOutputs)
         return;
 
-#ifdef JS_METHODJIT
-    for (int constructing = 0; constructing <= 1; constructing++) {
-        for (int barriers = 0; barriers <= 1; barriers++) {
-            mjit::JITScript *jit = script->getJIT((bool) constructing, (bool) barriers);
-            if (!jit)
-                continue;
-
-            if (pc) {
-                unsigned int chunkIndex = jit->chunkIndex(pc);
-                mjit::JITChunk *chunk = jit->chunkDescriptor(chunkIndex).chunk;
-                if (chunk)
-                    addPendingRecompile(cx, chunk->recompileInfo);
-            } else {
-                for (size_t chunkIndex = 0; chunkIndex < jit->nchunks; chunkIndex++) {
-                    mjit::JITChunk *chunk = jit->chunkDescriptor(chunkIndex).chunk;
-                    if (chunk)
-                        addPendingRecompile(cx, chunk->recompileInfo);
-                }
-            }
-        }
-    }
-
-# ifdef JS_ION
+#ifdef JS_ION
     CancelOffThreadIonCompile(cx->compartment, script);
 
     // Let the script warm up again before attempting another compile.
     if (ion::IsBaselineEnabled(cx))
         script->resetUseCount();
 
     if (script->hasIonScript())
         addPendingRecompile(cx, script->ionScript()->recompileInfo());
 
     if (script->hasParallelIonScript())
         addPendingRecompile(cx, script->parallelIonScript()->recompileInfo());
-# endif
 #endif
 }
 
 void
 TypeCompartment::monitorBytecode(JSContext *cx, JSScript *script, uint32_t offset,
                                  bool returnOnly)
 {
     if (!script->ensureRanInference(cx))
--- a/js/src/jsinfer.h
+++ b/js/src/jsinfer.h
@@ -96,20 +96,16 @@ class RootedBase<TaggedProto> : public T
     friend class TaggedProtoOperations<Rooted<TaggedProto> >;
     const TaggedProto *extract() const {
         return static_cast<const Rooted<TaggedProto> *>(this)->address();
     }
 };
 
 class CallObject;
 
-namespace mjit {
-    struct JITScript;
-}
-
 namespace ion {
     struct IonScript;
 }
 
 namespace types {
 
 /* Type set entry for either a JSObject with singleton type or a non-singleton TypeObject. */
 struct TypeObjectKey {
@@ -1275,17 +1271,16 @@ typedef HashMap<AllocationSiteKey,ReadBa
  * Information about the result of the compilation of a script.  This structure
  * stored in the TypeCompartment is indexed by the RecompileInfo. This
  * indirection enable the invalidation of all constraints related to the same
  * compilation. The compiler output is build by the AutoEnterCompilation.
  */
 struct CompilerOutput
 {
     enum Kind {
-        MethodJIT,
         Ion,
         ParallelIon
     };
 
     JSScript *script;
 
     // This integer will always be a member of CompilerOutput::Kind,
     // but, for portability, bitfields are limited to bool, int, and
@@ -1296,17 +1291,16 @@ struct CompilerOutput
     bool pendingRecompilation : 1;
     uint32_t chunkIndex:27;
 
     CompilerOutput();
 
     Kind kind() const { return static_cast<Kind>(kindInt); }
     void setKind(Kind k) { kindInt = k; }
 
-    mjit::JITScript *mjit() const;
     ion::IonScript *ion() const;
 
     bool isValid() const;
 
     void setPendingRecompilation() {
         pendingRecompilation = true;
     }
     void invalidate() {
--- a/js/src/jsinferinlines.h
+++ b/js/src/jsinferinlines.h
@@ -86,73 +86,48 @@ namespace types {
 
 /////////////////////////////////////////////////////////////////////
 // CompilerOutput & RecompileInfo
 /////////////////////////////////////////////////////////////////////
 
 inline
 CompilerOutput::CompilerOutput()
   : script(NULL),
-    kindInt(MethodJIT),
+    kindInt(Ion),
     constructing(false),
     barriers(false),
     chunkIndex(false)
 {
 }
 
-inline mjit::JITScript *
-CompilerOutput::mjit() const
-{
-#ifdef JS_METHODJIT
-    JS_ASSERT(kind() == MethodJIT && isValid());
-    return script->getJIT(constructing, barriers);
-#else
-    return NULL;
-#endif
-}
-
 inline ion::IonScript *
 CompilerOutput::ion() const
 {
 #ifdef JS_ION
-    JS_ASSERT(kind() != MethodJIT && isValid());
+    JS_ASSERT(isValid());
     switch (kind()) {
-      case MethodJIT: break;
       case Ion: return script->ionScript();
       case ParallelIon: return script->parallelIonScript();
     }
 #endif
     JS_NOT_REACHED("Invalid kind of CompilerOutput");
     return NULL;
 }
 
 inline bool
 CompilerOutput::isValid() const
 {
     if (!script)
         return false;
 
-#if defined(DEBUG) && (defined(JS_METHODJIT) || defined(JS_ION))
+#if defined(DEBUG) && defined(JS_ION)
     TypeCompartment &types = script->compartment()->types;
 #endif
 
     switch (kind()) {
-      case MethodJIT: {
-#ifdef JS_METHODJIT
-        mjit::JITScript *jit = script->getJIT(constructing, barriers);
-        if (!jit)
-            return false;
-        mjit::JITChunk *chunk = jit->chunkDescriptor(chunkIndex).chunk;
-        if (!chunk)
-            return false;
-        JS_ASSERT(this == chunk->recompileInfo.compilerOutput(types));
-        return true;
-#endif
-      }
-
       case Ion:
 #ifdef JS_ION
         if (script->hasIonScript()) {
             JS_ASSERT(this == script->ionScript()->recompileInfo().compilerOutput(types));
             return true;
         }
         if (script->isIonCompilingOffThread())
             return true;
--- a/js/src/jsinterp.cpp
+++ b/js/src/jsinterp.cpp
@@ -33,20 +33,16 @@
 #include "jspropertycache.h"
 #include "jsscript.h"
 #include "jsstr.h"
 
 #include "builtin/Eval.h"
 #include "vm/Debugger.h"
 #include "vm/Shape.h"
 
-#ifdef JS_METHODJIT
-#include "methodjit/MethodJIT.h"
-#include "methodjit/Logging.h"
-#endif
 #include "ion/Ion.h"
 #include "ion/BaselineJIT.h"
 
 #ifdef JS_ION
 #include "ion/IonFrames-inl.h"
 #endif
 
 #include "jsatominlines.h"
@@ -293,19 +289,16 @@ js::ValueToCallable(JSContext *cx, const
 bool
 js::RunScript(JSContext *cx, StackFrame *fp)
 {
     JS_ASSERT(fp == cx->fp());
     RootedScript script(cx, fp->script());
 
     JS_ASSERT_IF(!fp->isGeneratorFrame(), cx->regs().pc == script->code);
     JS_ASSERT_IF(fp->isEvalFrame(), script->isActiveEval);
-#ifdef JS_METHODJIT_SPEW
-    JMCheckLogging();
-#endif
 
     JS_CHECK_RECURSION(cx, return false);
 
     // Check to see if useNewType flag should be set for this frame.
     if (fp->isFunctionFrame() && fp->isConstructing() && !fp->isGeneratorFrame() &&
         cx->typeInferenceEnabled())
     {
         ScriptFrameIter iter(cx);
@@ -364,27 +357,16 @@ js::RunScript(JSContext *cx, StackFrame 
             // from baseline.
             JS_ASSERT(status != ion::IonExec_Bailout);
 
             return !IsErrorStatus(status);
         }
     }
 #endif
 
-#ifdef JS_METHODJIT
-    mjit::CompileStatus status;
-    status = mjit::CanMethodJIT(cx, script, script->code, fp->isConstructing(),
-                                mjit::CompileRequest_Interpreter, fp);
-    if (status == mjit::Compile_Error)
-        return false;
-
-    if (status == mjit::Compile_Okay)
-        return mjit::JaegerStatusToSuccess(mjit::JaegerShot(cx, false));
-#endif
-
     return Interpret(cx, fp) != Interpret_Error;
 }
 
 /*
  * Find a function reference and its 'this' value implicit first parameter
  * under argc arguments on cx's stack, and call the function.  Push missing
  * required arguments, allocate declared local variables, and pop everything
  * when done.  Then push the return value.
@@ -1077,35 +1059,16 @@ js::Interpret(JSContext *cx, StackFrame 
 # define END_CASE_LEN12     len = 12; goto advance_pc;
 # define END_VARLEN_CASE    goto advance_pc;
 # define ADD_EMPTY_CASE(OP) BEGIN_CASE(OP)
 # define END_EMPTY_CASES    goto advance_pc_by_one;
 
 #define LOAD_DOUBLE(PCOFF, dbl)                                               \
     (dbl = script->getConst(GET_UINT32_INDEX(regs.pc + (PCOFF))).toDouble())
 
-#ifdef JS_METHODJIT
-
-#define CHECK_PARTIAL_METHODJIT(status)                                       \
-    JS_BEGIN_MACRO                                                            \
-        switch (status) {                                                     \
-          case mjit::Jaeger_UnfinishedAtTrap:                                 \
-            interpMode = JSINTERP_SKIP_TRAP;                                  \
-            /* FALLTHROUGH */                                                 \
-          case mjit::Jaeger_Unfinished:                                       \
-            op = (JSOp) *regs.pc;                                             \
-            SET_SCRIPT(regs.fp()->script());                                  \
-            if (cx->isExceptionPending())                                     \
-                goto error;                                                   \
-            DO_OP();                                                          \
-          default:;                                                           \
-        }                                                                     \
-    JS_END_MACRO
-#endif
-
     /*
      * Prepare to call a user-supplied branch handler, and abort the script
      * if it returns false.
      */
 #define CHECK_BRANCH()                                                        \
     JS_BEGIN_MACRO                                                            \
         if (cx->runtime->interrupt && !js_HandleExecutionInterrupt(cx))       \
             goto error;                                                       \
@@ -1379,45 +1342,16 @@ BEGIN_CASE(JSOP_LOOPHEAD)
 END_CASE(JSOP_LOOPHEAD)
 
 BEGIN_CASE(JSOP_LABEL)
 END_CASE(JSOP_LABEL)
 
 check_backedge:
 {
     CHECK_BRANCH();
-    if (op != JSOP_LOOPHEAD)
-        DO_OP();
-
-#ifdef JS_METHODJIT
-    // Attempt on-stack replacement with JaegerMonkey code, which is keyed to
-    // the interpreter state at the JSOP_LOOPHEAD at the start of the loop.
-    // Unlike IonMonkey, this requires two different code fragments to perform
-    // hoisting.
-    mjit::CompileStatus status =
-        mjit::CanMethodJIT(cx, script, regs.pc, regs.fp()->isConstructing(),
-                           mjit::CompileRequest_Interpreter, regs.fp());
-    if (status == mjit::Compile_Error)
-        goto error;
-    if (status == mjit::Compile_Okay) {
-        void *ncode =
-            script->nativeCodeForPC(regs.fp()->isConstructing(), regs.pc);
-        JS_ASSERT(ncode);
-        mjit::JaegerStatus status = mjit::JaegerShotAtSafePoint(cx, ncode, true);
-        if (status == mjit::Jaeger_ThrowBeforeEnter)
-            goto error;
-        CHECK_PARTIAL_METHODJIT(status);
-        interpReturnOK = (status == mjit::Jaeger_Returned);
-        if (entryFrame != regs.fp())
-            goto jit_return;
-        regs.fp()->setFinishedInInterpreter();
-        goto leave_on_safe_point;
-    }
-#endif /* JS_METHODJIT */
-
     DO_OP();
 }
 
 BEGIN_CASE(JSOP_LOOPENTRY)
 
 #ifdef JS_ION
     // Attempt on-stack replacement with Ion code. IonMonkey OSR takes place at
     // the point of the initial loop entry, to consolidate hoisted code between
@@ -1551,37 +1485,30 @@ BEGIN_CASE(JSOP_STOP)
             interpReturnOK = ScriptDebugEpilogue(cx, regs.fp(), interpReturnOK);
 
         if (!regs.fp()->isYielding())
             regs.fp()->epilogue(cx);
         else
             Probes::exitScript(cx, script, script->function(), regs.fp());
 
         /* The JIT inlines the epilogue. */
-#if defined(JS_METHODJIT) || defined(JS_ION)
+#if defined(JS_ION)
   jit_return:
 #endif
 
         /* The results of lowered call/apply frames need to be shifted. */
-        bool shiftResult = regs.fp()->loweredCallOrApply();
-
         cx->stack.popInlineFrame(regs);
         SET_SCRIPT(regs.fp()->script());
 
         JS_ASSERT(js_CodeSpec[*regs.pc].format & JOF_INVOKE);
 
         /* Resume execution in the calling frame. */
         if (JS_LIKELY(interpReturnOK)) {
             TypeScript::Monitor(cx, script, regs.pc, regs.sp[-1]);
 
-            if (shiftResult) {
-                regs.sp[-2] = regs.sp[-1];
-                regs.sp--;
-            }
-
             len = JSOP_CALL_LENGTH;
             DO_NEXT_OP(len);
         }
 
         /* Increment pc so that |sp - fp->slots == ReconstructStackDepth(pc)|. */
         regs.pc += JSOP_CALL_LENGTH;
         goto error;
     } else {
@@ -2448,34 +2375,16 @@ BEGIN_CASE(JSOP_FUNCALL)
             JS_ASSERT(exec != ion::IonExec_Bailout);
 
             interpReturnOK = !IsErrorStatus(exec);
             goto jit_return;
         }
     }
 #endif
 
-#ifdef JS_METHODJIT
-    if (!newType && cx->methodJitEnabled) {
-        /* Try to ensure methods are method JIT'd.  */
-        mjit::CompileStatus status = mjit::CanMethodJIT(cx, script, script->code,
-                                                        construct,
-                                                        mjit::CompileRequest_Interpreter,
-                                                        regs.fp());
-        if (status == mjit::Compile_Error)
-            goto error;
-        if (status == mjit::Compile_Okay) {
-            mjit::JaegerStatus status = mjit::JaegerShot(cx, true);
-            CHECK_PARTIAL_METHODJIT(status);
-            interpReturnOK = mjit::JaegerStatusToSuccess(status);
-            goto jit_return;
-        }
-    }
-#endif
-
     if (!regs.fp()->prologue(cx))
         goto error;
     if (cx->compartment->debugMode()) {
         switch (ScriptDebugPrologue(cx, regs.fp())) {
           case JSTRAP_CONTINUE:
             break;
           case JSTRAP_RETURN:
             interpReturnOK = true;
@@ -3391,17 +3300,17 @@ END_CASE(JSOP_ARRAYPUSH)
     if (!regs.fp()->isYielding())
         regs.fp()->epilogue(cx);
     else
         Probes::exitScript(cx, script, script->function(), regs.fp());
     regs.fp()->setFinishedInInterpreter();
 
     gc::MaybeVerifyBarriers(cx, true);
 
-#ifdef JS_METHODJIT
+#ifdef JS_ION
     /*
      * This path is used when it's guaranteed the method can be finished
      * inside the JIT.
      */
   leave_on_safe_point:
 #endif
 
     return interpReturnOK ? Interpret_Ok : Interpret_Error;
--- a/js/src/jsinterpinlines.h
+++ b/js/src/jsinterpinlines.h
@@ -12,17 +12,16 @@
 #include "jscompartment.h"
 #include "jsinfer.h"
 #include "jsinterp.h"
 #include "jslibmath.h"
 #include "jsnum.h"
 #include "jsprobes.h"
 #include "jsstr.h"
 
-#include "methodjit/MethodJIT.h"
 #include "vm/ForkJoin.h"
 
 #include "jsatominlines.h"
 #include "jsfuninlines.h"
 #include "jsinferinlines.h"
 #include "jsopcodeinlines.h"
 #include "jspropertycacheinlines.h"
 #include "jstypedarrayinlines.h"
--- a/js/src/jsmemorymetrics.cpp
+++ b/js/src/jsmemorymetrics.cpp
@@ -248,43 +248,38 @@ StatsCellCallback(JSRuntime *rt, void *d
         break;
       }
 
       case JSTRACE_SCRIPT: {
         JSScript *script = static_cast<JSScript *>(thing);
         CompartmentStats *cStats = GetCompartmentStats(script->compartment());
         cStats->gcHeapScripts += thingSize;
         cStats->scriptData += script->sizeOfData(rtStats->mallocSizeOf_);
-#ifdef JS_METHODJIT
-        cStats->jaegerData += script->sizeOfJitScripts(rtStats->mallocSizeOf_);
-# ifdef JS_ION
+#ifdef JS_ION
         size_t baselineData = 0, baselineStubsFallback = 0;
         ion::SizeOfBaselineData(script, rtStats->mallocSizeOf_, &baselineData,
                                 &baselineStubsFallback);
         cStats->baselineData += baselineData;
         cStats->baselineStubsFallback += baselineStubsFallback;
         cStats->ionData += ion::SizeOfIonData(script, rtStats->mallocSizeOf_);
-# endif
 #endif
 
         ScriptSource *ss = script->scriptSource();
         SourceSet::AddPtr entry = closure->seenSources.lookupForAdd(ss);
         if (!entry) {
             closure->seenSources.add(entry, ss); // Not much to be done on failure.
             rtStats->runtime.scriptSources += ss->sizeOfIncludingThis(rtStats->mallocSizeOf_);
         }
         break;
       }
 
       case JSTRACE_IONCODE: {
-#ifdef JS_METHODJIT
-# ifdef JS_ION
+#ifdef JS_ION
         zStats->gcHeapIonCodes += thingSize;
         // The code for a script is counted in ExecutableAllocator::sizeOfCode().
-# endif
 #endif
         break;
       }
 
       case JSTRACE_TYPE_OBJECT: {
         types::TypeObject *obj = static_cast<types::TypeObject *>(thing);
         zStats->gcHeapTypeObjects += thingSize;
         zStats->typeObjects += obj->sizeOfExcludingThis(rtStats->mallocSizeOf_);
--- a/js/src/jsopcode.cpp
+++ b/js/src/jsopcode.cpp
@@ -1487,19 +1487,16 @@ FindStartPC(JSContext *cx, ScriptFrameIt
      * would compute.
      *
      * FIXME: also fall back if iter.isIonOptimizedJS(), since the stack snapshot
      * may be for the previous pc (see bug 831120).
      */
     if (iter.isIonOptimizedJS())
         return true;
 
-    if (!iter.isIonBaselineJS() && iter.interpFrame()->jitRevisedStack())
-        return true;
-
     *valuepc = NULL;
 
     PCStack pcstack;
     if (!pcstack.init(cx, iter.script(), current))
         return false;
 
     if (spindex < 0 && spindex + pcstack.depth() < 0)
         spindex = JSDVG_SEARCH_STACK;
--- a/js/src/jsprobes.cpp
+++ b/js/src/jsprobes.cpp
@@ -27,53 +27,16 @@ bool Probes::ProfilingActive = true;
 Probes::JITReportGranularity
 Probes::JITGranularityRequested(JSContext *cx)
 {
     if (cx->runtime->spsProfiler.enabled())
         return JITREPORT_GRANULARITY_LINE;
     return JITREPORT_GRANULARITY_NONE;
 }
 
-#ifdef JS_METHODJIT
-
-bool
-Probes::registerMJITCode(JSContext *cx, js::mjit::JITChunk *chunk,
-                         js::mjit::JSActiveFrame *outerFrame,
-                         js::mjit::JSActiveFrame **inlineFrames)
-{
-    if (cx->runtime->spsProfiler.enabled() &&
-        !cx->runtime->spsProfiler.registerMJITCode(chunk, outerFrame, inlineFrames))
-    {
-        return false;
-    }
-
-    return true;
-}
-
-void
-Probes::discardMJITCode(FreeOp *fop, mjit::JITScript *jscr, mjit::JITChunk *chunk, void* address)
-{
-    if (fop->runtime()->spsProfiler.enabled())
-        fop->runtime()->spsProfiler.discardMJITCode(jscr, chunk, address);
-}
-
-bool
-Probes::registerICCode(JSContext *cx,
-                       mjit::JITChunk *chunk, JSScript *script, jsbytecode* pc,
-                       void *start, size_t size)
-{
-    if (cx->runtime->spsProfiler.enabled() &&
-        !cx->runtime->spsProfiler.registerICCode(chunk, script, pc, start, size))
-    {
-        return false;
-    }
-    return true;
-}
-#endif
-
 /* ICs are unregistered in a batch */
 void
 Probes::discardExecutableRegion(void *start, size_t size)
 {
     /*
      * Not needed for SPS because ICs are disposed of when the normal JITChunk
      * is disposed of
      */
--- a/js/src/jsprobes.h
+++ b/js/src/jsprobes.h
@@ -13,22 +13,16 @@
 #include "jspubtd.h"
 #include "jsprvtd.h"
 #include "jscntxt.h"
 #include "jsobj.h"
 #include "jsscript.h"
 
 namespace js {
 
-namespace mjit {
-struct NativeAddressInfo;
-struct JSActiveFrame;
-struct JITChunk;
-}
-
 namespace Probes {
 
 /*
  * Static probes
  *
  * The probe points defined in this file are scattered around the SpiderMonkey
  * source tree. The presence of Probes::someEvent() means that someEvent is
  * about to happen or has happened. To the extent possible, probes should be
@@ -108,40 +102,16 @@ enum JITReportGranularity {
 };
 
 /*
  * Finest granularity of JIT information desired by all watchers.
  */
 JITReportGranularity
 JITGranularityRequested(JSContext *cx);
 
-#ifdef JS_METHODJIT
-/*
- * New method JIT code has been created
- */
-bool
-registerMJITCode(JSContext *cx, js::mjit::JITChunk *chunk,
-                 mjit::JSActiveFrame *outerFrame,
-                 mjit::JSActiveFrame **inlineFrames);
-
-/*
- * Method JIT code is about to be discarded
- */
-void
-discardMJITCode(FreeOp *fop, mjit::JITScript *jscr, mjit::JITChunk *chunk, void* address);
-
-/*
- * IC code has been allocated within the given JITChunk
- */
-bool
-registerICCode(JSContext *cx,
-               mjit::JITChunk *chunk, JSScript *script, jsbytecode* pc,
-               void *start, size_t size);
-#endif /* JS_METHODJIT */
-
 /*
  * A whole region of code has been deallocated, containing any number of ICs.
  * (ICs are unregistered in a batch, so individual ICs are not registered.)
  */
 void
 discardExecutableRegion(void *start, size_t size);
 
 /*
--- a/js/src/jsscript.cpp
+++ b/js/src/jsscript.cpp
@@ -22,20 +22,18 @@
 #include "jsdbgapi.h"
 #include "jsfun.h"
 #include "jsgc.h"
 #include "jsinterp.h"
 #include "jsopcode.h"
 
 #include "gc/Marking.h"
 #include "frontend/BytecodeEmitter.h"
-#include "methodjit/MethodJIT.h"
 #include "ion/IonCode.h"
 #include "ion/BaselineJIT.h"
-#include "methodjit/Retcon.h"
 #include "vm/Debugger.h"
 #include "vm/Shape.h"
 #include "vm/Xdr.h"
 
 #include "jsinferinlines.h"
 #include "jsinterpinlines.h"
 #include "jsobjinlines.h"
 #include "jsscriptinlines.h"
@@ -1837,20 +1835,16 @@ JSScript::fullyInitFromEmitter(JSContext
         bce->regexpList.finish(script->regexps());
     if (bce->constList.length() != 0)
         bce->constList.finish(script->consts());
     script->strict = bce->sc->strict;
     script->explicitUseStrict = bce->sc->hasExplicitUseStrict();
     script->bindingsAccessedDynamically = bce->sc->bindingsAccessedDynamically();
     script->funHasExtensibleScope = funbox ? funbox->hasExtensibleScope() : false;
     script->hasSingletons = bce->hasSingletons;
-#ifdef JS_METHODJIT
-    if (cx->compartment->debugMode())
-        script->debugMode = true;
-#endif
 
     if (funbox) {
         if (funbox->argumentsHasLocalBinding()) {
             // This must precede the script->bindings.transfer() call below
             script->setArgumentsHasVarBinding();
             if (funbox->definitelyNeedsArgsObj())
                 script->setNeedsArgsObj(true);
         } else {
@@ -1981,21 +1975,18 @@ JSScript::finalize(FreeOp *fop)
     fop->runtime()->spsProfiler.onScriptFinalized(this);
 
     if (originPrincipals)
         JS_DropPrincipals(fop->runtime(), originPrincipals);
 
     if (types)
         types->destroy();
 
-#ifdef JS_METHODJIT
-    mjit::ReleaseScriptCode(fop, this);
-# ifdef JS_ION
+#ifdef JS_ION
     ion::DestroyIonScripts(fop, this);
-# endif
 #endif
 
     destroyScriptCounts(fop);
     destroyDebugScript(fop);
     scriptSource_->decref();
 
     if (data) {
         JS_POISON(data, 0xdb, computedSizeOfData());
@@ -2505,23 +2496,16 @@ JSScript::ensureHasDebugScript(JSContext
         frames->enableInterruptsIfRunning(this);
 
     return true;
 }
 
 void
 JSScript::recompileForStepMode(FreeOp *fop)
 {
-#ifdef JS_METHODJIT
-    if (hasMJITInfo()) {
-        mjit::Recompiler::clearStackReferences(fop, this);
-        mjit::ReleaseScriptCode(fop, this);
-    }
-#endif
-
 #ifdef JS_ION
     if (hasBaselineScript())
         baseline->toggleDebugTraps(this, NULL);
 #endif
 }
 
 bool
 JSScript::tryNewStepMode(JSContext *cx, uint32_t newValue)
@@ -2693,26 +2677,16 @@ JSScript::markChildren(JSTracer *trc)
         compartment()->mark();
 
         if (code)
             MarkScriptBytecode(trc->runtime, code);
     }
 
     bindings.trace(trc);
 
-#ifdef JS_METHODJIT
-    for (int constructing = 0; constructing <= 1; constructing++) {
-        for (int barriers = 0; barriers <= 1; barriers++) {
-            mjit::JITScript *jit = getJIT((bool) constructing, (bool) barriers);
-            if (jit)
-                jit->trace(trc);
-        }
-    }
-#endif
-
     if (hasAnyBreakpointsOrStepMode()) {
         for (unsigned i = 0; i < length; i++) {
             BreakpointSite *site = debugScript()->breakpoints[i];
             if (site && site->trapHandler)
                 MarkValue(trc, &site->trapClosure, "trap closure");
         }
     }
 
@@ -2836,24 +2810,16 @@ JSScript::argumentsOptimizationFailed(JS
                 script->needsArgsObj_ = false;
                 return false;
             }
 
             SetFrameArgumentsObject(cx, frame, script, argsobj);
         }
     }
 
-#ifdef JS_METHODJIT
-    if (script->hasMJITInfo()) {
-        mjit::ExpandInlineFrames(cx->zone());
-        mjit::Recompiler::clearStackReferences(cx->runtime->defaultFreeOp(), script);
-        mjit::ReleaseScriptCode(cx->runtime->defaultFreeOp(), script);
-    }
-#endif
-
     if (script->hasAnalysis() && script->analysis()->ranInference()) {
         types::AutoEnterAnalysis enter(cx);
         types::TypeScript::MonitorUnknown(cx, script, script->argumentsBytecode());
     }
 
     return true;
 }
 
--- a/js/src/jsscript.h
+++ b/js/src/jsscript.h
@@ -283,80 +283,16 @@ struct ScriptSource;
 } /* namespace js */
 
 class JSScript : public js::gc::Cell
 {
     static const uint32_t stepFlagMask = 0x80000000U;
     static const uint32_t stepCountMask = 0x7fffffffU;
 
   public:
-#ifdef JS_METHODJIT
-    // This type wraps JITScript.  It has three possible states.
-    // - "Empty": no compilation has been attempted and there is no JITScript.
-    // - "Unjittable": compilation failed and there is no JITScript.
-    // - "Valid": compilation succeeded and there is a JITScript.
-    class JITScriptHandle
-    {
-        // CallCompiler must be a friend because it generates code that uses
-        // UNJITTABLE.
-        friend class js::mjit::CallCompiler;
-
-        // The exact representation:
-        // - NULL means "empty".
-        // - UNJITTABLE means "unjittable".
-        // - Any other value means "valid".
-        // UNJITTABLE = 1 so that we can check that a JITScript is valid
-        // with a single |> 1| test.  It's defined outside the class because
-        // non-integral static const fields can't be defined in the class.
-        static const js::mjit::JITScript *UNJITTABLE;   // = (JITScript *)1;
-        js::mjit::JITScript *value;
-
-      public:
-        JITScriptHandle()       { value = NULL; }
-
-        bool isEmpty()          { return value == NULL; }
-        bool isUnjittable()     { return value == UNJITTABLE; }
-        bool isValid()          { return value  > UNJITTABLE; }
-
-        js::mjit::JITScript *getValid() {
-            JS_ASSERT(isValid());
-            return value;
-        }
-
-        void setEmpty()         { value = NULL; }
-        void setUnjittable()    { value = const_cast<js::mjit::JITScript *>(UNJITTABLE); }
-        void setValid(js::mjit::JITScript *jit) {
-            value = jit;
-            JS_ASSERT(isValid());
-        }
-
-        static void staticAsserts();
-    };
-
-    // All the possible JITScripts that can simultaneously exist for a script.
-    struct JITScriptSet
-    {
-        JITScriptHandle jitHandleNormal;          // JIT info for normal scripts
-        JITScriptHandle jitHandleNormalBarriered; // barriered JIT info for normal scripts
-        JITScriptHandle jitHandleCtor;            // JIT info for constructors
-        JITScriptHandle jitHandleCtorBarriered;   // barriered JIT info for constructors
-
-        static size_t jitHandleOffset(bool constructing, bool barriers) {
-            return constructing
-                ? (barriers
-                   ? offsetof(JITScriptSet, jitHandleCtorBarriered)
-                   : offsetof(JITScriptSet, jitHandleCtor))
-                : (barriers
-                   ? offsetof(JITScriptSet, jitHandleNormalBarriered)
-                   : offsetof(JITScriptSet, jitHandleNormal));
-        }
-    };
-
-#endif  // JS_METHODJIT
-
     //
     // We order fields according to their size in order to avoid wasting space
     // for alignment.
     //
 
     // Larger-than-word-sized fields.
 
   public:
@@ -375,21 +311,16 @@ class JSScript : public js::gc::Cell
     JSCompartment   *compartment_;
     JSPrincipals    *originPrincipals; /* see jsapi.h 'originPrincipals' comment */
 
     /* Persistent type information retained across GCs. */
     js::types::TypeScript *types;
 
   private:
     js::ScriptSource *scriptSource_; /* source code */
-#ifdef JS_METHODJIT
-    JITScriptSet *mJITInfo;
-#else
-    void         *mJITInfoPad;
-#endif
     js::HeapPtrFunction function_;
 
     // For callsite clones, which cannot have enclosing scopes, the original
     // function; otherwise the enclosing scope
     js::HeapPtrObject   enclosingScopeOrOriginalFunction_;
 
     // 32-bit fields.
 
@@ -487,27 +418,22 @@ class JSScript : public js::gc::Cell
 
     /* script is attempted to be cloned anew at each callsite. This is
        temporarily needed for ParallelArray selfhosted code until type
        information can be made context sensitive. See discussion in
        bug 826148. */
     bool            shouldCloneAtCallsite:1;
 
     bool            isCallsiteClone:1; /* is a callsite clone; has a link to the original function */
-#ifdef JS_METHODJIT
-    bool            debugMode:1;      /* script was compiled in debug mode */
+#ifdef JS_ION
     bool            failedBoundsCheck:1; /* script has had hoisted bounds checks fail */
-#else
-    bool            debugModePad:1;
-    bool            failedBoundsCheckPad:1;
-#endif
-#ifdef JS_ION
     bool            failedShapeGuard:1; /* script has had hoisted shape guard fail */
     bool            hadFrequentBailouts:1;
 #else
+    bool            failedBoundsCheckPad:1;
     bool            failedShapeGuardPad:1;
     bool            hadFrequentBailoutsPad:1;
 #endif
     bool            invalidatedIdempotentCache:1; /* idempotent cache has triggered invalidation */
     bool            isGenerator:1;    /* is a generator */
     bool            isGeneratorExp:1; /* is a generator expression */
     bool            hasScriptCounts:1;/* script has an entry in
                                          JSCompartment::scriptCountsMap */
@@ -589,16 +515,20 @@ class JSScript : public js::gc::Cell
   private:
     /* Information attached by Baseline/Ion for sequential mode execution. */
     js::ion::IonScript *ion;
     js::ion::BaselineScript *baseline;
 
     /* Information attached by Ion for parallel mode execution */
     js::ion::IonScript *parallelIon;
 
+#if JS_BITS_PER_WORD == 32
+    uint32_t padding0;
+#endif
+
     /*
      * Pointer to either baseline->method()->raw() or ion->method()->raw(), or NULL
      * if there's no Baseline or Ion script.
      */
     uint8_t *baselineOrIonRaw;
     uint8_t *baselineOrIonSkipArgCheck;
 
   public:
@@ -762,66 +692,23 @@ class JSScript : public js::gc::Cell
      */
     bool enclosingScriptsCompiledSuccessfully() const;
 
   private:
     bool makeTypes(JSContext *cx);
     bool makeBytecodeTypeMap(JSContext *cx);
     bool makeAnalysis(JSContext *cx);
 
-#ifdef JS_METHODJIT
-  private:
-    // CallCompiler must be a friend because it generates code that directly
-    // accesses jitHandleNormal/jitHandleCtor, via jitHandleOffset().
-    friend class js::mjit::CallCompiler;
-
   public:
-    bool hasMJITInfo() {
-        return mJITInfo != NULL;
-    }
-
-    static size_t offsetOfMJITInfo() { return offsetof(JSScript, mJITInfo); }
-
-    inline bool ensureHasMJITInfo(JSContext *cx);
-    inline void destroyMJITInfo(js::FreeOp *fop);
-
-    JITScriptHandle *jitHandle(bool constructing, bool barriers) {
-        JS_ASSERT(mJITInfo);
-        return constructing
-               ? (barriers ? &mJITInfo->jitHandleCtorBarriered : &mJITInfo->jitHandleCtor)
-               : (barriers ? &mJITInfo->jitHandleNormalBarriered : &mJITInfo->jitHandleNormal);
-    }
-
-    js::mjit::JITScript *getJIT(bool constructing, bool barriers) {
-        if (!mJITInfo)
-            return NULL;
-        JITScriptHandle *jith = jitHandle(constructing, barriers);
-        return jith->isValid() ? jith->getValid() : NULL;
-    }
-
-    static void ReleaseCode(js::FreeOp *fop, JITScriptHandle *jith);
-
-    // These methods are implemented in MethodJIT.h.
-    inline void **nativeMap(bool constructing);
-    inline void *nativeCodeForPC(bool constructing, jsbytecode *pc);
-
     uint32_t getUseCount() const  { return useCount; }
     uint32_t incUseCount(uint32_t amount = 1) { return useCount += amount; }
     uint32_t *addressOfUseCount() { return &useCount; }
     static size_t offsetOfUseCount() { return offsetof(JSScript, useCount); }
     void resetUseCount() { useCount = 0; }
 
-    /*
-     * Size of the JITScript and all sections.  If |mallocSizeOf| is NULL, the
-     * size is computed analytically.  (This method is implemented in
-     * MethodJIT.cpp.)
-     */
-    size_t sizeOfJitScripts(JSMallocSizeOfFun mallocSizeOf);
-#endif
-
   public:
     bool initScriptCounts(JSContext *cx);
     js::PCCounts getPCCounts(jsbytecode *pc);
     void addIonCounts(js::ion::IonScriptCounts *ionCounts);
     js::ion::IonScriptCounts *getIonCounts();
     js::ScriptCounts releaseScriptCounts();
     void destroyScriptCounts(js::FreeOp *fop);
 
--- a/js/src/jsscriptinlines.h
+++ b/js/src/jsscriptinlines.h
@@ -152,34 +152,16 @@ JSScript::global() const
 {
     /*
      * A JSScript always marks its compartment's global (via bindings) so we
      * can assert that maybeGlobal is non-null here.
      */
     return *compartment()->maybeGlobal();
 }
 
-#ifdef JS_METHODJIT
-inline bool
-JSScript::ensureHasMJITInfo(JSContext *cx)
-{
-    if (mJITInfo)
-        return true;
-    mJITInfo = cx->new_<JITScriptSet>();
-    return mJITInfo != NULL;
-}
-
-inline void
-JSScript::destroyMJITInfo(js::FreeOp *fop)
-{
-    fop->delete_(mJITInfo);
-    mJITInfo = NULL;
-}
-#endif /* JS_METHODJIT */
-
 inline void
 JSScript::writeBarrierPre(JSScript *script)
 {
 #ifdef JSGC_INCREMENTAL
     if (!script || !script->runtime()->needsBarrier())
         return;
 
     JS::Zone *zone = script->zone();
deleted file mode 100644
--- a/js/src/methodjit/BaseAssembler.h
+++ /dev/null
@@ -1,1560 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#if !defined jsjaeger_baseassembler_h__ && defined JS_METHODJIT
-#define jsjaeger_baseassembler_h__
-
-#include "mozilla/DebugOnly.h"
-
-#include "jscntxt.h"
-#include "assembler/assembler/MacroAssemblerCodeRef.h"
-#include "assembler/assembler/MacroAssembler.h"
-#include "assembler/assembler/LinkBuffer.h"
-#include "assembler/moco/MocoStubs.h"
-#include "methodjit/MethodJIT.h"
-#include "methodjit/MachineRegs.h"
-#include "CodeGenIncludes.h"
-#include "jsobjinlines.h"
-#include "jstypedarrayinlines.h"
-
-#include "vm/Shape-inl.h"
-
-using mozilla::DebugOnly;
-
-namespace js {
-namespace mjit {
-
-class Assembler;
-
-// Represents an int32_t property name in generated code, which must be either
-// a RegisterID or a constant value.
-struct Int32Key {
-    typedef JSC::MacroAssembler::RegisterID RegisterID;
-
-    MaybeRegisterID reg_;
-    int32_t index_;
-
-    Int32Key() : index_(0) { }
-
-    static Int32Key FromRegister(RegisterID reg) {
-        Int32Key key;
-        key.reg_ = reg;
-        return key;
-    }
-    static Int32Key FromConstant(int32_t index) {
-        Int32Key key;
-        key.index_ = index;
-        return key;
-    }
-
-    int32_t index() const {
-        JS_ASSERT(!reg_.isSet());
-        return index_;
-    }
-
-    RegisterID reg() const { return reg_.reg(); }
-    bool isConstant() const { return !reg_.isSet(); }
-};
-
-struct FrameAddress : JSC::MacroAssembler::Address
-{
-    FrameAddress(int32_t offset)
-      : Address(JSC::MacroAssembler::stackPointerRegister, offset)
-    { }
-};
-
-struct ImmIntPtr : public JSC::MacroAssembler::ImmPtr
-{
-    ImmIntPtr(intptr_t val)
-      : ImmPtr(reinterpret_cast<void*>(val))
-    { }
-};
-
-struct StackMarker {
-    uint32_t base;
-    uint32_t bytes;
-
-    StackMarker(uint32_t base, uint32_t bytes)
-      : base(base), bytes(bytes)
-    { }
-};
-
-typedef SPSInstrumentation<Assembler, JSC::MacroAssembler::RegisterID>
-        MJITInstrumentation;
-
-class Assembler : public ValueAssembler
-{
-    struct CallPatch {
-        CallPatch(Call cl, void *fun)
-          : call(cl), fun(fun)
-        { }
-
-        Call call;
-        JSC::FunctionPtr fun;
-    };
-
-    struct DoublePatch {
-        double d;
-        DataLabelPtr label;
-    };
-
-    /* :TODO: OOM */
-    Label startLabel;
-    Vector<CallPatch, 64, SystemAllocPolicy> callPatches;
-    Vector<DoublePatch, 16, SystemAllocPolicy> doublePatches;
-
-    // Registers that can be clobbered during a call sequence.
-    Registers   availInCall;
-
-    // Extra number of bytes that can be used for storing structs/references
-    // across calls.
-    uint32_t    extraStackSpace;
-
-    // Calling convention used by the currently in-progress call.
-    Registers::CallConvention callConvention;
-
-    // Amount of stack space reserved for the currently in-progress call. This
-    // includes alignment and parameters.
-    uint32_t    stackAdjust;
-
-    // Debug flag to make sure calls do not nest.
-#ifdef DEBUG
-    bool        callIsAligned;
-#endif
-
-    // When instrumentation is enabled, these fields are used to manage the
-    // instrumentation which occurs at call() locations. Each call() has to know
-    // the pc of where the call was invoked from, and this can be changing all
-    // the time in the case of masm from Compiler, or never changing in the case
-    // of ICs.
-    MJITInstrumentation *sps;
-    VMFrame *vmframe; // pc tracker from ICs
-    jsbytecode **pc;  // pc tracker for compilers
-
-  public:
-    Assembler(MJITInstrumentation *sps = NULL, VMFrame *vmframe = NULL)
-      : callPatches(SystemAllocPolicy()),
-        availInCall(0),
-        extraStackSpace(0),
-        stackAdjust(0),
-#ifdef DEBUG
-        callIsAligned(false),
-#endif
-        sps(sps),
-        vmframe(vmframe),
-        pc(NULL)
-    {
-        startLabel = label();
-        if (vmframe)
-            sps->setPushed(vmframe->script());
-    }
-
-    Assembler(MJITInstrumentation *sps, jsbytecode **pc)
-      : callPatches(SystemAllocPolicy()),
-        availInCall(0),
-        extraStackSpace(0),
-        stackAdjust(0),
-#ifdef DEBUG
-        callIsAligned(false),
-#endif
-        sps(sps),
-        vmframe(NULL),
-        pc(pc)
-    {
-        startLabel = label();
-    }
-
-    /* Register pair storing returned type/data for calls. */
-#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
-static const JSC::MacroAssembler::RegisterID JSReturnReg_Type  = JSC::X86Registers::edi;
-static const JSC::MacroAssembler::RegisterID JSReturnReg_Data  = JSC::X86Registers::esi;
-static const JSC::MacroAssembler::RegisterID JSParamReg_Argc   = JSC::X86Registers::ecx;
-#elif defined(JS_CPU_ARM)
-static const JSC::MacroAssembler::RegisterID JSReturnReg_Type  = JSC::ARMRegisters::r5;
-static const JSC::MacroAssembler::RegisterID JSReturnReg_Data  = JSC::ARMRegisters::r4;
-static const JSC::MacroAssembler::RegisterID JSParamReg_Argc   = JSC::ARMRegisters::r1;
-#elif defined(JS_CPU_SPARC)
-static const JSC::MacroAssembler::RegisterID JSReturnReg_Type = JSC::SparcRegisters::l2;
-static const JSC::MacroAssembler::RegisterID JSReturnReg_Data = JSC::SparcRegisters::l3;
-static const JSC::MacroAssembler::RegisterID JSParamReg_Argc  = JSC::SparcRegisters::l4;
-#elif defined(JS_CPU_MIPS)
-static const JSC::MacroAssembler::RegisterID JSReturnReg_Type = JSC::MIPSRegisters::a0;
-static const JSC::MacroAssembler::RegisterID JSReturnReg_Data = JSC::MIPSRegisters::a2;
-static const JSC::MacroAssembler::RegisterID JSParamReg_Argc  = JSC::MIPSRegisters::a1;
-#endif
-
-    size_t distanceOf(Label l) {
-        return differenceBetween(startLabel, l);
-    }
-
-    void loadPtrFromImm(void *ptr, RegisterID reg) {
-        loadPtr(ptr, reg);
-    }
-
-    void loadShape(RegisterID obj, RegisterID shape) {
-        loadPtr(Address(obj, JSObject::offsetOfShape()), shape);
-    }
-
-    Jump guardShape(RegisterID objReg, Shape *shape) {
-        return branchPtr(NotEqual, Address(objReg, JSObject::offsetOfShape()), ImmPtr(shape));
-    }
-
-    Jump guardShape(RegisterID objReg, JSObject *obj) {
-        return guardShape(objReg, obj->lastProperty());
-    }
-
-    /*
-     * Finds and returns the address of a known object and slot.
-     */
-    Address objSlotRef(JSObject *obj, RegisterID reg, uint32_t slot) {
-        move(ImmPtr(obj), reg);
-        if (obj->isFixedSlot(slot)) {
-            return Address(reg, JSObject::getFixedSlotOffset(slot));
-        } else {
-            loadPtr(Address(reg, JSObject::offsetOfSlots()), reg);
-            return Address(reg, obj->dynamicSlotIndex(slot) * sizeof(Value));
-        }
-    }
-
-#ifdef JS_CPU_X86
-    void idiv(RegisterID reg) {
-        m_assembler.cdq();
-        m_assembler.idivl_r(reg);
-    }
-
-    void fastLoadDouble(RegisterID lo, RegisterID hi, FPRegisterID fpReg) {
-        JS_ASSERT(fpReg != Registers::FPConversionTemp);
-        if (MacroAssemblerX86Common::getSSEState() >= HasSSE4_1) {
-            m_assembler.movd_rr(lo, fpReg);
-            m_assembler.pinsrd_rr(hi, fpReg);
-        } else {
-            m_assembler.movd_rr(lo, fpReg);
-            m_assembler.movd_rr(hi, Registers::FPConversionTemp);
-            m_assembler.unpcklps_rr(Registers::FPConversionTemp, fpReg);
-        }
-    }
-#endif
-
-    /*
-     * Move a register pair which may indicate either an int32_t or double into fpreg,
-     * converting to double in the int32_t case.
-     */
-    void moveInt32OrDouble(RegisterID data, RegisterID type, Address address, FPRegisterID fpreg)
-    {
-#ifdef JS_CPU_X86
-        fastLoadDouble(data, type, fpreg);
-        Jump notInteger = testInt32(Assembler::NotEqual, type);
-        convertInt32ToDouble(data, fpreg);
-        notInteger.linkTo(label(), this);
-#else
-        Jump notInteger = testInt32(Assembler::NotEqual, type);
-        convertInt32ToDouble(data, fpreg);
-        Jump fallthrough = jump();
-        notInteger.linkTo(label(), this);
-
-        /* Store the components, then read it back out as a double. */
-        storeValueFromComponents(type, data, address);
-        loadDouble(address, fpreg);
-
-        fallthrough.linkTo(label(), this);
-#endif
-    }
-
-    /*
-     * Move a memory address which contains either an int32_t or double into fpreg,
-     * converting to double in the int32_t case.
-     */
-    template <typename T>
-    void moveInt32OrDouble(T address, FPRegisterID fpreg)
-    {
-        Jump notInteger = testInt32(Assembler::NotEqual, address);
-        convertInt32ToDouble(payloadOf(address), fpreg);
-        Jump fallthrough = jump();
-        notInteger.linkTo(label(), this);
-        loadDouble(address, fpreg);
-        fallthrough.linkTo(label(), this);
-    }
-
-    /* Ensure that an in-memory address is definitely a double. */
-    void ensureInMemoryDouble(Address address)
-    {
-        Jump notInteger = testInt32(Assembler::NotEqual, address);
-        convertInt32ToDouble(payloadOf(address), Registers::FPConversionTemp);
-        storeDouble(Registers::FPConversionTemp, address);
-        notInteger.linkTo(label(), this);
-    }
-
-    void negateDouble(FPRegisterID fpreg)
-    {
-#if defined JS_CPU_X86 || defined JS_CPU_X64
-        static const uint64_t DoubleNegMask = 0x8000000000000000ULL;
-        loadDouble(&DoubleNegMask, Registers::FPConversionTemp);
-        xorDouble(Registers::FPConversionTemp, fpreg);
-#elif defined JS_CPU_ARM || defined JS_CPU_SPARC || defined JS_CPU_MIPS
-        negDouble(fpreg, fpreg);
-#endif
-    }
-
-    /* Prepare for a call that might THROW. */
-    void *getFallibleCallTarget(void *fun) {
-#ifdef JS_CPU_ARM
-        /*
-         * Insert a veneer for ARM to allow it to catch exceptions. There is no
-         * reliable way to determine the location of the return address on the
-         * stack, so a typical C(++) return address cannot be hijacked.
-         *
-         * We put the real target address into IP, as this won't conflict with
-         * the EABI argument-passing mechanism. JaegerStubVeneer is responsible
-         * for calling 'fun' (in IP) and catching exceptions.
-         *
-         * Note that we must use 'moveWithPatch' here, rather than 'move',
-         * because 'move' might try to optimize the constant load, and we need a
-         * consistent code sequence for patching.
-         */
-        moveWithPatch(Imm32(intptr_t(fun)), JSC::ARMRegisters::ip);
-
-        return JS_FUNC_TO_DATA_PTR(void *, JaegerStubVeneer);
-#elif defined(JS_CPU_SPARC)
-        /*
-         * We can simulate the situation in jited code to let call return to a
-         * target address located on stack without veneer. We record the return
-         * address and jump to that address after call return to jited code. The
-         * reason we take veneer back is jited code maybe released when
-         * exceptions happened. That will make the call have no chance to return
-         * back to jited code.
-         */
-        moveWithPatch(Imm32(intptr_t(fun)), JSC::SparcRegisters::i0);
-        return JS_FUNC_TO_DATA_PTR(void *, JaegerStubVeneer);
-#elif defined(JS_CPU_MIPS)
-        /*
-         * For MIPS, we need to call JaegerStubVeneer by passing
-         * the real target address in v0.
-         */
-        moveWithPatch(Imm32(intptr_t(fun)), JSC::MIPSRegisters::v0);
-        return JS_FUNC_TO_DATA_PTR(void *, JaegerStubVeneer);
-#else
-        /*
-         * Architectures that push the return address to an easily-determined
-         * location on the stack can hijack C++'s return mechanism by overwriting
-         * that address, so a veneer is not required.
-         */
-        return fun;
-#endif
-    }
-
-    static inline uint32_t align(uint32_t bytes, uint32_t alignment) {
-        return (alignment - (bytes % alignment)) % alignment;
-    }
-
-    // Specifies extra stack space that is available across a call, for storing
-    // large parameters (structs) or returning values via references. All extra
-    // stack space must be reserved up-front, and is aligned on an 8-byte
-    // boundary.
-    //
-    // Returns an offset that can be used to index into this stack
-    StackMarker allocStack(uint32_t bytes, uint32_t alignment = 4) {
-        bytes += align(bytes + extraStackSpace, alignment);
-        subPtr(Imm32(bytes), stackPointerRegister);
-        extraStackSpace += bytes;
-        return StackMarker(extraStackSpace, bytes);
-    }
-
-    // Similar to allocStack(), but combines it with a push().
-    void saveReg(RegisterID reg) {
-        push(reg);
-        extraStackSpace += sizeof(void *);
-    }
-
-    // Similar to freeStack(), but combines it with a pop().
-    void restoreReg(RegisterID reg) {
-        JS_ASSERT(extraStackSpace >= sizeof(void *));
-        extraStackSpace -= sizeof(void *);
-        pop(reg);
-    }
-
-#if defined JS_CPU_MIPS
-    static const uint32_t StackAlignment = 8;
-#else
-    static const uint32_t StackAlignment = 16;
-#endif
-
-    static inline uint32_t alignForCall(uint32_t stackBytes) {
-#if defined(JS_CPU_X86) || defined(JS_CPU_X64) || defined(JS_CPU_MIPS)
-        // If StackAlignment is a power of two, % is just two shifts.
-        // 16 - (x % 16) gives alignment, extra % 16 handles total == 0.
-        return align(stackBytes, StackAlignment);
-#else
-        return 0;
-#endif
-    }
-
-    // Some platforms require stack manipulation before making stub calls.
-    // When using THROW/V, the return address is replaced, meaning the
-    // stack de-adjustment will not have occured. JaegerThrowpoline accounts
-    // for this. For stub calls, which are always invoked as if they use
-    // two parameters, the stack adjustment is constant.
-    //
-    // When using callWithABI() manually, for example via an IC, it might
-    // be necessary to jump directly to JaegerThrowpoline. In this case,
-    // the constant is provided here in order to appropriately adjust the
-    // stack.
-#ifdef _WIN64
-    static const uint32_t ReturnStackAdjustment = 32;
-#elif defined(JS_CPU_X86) && defined(JS_NO_FASTCALL)
-    static const uint32_t ReturnStackAdjustment = 16;
-#else
-    static const uint32_t ReturnStackAdjustment = 0;
-#endif
-
-    void throwInJIT() {
-        if (ReturnStackAdjustment)
-            subPtr(Imm32(ReturnStackAdjustment), stackPointerRegister);
-        move(ImmPtr(JS_FUNC_TO_DATA_PTR(void *, JaegerThrowpoline)), Registers::ReturnReg);
-        jump(Registers::ReturnReg);
-    }
-
-    // Windows x64 requires extra space in between calls.
-#ifdef _WIN64
-    static const uint32_t ShadowStackSpace = 32;
-#elif defined(JS_CPU_SPARC)
-    static const uint32_t ShadowStackSpace = 92;
-#else
-    static const uint32_t ShadowStackSpace = 0;
-#endif
-
-#if defined(JS_CPU_SPARC)
-    static const uint32_t BaseStackSpace = 104;
-#else
-    static const uint32_t BaseStackSpace = 0;
-#endif
-
-    // Prepare the stack for a call sequence. This must be called AFTER all
-    // volatile regs have been saved, and BEFORE pushArg() is used. The stack
-    // is assumed to be aligned to 16-bytes plus any pushes that occured via
-    // saveRegs().
-    //
-    // During a call sequence all registers are "owned" by the Assembler.
-    // Attempts to perform loads, nested calls, or anything that can clobber
-    // a register, is asking for breaking on some platform or some situation.
-    // Be careful to limit to storeArg() during setupABICall.
-    void setupABICall(Registers::CallConvention convention, uint32_t generalArgs) {
-        if (sps && sps->enabled())
-            leaveBeforeCall();
-
-        JS_ASSERT(!callIsAligned);
-
-        uint32_t numArgRegs = Registers::numArgRegs(convention);
-        uint32_t pushCount = (generalArgs > numArgRegs)
-                           ? generalArgs - numArgRegs
-                           : 0;
-
-        // Assume all temporary regs are available to clobber.
-        availInCall = Registers::TempRegs;
-
-        // Find the total number of bytes the stack will have been adjusted by,
-        // in order to compute alignment.
-        uint32_t total = (pushCount * sizeof(void *)) +
-                       extraStackSpace;
-
-        stackAdjust = (pushCount * sizeof(void *)) +
-                      alignForCall(total);
-
-#ifdef _WIN64
-        // Windows x64 ABI requires 32 bytes of "shadow space" for the callee
-        // to spill its parameters.
-        stackAdjust += ShadowStackSpace;
-#endif
-
-        if (stackAdjust)
-            subPtr(Imm32(stackAdjust), stackPointerRegister);
-
-        callConvention = convention;
-#ifdef DEBUG
-        callIsAligned = true;
-#endif
-    }
-
-    // Computes an interior pointer into VMFrame during a call.
-    Address vmFrameOffset(uint32_t offs) {
-        return Address(stackPointerRegister, stackAdjust + extraStackSpace + offs);
-    }
-
-    // Get an Address to the extra space already allocated before the call.
-    Address addressOfExtra(const StackMarker &marker) {
-        // Stack looks like this:
-        //   extraStackSpace
-        //   stackAdjust
-        // To get to the requested offset into extraStackSpace, we can walk
-        // up to the top of the extra stack space, then subtract |offs|.
-        //
-        // Note that it's not required we're in a call - stackAdjust can be 0.
-        JS_ASSERT(marker.base <= extraStackSpace);
-        return Address(stackPointerRegister, BaseStackSpace + stackAdjust + extraStackSpace - marker.base);
-    }
-
-    // This is an internal function only for use inside a setupABICall(),
-    // callWithABI() sequence, and only for arguments known to fit in
-    // registers.
-    Address addressOfArg(uint32_t i) {
-        uint32_t numArgRegs = Registers::numArgRegs(callConvention);
-        JS_ASSERT(i >= numArgRegs);
-
-        // Note that shadow space is for the callee to spill, and thus it must
-        // be skipped when writing its arguments.
-        int32_t spOffset = ((i - numArgRegs) * sizeof(void *)) + ShadowStackSpace;
-        return Address(stackPointerRegister, spOffset);
-    }
-
-    // Push an argument for a call.
-    void storeArg(uint32_t i, RegisterID reg) {
-        JS_ASSERT(callIsAligned);
-        RegisterID to;
-        if (Registers::regForArg(callConvention, i, &to)) {
-            if (reg != to)
-                move(reg, to);
-            availInCall.takeRegUnchecked(to);
-        } else {
-            storePtr(reg, addressOfArg(i));
-        }
-    }
-
-    // This variant can clobber temporary registers. However, it will NOT
-    // clobber any registers that have already been set via storeArg().
-    void storeArg(uint32_t i, Address address) {
-        JS_ASSERT(callIsAligned);
-        RegisterID to;
-        if (Registers::regForArg(callConvention, i, &to)) {
-            loadPtr(address, to);
-            availInCall.takeRegUnchecked(to);
-        } else if (!availInCall.empty()) {
-            // Memory-to-memory, and there is a temporary register free.
-            RegisterID reg = availInCall.takeAnyReg().reg();
-            loadPtr(address, reg);
-            storeArg(i, reg);
-            availInCall.putReg(reg);
-        } else {
-            // Memory-to-memory, but no temporary registers are free.
-            // This shouldn't happen on any platforms, because
-            // (TempRegs) Union (ArgRegs) != 0
-            JS_NOT_REACHED("too much reg pressure");
-        }
-    }
-
-    // This variant can clobber temporary registers. However, it will NOT
-    // clobber any registers that have already been set via storeArg().
-    void storeArgAddr(uint32_t i, Address address) {
-        JS_ASSERT(callIsAligned);
-        RegisterID to;
-        if (Registers::regForArg(callConvention, i, &to)) {
-            lea(address, to);
-            availInCall.takeRegUnchecked(to);
-        } else if (!availInCall.empty()) {
-            // Memory-to-memory, and there is a temporary register free.
-            RegisterID reg = availInCall.takeAnyReg().reg();
-            lea(address, reg);
-            storeArg(i, reg);
-            availInCall.putReg(reg);
-        } else {
-            // Memory-to-memory, but no temporary registers are free.
-            // This shouldn't happen on any platforms, because
-            // (TempRegs) Union (ArgRegs) != 0
-            JS_NOT_REACHED("too much reg pressure");
-        }
-    }
-
-    void storeArg(uint32_t i, ImmPtr imm) {
-        JS_ASSERT(callIsAligned);
-        RegisterID to;
-        if (Registers::regForArg(callConvention, i, &to)) {
-            move(imm, to);
-            availInCall.takeRegUnchecked(to);
-        } else {
-            storePtr(imm, addressOfArg(i));
-        }
-    }
-
-    void storeArg(uint32_t i, Imm32 imm) {
-        JS_ASSERT(callIsAligned);
-        RegisterID to;
-        if (Registers::regForArg(callConvention, i, &to)) {
-            move(imm, to);
-            availInCall.takeRegUnchecked(to);
-        } else {
-            store32(imm, addressOfArg(i));
-        }
-    }
-
-  private:
-    // When profiling is enabled, we need to run an epilogue and a prologue to
-    // every call. These two functions manage this code generation and are
-    // called from callWithABI below.
-    void leaveBeforeCall() {
-        jsbytecode *pc = vmframe == NULL ? *this->pc : vmframe->pc();
-        if (availInCall.empty()) {
-            RegisterID reg = Registers(Registers::TempRegs).peekReg().reg();
-            saveReg(reg);
-            sps->leave(pc, *this, reg);
-            restoreReg(reg);
-        } else {
-            sps->leave(pc, *this, availInCall.peekReg().reg());
-        }
-    }
-
-    void reenterAfterCall() {
-        if (availInCall.empty()) {
-            RegisterID reg = Registers(Registers::TempRegs).peekReg().reg();
-            saveReg(reg);
-            sps->reenter(*this, reg);
-            restoreReg(reg);
-        } else {
-            sps->reenter(*this, availInCall.peekReg().reg());
-        }
-    }
-
-  public:
-    // High-level call helper, given an optional function pointer and a
-    // calling convention. setupABICall() must have been called beforehand,
-    // as well as each numbered argument stored with storeArg().
-    //
-    // After callWithABI(), the call state is reset, so a new call may begin.
-    Call callWithABI(void *fun, bool canThrow) {
-#ifdef JS_CPU_ARM
-        // the repatcher requires that these instructions are adjacent in
-        // memory, make sure that they are in fact adjacent.
-        // Theoretically, this requires only 12 bytes of space, however
-        // there are at least a couple of off-by-one errors that I've noticed
-        // that make 12 insufficent.  In case 16 is also insufficent, I've bumped
-        // it to 20.
-        ensureSpace(20);
-        DebugOnly<int> initFlushCount = flushCount();
-#endif
-        // [Bug 614953]: This can only be made conditional once the ARM back-end
-        // is able to distinguish and patch both call sequences. Other
-        // architecutres are unaffected regardless.
-        //if (canThrow) {
-            // Some platforms (such as ARM) require a call veneer if the target
-            // might THROW. For other platforms, getFallibleCallTarget does
-            // nothing.
-            fun = getFallibleCallTarget(fun);
-        //}
-
-        JS_ASSERT(callIsAligned);
-
-        Call cl = callAddress(fun);
-
-#ifdef JS_CPU_ARM
-        JS_ASSERT(initFlushCount == flushCount());
-#endif
-        if (sps && sps->enabled())
-            reenterAfterCall();
-        if (stackAdjust)
-            addPtr(Imm32(stackAdjust), stackPointerRegister);
-
-        stackAdjust = 0;
-
-#ifdef DEBUG
-        callIsAligned = false;
-#endif
-        return cl;
-    }
-
-    Call callAddress(void *ptr) {
-        Call cl = call();
-        callPatches.append(CallPatch(cl, ptr));
-        return cl;
-    }
-
-    // Frees stack space allocated by allocStack().
-    void freeStack(const StackMarker &mark) {
-        JS_ASSERT(!callIsAligned);
-        JS_ASSERT(mark.bytes <= extraStackSpace);
-
-        extraStackSpace -= mark.bytes;
-        addPtr(Imm32(mark.bytes), stackPointerRegister);
-    }
-
-    // Wrap AbstractMacroAssembler::getLinkerCallReturnOffset which is protected.
-    unsigned callReturnOffset(Call call) {
-        return getLinkerCallReturnOffset(call);
-    }
-
-
-#define STUB_CALL_TYPE(type)                                                  \
-    Call callWithVMFrame(bool inlining, type stub, jsbytecode *pc,            \
-                         DataLabelPtr *pinlined, uint32_t fd) {               \
-        return fallibleVMCall(inlining, JS_FUNC_TO_DATA_PTR(void *, stub),    \
-                              pc, pinlined, fd);                              \
-    }
-
-    STUB_CALL_TYPE(JSObjStub)
-    STUB_CALL_TYPE(VoidPtrStubUInt32)
-    STUB_CALL_TYPE(VoidStubUInt32)
-    STUB_CALL_TYPE(VoidStub)
-
-#undef STUB_CALL_TYPE
-
-    void setupFrameDepth(int32_t frameDepth) {
-        // |frameDepth < 0| implies ic::SplatApplyArgs has been called which
-        // means regs.sp has already been set in the VMFrame.
-        if (frameDepth >= 0) {
-            // sp = fp->slots() + frameDepth
-            // regs->sp = sp
-            addPtr(Imm32(sizeof(StackFrame) + frameDepth * sizeof(jsval)),
-                   JSFrameReg,
-                   Registers::ClobberInCall);
-            storePtr(Registers::ClobberInCall, FrameAddress(VMFrame::offsetOfRegsSp()));
-        }
-    }
-
-    void setupInfallibleVMFrame(int32_t frameDepth) {
-        setupFrameDepth(frameDepth);
-
-        // The JIT has moved Arg1 already, and we've guaranteed to not clobber
-        // it. Move ArgReg0 into place now. setupFallibleVMFrame will not
-        // clobber it either.
-        move(MacroAssembler::stackPointerRegister, Registers::ArgReg0);
-    }
-
-    void setupFallibleVMFrame(bool inlining, jsbytecode *pc,
-                              DataLabelPtr *pinlined, int32_t frameDepth) {
-        setupInfallibleVMFrame(frameDepth);
-
-        // regs->fp = fp
-        storePtr(JSFrameReg, FrameAddress(VMFrame::offsetOfFp));
-
-        // PC -> regs->pc :(  Note: If pc is null, we are emitting a trampoline,
-        // so regs->pc is already correct.
-        if (pc)
-            storePtr(ImmPtr(pc), FrameAddress(VMFrame::offsetOfRegsPc()));
-
-        if (inlining) {
-            // inlined -> regs->inlined :(
-            DataLabelPtr ptr = storePtrWithPatch(ImmPtr(NULL),
-                                                 FrameAddress(VMFrame::offsetOfInlined));
-            if (pinlined)
-                *pinlined = ptr;
-        }
-
-        restoreStackBase();
-    }
-
-    void setupFallibleABICall(bool inlining, jsbytecode *pc, int32_t frameDepth) {
-        setupFrameDepth(frameDepth);
-
-        /* Store fp and pc */
-        storePtr(JSFrameReg, FrameAddress(VMFrame::offsetOfFp));
-        storePtr(ImmPtr(pc), FrameAddress(VMFrame::offsetOfRegsPc()));
-
-        if (inlining) {
-            /* ABI calls cannot be made from inlined frames. */
-            storePtr(ImmPtr(NULL), FrameAddress(VMFrame::offsetOfInlined));
-        }
-    }
-
-    void restoreStackBase() {
-#if defined(JS_CPU_X86)
-        /*
-         * We use the %ebp base stack pointer on x86 to store the JSStackFrame.
-         * Restore this before calling so that debuggers can construct a
-         * coherent stack if we crash outside of JIT code.
-         */
-        JS_STATIC_ASSERT(JSFrameReg == JSC::X86Registers::ebp);
-        move(JSC::X86Registers::esp, JSFrameReg);
-        addPtr(Imm32(VMFrame::STACK_BASE_DIFFERENCE), JSFrameReg);
-#endif
-    }
-
-    // An infallible VM call is a stub call (taking a VMFrame & and one
-    // optional parameter) that does not need |pc| and |fp| updated, since
-    // the call is guaranteed to not fail. However, |sp| is always coherent.
-    Call infallibleVMCall(void *ptr, int32_t frameDepth) {
-        setupInfallibleVMFrame(frameDepth);
-        return wrapVMCall(ptr);
-    }
-
-    // A fallible VM call is a stub call (taking a VMFrame & and one optional
-    // parameter) that needs the entire VMFrame to be coherent, meaning that
-    // |pc|, |inlined| and |fp| are guaranteed to be up-to-date.
-    //
-    // If |pc| is null, the caller guarantees that the current regs->pc may be
-    // trusted. This is the case for a single debug-only path; see
-    // generateForceReturn.
-    Call fallibleVMCall(bool inlining, void *ptr, jsbytecode *pc,
-                        DataLabelPtr *pinlined, int32_t frameDepth) {
-        setupFallibleVMFrame(inlining, pc, pinlined, frameDepth);
-        Call call = wrapVMCall(ptr);
-
-        // Restore the frame pointer from the VM.
-        loadPtr(FrameAddress(VMFrame::offsetOfFp), JSFrameReg);
-
-        return call;
-    }
-
-    Call wrapVMCall(void *ptr) {
-        JS_ASSERT(!callIsAligned);
-
-        // Every stub call has at most two arguments.
-        setupABICall(Registers::FastCall, 2);
-
-        // On x86, if JS_NO_FASTCALL is present, these will result in actual
-        // pushes to the stack, which the caller will clean up. Otherwise,
-        // they'll be ignored because the registers fit into the calling
-        // sequence.
-        storeArg(0, Registers::ArgReg0);
-        storeArg(1, Registers::ArgReg1);
-
-        // [Bug 614953]: The second argument, 'canThrow', can be set to 'false'
-        // for infallibleVMCall invocations. However, this changes the call
-        // sequence on ARM, and the ARM repatcher cannot currently distinguish
-        // between the two sequences. The argument does not affect the code
-        // generated by x86 or amd64.
-        return callWithABI(ptr, true);
-    }
-
-    // Constant doubles can't be directly moved into a register, we need to put
-    // them in memory and load them back with.
-    void slowLoadConstantDouble(double d, FPRegisterID fpreg) {
-        DoublePatch patch;
-        patch.d = d;
-        patch.label = loadDouble(NULL, fpreg);
-        doublePatches.append(patch);
-    }
-
-    size_t numDoubles() { return doublePatches.length(); }
-
-    void finalize(JSC::LinkBuffer &linker, double *doubleVec = NULL) {
-        for (size_t i = 0; i < callPatches.length(); i++) {
-            CallPatch &patch = callPatches[i];
-            linker.link(patch.call, JSC::FunctionPtr(patch.fun));
-        }
-        for (size_t i = 0; i < doublePatches.length(); i++) {
-            DoublePatch &patch = doublePatches[i];
-            doubleVec[i] = patch.d;
-            linker.patch(patch.label, &doubleVec[i]);
-        }
-    }
-
-    struct FastArrayLoadFails {
-        Jump rangeCheck;
-        Jump holeCheck;
-    };
-
-    // Guard an extent (capacity, length or initialized length) on an array or typed array.
-    Jump guardArrayExtent(int offset, RegisterID reg,
-                          const Int32Key &key, Condition cond) {
-        Address extent(reg, offset);
-        if (key.isConstant())
-            return branch32(cond, extent, Imm32(key.index()));
-        return branch32(cond, extent, key.reg());
-    }
-
-    Jump guardElementNotHole(RegisterID elements, const Int32Key &key) {
-        Jump jmp;
-
-        if (key.isConstant()) {
-            Address slot(elements, key.index() * sizeof(Value));
-            jmp = guardNotHole(slot);
-        } else {
-            BaseIndex slot(elements, key.reg(), JSVAL_SCALE);
-            jmp = guardNotHole(slot);
-        }
-
-        return jmp;
-    }
-
-    // Load a jsval from an array slot, given a key. |objReg| is clobbered.
-    FastArrayLoadFails fastArrayLoad(RegisterID objReg, const Int32Key &key,
-                                     RegisterID typeReg, RegisterID dataReg) {
-        JS_ASSERT(objReg != typeReg);
-
-        RegisterID elementsReg = objReg;
-        loadPtr(Address(objReg, JSObject::offsetOfElements()), elementsReg);
-
-        FastArrayLoadFails fails;
-        fails.rangeCheck = guardArrayExtent(ObjectElements::offsetOfInitializedLength(),
-                                            objReg, key, BelowOrEqual);
-
-        // Load the slot out of the array.
-        if (key.isConstant()) {
-            Address slot(elementsReg, key.index() * sizeof(Value));
-            fails.holeCheck = fastArrayLoadSlot(slot, true, typeReg, dataReg);
-        } else {
-            BaseIndex slot(elementsReg, key.reg(), JSVAL_SCALE);
-            fails.holeCheck = fastArrayLoadSlot(slot, true, typeReg, dataReg);
-        }
-
-        return fails;
-    }
-
-    void storeKey(const Int32Key &key, Address address) {
-        if (key.isConstant())
-            store32(Imm32(key.index()), address);
-        else
-            store32(key.reg(), address);
-    }
-
-    void bumpKey(Int32Key &key, int32_t delta) {
-        if (key.isConstant())
-            key.index_ += delta;
-        else
-            add32(Imm32(delta), key.reg());
-    }
-
-    void loadFrameActuals(JSFunction *fun, RegisterID reg) {
-        /* Bias for the case where there was an arguments overflow. */
-        load32(Address(JSFrameReg, StackFrame::offsetOfNumActual()), reg);
-        add32(Imm32(fun->nargs + 2), reg);
-        Jump overflowArgs = branchTest32(Assembler::NonZero,
-                                         Address(JSFrameReg, StackFrame::offsetOfFlags()),
-                                         Imm32(StackFrame::OVERFLOW_ARGS));
-        move(Imm32(fun->nargs), reg);
-        overflowArgs.linkTo(label(), this);
-        lshiftPtr(Imm32(3), reg);
-        negPtr(reg);
-        addPtr(JSFrameReg, reg);
-    }
-
-    void loadBaseShape(RegisterID obj, RegisterID dest) {
-        loadPtr(Address(obj, JSObject::offsetOfShape()), dest);
-        loadPtr(Address(dest, Shape::offsetOfBase()), dest);
-    }
-
-    void loadObjClass(RegisterID obj, RegisterID dest) {
-        loadPtr(Address(obj, JSObject::offsetOfType()), dest);
-        loadPtr(Address(dest, offsetof(types::TypeObject, clasp)), dest);
-    }
-
-    Jump testClass(Condition cond, RegisterID claspReg, js::Class *clasp) {
-        return branchPtr(cond, claspReg, ImmPtr(clasp));
-    }
-
-    Jump testObjClass(Condition cond, RegisterID obj, RegisterID temp, js::Class *clasp) {
-        loadPtr(Address(obj, JSObject::offsetOfType()), temp);
-        return branchPtr(cond, Address(temp, offsetof(types::TypeObject, clasp)), ImmPtr(clasp));
-    }
-
-    Jump testFunction(Condition cond, RegisterID fun, RegisterID temp) {
-        return testObjClass(cond, fun, temp, &js::FunctionClass);
-    }
-
-    void branchValue(Condition cond, RegisterID reg, int32_t value, RegisterID result)
-    {
-        if (Registers::maskReg(result) & Registers::SingleByteRegs) {
-            set32(cond, reg, Imm32(value), result);
-        } else {
-            Jump j = branch32(cond, reg, Imm32(value));
-            move(Imm32(0), result);
-            Jump skip = jump();
-            j.linkTo(label(), this);
-            move(Imm32(1), result);
-            skip.linkTo(label(), this);
-        }
-    }
-
-    void branchValue(Condition cond, RegisterID lreg, RegisterID rreg, RegisterID result)
-    {
-        if (Registers::maskReg(result) & Registers::SingleByteRegs) {
-            set32(cond, lreg, rreg, result);
-        } else {
-            Jump j = branch32(cond, lreg, rreg);
-            move(Imm32(0), result);
-            Jump skip = jump();
-            j.linkTo(label(), this);
-            move(Imm32(1), result);
-            skip.linkTo(label(), this);
-        }
-    }
-
-    void rematPayload(const StateRemat &remat, RegisterID reg) {
-        if (remat.inMemory())
-            loadPayload(remat.address(), reg);
-        else
-            move(remat.reg(), reg);
-    }
-
-    void loadDynamicSlot(RegisterID objReg, uint32_t index,
-                         RegisterID typeReg, RegisterID dataReg) {
-        loadPtr(Address(objReg, JSObject::offsetOfSlots()), dataReg);
-        loadValueAsComponents(Address(dataReg, index * sizeof(Value)), typeReg, dataReg);
-    }
-
-    void loadObjProp(JSObject *obj, RegisterID objReg,
-                     js::Shape *shape,
-                     RegisterID typeReg, RegisterID dataReg)
-    {
-        if (obj->isFixedSlot(shape->slot()))
-            loadInlineSlot(objReg, shape->slot(), typeReg, dataReg);
-        else
-            loadDynamicSlot(objReg, obj->dynamicSlotIndex(shape->slot()), typeReg, dataReg);
-    }
-
-#ifdef JS_METHODJIT_TYPED_ARRAY
-    // Load a value from a typed array's packed data vector into dataReg.
-    // This function expects the following combinations of typeReg, dataReg and tempReg:
-    // 1) for all INT arrays other than UINT32:
-    //    - dataReg is a GP-register
-    //    - typeReg is optional
-    //    - tempReg is not set
-    // 2) for UINT32:
-    //    - dataReg is either a FP-register or a GP-register
-    //    - typeReg is set if dataReg is a GP-register
-    //    - tempReg is set if dataReg is a FP-register
-    // 3) for FLOAT32 and FLOAT64:
-    //    - dataReg is either a FP-register or a GP-register
-    //    - typeReg is set if dataReg is a GP-register
-    //    - tempReg is not set
-    template <typename T>
-    void loadFromTypedArray(int atype, T address, MaybeRegisterID typeReg,
-                            AnyRegisterID dataReg, MaybeRegisterID tempReg)
-    {
-        // If dataReg is an FP-register we don't use typeReg.
-        JS_ASSERT_IF(dataReg.isFPReg(), !typeReg.isSet());
-
-        // We only need tempReg for Uint32Array and only if dataReg is an FP-register.
-        JS_ASSERT_IF(atype != js::TypedArray::TYPE_UINT32 || dataReg.isReg(), !tempReg.isSet());
-
-        switch (atype) {
-          case js::TypedArray::TYPE_INT8:
-            load8SignExtend(address, dataReg.reg());
-            if (typeReg.isSet())
-                move(ImmType(JSVAL_TYPE_INT32), typeReg.reg());
-            break;
-          case js::TypedArray::TYPE_UINT8:
-          case js::TypedArray::TYPE_UINT8_CLAMPED:
-            load8ZeroExtend(address, dataReg.reg());
-            if (typeReg.isSet())
-                move(ImmType(JSVAL_TYPE_INT32), typeReg.reg());
-            break;
-          case js::TypedArray::TYPE_INT16:
-            load16SignExtend(address, dataReg.reg());
-            if (typeReg.isSet())
-                move(ImmType(JSVAL_TYPE_INT32), typeReg.reg());
-            break;
-          case js::TypedArray::TYPE_UINT16:
-            load16(address, dataReg.reg());
-            if (typeReg.isSet())
-                move(ImmType(JSVAL_TYPE_INT32), typeReg.reg());
-            break;
-          case js::TypedArray::TYPE_INT32:
-            load32(address, dataReg.reg());
-            if (typeReg.isSet())
-                move(ImmType(JSVAL_TYPE_INT32), typeReg.reg());
-            break;
-          case js::TypedArray::TYPE_UINT32:
-          {
-            // For Uint32Array the result is either int32_t or double.
-            // If dataReg is a GP-register, load a double or int32_t into dataReg/typeReg.
-            // If dataReg is a FP-register, load the value as double.
-            if (dataReg.isReg()) {
-                load32(address, dataReg.reg());
-                move(ImmType(JSVAL_TYPE_INT32), typeReg.reg());
-                Jump safeInt = branch32(Assembler::Below, dataReg.reg(), Imm32(0x80000000));
-                convertUInt32ToDouble(dataReg.reg(), Registers::FPConversionTemp);
-                breakDouble(Registers::FPConversionTemp, typeReg.reg(), dataReg.reg());
-                safeInt.linkTo(label(), this);
-            } else {
-                load32(address, tempReg.reg());
-                convertUInt32ToDouble(tempReg.reg(), dataReg.fpreg());
-            }
-            break;
-          }
-          case js::TypedArray::TYPE_FLOAT32:
-          case js::TypedArray::TYPE_FLOAT64:
-          {
-            FPRegisterID fpreg = dataReg.isReg()
-                               ? Registers::FPConversionTemp
-                               : dataReg.fpreg();
-            if (atype == js::TypedArray::TYPE_FLOAT32)
-                loadFloat(address, fpreg);
-            else
-                loadDouble(address, fpreg);
-            // Make sure NaN gets canonicalized. If dataReg is not an FP-register
-            // we have to use loadStaticDouble as we were probably called from an
-            // IC and we can't use slowLoadConstantDouble.
-            Jump notNaN = branchDouble(Assembler::DoubleEqual, fpreg, fpreg);
-            if (dataReg.isReg())
-                loadStaticDouble(&js_NaN, Registers::FPConversionTemp, dataReg.reg());
-            else
-                slowLoadConstantDouble(js_NaN, fpreg);
-            notNaN.linkTo(label(), this);
-            if (dataReg.isReg())
-                breakDouble(Registers::FPConversionTemp, typeReg.reg(), dataReg.reg());
-            break;
-          }
-        }
-    }
-
-    void loadFromTypedArray(int atype, RegisterID objReg, Int32Key key,
-                            MaybeRegisterID typeReg, AnyRegisterID dataReg,
-                            MaybeRegisterID tempReg)
-    {
-        int shift = TypedArray::slotWidth(atype);
-
-        if (key.isConstant()) {
-            Address addr(objReg, key.index() * shift);
-            loadFromTypedArray(atype, addr, typeReg, dataReg, tempReg);
-        } else {
-            Assembler::Scale scale = Assembler::TimesOne;
-            switch (shift) {
-              case 2:
-                scale = Assembler::TimesTwo;
-                break;
-              case 4:
-                scale = Assembler::TimesFour;
-                break;
-              case 8:
-                scale = Assembler::TimesEight;
-                break;
-            }
-            BaseIndex addr(objReg, key.reg(), scale);
-            loadFromTypedArray(atype, addr, typeReg, dataReg, tempReg);
-        }
-    }
-
-    template <typename S, typename T>
-    void storeToTypedIntArray(int atype, S src, T address)
-    {
-        switch (atype) {
-          case js::TypedArray::TYPE_INT8:
-          case js::TypedArray::TYPE_UINT8:
-          case js::TypedArray::TYPE_UINT8_CLAMPED:
-            store8(src, address);
-            break;
-          case js::TypedArray::TYPE_INT16:
-          case js::TypedArray::TYPE_UINT16:
-            store16(src, address);
-            break;
-          case js::TypedArray::TYPE_INT32:
-          case js::TypedArray::TYPE_UINT32:
-            store32(src, address);
-            break;
-          default:
-            JS_NOT_REACHED("unknown int array type");
-        }
-    }
-
-    template <typename S, typename T>
-    void storeToTypedFloatArray(int atype, S src, T address)
-    {
-        if (atype == js::TypedArray::TYPE_FLOAT32)
-            storeFloat(src, address);
-        else
-            storeDouble(src, address);
-    }
-
-    template <typename T>
-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 7 && defined(JS_CPU_ARM)
-    __attribute__((optimize("-O1")))
-#endif
-    void storeToTypedArray(int atype, ValueRemat vr, T address)
-    {
-        if (atype == js::TypedArray::TYPE_FLOAT32 || atype == js::TypedArray::TYPE_FLOAT64) {
-            if (vr.isConstant())
-                storeToTypedFloatArray(atype, ImmDouble(vr.value().toDouble()), address);
-            else
-                storeToTypedFloatArray(atype, vr.fpReg(), address);
-        } else {
-            if (vr.isConstant())
-                storeToTypedIntArray(atype, Imm32(vr.value().toInt32()), address);
-            else
-                storeToTypedIntArray(atype, vr.dataReg(), address);
-        }
-    }
-
-    void storeToTypedArray(int atype, RegisterID objReg, Int32Key key, ValueRemat vr)
-    {
-        int shift = TypedArray::slotWidth(atype);
-        if (key.isConstant()) {
-            Address addr(objReg, key.index() * shift);
-            storeToTypedArray(atype, vr, addr);
-        } else {
-            Assembler::Scale scale = Assembler::TimesOne;
-            switch (shift) {
-            case 2:
-                scale = Assembler::TimesTwo;
-                break;
-            case 4:
-                scale = Assembler::TimesFour;
-                break;
-            case 8:
-                scale = Assembler::TimesEight;
-                break;
-            }
-            BaseIndex addr(objReg, key.reg(), scale);
-            storeToTypedArray(atype, vr, addr);
-        }
-    }
-
-    void clampInt32ToUint8(RegisterID reg)
-    {
-        Jump j = branch32(Assembler::GreaterThanOrEqual, reg, Imm32(0));
-        move(Imm32(0), reg);
-        Jump done = jump();
-        j.linkTo(label(), this);
-        j = branch32(Assembler::LessThanOrEqual, reg, Imm32(255));
-        move(Imm32(255), reg);
-        j.linkTo(label(), this);
-        done.linkTo(label(), this);
-    }
-
-    // Inline version of js::ClampDoubleToUint8.
-    void clampDoubleToUint8(FPRegisterID fpReg, FPRegisterID fpTemp, RegisterID reg)
-    {
-        JS_ASSERT(fpTemp != Registers::FPConversionTemp);
-
-        // <= 0 or NaN ==> 0
-        zeroDouble(fpTemp);
-        Jump positive = branchDouble(Assembler::DoubleGreaterThan, fpReg, fpTemp);
-        move(Imm32(0), reg);
-        Jump done1 = jump();
-
-        // Add 0.5 and truncate.
-        positive.linkTo(label(), this);
-        slowLoadConstantDouble(0.5, fpTemp);
-        addDouble(fpReg, fpTemp);
-        Jump notInt = branchTruncateDoubleToInt32(fpTemp, reg);
-
-        // > 255 ==> 255
-        Jump inRange = branch32(Assembler::BelowOrEqual, reg, Imm32(255));
-        notInt.linkTo(label(), this);
-        move(Imm32(255), reg);
-        Jump done2 = jump();
-
-        // Check if we had a tie.
-        inRange.linkTo(label(), this);
-        convertInt32ToDouble(reg, Registers::FPConversionTemp);
-        Jump done3 = branchDouble(Assembler::DoubleNotEqual, fpTemp, Registers::FPConversionTemp);
-
-        // It was a tie. Mask out the ones bit to get an even value.
-        // See js::ClampDoubleToUint8 for the reasoning behind this.
-        and32(Imm32(~1), reg);
-
-        done1.linkTo(label(), this);
-        done2.linkTo(label(), this);
-        done3.linkTo(label(), this);
-    }
-#endif /* JS_METHODJIT_TYPED_ARRAY */
-
-    Address objPropAddress(JSObject *obj, RegisterID objReg, uint32_t slot)
-    {
-        if (obj->isFixedSlot(slot))
-            return Address(objReg, JSObject::getFixedSlotOffset(slot));
-        loadPtr(Address(objReg, JSObject::offsetOfSlots()), objReg);
-        return Address(objReg, obj->dynamicSlotIndex(slot) * sizeof(Value));
-    }
-
-    static uint32_t maskAddress(Address address) {
-        return Registers::maskReg(address.base);
-    }
-
-    static uint32_t maskAddress(BaseIndex address) {
-        return Registers::maskReg(address.base) |
-               Registers::maskReg(address.index);
-    }
-
-    /*
-     * Generate code testing whether an in memory value at address has a type
-     * in the specified set. Updates mismatches with any failure jumps. Assumes
-     * that no temporary (caller save) registers are live.
-     */
-    bool generateTypeCheck(JSContext *cx, Address address,
-                           types::TypeSet *types, Vector<Jump> *mismatches)
-    {
-        if (types->unknown())
-            return true;
-
-        Vector<Jump> matches(cx);
-
-        if (types->hasType(types::Type::DoubleType())) {
-            /* Type sets containing double also contain int. */
-            if (!matches.append(testNumber(Assembler::Equal, address)))
-                return false;
-        } else if (types->hasType(types::Type::Int32Type())) {
-            if (!matches.append(testInt32(Assembler::Equal, address)))
-                return false;
-        }
-
-        if (types->hasType(types::Type::UndefinedType())) {
-            if (!matches.append(testUndefined(Assembler::Equal, address)))
-                return false;
-        }
-
-        if (types->hasType(types::Type::BooleanType())) {
-            if (!matches.append(testBoolean(Assembler::Equal, address)))
-                return false;
-        }
-
-        if (types->hasType(types::Type::StringType())) {
-            if (!matches.append(testString(Assembler::Equal, address)))
-                return false;
-        }
-
-        if (types->hasType(types::Type::NullType())) {
-            if (!matches.append(testNull(Assembler::Equal, address)))
-                return false;
-        }
-
-        unsigned count = 0;
-        if (types->hasType(types::Type::AnyObjectType())) {
-            if (!matches.append(testObject(Assembler::Equal, address)))
-                return false;
-        } else {
-            count = types->getObjectCount();
-        }
-
-        if (count != 0) {
-            if (!mismatches->append(testObject(Assembler::NotEqual, address)))
-                return false;
-            RegisterID reg = Registers::ArgReg1;
-
-            loadPayload(address, reg);
-
-            for (unsigned i = 0; i < count; i++) {
-                if (JSObject *object = types->getSingleObject(i)) {
-                    if (!matches.append(branchPtr(Assembler::Equal, reg, ImmPtr(object))))
-                        return false;
-                }
-            }
-
-            loadPtr(Address(reg, JSObject::offsetOfType()), reg);
-
-            for (unsigned i = 0; i < count; i++) {
-                if (types::TypeObject *object = types->getTypeObject(i)) {
-                    if (!matches.append(branchPtr(Assembler::Equal, reg, ImmPtr(object))))
-                        return false;
-                }
-            }
-        }
-
-        if (!mismatches->append(jump()))
-            return false;
-
-        for (unsigned i = 0; i < matches.length(); i++)
-            matches[i].linkTo(label(), this);
-
-        return true;
-    }
-
-    /*
-     * Get a free object for the specified GC kind in compartment, writing it
-     * to result and filling it in according to templateObject. Returns a jump
-     * taken if a free thing was not retrieved. Note: don't call this directly,
-     * use Compiler::getNewObject instead.
-     */
-    Jump getNewObject(JSContext *cx, RegisterID result, JSObject *templateObject)
-    {
-        gc::AllocKind allocKind = templateObject->tenuredGetAllocKind();
-
-        JS_ASSERT(allocKind >= gc::FINALIZE_OBJECT0 && allocKind <= gc::FINALIZE_OBJECT_LAST);
-        int thingSize = (int)gc::Arena::thingSize(allocKind);
-
-        JS_ASSERT(cx->typeInferenceEnabled());
-        JS_ASSERT(!templateObject->hasDynamicSlots());
-        JS_ASSERT(!templateObject->hasDynamicElements());
-
-#ifdef JS_GC_ZEAL
-        if (cx->runtime->needZealousGC())
-            return jump();
-#endif
-
-        /*
-         * Inline FreeSpan::allocate. Only the case where the current freelist
-         * span is not empty is handled.
-         */
-        gc::FreeSpan *list = const_cast<gc::FreeSpan *>
-                             (cx->zone()->allocator.arenas.getFreeList(allocKind));
-        loadPtr(&list->first, result);
-
-        Jump jump = branchPtr(Assembler::BelowOrEqual, AbsoluteAddress(&list->last), result);
-
-        addPtr(Imm32(thingSize), result);
-        storePtr(result, &list->first);
-
-        /*
-         * Fill in the blank object. Order doesn't matter here, from here
-         * everything is infallible. Note that this bakes GC thing pointers
-         * into the code without explicitly pinning them. With type inference
-         * enabled, JIT code is collected on GC except when analysis or
-         * compilation is active, in which case type objects won't be collected
-         * but other things may be. The shape held by templateObject *must* be
-         * pinned against GC either by the script or by some type object.
-         */
-
-        int elementsOffset = JSObject::offsetOfFixedElements();
-
-        /*
-         * Write out the elements pointer before readjusting the result register,
-         * as for dense arrays we will need to get the address of the fixed
-         * elements first.
-         */
-        if (templateObject->isArray()) {
-            JS_ASSERT(!templateObject->getDenseInitializedLength());
-            addPtr(Imm32(-thingSize + elementsOffset), result);
-            storePtr(result, Address(result, -elementsOffset + JSObject::offsetOfElements()));
-            addPtr(Imm32(-elementsOffset), result);
-        } else {
-            addPtr(Imm32(-thingSize), result);
-            storePtr(ImmPtr(emptyObjectElements), Address(result, JSObject::offsetOfElements()));
-        }
-
-        storePtr(ImmPtr(templateObject->lastProperty()), Address(result, JSObject::offsetOfShape()));
-        storePtr(ImmPtr(templateObject->type()), Address(result, JSObject::offsetOfType()));
-        storePtr(ImmPtr(NULL), Address(result, JSObject::offsetOfSlots()));
-
-        if (templateObject->isArray()) {
-            /* Fill in the elements header. */
-            store32(Imm32(templateObject->getDenseCapacity()),
-                    Address(result, elementsOffset + ObjectElements::offsetOfCapacity()));
-            store32(Imm32(templateObject->getDenseInitializedLength()),
-                    Address(result, elementsOffset + ObjectElements::offsetOfInitializedLength()));
-            store32(Imm32(templateObject->getArrayLength()),
-                    Address(result, elementsOffset + ObjectElements::offsetOfLength()));
-            store32(Imm32(templateObject->shouldConvertDoubleElements()
-                          ? ObjectElements::CONVERT_DOUBLE_ELEMENTS
-                          : 0),
-                    Address(result, elementsOffset + ObjectElements::offsetOfFlags()));
-        } else {
-            /*
-             * Fixed slots of non-array objects are required to be initialized;
-             * Use the values currently in the template object.
-             */
-            for (unsigned i = 0; i < templateObject->slotSpan(); i++) {
-                storeValue(templateObject->getFixedSlot(i),
-                           Address(result, JSObject::getFixedSlotOffset(i)));
-            }
-        }
-
-        if (templateObject->hasPrivate()) {
-            uint32_t nfixed = templateObject->numFixedSlots();
-            storePtr(ImmPtr(templateObject->getPrivate()),
-                     Address(result, JSObject::getPrivateDataOffset(nfixed)));
-        }
-
-        return jump;
-    }
-
-    /* Add the value stored in 'value' to the accumulator 'count'. */
-    void addCount(const double *value, double *count, RegisterID scratch)
-    {
-        loadDouble(value, Registers::FPConversionTemp);
-        move(ImmPtr(count), scratch);
-        addDouble(Address(scratch), Registers::FPConversionTemp);
-        storeDouble(Registers::FPConversionTemp, Address(scratch));
-    }
-
-    /* Add one to the accumulator |count|. */
-    void bumpCount(double *count, RegisterID scratch)
-    {
-        addCount(&oneDouble, count, scratch);
-    }
-
-    /* Bump the stub call count for script/pc if they are being counted. */
-    void bumpStubCount(JSScript *script, jsbytecode *pc, RegisterID scratch)
-    {
-        if (script->hasScriptCounts) {
-            PCCounts counts = script->getPCCounts(pc);
-            double *count = &counts.get(PCCounts::BASE_METHODJIT_STUBS);
-            bumpCount(count, scratch);
-        }
-    }
-
-  private:
-    /*
-     * Performs address arithmetic to return the base of the ProfileEntry into
-     * the register provided. The Jump returned is taken if the SPS stack is
-     * overflowing and no data should be written to it.
-     */
-    Jump spsProfileEntryAddress(SPSProfiler *p, int offset, RegisterID reg)
-    {
-        load32(p->sizePointer(), reg);
-        if (offset != 0)
-            add32(Imm32(offset), reg);
-        Jump j = branch32(Assembler::GreaterThanOrEqual, reg, Imm32(p->maxSize()));
-        JS_STATIC_ASSERT(sizeof(ProfileEntry) == 4 * sizeof(void*));
-        // 4 * sizeof(void*) * idx = idx << (2 + log(sizeof(void*)))
-        lshift32(Imm32(2 + (sizeof(void*) == 4 ? 2 : 3)), reg);
-        addPtr(ImmPtr(p->stack()), reg);
-        return j;
-    }
-
-  public:
-    void spsUpdatePCIdx(SPSProfiler *p, int32_t idx, RegisterID reg) {
-        Jump j = spsProfileEntryAddress(p, -1, reg);
-        store32(Imm32(idx), Address(reg, ProfileEntry::offsetOfPCIdx()));
-        j.linkTo(label(), this);
-    }
-
-    void spsPushFrame(SPSProfiler *p, const char *str, JSScript *s, RegisterID reg) {
-        Jump j = spsProfileEntryAddress(p, 0, reg);
-
-        storePtr(ImmPtr(str),  Address(reg, ProfileEntry::offsetOfString()));
-        storePtr(ImmPtr(s),    Address(reg, ProfileEntry::offsetOfScript()));
-        storePtr(ImmPtr(NULL), Address(reg, ProfileEntry::offsetOfStackAddress()));
-        store32(Imm32(ProfileEntry::NullPCIndex),
-                Address(reg, ProfileEntry::offsetOfPCIdx()));
-
-        /* Always increment the stack size, regardless if we actually pushed */
-        j.linkTo(label(), this);
-        add32(Imm32(1), AbsoluteAddress(p->sizePointer()));
-    }
-
-    void spsPopFrame(SPSProfiler *p, RegisterID reg) {
-        move(ImmPtr(p->sizePointer()), reg);
-        sub32(Imm32(1), Address(reg, 0));
-    }
-
-    static const double oneDouble;
-};
-
-/* Return f<true> if the script is strict mode code, f<false> otherwise. */
-#define STRICT_VARIANT(script, f)                                             \
-    (FunctionTemplateConditional(script->strict,                              \
-                                 f<true>, f<false>))
-
-/* Save some typing. */
-static const JSC::MacroAssembler::RegisterID JSReturnReg_Type = Assembler::JSReturnReg_Type;
-static const JSC::MacroAssembler::RegisterID JSReturnReg_Data = Assembler::JSReturnReg_Data;
-static const JSC::MacroAssembler::RegisterID JSParamReg_Argc  = Assembler::JSParamReg_Argc;
-
-struct FrameFlagsAddress : JSC::MacroAssembler::Address
-{
-    FrameFlagsAddress()
-      : Address(JSFrameReg, StackFrame::offsetOfFlags())
-    {}
-};
-
-class PreserveRegisters {
-    typedef JSC::MacroAssembler::RegisterID RegisterID;
-
-    Assembler   &masm;
-    uint32_t    count;
-    RegisterID  regs[JSC::MacroAssembler::TotalRegisters];
-
-  public:
-    PreserveRegisters(Assembler &masm) : masm(masm), count(0) { }
-    ~PreserveRegisters() { JS_ASSERT(!count); }
-
-    void preserve(Registers mask) {
-        JS_ASSERT(!count);
-
-        while (!mask.empty()) {
-            RegisterID reg = mask.takeAnyReg().reg();
-            regs[count++] = reg;
-            masm.saveReg(reg);
-        }
-    }
-
-    void restore() {
-        while (count)
-            masm.restoreReg(regs[--count]);
-    }
-};
-
-} /* namespace mjit */
-} /* namespace js */
-
-#endif
-
deleted file mode 100644
--- a/js/src/methodjit/BaseCompiler.h
+++ /dev/null
@@ -1,265 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-#if !defined jsjaeger_compilerbase_h__ && defined JS_METHODJIT
-#define jsjaeger_compilerbase_h__
-
-#include "jscntxt.h"
-#include "assembler/assembler/MacroAssembler.h"
-#include "assembler/assembler/LinkBuffer.h"
-#include "assembler/assembler/RepatchBuffer.h"
-#include "assembler/jit/ExecutableAllocator.h"
-#include <limits.h>
-
-#if defined JS_CPU_ARM
-# define POST_INST_OFFSET(__expr) ((__expr) - sizeof(ARMWord))
-#else
-# define POST_INST_OFFSET(__expr) (__expr)
-#endif
-
-namespace js {
-namespace mjit {
-
-struct MacroAssemblerTypedefs {
-    typedef JSC::MacroAssembler::Label Label;
-    typedef JSC::MacroAssembler::Imm32 Imm32;
-    typedef JSC::MacroAssembler::ImmPtr ImmPtr;
-    typedef JSC::MacroAssembler::RegisterID RegisterID;
-    typedef JSC::MacroAssembler::FPRegisterID FPRegisterID;
-    typedef JSC::MacroAssembler::Address Address;
-    typedef JSC::MacroAssembler::BaseIndex BaseIndex;
-    typedef JSC::MacroAssembler::AbsoluteAddress AbsoluteAddress;
-    typedef JSC::MacroAssembler MacroAssembler;
-    typedef JSC::MacroAssembler::Jump Jump;
-    typedef JSC::MacroAssembler::JumpList JumpList;
-    typedef JSC::MacroAssembler::Call Call;
-    typedef JSC::MacroAssembler::DataLabelPtr DataLabelPtr;
-    typedef JSC::MacroAssembler::DataLabel32 DataLabel32;
-    typedef JSC::FunctionPtr FunctionPtr;
-    typedef JSC::RepatchBuffer RepatchBuffer;
-    typedef JSC::CodeLocationLabel CodeLocationLabel;
-    typedef JSC::CodeLocationDataLabel32 CodeLocationDataLabel32;
-    typedef JSC::CodeLocationDataLabelPtr CodeLocationDataLabelPtr;
-    typedef JSC::CodeLocationJump CodeLocationJump;
-    typedef JSC::CodeLocationCall CodeLocationCall;
-    typedef JSC::CodeLocationInstruction CodeLocationInstruction;
-    typedef JSC::ReturnAddressPtr ReturnAddressPtr;
-    typedef JSC::MacroAssemblerCodePtr MacroAssemblerCodePtr;
-    typedef JSC::JITCode JITCode;
-#if defined JS_CPU_ARM
-    typedef JSC::ARMWord ARMWord;
-#endif
-};
-
-class BaseCompiler : public MacroAssemblerTypedefs
-{
-  protected:
-    JSContext *cx;
-
-  public:
-    BaseCompiler() : cx(NULL)
-    { }
-
-    BaseCompiler(JSContext *cx) : cx(cx)
-    { }
-};
-
-#ifdef JS_CPU_X64
-inline bool
-VerifyRange(void *start1, size_t size1, void *start2, size_t size2)
-{
-    uintptr_t end1 = uintptr_t(start1) + size1;
-    uintptr_t end2 = uintptr_t(start2) + size2;
-
-    uintptr_t lowest = Min(uintptr_t(start1), uintptr_t(start2));
-    uintptr_t highest = Max(end1, end2);
-
-    return (highest - lowest < INT_MAX);
-}
-#endif
-
-// This class wraps JSC::LinkBuffer for Mozilla-specific memory handling.
-// Every return |false| guarantees an OOM that has been correctly propagated,
-// and should continue to propagate.
-class LinkerHelper : public JSC::LinkBuffer
-{
-  protected:
-    Assembler &masm;
-#ifdef DEBUG
-    bool verifiedRange;
-#endif
-
-  public:
-    LinkerHelper(Assembler &masm, JSC::CodeKind kind) : JSC::LinkBuffer(kind)
-        , masm(masm)
-#ifdef DEBUG
-        , verifiedRange(false)
-#endif
-    { }
-
-    ~LinkerHelper() {
-        JS_ASSERT(verifiedRange);
-    }
-
-    bool verifyRange(const JSC::JITCode &other) {
-        markVerified();
-#ifdef JS_CPU_X64
-        return VerifyRange(m_code, m_size, other.start(), other.size());
-#else
-        return true;
-#endif
-    }
-
-    bool verifyRange(JITChunk *chunk) {
-        return verifyRange(JSC::JITCode(chunk->code.m_code.executableAddress(),
-                                        chunk->code.m_size));
-    }
-
-    JSC::ExecutablePool *init(JSContext *cx) {
-        // The pool is incref'd after this call, so it's necessary to release()
-        // on any failure.
-        JSC::ExecutableAllocator *allocator = &cx->runtime->execAlloc();
-        allocator->setDestroyCallback(Probes::discardExecutableRegion);
-        JSC::ExecutablePool *pool;
-        m_code = executableAllocAndCopy(masm, allocator, &pool);
-        if (!m_code) {
-            markVerified();
-            js_ReportOutOfMemory(cx);
-            return NULL;
-        }
-        m_size = masm.size();   // must come after call to executableAllocAndCopy()!
-        return pool;
-    }
-
-    JSC::CodeLocationLabel finalize(VMFrame &f) {
-        masm.finalize(*this);
-        JSC::CodeLocationLabel label = finalizeCodeAddendum();
-        Probes::registerICCode(f.cx, f.chunk(), f.script(), f.pc(),
-                               label.executableAddress(), masm.size());
-        return label;
-    }
-
-    void maybeLink(MaybeJump jump, JSC::CodeLocationLabel label) {
-        if (!jump.isSet())
-            return;
-        link(jump.get(), label);
-    }
-
-    size_t size() const {
-        return m_size;
-    }
-
-  protected:
-    void markVerified() {
-#ifdef DEBUG
-        verifiedRange = true;
-#endif
-    }
-};
-
-class NativeStubLinker : public LinkerHelper
-{
-  public:
-#ifdef JS_CPU_X64
-    typedef JSC::MacroAssembler::DataLabelPtr FinalJump;
-#else
-    typedef JSC::MacroAssembler::Jump FinalJump;
-#endif
-
-    NativeStubLinker(Assembler &masm, JITChunk *chunk, jsbytecode *pc, FinalJump done)
-        : LinkerHelper(masm, JSC::JAEGER_CODE), chunk(chunk), pc(pc), done(done)
-    {}
-
-    bool init(JSContext *cx);
-
-    void patchJump(JSC::CodeLocationLabel target) {
-#ifdef JS_CPU_X64
-        patch(done, target);
-#else
-        link(done, target);
-#endif
-    }
-
-  private:
-    JITChunk *chunk;
-    jsbytecode *pc;
-    FinalJump done;
-};
-
-bool
-NativeStubEpilogue(VMFrame &f, Assembler &masm, NativeStubLinker::FinalJump *result,
-                   int32_t initialFrameDepth, int32_t vpOffset,
-                   MaybeRegisterID typeReg, MaybeRegisterID dataReg);
-
-/*
- * On ARM, we periodically flush a constant pool into the instruction stream
- * where constants are found using PC-relative addressing. This is necessary
- * because the fixed-width instruction set doesn't support wide immediates.
- *
- * ICs perform repatching on the inline (fast) path by knowing small and
- * generally fixed code location offset values where the patchable instructions
- * live. Dumping a huge constant pool into the middle of an IC's inline path
- * makes the distance between emitted instructions potentially variable and/or
- * large, which makes the IC offsets invalid. We must reserve contiguous space
- * up front to prevent this from happening.
- */
-#ifdef JS_CPU_ARM
-template <size_t reservedSpace>
-class AutoReserveICSpace {
-    typedef Assembler::Label Label;
-
-    Assembler           &masm;
-    bool                didCheck;
-    bool                *overflowSpace;
-    int                 flushCount;
-
-  public:
-    AutoReserveICSpace(Assembler &masm, bool *overflowSpace)
-        : masm(masm), didCheck(false), overflowSpace(overflowSpace)
-    {
-        masm.ensureSpace(reservedSpace);
-        flushCount = masm.flushCount();
-    }
-
-    /* Allow manual IC space checks so that non-patchable code at the end of an IC section can be
-     * free to use constant pools. */
-    void check() {
-        JS_ASSERT(!didCheck);
-        didCheck = true;
-
-        if (masm.flushCount() != flushCount)
-            *overflowSpace = true;
-    }
-
-    ~AutoReserveICSpace() {
-        /* Automatically check the IC space if we didn't already do it manually. */
-        if (!didCheck) {
-            check();
-        }
-    }
-};
-
-# define RESERVE_IC_SPACE(__masm)       AutoReserveICSpace<256> arics(__masm, &this->overflowICSpace)
-# define CHECK_IC_SPACE()               arics.check()
-
-/* The OOL path can need a lot of space because we save and restore a lot of registers. The actual
- * sequene varies. However, dumping the literal pool before an OOL block is probably a good idea
- * anyway, as we branch directly to the start of the block from the fast path. */
-# define RESERVE_OOL_SPACE(__masm)      AutoReserveICSpace<2048> arics_ool(__masm, &this->overflowICSpace)
-
-/* Allow the OOL patch to be checked before object destruction. Often, non-patchable epilogues or
- * rejoining sequences are emitted, and it isn't necessary to protect these from literal pools. */
-# define CHECK_OOL_SPACE()              arics_ool.check()
-#else
-# define RESERVE_IC_SPACE(__masm)       /* Do nothing. */
-# define CHECK_IC_SPACE()               /* Do nothing. */
-# define RESERVE_OOL_SPACE(__masm)      /* Do nothing. */
-# define CHECK_OOL_SPACE()              /* Do nothing. */
-#endif
-
-} /* namespace js */
-} /* namespace mjit */
-
-#endif
deleted file mode 100644
--- a/js/src/methodjit/CodeGenIncludes.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#if !defined jsjaeger_codegenincs_h__ && defined JS_METHODJIT
-#define jsjaeger_codegenincs_h__
-
-/* Get a label for assertion purposes. Prevent #ifdef clutter. */
-#ifdef DEBUG
-# define DBGLABEL(name) Label name = masm.label();
-# define DBGLABEL_NOMASM(name) Label name = label();
-# define DBGLABEL_ASSIGN(name) name = masm.label();
-#else
-# define DBGLABEL(name)
-# define DBGLABEL_NOMASM(name)
-# define DBGLABEL_ASSIGN(name)
-#endif
-
-#if defined JS_NUNBOX32
-# include "NunboxAssembler.h"
-#elif defined JS_PUNBOX64
-# include "PunboxAssembler.h"
-#else
-# error "Neither JS_NUNBOX32 nor JS_PUNBOX64 is defined."
-#endif
-
-#include "BaseAssembler.h"
-
-#endif /* jsjaeger_codegenincs_h__ */
-
deleted file mode 100644
--- a/js/src/methodjit/Compiler.cpp
+++ /dev/null
@@ -1,8223 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "mozilla/DebugOnly.h"
-
-#include "MethodJIT.h"
-#include "jsnum.h"
-#include "jsbool.h"
-#include "jsiter.h"
-#include "Compiler.h"
-#include "StubCalls.h"
-#include "MonoIC.h"
-#include "PolyIC.h"
-#include "ICChecker.h"
-#include "Retcon.h"
-#include "assembler/jit/ExecutableAllocator.h"
-#include "assembler/assembler/LinkBuffer.h"
-#include "FrameState-inl.h"
-#include "jsobjinlines.h"
-#include "jsscriptinlines.h"
-#include "InlineFrameAssembler.h"
-#include "jscompartment.h"
-#include "jsopcodeinlines.h"
-#include "jsworkers.h"
-
-#include "builtin/RegExp.h"
-#include "vm/RegExpStatics.h"
-#include "vm/RegExpObject.h"
-
-#include "jsautooplen.h"
-#include "jstypedarrayinlines.h"
-#include "vm/RegExpObject-inl.h"
-
-#include "ion/BaselineJIT.h"
-#include "ion/Ion.h"
-
-#if JS_TRACE_LOGGING
-#include "TraceLogging.h"
-#endif
-
-using namespace js;
-using namespace js::mjit;
-#if defined(JS_POLYIC) || defined(JS_MONOIC)
-using namespace js::mjit::ic;
-#endif
-using namespace js::analyze;
-
-using mozilla::DebugOnly;
-
-#define RETURN_IF_OOM(retval)                                   \
-    JS_BEGIN_MACRO                                              \
-        if (oomInVector || masm.oom() || stubcc.masm.oom())     \
-            return retval;                                      \
-    JS_END_MACRO
-
-static inline bool IsIonEnabled(JSContext *cx)
-{
-#ifdef JS_ION
-    return ion::IsEnabled(cx);
-#else
-    return false;
-#endif
-}
-
-/*
- * Number of times a script must be called or had a backedge before we try to
- * inline its calls. This is only used if IonMonkey is disabled.
- */
-static const size_t USES_BEFORE_INLINING = 10240;
-
-mjit::Compiler::Compiler(JSContext *cx, JSScript *outerScript,
-                         unsigned chunkIndex, bool isConstructing)
-  : BaseCompiler(cx),
-    outerScript(cx, outerScript),
-    chunkIndex(chunkIndex),
-    isConstructing(isConstructing),
-    outerChunk(outerJIT()->chunkDescriptor(chunkIndex)),
-    ssa(cx, outerScript),
-    globalObj(cx, outerScript->compileAndGo ? &outerScript->global() : NULL),
-    globalSlots(globalObj ? globalObj->getRawSlots() : NULL),
-    sps(&cx->runtime->spsProfiler),
-    masm(&sps, &PC),
-    frame(cx, *thisFromCtor(), masm, stubcc),
-    a(NULL), outer(NULL), script_(cx), PC(NULL), loop(NULL),
-    inlineFrames(CompilerAllocPolicy(cx, *thisFromCtor())),
-    branchPatches(CompilerAllocPolicy(cx, *thisFromCtor())),
-#if defined JS_MONOIC
-    getGlobalNames(CompilerAllocPolicy(cx, *thisFromCtor())),
-    setGlobalNames(CompilerAllocPolicy(cx, *thisFromCtor())),
-    callICs(CompilerAllocPolicy(cx, *thisFromCtor())),
-    equalityICs(CompilerAllocPolicy(cx, *thisFromCtor())),
-#endif
-#if defined JS_POLYIC
-    pics(CompilerAllocPolicy(cx, *thisFromCtor())),
-    getElemICs(CompilerAllocPolicy(cx, *thisFromCtor())),
-    setElemICs(CompilerAllocPolicy(cx, *thisFromCtor())),
-#endif
-    callPatches(CompilerAllocPolicy(cx, *thisFromCtor())),
-    callSites(CompilerAllocPolicy(cx, *thisFromCtor())),
-    compileTriggers(CompilerAllocPolicy(cx, *thisFromCtor())),
-    doubleList(CompilerAllocPolicy(cx, *thisFromCtor())),
-    rootedTemplates(CompilerAllocPolicy(cx, *thisFromCtor())),
-    rootedRegExps(CompilerAllocPolicy(cx, *thisFromCtor())),
-    monitoredBytecodes(CompilerAllocPolicy(cx, *thisFromCtor())),
-    typeBarrierBytecodes(CompilerAllocPolicy(cx, *thisFromCtor())),
-    fixedIntToDoubleEntries(CompilerAllocPolicy(cx, *thisFromCtor())),
-    fixedDoubleToAnyEntries(CompilerAllocPolicy(cx, *thisFromCtor())),
-    jumpTables(CompilerAllocPolicy(cx, *thisFromCtor())),
-    jumpTableEdges(CompilerAllocPolicy(cx, *thisFromCtor())),
-    loopEntries(CompilerAllocPolicy(cx, *thisFromCtor())),
-    chunkEdges(CompilerAllocPolicy(cx, *thisFromCtor())),
-    stubcc(cx, *thisFromCtor(), frame),
-    debugMode_(cx->compartment->debugMode()),
-    inlining_(false),
-    hasGlobalReallocation(false),
-    oomInVector(false),
-    overflowICSpace(false),
-    gcNumber(cx->runtime->gcNumber),
-    pcLengths(NULL)
-{
-    JS_ASSERT(cx->jaegerCompilationAllowed());
-
-    if (!IsIonEnabled(cx)) {
-        /* Once a script starts getting really hot we will inline calls in it. */
-        if (!debugMode() && cx->typeInferenceEnabled() && globalObj &&
-            (outerScript->getUseCount() >= USES_BEFORE_INLINING ||
-             cx->hasOption(JSOPTION_METHODJIT_ALWAYS))) {
-            inlining_ = true;
-        }
-    }
-}
-
-CompileStatus
-mjit::Compiler::compile()
-{
-    JS_ASSERT(!outerChunkRef().chunk);
-
-#if JS_TRACE_LOGGING
-    AutoTraceLog logger(TraceLogging::defaultLogger(),
-                        TraceLogging::JM_COMPILE_START,
-                        TraceLogging::JM_COMPILE_STOP,
-                        outerScript);
-#endif
-
-    CompileStatus status = performCompilation();
-    if (status != Compile_Okay && status != Compile_Retry) {
-        mjit::ExpandInlineFrames(cx->zone());
-        mjit::Recompiler::clearStackReferences(cx->runtime->defaultFreeOp(), outerScript);
-        if (!outerScript->ensureHasMJITInfo(cx))
-            return Compile_Error;
-        JSScript::JITScriptHandle *jith = outerScript->jitHandle(isConstructing, cx->zone()->compileBarriers());
-        JSScript::ReleaseCode(cx->runtime->defaultFreeOp(), jith);
-        jith->setUnjittable();
-
-        if (outerScript->function()) {
-            outerScript->uninlineable = true;
-            types::MarkTypeObjectFlags(cx, outerScript->function(),
-                                       types::OBJECT_FLAG_UNINLINEABLE);
-        }
-    }
-
-    return status;
-}
-
-CompileStatus
-mjit::Compiler::checkAnalysis(HandleScript script)
-{
-    if (!script->ensureRanAnalysis(cx))
-        return Compile_Error;
-
-    if (!script->analysis()->jaegerCompileable()) {
-        JaegerSpew(JSpew_Abort, "script has uncompileable opcodes\n");
-        return Compile_Abort;
-    }
-
-    if (cx->typeInferenceEnabled() && !script->ensureRanInference(cx))
-        return Compile_Error;
-
-    ScriptAnalysis *analysis = script->analysis();
-    analysis->assertMatchingDebugMode();
-    if (analysis->failed()) {
-        JaegerSpew(JSpew_Abort, "couldn't analyze bytecode; probably switchX or OOM\n");
-        return Compile_Abort;
-    }
-
-    return Compile_Okay;
-}
-
-CompileStatus
-mjit::Compiler::addInlineFrame(HandleScript script, uint32_t depth,
-                               uint32_t parent, jsbytecode *parentpc)
-{
-    JS_ASSERT(inlining());
-
-    CompileStatus status = checkAnalysis(script);
-    if (status != Compile_Okay)
-        return status;
-
-    if (!ssa.addInlineFrame(script, depth, parent, parentpc))
-        return Compile_Error;
-
-    uint32_t index = ssa.iterFrame(ssa.numFrames() - 1).index;
-    return scanInlineCalls(index, depth);
-}
-
-CompileStatus
-mjit::Compiler::scanInlineCalls(uint32_t index, uint32_t depth)
-{
-    /* Maximum number of calls we will inline at the same site. */
-    static const uint32_t INLINE_SITE_LIMIT = 5;
-
-    JS_ASSERT(inlining() && globalObj);
-
-    /* Not inlining yet from 'new' scripts. */
-    if (isConstructing)
-        return Compile_Okay;
-
-    JSScript *script = ssa.getFrame(index).script;
-    ScriptAnalysis *analysis = script->analysis();
-
-    /* Don't inline from functions which could have a non-global scope object. */
-    if (!script->compileAndGo ||
-        &script->global() != globalObj ||
-        (script->function() && script->function()->getParent() != globalObj) ||
-        (script->function() && script->function()->isHeavyweight()) ||
-        script->isActiveEval) {
-        return Compile_Okay;
-    }
-
-    uint32_t nextOffset = 0;
-    uint32_t lastOffset = script->length;
-
-    if (index == CrossScriptSSA::OUTER_FRAME) {
-        nextOffset = outerChunk.begin;
-        lastOffset = outerChunk.end;
-    }
-
-    while (nextOffset < lastOffset) {
-        uint32_t offset = nextOffset;
-        jsbytecode *pc = script->code + offset;
-        nextOffset = offset + GetBytecodeLength(pc);
-
-        Bytecode *code = analysis->maybeCode(pc);
-        if (!code)
-            continue;
-
-        /* :XXX: Not yet inlining 'new' calls. */
-        if (JSOp(*pc) != JSOP_CALL)
-            continue;
-
-        /* Not inlining at monitored call sites or those with type barriers. */
-        if (code->monitoredTypes || code->monitoredTypesReturn || analysis->typeBarriers(cx, pc) != NULL)
-            continue;
-
-        uint32_t argc = GET_ARGC(pc);
-        types::StackTypeSet *calleeTypes = analysis->poppedTypes(pc, argc + 1);
-
-        if (calleeTypes->getKnownTypeTag() != JSVAL_TYPE_OBJECT)
-            continue;
-
-        if (calleeTypes->getObjectCount() >= INLINE_SITE_LIMIT)
-            continue;
-
-        /*
-         * Compute the maximum height we can grow the stack for inlined frames.
-         * We always reserve space for loop temporaries, for an extra stack
-         * frame pushed when making a call from the deepest inlined frame, and
-         * for the temporary slot used by type barriers.
-         */
-        uint32_t stackLimit = outerScript->nslots + StackSpace::STACK_JIT_EXTRA
-            - VALUES_PER_STACK_FRAME - FrameState::TEMPORARY_LIMIT - 1;
-
-        /* Compute the depth of any frames inlined at this site. */
-        uint32_t nextDepth = depth + VALUES_PER_STACK_FRAME + script->nfixed + code->stackDepth;
-
-        /*
-         * Scan each of the possible callees for other conditions precluding
-         * inlining. We only inline at a call site if all callees are inlineable.
-         */
-        unsigned count = calleeTypes->getObjectCount();
-        bool okay = true;
-        for (unsigned i = 0; i < count; i++) {
-            if (calleeTypes->getTypeObject(i) != NULL) {
-                okay = false;
-                break;
-            }
-
-            JSObject *obj = calleeTypes->getSingleObject(i);
-            if (!obj)
-                continue;
-
-            if (!obj->isFunction()) {
-                okay = false;
-                break;
-            }
-
-            JSFunction *fun = obj->toFunction();
-            if (!fun->isInterpreted()) {
-                okay = false;
-                break;
-            }
-            RootedScript script(cx, fun->nonLazyScript());
-
-            /*
-             * Don't inline calls to scripts which haven't been analyzed.
-             * We need to analyze the inlined scripts to compile them, and
-             * doing so can change type information we have queried already
-             * in making inlining decisions.
-             */
-            if (!script->hasAnalysis() || !script->analysis()->ranInference()) {
-                okay = false;
-                break;
-            }
-
-            /* See bug 768313. */
-            if (script->hasScriptCounts != outerScript->hasScriptCounts) {
-                okay = false;
-                break;
-            }
-
-            /*
-             * The outer and inner scripts must have the same scope. This only
-             * allows us to inline calls between non-inner functions. Also
-             * check for consistent strictness between the functions.
-             */
-            if (!globalObj ||
-                fun->getParent() != globalObj ||
-                outerScript->strict != script->strict) {
-                okay = false;
-                break;
-            }
-
-            /* We can't cope with inlining recursive functions yet. */
-            uint32_t nindex = index;
-            while (nindex != CrossScriptSSA::INVALID_FRAME) {
-                if (ssa.getFrame(nindex).script == script)
-                    okay = false;
-                nindex = ssa.getFrame(nindex).parent;
-            }
-            if (!okay)
-                break;
-
-            /* Watch for excessively deep nesting of inlined frames. */
-            if (nextDepth + script->nslots >= stackLimit) {
-                okay = false;
-                break;
-            }
-
-            if (!script->types) {
-                okay = false;
-                break;
-            }
-
-            CompileStatus status = checkAnalysis(script);
-            if (status != Compile_Okay)
-                return status;
-
-            if (!script->analysis()->jaegerInlineable(argc)) {
-                okay = false;
-                break;
-            }
-
-            types::TypeObject *funType = fun->getType(cx);
-            if (!funType ||
-                types::HeapTypeSet::HasObjectFlags(cx, funType, types::OBJECT_FLAG_UNINLINEABLE))
-            {
-                okay = false;
-                break;
-            }
-
-            /*
-             * Watch for a generic state change in the callee's type, so that
-             * the outer script will be recompiled if any type information
-             * changes in stack values within the callee.
-             */
-            types::HeapTypeSet::WatchObjectStateChange(cx, funType);
-
-            /*
-             * Don't inline scripts which use 'this' if it is possible they
-             * could be called with a 'this' value requiring wrapping. During
-             * inlining we do not want to modify frame entries belonging to the
-             * caller.
-             */
-            if (script->analysis()->usesThisValue() &&
-                types::TypeScript::ThisTypes(script)->getKnownTypeTag() != JSVAL_TYPE_OBJECT) {
-                okay = false;
-                break;
-            }
-        }
-        if (!okay)
-            continue;
-
-        /*
-         * Add the inline frames to the cross script SSA. We will pick these
-         * back up when compiling the call site.
-         */
-        for (unsigned i = 0; i < count; i++) {
-            JSObject *obj = calleeTypes->getSingleObject(i);
-            if (!obj)
-                continue;
-
-            JSFunction *fun = obj->toFunction();
-            RootedScript script(cx, fun->nonLazyScript());
-
-            CompileStatus status = addInlineFrame(script, nextDepth, index, pc);
-            if (status != Compile_Okay)
-                return status;
-        }
-    }
-
-    return Compile_Okay;
-}
-
-CompileStatus
-mjit::Compiler::pushActiveFrame(JSScript *scriptArg, uint32_t argc)
-{
-    RootedScript script(cx, scriptArg);
-    if (cx->runtime->profilingScripts && !script->hasScriptCounts)
-        script->initScriptCounts(cx);
-
-    ActiveFrame *newa = js_new<ActiveFrame>(cx);
-    if (!newa) {
-        js_ReportOutOfMemory(cx);
-        return Compile_Error;
-    }
-
-    newa->parent = a;
-    if (a)
-        newa->parentPC = PC;
-    newa->script = script;
-    newa->mainCodeStart = masm.size();
-    newa->stubCodeStart = stubcc.size();
-
-    if (outer) {
-        newa->inlineIndex = uint32_t(inlineFrames.length());
-        inlineFrames.append(newa);
-    } else {
-        newa->inlineIndex = CrossScriptSSA::OUTER_FRAME;
-        outer = newa;
-    }
-    JS_ASSERT(ssa.getFrame(newa->inlineIndex).script == script);
-
-    newa->inlinePCOffset = ssa.frameLength(newa->inlineIndex);
-
-    ScriptAnalysis *newAnalysis = script->analysis();
-
-#ifdef JS_METHODJIT_SPEW
-    if (cx->typeInferenceEnabled() && IsJaegerSpewChannelActive(JSpew_Regalloc)) {
-        unsigned nargs = script->function() ? script->function()->nargs : 0;
-        for (unsigned i = 0; i < nargs; i++) {
-            uint32_t slot = ArgSlot(i);
-            if (!newAnalysis->slotEscapes(slot)) {
-                JaegerSpew(JSpew_Regalloc, "Argument %u:", i);
-                newAnalysis->liveness(slot).print();
-            }
-        }
-        for (unsigned i = 0; i < script->nfixed; i++) {
-            uint32_t slot = LocalSlot(script, i);
-            if (!newAnalysis->slotEscapes(slot)) {
-                JaegerSpew(JSpew_Regalloc, "Local %u:", i);
-                newAnalysis->liveness(slot).print();
-            }
-        }
-    }
-#endif
-
-    if (!frame.pushActiveFrame(script, argc)) {
-        js_ReportOutOfMemory(cx);
-        return Compile_Error;
-    }
-
-    newa->jumpMap = js_pod_malloc<Label>(script->length);
-    if (!newa->jumpMap) {
-        js_ReportOutOfMemory(cx);
-        return Compile_Error;
-    }
-#ifdef DEBUG
-    for (uint32_t i = 0; i < script->length; i++)
-        newa->jumpMap[i] = Label();
-#endif
-
-    if (cx->typeInferenceEnabled()) {
-        CompileStatus status = prepareInferenceTypes(script, newa);
-        if (status != Compile_Okay)
-            return status;
-    }
-
-    if (script != outerScript && !sps.enterInlineFrame())
-        return Compile_Error;
-
-    this->script_ = script;
-    this->analysis = newAnalysis;
-    this->PC = script->code;
-    this->a = newa;
-
-    return Compile_Okay;
-}
-
-void
-mjit::Compiler::popActiveFrame()
-{
-    JS_ASSERT(a->parent);
-    a->mainCodeEnd = masm.size();
-    a->stubCodeEnd = stubcc.size();
-    this->PC = a->parentPC;
-    this->a = (ActiveFrame *) a->parent;
-    this->script_ = a->script;
-    this->analysis = this->script_->analysis();
-
-    frame.popActiveFrame();
-    sps.leaveInlineFrame();
-}
-
-#define CHECK_STATUS(expr)                                           \
-    JS_BEGIN_MACRO                                                   \
-        CompileStatus status_ = (expr);                              \
-        if (status_ != Compile_Okay) {                               \
-            if (oomInVector || masm.oom() || stubcc.masm.oom())      \
-                js_ReportOutOfMemory(cx);                            \
-            return status_;                                          \
-        }                                                            \
-    JS_END_MACRO
-
-CompileStatus
-mjit::Compiler::performCompilation()
-{
-    JaegerSpew(JSpew_Scripts,
-               "compiling script (file \"%s\") (line \"%d\") (length \"%d\") (chunk \"%d\") (usecount \"%d\")\n",
-               outerScript->filename(), outerScript->lineno, outerScript->length, chunkIndex, (int) outerScript->getUseCount());
-
-    if (inlining()) {
-        JaegerSpew(JSpew_Inlining,
-                   "inlining calls in script (file \"%s\") (line \"%d\")\n",
-                   outerScript->filename(), outerScript->lineno);
-    }
-
-#ifdef JS_METHODJIT_SPEW
-    Profiler prof;
-    prof.start();
-#endif
-
-#ifdef JS_METHODJIT
-    outerScript->debugMode = debugMode();
-#endif
-
-    JS_ASSERT(cx->compartment->activeAnalysis);
-
-    {
-        types::AutoEnterCompilation enter(cx, types::CompilerOutput::MethodJIT);
-        if (!enter.init(outerScript, isConstructing, chunkIndex)) {
-            js_ReportOutOfMemory(cx);
-            return Compile_Error;
-        }
-
-        CHECK_STATUS(checkAnalysis(outerScript));
-        if (inlining())
-            CHECK_STATUS(scanInlineCalls(CrossScriptSSA::OUTER_FRAME, 0));
-        CHECK_STATUS(pushActiveFrame(outerScript, 0));
-
-        if (outerScript->hasScriptCounts || Probes::wantNativeAddressInfo(cx)) {
-            size_t length = ssa.frameLength(ssa.numFrames() - 1);
-            pcLengths = js_pod_calloc<PCLengthEntry>(length);
-            if (!pcLengths)
-                return Compile_Error;
-        }
-
-        if (chunkIndex == 0)
-            CHECK_STATUS(generatePrologue());
-        else
-            sps.setPushed(script_);
-        CHECK_STATUS(generateMethod());
-        if (outerJIT() && chunkIndex == outerJIT()->nchunks - 1)
-            CHECK_STATUS(generateEpilogue());
-        CHECK_STATUS(finishThisUp());
-    }
-
-#ifdef JS_METHODJIT_SPEW
-    prof.stop();
-    JaegerSpew(JSpew_Prof, "compilation took %d us\n", prof.time_us());
-#endif
-
-    JaegerSpew(JSpew_Scripts, "successfully compiled (code \"%p\") (size \"%u\")\n",
-               outerChunkRef().chunk->code.m_code.executableAddress(),
-               unsigned(outerChunkRef().chunk->code.m_size));
-
-    return Compile_Okay;
-}
-
-#undef CHECK_STATUS
-
-mjit::JSActiveFrame::JSActiveFrame()
-    : parent(NULL), parentPC(NULL), script(NULL), inlineIndex(UINT32_MAX)
-{
-}
-
-mjit::Compiler::ActiveFrame::ActiveFrame(JSContext *cx)
-    : jumpMap(NULL),
-      varTypes(NULL), needReturnValue(false),
-      syncReturnValue(false), returnValueDouble(false), returnSet(false),
-      returnEntry(NULL), returnJumps(NULL), exitState(NULL)
-{}
-
-mjit::Compiler::ActiveFrame::~ActiveFrame()
-{
-    js_free(jumpMap);
-    if (varTypes)
-        js_free(varTypes);
-}
-
-mjit::Compiler::~Compiler()
-{
-    if (outer)
-        js_delete(outer);
-    for (unsigned i = 0; i < inlineFrames.length(); i++)
-        js_delete(inlineFrames[i]);
-    while (loop) {
-        LoopState *nloop = loop->outer;
-        js_delete(loop);
-        loop = nloop;
-    }
-}
-
-CompileStatus
-mjit::Compiler::prepareInferenceTypes(JSScript *script, ActiveFrame *a)
-{
-    /*
-     * During our walk of the script, we need to preserve the invariant that at
-     * join points the in memory type tag is always in sync with the known type
-     * tag of the variable's SSA value at that join point. In particular, SSA
-     * values inferred as (int|double) must in fact be doubles, stored either
-     * in floating point registers or in memory. There is an exception for
-     * locals whose value is currently dead, whose type might not be synced.
-     *
-     * To ensure this, we need to know the SSA values for each variable at each
-     * join point, which the SSA analysis does not store explicitly. These can
-     * be recovered, though. During the forward walk, the SSA value of a var
-     * (and its associated type set) change only when we see an explicit assign
-     * to the var or get to a join point with a phi node for that var. So we
-     * can duplicate the effects of that walk here by watching for writes to
-     * vars (updateVarTypes) and new phi nodes at join points.
-     *
-     * When we get to a branch and need to know a variable's value at the
-     * branch target, we know it will either be a phi node at the target or
-     * the variable's current value, as no phi node is created at the target
-     * only if a variable has the same value on all incoming edges.
-     */
-
-    a->varTypes = js_pod_calloc<VarType>(TotalSlots(script));
-    if (!a->varTypes) {
-        js_ReportOutOfMemory(cx);
-        return Compile_Error;
-    }
-
-    for (uint32_t slot = ArgSlot(0); slot < TotalSlots(script); slot++) {
-        VarType &vt = a->varTypes[slot];
-        vt.setTypes(types::TypeScript::SlotTypes(script, slot));
-    }
-
-    return Compile_Okay;
-}
-
-/*
- * Number of times a script must be called or have back edges taken before we
- * run it in the methodjit. We wait longer if type inference is enabled, to
- * allow more gathering of type information and less recompilation.
- */
-static const size_t USES_BEFORE_COMPILE       = 16;
-static const size_t INFER_USES_BEFORE_COMPILE = 43;
-
-/* Target maximum size, in bytecode length, for a compiled chunk of a script. */
-static uint32_t CHUNK_LIMIT = 1500;
-
-void
-mjit::SetChunkLimit(uint32_t limit)
-{
-    if (limit)
-        CHUNK_LIMIT = limit;
-}
-
-JITScript *
-MakeJITScript(JSContext *cx, JSScript *script)
-{
-    if (!script->ensureRanAnalysis(cx))
-        return NULL;
-
-    ScriptAnalysis *analysis = script->analysis();
-
-    Vector<ChunkDescriptor> chunks(cx);
-    Vector<CrossChunkEdge> edges(cx);
-
-    if (script->length < CHUNK_LIMIT || !cx->typeInferenceEnabled()) {
-        ChunkDescriptor desc;
-        desc.begin = 0;
-        desc.end = script->length;
-        if (!chunks.append(desc))
-            return NULL;
-    } else {
-        if (!script->ensureRanInference(cx))
-            return NULL;
-
-        /* Outgoing edges within the current chunk. */
-        Vector<CrossChunkEdge> currentEdges(cx);
-        uint32_t chunkStart = 0;
-
-        bool preserveNextChunk = false;
-        unsigned offset, nextOffset = 0;
-        while (nextOffset < script->length) {
-            offset = nextOffset;
-
-            jsbytecode *pc = script->code + offset;
-            JSOp op = JSOp(*pc);
-
-            nextOffset = offset + GetBytecodeLength(pc);
-
-            Bytecode *code = analysis->maybeCode(offset);
-            if (!code)
-                op = JSOP_NOP; /* Ignore edges from unreachable opcodes. */
-
-            /* Whether this should be the last opcode in the chunk. */
-            bool finishChunk = false;
-
-            /* Keep going, override finishChunk. */
-            bool preserveChunk = preserveNextChunk;
-            preserveNextChunk = false;
-
-            /*
-             * Add an edge for opcodes which perform a branch. Skip LABEL ops,
-             * which do not actually branch. XXX LABEL should not be JOF_JUMP.
-             */
-            uint32_t type = JOF_TYPE(js_CodeSpec[op].format);
-            if (type == JOF_JUMP && op != JSOP_LABEL) {
-                CrossChunkEdge edge;
-                edge.source = offset;
-                edge.target = FollowBranch(cx, script, pc - script->code);
-                if (edge.target < offset) {
-                    /* Always end chunks after loop back edges. */
-                    finishChunk = true;
-                    if (edge.target < chunkStart) {
-                        analysis->getCode(edge.target).safePoint = true;
-                        if (!edges.append(edge))
-                            return NULL;
-                    }
-                } else if (edge.target == nextOffset) {
-                    /*
-                     * Override finishChunk for bytecodes which directly
-                     * jump to their fallthrough opcode ('if (x) {}'). This
-                     * creates two CFG edges with the same source/target, which
-                     * will confuse the compiler's edge patching code.
-                     */
-                    preserveChunk = true;
-                } else {
-                    if (!currentEdges.append(edge))
-                        return NULL;
-                }
-            }
-
-            if (op == JSOP_TABLESWITCH) {
-                jsbytecode *pc2 = pc;
-                unsigned defaultOffset = offset + GET_JUMP_OFFSET(pc);
-                pc2 += JUMP_OFFSET_LEN;
-                int32_t low = GET_JUMP_OFFSET(pc2);
-                pc2 += JUMP_OFFSET_LEN;
-                int32_t high = GET_JUMP_OFFSET(pc2);
-                pc2 += JUMP_OFFSET_LEN;
-
-                CrossChunkEdge edge;
-                edge.source = offset;
-                edge.target = defaultOffset;
-                if (!currentEdges.append(edge))
-                    return NULL;
-
-                for (int32_t i = low; i <= high; i++) {
-                    unsigned targetOffset = offset + GET_JUMP_OFFSET(pc2);
-                    if (targetOffset != offset) {
-                        /*
-                         * This can end up inserting duplicate edges, all but
-                         * the first of which will be ignored.
-                         */
-                        CrossChunkEdge edge;
-                        edge.source = offset;
-                        edge.target = targetOffset;
-                        if (!currentEdges.append(edge))
-                            return NULL;
-                    }
-                    pc2 += JUMP_OFFSET_LEN;
-                }
-            }
-
-            if (unsigned(offset - chunkStart) > CHUNK_LIMIT)
-                finishChunk = true;
-
-            if (nextOffset >= script->length || !analysis->maybeCode(nextOffset)) {
-                /* Ensure that chunks do not start on unreachable opcodes. */
-                preserveChunk = true;
-            } else {
-                /*
-                 * Start new chunks at the opcode before each loop head.
-                 * This ensures that the initial goto for loops is included in
-                 * the same chunk as the loop itself.
-                 */
-                jsbytecode *nextpc = script->code + nextOffset;
-
-                /*
-                 * Don't insert a chunk boundary in the middle of two opcodes
-                 * which may be fused together.
-                 */
-                switch (JSOp(*nextpc)) {
-                  case JSOP_POP:
-                  case JSOP_IFNE:
-                  case JSOP_IFEQ:
-                    preserveChunk = true;
-                    break;
-                  default:
-                    break;
-                }
-
-                uint32_t afterOffset = nextOffset + GetBytecodeLength(nextpc);
-                if (afterOffset < script->length) {
-                    if (analysis->maybeCode(afterOffset) &&
-                        JSOp(script->code[afterOffset]) == JSOP_LOOPHEAD &&
-                        analysis->getLoop(afterOffset))
-                    {
-                        if (preserveChunk)
-                            preserveNextChunk = true;
-                        else
-                            finishChunk = true;
-                    }
-                }
-            }
-
-            if (finishChunk && !preserveChunk) {
-                ChunkDescriptor desc;
-                desc.begin = chunkStart;
-                desc.end = nextOffset;
-                if (!chunks.append(desc))
-                    return NULL;
-
-                /* Add an edge for fallthrough from this chunk to the next one. */
-                if (BytecodeFallsThrough(op)) {
-                    CrossChunkEdge edge;
-                    edge.source = offset;
-                    edge.target = nextOffset;
-                    analysis->getCode(edge.target).safePoint = true;
-                    if (!edges.append(edge))
-                        return NULL;
-                }
-
-                chunkStart = nextOffset;
-                for (unsigned i = 0; i < currentEdges.length(); i++) {
-                    const CrossChunkEdge &edge = currentEdges[i];
-                    if (edge.target >= nextOffset) {
-                        analysis->getCode(edge.target).safePoint = true;
-                        if (!edges.append(edge))
-                            return NULL;
-                    }
-                }
-                currentEdges.clear();
-            }
-        }
-
-        if (chunkStart != script->length) {
-            ChunkDescriptor desc;
-            desc.begin = chunkStart;
-            desc.end = script->length;
-            if (!chunks.append(desc))
-                return NULL;
-        }
-    }
-
-    size_t dataSize = sizeof(JITScript)
-        + (chunks.length() * sizeof(ChunkDescriptor))
-        + (edges.length() * sizeof(CrossChunkEdge));
-    uint8_t *cursor = (uint8_t *) js_calloc(dataSize);
-    if (!cursor)
-        return NULL;
-
-    JITScript *jit = (JITScript *) cursor;
-    cursor += sizeof(JITScript);
-
-    jit->script = script;
-    JS_INIT_CLIST(&jit->callers);
-
-    jit->nchunks = chunks.length();
-    for (unsigned i = 0; i < chunks.length(); i++) {
-        const ChunkDescriptor &a = chunks[i];
-        ChunkDescriptor &b = jit->chunkDescriptor(i);
-        b.begin = a.begin;
-        b.end = a.end;
-
-        if (chunks.length() == 1) {
-            /* Seed the chunk's count so it is immediately compiled. */
-            b.counter = INFER_USES_BEFORE_COMPILE;
-        }
-    }
-
-    if (edges.empty())
-        return jit;
-
-    jit->nedges = edges.length();
-    CrossChunkEdge *jitEdges = jit->edges();
-    for (unsigned i = 0; i < edges.length(); i++) {
-        const CrossChunkEdge &a = edges[i];
-        CrossChunkEdge &b = jitEdges[i];
-        b.source = a.source;
-        b.target = a.target;
-    }
-
-    /* Generate a pool with all cross chunk shims, and set shimLabel for each edge. */
-    jsbytecode *pc;
-    MJITInstrumentation sps(&cx->runtime->spsProfiler);
-    Assembler masm(&sps, &pc);
-    sps.setPushed(script);
-    for (unsigned i = 0; i < jit->nedges; i++) {
-        pc = script->code + jitEdges[i].target;
-        jitEdges[i].shimLabel = (void *) masm.distanceOf(masm.label());
-        masm.move(JSC::MacroAssembler::ImmPtr(&jitEdges[i]), Registers::ArgReg1);
-        masm.fallibleVMCall(true, JS_FUNC_TO_DATA_PTR(void *, stubs::CrossChunkShim),
-                            pc, NULL, script->nfixed + analysis->getCode(pc).stackDepth);
-    }
-    LinkerHelper linker(masm, JSC::JAEGER_CODE);
-    JSC::ExecutablePool *ep = linker.init(cx);
-    if (!ep)
-        return NULL;
-    jit->shimPool = ep;
-
-    masm.finalize(linker);
-    uint8_t *shimCode = (uint8_t *) linker.finalizeCodeAddendum().executableAddress();
-
-    JS_ALWAYS_TRUE(linker.verifyRange(JSC::JITCode(shimCode, masm.size())));
-
-    JaegerSpew(JSpew_PICs, "generated SHIM POOL stub %p (%lu bytes)\n",
-               shimCode, (unsigned long)masm.size());
-
-    for (unsigned i = 0; i < jit->nedges; i++) {
-        CrossChunkEdge &edge = jitEdges[i];
-        edge.shimLabel = shimCode + (size_t) edge.shimLabel;
-    }
-
-    return jit;
-}
-
-static inline bool
-IonGetsFirstChance(JSContext *cx, JSScript *script, jsbytecode *pc, CompileRequest request)
-{
-#ifdef JS_ION
-    if (!ion::IsEnabled(cx))
-        return false;
-
-    // If the script is not hot, use JM. recompileCheckHelper will insert a check
-    // to trigger a recompile when the script becomes hot.
-    if (script->getUseCount() < ion::js_IonOptions.usesBeforeCompile)
-        return false;
-
-    // If we're called from JM, use JM to avoid slow JM -> Ion calls.
-    if (request == CompileRequest_JIT)
-        return false;
-
-    // If there's no way this script is going to be Ion compiled, let JM take over.
-    if (!script->canIonCompile())
-        return false;
-
-    // If we cannot enter Ion because bailouts are expected, let JM take over.
-    if (script->hasIonScript() && script->ionScript()->bailoutExpected())
-        return false;
-
-    // If we cannot enter Ion because it was compiled for OSR at a different PC,
-    // let JM take over until the PC is reached. Don't do this until the script
-    // reaches a high use count, as if we do this prematurely we may get stuck
-    // in JM code.
-    if (OffThreadCompilationEnabled(cx) && script->hasIonScript() &&
-        pc && script->ionScript()->osrPc() && script->ionScript()->osrPc() != pc &&
-        script->getUseCount() >= ion::js_IonOptions.usesBeforeCompile * 2)
-    {
-        return false;
-    }
-
-    // If ion compilation is pending or in progress on another thread, continue
-    // using JM until that compilation finishes.
-    if (script->isIonCompilingOffThread())
-        return false;
-
-    return true;
-#endif
-    return false;
-}
-
-CompileStatus
-mjit::CanMethodJIT(JSContext *cx, JSScript *script, jsbytecode *pc,
-                   bool construct, CompileRequest request, StackFrame *frame)
-{
-    bool compiledOnce = false;
-  checkOutput:
-    if (!cx->methodJitEnabled)
-        return Compile_Abort;
-
-    if (!cx->jaegerCompilationAllowed())
-        return Compile_Abort;
-
-#ifdef JS_ION
-    if (ion::IsBaselineEnabled(cx) || ion::IsEnabled(cx))
-        return Compile_Abort;
-#endif
-
-    /*
-     * If SPS (profiling) is enabled, then the emitted instrumentation has to be
-     * careful to not wildly write to random locations. This is relevant
-     * whenever the status of profiling (on/off) is changed while JS is running.
-     * All pushed frames still need to be popped, but newly emitted code may
-     * have slightly different behavior.
-     *
-     * For a new function, this doesn't matter at all, but if we're compiling
-     * the current function, then the writes start to matter. If an SPS frame
-     * has been pushed and SPS is still enabled, then we're good to go. If an
-     * SPS frame has not been pushed, and SPS is not enabled, then we're still
-     * good to go. If, however, the two are different, then we cannot emit JIT
-     * code because the instrumentation will be wrong one way or another.
-     */
-    if (frame->script() == script && pc != script->code) {
-        if (frame->hasPushedSPSFrame() != cx->runtime->spsProfiler.enabled())
-            return Compile_Skipped;
-    }
-
-    if (IonGetsFirstChance(cx, script, pc, request)) {
-        if (script->hasIonScript())
-            script->incUseCount();
-        return Compile_Skipped;
-    }
-
-    if (script->hasMJITInfo()) {
-        JSScript::JITScriptHandle *jith = script->jitHandle(construct, cx->zone()->compileBarriers());
-        if (jith->isUnjittable())
-            return Compile_Abort;
-    }
-
-    if (!cx->hasOption(JSOPTION_METHODJIT_ALWAYS) &&
-        (cx->typeInferenceEnabled()
-         ? script->incUseCount() <= INFER_USES_BEFORE_COMPILE
-         : script->incUseCount() <= USES_BEFORE_COMPILE))
-    {
-        return Compile_Skipped;
-    }
-
-    if (!cx->runtime->getJaegerRuntime(cx))
-        return Compile_Error;
-
-    // Ensure that constructors have at least one slot.
-    if (construct && !script->nslots)
-        script->nslots++;
-
-    uint64_t gcNumber = cx->runtime->gcNumber;
-
-    if (!script->ensureHasMJITInfo(cx))
-        return Compile_Error;
-
-    JSScript::JITScriptHandle *jith = script->jitHandle(construct, cx->zone()->compileBarriers());
-
-    JITScript *jit;
-    if (jith->isEmpty()) {
-        jit = MakeJITScript(cx, script);
-        if (!jit)
-            return Compile_Error;
-
-        // Script analysis can trigger GC, watch in case compileBarriers() changed.
-        if (gcNumber != cx->runtime->gcNumber) {
-            FreeOp *fop = cx->runtime->defaultFreeOp();
-            jit->destroy(fop);
-            fop->free_(jit);
-            return Compile_Skipped;
-        }
-
-        jith->setValid(jit);
-    } else {
-        jit = jith->getValid();
-    }
-
-    unsigned chunkIndex = jit->chunkIndex(pc);
-    ChunkDescriptor &desc = jit->chunkDescriptor(chunkIndex);
-
-    if (jit->mustDestroyEntryChunk) {
-        // We kept this chunk around so that Ion can get info from its caches.
-        // If we end up here, we decided not to use Ion so we can destroy the
-        // chunk now.
-        JS_ASSERT(jit->nchunks == 1);
-        jit->mustDestroyEntryChunk = false;
-
-        if (desc.chunk) {
-            jit->destroyChunk(cx->runtime->defaultFreeOp(), chunkIndex, /* resetUses = */ false);
-            return Compile_Skipped;
-        }
-    }
-
-    if (desc.chunk)
-        return Compile_Okay;
-    if (compiledOnce)
-        return Compile_Skipped;
-
-    if (!cx->hasOption(JSOPTION_METHODJIT_ALWAYS) &&
-        ++desc.counter <= INFER_USES_BEFORE_COMPILE)
-    {
-        return Compile_Skipped;
-    }
-
-    CompileStatus status;
-    {
-        types::AutoEnterAnalysis enter(cx);
-
-        Compiler cc(cx, script, chunkIndex, construct);
-        status = cc.compile();
-    }
-
-    /*
-     * Check if we have hit the threshold for purging analysis data. This is
-     * done after compilation, rather than after another analysis stage, to
-     * ensure we don't throw away the work just performed.
-     */
-    cx->compartment->types.maybePurgeAnalysis(cx);
-
-    if (status == Compile_Okay) {
-        /*
-         * Compiling a script can occasionally trigger its own recompilation,
-         * so go back through the compilation logic.
-         */
-        compiledOnce = true;
-        goto checkOutput;
-    }
-
-    /* Non-OOM errors should have an associated exception. */
-    JS_ASSERT_IF(status == Compile_Error,
-                 cx->isExceptionPending() || cx->runtime->hadOutOfMemory);
-
-    return status;
-}
-
-CompileStatus
-mjit::Compiler::generatePrologue()
-{
-    fastEntryLabel = masm.label();
-
-    /*
-     * If there is no function, then this can only be called via JaegerShot(),
-     * which expects an existing frame to be initialized like the interpreter.
-     */
-    if (script_->function()) {
-        Jump j = masm.jump();
-
-        /*
-         * Entry point #2: The caller has partially constructed a frame, and
-         * either argc >= nargs or the arity check has corrected the frame.
-         */
-        fastEntryLabel = masm.label();
-
-        /* Store this early on so slow paths can access it. */
-        masm.storePtr(ImmPtr(script_->function()),
-                      Address(JSFrameReg, StackFrame::offsetOfExec()));
-        if (script_->isCallsiteClone) {
-            masm.storeValue(ObjectValue(*script_->function()),
-                            Address(JSFrameReg, StackFrame::offsetOfCallee(script_->function())));
-        }
-
-        {
-            /*
-             * Entry point #3: The caller has partially constructed a frame,
-             * but argc might be != nargs, so an arity check might be called.
-             *
-             * This loops back to entry point #2.
-             */
-            arityLabel = stubcc.masm.label();
-
-            Jump argMatch = stubcc.masm.branch32(Assembler::Equal, JSParamReg_Argc,
-                                                 Imm32(script_->function()->nargs));
-
-            if (JSParamReg_Argc != Registers::ArgReg1)
-                stubcc.masm.move(JSParamReg_Argc, Registers::ArgReg1);
-
-            /* Slow path - call the arity check function. Returns new fp. */
-            stubcc.masm.storePtr(ImmPtr(script_->function()),
-                                 Address(JSFrameReg, StackFrame::offsetOfExec()));
-            OOL_STUBCALL(stubs::FixupArity, REJOIN_NONE);
-            stubcc.masm.move(Registers::ReturnReg, JSFrameReg);
-            argMatch.linkTo(stubcc.masm.label(), &stubcc.masm);
-
-            argsCheckLabel = stubcc.masm.label();
-
-            /* Type check the arguments as well. */
-            if (cx->typeInferenceEnabled()) {
-#ifdef JS_MONOIC
-                this->argsCheckJump = stubcc.masm.jump();
-                this->argsCheckStub = stubcc.masm.label();
-                this->argsCheckJump.linkTo(this->argsCheckStub, &stubcc.masm);
-#endif
-                stubcc.masm.storePtr(ImmPtr(script_->function()),
-                                     Address(JSFrameReg, StackFrame::offsetOfExec()));
-                OOL_STUBCALL(stubs::CheckArgumentTypes, REJOIN_CHECK_ARGUMENTS);
-#ifdef JS_MONOIC
-                this->argsCheckFallthrough = stubcc.masm.label();
-#endif
-            }
-
-            stubcc.crossJump(stubcc.masm.jump(), fastEntryLabel);
-        }
-
-        /*
-         * Guard that there is enough stack space. Note we reserve space for
-         * any inline frames we end up generating, or a callee's stack frame
-         * we write to before the callee checks the stack.
-         */
-        uint32_t nvals = VALUES_PER_STACK_FRAME + script_->nslots + StackSpace::STACK_JIT_EXTRA;
-        masm.addPtr(Imm32(nvals * sizeof(Value)), JSFrameReg, Registers::ReturnReg);
-        Jump stackCheck = masm.branchPtr(Assembler::AboveOrEqual, Registers::ReturnReg,
-                                         FrameAddress(offsetof(VMFrame, stackLimit)));
-
-        /*
-         * If the stack check fails then we need to either commit more of the
-         * reserved stack space or throw an error. Specify that the number of
-         * local slots is 0 (instead of the default script->nfixed) since the
-         * range [fp->slots(), fp->base()) may not be commited. (The calling
-         * contract requires only that the caller has reserved space for fp.)
-         */
-        {
-            stubcc.linkExitDirect(stackCheck, stubcc.masm.label());
-            OOL_STUBCALL(stubs::HitStackQuota, REJOIN_NONE);
-            stubcc.crossJump(stubcc.masm.jump(), masm.label());
-        }
-
-        markUndefinedLocals();
-
-        /*
-         * Load the scope chain into the frame if it will be needed by NAME
-         * opcodes or by the nesting prologue below. The scope chain is always
-         * set for global and eval frames, and will have been set by
-         * HeavyweightFunctionPrologue for heavyweight function frames.
-         */
-        if (!script_->function()->isHeavyweight() && analysis->usesScopeChain()) {
-            RegisterID t0 = Registers::ReturnReg;
-            Jump hasScope = masm.branchTest32(Assembler::NonZero,
-                                              FrameFlagsAddress(), Imm32(StackFrame::HAS_SCOPECHAIN));
-            masm.loadPayload(Address(JSFrameReg, StackFrame::offsetOfCallee(script_->function())), t0);
-            masm.loadPtr(Address(t0, JSFunction::offsetOfEnvironment()), t0);
-            masm.storePtr(t0, Address(JSFrameReg, StackFrame::offsetOfScopeChain()));
-            hasScope.linkTo(masm.label(), &masm);
-        }
-
-        /*
-         * When 'arguments' is used in the script, it may be optimized away
-         * which involves reading from the stack frame directly, including
-         * fp->u.nactual. fp->u.nactual is only set when numActual != numFormal,
-         * so store 'fp->u.nactual = numFormal' when there is no over/underflow.
-         */
-        if (script_->argumentsHasVarBinding()) {
-            Jump hasArgs = masm.branchTest32(Assembler::NonZero, FrameFlagsAddress(),
-                                             Imm32(StackFrame::UNDERFLOW_ARGS |
-                                                   StackFrame::OVERFLOW_ARGS));
-            masm.storePtr(ImmPtr((void *)(size_t) script_->function()->nargs),
-                          Address(JSFrameReg, StackFrame::offsetOfNumActual()));
-            hasArgs.linkTo(masm.label(), &masm);
-        }
-
-        j.linkTo(masm.label(), &masm);
-    }
-
-    if (cx->typeInferenceEnabled()) {
-#ifdef DEBUG
-        if (script_->function()) {
-            prepareStubCall(Uses(0));
-            INLINE_STUBCALL(stubs::AssertArgumentTypes, REJOIN_NONE);
-        }
-#endif
-        ensureDoubleArguments();
-    }
-
-    /* Inline StackFrame::prologue. */
-    if (script_->isActiveEval && script_->strict) {
-        prepareStubCall(Uses(0));
-        INLINE_STUBCALL(stubs::StrictEvalPrologue, REJOIN_EVAL_PROLOGUE);
-    } else if (script_->function()) {
-        if (script_->function()->isHeavyweight()) {
-            prepareStubCall(Uses(0));
-            INLINE_STUBCALL(stubs::HeavyweightFunctionPrologue, REJOIN_FUNCTION_PROLOGUE);
-        }
-
-        if (isConstructing && !constructThis())
-            return Compile_Error;
-    }
-
-    CompileStatus status = methodEntryHelper();
-    if (status == Compile_Okay) {
-        if (IsIonEnabled(cx))
-            ionCompileHelper();
-        else
-            inliningCompileHelper();
-    }
-
-    return status;
-}
-
-void
-mjit::Compiler::ensureDoubleArguments()
-{
-    /* Convert integer arguments which were inferred as (int|double) to doubles. */
-    for (uint32_t i = 0; script_->function() && i < script_->function()->nargs; i++) {
-        uint32_t slot = ArgSlot(i);
-        if (a->varTypes[slot].getTypeTag() == JSVAL_TYPE_DOUBLE && analysis->trackSlot(slot))
-            frame.ensureDouble(frame.getArg(i));
-    }
-}
-
-void
-mjit::Compiler::markUndefinedLocal(uint32_t offset, uint32_t i)
-{
-    uint32_t depth = ssa.getFrame(a->inlineIndex).depth;
-    Address local(JSFrameReg, sizeof(StackFrame) + (depth + i) * sizeof(Value));
-    masm.storeValue(UndefinedValue(), local);
-}
-
-void
-mjit::Compiler::markUndefinedLocals()
-{
-    /*
-     * Set locals to undefined. Skip locals which aren't closed and are known
-     * to be defined before used,
-     */
-    for (uint32_t i = 0; i < script_->nfixed; i++)
-        markUndefinedLocal(0, i);
-
-#ifdef DEBUG
-    uint32_t depth = ssa.getFrame(a->inlineIndex).depth;
-    for (uint32_t i = script_->nfixed; i < script_->nslots; i++) {
-        Address local(JSFrameReg, sizeof(StackFrame) + (depth + i) * sizeof(Value));
-        masm.storeValue(JS::ObjectValueCrashOnTouch(), local);
-    }
-#endif
-}
-
-CompileStatus
-mjit::Compiler::generateEpilogue()
-{
-    return Compile_Okay;
-}
-
-CompileStatus
-mjit::Compiler::finishThisUp()
-{
-#ifdef JS_CPU_X64
-    /* Generate trampolines to ensure that cross chunk edges are patchable. */
-    for (unsigned i = 0; i < chunkEdges.length(); i++) {
-        chunkEdges[i].sourceTrampoline = stubcc.masm.label();
-        stubcc.masm.move(ImmPtr(NULL), Registers::ScratchReg);
-        stubcc.masm.jump(Registers::ScratchReg);
-    }
-#endif
-
-    RETURN_IF_OOM(Compile_Error);
-
-    /*
-     * Watch for reallocation of the global slots while we were in the middle
-     * of compiling due to, e.g. standard class initialization.
-     */
-    if (globalSlots && globalObj->getRawSlots() != globalSlots)
-        return Compile_Retry;
-
-    /*
-     * Watch for GCs which occurred during compilation. These may have
-     * renumbered shapes baked into the jitcode.
-     */
-    if (cx->runtime->gcNumber != gcNumber)
-        return Compile_Retry;
-
-    /* The JIT will not have been cleared if no GC has occurred. */
-    JITScript *jit = outerJIT();
-    JS_ASSERT(jit != NULL);
-
-    if (overflowICSpace) {
-        JaegerSpew(JSpew_Scripts, "dumped a constant pool while generating an IC\n");
-        return Compile_Abort;
-    }
-
-    a->mainCodeEnd = masm.size();
-    a->stubCodeEnd = stubcc.size();
-
-    for (size_t i = 0; i < branchPatches.length(); i++) {
-        Label label = labelOf(branchPatches[i].pc, branchPatches[i].inlineIndex);
-        branchPatches[i].jump.linkTo(label, &masm);
-    }
-
-#ifdef JS_CPU_ARM
-    masm.forceFlushConstantPool();
-    stubcc.masm.forceFlushConstantPool();
-#endif
-    JaegerSpew(JSpew_Insns, "## Fast code (masm) size = %lu, Slow code (stubcc) size = %lu.\n",
-               (unsigned long) masm.size(), (unsigned long) stubcc.size());
-
-    /* To make inlineDoubles and oolDoubles aligned to sizeof(double) bytes,
-       MIPS adds extra sizeof(double) bytes to codeSize.  */
-    size_t codeSize = masm.size() +
-#if defined(JS_CPU_MIPS)
-                      stubcc.size() + sizeof(double) +
-#else
-                      stubcc.size() +
-#endif
-                      (masm.numDoubles() * sizeof(double)) +
-                      (stubcc.masm.numDoubles() * sizeof(double)) +
-                      jumpTableEdges.length() * sizeof(void *);
-
-    Vector<ChunkJumpTableEdge> chunkJumps(cx);
-    if (!chunkJumps.reserve(jumpTableEdges.length()))
-        return Compile_Error;
-
-    JSC::ExecutableAllocator &execAlloc = cx->runtime->execAlloc();
-    JSC::ExecutablePool *execPool;
-    uint8_t *result = (uint8_t *)execAlloc.alloc(codeSize, &execPool, JSC::JAEGER_CODE);
-    if (!result) {
-        js_ReportOutOfMemory(cx);
-        return Compile_Error;
-    }
-    JS_ASSERT(execPool);
-    JSC::ExecutableAllocator::makeWritable(result, codeSize);
-    masm.executableCopy(result);
-    stubcc.masm.executableCopy(result + masm.size());
-
-    JSC::LinkBuffer fullCode(result, codeSize, JSC::JAEGER_CODE);
-    JSC::LinkBuffer stubCode(result + masm.size(), stubcc.size(), JSC::JAEGER_CODE);
-
-    JS_ASSERT(!loop);
-
-    size_t nNmapLive = loopEntries.length();
-    for (size_t i = outerChunk.begin; i < outerChunk.end; i++) {
-        Bytecode *opinfo = analysis->maybeCode(i);
-        if (opinfo && opinfo->safePoint)
-            nNmapLive++;
-    }
-
-    /* Please keep in sync with JITChunk::sizeOfIncludingThis! */
-    size_t dataSize = sizeof(JITChunk) +
-                      sizeof(NativeMapEntry) * nNmapLive +
-                      sizeof(InlineFrame) * inlineFrames.length() +
-                      sizeof(CallSite) * callSites.length() +
-                      sizeof(CompileTrigger) * compileTriggers.length() +
-                      sizeof(JSObject*) * rootedTemplates.length() +
-                      sizeof(RegExpShared*) * rootedRegExps.length() +
-                      sizeof(uint32_t) * monitoredBytecodes.length() +
-                      sizeof(uint32_t) * typeBarrierBytecodes.length() +
-#if defined JS_MONOIC
-                      sizeof(ic::GetGlobalNameIC) * getGlobalNames.length() +
-                      sizeof(ic::SetGlobalNameIC) * setGlobalNames.length() +
-                      sizeof(ic::CallICInfo) * callICs.length() +
-                      sizeof(ic::EqualityICInfo) * equalityICs.length() +
-#endif
-#if defined JS_POLYIC
-                      sizeof(ic::PICInfo) * pics.length() +
-                      sizeof(ic::GetElementIC) * getElemICs.length() +
-                      sizeof(ic::SetElementIC) * setElemICs.length() +
-#endif
-                      0;
-
-    uint8_t *cursor = (uint8_t *)js_calloc(dataSize);
-    if (!cursor) {
-        execPool->release();
-        js_ReportOutOfMemory(cx);
-        return Compile_Error;
-    }
-
-    JITChunk *chunk = new(cursor) JITChunk;
-    cursor += sizeof(JITChunk);
-
-    JS_ASSERT(outerScript == script_);
-
-    chunk->code = JSC::MacroAssemblerCodeRef(result, execPool, masm.size() + stubcc.size());
-    chunk->pcLengths = pcLengths;
-
-    if (chunkIndex == 0) {
-        jit->invokeEntry = result;
-        if (script_->function()) {
-            jit->arityCheckEntry = stubCode.locationOf(arityLabel).executableAddress();
-            jit->argsCheckEntry = stubCode.locationOf(argsCheckLabel).executableAddress();
-            jit->fastEntry = fullCode.locationOf(fastEntryLabel).executableAddress();
-        }
-    }
-
-    /*
-     * WARNING: mics(), callICs() et al depend on the ordering of these
-     * variable-length sections.  See JITChunk's declaration for details.
-     */
-
-    /* ICs can only refer to bytecodes in the outermost script, not inlined calls. */
-    Label *jumpMap = a->jumpMap;
-
-    /* Build the pc -> ncode mapping. */
-    NativeMapEntry *jitNmap = (NativeMapEntry *)cursor;
-    chunk->nNmapPairs = nNmapLive;
-    cursor += sizeof(NativeMapEntry) * chunk->nNmapPairs;
-    size_t ix = 0;
-    if (chunk->nNmapPairs > 0) {
-        for (size_t i = outerChunk.begin; i < outerChunk.end; i++) {
-            Bytecode *opinfo = analysis->maybeCode(i);
-            if (opinfo && opinfo->safePoint) {
-                Label L = jumpMap[i];
-                JS_ASSERT(L.isSet());
-                jitNmap[ix].bcOff = i;
-                jitNmap[ix].ncode = (uint8_t *)(result + masm.distanceOf(L));
-                ix++;
-            }
-        }
-        for (size_t i = 0; i < loopEntries.length(); i++) {
-            /* Insert the entry at the right position. */
-            const LoopEntry &entry = loopEntries[i];
-            size_t j;
-            for (j = 0; j < ix; j++) {
-                if (jitNmap[j].bcOff > entry.pcOffset) {
-                    memmove(jitNmap + j + 1, jitNmap + j, (ix - j) * sizeof(NativeMapEntry));
-                    break;
-                }
-            }
-            jitNmap[j].bcOff = entry.pcOffset;
-            jitNmap[j].ncode = (uint8_t *) stubCode.locationOf(entry.label).executableAddress();
-            ix++;
-        }
-    }
-    JS_ASSERT(ix == chunk->nNmapPairs);
-
-    /* Build the table of inlined frames. */
-    InlineFrame *jitInlineFrames = (InlineFrame *)cursor;
-    chunk->nInlineFrames = inlineFrames.length();
-    cursor += sizeof(InlineFrame) * chunk->nInlineFrames;
-    for (size_t i = 0; i < chunk->nInlineFrames; i++) {
-        InlineFrame &to = jitInlineFrames[i];
-        ActiveFrame *from = inlineFrames[i];
-        if (from->parent != outer)
-            to.parent = &jitInlineFrames[from->parent->inlineIndex];
-        else
-            to.parent = NULL;
-        to.parentpc = from->parentPC;
-        to.fun = from->script->function();
-        to.depth = ssa.getFrame(from->inlineIndex).depth;
-    }
-
-    /* Build the table of call sites. */
-    CallSite *jitCallSites = (CallSite *)cursor;
-    chunk->nCallSites = callSites.length();
-    cursor += sizeof(CallSite) * chunk->nCallSites;
-    for (size_t i = 0; i < chunk->nCallSites; i++) {
-        CallSite &to = jitCallSites[i];
-        InternalCallSite &from = callSites[i];
-
-        /* Patch stores of f.regs.inlined for stubs called from within inline frames. */
-        if (cx->typeInferenceEnabled() &&
-            from.rejoin != REJOIN_TRAP &&
-            from.rejoin != REJOIN_SCRIPTED &&
-            from.inlineIndex != UINT32_MAX) {
-            if (from.ool)
-                stubCode.patch(from.inlinePatch, &to);
-            else
-                fullCode.patch(from.inlinePatch, &to);
-        }
-
-        JSScript *script =
-            (from.inlineIndex == UINT32_MAX) ? outerScript.get()
-                                             : inlineFrames[from.inlineIndex]->script;
-        uint32_t codeOffset = from.ool
-                            ? masm.size() + from.returnOffset
-                            : from.returnOffset;
-        to.initialize(codeOffset, from.inlineIndex, from.inlinepc - script->code, from.rejoin);
-
-        /*
-         * Patch stores of the base call's return address for InvariantFailure
-         * calls. InvariantFailure will patch its own return address to this
-         * pointer before triggering recompilation.
-         */
-        if (from.loopPatch.hasPatch)
-            stubCode.patch(from.loopPatch.codePatch, result + codeOffset);
-    }
-
-    CompileTrigger *jitCompileTriggers = (CompileTrigger *)cursor;
-    chunk->nCompileTriggers = compileTriggers.length();
-    cursor += sizeof(CompileTrigger) * chunk->nCompileTriggers;
-    for (size_t i = 0; i < chunk->nCompileTriggers; i++) {
-        const InternalCompileTrigger &trigger = compileTriggers[i];
-        jitCompileTriggers[i].initialize(trigger.pc - outerScript->code,
-                                         fullCode.locationOf(trigger.inlineJump),
-                                         stubCode.locationOf(trigger.stubLabel));
-    }
-
-    JSObject **jitRootedTemplates = (JSObject **)cursor;
-    chunk->nRootedTemplates = rootedTemplates.length();
-    cursor += sizeof(JSObject*) * chunk->nRootedTemplates;
-    for (size_t i = 0; i < chunk->nRootedTemplates; i++)
-        jitRootedTemplates[i] = rootedTemplates[i];
-
-    RegExpShared **jitRootedRegExps = (RegExpShared **)cursor;
-    chunk->nRootedRegExps = rootedRegExps.length();
-    cursor += sizeof(RegExpShared*) * chunk->nRootedRegExps;
-    for (size_t i = 0; i < chunk->nRootedRegExps; i++) {
-        jitRootedRegExps[i] = rootedRegExps[i];
-        jitRootedRegExps[i]->incRef();
-    }
-
-    uint32_t *jitMonitoredBytecodes = (uint32_t *)cursor;
-    chunk->nMonitoredBytecodes = monitoredBytecodes.length();
-    cursor += sizeof(uint32_t) * chunk->nMonitoredBytecodes;
-    for (size_t i = 0; i < chunk->nMonitoredBytecodes; i++)
-        jitMonitoredBytecodes[i] = monitoredBytecodes[i];
-
-    uint32_t *jitTypeBarrierBytecodes = (uint32_t *)cursor;
-    chunk->nTypeBarrierBytecodes = typeBarrierBytecodes.length();
-    cursor += sizeof(uint32_t) * chunk->nTypeBarrierBytecodes;
-    for (size_t i = 0; i < chunk->nTypeBarrierBytecodes; i++)
-        jitTypeBarrierBytecodes[i] = typeBarrierBytecodes[i];
-
-#if defined JS_MONOIC
-    if (chunkIndex == 0 && script_->function()) {
-        JS_ASSERT(jit->argsCheckPool == NULL);
-        if (cx->typeInferenceEnabled()) {
-            jit->argsCheckStub = stubCode.locationOf(argsCheckStub);
-            jit->argsCheckFallthrough = stubCode.locationOf(argsCheckFallthrough);
-            jit->argsCheckJump = stubCode.locationOf(argsCheckJump);
-        }
-    }
-
-    ic::GetGlobalNameIC *getGlobalNames_ = (ic::GetGlobalNameIC *)cursor;
-    chunk->nGetGlobalNames = getGlobalNames.length();
-    cursor += sizeof(ic::GetGlobalNameIC) * chunk->nGetGlobalNames;
-    for (size_t i = 0; i < chunk->nGetGlobalNames; i++) {
-        ic::GetGlobalNameIC &to = getGlobalNames_[i];
-        GetGlobalNameICInfo &from = getGlobalNames[i];
-        from.copyTo(to, fullCode, stubCode);
-
-        int offset = fullCode.locationOf(from.load) - to.fastPathStart;
-        to.loadStoreOffset = offset;
-        JS_ASSERT(to.loadStoreOffset == offset);
-
-        stubCode.patch(from.addrLabel, &to);
-    }
-
-    ic::SetGlobalNameIC *setGlobalNames_ = (ic::SetGlobalNameIC *)cursor;
-    chunk->nSetGlobalNames = setGlobalNames.length();
-    cursor += sizeof(ic::SetGlobalNameIC) * chunk->nSetGlobalNames;
-    for (size_t i = 0; i < chunk->nSetGlobalNames; i++) {
-        ic::SetGlobalNameIC &to = setGlobalNames_[i];
-        SetGlobalNameICInfo &from = setGlobalNames[i];
-        from.copyTo(to, fullCode, stubCode);
-        to.slowPathStart = stubCode.locationOf(from.slowPathStart);
-
-        int offset = fullCode.locationOf(from.store).labelAtOffset(0) -
-                     to.fastPathStart;
-        to.loadStoreOffset = offset;
-        JS_ASSERT(to.loadStoreOffset == offset);
-
-        to.objConst = from.objConst;
-        to.shapeReg = from.shapeReg;
-        to.objReg = from.objReg;
-        to.vr = from.vr;
-
-        offset = fullCode.locationOf(from.shapeGuardJump) -
-                 to.fastPathStart;
-        to.inlineShapeJump = offset;
-        JS_ASSERT(to.inlineShapeJump == offset);
-
-        offset = fullCode.locationOf(from.fastPathRejoin) -
-                 to.fastPathStart;
-        to.fastRejoinOffset = offset;
-        JS_ASSERT(to.fastRejoinOffset == offset);
-
-        stubCode.patch(from.addrLabel, &to);
-    }
-
-    ic::CallICInfo *jitCallICs = (ic::CallICInfo *)cursor;
-    chunk->nCallICs = callICs.length();
-    cursor += sizeof(ic::CallICInfo) * chunk->nCallICs;
-    for (size_t i = 0; i < chunk->nCallICs; i++) {
-        jitCallICs[i].funGuardLabel = fullCode.locationOf(callICs[i].funGuardLabel);
-        jitCallICs[i].funGuard = fullCode.locationOf(callICs[i].funGuard);
-        jitCallICs[i].funJump = fullCode.locationOf(callICs[i].funJump);
-        jitCallICs[i].slowPathStart = stubCode.locationOf(callICs[i].slowPathStart);
-        jitCallICs[i].typeMonitored = callICs[i].typeMonitored;
-
-        /* Compute the hot call offset. */
-        uint32_t offset = fullCode.locationOf(callICs[i].hotJump) -
-                        fullCode.locationOf(callICs[i].funGuard);
-        jitCallICs[i].hotJumpOffset = offset;
-        JS_ASSERT(jitCallICs[i].hotJumpOffset == offset);
-
-        /* Compute the join point offset. */
-        offset = fullCode.locationOf(callICs[i].joinPoint) -
-                 fullCode.locationOf(callICs[i].funGuard);
-        jitCallICs[i].joinPointOffset = offset;
-        JS_ASSERT(jitCallICs[i].joinPointOffset == offset);
-
-        offset = fullCode.locationOf(callICs[i].ionJoinPoint) -
-                 fullCode.locationOf(callICs[i].funGuard);
-        jitCallICs[i].ionJoinOffset = offset;
-        JS_ASSERT(jitCallICs[i].ionJoinOffset == offset);
-
-        /* Compute the OOL call offset. */
-        offset = stubCode.locationOf(callICs[i].oolCall) -
-                 stubCode.locationOf(callICs[i].slowPathStart);
-        jitCallICs[i].oolCallOffset = offset;
-        JS_ASSERT(jitCallICs[i].oolCallOffset == offset);
-
-        /* Compute the OOL jump offset. */
-        offset = stubCode.locationOf(callICs[i].oolJump) -
-                 stubCode.locationOf(callICs[i].slowPathStart);
-        jitCallICs[i].oolJumpOffset = offset;
-        JS_ASSERT(jitCallICs[i].oolJumpOffset == offset);
-
-        /* Compute the start of the OOL IC call. */
-        offset = stubCode.locationOf(callICs[i].icCall) -
-                 stubCode.locationOf(callICs[i].slowPathStart);
-        jitCallICs[i].icCallOffset = offset;
-        JS_ASSERT(jitCallICs[i].icCallOffset == offset);
-
-        /* Compute the slow join point offset. */
-        offset = stubCode.locationOf(callICs[i].slowJoinPoint) -
-                 stubCode.locationOf(callICs[i].slowPathStart);
-        jitCallICs[i].slowJoinOffset = offset;
-        JS_ASSERT(jitCallICs[i].slowJoinOffset == offset);
-
-        /* Compute the join point offset for continuing on the hot path. */
-        offset = stubCode.locationOf(callICs[i].hotPathLabel) -
-                 stubCode.locationOf(callICs[i].funGuard);
-        jitCallICs[i].hotPathOffset = offset;
-        JS_ASSERT(jitCallICs[i].hotPathOffset == offset);
-
-        jitCallICs[i].call = &jitCallSites[callICs[i].callIndex];
-        jitCallICs[i].frameSize = callICs[i].frameSize;
-        jitCallICs[i].funObjReg = callICs[i].funObjReg;
-        stubCode.patch(callICs[i].addrLabel1, &jitCallICs[i]);
-        stubCode.patch(callICs[i].addrLabel2, &jitCallICs[i]);
-    }
-
-    ic::EqualityICInfo *jitEqualityICs = (ic::EqualityICInfo *)cursor;
-    chunk->nEqualityICs = equalityICs.length();
-    cursor += sizeof(ic::EqualityICInfo) * chunk->nEqualityICs;
-    for (size_t i = 0; i < chunk->nEqualityICs; i++) {
-        if (equalityICs[i].trampoline) {
-            jitEqualityICs[i].target = stubCode.locationOf(equalityICs[i].trampolineStart);
-        } else {
-            uint32_t offs = uint32_t(equalityICs[i].jumpTarget - script_->code);
-            JS_ASSERT(jumpMap[offs].isSet());
-            jitEqualityICs[i].target = fullCode.locationOf(jumpMap[offs]);
-        }
-        jitEqualityICs[i].stubEntry = stubCode.locationOf(equalityICs[i].stubEntry);
-        jitEqualityICs[i].stubCall = stubCode.locationOf(equalityICs[i].stubCall);
-        jitEqualityICs[i].stub = equalityICs[i].stub;
-        jitEqualityICs[i].lvr = equalityICs[i].lvr;
-        jitEqualityICs[i].rvr = equalityICs[i].rvr;
-        jitEqualityICs[i].tempReg = equalityICs[i].tempReg;
-        jitEqualityICs[i].cond = equalityICs[i].cond;
-        if (equalityICs[i].jumpToStub.isSet())
-            jitEqualityICs[i].jumpToStub = fullCode.locationOf(equalityICs[i].jumpToStub.get());
-        jitEqualityICs[i].fallThrough = fullCode.locationOf(equalityICs[i].fallThrough);
-
-        stubCode.patch(equalityICs[i].addrLabel, &jitEqualityICs[i]);
-    }
-#endif /* JS_MONOIC */
-
-    for (size_t i = 0; i < callPatches.length(); i++) {
-        CallPatchInfo &patch = callPatches[i];
-
-        CodeLocationLabel joinPoint = patch.joinSlow
-            ? stubCode.locationOf(patch.joinPoint)
-            : fullCode.locationOf(patch.joinPoint);
-
-        if (patch.hasFastNcode)
-            fullCode.patch(patch.fastNcodePatch, joinPoint);
-        if (patch.hasSlowNcode)
-            stubCode.patch(patch.slowNcodePatch, joinPoint);
-    }
-
-#ifdef JS_POLYIC
-    ic::GetElementIC *jitGetElems = (ic::GetElementIC *)cursor;
-    chunk->nGetElems = getElemICs.length();
-    cursor += sizeof(ic::GetElementIC) * chunk->nGetElems;
-    for (size_t i = 0; i < chunk->nGetElems; i++) {
-        ic::GetElementIC &to = jitGetElems[i];
-        GetElementICInfo &from = getElemICs[i];
-
-        new (&to) ic::GetElementIC();
-        from.copyTo(to, fullCode, stubCode);
-
-        to.typeReg = from.typeReg;
-        to.objReg = from.objReg;
-        to.idRemat = from.id;
-
-        if (from.typeGuard.isSet()) {
-            int inlineTypeGuard = fullCode.locationOf(from.typeGuard.get()) -
-                                  fullCode.locationOf(from.fastPathStart);
-            to.inlineTypeGuard = inlineTypeGuard;
-            JS_ASSERT(to.inlineTypeGuard == inlineTypeGuard);
-        }
-        int inlineShapeGuard = fullCode.locationOf(from.shapeGuard) -
-                               fullCode.locationOf(from.fastPathStart);
-        to.inlineShapeGuard = inlineShapeGuard;
-        JS_ASSERT(to.inlineShapeGuard == inlineShapeGuard);
-
-        stubCode.patch(from.paramAddr, &to);
-    }
-
-    ic::SetElementIC *jitSetElems = (ic::SetElementIC *)cursor;
-    chunk->nSetElems = setElemICs.length();
-    cursor += sizeof(ic::SetElementIC) * chunk->nSetElems;
-    for (size_t i = 0; i < chunk->nSetElems; i++) {
-        ic::SetElementIC &to = jitSetElems[i];
-        SetElementICInfo &from = setElemICs[i];
-
-        new (&to) ic::SetElementIC();
-        from.copyTo(to, fullCode, stubCode);
-
-        to.strictMode = script_->strict;
-        to.vr = from.vr;
-        to.objReg = from.objReg;
-        to.objRemat = from.objRemat.toInt32();
-        JS_ASSERT(to.objRemat == from.objRemat.toInt32());
-
-        to.hasConstantKey = from.key.isConstant();
-        if (from.key.isConstant())
-            to.keyValue = from.key.index();
-        else
-            to.keyReg = from.key.reg();
-
-        int inlineShapeGuard = fullCode.locationOf(from.shapeGuard) -
-                               fullCode.locationOf(from.fastPathStart);
-        to.inlineShapeGuard = inlineShapeGuard;
-        JS_ASSERT(to.inlineShapeGuard == inlineShapeGuard);
-
-        int inlineHoleGuard = fullCode.locationOf(from.holeGuard) -
-                               fullCode.locationOf(from.fastPathStart);
-        to.inlineHoleGuard = inlineHoleGuard;
-        JS_ASSERT(to.inlineHoleGuard == inlineHoleGuard);
-
-        CheckIsStubCall(to.slowPathCall.labelAtOffset(0));
-
-        to.volatileMask = from.volatileMask;
-        JS_ASSERT(to.volatileMask == from.volatileMask);
-
-        stubCode.patch(from.paramAddr, &to);
-    }
-
-    ic::PICInfo *jitPics = (ic::PICInfo *)cursor;
-    chunk->nPICs = pics.length();
-    cursor += sizeof(ic::PICInfo) * chunk->nPICs;
-    for (size_t i = 0; i < chunk->nPICs; i++) {
-        new (&jitPics[i]) ic::PICInfo();
-        pics[i].copyTo(jitPics[i], fullCode, stubCode);
-        pics[i].copySimpleMembersTo(jitPics[i]);
-
-        jitPics[i].shapeGuard = masm.distanceOf(pics[i].shapeGuard) -
-                                masm.distanceOf(pics[i].fastPathStart);
-        JS_ASSERT(jitPics[i].shapeGuard == masm.distanceOf(pics[i].shapeGuard) -
-                                           masm.distanceOf(pics[i].fastPathStart));
-        jitPics[i].shapeRegHasBaseShape = true;
-        jitPics[i].pc = pics[i].pc;
-
-        if (pics[i].kind == ic::PICInfo::SET) {
-            jitPics[i].u.vr = pics[i].vr;
-        } else if (pics[i].kind != ic::PICInfo::NAME) {
-            if (pics[i].hasTypeCheck) {
-                int32_t distance = stubcc.masm.distanceOf(pics[i].typeCheck) -
-                                 stubcc.masm.distanceOf(pics[i].slowPathStart);
-                JS_ASSERT(distance <= 0);
-                jitPics[i].u.get.typeCheckOffset = distance;
-            }
-        }
-        stubCode.patch(pics[i].paramAddr, &jitPics[i]);
-    }
-#endif
-
-    JS_ASSERT(size_t(cursor - (uint8_t*)chunk) == dataSize);
-    /* Use the computed size here -- we don't want slop bytes to be counted. */
-    JS_ASSERT(chunk->computedSizeOfIncludingThis() == dataSize);
-
-    /* Link fast and slow paths together. */
-    stubcc.fixCrossJumps(result, masm.size(), masm.size() + stubcc.size());
-
-#if defined(JS_CPU_MIPS)
-    /* Make sure doubleOffset is aligned to sizeof(double) bytes.  */
-    size_t doubleOffset = (((size_t)result + masm.size() + stubcc.size() +
-                            sizeof(double) - 1) & (~(sizeof(double) - 1))) -
-                          (size_t)result;
-    JS_ASSERT((((size_t)result + doubleOffset) & 7) == 0);
-#else
-    size_t doubleOffset = masm.size() + stubcc.size();
-#endif
-
-    double *inlineDoubles = (double *) (result + doubleOffset);
-    double *oolDoubles = (double*) (result + doubleOffset +
-                                    masm.numDoubles() * sizeof(double));
-
-    /* Generate jump tables. */
-    void **jumpVec = (void **)(oolDoubles + stubcc.masm.numDoubles());
-
-    for (size_t i = 0; i < jumpTableEdges.length(); i++) {
-        JumpTableEdge edge = jumpTableEdges[i];
-        if (bytecodeInChunk(script_->code + edge.target)) {
-            JS_ASSERT(jumpMap[edge.target].isSet());
-            jumpVec[i] = (void *)(result + masm.distanceOf(jumpMap[edge.target]));
-        } else {
-            ChunkJumpTableEdge nedge;
-            nedge.edge = edge;
-            nedge.jumpTableEntry = &jumpVec[i];
-            chunkJumps.infallibleAppend(nedge);
-            jumpVec[i] = NULL;
-        }
-    }
-
-    /* Patch jump table references. */
-    for (size_t i = 0; i < jumpTables.length(); i++) {
-        JumpTable &jumpTable = jumpTables[i];
-        fullCode.patch(jumpTable.label, &jumpVec[jumpTable.offsetIndex]);
-    }
-
-    /* Patch all outgoing calls. */
-    masm.finalize(fullCode, inlineDoubles);
-    stubcc.masm.finalize(stubCode, oolDoubles);
-
-    JSC::ExecutableAllocator::makeExecutable(result, masm.size() + stubcc.size());
-    JSC::ExecutableAllocator::cacheFlush(result, masm.size() + stubcc.size());
-
-    a->mainCodeStart = size_t(result);
-    a->mainCodeEnd   = size_t(result + masm.size());
-    a->stubCodeStart = a->mainCodeEnd;
-    a->stubCodeEnd   = a->mainCodeEnd + stubcc.size();
-    if (!Probes::registerMJITCode(cx, chunk,
-                                  a, (JSActiveFrame**) inlineFrames.begin())) {
-        execPool->release();
-        js_free(chunk);
-        js_ReportOutOfMemory(cx);
-        return Compile_Error;
-    }
-
-    outerChunkRef().chunk = chunk;
-
-    /* Patch all incoming and outgoing cross-chunk jumps. */
-    CrossChunkEdge *crossEdges = jit->edges();
-    for (unsigned i = 0; i < jit->nedges; i++) {
-        CrossChunkEdge &edge = crossEdges[i];
-        if (bytecodeInChunk(outerScript->code + edge.source)) {
-            JS_ASSERT(!edge.sourceJump1 && !edge.sourceJump2);
-            void *label = edge.targetLabel ? edge.targetLabel : edge.shimLabel;
-            CodeLocationLabel targetLabel(label);
-            JSOp op = JSOp(script_->code[edge.source]);
-            if (op == JSOP_TABLESWITCH) {
-                if (edge.jumpTableEntries)
-                    js_free(edge.jumpTableEntries);
-                CrossChunkEdge::JumpTableEntryVector *jumpTableEntries = NULL;
-                bool failed = false;
-                for (unsigned j = 0; j < chunkJumps.length(); j++) {
-                    ChunkJumpTableEdge nedge = chunkJumps[j];
-                    if (nedge.edge.source == edge.source && nedge.edge.target == edge.target) {
-                        if (!jumpTableEntries) {
-                            jumpTableEntries = js_new<CrossChunkEdge::JumpTableEntryVector>();
-                            if (!jumpTableEntries)
-                                failed = true;
-                        }
-                        if (!jumpTableEntries->append(nedge.jumpTableEntry))
-                            failed = true;
-                        *nedge.jumpTableEntry = label;
-                    }
-                }
-                if (failed) {
-                    execPool->release();
-                    js_free(chunk);
-                    js_ReportOutOfMemory(cx);
-                    return Compile_Error;
-                }
-                edge.jumpTableEntries = jumpTableEntries;
-            }
-            for (unsigned j = 0; j < chunkEdges.length(); j++) {
-                const OutgoingChunkEdge &oedge = chunkEdges[j];
-                if (oedge.source == edge.source && oedge.target == edge.target) {
-                    /*
-                     * Only a single edge needs to be patched; we ensured while
-                     * generating chunks that no two cross chunk edges can have
-                     * the same source and target. Note that there may not be
-                     * an edge to patch, if constant folding determined the
-                     * jump is never taken.
-                     */
-                    edge.sourceJump1 = fullCode.locationOf(oedge.fastJump).executableAddress();
-                    if (oedge.slowJump.isSet()) {
-                        edge.sourceJump2 =
-                            stubCode.locationOf(oedge.slowJump.get()).executableAddress();
-                    }
-#ifdef JS_CPU_X64
-                    edge.sourceTrampoline =
-                        stubCode.locationOf(oedge.sourceTrampoline).executableAddress();
-#endif
-                    jit->patchEdge(edge, label);
-                    break;
-                }
-            }
-        } else if (bytecodeInChunk(outerScript->code + edge.target)) {
-            JS_ASSERT(!edge.targetLabel);
-            JS_ASSERT(jumpMap[edge.target].isSet());
-            edge.targetLabel = fullCode.locationOf(jumpMap[edge.target]).executableAddress();
-            jit->patchEdge(edge, edge.targetLabel);
-        }
-    }
-
-    chunk->recompileInfo = cx->compartment->types.compiledInfo;
-    return Compile_Okay;
-}
-
-#ifdef DEBUG
-#define SPEW_OPCODE()                                                         \
-    JS_BEGIN_MACRO                                                            \
-        if (IsJaegerSpewChannelActive(JSpew_JSOps)) {                         \
-            Sprinter sprinter(cx);                                            \
-            sprinter.init();                                                  \
-            RootedScript script(cx, script_);                                 \
-            js_Disassemble1(cx, script, PC, PC - script_->code,               \
-                            JS_TRUE, &sprinter);                              \
-            JaegerSpew(JSpew_JSOps, "    %2d %s",                             \
-                       frame.stackDepth(), sprinter.string());                \
-        }                                                                     \
-    JS_END_MACRO;
-#else
-#define SPEW_OPCODE()
-#endif /* DEBUG */
-
-#define BEGIN_CASE(name)        case name:
-#define END_CASE(name)                      \
-    JS_BEGIN_MACRO                          \
-        PC += name##_LENGTH;                \
-    JS_END_MACRO;                           \
-    break;
-
-static inline void
-FixDouble(Value &val)
-{
-    if (val.isInt32())
-        val.setDouble((double)val.toInt32());
-}
-
-inline bool
-mjit::Compiler::shouldStartLoop(jsbytecode *head)
-{
-    /*
-     * Don't do loop based optimizations or register allocation for loops which
-     * span multiple chunks.
-     */
-    if (*head == JSOP_LOOPHEAD && analysis->getLoop(head)) {
-        uint32_t backedge = analysis->getLoop(head)->backedge;
-        if (!bytecodeInChunk(script_->code + backedge))
-            return false;
-        return true;
-    }
-    return false;
-}
-
-CompileStatus
-mjit::Compiler::generateMethod()
-{
-    SrcNoteLineScanner scanner(script_->notes(), script_->lineno);
-
-    /* For join points, whether there was fallthrough from the previous opcode. */
-    bool fallthrough = true;
-
-    /* Last bytecode processed. */
-    jsbytecode *lastPC = NULL;
-
-    if (!outerJIT())
-        return Compile_Retry;
-
-    uint32_t chunkBegin = 0, chunkEnd = script_->length;
-    if (!a->parent) {
-        const ChunkDescriptor &desc =
-            outerJIT()->chunkDescriptor(chunkIndex);
-        chunkBegin = desc.begin;
-        chunkEnd = desc.end;
-
-        while (PC != script_->code + chunkBegin) {
-            Bytecode *opinfo = analysis->maybeCode(PC);
-            if (opinfo) {
-                if (opinfo->jumpTarget) {
-                    /* Update variable types for all new values at this bytecode. */
-                    const SlotValue *newv = analysis->newValues(PC);
-                    if (newv) {
-                        while (newv->slot) {
-                            if (newv->slot < TotalSlots(script_)) {
-                                VarType &vt = a->varTypes[newv->slot];
-                                vt.setTypes(analysis->getValueTypes(newv->value));
-                            }
-                            newv++;
-                        }
-                    }
-                }
-                if (analyze::BytecodeUpdatesSlot(JSOp(*PC))) {
-                    uint32_t slot = GetBytecodeSlot(script_, PC);
-                    if (analysis->trackSlot(slot)) {
-                        VarType &vt = a->varTypes[slot];
-                        vt.setTypes(analysis->pushedTypes(PC, 0));
-                    }
-                }
-            }
-
-            PC += GetBytecodeLength(PC);
-        }
-
-        if (chunkIndex != 0) {
-            uint32_t depth = analysis->getCode(PC).stackDepth;
-            for (uint32_t i = 0; i < depth; i++)
-                frame.pushSynced(JSVAL_TYPE_UNKNOWN);
-        }
-    }
-
-    /* Use a common root to avoid frequent re-rooting. */
-    RootedPropertyName name0(cx);
-
-    for (;;) {
-        JSOp op = JSOp(*PC);
-        int trap = stubs::JSTRAP_NONE;
-
-        if (script_->hasBreakpointsAt(PC))
-            trap |= stubs::JSTRAP_TRAP;
-
-        Bytecode *opinfo = analysis->maybeCode(PC);
-
-        if (!opinfo) {
-            if (op == JSOP_STOP)
-                break;
-            if (js_CodeSpec[op].length != -1)
-                PC += js_CodeSpec[op].length;
-            else
-                PC += js_GetVariableBytecodeLength(PC);
-            continue;
-        }
-
-        if (PC >= script_->code + script_->length)
-            break;
-
-        scanner.advanceTo(PC - script_->code);
-        if (script_->stepModeEnabled() &&
-            (scanner.isLineHeader() || opinfo->jumpTarget))
-        {
-            trap |= stubs::JSTRAP_SINGLESTEP;
-        }
-
-        frame.setPC(PC);
-        frame.setInTryBlock(opinfo->inTryBlock);
-
-        if (fallthrough) {
-            /*
-             * If there is fallthrough from the previous opcode and we changed
-             * any entries into doubles for a branch at that previous op,
-             * revert those entries into integers. Similarly, if we forgot that
-             * an entry is a double then make it a double again, as the frame
-             * may have assigned it a normal register.
-             */
-            for (unsigned i = 0; i < fixedIntToDoubleEntries.length(); i++) {
-                FrameEntry *fe = frame.getSlotEntry(fixedIntToDoubleEntries[i]);
-                frame.ensureInteger(fe);
-            }
-            for (unsigned i = 0; i < fixedDoubleToAnyEntries.length(); i++) {
-                FrameEntry *fe = frame.getSlotEntry(fixedDoubleToAnyEntries[i]);
-                frame.syncAndForgetFe(fe);
-            }
-        }
-        fixedIntToDoubleEntries.clear();
-        fixedDoubleToAnyEntries.clear();
-
-        if (PC >= script_->code + chunkEnd) {
-            if (fallthrough) {
-                if (opinfo->jumpTarget)
-                    fixDoubleTypes(PC);
-                frame.syncAndForgetEverything();
-                jsbytecode *curPC = PC;
-                do {
-                    PC--;
-                } while (!analysis->maybeCode(PC));
-                if (!jumpAndRun(masm.jump(), curPC, NULL, NULL, /* fallthrough = */ true))
-                    return Compile_Error;
-                PC = curPC;
-            }
-            break;
-        }
-
-        if (opinfo->jumpTarget || trap) {
-            if (fallthrough) {
-                fixDoubleTypes(PC);
-                fixedIntToDoubleEntries.clear();
-                fixedDoubleToAnyEntries.clear();
-
-                /*
-                 * Watch for fallthrough to the head of a 'do while' loop.
-                 * We don't know what register state we will be using at the head
-                 * of the loop so sync, branch, and fix it up after the loop
-                 * has been processed.
-                 */
-                if (cx->typeInferenceEnabled() && shouldStartLoop(PC)) {
-                    frame.syncAndForgetEverything();
-                    Jump j = masm.jump();
-                    if (!startLoop(PC, j, PC))
-                        return Compile_Error;
-                } else {
-                    Label start = masm.label();
-                    if (!frame.syncForBranch(PC, Uses(0)))
-                        return Compile_Error;
-                    if (pcLengths && lastPC) {
-                        /* Track this sync code for the previous op. */
-                        size_t length = masm.size() - masm.distanceOf(start);
-                        uint32_t offset = ssa.frameLength(a->inlineIndex) + lastPC - script_->code;
-                        pcLengths[offset].codeLengthAugment += length;
-                    }
-                    JS_ASSERT(frame.consistentRegisters(PC));
-                }
-            }
-
-            if (!frame.discardForJoin(analysis->getAllocation(PC), opinfo->stackDepth))
-                return Compile_Error;
-            updateJoinVarTypes();
-            fallthrough = true;
-
-            if (!cx->typeInferenceEnabled()) {
-                /* All join points have synced state if we aren't doing cross-branch regalloc. */
-                opinfo->safePoint = true;
-            }
-        } else if (opinfo->safePoint) {
-            frame.syncAndForgetEverything();
-        }
-        frame.assertValidRegisterState();
-        a->jumpMap[uint32_t(PC - script_->code)] = masm.label();
-
-        if (cx->typeInferenceEnabled() && opinfo->safePoint) {
-            /*
-             * We may have come in from a table switch, which does not watch
-             * for the new types introduced for variables at each dispatch
-             * target. Make sure that new SSA values at this safe point with
-             * double type have the correct in memory representation.
-             */
-            const SlotValue *newv = analysis->newValues(PC);
-            if (newv) {
-                while (newv->slot) {
-                    if (newv->value.kind() == SSAValue::PHI &&
-                        newv->value.phiOffset() == uint32_t(PC - script_->code) &&
-                        analysis->trackSlot(newv->slot) &&
-                        a->varTypes[newv->slot].getTypeTag() == JSVAL_TYPE_DOUBLE) {
-                        FrameEntry *fe = frame.getSlotEntry(newv->slot);
-                        masm.ensureInMemoryDouble(frame.addressOf(fe));
-                    }
-                    newv++;
-                }
-            }
-        }
-
-        // Now that we have the PC's register allocation, make sure it gets
-        // explicitly updated if this is the loop entry and new loop registers
-        // are allocated later on.
-        if (loop && !a->parent)
-            loop->setOuterPC(PC);
-
-        SPEW_OPCODE();
-        JS_ASSERT(frame.stackDepth() == opinfo->stackDepth);
-
-        if (op == JSOP_LOOPHEAD && analysis->getLoop(PC)) {
-            jsbytecode *backedge = script_->code + analysis->getLoop(PC)->backedge;
-            if (!bytecodeInChunk(backedge)){
-                for (uint32_t slot = ArgSlot(0); slot < TotalSlots(script_); slot++) {
-                    if (a->varTypes[slot].getTypeTag() == JSVAL_TYPE_DOUBLE) {
-                        FrameEntry *fe = frame.getSlotEntry(slot);
-                        masm.ensureInMemoryDouble(frame.addressOf(fe));
-                    }
-                }
-            }
-        }
-
-        // If this is an exception entry point, then jsl_InternalThrow has set
-        // VMFrame::fp to the correct fp for the entry point. We need to copy
-        // that value here to FpReg so that FpReg also has the correct sp.
-        // Otherwise, we would simply be using a stale FpReg value.
-        if (op == JSOP_ENTERBLOCK && analysis->getCode(PC).exceptionEntry)
-            masm.loadPtr(FrameAddress(VMFrame::offsetOfFp), JSFrameReg);
-
-        if (trap) {
-            prepareStubCall(Uses(0));
-            masm.move(Imm32(trap), Registers::ArgReg1);
-            Call cl = emitStubCall(JS_FUNC_TO_DATA_PTR(void *, stubs::Trap), NULL);
-            InternalCallSite site(masm.callReturnOffset(cl), a->inlineIndex, PC,
-                                  REJOIN_TRAP, false);
-            addCallSite(site);
-        }
-
-        /* Don't compile fat opcodes, run the decomposed version instead. */
-        if (js_CodeSpec[op].format & JOF_DECOMPOSE) {
-            PC += js_CodeSpec[op].length;
-            continue;
-        }
-
-        Label inlineStart = masm.label();
-        Label stubStart = stubcc.masm.label();
-        bool countsUpdated = false;
-        bool arithUpdated = false;
-
-        JSValueType arithFirstUseType = JSVAL_TYPE_UNKNOWN;
-        JSValueType arithSecondUseType = JSVAL_TYPE_UNKNOWN;
-        if (script_->hasScriptCounts && !!(js_CodeSpec[op].format & JOF_ARITH)) {
-            if (GetUseCount(script_, PC - script_->code) == 1) {
-                FrameEntry *use = frame.peek(-1);
-                /*
-                 * Pretend it's a binary operation and the second operand has
-                 * the same type as the first one.
-                 */
-                if (use->isTypeKnown())
-                    arithFirstUseType = arithSecondUseType = use->getKnownType();
-            } else {
-                FrameEntry *use = frame.peek(-1);
-                if (use->isTypeKnown())
-                    arithFirstUseType = use->getKnownType();
-                use = frame.peek(-2);
-                if (use->isTypeKnown())
-                    arithSecondUseType = use->getKnownType();
-            }
-        }
-
-        /*
-         * Update PC counts for jump opcodes at their start, so that we don't
-         * miss them when taking the jump. This is delayed for other opcodes,
-         * as we want to skip updating for ops we didn't generate any code for.
-         */
-        if (script_->hasScriptCounts && JOF_OPTYPE(op) == JOF_JUMP)
-            updatePCCounts(PC, &countsUpdated);
-
-    /**********************
-     * BEGIN COMPILER OPS *
-     **********************/
-
-        lastPC = PC;
-
-        switch (op) {
-          BEGIN_CASE(JSOP_NOP)
-          END_CASE(JSOP_NOP)
-
-          BEGIN_CASE(JSOP_NOTEARG)
-          END_CASE(JSOP_NOTEARG)
-
-          BEGIN_CASE(JSOP_UNDEFINED)
-            frame.push(UndefinedValue());
-          END_CASE(JSOP_UNDEFINED)
-
-          BEGIN_CASE(JSOP_POPV)
-          BEGIN_CASE(JSOP_SETRVAL)
-          {
-            RegisterID reg = frame.allocReg();
-            masm.load32(FrameFlagsAddress(), reg);
-            masm.or32(Imm32(StackFrame::HAS_RVAL), reg);
-            masm.store32(reg, FrameFlagsAddress());
-            frame.freeReg(reg);
-
-            /* Scripts which write to the frame's return slot aren't inlined. */
-            JS_ASSERT(a == outer);
-
-            FrameEntry *fe = frame.peek(-1);
-            frame.storeTo(fe, Address(JSFrameReg, StackFrame::offsetOfReturnValue()), true);
-            frame.pop();
-          }
-          END_CASE(JSOP_POPV)
-
-          BEGIN_CASE(JSOP_RETURN)
-            if (script_->hasScriptCounts)
-                updatePCCounts(PC, &countsUpdated);
-            emitReturn(frame.peek(-1));
-            fallthrough = false;
-          END_CASE(JSOP_RETURN)
-
-          BEGIN_CASE(JSOP_GOTO)
-          BEGIN_CASE(JSOP_DEFAULT)
-          {
-            unsigned targetOffset = FollowBranch(cx, script_, PC - script_->code);
-            jsbytecode *target = script_->code + targetOffset;
-
-            fixDoubleTypes(target);
-
-            /*
-             * Watch for gotos which are entering a 'for' or 'while' loop.
-             * These jump to the loop condition test and are immediately
-             * followed by the head of the loop.
-             */
-            jsbytecode *next = PC + js_CodeSpec[op].length;
-            if (cx->typeInferenceEnabled() &&
-                analysis->maybeCode(next) &&
-                shouldStartLoop(next))
-            {
-                frame.syncAndForgetEverything();
-                Jump j = masm.jump();
-                if (!startLoop(next, j, target))
-                    return Compile_Error;
-            } else {
-                if (!frame.syncForBranch(target, Uses(0)))
-                    return Compile_Error;
-                Jump j = masm.jump();
-                if (!jumpAndRun(j, target))
-                    return Compile_Error;
-            }
-            fallthrough = false;
-            PC += js_CodeSpec[op].length;
-            break;
-          }
-          END_CASE(JSOP_GOTO)
-
-          BEGIN_CASE(JSOP_IFEQ)
-          BEGIN_CASE(JSOP_IFNE)
-          {
-            jsbytecode *target = PC + GET_JUMP_OFFSET(PC);
-            fixDoubleTypes(target);
-            if (!jsop_ifneq(op, target))
-                return Compile_Error;
-            PC += js_CodeSpec[op].length;
-            break;
-          }
-          END_CASE(JSOP_IFNE)
-
-          BEGIN_CASE(JSOP_ARGUMENTS)
-            if (script_->needsArgsObj()) {
-                prepareStubCall(Uses(0));
-                INLINE_STUBCALL(stubs::Arguments, REJOIN_FALLTHROUGH);
-                pushSyncedEntry(0);
-            } else {
-                frame.push(MagicValue(JS_OPTIMIZED_ARGUMENTS));
-            }
-          END_CASE(JSOP_ARGUMENTS)
-
-          BEGIN_CASE(JSOP_ITERNEXT)
-            iterNext();
-          END_CASE(JSOP_ITERNEXT)
-
-          BEGIN_CASE(JSOP_DUP)
-            frame.dup();
-          END_CASE(JSOP_DUP)
-
-          BEGIN_CASE(JSOP_DUP2)
-            frame.dup2();
-          END_CASE(JSOP_DUP2)
-
-          BEGIN_CASE(JSOP_SWAP)
-            frame.dup2();
-            frame.shift(-3);
-            frame.shift(-1);
-          END_CASE(JSOP_SWAP)
-
-          BEGIN_CASE(JSOP_PICK)
-          {
-            uint32_t amt = GET_UINT8(PC);
-
-            // Push -(amt + 1), say amt == 2
-            // Stack before: X3 X2 X1
-            // Stack after:  X3 X2 X1 X3
-            frame.dupAt(-int32_t(amt + 1));
-
-            // For each item X[i...1] push it then move it down.
-            // The above would transition like so:
-            //   X3 X2 X1 X3 X2 (dupAt)
-            //   X2 X2 X1 X3    (shift)
-            //   X2 X2 X1 X3 X1 (dupAt)
-            //   X2 X1 X1 X3    (shift)
-            for (int32_t i = -int32_t(amt); i < 0; i++) {
-                frame.dupAt(i - 1);
-                frame.shift(i - 2);
-            }
-
-            // The stack looks like:
-            // Xn ... X1 X1 X{n+1}
-            // So shimmy the last value down.
-            frame.shimmy(1);
-          }
-          END_CASE(JSOP_PICK)
-
-          BEGIN_CASE(JSOP_BITOR)
-          BEGIN_CASE(JSOP_BITXOR)
-          BEGIN_CASE(JSOP_BITAND)
-            jsop_bitop(op);
-          END_CASE(JSOP_BITAND)
-
-          BEGIN_CASE(JSOP_LT)
-          BEGIN_CASE(JSOP_LE)
-          BEGIN_CASE(JSOP_GT)
-          BEGIN_CASE(JSOP_GE)
-          BEGIN_CASE(JSOP_EQ)
-          BEGIN_CASE(JSOP_NE)
-          {
-           if (script_->hasScriptCounts) {
-               updateArithCounts(PC, NULL, arithFirstUseType, arithSecondUseType);
-               arithUpdated = true;
-           }
-
-            /* Detect fusions. */
-            jsbytecode *next = &PC[JSOP_GE_LENGTH];
-            JSOp fused = JSOp(*next);
-            if ((fused != JSOP_IFEQ && fused != JSOP_IFNE) || analysis->jumpTarget(next))
-                fused = JSOP_NOP;
-
-            /* Get jump target, if any. */
-            jsbytecode *target = NULL;
-            if (fused != JSOP_NOP) {
-                if (script_->hasScriptCounts)
-                    updatePCCounts(PC, &countsUpdated);
-                target = next + GET_JUMP_OFFSET(next);
-                fixDoubleTypes(target);
-            }
-
-            BoolStub stub = NULL;
-            switch (op) {
-              case JSOP_LT:
-                stub = stubs::LessThan;
-                break;
-              case JSOP_LE:
-                stub = stubs::LessEqual;
-                break;
-              case JSOP_GT:
-                stub = stubs::GreaterThan;
-                break;
-              case JSOP_GE:
-                stub = stubs::GreaterEqual;
-                break;
-              case JSOP_EQ:
-                stub = stubs::Equal;
-                break;
-              case JSOP_NE:
-                stub = stubs::NotEqual;
-                break;
-              default:
-                JS_NOT_REACHED("WAT");
-                break;
-            }
-
-            /*
-             * We need to ensure in the target case that we always rejoin
-             * before the rval test. In the non-target case we will rejoin
-             * correctly after the op finishes.
-             */
-
-            FrameEntry *rhs = frame.peek(-1);
-            FrameEntry *lhs = frame.peek(-2);
-
-            /* Check for easy cases that the parser does not constant fold. */
-            if (lhs->isConstant() && rhs->isConstant()) {
-                /* Primitives can be trivially constant folded. */
-                const Value &lv = lhs->getValue();
-                const Value &rv = rhs->getValue();
-
-                if (lv.isPrimitive() && rv.isPrimitive()) {
-                    bool result = compareTwoValues(cx, op, lv, rv);
-
-                    frame.pop();
-                    frame.pop();
-
-                    if (!target) {
-                        frame.push(Value(BooleanValue(result)));
-                    } else {
-                        if (fused == JSOP_IFEQ)
-                            result = !result;
-                        if (!constantFoldBranch(target, result))
-                            return Compile_Error;
-                    }
-                } else {
-                    if (!emitStubCmpOp(stub, target, fused))
-                        return Compile_Error;
-                }
-            } else {
-                /* Anything else should go through the fast path generator. */
-                if (!jsop_relational(op, stub, target, fused))
-                    return Compile_Error;
-            }
-
-            /* Advance PC manually. */
-            JS_STATIC_ASSERT(JSOP_LT_LENGTH == JSOP_GE_LENGTH);
-            JS_STATIC_ASSERT(JSOP_LE_LENGTH == JSOP_GE_LENGTH);
-            JS_STATIC_ASSERT(JSOP_GT_LENGTH == JSOP_GE_LENGTH);
-            JS_STATIC_ASSERT(JSOP_EQ_LENGTH == JSOP_GE_LENGTH);
-            JS_STATIC_ASSERT(JSOP_NE_LENGTH == JSOP_GE_LENGTH);
-
-            PC += JSOP_GE_LENGTH;
-            if (fused != JSOP_NOP) {
-                SPEW_OPCODE();
-                PC += JSOP_IFNE_LENGTH;
-            }
-            break;
-          }
-          END_CASE(JSOP_GE)
-
-          BEGIN_CASE(JSOP_LSH)
-            jsop_bitop(op);
-          END_CASE(JSOP_LSH)
-
-          BEGIN_CASE(JSOP_RSH)
-            jsop_bitop(op);
-          END_CASE(JSOP_RSH)
-
-          BEGIN_CASE(JSOP_URSH)
-            jsop_bitop(op);
-          END_CASE(JSOP_URSH)
-
-          BEGIN_CASE(JSOP_ADD)
-            if (!jsop_binary(op, stubs::Add, knownPushedType(0), pushedTypeSet(0)))
-                return Compile_Retry;
-          END_CASE(JSOP_ADD)
-
-          BEGIN_CASE(JSOP_SUB)
-            if (!jsop_binary(op, stubs::Sub, knownPushedType(0), pushedTypeSet(0)))
-                return Compile_Retry;
-          END_CASE(JSOP_SUB)
-
-          BEGIN_CASE(JSOP_MUL)
-            if (!jsop_binary(op, stubs::Mul, knownPushedType(0), pushedTypeSet(0)))
-                return Compile_Retry;
-          END_CASE(JSOP_MUL)
-
-          BEGIN_CASE(JSOP_DIV)
-            if (!jsop_binary(op, stubs::Div, knownPushedType(0), pushedTypeSet(0)))
-                return Compile_Retry;
-          END_CASE(JSOP_DIV)
-
-          BEGIN_CASE(JSOP_MOD)
-            if (!jsop_mod())
-                return Compile_Retry;
-          END_CASE(JSOP_MOD)
-
-          BEGIN_CASE(JSOP_NOT)
-            jsop_not();
-          END_CASE(JSOP_NOT)
-
-          BEGIN_CASE(JSOP_BITNOT)
-          {
-            FrameEntry *top = frame.peek(-1);
-            if (top->isConstant() && top->getValue().isPrimitive()) {
-                int32_t i;
-                JS_ALWAYS_TRUE(ToInt32(cx, top->getValue(), &i));
-                i = ~i;
-                frame.pop();
-                frame.push(Int32Value(i));
-            } else {
-                jsop_bitnot();
-            }
-          }
-          END_CASE(JSOP_BITNOT)
-
-          BEGIN_CASE(JSOP_NEG)
-          {
-            FrameEntry *top = frame.peek(-1);
-            if (top->isConstant() && top->getValue().isPrimitive()) {
-                double d;
-                JS_ALWAYS_TRUE(ToNumber(cx, top->getValue(), &d));
-                d = -d;
-                Value v = NumberValue(d);
-
-                /* Watch for overflow in constant propagation. */
-                types::TypeSet *pushed = pushedTypeSet(0);
-                if (!v.isInt32() && pushed && !pushed->hasType(types::Type::DoubleType())) {
-                    RootedScript script(cx, script_);
-                    types::TypeScript::MonitorOverflow(cx, script, PC);
-                    return Compile_Retry;
-                }
-
-                frame.pop();
-                frame.push(v);
-            } else {
-                jsop_neg();
-            }
-          }
-          END_CASE(JSOP_NEG)
-
-          BEGIN_CASE(JSOP_POS)
-            jsop_pos();
-          END_CASE(JSOP_POS)
-
-          BEGIN_CASE(JSOP_DELNAME)
-          {
-            uint32_t index = GET_UINT32_INDEX(PC);
-            name0 = script_->getName(index);
-
-            prepareStubCall(Uses(0));
-            masm.move(ImmPtr(name0), Registers::ArgReg1);
-            INLINE_STUBCALL(stubs::DelName, REJOIN_FALLTHROUGH);
-            pushSyncedEntry(0);
-          }
-          END_CASE(JSOP_DELNAME)
-
-          BEGIN_CASE(JSOP_DELPROP)
-          {
-            uint32_t index = GET_UINT32_INDEX(PC);
-            name0 = script_->getName(index);
-
-            prepareStubCall(Uses(1));
-            masm.move(ImmPtr(name0), Registers::ArgReg1);
-            INLINE_STUBCALL(STRICT_VARIANT(script_, stubs::DelProp), REJOIN_FALLTHROUGH);
-            frame.pop();
-            pushSyncedEntry(0);
-          }
-          END_CASE(JSOP_DELPROP)
-
-          BEGIN_CASE(JSOP_DELELEM)
-          {
-            prepareStubCall(Uses(2));
-            INLINE_STUBCALL(STRICT_VARIANT(script_, stubs::DelElem), REJOIN_FALLTHROUGH);
-            frame.popn(2);
-            pushSyncedEntry(0);
-          }
-          END_CASE(JSOP_DELELEM)
-
-          BEGIN_CASE(JSOP_TYPEOF)
-          BEGIN_CASE(JSOP_TYPEOFEXPR)
-            jsop_typeof();
-          END_CASE(JSOP_TYPEOF)
-
-          BEGIN_CASE(JSOP_VOID)
-            frame.pop();
-            frame.push(UndefinedValue());
-          END_CASE(JSOP_VOID)
-
-          BEGIN_CASE(JSOP_GETPROP)
-          BEGIN_CASE(JSOP_CALLPROP)
-          BEGIN_CASE(JSOP_LENGTH)
-            name0 = script_->getName(GET_UINT32_INDEX(PC));
-            if (!jsop_getprop(name0, knownPushedType(0)))
-                return Compile_Error;
-          END_CASE(JSOP_GETPROP)
-
-          BEGIN_CASE(JSOP_GETELEM)
-          BEGIN_CASE(JSOP_CALLELEM)
-            if (script_->hasScriptCounts)
-                updateElemCounts(PC, frame.peek(-2), frame.peek(-1));
-            if (!jsop_getelem())
-                return Compile_Error;
-          END_CASE(JSOP_GETELEM)
-
-          BEGIN_CASE(JSOP_TOID)
-            jsop_toid();
-          END_CASE(JSOP_TOID)
-
-          BEGIN_CASE(JSOP_SETELEM)
-          {
-            if (script_->hasScriptCounts)
-                updateElemCounts(PC, frame.peek(-3), frame.peek(-2));
-            jsbytecode *next = &PC[JSOP_SETELEM_LENGTH];
-            bool pop = (JSOp(*next) == JSOP_POP && !analysis->jumpTarget(next));
-            if (!jsop_setelem(pop))
-                return Compile_Error;
-          }
-          END_CASE(JSOP_SETELEM);
-
-          BEGIN_CASE(JSOP_EVAL)
-          {
-            JaegerSpew(JSpew_Insns, " --- EVAL --- \n");
-            emitEval(GET_ARGC(PC));
-            JaegerSpew(JSpew_Insns, " --- END EVAL --- \n");
-          }
-          END_CASE(JSOP_EVAL)
-
-          BEGIN_CASE(JSOP_CALL)
-          BEGIN_CASE(JSOP_NEW)
-          BEGIN_CASE(JSOP_FUNAPPLY)
-          BEGIN_CASE(JSOP_FUNCALL)
-          {
-            bool callingNew = (op == JSOP_NEW);
-
-            bool done = false;
-            if ((op == JSOP_CALL || op == JSOP_NEW) && !monitored(PC)) {
-                CompileStatus status = inlineNativeFunction(GET_ARGC(PC), callingNew);
-                if (status == Compile_Okay)
-                    done = true;
-                else if (status != Compile_InlineAbort)
-                    return status;
-            }
-            if (!done && inlining()) {
-                CompileStatus status = inlineScriptedFunction(GET_ARGC(PC), callingNew);
-                if (status == Compile_Okay)
-                    done = true;
-                else if (status != Compile_InlineAbort)
-                    return status;
-                if (script_->hasScriptCounts) {
-                    /* Code generated while inlining has been accounted for. */
-                    countsUpdated = true;
-                }
-            }
-
-            FrameSize frameSize;
-            frameSize.initStatic(frame.totalDepth(), GET_ARGC(PC));
-
-            if (!done) {
-                JaegerSpew(JSpew_Insns, " --- SCRIPTED CALL --- \n");
-                if (!inlineCallHelper(GET_ARGC(PC), callingNew, frameSize))
-                    return Compile_Error;
-                JaegerSpew(JSpew_Insns, " --- END SCRIPTED CALL --- \n");
-            }
-          }
-          END_CASE(JSOP_CALL)
-
-          BEGIN_CASE(JSOP_NAME)
-          BEGIN_CASE(JSOP_CALLNAME)
-          {
-            name0 = script_->getName(GET_UINT32_INDEX(PC));
-            jsop_name(name0, knownPushedType(0));
-            frame.extra(frame.peek(-1)).name = name0;
-          }
-          END_CASE(JSOP_NAME)
-
-          BEGIN_CASE(JSOP_GETINTRINSIC)
-          BEGIN_CASE(JSOP_CALLINTRINSIC)
-          {
-            name0 = script_->getName(GET_UINT32_INDEX(PC));
-            if (!jsop_intrinsic(name0, knownPushedType(0)))
-                return Compile_Error;
-            frame.extra(frame.peek(-1)).name = name0;
-          }
-          END_CASE(JSOP_GETINTRINSIC)
-
-          BEGIN_CASE(JSOP_IMPLICITTHIS)
-          {
-            prepareStubCall(Uses(0));
-            masm.move(ImmPtr(script_->getName(GET_UINT32_INDEX(PC))), Registers::ArgReg1);
-            INLINE_STUBCALL(stubs::ImplicitThis, REJOIN_FALLTHROUGH);
-            frame.pushSynced(JSVAL_TYPE_UNKNOWN);
-          }
-          END_CASE(JSOP_IMPLICITTHIS)
-
-          BEGIN_CASE(JSOP_DOUBLE)
-          {
-            double d = script_->getConst(GET_UINT32_INDEX(PC)).toDouble();
-            frame.push(Value(DoubleValue(d)));
-          }
-          END_CASE(JSOP_DOUBLE)
-
-          BEGIN_CASE(JSOP_STRING)
-            frame.push(StringValue(script_->getAtom(GET_UINT32_INDEX(PC))));
-          END_CASE(JSOP_STRING)
-
-          BEGIN_CASE(JSOP_ZERO)
-            frame.push(JSVAL_ZERO);
-          END_CASE(JSOP_ZERO)
-
-          BEGIN_CASE(JSOP_ONE)
-            frame.push(JSVAL_ONE);
-          END_CASE(JSOP_ONE)
-
-          BEGIN_CASE(JSOP_NULL)
-            frame.push(NullValue());
-          END_CASE(JSOP_NULL)
-
-          BEGIN_CASE(JSOP_CALLEE)
-            frame.pushCallee();
-          END_CASE(JSOP_CALLEE)
-
-          BEGIN_CASE(JSOP_THIS)
-            jsop_this();
-          END_CASE(JSOP_THIS)
-
-          BEGIN_CASE(JSOP_FALSE)
-            frame.push(Value(BooleanValue(false)));
-          END_CASE(JSOP_FALSE)
-
-          BEGIN_CASE(JSOP_TRUE)
-            frame.push(Value(BooleanValue(true)));
-          END_CASE(JSOP_TRUE)
-
-          BEGIN_CASE(JSOP_OR)
-          BEGIN_CASE(JSOP_AND)
-          {
-            jsbytecode *target = PC + GET_JUMP_OFFSET(PC);
-            fixDoubleTypes(target);
-            if (!jsop_andor(op, target))
-                return Compile_Error;
-          }
-          END_CASE(JSOP_AND)
-
-          BEGIN_CASE(JSOP_TABLESWITCH)
-            /*
-             * Note: there is no need to syncForBranch for the various targets of
-             * switch statement. The liveness analysis has already marked these as
-             * allocated with no registers in use. There is also no need to fix
-             * double types, as we don't track types of slots in scripts with
-             * switch statements (could be fixed).
-             */
-            if (script_->hasScriptCounts)
-                updatePCCounts(PC, &countsUpdated);
-#if defined JS_CPU_ARM /* Need to implement jump(BaseIndex) for ARM */
-            frame.syncAndKillEverything();
-            masm.move(ImmPtr(PC), Registers::ArgReg1);
-
-            /* prepareStubCall() is not needed due to syncAndForgetEverything() */
-            INLINE_STUBCALL(stubs::TableSwitch, REJOIN_NONE);
-            frame.pop();
-
-            masm.jump(Registers::ReturnReg);
-#else
-            if (!jsop_tableswitch(PC))
-                return Compile_Error;
-#endif
-            PC += js_GetVariableBytecodeLength(PC);
-            break;
-          END_CASE(JSOP_TABLESWITCH)
-
-          BEGIN_CASE(JSOP_CASE)
-            // X Y
-
-            frame.dupAt(-2);
-            // X Y X
-
-            jsop_stricteq(JSOP_STRICTEQ);
-            // X cond
-
-            if (!jsop_ifneq(JSOP_IFNE, PC + GET_JUMP_OFFSET(PC)))
-                return Compile_Error;
-          END_CASE(JSOP_CASE)
-
-          BEGIN_CASE(JSOP_STRICTEQ)
-          BEGIN_CASE(JSOP_STRICTNE)
-            if (script_->hasScriptCounts) {
-                updateArithCounts(PC, NULL, arithFirstUseType, arithSecondUseType);
-                arithUpdated = true;
-            }
-            jsop_stricteq(op);
-          END_CASE(JSOP_STRICTEQ)
-
-          BEGIN_CASE(JSOP_ITER)
-            if (!iter(GET_UINT8(PC)))
-                return Compile_Error;
-          END_CASE(JSOP_ITER)
-
-          BEGIN_CASE(JSOP_MOREITER)
-          {
-            /* At the byte level, this is always fused with IFNE or IFNEX. */
-            if (script_->hasScriptCounts)
-                updatePCCounts(PC, &countsUpdated);
-            jsbytecode *target = &PC[JSOP_MOREITER_LENGTH];
-            JSOp next = JSOp(*target);
-            JS_ASSERT(next == JSOP_IFNE);
-
-            target += GET_JUMP_OFFSET(target);
-
-            fixDoubleTypes(target);
-            if (!iterMore(target))
-                return Compile_Error;
-            PC += JSOP_MOREITER_LENGTH;
-            PC += js_CodeSpec[next].length;
-            break;
-          }
-          END_CASE(JSOP_MOREITER)
-
-          BEGIN_CASE(JSOP_ENDITER)
-            iterEnd();
-          END_CASE(JSOP_ENDITER)
-
-          BEGIN_CASE(JSOP_POP)
-            frame.pop();
-          END_CASE(JSOP_POP)
-
-          BEGIN_CASE(JSOP_GETARG)
-          BEGIN_CASE(JSOP_CALLARG)
-          {
-            restoreVarType();
-            uint32_t arg = GET_SLOTNO(PC);
-            if (JSObject *singleton = pushedSingleton(0))
-                frame.push(ObjectValue(*singleton));
-            else if (script_->argsObjAliasesFormals())
-                jsop_aliasedArg(arg, /* get = */ true);
-            else
-                frame.pushArg(arg);
-          }
-          END_CASE(JSOP_GETARG)
-
-          BEGIN_CASE(JSOP_BINDGNAME)
-            jsop_bindgname();
-          END_CASE(JSOP_BINDGNAME)
-
-          BEGIN_CASE(JSOP_SETARG)
-          {
-            jsbytecode *next = &PC[JSOP_SETARG_LENGTH];
-            bool pop = JSOp(*next) == JSOP_POP && !analysis->jumpTarget(next);
-
-            uint32_t arg = GET_SLOTNO(PC);
-            if (script_->argsObjAliasesFormals())
-                jsop_aliasedArg(arg, /* get = */ false, pop);
-            else
-                frame.storeArg(arg, pop);
-
-            updateVarType();
-
-            if (pop) {
-                frame.pop();
-                PC += JSOP_SETARG_LENGTH + JSOP_POP_LENGTH;
-                break;
-            }
-          }
-          END_CASE(JSOP_SETARG)
-
-          BEGIN_CASE(JSOP_GETLOCAL)
-          BEGIN_CASE(JSOP_CALLLOCAL)
-          {
-            /*
-             * Update the var type unless we are about to pop the variable.
-             * Sync is not guaranteed for types of dead locals, and GETLOCAL
-             * followed by POP is not regarded as a use of the variable.
-             */
-            jsbytecode *next = &PC[JSOP_GETLOCAL_LENGTH];
-            if (JSOp(*next) != JSOP_POP || analysis->jumpTarget(next))
-                restoreVarType();
-            if (JSObject *singleton = pushedSingleton(0))
-                frame.push(ObjectValue(*singleton));
-            else
-                frame.pushLocal(GET_SLOTNO(PC));
-          }
-          END_CASE(JSOP_GETLOCAL)
-
-          BEGIN_CASE(JSOP_GETALIASEDVAR)
-          BEGIN_CASE(JSOP_CALLALIASEDVAR)
-            jsop_aliasedVar(ScopeCoordinate(PC), /* get = */ true);
-          END_CASE(JSOP_GETALIASEDVAR);
-
-          BEGIN_CASE(JSOP_SETLOCAL)
-          BEGIN_CASE(JSOP_SETALIASEDVAR)
-          {
-            jsbytecode *next = &PC[GetBytecodeLength(PC)];
-            bool pop = JSOp(*next) == JSOP_POP && !analysis->jumpTarget(next);
-            if (JOF_OPTYPE(*PC) == JOF_SCOPECOORD) {
-                jsop_aliasedVar(ScopeCoordinate(PC), /* get = */ false, pop);
-            } else {
-                frame.storeLocal(GET_SLOTNO(PC), pop);
-                updateVarType();
-            }
-
-            if (pop) {
-                frame.pop();
-                PC = next + JSOP_POP_LENGTH;
-                break;
-            }
-
-            PC = next;
-            break;
-          }
-          END_CASE(JSOP_SETLOCAL)
-
-          BEGIN_CASE(JSOP_UINT16)
-            frame.push(Value(Int32Value((int32_t) GET_UINT16(PC))));
-          END_CASE(JSOP_UINT16)
-
-          BEGIN_CASE(JSOP_NEWINIT)
-            if (!jsop_newinit())
-                return Compile_Error;
-          END_CASE(JSOP_NEWINIT)
-
-          BEGIN_CASE(JSOP_NEWARRAY)
-            if (!jsop_newinit())
-                return Compile_Error;
-          END_CASE(JSOP_NEWARRAY)
-
-          BEGIN_CASE(JSOP_NEWOBJECT)
-            if (!jsop_newinit())
-                return Compile_Error;
-          END_CASE(JSOP_NEWOBJECT)
-
-          BEGIN_CASE(JSOP_ENDINIT)
-          END_CASE(JSOP_ENDINIT)
-
-          BEGIN_CASE(JSOP_INITPROP)
-            jsop_initprop();
-            frame.pop();
-          END_CASE(JSOP_INITPROP)
-
-          BEGIN_CASE(JSOP_INITELEM_ARRAY)
-            jsop_initelem_array();
-          END_CASE(JSOP_INITELEM_ARRAY)
-
-          BEGIN_CASE(JSOP_INITELEM)
-            prepareStubCall(Uses(3));
-            INLINE_STUBCALL(stubs::InitElem, REJOIN_FALLTHROUGH);
-            frame.popn(2);
-          END_CASE(JSOP_INITELEM)
-
-          BEGIN_CASE(JSOP_BINDNAME)
-            name0 = script_->getName(GET_UINT32_INDEX(PC));
-            jsop_bindname(name0);
-          END_CASE(JSOP_BINDNAME)
-
-          BEGIN_CASE(JSOP_SETPROP)
-          {
-            jsbytecode *next = &PC[JSOP_SETPROP_LENGTH];
-            bool pop = JSOp(*next) == JSOP_POP && !analysis->jumpTarget(next);
-            name0 = script_->getName(GET_UINT32_INDEX(PC));
-            if (!jsop_setprop(name0, pop))
-                return Compile_Error;
-          }
-          END_CASE(JSOP_SETPROP)
-
-          BEGIN_CASE(JSOP_SETNAME)
-          {
-            jsbytecode *next = &PC[JSOP_SETNAME_LENGTH];
-            bool pop = JSOp(*next) == JSOP_POP && !analysis->jumpTarget(next);
-            name0 = script_->getName(GET_UINT32_INDEX(PC));
-            if (!jsop_setprop(name0, pop))
-                return Compile_Error;
-          }
-          END_CASE(JSOP_SETNAME)
-
-          BEGIN_CASE(JSOP_THROW)
-            prepareStubCall(Uses(1));
-            INLINE_STUBCALL(stubs::Throw, REJOIN_NONE);
-            frame.pop();
-            fallthrough = false;
-          END_CASE(JSOP_THROW)
-
-          BEGIN_CASE(JSOP_IN)
-          {
-            jsop_in();
-          }
-          END_CASE(JSOP_IN)
-
-          BEGIN_CASE(JSOP_INSTANCEOF)
-            if (!jsop_instanceof())
-                return Compile_Error;
-          END_CASE(JSOP_INSTANCEOF)
-
-          BEGIN_CASE(JSOP_EXCEPTION)
-          {
-            prepareStubCall(Uses(0));
-            INLINE_STUBCALL(stubs::Exception, REJOIN_FALLTHROUGH);
-            frame.pushSynced(JSVAL_TYPE_UNKNOWN);
-          }
-          END_CASE(JSOP_EXCEPTION)
-
-          BEGIN_CASE(JSOP_LINENO)
-          END_CASE(JSOP_LINENO)
-
-          BEGIN_CASE(JSOP_ENUMELEM)
-            // Normally, SETELEM transforms the stack
-            //  from: OBJ ID VALUE
-            //  to:   VALUE
-            //
-            // Here, the stack transition is
-            //  from: VALUE OBJ ID
-            //  to:
-            // So we make the stack look like a SETELEM, and re-use it.
-
-            // Before: VALUE OBJ ID
-            // After:  VALUE OBJ ID VALUE
-            frame.dupAt(-3);
-
-            // Before: VALUE OBJ ID VALUE
-            // After:  VALUE VALUE
-            if (!jsop_setelem(true))
-                return Compile_Error;
-
-            // Before: VALUE VALUE
-            // After:
-            frame.popn(2);
-          END_CASE(JSOP_ENUMELEM)
-
-          BEGIN_CASE(JSOP_CONDSWITCH)
-            /* No-op for the decompiler. */
-          END_CASE(JSOP_CONDSWITCH)
-
-          BEGIN_CASE(JSOP_LABEL)
-          END_CASE(JSOP_LABEL)
-
-          BEGIN_CASE(JSOP_DEFFUN)
-          {
-            JSFunction *innerFun = script_->getFunction(GET_UINT32_INDEX(PC));
-
-            prepareStubCall(Uses(0));
-            masm.move(ImmPtr(innerFun), Registers::ArgReg1);
-            INLINE_STUBCALL(STRICT_VARIANT(script_, stubs::DefFun), REJOIN_FALLTHROUGH);
-          }
-          END_CASE(JSOP_DEFFUN)
-
-          BEGIN_CASE(JSOP_DEFVAR)
-          BEGIN_CASE(JSOP_DEFCONST)
-          {
-            name0 = script_->getName(GET_UINT32_INDEX(PC));
-
-            prepareStubCall(Uses(0));
-            masm.move(ImmPtr(name0), Registers::ArgReg1);
-            INLINE_STUBCALL(stubs::DefVarOrConst, REJOIN_FALLTHROUGH);
-          }
-          END_CASE(JSOP_DEFVAR)
-
-          BEGIN_CASE(JSOP_SETCONST)
-          {
-            name0 = script_->getName(GET_UINT32_INDEX(PC));
-
-            prepareStubCall(Uses(1));
-            masm.move(ImmPtr(name0), Registers::ArgReg1);
-            INLINE_STUBCALL(stubs::SetConst, REJOIN_FALLTHROUGH);
-          }
-          END_CASE(JSOP_SETCONST)
-
-          BEGIN_CASE(JSOP_LAMBDA)
-          {
-            JSFunction *fun = script_->getFunction(GET_UINT32_INDEX(PC));
-
-            JSObjStubFun stub = stubs::Lambda;
-            uint32_t uses = 0;
-
-            prepareStubCall(Uses(uses));
-            masm.move(ImmPtr(fun), Registers::ArgReg1);
-
-            INLINE_STUBCALL(stub, REJOIN_PUSH_OBJECT);
-
-            frame.takeReg(Registers::ReturnReg);
-            frame.pushTypedPayload(JSVAL_TYPE_OBJECT, Registers::ReturnReg);
-          }
-          END_CASE(JSOP_LAMBDA)
-
-          BEGIN_CASE(JSOP_TRY)
-            frame.syncAndForgetEverything();
-          END_CASE(JSOP_TRY)
-
-          BEGIN_CASE(JSOP_RETRVAL)
-            emitReturn(NULL);
-            fallthrough = false;
-          END_CASE(JSOP_RETRVAL)
-
-          BEGIN_CASE(JSOP_GETGNAME)
-          BEGIN_CASE(JSOP_CALLGNAME)
-          {
-            uint32_t index = GET_UINT32_INDEX(PC);
-            if (!jsop_getgname(index))
-                return Compile_Error;
-            frame.extra(frame.peek(-1)).name = script_->getName(index);
-          }
-          END_CASE(JSOP_GETGNAME)
-
-          BEGIN_CASE(JSOP_SETGNAME)
-          {
-            jsbytecode *next = &PC[JSOP_SETGNAME_LENGTH];
-            bool pop = JSOp(*next) == JSOP_POP && !analysis->jumpTarget(next);
-            name0 = script_->getName(GET_UINT32_INDEX(PC));
-            if (!jsop_setgname(name0, pop))
-                return Compile_Error;
-          }
-          END_CASE(JSOP_SETGNAME)
-
-          BEGIN_CASE(JSOP_REGEXP)
-            if (!jsop_regexp())
-                return Compile_Error;
-          END_CASE(JSOP_REGEXP)
-
-          BEGIN_CASE(JSOP_OBJECT)
-          {
-            JSObject *object = script_->getObject(GET_UINT32_INDEX(PC));
-            RegisterID reg = frame.allocReg();
-            masm.move(ImmPtr(object), reg);
-            frame.pushTypedPayload(JSVAL_TYPE_OBJECT, reg);
-          }
-          END_CASE(JSOP_OBJECT)
-
-          BEGIN_CASE(JSOP_UINT24)
-            frame.push(Value(Int32Value((int32_t) GET_UINT24(PC))));
-          END_CASE(JSOP_UINT24)
-
-          BEGIN_CASE(JSOP_STOP)
-            if (script_->hasScriptCounts)
-                updatePCCounts(PC, &countsUpdated);
-            emitReturn(NULL);
-            goto done;
-          END_CASE(JSOP_STOP)
-
-          BEGIN_CASE(JSOP_GETXPROP)
-            name0 = script_->getName(GET_UINT32_INDEX(PC));
-            if (!jsop_xname(name0))
-                return Compile_Error;
-          END_CASE(JSOP_GETXPROP)
-
-          BEGIN_CASE(JSOP_ENTERBLOCK)
-          BEGIN_CASE(JSOP_ENTERLET0)
-          BEGIN_CASE(JSOP_ENTERLET1)
-            enterBlock(&script_->getObject(GET_UINT32_INDEX(PC))->asStaticBlock());
-          END_CASE(JSOP_ENTERBLOCK);
-
-          BEGIN_CASE(JSOP_LEAVEBLOCK)
-            leaveBlock();
-          END_CASE(JSOP_LEAVEBLOCK)
-
-          BEGIN_CASE(JSOP_INT8)
-            frame.push(Value(Int32Value(GET_INT8(PC))));
-          END_CASE(JSOP_INT8)
-
-          BEGIN_CASE(JSOP_INT32)
-            frame.push(Value(Int32Value(GET_INT32(PC))));
-          END_CASE(JSOP_INT32)
-
-          BEGIN_CASE(JSOP_HOLE)
-            frame.push(MagicValue(JS_ELEMENTS_HOLE));
-          END_CASE(JSOP_HOLE)
-
-          BEGIN_CASE(JSOP_LOOPHEAD)
-            if (analysis->jumpTarget(PC))
-                interruptCheckHelper();
-          END_CASE(JSOP_LOOPHEAD)
-
-          BEGIN_CASE(JSOP_LOOPENTRY)
-            // Unlike JM, IonMonkey OSR enters loops at the LOOPENTRY op.
-            // Insert the recompile check here so that we can immediately
-            // enter Ion.
-            if (loop) {
-                if (IsIonEnabled(cx))
-                    ionCompileHelper();
-                else
-                    inliningCompileHelper();
-            }
-          END_CASE(JSOP_LOOPENTRY)
-
-          BEGIN_CASE(JSOP_DEBUGGER)
-          {
-            prepareStubCall(Uses(0));
-            masm.move(ImmPtr(PC), Registers::ArgReg1);
-            INLINE_STUBCALL(stubs::DebuggerStatement, REJOIN_FALLTHROUGH);
-          }
-          END_CASE(JSOP_DEBUGGER)
-
-          default:
-            JS_NOT_REACHED("Opcode not implemented");
-        }
-
-    /**********************
-     *  END COMPILER OPS  *
-     **********************/
-
-        if (cx->typeInferenceEnabled() && PC == lastPC + GetBytecodeLength(lastPC)) {
-            /*
-             * Inform the frame of the type sets for values just pushed. Skip
-             * this if we did any opcode fusions, we don't keep track of the
-             * associated type sets in such cases.
-             */
-            unsigned nuses = GetUseCount(script_, lastPC - script_->code);
-            unsigned ndefs = GetDefCount(script_, lastPC - script_->code);
-            for (unsigned i = 0; i < ndefs; i++) {
-                FrameEntry *fe = frame.getStack(opinfo->stackDepth - nuses + i);
-                if (fe) {
-                    /* fe may be NULL for conditionally pushed entries, e.g. JSOP_AND */
-                    frame.extra(fe).types = analysis->pushedTypes(lastPC - script_->code, i);
-                }
-            }
-        }
-
-        if (script_->hasScriptCounts) {
-            size_t length = masm.size() - masm.distanceOf(inlineStart);
-            bool typesUpdated = false;
-
-            /* Update information about the type of value pushed by arithmetic ops. */
-            if ((js_CodeSpec[op].format & JOF_ARITH) && !arithUpdated) {
-                FrameEntry *pushed = NULL;
-                if (PC == lastPC + GetBytecodeLength(lastPC))
-                    pushed = frame.peek(-1);
-                updateArithCounts(lastPC, pushed, arithFirstUseType, arithSecondUseType);
-                typesUpdated = true;
-            }
-
-            /* Update information about the result type of access operations. */
-            if (PCCounts::accessOp(op) &&
-                op != JSOP_SETPROP && op != JSOP_SETELEM) {
-                FrameEntry *fe = (GetDefCount(script_, lastPC - script_->code) == 1)
-                    ? frame.peek(-1)
-                    : frame.peek(-2);
-                updatePCTypes(lastPC, fe);
-                typesUpdated = true;
-            }
-
-            if (!countsUpdated && (typesUpdated || length != 0))
-                updatePCCounts(lastPC, &countsUpdated);
-        }
-        /* Update how much code we generated for this opcode */
-        if (pcLengths) {
-            size_t length     = masm.size() - masm.distanceOf(inlineStart);
-            size_t stubLength = stubcc.size() - stubcc.masm.distanceOf(stubStart);
-            uint32_t offset   = ssa.frameLength(a->inlineIndex) + lastPC - script_->code;
-            pcLengths[offset].inlineLength += length;
-            pcLengths[offset].stubLength   += stubLength;
-        }
-
-        frame.assertValidRegisterState();
-    }
-
-  done:
-    return Compile_Okay;
-}
-
-#undef END_CASE
-#undef BEGIN_CASE
-
-void
-mjit::Compiler::updatePCCounts(jsbytecode *pc, bool *updated)
-{
-    JS_ASSERT(script_->hasScriptCounts);
-
-    Label start = masm.label();
-
-    /*
-     * Bump the METHODJIT count for the opcode, read the METHODJIT_CODE_LENGTH
-     * and METHODJIT_PICS_LENGTH counts, indicating the amounts of inline path
-     * code and generated code, respectively, and add them to the accumulated
-     * total for the op.
-     */
-    uint32_t offset = ssa.frameLength(a->inlineIndex) + pc - script_->code;
-
-    /*
-     * Base register for addresses, we can't use AbsoluteAddress in all places.
-     * This may hold a live value, so write it out to the top of the stack
-     * first. This cannot overflow the stack, as space is always reserved for
-     * an extra callee frame.
-     */
-    RegisterID reg = Registers::ReturnReg;
-    masm.storePtr(reg, frame.addressOfTop());
-
-    PCCounts counts = script_->getPCCounts(pc);
-
-    /*
-     * The inlineLength represents the actual length of the opcode generated,
-     * but this includes the instrumentation as well as other possibly not
-     * useful bytes. This extra cruft is accumulated in codeLengthAugment and
-     * will be taken out accordingly.
-     */
-    double *code = &counts.get(PCCounts::BASE_METHODJIT_CODE);
-    masm.addCount(&pcLengths[offset].inlineLength, code, reg);
-    masm.addCount(&pcLengths[offset].codeLengthAugment, code, reg);
-
-    double *pics = &counts.get(PCCounts::BASE_METHODJIT_PICS);
-    double *picsLength = &pcLengths[offset].picsLength;
-    masm.addCount(picsLength, pics, reg);
-
-    double *count = &counts.get(PCCounts::BASE_METHODJIT);
-    masm.bumpCount(count, reg);
-
-    /* Reload the base register's original value. */
-    masm.loadPtr(frame.addressOfTop(), reg);
-
-    /* The count of code executed should not reflect instrumentation as well */
-    pcLengths[offset].codeLengthAugment -= masm.size() - masm.distanceOf(start);
-
-    *updated = true;
-}
-
-static inline bool
-HasPayloadType(types::TypeSet *types)
-{
-    if (types->unknown())
-        return false;
-
-    types::TypeFlags flags = types->baseFlags();
-    bool objects = !!(flags & types::TYPE_FLAG_ANYOBJECT) || !!types->getObjectCount();
-
-    if (objects && !!(flags & types::TYPE_FLAG_STRING))
-        return false;
-
-    flags = flags & ~(types::TYPE_FLAG_ANYOBJECT | types::TYPE_FLAG_STRING);
-
-    return (flags == types::TYPE_FLAG_UNDEFINED)
-        || (flags == types::TYPE_FLAG_NULL)
-        || (flags == types::TYPE_FLAG_BOOLEAN);
-}
-
-void
-mjit::Compiler::updatePCTypes(jsbytecode *pc, FrameEntry *fe)
-{
-    JS_ASSERT(script_->hasScriptCounts);
-
-    /*
-     * Get a temporary register, as for updatePCCounts. Don't overlap with
-     * the backing store for the entry's type tag, if there is one.
-     */
-    RegisterID reg = Registers::ReturnReg;
-    if (frame.peekTypeInRegister(fe) && reg == frame.tempRegForType(fe)) {
-        JS_STATIC_ASSERT(Registers::ReturnReg != Registers::ArgReg1);
-        reg = Registers::ArgReg1;
-    }
-    masm.push(reg);
-
-    PCCounts counts = script_->getPCCounts(pc);
-
-    /* Update the counts for pushed type tags and possible access types. */
-    if (fe->isTypeKnown()) {
-        masm.bumpCount(&counts.get(PCCounts::ACCESS_MONOMORPHIC), reg);
-        PCCounts::AccessCounts count = PCCounts::ACCESS_OBJECT;
-        switch (fe->getKnownType()) {
-          case JSVAL_TYPE_UNDEFINED:  count = PCCounts::ACCESS_UNDEFINED;  break;
-          case JSVAL_TYPE_NULL:       count = PCCounts::ACCESS_NULL;       break;
-          case JSVAL_TYPE_BOOLEAN:    count = PCCounts::ACCESS_BOOLEAN;    break;
-          case JSVAL_TYPE_INT32:      count = PCCounts::ACCESS_INT32;      break;
-          case JSVAL_TYPE_DOUBLE:     count = PCCounts::ACCESS_DOUBLE;     break;
-          case JSVAL_TYPE_STRING:     count = PCCounts::ACCESS_STRING;     break;
-          case JSVAL_TYPE_OBJECT:     count = PCCounts::ACCESS_OBJECT;     break;
-          default:;
-        }
-        if (count)
-            masm.bumpCount(&counts.get(count), reg);
-    } else {
-        types::TypeSet *types = frame.extra(fe).types;
-        if (types && HasPayloadType(types))
-            masm.bumpCount(&counts.get(PCCounts::ACCESS_DIMORPHIC), reg);
-        else
-            masm.bumpCount(&counts.get(PCCounts::ACCESS_POLYMORPHIC), reg);
-
-        frame.loadTypeIntoReg(fe, reg);
-
-        Jump j = masm.testUndefined(Assembler::NotEqual, reg);
-        masm.bumpCount(&counts.get(PCCounts::ACCESS_UNDEFINED), reg);
-        frame.loadTypeIntoReg(fe, reg);
-        j.linkTo(masm.label(), &masm);
-
-        j = masm.testNull(Assembler::NotEqual, reg);
-        masm.bumpCount(&counts.get(PCCounts::ACCESS_NULL), reg);
-        frame.loadTypeIntoReg(fe, reg);
-        j.linkTo(masm.label(), &masm);
-
-        j = masm.testBoolean(Assembler::NotEqual, reg);
-        masm.bumpCount(&counts.get(PCCounts::ACCESS_BOOLEAN), reg);
-        frame.loadTypeIntoReg(fe, reg);
-        j.linkTo(masm.label(), &masm);
-
-        j = masm.testInt32(Assembler::NotEqual, reg);
-        masm.bumpCount(&counts.get(PCCounts::ACCESS_INT32), reg);
-        frame.loadTypeIntoReg(fe, reg);
-        j.linkTo(masm.label(), &masm);
-
-        j = masm.testDouble(Assembler::NotEqual, reg);
-        masm.bumpCount(&counts.get(PCCounts::ACCESS_DOUBLE), reg);
-        frame.loadTypeIntoReg(fe, reg);
-        j.linkTo(masm.label(), &masm);
-
-        j = masm.testString(Assembler::NotEqual, reg);
-        masm.bumpCount(&counts.get(PCCounts::ACCESS_STRING), reg);
-        frame.loadTypeIntoReg(fe, reg);
-        j.linkTo(masm.label(), &masm);
-
-        j = masm.testObject(Assembler::NotEqual, reg);
-        masm.bumpCount(&counts.get(PCCounts::ACCESS_OBJECT), reg);
-        frame.loadTypeIntoReg(fe, reg);
-        j.linkTo(masm.label(), &masm);
-    }
-
-    /* Update the count for accesses with type barriers. */
-    if (js_CodeSpec[*pc].format & JOF_TYPESET) {
-        double *count = &counts.get(hasTypeBarriers(pc)
-                                      ? PCCounts::ACCESS_BARRIER
-                                      : PCCounts::ACCESS_NOBARRIER);
-        masm.bumpCount(count, reg);
-    }
-
-    /* Reload the base register's original value. */
-    masm.pop(reg);
-}
-
-void
-mjit::Compiler::updateArithCounts(jsbytecode *pc, FrameEntry *fe,
-                                  JSValueType firstUseType, JSValueType secondUseType)
-{
-    JS_ASSERT(script_->hasScriptCounts);
-
-    RegisterID reg = Registers::ReturnReg;
-    masm.push(reg);
-
-    /*
-     * What count we bump for arithmetic expressions depend on the
-     * known types of its operands.
-     *
-     * ARITH_INT: operands are known ints, result is int
-     * ARITH_OVERFLOW: operands are known ints, result is double
-     * ARITH_DOUBLE: either operand is a known double, result is double
-     * ARITH_OTHER: operands are monomorphic but not int or double
-     * ARITH_UNKNOWN: operands are polymorphic
-     */
-
-    PCCounts::ArithCounts count;
-    if (firstUseType == JSVAL_TYPE_INT32 && secondUseType == JSVAL_TYPE_INT32 &&
-        (!fe || fe->isNotType(JSVAL_TYPE_DOUBLE))) {
-        count = PCCounts::ARITH_INT;
-    } else if (firstUseType == JSVAL_TYPE_INT32 || firstUseType == JSVAL_TYPE_DOUBLE ||
-               secondUseType == JSVAL_TYPE_INT32 || secondUseType == JSVAL_TYPE_DOUBLE) {
-        count = PCCounts::ARITH_DOUBLE;
-    } else if (firstUseType != JSVAL_TYPE_UNKNOWN && secondUseType != JSVAL_TYPE_UNKNOWN &&
-               (!fe || fe->isTypeKnown())) {
-        count = PCCounts::ARITH_OTHER;
-    } else {
-        count = PCCounts::ARITH_UNKNOWN;
-    }
-
-    masm.bumpCount(&script_->getPCCounts(pc).get(count), reg);
-    masm.pop(reg);
-}
-
-void
-mjit::Compiler::updateElemCounts(jsbytecode *pc, FrameEntry *obj, FrameEntry *id)
-{
-    JS_ASSERT(script_->hasScriptCounts);
-
-    RegisterID reg = Registers::ReturnReg;
-    masm.push(reg);
-
-    PCCounts counts = script_->getPCCounts(pc);
-
-    PCCounts::ElementCounts count;
-    if (id->isTypeKnown()) {
-        switch (id->getKnownType()) {
-          case JSVAL_TYPE_INT32:   count = PCCounts::ELEM_ID_INT;     break;
-          case JSVAL_TYPE_DOUBLE:  count = PCCounts::ELEM_ID_DOUBLE;  break;
-          default:                 count = PCCounts::ELEM_ID_OTHER;   break;
-        }
-    } else {
-        count = PCCounts::ELEM_ID_UNKNOWN;
-    }
-    masm.bumpCount(&counts.get(count), reg);
-
-    if (obj->mightBeType(JSVAL_TYPE_OBJECT)) {
-        types::StackTypeSet *types = frame.extra(obj).types;
-        if (types && types->getTypedArrayType() != TypedArray::TYPE_MAX) {
-            count = PCCounts::ELEM_OBJECT_TYPED;
-        } else if (types && types->getKnownClass() == &ArrayClass &&
-                   !types->hasObjectFlags(cx, types::OBJECT_FLAG_SPARSE_INDEXES |
-                                          types::OBJECT_FLAG_LENGTH_OVERFLOW)) {
-            if (!types->hasObjectFlags(cx, types::OBJECT_FLAG_NON_PACKED))
-                count = PCCounts::ELEM_OBJECT_PACKED;
-            else
-                count = PCCounts::ELEM_OBJECT_DENSE;
-        } else {
-            count = PCCounts::ELEM_OBJECT_OTHER;
-        }
-        masm.bumpCount(&counts.get(count), reg);
-    } else {
-        masm.bumpCount(&counts.get(PCCounts::ELEM_OBJECT_OTHER), reg);
-    }
-
-    masm.pop(reg);
-}
-
-void
-mjit::Compiler::bumpPropCount(jsbytecode *pc, int count)
-{
-    /* Don't accumulate counts for property ops fused with other ops. */
-    if (!(js_CodeSpec[*pc].format & JOF_PROP))
-        return;
-    RegisterID reg = Registers::ReturnReg;
-    masm.push(reg);
-    masm.bumpCount(&script_->getPCCounts(pc).get(count), reg);
-    masm.pop(reg);
-}
-
-JSC::MacroAssembler::Label
-mjit::Compiler::labelOf(jsbytecode *pc, uint32_t inlineIndex)
-{
-    ActiveFrame *a = (inlineIndex == UINT32_MAX) ? outer : inlineFrames[inlineIndex];
-    JS_ASSERT(uint32_t(pc - a->script->code) < a->script->length);
-
-    uint32_t offs = uint32_t(pc - a->script->code);
-    JS_ASSERT(a->jumpMap[offs].isSet());
-    return a->jumpMap[offs];
-}
-
-bool
-mjit::Compiler::knownJump(jsbytecode *pc)
-{
-    return pc < PC;
-}
-
-bool
-mjit::Compiler::jumpInScript(Jump j, jsbytecode *pc)
-{
-    JS_ASSERT(pc >= script_->code && uint32_t(pc - script_->code) < script_->length);
-
-    if (pc < PC) {
-        j.linkTo(a->jumpMap[uint32_t(pc - script_->code)], &masm);
-        return true;
-    }
-    return branchPatches.append(BranchPatch(j, pc, a->inlineIndex));
-}
-
-void
-mjit::Compiler::emitFinalReturn(Assembler &masm)
-{
-    masm.loadPtr(Address(JSFrameReg, StackFrame::offsetOfNcode()), Registers::ReturnReg);
-    masm.jump(Registers::ReturnReg);
-}
-
-// Emits code to load a return value of the frame into the scripted-ABI
-// type & data register pair. If the return value is in fp->rval, then |fe|
-// is NULL. Otherwise, |fe| contains the return value.
-//
-// If reading from fp->rval, |undefined| is loaded optimistically, before
-// checking if fp->rval is set in the frame flags and loading that instead.
-//
-// Otherwise, if |masm| is the inline path, it is loaded as efficiently as
-// the FrameState can manage. If |masm| is the OOL path, the value is simply
-// loaded from its slot in the frame, since the caller has guaranteed it's
-// been synced.
-//
-void
-mjit::Compiler::loadReturnValue(Assembler *masm, FrameEntry *fe)
-{
-    RegisterID typeReg = JSReturnReg_Type;
-    RegisterID dataReg = JSReturnReg_Data;
-
-    if (fe) {
-        // If using the OOL assembler, the caller signifies that the |fe| is
-        // synced, but not to rely on its register state.
-        if (masm != &this->masm) {
-            if (fe->isConstant()) {
-                stubcc.masm.loadValueAsComponents(fe->getValue(), typeReg, dataReg);
-            } else {
-                Address rval(frame.addressOf(fe));
-                if (fe->isTypeKnown() && !fe->isType(JSVAL_TYPE_DOUBLE)) {
-                    stubcc.masm.loadPayload(rval, dataReg);
-                    stubcc.masm.move(ImmType(fe->getKnownType()), typeReg);
-                } else {
-                    stubcc.masm.loadValueAsComponents(rval, typeReg, dataReg);
-                }
-            }
-        } else {
-            frame.loadForReturn(fe, typeReg, dataReg, Registers::ReturnReg);
-        }
-    } else {
-         // Load a return value from POPV or SETRVAL into the return registers,
-         // otherwise return undefined.
-        masm->loadValueAsComponents(UndefinedValue(), typeReg, dataReg);
-        if (analysis->usesReturnValue()) {
-            Jump rvalClear = masm->branchTest32(Assembler::Zero,
-                                               FrameFlagsAddress(),
-                                               Imm32(StackFrame::HAS_RVAL));
-            Address rvalAddress(JSFrameReg, StackFrame::offsetOfReturnValue());
-            masm->loadValueAsComponents(rvalAddress, typeReg, dataReg);
-            rvalClear.linkTo(masm->label(), masm);
-        }
-    }
-}
-
-// This ensures that constructor return values are an object. If a non-object
-// is returned, either explicitly or implicitly, the newly created object is
-// loaded out of the frame. Otherwise, the explicitly returned object is kept.
-//
-void
-mjit::Compiler::fixPrimitiveReturn(Assembler *masm, FrameEntry *fe)
-{
-    JS_ASSERT(isConstructing);
-
-    bool ool = (masm != &this->masm);
-    Address thisv(JSFrameReg, StackFrame::offsetOfThis(script_->function()));
-
-    // We can just load |thisv| if either of the following is true:
-    //  (1) There is no explicit return value, AND fp->rval is not used.
-    //  (2) There is an explicit return value, and it's known to be primitive.
-    if ((!fe && !analysis->usesReturnValue()) ||
-        (fe && fe->isTypeKnown() && fe->getKnownType() != JSVAL_TYPE_OBJECT))
-    {
-        if (ool)
-            masm->loadValueAsComponents(thisv, JSReturnReg_Type, JSReturnReg_Data);
-        else
-            frame.loadThisForReturn(JSReturnReg_Type, JSReturnReg_Data, Registers::ReturnReg);
-        return;
-    }
-
-    // If the type is known to be an object, just load the return value as normal.
-    if (fe && fe->isTypeKnown() && fe->getKnownType() == JSVAL_TYPE_OBJECT) {
-        loadReturnValue(masm, fe);
-        return;
-    }
-
-    // There's a return value, and its type is unknown. Test the type and load
-    // |thisv| if necessary. Sync the 'this' entry before doing so, as it may
-    // be stored in registers if we constructed it inline.
-    frame.syncThis();
-    loadReturnValue(masm, fe);
-    Jump j = masm->testObject(Assembler::Equal, JSReturnReg_Type);
-    masm->loadValueAsComponents(thisv, JSReturnReg_Type, JSReturnReg_Data);
-    j.linkTo(masm->label(), masm);
-}
-
-// Loads the return value into the scripted ABI register pair, such that JS
-// semantics in constructors are preserved.
-//
-void
-mjit::Compiler::emitReturnValue(Assembler *masm, FrameEntry *fe)
-{
-    if (isConstructing)
-        fixPrimitiveReturn(masm, fe);
-    else
-        loadReturnValue(masm, fe);
-}
-
-void
-mjit::Compiler::emitInlineReturnValue(FrameEntry *fe)
-{
-    JS_ASSERT(!isConstructing && a->needReturnValue);
-
-    if (a->syncReturnValue) {
-        /* Needed return value with unknown type, the caller's entry is synced. */
-        Address address = frame.addressForInlineReturn();
-        if (fe)
-            frame.storeTo(fe, address);
-        else
-            masm.storeValue(UndefinedValue(), address);
-        return;
-    }
-
-    /*
-     * For inlined functions that simply return an entry present in the outer
-     * script (e.g. a loop invariant term), mark the copy and propagate it
-     * after popping the frame.
-     */
-    if (!a->exitState && fe && fe->isCopy() && frame.isOuterSlot(fe->backing())) {
-        a->returnEntry = fe->backing();
-        return;
-    }
-
-    if (a->returnValueDouble) {
-        JS_ASSERT(fe);
-        frame.ensureDouble(fe);
-        Registers mask(a->returnSet
-                       ? Registers::maskReg(a->returnRegister)
-                       : Registers::AvailFPRegs);
-        FPRegisterID fpreg;
-        if (!fe->isConstant()) {
-            fpreg = frame.tempRegInMaskForData(fe, mask.freeMask).fpreg();
-            frame.syncAndForgetFe(fe, true);
-            frame.takeReg(fpreg);
-        } else {
-            fpreg = frame.allocReg(mask.freeMask).fpreg();
-            masm.slowLoadConstantDouble(fe->getValue().toDouble(), fpreg);
-        }
-        JS_ASSERT_IF(a->returnSet, fpreg == a->returnRegister.fpreg());
-        a->returnRegister = fpreg;
-    } else {
-        Registers mask(a->returnSet
-                       ? Registers::maskReg(a->returnRegister)
-                       : Registers::AvailRegs);
-        RegisterID reg;
-        if (fe && !fe->isConstant()) {
-            reg = frame.tempRegInMaskForData(fe, mask.freeMask).reg();
-            frame.syncAndForgetFe(fe, true);
-            frame.takeReg(reg);
-        } else {
-            reg = frame.allocReg(mask.freeMask).reg();
-            Value val = fe ? fe->getValue() : UndefinedValue();
-            masm.loadValuePayload(val, reg);
-        }
-        JS_ASSERT_IF(a->returnSet, reg == a->returnRegister.reg());
-        a->returnRegister = reg;
-    }
-
-    a->returnSet = true;
-    if (a->exitState)
-        a->exitState->setUnassigned(a->returnRegister);
-}
-
-void
-mjit::Compiler::emitReturn(FrameEntry *fe)
-{
-    JS_ASSERT_IF(!script_->function(), JSOp(*PC) == JSOP_STOP);
-
-    /* Only the top of the stack can be returned. */
-    JS_ASSERT_IF(fe, fe == frame.peek(-1));
-
-    if (debugMode()) {
-        /* If the return value isn't in the frame's rval slot, move it there. */
-        if (fe) {
-            frame.storeTo(fe, Address(JSFrameReg, StackFrame::offsetOfReturnValue()), true);
-
-            /* Set the frame flag indicating it's there. */
-            RegisterID reg = frame.allocReg();
-            masm.load32(FrameFlagsAddress(), reg);
-            masm.or32(Imm32(StackFrame::HAS_RVAL), reg);
-            masm.store32(reg, FrameFlagsAddress());
-            frame.freeReg(reg);
-
-            /* Use the frame's return value when generating further code. */
-            fe = NULL;
-        }
-
-        prepareStubCall(Uses(0));
-        INLINE_STUBCALL(stubs::ScriptDebugEpilogue, REJOIN_RESUME);
-    }
-
-    if (a != outer) {
-        JS_ASSERT(!debugMode());
-        profilingPopHelper();
-
-        /*
-         * Returning from an inlined script. The checks we do for inlineability
-         * and recompilation triggered by args object construction ensure that
-         * there can't be an arguments or call object.
-         */
-
-        if (a->needReturnValue)
-            emitInlineReturnValue(fe);
-
-        if (a->exitState) {
-            /*
-             * Restore the register state to reflect that at the original call,
-             * modulo entries which will be popped once the call finishes and any
-             * entry which will be clobbered by the return value register.
-             */
-            frame.syncForAllocation(a->exitState, true, Uses(0));
-        }
-
-        /*
-         * Simple tests to see if we are at the end of the script and will
-         * fallthrough after the script body finishes, thus won't need to jump.
-         */
-        bool endOfScript =
-            (JSOp(*PC) == JSOP_STOP) ||
-            (JSOp(*PC) == JSOP_RETURN &&
-             (JSOp(PC[JSOP_RETURN_LENGTH]) == JSOP_STOP &&
-              !analysis->maybeCode(PC + JSOP_RETURN_LENGTH)));
-        if (!endOfScript)
-            a->returnJumps->append(masm.jump());
-
-        if (a->returnSet)
-            frame.freeReg(a->returnRegister);
-        return;
-    }
-
-    /* Inline StackFrame::epilogue. */
-    if (debugMode()) {
-        sps.skipNextReenter();
-        prepareStubCall(Uses(0));
-        INLINE_STUBCALL(stubs::Epilogue, REJOIN_NONE);
-    } else {
-        profilingPopHelper();
-    }
-
-    emitReturnValue(&masm, fe);
-    emitFinalReturn(masm);
-
-    /*
-     * After we've placed the call object, all tracked state can be
-     * thrown away. This will happen anyway because the next live opcode (if
-     * any) must have an incoming edge. It's an optimization to throw it away
-     * early - the tracker won't be spilled on further exits or join points.
-     */
-    frame.discardFrame();
-}
-
-void
-mjit::Compiler::prepareStubCall(Uses uses)
-{
-    JaegerSpew(JSpew_Insns, " ---- STUB CALL, SYNCING FRAME ---- \n");
-    frame.syncAndKill(Registers(Registers::AvailAnyRegs), uses);
-    JaegerSpew(JSpew_Insns, " ---- FRAME SYNCING DONE ---- \n");
-}
-
-JSC::MacroAssembler::Call
-mjit::Compiler::emitStubCall(void *ptr, DataLabelPtr *pinline)
-{
-    JaegerSpew(JSpew_Insns, " ---- CALLING STUB ---- \n");
-
-    masm.bumpStubCount(script_, PC, Registers::tempCallReg());
-
-    Call cl = masm.fallibleVMCall(cx->typeInferenceEnabled(),
-                                  ptr, outerPC(), pinline, frame.totalDepth());
-    JaegerSpew(JSpew_Insns, " ---- END STUB CALL ---- \n");
-    return cl;
-}
-
-void
-mjit::Compiler::interruptCheckHelper()
-{
-    Jump jump;
-    if (cx->runtime->gcZeal() == js::gc::ZealVerifierPreValue ||
-        cx->runtime->gcZeal() == js::gc::ZealVerifierPostValue)
-    {
-        /* For barrier verification, always take the interrupt so we can verify. */
-        jump = masm.jump();
-    } else {
-        void *interrupt = (void*) &cx->runtime->interrupt;
-#if defined(JS_CPU_X86) || defined(JS_CPU_ARM) || defined(JS_CPU_MIPS)
-        jump = masm.branch32(Assembler::NotEqual, AbsoluteAddress(interrupt), Imm32(0));
-#else
-        /* Handle processors that can't load from absolute addresses. */
-        RegisterID reg = frame.allocReg();
-        masm.move(ImmPtr(interrupt), reg);
-        jump = masm.branchTest32(Assembler::NonZero, Address(reg, 0));
-        frame.freeReg(reg);
-#endif
-    }
-
-    stubcc.linkExitDirect(jump, stubcc.masm.label());
-
-    frame.sync(stubcc.masm, Uses(0));
-    stubcc.masm.move(ImmPtr(PC), Registers::ArgReg1);
-    OOL_STUBCALL(stubs::Interrupt, REJOIN_RESUME);
-    stubcc.rejoin(Changes(0));
-}
-
-static inline bool
-MaybeIonCompileable(JSContext *cx, JSScript *script, bool *recompileCheckForIon)
-{
-#ifdef JS_ION
-    *recompileCheckForIon = true;
-
-    if (!ion::IsEnabled(cx))
-        return false;
-    if (!script->canIonCompile())
-        return false;
-
-    // If this script is small, doesn't have any function calls, and doesn't have
-    // any big loops, then throwing in a recompile check and causing an invalidation
-    // when we otherwise wouldn't have would be wasteful.
-    if (script->isShortRunning())
-        *recompileCheckForIon = false;
-
-    return true;
-#endif
-    return false;
-}
-
-void
-mjit::Compiler::ionCompileHelper()
-{
-    JS_ASSERT(script_ == outerScript);
-
-    JS_ASSERT(IsIonEnabled(cx));
-    JS_ASSERT(!inlining());
-
-#ifdef JS_ION
-    if (debugMode() || !globalObj || !cx->typeInferenceEnabled() || outerScript->hasIonScript())
-        return;
-
-    bool recompileCheckForIon = false;
-    if (!MaybeIonCompileable(cx, outerScript, &recompileCheckForIon))
-        return;
-
-    uint32_t minUses = ion::UsesBeforeIonRecompile(outerScript, PC);
-
-    uint32_t *useCountAddress = script_->addressOfUseCount();
-    masm.add32(Imm32(1), AbsoluteAddress(useCountAddress));
-
-    // We cannot inline a JM -> Ion constructing call.
-    // Compiling this function is pointless and would disable the JM -> JM fastpath.
-    // This function will start running in Ion, when caller runs in Ion/Interpreter.
-    if (isConstructing && outerScript->code == PC)
-        return;
-
-    // If we don't want to do a recompileCheck for Ion, then this just needs to
-    // increment the useCount so that we know when to recompile this function
-    // from an Ion call.  No need to call out to recompiler stub.
-    if (!recompileCheckForIon)
-        return;
-
-    const void *ionScriptAddress = script_->addressOfIonScript();
-
-#ifdef JS_CPU_X64
-    // Allocate a temp register. Note that we have to do this before calling
-    // syncExitAndJump below.
-    RegisterID reg = frame.allocReg();
-#endif
-
-    InternalCompileTrigger trigger;
-    trigger.pc = PC;
-    trigger.stubLabel = stubcc.syncExitAndJump(Uses(0));
-
-    // Trigger ion compilation if (a) the script has been used enough times for
-    // this opcode, and (b) the script does not already have ion information
-    // (whether successful, failed, or in progress off thread compilation)
-    // *OR* off thread compilation is not being used.
-    //
-    // If off thread compilation is in use, we retain the CompileTrigger so
-    // that (b) can be short circuited to force a call to TriggerIonCompile
-    // (see DisableScriptAtPC).
-    //
-    // If off thread compilation is not in use, (b) is unnecessary and
-    // negatively affects tuning on some benchmarks (see bug 774253). Thus,
-    // we immediately short circuit the check for (b).
-
-    Label secondTest = stubcc.masm.label();
-
-#if defined(JS_CPU_X86) || defined(JS_CPU_ARM)
-    trigger.inlineJump = masm.branch32(Assembler::GreaterThanOrEqual,
-                                       AbsoluteAddress(useCountAddress),
-                                       Imm32(minUses));
-    Jump scriptJump = stubcc.masm.branch32(Assembler::Equal,
-                                           AbsoluteAddress((void *)ionScriptAddress),
-                                           Imm32(0));
-#elif defined(JS_CPU_X64)
-    /* Handle processors that can't load from absolute addresses. */
-    masm.move(ImmPtr(useCountAddress), reg);
-    trigger.inlineJump = masm.branch32(Assembler::GreaterThanOrEqual,
-                                       Address(reg),
-                                       Imm32(minUses));
-    stubcc.masm.move(ImmPtr((void *)ionScriptAddress), reg);
-    Jump scriptJump = stubcc.masm.branchPtr(Assembler::Equal, Address(reg), ImmPtr(NULL));
-    frame.freeReg(reg);
-#else
-#error "Unknown platform"
-#endif
-
-    stubcc.linkExitDirect(trigger.inlineJump,
-                          OffThreadCompilationEnabled(cx)
-                          ? secondTest
-                          : trigger.stubLabel);
-
-    scriptJump.linkTo(trigger.stubLabel, &stubcc.masm);
-    stubcc.crossJump(stubcc.masm.jump(), masm.label());
-
-    stubcc.leave();
-    OOL_STUBCALL(stubs::TriggerIonCompile, REJOIN_RESUME);
-    stubcc.rejoin(Changes(0));
-
-    compileTriggers.append(trigger);
-#endif /* JS_ION */
-}
-
-void
-mjit::Compiler::inliningCompileHelper()
-{
-    JS_ASSERT(!IsIonEnabled(cx));
-
-    if (inlining() || debugMode() || !globalObj ||
-        !analysis->hasFunctionCalls() || !cx->typeInferenceEnabled()) {
-        return;
-    }
-
-    uint32_t *addr = script_->addressOfUseCount();
-    masm.add32(Imm32(1), AbsoluteAddress(addr));
-#if defined(JS_CPU_X86) || defined(JS_CPU_ARM)
-    Jump jump = masm.branch32(Assembler::GreaterThanOrEqual, AbsoluteAddress(addr),
-                              Imm32(USES_BEFORE_INLINING));
-#else
-    /* Handle processors that can't load from absolute addresses. */
-    RegisterID reg = frame.allocReg();
-    masm.move(ImmPtr(addr), reg);
-    Jump jump = masm.branch32(Assembler::GreaterThanOrEqual, Address(reg, 0),
-                              Imm32(USES_BEFORE_INLINING));
-    frame.freeReg(reg);
-#endif
-    stubcc.linkExit(jump, Uses(0));
-    stubcc.leave();
-
-    OOL_STUBCALL(stubs::RecompileForInline, REJOIN_RESUME);
-    stubcc.rejoin(Changes(0));
-}
-
-CompileStatus
-mjit::Compiler::methodEntryHelper()
-{
-    if (debugMode()) {
-        prepareStubCall(Uses(0));
-        INLINE_STUBCALL(stubs::ScriptDebugPrologue, REJOIN_RESUME);
-
-    /*
-     * If necessary, call the tracking probe to trigger SPS assertions. We can
-     * only do this when not inlining because the same StackFrame instance will
-     * be used to enter a function, triggering an assertion in enterScript
-     */
-    } else if (Probes::callTrackingActive(cx) ||
-               (sps.slowAssertions() && a->inlineIndex == UINT32_MAX)) {
-        prepareStubCall(Uses(0));
-        INLINE_STUBCALL(stubs::ScriptProbeOnlyPrologue, REJOIN_RESUME);
-    } else {
-        return profilingPushHelper();
-    }
-    /* Ensure that we've flagged that the push has happened */
-    if (sps.enabled()) {
-        RegisterID reg = frame.allocReg();
-        sps.pushManual(script_, masm, reg);
-        frame.freeReg(reg);
-    }
-    return Compile_Okay;
-}
-
-CompileStatus
-mjit::Compiler::profilingPushHelper()
-{
-    if (!sps.enabled())
-        return Compile_Okay;
-    RegisterID reg = frame.allocReg();
-    if (!sps.push(cx, script_, masm, reg))
-        return Compile_Error;
-
-    /* Set the flags that we've pushed information onto the SPS stack */
-    masm.load32(FrameFlagsAddress(), reg);
-    masm.or32(Imm32(StackFrame::HAS_PUSHED_SPS_FRAME), reg);
-    masm.store32(reg, FrameFlagsAddress());
-    frame.freeReg(reg);
-
-    return Compile_Okay;
-}
-
-void
-mjit::Compiler::profilingPopHelper()
-{
-    if (Probes::callTrackingActive(cx) || sps.slowAssertions()) {
-        sps.skipNextReenter();
-        prepareStubCall(Uses(0));
-        INLINE_STUBCALL(stubs::ScriptProbeOnlyEpilogue, REJOIN_RESUME);
-    } else if (cx->runtime->spsProfiler.enabled()) {
-        RegisterID reg = frame.allocReg();
-        sps.pop(masm, reg);
-        frame.freeReg(reg);
-    }
-}
-
-void
-mjit::Compiler::addReturnSite()
-{
-    InternalCallSite site(masm.distanceOf(masm.label()), a->inlineIndex, PC,
-                          REJOIN_SCRIPTED, false);
-    addCallSite(site);
-    masm.loadPtr(Address(JSFrameReg, StackFrame::offsetOfPrev()), JSFrameReg);
-}
-
-void
-mjit::Compiler::emitUncachedCall(uint32_t argc, bool callingNew)
-{
-    CallPatchInfo callPatch;
-
-    RegisterID r0 = Registers::ReturnReg;
-    VoidPtrStubUInt32 stub = callingNew ? stubs::UncachedNew : stubs::UncachedCall;
-
-    frame.syncAndKill(Uses(argc + 2));
-    prepareStubCall(Uses(argc + 2));
-    masm.move(Imm32(argc), Registers::ArgReg1);
-    INLINE_STUBCALL(stub, REJOIN_CALL_PROLOGUE);
-
-    Jump notCompiled = masm.branchTestPtr(Assembler::Zero, r0, r0);
-
-    masm.loadPtr(FrameAddress(VMFrame::offsetOfRegsSp()), JSFrameReg);
-    callPatch.hasFastNcode = true;
-    callPatch.fastNcodePatch =
-        masm.storePtrWithPatch(ImmPtr(NULL),
-                               Address(JSFrameReg, StackFrame::offsetOfNcode()));
-
-    masm.jump(r0);
-    callPatch.joinPoint = masm.label();
-    addReturnSite();
-
-    frame.popn(argc + 2);
-
-    frame.takeReg(JSReturnReg_Type);
-    frame.takeReg(JSReturnReg_Data);
-    frame.pushRegs(JSReturnReg_Type, JSReturnReg_Data, knownPushedType(0));
-
-    BarrierState barrier = testBarrier(JSReturnReg_Type, JSReturnReg_Data,
-                                       /* testUndefined = */ false,
-                                       /* testReturn = */ true);
-
-    stubcc.linkExitDirect(notCompiled, stubcc.masm.label());
-    stubcc.rejoin(Changes(1));
-    callPatches.append(callPatch);
-
-    finishBarrier(barrier, REJOIN_FALLTHROUGH, 0);
-    if (sps.enabled()) {
-        RegisterID reg = frame.allocReg();
-        sps.reenter(masm, reg);
-        frame.freeReg(reg);
-    }
-}
-
-void
-mjit::Compiler::checkCallApplySpeculation(uint32_t argc, FrameEntry *origCallee, FrameEntry *origThis,
-                                          MaybeRegisterID origCalleeType, RegisterID origCalleeData,
-                                          MaybeRegisterID origThisType, RegisterID origThisData,
-                                          Jump *uncachedCallSlowRejoin, CallPatchInfo *uncachedCallPatch)
-{
-    JS_ASSERT(IsLowerableFunCallOrApply(PC));
-
-    RegisterID temp;
-    Registers tempRegs(Registers::AvailRegs);
-    if (origCalleeType.isSet())
-        tempRegs.takeReg(origCalleeType.reg());
-    tempRegs.takeReg(origCalleeData);
-    if (origThisType.isSet())
-        tempRegs.takeReg(origThisType.reg());
-    tempRegs.takeReg(origThisData);
-    temp = tempRegs.takeAnyReg().reg();
-
-    /*
-     * if (origCallee.isObject() &&
-     *     origCallee.toObject().isFunction &&
-     *     origCallee.toObject().toFunction() == js_fun_{call,apply})
-     */
-    MaybeJump isObj;
-    if (origCalleeType.isSet())
-        isObj = masm.testObject(Assembler::NotEqual, origCalleeType.reg());
-    Jump isFun = masm.testFunction(Assembler::NotEqual, origCalleeData, temp);
-    Native native = *PC == JSOP_FUNCALL ? js_fun_call : js_fun_apply;
-    Jump isNative = masm.branchPtr(Assembler::NotEqual,
-                                   Address(origCalleeData, JSFunction::offsetOfNativeOrScript()),
-                                   ImmPtr(JS_FUNC_TO_DATA_PTR(void *, native)));
-
-    /*
-     * If speculation fails, we can't use the ic, since it is compiled on the
-     * assumption that speculation succeeds. Instead, just do an uncached call.
-     */
-    {
-        if (isObj.isSet())
-            stubcc.linkExitDirect(isObj.getJump(), stubcc.masm.label());
-        stubcc.linkExitDirect(isFun, stubcc.masm.label());
-        stubcc.linkExitDirect(isNative, stubcc.masm.label());
-
-        stubcc.masm.move(Imm32(argc), Registers::ArgReg1);
-        JaegerSpew(JSpew_Insns, " ---- BEGIN SLOW CALL CODE ---- \n");
-        OOL_STUBCALL(stubs::SlowCall, REJOIN_FALLTHROUGH);
-        JaegerSpew(JSpew_Insns, " ---- END SLOW CALL CODE ---- \n");
-
-        /*
-         * inlineCallHelper will link uncachedCallSlowRejoin to the join point
-         * at the end of the ic. At that join point, we'll load the rval into
-         * the return registers.
-         */
-        *uncachedCallSlowRejoin = stubcc.masm.jump();
-    }
-}
-
-/* See MonoIC.cpp, CallCompiler for more information on call ICs. */
-bool
-mjit::Compiler::inlineCallHelper(uint32_t argc, bool callingNew, FrameSize &callFrameSize)
-{
-    /*
-     * Check for interrupts on function call. We don't do this for lazy
-     * arguments objects as the interrupt may kick this frame into the
-     * interpreter, which doesn't know about the apply tricks. Instead, we
-     * do the interrupt check at the start of the JSOP_ARGUMENTS.
-     */
-    interruptCheckHelper();
-    if (sps.enabled()) {
-        RegisterID reg = frame.allocReg();
-        sps.leave(PC, masm, reg);
-        frame.freeReg(reg);
-    }
-
-    FrameEntry *origCallee = frame.peek(-(int(argc) + 2));
-    FrameEntry *origThis = frame.peek(-(int(argc) + 1));
-
-    /*
-     * 'this' does not need to be synced for constructing. :FIXME: is it
-     * possible that one of the arguments is directly copying the 'this'
-     * entry (something like 'new x.f(x)')?
-     */
-    if (callingNew) {
-        frame.discardFe(origThis);
-
-        /*
-         * We store NULL here to ensure that the slot doesn't contain
-         * garbage. Additionally, we need to store a non-object value here for
-         * TI. If a GC gets triggered before the callee can fill in the slot
-         * (i.e. the GC happens on constructing the 'new' object or the call
-         * object for a heavyweight callee), it needs to be able to read the
-         * 'this' value to tell whether newScript constraints will need to be
-         * regenerated afterwards.
-         */
-        masm.storeValue(NullValue(), frame.addressOf(origThis));
-    }
-
-    /*
-     * From the presence of JSOP_FUN{CALL,APPLY}, we speculate that we are
-     * going to call js_fun_{call,apply}. Normally, this call would go through
-     * js::Invoke to ultimately call 'this'. We can do much better by having
-     * the callIC cache and call 'this' directly. However, if it turns out that
-     * we are not actually calling js_fun_call, the callIC must act as normal.
-     *
-     * Note: do *NOT* use type information or inline state in any way when
-     * deciding whether to lower a CALL or APPLY. The stub calls here store
-     * their return values in a different slot, so when recompiling we need
-     * to go down the exact same path.
-     */
-    bool lowerFunCallOrApply = IsLowerableFunCallOrApply(PC);
-
-    RootedScript script(cx, script_);
-    bool newType = callingNew && cx->typeInferenceEnabled() && types::UseNewType(cx, script, PC);
-
-#ifdef JS_MONOIC
-    if (debugMode() || newType) {
-#endif
-        emitUncachedCall(argc, callingNew);
-        return true;
-#ifdef JS_MONOIC
-    }
-
-    frame.forgetMismatchedObject(origCallee);
-    if (lowerFunCallOrApply)
-        frame.forgetMismatchedObject(origThis);
-
-    /* Initialized by both branches below. */
-    CallGenInfo     callIC;
-    CallPatchInfo   callPatch;
-    MaybeRegisterID icCalleeType; /* type to test for function-ness */
-    RegisterID      icCalleeData; /* data to call */