Merge baseline compiler branch and mozilla-central. IGNORE BAD COMMIT MESSAGES
author      Jan de Mooij <jdemooij@mozilla.com>
date        Wed, 03 Apr 2013 17:37:32 +0200
changeset   127460:79542849f3f3
parent      127040:3a5929ebc886 (current diff)
parent      127459:d8b068c9dbc1 (diff)
child       127461:b5cb88ccd907
push id     24503
push user   jandemooij@gmail.com
push date   2013-04-03 15:43 +0000
milestone   23.0a1
--- a/dom/base/nsJSEnvironment.cpp
+++ b/dom/base/nsJSEnvironment.cpp
@@ -964,16 +964,18 @@ static const char js_methodjit_always_st
 static const char js_typeinfer_str[]          = JS_OPTIONS_DOT_STR "typeinference";
 static const char js_pccounts_content_str[]   = JS_OPTIONS_DOT_STR "pccounts.content";
 static const char js_pccounts_chrome_str[]    = JS_OPTIONS_DOT_STR "pccounts.chrome";
 static const char js_jit_hardening_str[]      = JS_OPTIONS_DOT_STR "jit_hardening";
 static const char js_memlog_option_str[]      = JS_OPTIONS_DOT_STR "mem.log";
 static const char js_memnotify_option_str[]   = JS_OPTIONS_DOT_STR "mem.notify";
 static const char js_disable_explicit_compartment_gc[] =
   JS_OPTIONS_DOT_STR "mem.disable_explicit_compartment_gc";
+static const char js_baselinejit_content_str[] = JS_OPTIONS_DOT_STR "baselinejit.content";
+static const char js_baselinejit_chrome_str[]  = JS_OPTIONS_DOT_STR "baselinejit.chrome";
 static const char js_ion_content_str[]        = JS_OPTIONS_DOT_STR "ion.content";
 static const char js_asmjs_content_str[]      = JS_OPTIONS_DOT_STR "experimental_asmjs";
 static const char js_ion_parallel_compilation_str[] = JS_OPTIONS_DOT_STR "ion.parallel_compilation";
 
 int
 nsJSContext::JSOptionChangedCallback(const char *pref, void *data)
 {
   nsJSContext *context = reinterpret_cast<nsJSContext *>(data);
@@ -1005,29 +1007,33 @@ nsJSContext::JSOptionChangedCallback(con
                                                js_methodjit_chrome_str :
                                                js_methodjit_content_str);
   bool usePCCounts = Preferences::GetBool(chromeWindow || !contentWindow ?
                                             js_pccounts_chrome_str :
                                             js_pccounts_content_str);
   bool useMethodJITAlways = Preferences::GetBool(js_methodjit_always_str);
   bool useTypeInference = !chromeWindow && contentWindow && Preferences::GetBool(js_typeinfer_str);
   bool useHardening = Preferences::GetBool(js_jit_hardening_str);
+  bool useBaselineJIT = Preferences::GetBool(chromeWindow || !contentWindow ?
+                                               js_baselinejit_chrome_str :
+                                               js_baselinejit_content_str);
   bool useIon = Preferences::GetBool(js_ion_content_str);
   bool useAsmJS = Preferences::GetBool(js_asmjs_content_str);
   bool parallelIonCompilation = Preferences::GetBool(js_ion_parallel_compilation_str);
   nsCOMPtr<nsIXULRuntime> xr = do_GetService(XULRUNTIME_SERVICE_CONTRACTID);
   if (xr) {
     bool safeMode = false;
     xr->GetInSafeMode(&safeMode);
     if (safeMode) {
       useMethodJIT = false;
       usePCCounts = false;
       useTypeInference = false;
       useMethodJITAlways = true;
       useHardening = false;
+      useBaselineJIT = false;
       useIon = false;
       useAsmJS = false;
     }
   }
 
   if (useMethodJIT)
     newDefaultJSOptions |= JSOPTION_METHODJIT;
   else
@@ -1043,16 +1049,21 @@ nsJSContext::JSOptionChangedCallback(con
   else
     newDefaultJSOptions &= ~JSOPTION_METHODJIT_ALWAYS;
 
   if (useTypeInference)
     newDefaultJSOptions |= JSOPTION_TYPE_INFERENCE;
   else
     newDefaultJSOptions &= ~JSOPTION_TYPE_INFERENCE;
 
+  if (useBaselineJIT)
+    newDefaultJSOptions |= JSOPTION_BASELINE;
+  else
+    newDefaultJSOptions &= ~JSOPTION_BASELINE;
+
   if (useIon)
     newDefaultJSOptions |= JSOPTION_ION;
   else
     newDefaultJSOptions &= ~JSOPTION_ION;
 
   if (useAsmJS)
     newDefaultJSOptions |= JSOPTION_ASMJS;
   else
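
The pattern this hunk extends is the same for every engine feature: one boolean pref and one context-option bit, with JSOptionChangedCallback re-deriving the whole option word on every pref change (chrome windows read "baselinejit.chrome", content windows "baselinejit.content", and safe mode forces the value to false first). A minimal standalone sketch of the pref-to-bit step, using stand-in names rather than the real Gecko API:

    #include <cstdint>

    // Stand-in for JSOPTION_BASELINE; the bit value is illustrative only.
    static const uint32_t OPT_BASELINE = 1 << 4;

    // Set or clear a single option bit from a boolean pref value, as the
    // callback above does for baseline, Ion and asm.js.
    static uint32_t ApplyPref(uint32_t options, uint32_t bit, bool prefValue)
    {
        return prefValue ? (options | bit) : (options & ~bit);
    }

    // e.g. newOptions = ApplyPref(newOptions, OPT_BASELINE, useBaselineJIT);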
--- a/dom/workers/RuntimeService.cpp
+++ b/dom/workers/RuntimeService.cpp
@@ -148,16 +148,17 @@ MOZ_STATIC_ASSERT(NS_ARRAY_LENGTH(gStrin
 enum {
   PREF_strict = 0,
   PREF_werror,
   PREF_methodjit,
   PREF_methodjit_always,
   PREF_typeinference,
   PREF_jit_hardening,
   PREF_mem_max,
+  PREF_baselinejit,
   PREF_ion,
   PREF_asmjs,
   PREF_mem_gc_allocation_threshold_mb,
 
 #ifdef JS_GC_ZEAL
   PREF_gczeal,
 #endif
 
@@ -169,16 +170,17 @@ enum {
 const char* gPrefsToWatch[] = {
   JS_OPTIONS_DOT_STR "strict",
   JS_OPTIONS_DOT_STR "werror",
   JS_OPTIONS_DOT_STR "methodjit.content",
   JS_OPTIONS_DOT_STR "methodjit_always",
   JS_OPTIONS_DOT_STR "typeinference",
   JS_OPTIONS_DOT_STR "jit_hardening",
   JS_OPTIONS_DOT_STR "mem.max",
+  JS_OPTIONS_DOT_STR "baselinejit.content",
   JS_OPTIONS_DOT_STR "ion.content",
   JS_OPTIONS_DOT_STR "experimental_asmjs",
   "dom.workers.mem.gc_allocation_threshold_mb"
 
 #ifdef JS_GC_ZEAL
   , PREF_WORKERS_GCZEAL
 #endif
 };
@@ -222,16 +224,19 @@ PrefCallback(const char* aPrefName, void
       newOptions |= JSOPTION_METHODJIT;
     }
     if (Preferences::GetBool(gPrefsToWatch[PREF_methodjit_always])) {
       newOptions |= JSOPTION_METHODJIT_ALWAYS;
     }
     if (Preferences::GetBool(gPrefsToWatch[PREF_typeinference])) {
       newOptions |= JSOPTION_TYPE_INFERENCE;
     }
+    if (Preferences::GetBool(gPrefsToWatch[PREF_baselinejit])) {
+      newOptions |= JSOPTION_BASELINE;
+    }
     if (Preferences::GetBool(gPrefsToWatch[PREF_ion])) {
       newOptions |= JSOPTION_ION;
     }
     if (Preferences::GetBool(gPrefsToWatch[PREF_asmjs])) {
       newOptions |= JSOPTION_ASMJS;
     }
 
     RuntimeService::SetDefaultJSContextOptions(newOptions);
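
The worker code depends on a positional invariant: the PREF_* enum and the gPrefsToWatch table must be edited in lockstep, which is why PREF_baselinejit and the "baselinejit.content" string are inserted at matching positions. A short sketch of that invariant with hypothetical names (the real file guards the array length with MOZ_STATIC_ASSERT, as the hunk header above shows):

    // If one side gains an entry without the other, every later pref is read
    // from the wrong slot; a length check catches count mismatches only,
    // not reordering.
    enum PrefSketch { PREF_strict_s, PREF_baselinejit_s, PREF_ion_s, PREF_COUNT_s };

    static const char *kPrefNames[] = {
        "javascript.options.strict",
        "javascript.options.baselinejit.content",
        "javascript.options.ion.content",
    };

    static_assert(sizeof(kPrefNames) / sizeof(kPrefNames[0]) == PREF_COUNT_s,
                  "pref enum and name table must stay in sync");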
--- a/js/public/MemoryMetrics.h
+++ b/js/public/MemoryMetrics.h
@@ -239,16 +239,19 @@ struct CompartmentStats
         gcHeapScripts(0),
         objectsExtra(),
         shapesExtraTreeTables(0),
         shapesExtraDictTables(0),
         shapesExtraTreeShapeKids(0),
         shapesCompartmentTables(0),
         scriptData(0),
         jaegerData(0),
+        baselineData(0),
+        baselineFallbackStubs(0),
+        baselineOptimizedStubs(0),
         ionData(0),
         compartmentObject(0),
         crossCompartmentWrappersTable(0),
         regexpCompartment(0),
         debuggeesSet(0),
         typeInference()
     {}
 
@@ -267,16 +270,19 @@ struct CompartmentStats
         gcHeapScripts(other.gcHeapScripts),
         objectsExtra(other.objectsExtra),
         shapesExtraTreeTables(other.shapesExtraTreeTables),
         shapesExtraDictTables(other.shapesExtraDictTables),
         shapesExtraTreeShapeKids(other.shapesExtraTreeShapeKids),
         shapesCompartmentTables(other.shapesCompartmentTables),
         scriptData(other.scriptData),
         jaegerData(other.jaegerData),
+        baselineData(other.baselineData),
+        baselineFallbackStubs(other.baselineFallbackStubs),
+        baselineOptimizedStubs(other.baselineOptimizedStubs),
         ionData(other.ionData),
         compartmentObject(other.compartmentObject),
         crossCompartmentWrappersTable(other.crossCompartmentWrappersTable),
         regexpCompartment(other.regexpCompartment),
         debuggeesSet(other.debuggeesSet),
         typeInference(other.typeInference)
     {
     }
@@ -300,16 +306,19 @@ struct CompartmentStats
     ObjectsExtraSizes objectsExtra;
 
     size_t shapesExtraTreeTables;
     size_t shapesExtraDictTables;
     size_t shapesExtraTreeShapeKids;
     size_t shapesCompartmentTables;
     size_t scriptData;
     size_t jaegerData;
+    size_t baselineData;
+    size_t baselineFallbackStubs;
+    size_t baselineOptimizedStubs;
     size_t ionData;
     size_t compartmentObject;
     size_t crossCompartmentWrappersTable;
     size_t regexpCompartment;
     size_t debuggeesSet;
 
     TypeInferenceSizes typeInference;
 
@@ -330,16 +339,19 @@ struct CompartmentStats
         objectsExtra.add(cStats.objectsExtra);
 
         ADD(shapesExtraTreeTables);
         ADD(shapesExtraDictTables);
         ADD(shapesExtraTreeShapeKids);
         ADD(shapesCompartmentTables);
         ADD(scriptData);
         ADD(jaegerData);
+        ADD(baselineData);
+        ADD(baselineFallbackStubs);
+        ADD(baselineOptimizedStubs);
         ADD(ionData);
         ADD(compartmentObject);
         ADD(crossCompartmentWrappersTable);
         ADD(regexpCompartment);
         ADD(debuggeesSet);
 
         #undef ADD
 
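Each counter in CompartmentStats is threaded through four places: the default constructor, the copy constructor, the field declarations, and the add() accumulator. The ADD macro itself is defined just above this hunk; the sketch below reconstructs the idiom (as an assumption, not the file's exact macro) to show why each new baseline counter costs one line per site:

    #include <cstddef>

    struct StatsSketch {
        size_t baselineData;
        size_t baselineFallbackStubs;
        size_t ionData;

        void add(const StatsSketch &cStats) {
            // Assumed macro body: fold the same-named field of cStats into this.
            #define ADD(x) this->x += cStats.x
            ADD(baselineData);
            ADD(baselineFallbackStubs);
            ADD(ionData);
            #undef ADD
        }
    };
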
--- a/js/src/Makefile.in
+++ b/js/src/Makefile.in
@@ -253,16 +253,23 @@ CPPSRCS += 	MethodJIT.cpp \
 		$(NULL)
 
 # Ion
 ifdef ENABLE_ION
 VPATH +=	$(srcdir)/ion
 VPATH +=	$(srcdir)/ion/shared
 
 CPPSRCS +=	MIR.cpp \
+		BaselineCompiler.cpp \
+		BaselineIC.cpp \
+		BaselineFrame.cpp \
+		BaselineFrameInfo.cpp \
+		BaselineJIT.cpp \
+		BaselineInspector.cpp \
+		BaselineBailouts.cpp \
 		BacktrackingAllocator.cpp \
 		Bailouts.cpp \
 		BitSet.cpp \
 		C1Spewer.cpp \
 		CodeGenerator.cpp \
 		CodeGenerator-shared.cpp \
 		Ion.cpp \
 		IonAnalysis.cpp \
@@ -290,29 +297,32 @@ CPPSRCS +=	MIR.cpp \
 		TypePolicy.cpp \
 		ValueNumbering.cpp \
 		RangeAnalysis.cpp \
 		VMFunctions.cpp \
 		ParallelFunctions.cpp \
 		AliasAnalysis.cpp \
 		ParallelArrayAnalysis.cpp \
 		UnreachableCodeElimination.cpp \
+		BaselineCompiler-shared.cpp \
 		EffectiveAddressAnalysis.cpp \
 		AsmJS.cpp \
 		AsmJSLink.cpp \
 		AsmJSSignalHandlers.cpp \
 		$(NULL)
 endif #ENABLE_ION
 ifeq (86, $(findstring 86,$(TARGET_CPU)))
 ifdef ENABLE_ION
 CPPSRCS +=	CodeGenerator-x86-shared.cpp
 CPPSRCS +=	IonFrames-x86-shared.cpp
 CPPSRCS +=	MoveEmitter-x86-shared.cpp
 CPPSRCS +=	Assembler-x86-shared.cpp
 CPPSRCS +=	Lowering-x86-shared.cpp
+CPPSRCS +=	BaselineCompiler-x86-shared.cpp
+CPPSRCS +=	BaselineIC-x86-shared.cpp
 endif #ENABLE_ION
 ifeq (x86_64, $(TARGET_CPU))
 ifdef _MSC_VER
 ASFILES +=	TrampolineMasmX64.asm
 endif
 ifeq ($(OS_ARCH),WINNT)
 ifdef GNU_CC
 ASFILES +=	TrampolineMingwX64.s
@@ -324,27 +334,31 @@ endif
 ifdef ENABLE_ION
 VPATH +=	$(srcdir)/ion/x64
 CPPSRCS += 	Lowering-x64.cpp \
 		CodeGenerator-x64.cpp \
 		Trampoline-x64.cpp \
 		Assembler-x64.cpp \
 		Bailouts-x64.cpp \
 		MacroAssembler-x64.cpp \
+		BaselineCompiler-x64.cpp \
+		BaselineIC-x64.cpp \
 		$(NULL)
 endif #ENABLE_ION
 else
 ifdef ENABLE_ION
 VPATH +=	$(srcdir)/ion/x86
 CPPSRCS +=	Lowering-x86.cpp \
 		CodeGenerator-x86.cpp \
 		Trampoline-x86.cpp \
 		Assembler-x86.cpp \
 		Bailouts-x86.cpp \
 		MacroAssembler-x86.cpp \
+		BaselineCompiler-x86.cpp \
+		BaselineIC-x86.cpp \
 		$(NULL)
 endif #ENABLE_ION
 ifdef SOLARIS_SUNPRO_CXX
 ASFILES +=	TrampolineSUNWX86.s
 endif
 endif
 endif
 ifdef ENABLE_ION
@@ -355,16 +369,18 @@ CPPSRCS +=	Lowering-arm.cpp \
 		CodeGenerator-arm.cpp \
 		Trampoline-arm.cpp \
 		Assembler-arm.cpp \
 		Bailouts-arm.cpp \
 		IonFrames-arm.cpp \
 		MoveEmitter-arm.cpp \
 		Architecture-arm.cpp \
 		MacroAssembler-arm.cpp \
+		BaselineCompiler-arm.cpp \
+		BaselineIC-arm.cpp \
 		$(NULL)
 endif #ENABLE_ION
 endif
 endif #ENABLE_ION
 ifeq (sparc, $(findstring sparc,$(TARGET_CPU)))
 ASFILES +=	TrampolineSparc.s
 endif
 ifeq (mips, $(findstring mips,$(TARGET_CPU)))
--- a/js/src/builtin/Eval.cpp
+++ b/js/src/builtin/Eval.cpp
@@ -408,17 +408,17 @@ js::DirectEval(JSContext *cx, const Call
 {
     // Direct eval can assume it was called from an interpreted or baseline frame.
     ScriptFrameIter iter(cx);
     AbstractFramePtr caller = iter.abstractFramePtr();
 
     JS_ASSERT(IsBuiltinEvalForScope(caller.scopeChain(), args.calleev()));
     JS_ASSERT(JSOp(*iter.pc()) == JSOP_EVAL);
     JS_ASSERT_IF(caller.isFunctionFrame(),
-                 caller.compartment() == caller.callee().compartment());
+                 caller.compartment() == caller.callee()->compartment());
 
     if (!WarnOnTooManyArgs(cx, args))
         return false;
 
     RootedObject scopeChain(cx, caller.scopeChain());
     return EvalKernel(cx, args, DIRECT_EVAL, caller, scopeChain, iter.pc());
 }
 
--- a/js/src/gc/Zone.cpp
+++ b/js/src/gc/Zone.cpp
@@ -9,16 +9,17 @@
 #include "jscntxt.h"
 #include "jsgc.h"
 #include "jsprf.h"
 
 #include "js/HashTable.h"
 #include "gc/GCInternals.h"
 
 #ifdef JS_ION
+#include "ion/BaselineJIT.h"
 #include "ion/IonCompartment.h"
 #include "ion/Ion.h"
 #endif
 
 #include "jsobjinlines.h"
 #include "jsgcinlines.h"
 
 using namespace js;
@@ -170,32 +171,57 @@ Zone::discardJitCode(FreeOp *fop, bool d
      * redirected to the interpoline.
      */
     mjit::ClearAllFrames(this);
 
     if (isPreservingCode()) {
         PurgeJITCaches(this);
     } else {
 # ifdef JS_ION
+
+#  ifdef DEBUG
+        /* Assert no baseline scripts are marked as active. */
+        for (CellIterUnderGC i(this, FINALIZE_SCRIPT); !i.done(); i.next()) {
+            JSScript *script = i.get<JSScript>();
+            JS_ASSERT_IF(script->hasBaselineScript(), !script->baseline->active());
+        }
+#  endif
+
+        /* Mark baseline scripts on the stack as active. */
+        ion::MarkActiveBaselineScripts(this);
+
         /* Only mark OSI points if code is being discarded. */
         ion::InvalidateAll(fop, this);
 # endif
         for (CellIterUnderGC i(this, FINALIZE_SCRIPT); !i.done(); i.next()) {
             JSScript *script = i.get<JSScript>();
 
             mjit::ReleaseScriptCode(fop, script);
 # ifdef JS_ION
             ion::FinishInvalidation(fop, script);
+
+            /*
+             * Discard baseline script if it's not marked as active. Note that
+             * this also resets the active flag.
+             */
+            ion::FinishDiscardBaselineScript(fop, script);
 # endif
 
             /*
              * Use counts for scripts are reset on GC. After discarding code we
              * need to let it warm back up to get information such as which
              * opcodes are setting array holes or accessing getter properties.
              */
             script->resetUseCount();
         }
 
-        for (CompartmentsInZoneIter comp(this); !comp.done(); comp.next())
+        for (CompartmentsInZoneIter comp(this); !comp.done(); comp.next()) {
+#ifdef JS_ION
+            /* Free optimized baseline stubs. */
+            if (comp->ionCompartment())
+                comp->ionCompartment()->optimizedStubSpace()->free();
+#endif
+
             comp->types.sweepCompilerOutputs(fop, discardConstraints);
+        }
     }
 #endif /* JS_METHODJIT */
 }
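
The ordering in discardJitCode matters: baseline scripts that still have frames on the stack are flagged active before the discard loop, and FinishDiscardBaselineScript both skips those scripts and clears the flag for the next GC. A standalone sketch of that mark-then-sweep idiom, with hypothetical types:

    #include <vector>

    struct ScriptSketch {
        bool active;      // true if a frame for this script is on the stack
        bool hasJitCode;
        ScriptSketch() : active(false), hasJitCode(true) {}
    };

    // Phase 1: flag running scripts (cf. ion::MarkActiveBaselineScripts).
    static void MarkActive(std::vector<ScriptSketch *> &onStack) {
        for (size_t i = 0; i < onStack.size(); i++)
            onStack[i]->active = true;
    }

    // Phase 2: discard code for inactive scripts and reset the flag
    // (cf. ion::FinishDiscardBaselineScript).
    static void DiscardInactive(std::vector<ScriptSketch> &all) {
        for (size_t i = 0; i < all.size(); i++) {
            if (all[i].active)
                all[i].active = false;      // keep the code, clear for next GC
            else
                all[i].hasJitCode = false;  // safe to free: not on the stack
        }
    }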
--- a/js/src/ion/Bailouts.cpp
+++ b/js/src/ion/Bailouts.cpp
@@ -12,16 +12,17 @@
 #include "SnapshotReader.h"
 #include "Ion.h"
 #include "IonCompartment.h"
 #include "IonSpewer.h"
 #include "jsinfer.h"
 #include "jsanalyze.h"
 #include "jsinferinlines.h"
 #include "IonFrames-inl.h"
+#include "BaselineJIT.h"
 
 using namespace js;
 using namespace js::ion;
 
 // These constructors are exactly the same except for the type of the iterator
 // which is given to the SnapshotIterator constructor. Doing so avoids the
 // creation of virtual functions for the IonIterator but may introduce some
 // weirdness, as IonInlineIterator is using an IonFrameIterator reference.
@@ -319,96 +320,138 @@ ConvertFrames(JSContext *cx, IonActivati
         return BAILOUT_RETURN_ARGUMENT_CHECK;
     }
 
     JS_NOT_REACHED("bad bailout kind");
     return BAILOUT_RETURN_FATAL_ERROR;
 }
 
 uint32_t
-ion::Bailout(BailoutStack *sp)
+ion::Bailout(BailoutStack *sp, BaselineBailoutInfo **bailoutInfo)
 {
+    JS_ASSERT(bailoutInfo);
     JSContext *cx = GetIonContext()->cx;
     // We don't have an exit frame.
     cx->mainThread().ionTop = NULL;
     IonActivationIterator ionActivations(cx);
     IonBailoutIterator iter(ionActivations, sp);
     IonActivation *activation = ionActivations.activation();
 
     // IonCompartment *ioncompartment = cx->compartment->ionCompartment();
     // IonActivation *activation = cx->runtime->ionActivation;
     // FrameRecovery in = FrameRecoveryFromBailout(ioncompartment, sp);
 
     IonSpew(IonSpew_Bailouts, "Took bailout! Snapshot offset: %d", iter.snapshotOffset());
 
-    uint32_t retval = ConvertFrames(cx, activation, iter);
+    uint32_t retval;
+    if (IsBaselineEnabled(cx)) {
+        *bailoutInfo = NULL;
+        retval = BailoutIonToBaseline(cx, activation, iter, false, bailoutInfo);
+        JS_ASSERT(retval == BAILOUT_RETURN_BASELINE ||
+                  retval == BAILOUT_RETURN_FATAL_ERROR ||
+                  retval == BAILOUT_RETURN_OVERRECURSED);
+        JS_ASSERT_IF(retval == BAILOUT_RETURN_BASELINE, *bailoutInfo != NULL);
+    } else {
+        retval = ConvertFrames(cx, activation, iter);
+    }
 
-    EnsureExitFrame(iter.jsFrame());
+    if (retval != BAILOUT_RETURN_BASELINE)
+        EnsureExitFrame(iter.jsFrame());
+
     return retval;
 }
 
 uint32_t
-ion::InvalidationBailout(InvalidationBailoutStack *sp, size_t *frameSizeOut)
+ion::InvalidationBailout(InvalidationBailoutStack *sp, size_t *frameSizeOut,
+                         BaselineBailoutInfo **bailoutInfo)
 {
     sp->checkInvariants();
 
     JSContext *cx = GetIonContext()->cx;
 
     // We don't have an exit frame.
     cx->mainThread().ionTop = NULL;
     IonActivationIterator ionActivations(cx);
     IonBailoutIterator iter(ionActivations, sp);
     IonActivation *activation = ionActivations.activation();
 
     IonSpew(IonSpew_Bailouts, "Took invalidation bailout! Snapshot offset: %d", iter.snapshotOffset());
 
     // Note: the frame size must be computed before we return from this function.
     *frameSizeOut = iter.topFrameSize();
 
-    uint32_t retval = ConvertFrames(cx, activation, iter);
+    uint32_t retval;
+    if (IsBaselineEnabled(cx)) {
+        *bailoutInfo = NULL;
+        retval = BailoutIonToBaseline(cx, activation, iter, true, bailoutInfo);
+        JS_ASSERT(retval == BAILOUT_RETURN_BASELINE ||
+                  retval == BAILOUT_RETURN_FATAL_ERROR ||
+                  retval == BAILOUT_RETURN_OVERRECURSED);
+        JS_ASSERT_IF(retval == BAILOUT_RETURN_BASELINE, *bailoutInfo != NULL);
 
-    {
+        if (retval != BAILOUT_RETURN_BASELINE) {
+            IonJSFrameLayout *frame = iter.jsFrame();
+            IonSpew(IonSpew_Invalidate, "converting to exit frame");
+            IonSpew(IonSpew_Invalidate, "   orig calleeToken %p", (void *) frame->calleeToken());
+            IonSpew(IonSpew_Invalidate, "   orig frameSize %u", unsigned(frame->prevFrameLocalSize()));
+            IonSpew(IonSpew_Invalidate, "   orig ra %p", (void *) frame->returnAddress());
+
+            frame->replaceCalleeToken(NULL);
+            EnsureExitFrame(frame);
+
+            IonSpew(IonSpew_Invalidate, "   new  calleeToken %p", (void *) frame->calleeToken());
+            IonSpew(IonSpew_Invalidate, "   new  frameSize %u", unsigned(frame->prevFrameLocalSize()));
+            IonSpew(IonSpew_Invalidate, "   new  ra %p", (void *) frame->returnAddress());
+        }
+
+        iter.ionScript()->decref(cx->runtime->defaultFreeOp());
+
+        return retval;
+    } else {
+        retval = ConvertFrames(cx, activation, iter);
+
         IonJSFrameLayout *frame = iter.jsFrame();
         IonSpew(IonSpew_Invalidate, "converting to exit frame");
         IonSpew(IonSpew_Invalidate, "   orig calleeToken %p", (void *) frame->calleeToken());
         IonSpew(IonSpew_Invalidate, "   orig frameSize %u", unsigned(frame->prevFrameLocalSize()));
         IonSpew(IonSpew_Invalidate, "   orig ra %p", (void *) frame->returnAddress());
 
         frame->replaceCalleeToken(NULL);
         EnsureExitFrame(frame);
 
         IonSpew(IonSpew_Invalidate, "   new  calleeToken %p", (void *) frame->calleeToken());
         IonSpew(IonSpew_Invalidate, "   new  frameSize %u", unsigned(frame->prevFrameLocalSize()));
         IonSpew(IonSpew_Invalidate, "   new  ra %p", (void *) frame->returnAddress());
-    }
+
+        iter.ionScript()->decref(cx->runtime->defaultFreeOp());
 
-    iter.ionScript()->decref(cx->runtime->defaultFreeOp());
-
-    if (cx->runtime->hasIonReturnOverride())
-        cx->regs().sp[-1] = cx->runtime->takeIonReturnOverride();
+        // Only need to take ion return override if resuming to interpreter.
+        if (cx->runtime->hasIonReturnOverride())
+            cx->regs().sp[-1] = cx->runtime->takeIonReturnOverride();
 
-    if (retval != BAILOUT_RETURN_FATAL_ERROR) {
-        // If invalidation was triggered inside a stub call, we may still have to
-        // monitor the result, since the bailout happens before the MMonitorTypes
-        // instruction is executed.
-        jsbytecode *pc = activation->bailout()->bailoutPc();
+        if (retval != BAILOUT_RETURN_FATAL_ERROR) {
+            // If invalidation was triggered inside a stub call, we may still have to
+            // monitor the result, since the bailout happens before the MMonitorTypes
+            // instruction is executed.
+            jsbytecode *pc = activation->bailout()->bailoutPc();
 
-        // If this is not a ResumeAfter bailout, there's nothing to monitor,
-        // we will redo the op in the interpreter.
-        bool isResumeAfter = GetNextPc(pc) == cx->regs().pc;
+            // If this is not a ResumeAfter bailout, there's nothing to monitor,
+            // we will redo the op in the interpreter.
+            bool isResumeAfter = GetNextPc(pc) == cx->regs().pc;
 
-        if ((js_CodeSpec[*pc].format & JOF_TYPESET) && isResumeAfter) {
-            JS_ASSERT(retval == BAILOUT_RETURN_OK);
-            return BAILOUT_RETURN_MONITOR;
+            if ((js_CodeSpec[*pc].format & JOF_TYPESET) && isResumeAfter) {
+                JS_ASSERT(retval == BAILOUT_RETURN_OK);
+                return BAILOUT_RETURN_MONITOR;
+            }
+
+            return retval;
         }
 
-        return retval;
+        return BAILOUT_RETURN_FATAL_ERROR;
     }
-
-    return BAILOUT_RETURN_FATAL_ERROR;
 }
 
 static void
 ReflowArgTypes(JSContext *cx)
 {
     StackFrame *fp = cx->fp();
     unsigned nargs = fp->fun()->nargs;
     RootedScript script(cx, fp->script());
@@ -453,23 +496,23 @@ ion::ReflowTypeInfo(uint32_t bailoutResu
     Value &result = cx->regs().sp[-1];
     types::TypeScript::Monitor(cx, script, pc, result);
 
     return true;
 }
 
 // Initialize the decl env Object and the call object of the current frame.
 bool
-ion::EnsureHasScopeObjects(JSContext *cx, StackFrame *fp)
+ion::EnsureHasScopeObjects(JSContext *cx, AbstractFramePtr fp)
 {
-    if (fp->isFunctionFrame() &&
-        fp->fun()->isHeavyweight() &&
-        !fp->hasCallObj())
+    if (fp.isFunctionFrame() &&
+        fp.fun()->isHeavyweight() &&
+        !fp.hasCallObj())
     {
-        return fp->initFunctionScopeObjects(cx);
+        return fp.initFunctionScopeObjects(cx);
     }
     return true;
 }
 
 uint32_t
 ion::BoundsCheckFailure()
 {
     JSContext *cx = GetIonContext()->cx;
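
The key interface change in this file is the out-parameter contract: *bailoutInfo is non-null exactly when the return code is BAILOUT_RETURN_BASELINE, and only in that case does the thunk skip EnsureExitFrame and finish the bailout in baseline jitcode. A simplified, hypothetical sketch of that contract:

    #include <cassert>
    #include <cstddef>
    #include <stdint.h>

    static const uint32_t RETURN_OK = 0;        // stand-in for BAILOUT_RETURN_OK
    static const uint32_t RETURN_BASELINE = 9;  // stand-in for BAILOUT_RETURN_BASELINE

    struct InfoSketch { /* reconstructed baseline frames */ };

    static uint32_t BailoutSketch(bool baselineEnabled, InfoSketch **info)
    {
        *info = NULL;
        if (baselineEnabled) {
            *info = new InfoSketch();   // built by BaselineStackBuilder in reality
            return RETURN_BASELINE;     // caller resumes in baseline code
        }
        return RETURN_OK;               // legacy path: convert frames for the interpreter
    }

    int main()
    {
        InfoSketch *info;
        uint32_t rv = BailoutSketch(true, &info);
        assert((rv == RETURN_BASELINE) == (info != NULL));
        delete info;
        return 0;
    }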
--- a/js/src/ion/Bailouts.h
+++ b/js/src/ion/Bailouts.h
@@ -103,16 +103,17 @@ static const uint32_t BAILOUT_RETURN_OK 
 static const uint32_t BAILOUT_RETURN_FATAL_ERROR = 1;
 static const uint32_t BAILOUT_RETURN_ARGUMENT_CHECK = 2;
 static const uint32_t BAILOUT_RETURN_TYPE_BARRIER = 3;
 static const uint32_t BAILOUT_RETURN_MONITOR = 4;
 static const uint32_t BAILOUT_RETURN_BOUNDS_CHECK = 5;
 static const uint32_t BAILOUT_RETURN_SHAPE_GUARD = 6;
 static const uint32_t BAILOUT_RETURN_OVERRECURSED = 7;
 static const uint32_t BAILOUT_RETURN_CACHED_SHAPE_GUARD = 8;
+static const uint32_t BAILOUT_RETURN_BASELINE = 9;
 
 // Attached to the compartment for easy passing through from ::Bailout to
 // ::ThunkToInterpreter.
 class BailoutClosure
 {
     // These classes are used to control the stack usage and the order of
     // declaration is used by the destructor to restore the stack in the
     // expected order when classes are created. This class is only created
@@ -198,33 +199,38 @@ class IonBailoutIterator : public IonFra
         if (topIonScript_)
             return topIonScript_;
         return IonFrameIterator::ionScript();
     }
 
     void dump() const;
 };
 
-bool EnsureHasScopeObjects(JSContext *cx, StackFrame *fp);
+bool EnsureHasScopeObjects(JSContext *cx, AbstractFramePtr fp);
+
+struct BaselineBailoutInfo;
 
 // Called from a bailout thunk. Returns a BAILOUT_* error code.
-uint32_t Bailout(BailoutStack *sp);
+uint32_t Bailout(BailoutStack *sp, BaselineBailoutInfo **info);
 
 // Called from the invalidation thunk. Returns a BAILOUT_* error code.
-uint32_t InvalidationBailout(InvalidationBailoutStack *sp, size_t *frameSizeOut);
+uint32_t InvalidationBailout(InvalidationBailoutStack *sp, size_t *frameSizeOut,
+                             BaselineBailoutInfo **info);
 
 // Called from a bailout thunk. Interprets the frame(s) that have been bailed
 // out.
 uint32_t ThunkToInterpreter(Value *vp);
 
 uint32_t ReflowTypeInfo(uint32_t bailoutResult);
 
 uint32_t BoundsCheckFailure();
 
 uint32_t ShapeGuardFailure();
 
 uint32_t CachedShapeGuardFailure();
 
+uint32_t FinishBailoutToBaseline(BaselineBailoutInfo *bailoutInfo);
+
 } // namespace ion
 } // namespace js
 
 #endif // jsion_bailouts_h__
 
new file mode 100644
--- /dev/null
+++ b/js/src/ion/BaselineBailouts.cpp
@@ -0,0 +1,1260 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=99:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "BaselineCompiler.h"
+#include "BaselineIC.h"
+#include "BaselineJIT.h"
+#include "CompileInfo.h"
+#include "IonSpewer.h"
+#include "IonFrames-inl.h"
+
+#include "vm/Stack-inl.h"
+
+#include "jsopcodeinlines.h"
+
+using namespace js;
+using namespace js::ion;
+
+// BaselineStackBuilder may reallocate its buffer if the current one is too
+// small. To avoid dangling pointers, BufferPointer represents a pointer into
+// this buffer as a pointer to the header and a fixed offset.
+template <typename T>
+class BufferPointer
+{
+    BaselineBailoutInfo **header_;
+    size_t offset_;
+    bool heap_;
+
+  public:
+    BufferPointer(BaselineBailoutInfo **header, size_t offset, bool heap)
+      : header_(header), offset_(offset), heap_(heap)
+    { }
+
+    T *get() const {
+        BaselineBailoutInfo *header = *header_;
+        if (!heap_)
+            return (T*)(header->incomingStack + offset_);
+
+        uint8_t *p = header->copyStackTop - offset_;
+        JS_ASSERT(p >= header->copyStackBottom && p < header->copyStackTop);
+        return (T*)p;
+    }
+
+    T &operator*() const { return *get(); }
+    T *operator->() const { return get(); }
+};
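
A raw T* into the builder's buffer would dangle as soon as enlarge() reallocates, which is exactly what the (header, offset) pair avoids: the address is re-derived from the live header on every access. A standalone sketch of the same scheme, with simplified stand-in types:

    #include <cstddef>

    struct HeaderSketch { unsigned char *copyStackTop; };

    template <typename T>
    struct OffsetPtrSketch {
        HeaderSketch **header;  // updated when the buffer is reallocated
        size_t offset;          // fixed distance from copyStackTop

        T *get() const {
            // Recompute the address each time, so a reallocation that moved
            // the buffer (and updated *header) cannot leave us dangling.
            return reinterpret_cast<T *>((*header)->copyStackTop - offset);
        }
    };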
+
+/**
+ * BaselineStackBuilder helps abstract the process of rebuilding the C stack on the heap.
+ * It takes a bailout iterator and keeps track of the point on the C stack from which
+ * the reconstructed frames will be written.
+ *
+ * It exposes methods to write data into the heap memory storing the reconstructed
+ * stack.  It also exposes method to easily calculate addresses.  This includes both the
+ * virtual address that a particular value will be at when it's eventually copied onto
+ * the stack, as well as the current actual address of that value (whether on the heap
+ * allocated portion being constructed or the existing stack).
+ *
+ * The abstraction handles transparent re-allocation of the heap memory when it
+ * needs to be enlarged to accommodate new data.  Similarly to the C stack, the
+ * data that's written to the reconstructed stack grows from high to low in memory.
+ *
+ * The lowest region of the allocated memory contains a BaselineBailoutInfo structure that
+ * points to the start and end of the written data.
+ */
+struct BaselineStackBuilder
+{
+    IonBailoutIterator &iter_;
+    IonJSFrameLayout *frame_;
+
+    static size_t HeaderSize() {
+        return AlignBytes(sizeof(BaselineBailoutInfo), sizeof(void *));
+    }
+    size_t bufferTotal_;
+    size_t bufferAvail_;
+    size_t bufferUsed_;
+    uint8_t *buffer_;
+    BaselineBailoutInfo *header_;
+
+    size_t framePushed_;
+
+    BaselineStackBuilder(IonBailoutIterator &iter, size_t initialSize)
+      : iter_(iter),
+        frame_(static_cast<IonJSFrameLayout*>(iter.current())),
+        bufferTotal_(initialSize),
+        bufferAvail_(0),
+        bufferUsed_(0),
+        buffer_(NULL),
+        header_(NULL),
+        framePushed_(0)
+    {
+        JS_ASSERT(bufferTotal_ >= HeaderSize());
+    }
+
+    ~BaselineStackBuilder() {
+        if (buffer_)
+            js_free(buffer_);
+    }
+
+    bool init() {
+        JS_ASSERT(!buffer_);
+        JS_ASSERT(bufferUsed_ == 0);
+        buffer_ = reinterpret_cast<uint8_t *>(js_calloc(bufferTotal_));
+        if (!buffer_)
+            return false;
+        bufferAvail_ = bufferTotal_ - HeaderSize();
+        bufferUsed_ = 0;
+
+        header_ = reinterpret_cast<BaselineBailoutInfo *>(buffer_);
+        header_->incomingStack = reinterpret_cast<uint8_t *>(frame_);
+        header_->copyStackTop = buffer_ + bufferTotal_;
+        header_->copyStackBottom = header_->copyStackTop;
+        header_->setR0 = 0;
+        header_->valueR0 = UndefinedValue();
+        header_->setR1 = 0;
+        header_->valueR1 = UndefinedValue();
+        header_->resumeFramePtr = NULL;
+        header_->resumeAddr = NULL;
+        header_->monitorStub = NULL;
+        header_->numFrames = 0;
+        return true;
+    }
+
+    bool enlarge() {
+        JS_ASSERT(buffer_ != NULL);
+        size_t newSize = bufferTotal_ * 2;
+        uint8_t *newBuffer = reinterpret_cast<uint8_t *>(js_calloc(newSize));
+        if (!newBuffer)
+            return false;
+        memcpy((newBuffer + newSize) - bufferUsed_, header_->copyStackBottom, bufferUsed_);
+        memcpy(newBuffer, header_, sizeof(BaselineBailoutInfo));
+        js_free(buffer_);
+        buffer_ = newBuffer;
+        bufferTotal_ = newSize;
+        bufferAvail_ = newSize - (HeaderSize() + bufferUsed_);
+
+        header_ = reinterpret_cast<BaselineBailoutInfo *>(buffer_);
+        header_->copyStackTop = buffer_ + bufferTotal_;
+        header_->copyStackBottom = header_->copyStackTop - bufferUsed_;
+        return true;
+    }
+
+    BaselineBailoutInfo *info() {
+        JS_ASSERT(header_ == reinterpret_cast<BaselineBailoutInfo *>(buffer_));
+        return header_;
+    }
+
+    BaselineBailoutInfo *takeBuffer() {
+        JS_ASSERT(header_ == reinterpret_cast<BaselineBailoutInfo *>(buffer_));
+        buffer_ = NULL;
+        return header_;
+    }
+
+    void resetFramePushed() {
+        framePushed_ = 0;
+    }
+
+    size_t framePushed() const {
+        return framePushed_;
+    }
+
+    bool subtract(size_t size, const char *info=NULL) {
+        // Enlarge the buffer if need be.
+        while (size > bufferAvail_) {
+            if (!enlarge())
+                return false;
+        }
+
+        // Reserve the space; callers write into copyStackBottom.
+        header_->copyStackBottom -= size;
+        bufferAvail_ -= size;
+        bufferUsed_ += size;
+        framePushed_ += size;
+        if (info) {
+            IonSpew(IonSpew_BaselineBailouts,
+                    "      SUB_%03d   %p/%p %-15s",
+                    (int) size, header_->copyStackBottom, virtualPointerAtStackOffset(0), info);
+        }
+        return true;
+    }
+
+    template <typename T>
+    bool write(const T &t) {
+        if (!subtract(sizeof(T)))
+            return false;
+        memcpy(header_->copyStackBottom, &t, sizeof(T));
+        return true;
+    }
+
+    template <typename T>
+    bool writePtr(T *t, const char *info) {
+        if (!write<T *>(t))
+            return false;
+        if (info)
+            IonSpew(IonSpew_BaselineBailouts,
+                    "      WRITE_PTR %p/%p %-15s %p",
+                    header_->copyStackBottom, virtualPointerAtStackOffset(0), info, t);
+        return true;
+    }
+
+    bool writeWord(size_t w, const char *info) {
+        if (!write<size_t>(w))
+            return false;
+        if (info) {
+            if (sizeof(size_t) == 4) {
+                IonSpew(IonSpew_BaselineBailouts,
+                        "      WRITE_WRD %p/%p %-15s %08x",
+                        header_->copyStackBottom, virtualPointerAtStackOffset(0), info, w);
+            } else {
+                IonSpew(IonSpew_BaselineBailouts,
+                        "      WRITE_WRD %p/%p %-15s %016llx",
+                        header_->copyStackBottom, virtualPointerAtStackOffset(0), info, w);
+            }
+        }
+        return true;
+    }
+
+    bool writeValue(Value val, const char *info) {
+        if (!write<Value>(val))
+            return false;
+        if (info) {
+            IonSpew(IonSpew_BaselineBailouts,
+                    "      WRITE_VAL %p/%p %-15s %016llx",
+                    header_->copyStackBottom, virtualPointerAtStackOffset(0), info,
+                    *((uint64_t *) &val));
+        }
+        return true;
+    }
+
+    Value popValue() {
+        JS_ASSERT(bufferUsed_ >= sizeof(Value));
+        JS_ASSERT(framePushed_ >= sizeof(Value));
+        bufferAvail_ += sizeof(Value);
+        bufferUsed_ -= sizeof(Value);
+        framePushed_ -= sizeof(Value);
+        Value result = *((Value *) header_->copyStackBottom);
+        header_->copyStackBottom += sizeof(Value);
+        return result;
+    }
+
+    void popValueInto(PCMappingSlotInfo::SlotLocation loc) {
+        JS_ASSERT(PCMappingSlotInfo::ValidSlotLocation(loc));
+        switch(loc) {
+          case PCMappingSlotInfo::SlotInR0:
+            header_->setR0 = 1;
+            header_->valueR0 = popValue();
+            break;
+          case PCMappingSlotInfo::SlotInR1:
+            header_->setR1 = 1;
+            header_->valueR1 = popValue();
+            break;
+          default:
+            JS_ASSERT(loc == PCMappingSlotInfo::SlotIgnore);
+            popValue();
+            break;
+        }
+    }
+
+    void setResumeFramePtr(void *resumeFramePtr) {
+        header_->resumeFramePtr = resumeFramePtr;
+    }
+
+    void setResumeAddr(void *resumeAddr) {
+        header_->resumeAddr = resumeAddr;
+    }
+
+    void setMonitorStub(ICStub *stub) {
+        header_->monitorStub = stub;
+    }
+
+    template <typename T>
+    BufferPointer<T> pointerAtStackOffset(size_t offset) {
+        if (offset < bufferUsed_) {
+            // Calculate offset from copyStackTop.
+            offset = header_->copyStackTop - (header_->copyStackBottom + offset);
+            return BufferPointer<T>(&header_, offset, /* heap = */ true);
+        }
+
+        return BufferPointer<T>(&header_, offset - bufferUsed_, /* heap = */ false);
+    }
+
+    BufferPointer<Value> valuePointerAtStackOffset(size_t offset) {
+        return pointerAtStackOffset<Value>(offset);
+    }
+
+    inline uint8_t *virtualPointerAtStackOffset(size_t offset) {
+        if (offset < bufferUsed_)
+            return reinterpret_cast<uint8_t *>(frame_) - (bufferUsed_ - offset);
+        return reinterpret_cast<uint8_t *>(frame_) + (offset - bufferUsed_);
+    }
+
+    inline IonJSFrameLayout *startFrame() {
+        return frame_;
+    }
+
+    BufferPointer<IonJSFrameLayout> topFrameAddress() {
+        return pointerAtStackOffset<IonJSFrameLayout>(0);
+    }
+
+    //
+    // This method should only be called when the builder is in a state where it is
+    // starting to construct the stack frame for the next callee.  This means that
+    // the lowest value on the constructed stack is the return address for the previous
+    // caller frame.
+    //
+    // This method is used to compute the value of the frame pointer (e.g. ebp on x86)
+    // that would have been saved by the baseline jitcode when it was entered.  In some
+    // cases, this value can be bogus, since the caller saves all registers itself
+    // and never reads this slot (see the OptimizedJS and Entry cases below).
+    //
+    void *calculatePrevFramePtr() {
+        // Get the incoming frame.
+        BufferPointer<IonJSFrameLayout> topFrame = topFrameAddress();
+        FrameType type = topFrame->prevType();
+
+        // For OptimizedJS and Entry frames, the "saved" frame pointer in the baseline
+        // frame is meaningless, since Ion saves all registers before calling other ion
+        // frames, and the entry frame saves all registers too.
+        if (type == IonFrame_OptimizedJS || type == IonFrame_Entry)
+            return NULL;
+
+        // BaselineStub - Baseline calling into Ion.
+        //  PrevFramePtr needs to point to the BaselineStubFrame's saved frame pointer.
+        //      STACK_START_ADDR + IonJSFrameLayout::Size() + PREV_FRAME_SIZE
+        //                      - IonBaselineStubFrameLayout::reverseOffsetOfSavedFramePtr()
+        if (type == IonFrame_BaselineStub) {
+            size_t offset = IonJSFrameLayout::Size() + topFrame->prevFrameLocalSize() +
+                            IonBaselineStubFrameLayout::reverseOffsetOfSavedFramePtr();
+            return virtualPointerAtStackOffset(offset);
+        }
+
+        JS_ASSERT(type == IonFrame_Rectifier);
+        // Rectifier - behaviour depends on the frame preceding the rectifier frame, and
+        // whether the arch is x86 or not.  The x86 rectifier frame saves the frame pointer,
+        // so we can calculate it directly.  For other archs, the previous frame pointer
+        // is stored on the stack in the frame that precedes the rectifier frame.
+        size_t priorOffset = IonJSFrameLayout::Size() + topFrame->prevFrameLocalSize();
+#if defined(JS_CPU_X86)
+        // On X86, the FramePointer is pushed as the first value in the Rectifier frame.
+        JS_ASSERT(BaselineFrameReg == FramePointer);
+        priorOffset -= sizeof(void *);
+        return virtualPointerAtStackOffset(priorOffset);
+#elif defined(JS_CPU_X64) || defined(JS_CPU_ARM)
+        // On X64 and ARM, the frame pointer save location depends on the caller of the
+        // rectifier frame.
+        BufferPointer<IonRectifierFrameLayout> priorFrame =
+            pointerAtStackOffset<IonRectifierFrameLayout>(priorOffset);
+        FrameType priorType = priorFrame->prevType();
+        JS_ASSERT(priorType == IonFrame_OptimizedJS || priorType == IonFrame_BaselineStub);
+
+        // If the frame preceding the rectifier is an OptimizedJS frame, then once again
+        // the frame pointer does not matter.
+        if (priorType == IonFrame_OptimizedJS)
+            return NULL;
+
+        // Otherwise, the frame preceding the rectifier is a BaselineStub frame.
+        //  let X = STACK_START_ADDR + IonJSFrameLayout::Size() + PREV_FRAME_SIZE
+        //      X + IonRectifierFrameLayout::Size()
+        //        + ((IonRectifierFrameLayout *) X)->prevFrameLocalSize()
+        //        - BaselineStubFrameLayout::reverseOffsetOfSavedFramePtr()
+        size_t extraOffset = IonRectifierFrameLayout::Size() + priorFrame->prevFrameLocalSize() +
+                             IonBaselineStubFrameLayout::reverseOffsetOfSavedFramePtr();
+        return virtualPointerAtStackOffset(priorOffset + extraOffset);
+#else
+#  error "Bad architecture!"
+#endif
+    }
+};
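
A hedged usage sketch of the builder methods defined above, in the order InitFromBailout uses them below (illustration only, not code from the patch, and it leans on the surrounding definitions):

    // Frames are written from high addresses to low, like a real C stack:
    // saved frame pointer first, then the frame struct, then the values.
    static bool SketchBuildFrame(BaselineStackBuilder &builder)
    {
        if (!builder.writePtr(builder.calculatePrevFramePtr(), "PrevFramePtr"))
            return false;   // subtract() enlarges the heap buffer on demand
        if (!builder.subtract(BaselineFrame::Size(), "BaselineFrame"))
            return false;   // reserve space; fields are filled via pointerAtStackOffset
        return builder.writeValue(UndefinedValue(), "StackValue");
    }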
+
+// For every inline frame, we write out the following data:
+//
+//                      |      ...      |
+//                      +---------------+
+//                      |  Descr(???)   |  --- Descr size here is (PREV_FRAME_SIZE)
+//                      +---------------+
+//                      |  ReturnAddr   |
+//             --       +===============+  --- OVERWRITE STARTS HERE  (START_STACK_ADDR)
+//             |        | PrevFramePtr  |
+//             |    +-> +---------------+
+//             |    |   |   Baseline    |
+//             |    |   |    Frame      |
+//             |    |   +---------------+
+//             |    |   |    Fixed0     |
+//             |    |   +---------------+
+//         +--<     |   |     ...       |
+//         |   |    |   +---------------+
+//         |   |    |   |    FixedF     |
+//         |   |    |   +---------------+
+//         |   |    |   |    Stack0     |
+//         |   |    |   +---------------+
+//         |   |    |   |     ...       |
+//         |   |    |   +---------------+
+//         |   |    |   |    StackS     |
+//         |   --   |   +---------------+  --- IF NOT LAST INLINE FRAME,
+//         +------------|  Descr(BLJS)  |  --- CALLING INFO STARTS HERE
+//                  |   +---------------+
+//                  |   |  ReturnAddr   | <-- return into main jitcode after IC
+//             --   |   +===============+
+//             |    |   |    StubPtr    |
+//             |    |   +---------------+
+//             |    +---|   FramePtr    |
+//             |        +---------------+
+//             |        |     ArgA      |
+//             |        +---------------+
+//             |        |     ...       |
+//         +--<         +---------------+
+//         |   |        |     Arg0      |
+//         |   |        +---------------+
+//         |   |        |     ThisV     |
+//         |   --       +---------------+
+//         |            |  ActualArgC   |
+//         |            +---------------+
+//         |            |  CalleeToken  |
+//         |            +---------------+
+//         +------------| Descr(BLStub) |
+//                      +---------------+
+//                      |  ReturnAddr   | <-- return into ICCall_Scripted IC
+//             --       +===============+ --- IF CALLEE FORMAL ARGS > ActualArgC
+//             |        |  UndefinedU   |
+//             |        +---------------+
+//             |        |     ...       |
+//             |        +---------------+
+//             |        |  Undefined0   |
+//             |        +---------------+
+//         +--<         |     ArgA      |
+//         |   |        +---------------+
+//         |   |        |     ...       |
+//         |   |        +---------------+
+//         |   |        |     Arg0      |
+//         |   |        +---------------+
+//         |   |        |     ThisV     |
+//         |   --       +---------------+
+//         |            |  ActualArgC   |
+//         |            +---------------+
+//         |            |  CalleeToken  |
+//         |            +---------------+
+//         +------------|  Descr(Rect)  |
+//                      +---------------+
+//                      |  ReturnAddr   | <-- return into ArgumentsRectifier after call
+//                      +===============+
+//
+static bool
+InitFromBailout(JSContext *cx, HandleScript caller, jsbytecode *callerPC,
+                HandleFunction fun, HandleScript script, SnapshotIterator &iter,
+                bool invalidate, BaselineStackBuilder &builder,
+                MutableHandleFunction nextCallee, jsbytecode **callPC)
+{
+    uint32_t exprStackSlots = iter.slots() - (script->nfixed + CountArgSlots(fun));
+
+    builder.resetFramePushed();
+
+    // Build first baseline frame:
+    // +===============+
+    // | PrevFramePtr  |
+    // +---------------+
+    // |   Baseline    |
+    // |    Frame      |
+    // +---------------+
+    // |    Fixed0     |
+    // +---------------+
+    // |     ...       |
+    // +---------------+
+    // |    FixedF     |
+    // +---------------+
+    // |    Stack0     |
+    // +---------------+
+    // |     ...       |
+    // +---------------+
+    // |    StackS     |
+    // +---------------+  --- IF NOT LAST INLINE FRAME,
+    // |  Descr(BLJS)  |  --- CALLING INFO STARTS HERE
+    // +---------------+
+    // |  ReturnAddr   | <-- return into main jitcode after IC
+    // +===============+
+
+    IonSpew(IonSpew_BaselineBailouts, "      Unpacking %s:%d", script->filename(), script->lineno);
+    IonSpew(IonSpew_BaselineBailouts, "      [BASELINE-JS FRAME]");
+
+    // Calculate and write the previous frame pointer value.
+    // Record the virtual stack offset at this location.  Later on, if we end up
+    // writing out a BaselineStub frame for the next callee, we'll need to save the
+    // address.
+    void *prevFramePtr = builder.calculatePrevFramePtr();
+    if (!builder.writePtr(prevFramePtr, "PrevFramePtr"))
+        return false;
+    prevFramePtr = builder.virtualPointerAtStackOffset(0);
+
+    // Write struct BaselineFrame.
+    if (!builder.subtract(BaselineFrame::Size(), "BaselineFrame"))
+        return false;
+    BufferPointer<BaselineFrame> blFrame = builder.pointerAtStackOffset<BaselineFrame>(0);
+
+    // Initialize BaselineFrame::frameSize
+    uint32_t frameSize = BaselineFrame::Size() + BaselineFrame::FramePointerOffset +
+                         (sizeof(Value) * (script->nfixed + exprStackSlots));
+    IonSpew(IonSpew_BaselineBailouts, "      FrameSize=%d", (int) frameSize);
+    blFrame->setFrameSize(frameSize);
+
+    uint32_t flags = 0;
+
+    // If SPS Profiler is enabled, mark the frame as having pushed an SPS entry.
+    // This may be wrong for the last frame of ArgumentCheck bailout, but
+    // that will be fixed later.
+    if (cx->runtime->spsProfiler.enabled()) {
+        IonSpew(IonSpew_BaselineBailouts, "      Setting SPS flag on frame!");
+        flags |= BaselineFrame::HAS_PUSHED_SPS_FRAME;
+    }
+
+    // Initialize BaselineFrame::scopeChain
+    JSObject *scopeChain = NULL;
+    BailoutKind bailoutKind = iter.bailoutKind();
+    if (bailoutKind == Bailout_ArgumentCheck) {
+        // Temporary hack -- skip the (unused) scopeChain, because it could be
+        // bogus (we can fail before the scope chain slot is set). Strip the
+        // hasScopeChain flag and we'll check this later to run prologue().
+        IonSpew(IonSpew_BaselineBailouts, "      Bailout_ArgumentCheck! (no valid scopeChain)");
+        iter.skip();
+    } else {
+        Value v = iter.read();
+        if (v.isObject()) {
+            scopeChain = &v.toObject();
+            if (fun && fun->isHeavyweight())
+                flags |= BaselineFrame::HAS_CALL_OBJ;
+        } else {
+            JS_ASSERT(v.isUndefined());
+
+            // Get scope chain from function or script.
+            if (fun) {
+                // If pcOffset == 0, we may have to push a new call object, so
+                // we leave scopeChain NULL and enter baseline code before the
+                // prologue.
+                if (iter.pcOffset() != 0 || iter.resumeAfter())
+                    scopeChain = fun->environment();
+            } else {
+                // For global, compile-and-go scripts the scope chain is the
+                // script's global (Ion does not compile non-compile-and-go
+                // scripts). Also note that it's invalid to resume into the
+                // prologue in this case because the prologue expects the scope
+                // chain in R1 for eval and global scripts.
+                JS_ASSERT(!script->isForEval());
+                JS_ASSERT(script->compileAndGo);
+                scopeChain = &(script->global());
+            }
+        }
+    }
+    IonSpew(IonSpew_BaselineBailouts, "      ScopeChain=%p", scopeChain);
+    blFrame->setScopeChain(scopeChain);
+    // Do not need to initialize scratchValue or returnValue fields in BaselineFrame.
+
+    // Set the flags computed above (SPS and call-object bits).
+    blFrame->setFlags(flags);
+
+    // Ion doesn't compile code with try/catch, so the block object will always be
+    // null.
+    blFrame->setBlockChainNull();
+
+    if (fun) {
+        // The unpacked thisv and arguments should overwrite the pushed args present
+        // in the calling frame.
+        Value thisv = iter.read();
+        IonSpew(IonSpew_BaselineBailouts, "      Is function!");
+        IonSpew(IonSpew_BaselineBailouts, "      thisv=%016llx", *((uint64_t *) &thisv));
+
+        size_t thisvOffset = builder.framePushed() + IonJSFrameLayout::offsetOfThis();
+        *builder.valuePointerAtStackOffset(thisvOffset) = thisv;
+
+        JS_ASSERT(iter.slots() >= CountArgSlots(fun));
+        IonSpew(IonSpew_BaselineBailouts, "      frame slots %u, nargs %u, nfixed %u",
+                iter.slots(), fun->nargs, script->nfixed);
+
+        for (uint32_t i = 0; i < fun->nargs; i++) {
+            Value arg = iter.read();
+            IonSpew(IonSpew_BaselineBailouts, "      arg %d = %016llx",
+                        (int) i, *((uint64_t *) &arg));
+            size_t argOffset = builder.framePushed() + IonJSFrameLayout::offsetOfActualArg(i);
+            *builder.valuePointerAtStackOffset(argOffset) = arg;
+        }
+    }
+
+    for (uint32_t i = 0; i < script->nfixed; i++) {
+        Value slot = iter.read();
+        if (!builder.writeValue(slot, "FixedValue"))
+            return false;
+    }
+
+    IonSpew(IonSpew_BaselineBailouts, "      pushing %d expression stack slots",
+                                      (int) exprStackSlots);
+    for (uint32_t i = 0; i < exprStackSlots; i++) {
+        Value v;
+
+        // If coming from an invalidation bailout, and this is the topmost
+        // value, and a value override has been specified, don't read from the
+        // iterator. Otherwise, we risk using a garbage value.
+        if (!iter.moreFrames() && i == exprStackSlots - 1 && cx->runtime->hasIonReturnOverride()) {
+            JS_ASSERT(invalidate);
+            iter.skip();
+            IonSpew(IonSpew_BaselineBailouts, "      [Return Override]");
+            v = cx->runtime->takeIonReturnOverride();
+        } else {
+            v = iter.read();
+        }
+        if (!builder.writeValue(v, "StackValue"))
+            return false;
+    }
+
+    size_t endOfBaselineJSFrameStack = builder.framePushed();
+
+    // Get the PC
+    jsbytecode *pc = script->code + iter.pcOffset();
+    JSOp op = JSOp(*pc);
+    bool resumeAfter = iter.resumeAfter();
+
+    // If we are resuming at a LOOPENTRY op, resume at the next op to avoid
+    // a bailout -> enter Ion -> bailout loop with --ion-eager. See also
+    // ThunkToInterpreter.
+    if (!resumeAfter) {
+        while (true) {
+            op = JSOp(*pc);
+            if (op == JSOP_GOTO)
+                pc += GET_JUMP_OFFSET(pc);
+            else if (op == JSOP_LOOPENTRY || op == JSOP_NOP || op == JSOP_LOOPHEAD)
+                pc = GetNextPc(pc);
+            else
+                break;
+        }
+    }
+
+    uint32_t pcOff = pc - script->code;
+    bool isCall = js_CodeSpec[op].format & JOF_INVOKE;
+    BaselineScript *baselineScript = script->baselineScript();
+
+    // For fun.apply({}, arguments) the reconstructed stack depth will be at least 4,
+    // but the funapply may have been inlined. In that case exprStackSlots holds
+    // the real arguments in the slots and need not equal that depth.
+#ifdef DEBUG
+    uint32_t expectedDepth = js_ReconstructStackDepth(cx, script,
+                                                      resumeAfter ? GetNextPc(pc) : pc);
+    JS_ASSERT_IF(op != JSOP_FUNAPPLY || !iter.moreFrames() || resumeAfter,
+                 exprStackSlots == expectedDepth);
+
+    IonSpew(IonSpew_BaselineBailouts, "      Resuming %s pc offset %d (op %s) (line %d) of %s:%d",
+                resumeAfter ? "after" : "at", (int) pcOff, js_CodeName[op],
+                PCToLineNumber(script, pc), script->filename(), (int) script->lineno);
+    IonSpew(IonSpew_BaselineBailouts, "      Bailout kind: %s",
+            BailoutKindString(bailoutKind));
+#endif
+
+    // If this was the last inline frame, then unpacking is almost done.
+    if (!iter.moreFrames()) {
+        // Last frame, so PC for call to next frame is set to NULL.
+        *callPC = NULL;
+
+        // If the bailout was a resumeAfter, and the opcode is monitored,
+        // then the bailed out state should be in a position to enter
+        // into the ICTypeMonitor chain for the op.
+        bool enterMonitorChain = false;
+        if (resumeAfter && (js_CodeSpec[op].format & JOF_TYPESET)) {
+            // Not every monitored op has a monitored fallback stub, e.g.
+            // JSOP_GETINTRINSIC will always return the same value so does
+            // not need a monitor chain.
+            ICEntry &icEntry = baselineScript->icEntryFromPCOffset(pcOff);
+            ICFallbackStub *fallbackStub = icEntry.firstStub()->getChainFallback();
+            if (fallbackStub->isMonitoredFallback())
+                enterMonitorChain = true;
+        }
+
+        uint32_t numCallArgs = isCall ? GET_ARGC(pc) : 0;
+
+        if (resumeAfter && !enterMonitorChain)
+            pc = GetNextPc(pc);
+
+        builder.setResumeFramePtr(prevFramePtr);
+
+        if (enterMonitorChain) {
+            ICEntry &icEntry = baselineScript->icEntryFromPCOffset(pcOff);
+            ICFallbackStub *fallbackStub = icEntry.firstStub()->getChainFallback();
+            JS_ASSERT(fallbackStub->isMonitoredFallback());
+            IonSpew(IonSpew_BaselineBailouts, "      [TYPE-MONITOR CHAIN]");
+            ICMonitoredFallbackStub *monFallbackStub = fallbackStub->toMonitoredFallbackStub();
+            ICStub *firstMonStub = monFallbackStub->fallbackMonitorStub()->firstMonitorStub();
+
+            // To enter a monitoring chain, we load the top stack value into R0.
+            IonSpew(IonSpew_BaselineBailouts, "      Popping top stack value into R0.");
+            builder.popValueInto(PCMappingSlotInfo::SlotInR0);
+
+            // Need to adjust the frameSize for the frame to match the values popped
+            // into registers.
+            frameSize -= sizeof(Value);
+            blFrame->setFrameSize(frameSize);
+            IonSpew(IonSpew_BaselineBailouts, "      Adjusted framesize -= %d: %d",
+                            (int) sizeof(Value), (int) frameSize);
+
+            // If resuming into a JSOP_CALL, baseline keeps the arguments on the
+            // stack and pops them only after returning from the call IC.
+            // Push undefs onto the stack in anticipation of the popping of the
+            // callee, thisv, and actual arguments passed from the caller's frame.
+            if (isCall) {
+                builder.writeValue(UndefinedValue(), "CallOp FillerCallee");
+                builder.writeValue(UndefinedValue(), "CallOp FillerThis");
+                for (uint32_t i = 0; i < numCallArgs; i++)
+                    builder.writeValue(UndefinedValue(), "CallOp FillerArg");
+
+                frameSize += (numCallArgs + 2) * sizeof(Value);
+                blFrame->setFrameSize(frameSize);
+                IonSpew(IonSpew_BaselineBailouts, "      Adjusted framesize += %d: %d",
+                                (int) ((numCallArgs + 2) * sizeof(Value)), (int) frameSize);
+            }
+
+            // Set the resume address to the return point from the IC, and set
+            // the monitor stub addr.
+            builder.setResumeAddr(baselineScript->returnAddressForIC(icEntry));
+            builder.setMonitorStub(firstMonStub);
+            IonSpew(IonSpew_BaselineBailouts, "      Set resumeAddr=%p monitorStub=%p",
+                    baselineScript->returnAddressForIC(icEntry), firstMonStub);
+
+        } else {
+            // If needed, initialize BaselineBailoutInfo's valueR0 and/or valueR1 with the
+            // top stack values.
+            PCMappingSlotInfo slotInfo;
+            uint8_t *nativeCodeForPC = baselineScript->nativeCodeForPC(script, pc, &slotInfo);
+            unsigned numUnsynced = slotInfo.numUnsynced();
+            JS_ASSERT(numUnsynced <= 2);
+            PCMappingSlotInfo::SlotLocation loc1, loc2;
+            if (numUnsynced > 0) {
+                loc1 = slotInfo.topSlotLocation();
+                IonSpew(IonSpew_BaselineBailouts, "      Popping top stack value into %d.",
+                            (int) loc1);
+                builder.popValueInto(loc1);
+            }
+            if (numUnsynced > 1) {
+                loc2 = slotInfo.nextSlotLocation();
+                IonSpew(IonSpew_BaselineBailouts, "      Popping next stack value into %d.",
+                            (int) loc2);
+                JS_ASSERT_IF(loc1 != PCMappingSlotInfo::SlotIgnore, loc1 != loc2);
+                builder.popValueInto(loc2);
+            }
+
+            // Need to adjust the frameSize for the frame to match the values popped
+            // into registers.
+            frameSize -= sizeof(Value) * numUnsynced;
+            blFrame->setFrameSize(frameSize);
+            IonSpew(IonSpew_BaselineBailouts, "      Adjusted framesize -= %d: %d",
+                            int(sizeof(Value) * numUnsynced), int(frameSize));
+
+            // If scopeChain is NULL, the bailout is occurring during the argument check.
+            // In this case, resume into the prologue.
+            uint8_t *opReturnAddr;
+            if (scopeChain == NULL) {
+                // Global and eval scripts expect the scope chain in R1, so only
+                // resume into the prologue for function scripts.
+                JS_ASSERT(fun);
+                JS_ASSERT(numUnsynced == 0);
+                opReturnAddr = baselineScript->prologueEntryAddr();
+                IonSpew(IonSpew_BaselineBailouts, "      Resuming into prologue.");
+
+                // If bailing into prologue, HAS_PUSHED_SPS_FRAME should not be set on frame.
+                blFrame->unsetPushedSPSFrame();
+
+                // Additionally, if SPS is enabled, there are two corner cases to handle:
+                //  1. If resuming into the prologue, and the innermost frame is an inlined
+                //     frame, and the bailout is due to an argument check failure, then:
+                //          The top SPS profiler entry would be for the caller frame.
+                //          Ion would not have set the PC index field on that frame
+                //              (since this bailout happens before MFunctionBoundary).
+                //          Make sure that's done now.
+                //  2. If resuming into the prologue, and the bailout is NOT due to an
+                //     argument check, then:
+                //          The top SPS profiler entry would be for the callee frame.
+                //          Ion would already have pushed an SPS entry for this frame.
+                //          The pc for this entry would be set to NULL.
+                //          Make sure it's set to script->pc.
+                if (cx->runtime->spsProfiler.enabled()) {
+                    if (caller && bailoutKind == Bailout_ArgumentCheck) {
+                        IonSpew(IonSpew_BaselineBailouts, "      Setting PCidx on innermost "
+                                "inlined frame's parent's SPS entry (%s:%d) (pcIdx=%d)!",
+                                caller->filename(), caller->lineno, callerPC - caller->code);
+                        cx->runtime->spsProfiler.updatePC(caller, callerPC);
+                    } else if (bailoutKind != Bailout_ArgumentCheck) {
+                        IonSpew(IonSpew_BaselineBailouts,
+                                "      Popping SPS entry for innermost inlined frame's SPS entry");
+                        cx->runtime->spsProfiler.exit(cx, script, fun);
+                    }
+                }
+            } else {
+                opReturnAddr = nativeCodeForPC;
+            }
+            builder.setResumeAddr(opReturnAddr);
+            IonSpew(IonSpew_BaselineBailouts, "      Set resumeAddr=%p", opReturnAddr);
+        }
+
+        return true;
+    }
+
+    *callPC = pc;
+
+    // Write out descriptor of BaselineJS frame.
+    size_t baselineFrameDescr = MakeFrameDescriptor((uint32_t) builder.framePushed(),
+                                                    IonFrame_BaselineJS);
+    if (!builder.writeWord(baselineFrameDescr, "Descriptor"))
+        return false;
+
+    // Calculate and write out the return address.
+    // The icEntry in question MUST have an ICCall_Fallback as its fallback stub.
+    ICEntry &icEntry = baselineScript->icEntryFromPCOffset(pcOff);
+    JS_ASSERT(icEntry.firstStub()->getChainFallback()->isCall_Fallback());
+    if (!builder.writePtr(baselineScript->returnAddressForIC(icEntry), "ReturnAddr"))
+        return false;
+
+    // Build baseline stub frame:
+    // +===============+
+    // |    StubPtr    |
+    // +---------------+
+    // |   FramePtr    |
+    // +---------------+
+    // |     ArgA      |
+    // +---------------+
+    // |     ...       |
+    // +---------------+
+    // |     Arg0      |
+    // +---------------+
+    // |     ThisV     |
+    // +---------------+
+    // |  ActualArgC   |
+    // +---------------+
+    // |  CalleeToken  |
+    // +---------------+
+    // | Descr(BLStub) |
+    // +---------------+
+    // |  ReturnAddr   |
+    // +===============+
+
+    IonSpew(IonSpew_BaselineBailouts, "      [BASELINE-STUB FRAME]");
+
+    size_t startOfBaselineStubFrame = builder.framePushed();
+
+    // Write stub pointer.
+    JS_ASSERT(icEntry.fallbackStub()->isCall_Fallback());
+    if (!builder.writePtr(icEntry.fallbackStub(), "StubPtr"))
+        return false;
+
+    // Write previous frame pointer (saved earlier).
+    if (!builder.writePtr(prevFramePtr, "PrevFramePtr"))
+        return false;
+    prevFramePtr = builder.virtualPointerAtStackOffset(0);
+
+    // Write out the actual arguments (and thisv), copied from the unpacked stack of
+    // the BaselineJS frame. The arguments are stored in reverse order among the
+    // BaselineJS frame's stack values.
+    JS_ASSERT(isCall);
+    unsigned actualArgc = GET_ARGC(pc);
+    if (op == JSOP_FUNAPPLY)
+        actualArgc = blFrame->numActualArgs();
+
+    JS_ASSERT(actualArgc + 2 <= exprStackSlots);
+    for (unsigned i = 0; i < actualArgc + 1; i++) {
+        size_t argSlot = (script->nfixed + exprStackSlots) - (i + 1);
+        if (!builder.writeValue(*blFrame->valueSlot(argSlot), "ArgVal"))
+            return false;
+    }
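+
+    // For example, if the top expression-stack values are |Callee ThisV Arg0 Arg1|
+    // and actualArgc == 2, the loop above writes Arg1, Arg0, then ThisV, leaving
+    // the callee to be handled via the CalleeToken below.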
+    // In case these arguments need to be copied on the stack again for a rectifier
+    // frame, save the framePushed value here for later use.
+    size_t endOfBaselineStubArgs = builder.framePushed();
+
+    // Calculate frame size for descriptor.
+    size_t baselineStubFrameSize = builder.framePushed() - startOfBaselineStubFrame;
+    size_t baselineStubFrameDescr = MakeFrameDescriptor((uint32_t) baselineStubFrameSize,
+                                                        IonFrame_BaselineStub);
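+    // MakeFrameDescriptor packs the frame size together with the frame type tag
+    // in the low bits, roughly (frameSize << FRAMESIZE_SHIFT) | IonFrame_BaselineStub.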
+
+    // Push actual argc
+    if (!builder.writeWord(actualArgc, "ActualArgc"))
+        return false;
+
+    // Push callee token (must be a JS Function)
+    uint32_t calleeStackSlot = exprStackSlots - uint32_t(actualArgc + 2);
+    size_t calleeOffset = (builder.framePushed() - endOfBaselineJSFrameStack)
+                            + ((exprStackSlots - (calleeStackSlot + 1)) * sizeof(Value));
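+    // For example, with exprStackSlots == 5 and actualArgc == 2, the callee is
+    // at expression-stack slot 1, i.e. 5 - (1 + 1) == 3 values below the top,
+    // so calleeOffset skips those three values plus everything pushed since the
+    // end of the BaselineJS frame's stack.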
+    Value callee = *builder.valuePointerAtStackOffset(calleeOffset);
+    IonSpew(IonSpew_BaselineBailouts, "      CalleeStackSlot=%d", (int) calleeStackSlot);
+    IonSpew(IonSpew_BaselineBailouts, "      Callee = %016llx", *((uint64_t *) &callee));
+    JS_ASSERT(callee.isObject() && callee.toObject().isFunction());
+    JSFunction *calleeFun = callee.toObject().toFunction();
+    if (!builder.writePtr(CalleeToToken(calleeFun), "CalleeToken"))
+        return false;
+    nextCallee.set(calleeFun);
+
+    // Push BaselineStub frame descriptor
+    if (!builder.writeWord(baselineStubFrameDescr, "Descriptor"))
+        return false;
+
+    // Push return address into ICCall_Scripted stub, immediately after the call.
+    void *baselineCallReturnAddr = cx->compartment->ionCompartment()->baselineCallReturnAddr();
+    JS_ASSERT(baselineCallReturnAddr);
+    if (!builder.writePtr(baselineCallReturnAddr, "ReturnAddr"))
+        return false;
+
+    // If actualArgc >= fun->nargs, then we are done.  Otherwise, we need to push on
+    // a reconstructed rectifier frame.
+    if (actualArgc >= calleeFun->nargs)
+        return true;
+
+    // Push a reconstructed rectifier frame.
+    // +===============+
+    // |  UndefinedU   |
+    // +---------------+
+    // |     ...       |
+    // +---------------+
+    // |  Undefined0   |
+    // +---------------+
+    // |     ArgA      |
+    // +---------------+
+    // |     ...       |
+    // +---------------+
+    // |     Arg0      |
+    // +---------------+
+    // |     ThisV     |
+    // +---------------+
+    // |  ActualArgC   |
+    // +---------------+
+    // |  CalleeToken  |
+    // +---------------+
+    // |  Descr(Rect)  |
+    // +---------------+
+    // |  ReturnAddr   |
+    // +===============+
+
+    IonSpew(IonSpew_BaselineBailouts, "      [RECTIFIER FRAME]");
+
+    size_t startOfRectifierFrame = builder.framePushed();
+
+    // On x86 only, the frame pointer is saved again in the rectifier frame.
+#if defined(JS_CPU_X86)
+    if (!builder.writePtr(prevFramePtr, "PrevFramePtr-X86Only"))
+        return false;
+#endif
+
+    // Push undefined for missing arguments.
+    for (unsigned i = 0; i < (calleeFun->nargs - actualArgc); i++) {
+        if (!builder.writeValue(UndefinedValue(), "FillerVal"))
+            return false;
+    }
+
+    // Copy arguments + thisv from BaselineStub frame.
+    if (!builder.subtract((actualArgc + 1) * sizeof(Value), "CopiedArgs"))
+        return false;
+    BufferPointer<uint8_t> stubArgsEnd =
+        builder.pointerAtStackOffset<uint8_t>(builder.framePushed() - endOfBaselineStubArgs);
+    IonSpew(IonSpew_BaselineBailouts, "      MemCpy from %p", stubArgsEnd.get());
+    memcpy(builder.pointerAtStackOffset<uint8_t>(0).get(), stubArgsEnd.get(),
+           (actualArgc + 1) * sizeof(Value));
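+
+    // For example, with actualArgc == 2 this reserves three Value slots and
+    // copies |ThisV Arg0 Arg1| verbatim from the BaselineStub frame, so the
+    // rectifier frame sees the same actuals below the undefined fillers.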
+
+    // Calculate frame size for descriptor.
+    size_t rectifierFrameSize = builder.framePushed() - startOfRectifierFrame;
+    size_t rectifierFrameDescr = MakeFrameDescriptor((uint32_t) rectifierFrameSize,
+                                                     IonFrame_Rectifier);
+
+    // Push actualArgc
+    if (!builder.writeWord(actualArgc, "ActualArgc"))
+        return false;
+
+    // Push calleeToken again.
+    if (!builder.writePtr(CalleeToToken(calleeFun), "CalleeToken"))
+        return false;
+
+    // Push rectifier frame descriptor
+    if (!builder.writeWord(rectifierFrameDescr, "Descriptor"))
+        return false;
+
+    // Push return address into the ArgumentsRectifier code, immediately after the
+    // IonCode call.
+    void *rectReturnAddr = cx->compartment->ionCompartment()->getArgumentsRectifierReturnAddr();
+    JS_ASSERT(rectReturnAddr);
+    if (!builder.writePtr(rectReturnAddr, "ReturnAddr"))
+        return false;
+
+    return true;
+}
+
+uint32_t
+ion::BailoutIonToBaseline(JSContext *cx, IonActivation *activation, IonBailoutIterator &iter,
+                          bool invalidate, BaselineBailoutInfo **bailoutInfo)
+{
+    JS_ASSERT(bailoutInfo != NULL);
+    JS_ASSERT(*bailoutInfo == NULL);
+
+    // The caller of the top frame must be one of the following:
+    //      OptimizedJS - Ion calling into Ion.
+    //      BaselineStub - Baseline calling into Ion.
+    //      Entry - Interpreter or other calling into Ion.
+    //      Rectifier - Arguments rectifier calling into Ion.
+    JS_ASSERT(iter.isOptimizedJS());
+    FrameType prevFrameType = iter.prevType();
+    JS_ASSERT(prevFrameType == IonFrame_OptimizedJS ||
+              prevFrameType == IonFrame_BaselineStub ||
+              prevFrameType == IonFrame_Entry ||
+              prevFrameType == IonFrame_Rectifier);
+
+    // All incoming frames are going to look like this:
+    //
+    //      +---------------+
+    //      |     ...       |
+    //      +---------------+
+    //      |     Args      |
+    //      |     ...       |
+    //      +---------------+
+    //      |    ThisV      |
+    //      +---------------+
+    //      |  ActualArgC   |
+    //      +---------------+
+    //      |  CalleeToken  |
+    //      +---------------+
+    //      |  Descriptor   |
+    //      +---------------+
+    //      |  ReturnAddr   |
+    //      +---------------+
+    //      |    |||||      | <---- Overwrite starting here.
+    //      |    |||||      |
+    //      |    |||||      |
+    //      +---------------+
+
+    IonSpew(IonSpew_BaselineBailouts, "Bailing to baseline %s:%u (IonScript=%p) (FrameType=%d)",
+            iter.script()->filename(), iter.script()->lineno, (void *) iter.ionScript(),
+            (int) prevFrameType);
+    IonSpew(IonSpew_BaselineBailouts, "  Reading from snapshot offset %u size %u",
+            iter.snapshotOffset(), iter.ionScript()->snapshotsSize());
+    iter.ionScript()->setBailoutExpected();
+
+    // Allocate buffer to hold stack replacement data.
+    BaselineStackBuilder builder(iter, 1024);
+    if (!builder.init())
+        return BAILOUT_RETURN_FATAL_ERROR;
+    IonSpew(IonSpew_BaselineBailouts, "  Incoming frame ptr = %p", builder.startFrame());
+
+    SnapshotIterator snapIter(iter);
+
+    RootedFunction callee(cx, iter.maybeCallee());
+    if (callee) {
+        IonSpew(IonSpew_BaselineBailouts, "  Callee function (%s:%u)",
+                callee->nonLazyScript()->filename(), callee->nonLazyScript()->lineno);
+    } else {
+        IonSpew(IonSpew_BaselineBailouts, "  No callee!");
+    }
+
+    if (iter.isConstructing())
+        IonSpew(IonSpew_BaselineBailouts, "  Constructing!");
+    else
+        IonSpew(IonSpew_BaselineBailouts, "  Not constructing!");
+
+    IonSpew(IonSpew_BaselineBailouts, "  Restoring frames:");
+    int frameNo = 0;
+
+    // Reconstruct baseline frames using the builder.
+    RootedScript caller(cx);
+    jsbytecode *callerPC = NULL;
+    RootedFunction fun(cx, callee);
+    RootedScript scr(cx, iter.script());
+    while (true) {
+        IonSpew(IonSpew_BaselineBailouts, "    FrameNo %d", frameNo);
+        jsbytecode *callPC = NULL;
+        RootedFunction nextCallee(cx, NULL);
+        if (!InitFromBailout(cx, caller, callerPC, fun, scr, snapIter, invalidate, builder,
+                             &nextCallee, &callPC))
+        {
+            return BAILOUT_RETURN_FATAL_ERROR;
+        }
+
+        if (!snapIter.moreFrames()) {
+            JS_ASSERT(!callPC);
+            break;
+        }
+
+        JS_ASSERT(nextCallee);
+        JS_ASSERT(callPC);
+        caller = scr;
+        callerPC = callPC;
+        fun = nextCallee;
+        scr = fun->nonLazyScript();
+        snapIter.nextFrame();
+
+        frameNo++;
+    }
+    IonSpew(IonSpew_BaselineBailouts, "  Done restoring frames");
+    BailoutKind bailoutKind = snapIter.bailoutKind();
+
+    // Take the reconstructed baseline stack so it doesn't get freed when the
+    // builder is destroyed.
+    BaselineBailoutInfo *info = builder.takeBuffer();
+    info->numFrames = frameNo + 1;
+
+    // Do stack check.
+    bool overRecursed = false;
+    JS_CHECK_RECURSION_WITH_EXTRA_DONT_REPORT(cx, info->copyStackTop - info->copyStackBottom,
+                                              overRecursed = true);
+    if (overRecursed)
+        return BAILOUT_RETURN_OVERRECURSED;
+
+    info->bailoutKind = bailoutKind;
+    *bailoutInfo = info;
+    return BAILOUT_RETURN_BASELINE;
+}
+
+static bool
+HandleBoundsCheckFailure(JSContext *cx, HandleScript outerScript, HandleScript innerScript)
+{
+    IonSpew(IonSpew_Bailouts, "Bounds check failure %s:%d, inlined into %s:%d",
+            innerScript->filename(), innerScript->lineno,
+            outerScript->filename(), outerScript->lineno);
+
+    JS_ASSERT(outerScript->hasIonScript());
+    JS_ASSERT(!outerScript->ion->invalidated());
+
+    // TODO: Currently this mimics Ion's handling of this case.  Investigate setting
+    // the flag on innerScript as opposed to outerScript, and maybe invalidating both
+    // inner and outer scripts, instead of just the outer one.
+    outerScript->failedBoundsCheck = true;
+    IonSpew(IonSpew_BaselineBailouts, "Invalidating due to bounds check failure");
+    return Invalidate(cx, outerScript);
+}
+
+static bool
+HandleShapeGuardFailure(JSContext *cx, HandleScript outerScript, HandleScript innerScript)
+{
+    IonSpew(IonSpew_Bailouts, "Shape guard failure %s:%d, inlined into %s:%d",
+            innerScript->filename(), innerScript->lineno,
+            outerScript->filename(), outerScript->lineno);
+
+    JS_ASSERT(outerScript->hasIonScript());
+    JS_ASSERT(!outerScript->ion->invalidated());
+
+    // TODO: Currently this mimics Ion's handling of this case.  Investigate setting
+    // the flag on innerScript as opposed to outerScript, and maybe invalidating both
+    // inner and outer scripts, instead of just the outer one.
+    outerScript->failedShapeGuard = true;
+    IonSpew(IonSpew_BaselineBailouts, "Invalidating due to shape guard failure");
+    return Invalidate(cx, outerScript);
+}
+
+static bool
+HandleCachedShapeGuardFailure(JSContext *cx, HandleScript outerScript, HandleScript innerScript)
+{
+    IonSpew(IonSpew_Bailouts, "Cached shape guard failure %s:%d, inlined into %s:%d",
+            innerScript->filename(), innerScript->lineno,
+            outerScript->filename(), outerScript->lineno);
+
+    JS_ASSERT(outerScript->hasIonScript());
+    JS_ASSERT(!outerScript->ion->invalidated());
+
+    outerScript->failedShapeGuard = true;
+
+    // No need to purge baseline ICs.  Baseline will do one of two things: add a new
+    // optimized stub (preventing monomorphic IC caching), or set a flag indicating that
+    // an unoptimizable access was made, also preventing monomorphic IC caching.
+
+    IonSpew(IonSpew_BaselineBailouts, "Invalidating due to cached shape guard failure");
+
+    return Invalidate(cx, outerScript);
+}
+
+uint32_t
+ion::FinishBailoutToBaseline(BaselineBailoutInfo *bailoutInfo)
+{
+    // The caller pushes R0 and R1 on the stack without rooting them.
+    // Since a GC here is very unlikely, just suppress it.
+    JSContext *cx = GetIonContext()->cx;
+    js::gc::AutoSuppressGC suppressGC(cx);
+
+    IonSpew(IonSpew_BaselineBailouts, "  Done restoring frames");
+
+    uint32_t numFrames = bailoutInfo->numFrames;
+    JS_ASSERT(numFrames > 0);
+    BailoutKind bailoutKind = bailoutInfo->bailoutKind;
+
+    // Free the bailout buffer.
+    js_free(bailoutInfo);
+    bailoutInfo = NULL;
+
+    // Ensure the frame has a call object if it needs one. If the scope chain
+    // is NULL, we will enter baseline code at the prologue, so there is no need
+    // to do anything in that case.
+    BaselineFrame *topFrame = GetTopBaselineFrame(cx);
+    if (topFrame->scopeChain() && !EnsureHasScopeObjects(cx, topFrame))
+        return false;
+
+    // Create arguments objects for bailed out frames, to maintain the invariant
+    // that script->needsArgsObj() implies frame->hasArgsObj().
+    RootedScript innerScript(cx, NULL);
+    RootedScript outerScript(cx, NULL);
+    IonFrameIterator iter(cx);
+    uint32_t frameno = 0;
+    while (frameno < numFrames) {
+        JS_ASSERT(!iter.isOptimizedJS());
+
+        if (iter.isBaselineJS()) {
+            BaselineFrame *frame = iter.baselineFrame();
+            JS_ASSERT(!frame->hasArgsObj());
+
+            if (frame->script()->needsArgsObj()) {
+                ArgumentsObject *argsobj = ArgumentsObject::createExpected(cx, frame);
+                if (!argsobj)
+                    return false;
+
+                // The arguments object is a local binding and needsArgsObj does
+                // not check if it is clobbered. Ensure that the local binding is
+                // restored during bailout before storing the arguments object
+                // to the slot.
+                RootedScript script(cx, frame->script());
+                SetFrameArgumentsObject(cx, frame, script, argsobj);
+            }
+
+            if (frameno == 0)
+                innerScript = frame->script();
+
+            if (frameno == numFrames - 1)
+                outerScript = frame->script();
+
+            frameno++;
+        }
+
+        ++iter;
+    }
+
+    JS_ASSERT(innerScript);
+    JS_ASSERT(outerScript);
+    IonSpew(IonSpew_BaselineBailouts,
+            "  Restored outerScript=(%s:%u,%u) innerScript=(%s:%u,%u) (bailoutKind=%u)",
+            outerScript->filename(), outerScript->lineno, outerScript->getUseCount(),
+            innerScript->filename(), innerScript->lineno, innerScript->getUseCount(),
+            (unsigned) bailoutKind);
+
+    switch (bailoutKind) {
+      case Bailout_Normal:
+        // Do nothing.
+        break;
+      case Bailout_ArgumentCheck:
+      case Bailout_TypeBarrier:
+      case Bailout_Monitor:
+        // Reflow types.  But in baseline, this will happen automatically because
+        // for any monitored op (or for argument checks), bailout will resume into
+        // the monitoring IC which will handle the type updates.
+        break;
+      case Bailout_BoundsCheck:
+        if (!HandleBoundsCheckFailure(cx, outerScript, innerScript))
+            return false;
+        break;
+      case Bailout_ShapeGuard:
+        if (!HandleShapeGuardFailure(cx, outerScript, innerScript))
+            return false;
+        break;
+      case Bailout_CachedShapeGuard:
+        if (!HandleCachedShapeGuardFailure(cx, outerScript, innerScript))
+            return false;
+        break;
+      default:
+        JS_NOT_REACHED("Unknown bailout kind!");
+    }
+
+    return true;
+}
new file mode 100644
--- /dev/null
+++ b/js/src/ion/BaselineCompiler.cpp
@@ -0,0 +1,2433 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=99:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "BaselineJIT.h"
+#include "BaselineIC.h"
+#include "BaselineHelpers.h"
+#include "BaselineCompiler.h"
+#include "FixedList.h"
+#include "IonLinker.h"
+#include "IonSpewer.h"
+#include "VMFunctions.h"
+#include "IonFrames-inl.h"
+
+#include "jsinterpinlines.h"
+#include "jsopcodeinlines.h"
+
+using namespace js;
+using namespace js::ion;
+
+BaselineCompiler::BaselineCompiler(JSContext *cx, HandleScript script)
+  : BaselineCompilerSpecific(cx, script),
+    return_(new HeapLabel())
+{
+}
+
+bool
+BaselineCompiler::init()
+{
+    if (!labels_.init(script->length))
+        return false;
+
+    for (size_t i = 0; i < script->length; i++)
+        new (&labels_[i]) Label();
+
+    if (!frame.init())
+        return false;
+
+    return true;
+}
+
+MethodStatus
+BaselineCompiler::compile()
+{
+    IonSpew(IonSpew_BaselineScripts, "Baseline compiling script %s:%d (%p)",
+            script->filename(), script->lineno, script.get());
+
+    if (!script->ensureRanAnalysis(cx))
+        return Method_Error;
+
+    // Pin analysis info during compilation.
+    types::AutoEnterAnalysis autoEnterAnalysis(cx);
+
+    if (!emitPrologue())
+        return Method_Error;
+
+    MethodStatus status = emitBody();
+    if (status != Method_Compiled)
+        return status;
+
+    if (!emitEpilogue())
+        return Method_Error;
+
+    if (masm.oom())
+        return Method_Error;
+
+    Linker linker(masm);
+    IonCode *code = linker.newCode(cx, JSC::BASELINE_CODE);
+    if (!code)
+        return Method_Error;
+
+    JS_ASSERT(!script->hasBaselineScript());
+
+    // Encode the pc mapping table. See PCMappingIndexEntry for
+    // more information.
+    Vector<PCMappingIndexEntry> pcMappingIndexEntries(cx);
+    CompactBufferWriter pcEntries;
+    uint32_t previousOffset = 0;
+
+    for (size_t i = 0; i < pcMappingEntries_.length(); i++) {
+        PCMappingEntry &entry = pcMappingEntries_[i];
+        entry.fixupNativeOffset(masm);
+
+        if (entry.addIndexEntry) {
+            PCMappingIndexEntry indexEntry;
+            indexEntry.pcOffset = entry.pcOffset;
+            indexEntry.nativeOffset = entry.nativeOffset;
+            indexEntry.bufferOffset = pcEntries.length();
+            if (!pcMappingIndexEntries.append(indexEntry))
+                return Method_Error;
+            previousOffset = entry.nativeOffset;
+        }
+
+        // Use the high bit of the SlotInfo byte to indicate that the
+        // native code offset (relative to the previous op) is > 0 and
+        // comes next in the buffer.
+        JS_ASSERT((entry.slotInfo.toByte() & 0x80) == 0);
+
+        if (entry.nativeOffset == previousOffset) {
+            pcEntries.writeByte(entry.slotInfo.toByte());
+        } else {
+            JS_ASSERT(entry.nativeOffset > previousOffset);
+            pcEntries.writeByte(0x80 | entry.slotInfo.toByte());
+            pcEntries.writeUnsigned(entry.nativeOffset - previousOffset);
+        }
+
+        previousOffset = entry.nativeOffset;
+    }
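+
+    // For example, an entry at the same native offset as the previous one is
+    // encoded as its SlotInfo byte alone, while an entry five bytes further on
+    // is encoded as (0x80 | slotInfo) followed by the unsigned delta 5.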
+
+    if (pcEntries.oom())
+        return Method_Error;
+
+    prologueOffset_.fixup(&masm);
+    spsPushToggleOffset_.fixup(&masm);
+
+    BaselineScript *baselineScript = BaselineScript::New(cx, prologueOffset_.offset(),
+                                                         spsPushToggleOffset_.offset(),
+                                                         icEntries_.length(),
+                                                         pcMappingIndexEntries.length(),
+                                                         pcEntries.length());
+    if (!baselineScript)
+        return Method_Error;
+    script->baseline = baselineScript;
+
+    IonSpew(IonSpew_BaselineScripts, "Created BaselineScript %p (raw %p) for %s:%d",
+            (void *) script->baseline, (void *) code->raw(),
+            script->filename(), script->lineno);
+
+    script->baseline->setMethod(code);
+
+    JS_ASSERT(pcMappingIndexEntries.length() > 0);
+    baselineScript->copyPCMappingIndexEntries(&pcMappingIndexEntries[0]);
+
+    JS_ASSERT(pcEntries.length() > 0);
+    baselineScript->copyPCMappingEntries(pcEntries);
+
+    // Copy IC entries
+    if (icEntries_.length())
+        baselineScript->copyICEntries(script, &icEntries_[0], masm);
+
+    // Adopt fallback stubs from the compiler into the baseline script.
+    baselineScript->adoptFallbackStubs(&stubSpace_);
+
+    // Patch IC loads using IC entries
+    for (size_t i = 0; i < icLoadLabels_.length(); i++) {
+        CodeOffsetLabel label = icLoadLabels_[i].label;
+        label.fixup(&masm);
+        size_t icEntry = icLoadLabels_[i].icEntry;
+        ICEntry *entryAddr = &(baselineScript->icEntry(icEntry));
+        Assembler::patchDataWithValueCheck(CodeLocationLabel(code, label),
+                                           ImmWord(uintptr_t(entryAddr)),
+                                           ImmWord(uintptr_t(-1)));
+    }
+
+    // All barriers are emitted toggled off by default; toggle them on if needed.
+    if (cx->zone()->needsBarrier())
+        baselineScript->toggleBarriers(true);
+
+    // All SPS instrumentation is emitted toggled off.  Toggle it on if needed.
+    if (cx->runtime->spsProfiler.enabled())
+        baselineScript->toggleSPS(true);
+
+    return Method_Compiled;
+}
+
+#ifdef DEBUG
+#define SPEW_OPCODE()                                                         \
+    JS_BEGIN_MACRO                                                            \
+        if (IsJaegerSpewChannelActive(JSpew_JSOps)) {                         \
+            Sprinter sprinter(cx);                                            \
+            sprinter.init();                                                  \
+            RootedScript script_(cx, script);                                 \
+            js_Disassemble1(cx, script_, pc, pc - script_->code,              \
+                            JS_TRUE, &sprinter);                              \
+            JaegerSpew(JSpew_JSOps, "    %2u %s",                             \
+                       (unsigned)frame.stackDepth(), sprinter.string());      \
+        }                                                                     \
+    JS_END_MACRO;
+#else
+#define SPEW_OPCODE()
+#endif /* DEBUG */
+
+bool
+BaselineCompiler::emitPrologue()
+{
+    masm.push(BaselineFrameReg);
+    masm.mov(BaselineStackReg, BaselineFrameReg);
+
+    masm.subPtr(Imm32(BaselineFrame::Size()), BaselineStackReg);
+    masm.checkStackAlignment();
+
+    // Initialize BaselineFrame. For eval scripts, the scope chain
+    // is passed in R1, so we have to be careful not to clobber
+    // it.
+
+    // Initialize BaselineFrame::flags.
+    uint32_t flags = 0;
+    if (script->isForEval())
+        flags |= BaselineFrame::EVAL;
+    masm.store32(Imm32(flags), frame.addressOfFlags());
+
+    if (script->isForEval())
+        masm.storePtr(ImmGCPtr(script), frame.addressOfEvalScript());
+
+    // Initialize locals to |undefined|. Use R0 to minimize code size.
+    if (frame.nlocals() > 0) {
+        masm.moveValue(UndefinedValue(), R0);
+        for (size_t i = 0; i < frame.nlocals(); i++)
+            masm.pushValue(R0);
+    }
+
+    // Record the offset of the prologue, because Ion can bail out before
+    // the scope chain is initialized.
+    prologueOffset_ = masm.currentOffset();
+
+    // Initialize the scope chain before any operation that may
+    // call into the VM and trigger a GC.
+    if (!initScopeChain())
+        return false;
+
+    if (!emitStackCheck())
+        return false;
+
+    if (!emitDebugPrologue())
+        return false;
+
+    if (!emitUseCountIncrement())
+        return false;
+
+    if (!emitArgumentTypeChecks())
+        return false;
+
+    if (!emitSPSPush())
+        return false;
+
+    return true;
+}
+
+bool
+BaselineCompiler::emitEpilogue()
+{
+    masm.bind(return_);
+
+    // Pop SPS frame if necessary
+    emitSPSPop();
+
+    masm.mov(BaselineFrameReg, BaselineStackReg);
+    masm.pop(BaselineFrameReg);
+
+    masm.ret();
+    return true;
+}
+
+bool
+BaselineCompiler::emitIC(ICStub *stub, bool isForOp)
+{
+    ICEntry *entry = allocateICEntry(stub, isForOp);
+    if (!entry)
+        return false;
+
+    CodeOffsetLabel patchOffset;
+    EmitCallIC(&patchOffset, masm);
+    entry->setReturnOffset(masm.currentOffset());
+    if (!addICLoadLabel(patchOffset))
+        return false;
+
+    return true;
+}
+
+typedef bool (*DebugPrologueFn)(JSContext *, BaselineFrame *, JSBool *);
+static const VMFunction DebugPrologueInfo = FunctionInfo<DebugPrologueFn>(ion::DebugPrologue);
+
+bool
+BaselineCompiler::emitDebugPrologue()
+{
+    if (!debugMode_)
+        return true;
+
+    // Load pointer to BaselineFrame in R0.
+    masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+
+    prepareVMCall();
+    pushArg(R0.scratchReg());
+    if (!callVM(DebugPrologueInfo))
+        return false;
+
+    // If the stub returns |true|, we have to return the value stored in the
+    // frame's return value slot.
+    Label done;
+    masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, &done);
+    {
+        masm.loadValue(frame.addressOfReturnValue(), JSReturnOperand);
+        masm.jump(return_);
+    }
+    masm.bind(&done);
+    return true;
+}
+
+typedef bool (*StrictEvalPrologueFn)(JSContext *, BaselineFrame *);
+static const VMFunction StrictEvalPrologueInfo =
+    FunctionInfo<StrictEvalPrologueFn>(ion::StrictEvalPrologue);
+
+typedef bool (*HeavyweightFunPrologueFn)(JSContext *, BaselineFrame *);
+static const VMFunction HeavyweightFunPrologueInfo =
+    FunctionInfo<HeavyweightFunPrologueFn>(ion::HeavyweightFunPrologue);
+
+bool
+BaselineCompiler::initScopeChain()
+{
+    RootedFunction fun(cx, function());
+    if (fun) {
+        // Use callee->environment as scope chain. Note that we do
+        // this also for heavy-weight functions, so that the scope
+        // chain slot is properly initialized if the call triggers GC.
+        Register callee = R0.scratchReg();
+        Register scope = R1.scratchReg();
+        masm.loadPtr(frame.addressOfCallee(), callee);
+        masm.loadPtr(Address(callee, JSFunction::offsetOfEnvironment()), scope);
+        masm.storePtr(scope, frame.addressOfScopeChain());
+
+        if (fun->isHeavyweight()) {
+            // Call into the VM to create a new call object.
+            prepareVMCall();
+
+            masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+            pushArg(R0.scratchReg());
+
+            if (!callVM(HeavyweightFunPrologueInfo))
+                return false;
+        }
+    } else {
+        // For global and eval scripts, the scope chain is in R1.
+        masm.storePtr(R1.scratchReg(), frame.addressOfScopeChain());
+
+        if (script->isForEval() && script->strict) {
+            // Strict eval needs its own call object.
+            prepareVMCall();
+
+            masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+            pushArg(R0.scratchReg());
+
+            if (!callVM(StrictEvalPrologueInfo))
+                return false;
+        }
+    }
+
+    return true;
+}
+
+typedef bool (*ReportOverRecursedFn)(JSContext *);
+static const VMFunction CheckOverRecursedInfo =
+    FunctionInfo<ReportOverRecursedFn>(CheckOverRecursed);
+
+bool
+BaselineCompiler::emitStackCheck()
+{
+    Label skipCall;
+    uintptr_t *limitAddr = &cx->runtime->mainThread.ionStackLimit;
+    masm.loadPtr(AbsoluteAddress(limitAddr), R0.scratchReg());
+    masm.branchPtr(Assembler::AboveOrEqual, BaselineStackReg, R0.scratchReg(), &skipCall);
+
+    prepareVMCall();
+    if (!callVM(CheckOverRecursedInfo))
+        return false;
+
+    masm.bind(&skipCall);
+    return true;
+}
+
+typedef bool (*InterruptCheckFn)(JSContext *);
+static const VMFunction InterruptCheckInfo = FunctionInfo<InterruptCheckFn>(InterruptCheck);
+
+bool
+BaselineCompiler::emitInterruptCheck()
+{
+    Label done;
+    void *interrupt = (void *)&cx->compartment->rt->interrupt;
+    masm.branch32(Assembler::Equal, AbsoluteAddress(interrupt), Imm32(0), &done);
+
+    prepareVMCall();
+    if (!callVM(InterruptCheckInfo))
+        return false;
+
+    masm.bind(&done);
+    return true;
+}
+
+bool
+BaselineCompiler::emitUseCountIncrement()
+{
+    // Emit no use count increments or bailouts if Ion is not
+    // enabled, or if the script will never be Ion-compileable.
+
+    if (!ionCompileable_)
+        return true;
+
+    Register scriptReg = R2.scratchReg();
+    Register countReg = R0.scratchReg();
+    Address useCountAddr(scriptReg, JSScript::offsetOfUseCount());
+
+    masm.movePtr(ImmGCPtr(script), scriptReg);
+    masm.load32(useCountAddr, countReg);
+    masm.add32(Imm32(1), countReg);
+    masm.store32(countReg, useCountAddr);
+
+    Label skipCall;
+
+    uint32_t minUses = UsesBeforeIonRecompile(script, pc);
+    masm.branch32(Assembler::LessThan, countReg, Imm32(minUses), &skipCall);
+
+    masm.branchPtr(Assembler::Equal,
+                   Address(scriptReg, offsetof(JSScript, ion)),
+                   ImmWord(ION_COMPILING_SCRIPT), &skipCall);
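+
+    // At this point the use count has reached the Ion threshold and the script
+    // is not already marked as compiling, so the IC below may attempt an Ion
+    // compilation.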
+
+    // Call IC.
+    ICUseCount_Fallback::Compiler stubCompiler(cx);
+    if (!emitNonOpIC(stubCompiler.getStub(&stubSpace_)))
+        return false;
+
+    masm.bind(&skipCall);
+
+    return true;
+}
+
+bool
+BaselineCompiler::emitArgumentTypeChecks()
+{
+    if (!function())
+        return true;
+
+    frame.pushThis();
+    frame.popRegsAndSync(1);
+
+    ICTypeMonitor_Fallback::Compiler compiler(cx, (uint32_t) 0);
+    if (!emitNonOpIC(compiler.getStub(&stubSpace_)))
+        return false;
+
+    for (size_t i = 0; i < function()->nargs; i++) {
+        frame.pushArg(i);
+        frame.popRegsAndSync(1);
+
+        ICTypeMonitor_Fallback::Compiler compiler(cx, i + 1);
+        if (!emitNonOpIC(compiler.getStub(&stubSpace_)))
+            return false;
+    }
+
+    return true;
+}
+
+bool
+BaselineCompiler::emitDebugTrap()
+{
+    JS_ASSERT(debugMode_);
+    JS_ASSERT(frame.numUnsyncedSlots() == 0);
+
+    bool enabled = script->stepModeEnabled() || script->hasBreakpointsAt(pc);
+
+    // Emit patchable call to debug trap handler.
+    IonCode *handler = cx->compartment->ionCompartment()->debugTrapHandler(cx);
+    mozilla::DebugOnly<CodeOffsetLabel> offset = masm.toggledCall(handler, enabled);
+
+#ifdef DEBUG
+    // Patchable call offset has to match the pc mapping offset.
+    PCMappingEntry &entry = pcMappingEntries_[pcMappingEntries_.length() - 1];
+    JS_ASSERT((&offset)->offset() == entry.nativeOffset);
+#endif
+
+    // Add an IC entry for the return offset -> pc mapping.
+    ICEntry icEntry(pc - script->code, false);
+    icEntry.setReturnOffset(masm.currentOffset());
+    if (!icEntries_.append(icEntry))
+        return false;
+
+    return true;
+}
+
+bool
+BaselineCompiler::emitSPSPush()
+{
+    // Enter the IC, guarded by a toggled jump (initially disabled).
+    Label noPush;
+    CodeOffsetLabel toggleOffset = masm.toggledJump(&noPush);
+    JS_ASSERT(frame.numUnsyncedSlots() == 0);
+    ICProfiler_Fallback::Compiler compiler(cx);
+    if (!emitNonOpIC(compiler.getStub(&stubSpace_)))
+        return false;
+    masm.bind(&noPush);
+
+    // Store the start offset in the appropriate location.
+    JS_ASSERT(spsPushToggleOffset_.offset() == 0);
+    spsPushToggleOffset_ = toggleOffset;
+    return true;
+}
+
+void
+BaselineCompiler::emitSPSPop()
+{
+    // If a profiler entry was pushed on this frame, pop it.
+    Label noPop;
+    masm.branchTest32(Assembler::Zero, frame.addressOfFlags(),
+                      Imm32(BaselineFrame::HAS_PUSHED_SPS_FRAME), &noPop);
+    masm.spsPopFrame(&cx->runtime->spsProfiler, R1.scratchReg());
+    masm.bind(&noPop);
+}
+
+MethodStatus
+BaselineCompiler::emitBody()
+{
+    JS_ASSERT(pc == script->code);
+
+    bool lastOpUnreachable = false;
+    uint32_t emittedOps = 0;
+
+    while (true) {
+        SPEW_OPCODE();
+        JSOp op = JSOp(*pc);
+        IonSpew(IonSpew_BaselineOp, "Compiling op @ %d: %s",
+                int(pc - script->code), js_CodeName[op]);
+
+        analyze::Bytecode *code = script->analysis()->maybeCode(pc);
+
+        // Skip unreachable ops.
+        if (!code) {
+            if (op == JSOP_STOP)
+                break;
+            pc += GetBytecodeLength(pc);
+            lastOpUnreachable = true;
+            continue;
+        }
+
+        // Fully sync the stack if there are incoming jumps.
+        if (code->jumpTarget) {
+            frame.syncStack(0);
+            frame.setStackDepth(code->stackDepth);
+        }
+
+        // Always sync in debug mode.
+        if (debugMode_)
+            frame.syncStack(0);
+
+        // At the beginning of any op, at most the top 2 stack-values are unsynced.
+        if (frame.stackDepth() > 2)
+            frame.syncStack(2);
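+
+        // For example, with five values on the stack, syncStack(2) spills the
+        // bottom three to the frame and may leave the top two in registers or
+        // as known constants.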
+
+        frame.assertValidState(pc);
+
+        masm.bind(labelOf(pc));
+
+        // Add a PC -> native mapping entry for the current op. These entries are
+        // used when we need the native code address for a given pc, for instance
+        // for bailouts from Ion, the debugger and exception handling. See
+        // PCMappingIndexEntry for more information.
+        bool addIndexEntry = (pc == script->code || lastOpUnreachable || emittedOps > 100);
+        if (addIndexEntry)
+            emittedOps = 0;
+        if (!addPCMappingEntry(addIndexEntry))
+            return Method_Error;
+
+        // Emit traps for breakpoints and step mode.
+        if (debugMode_ && !emitDebugTrap())
+            return Method_Error;
+
+        switch (op) {
+          default:
+            // Ignore fat opcodes; we compile the decomposed version instead.
+            if (js_CodeSpec[op].format & JOF_DECOMPOSE)
+                break;
+            IonSpew(IonSpew_BaselineAbort, "Unhandled op: %s", js_CodeName[op]);
+            return Method_CantCompile;
+
+#define EMIT_OP(OP)                            \
+          case OP:                             \
+            if (!this->emit_##OP())            \
+                return Method_Error;           \
+            break;
+OPCODE_LIST(EMIT_OP)
+#undef EMIT_OP
+        }
+
+        if (op == JSOP_STOP)
+            break;
+
+        pc += GetBytecodeLength(pc);
+        emittedOps++;
+        lastOpUnreachable = false;
+    }
+
+    JS_ASSERT(JSOp(*pc) == JSOP_STOP);
+    return Method_Compiled;
+}
+
+bool
+BaselineCompiler::emit_JSOP_NOP()
+{
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_LABEL()
+{
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_NOTEARG()
+{
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_POP()
+{
+    frame.pop();
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_DUP()
+{
+    // Keep top stack value in R0, sync the rest so that we can use R1. We use
+    // separate registers because every register can be used by at most one
+    // StackValue.
+    frame.popRegsAndSync(1);
+    masm.moveValue(R0, R1);
+
+    // inc/dec ops use DUP followed by ONE, ADD. Push R0 last to avoid a move.
+    frame.push(R1);
+    frame.push(R0);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_DUP2()
+{
+    frame.syncStack(0);
+
+    masm.loadValue(frame.addressOfStackValue(frame.peek(-2)), R0);
+    masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R1);
+
+    frame.push(R0);
+    frame.push(R1);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_SWAP()
+{
+    // Keep top stack values in R0 and R1.
+    frame.popRegsAndSync(2);
+
+    frame.push(R1);
+    frame.push(R0);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_PICK()
+{
+    frame.syncStack(0);
+
+    // Pick takes a value on the stack and moves it to the top.
+    // For instance, pick 2:
+    //     before: A B C D E
+    //     after : A B D E C
+
+    // First, move value at -(amount + 1) into R0.
+    int depth = -(GET_INT8(pc) + 1);
+    masm.loadValue(frame.addressOfStackValue(frame.peek(depth)), R0);
+
+    // Move the other values down.
+    depth++;
+    for (; depth < 0; depth++) {
+        Address source = frame.addressOfStackValue(frame.peek(depth));
+        Address dest = frame.addressOfStackValue(frame.peek(depth - 1));
+        masm.loadValue(source, R1);
+        masm.storeValue(R1, dest);
+    }
+
+    // Push R0.
+    frame.pop();
+    frame.push(R0);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_GOTO()
+{
+    frame.syncStack(0);
+
+    jsbytecode *target = pc + GET_JUMP_OFFSET(pc);
+    masm.jump(labelOf(target));
+    return true;
+}
+
+bool
+BaselineCompiler::emitToBoolean()
+{
+    Label skipIC;
+    masm.branchTestBoolean(Assembler::Equal, R0, &skipIC);
+
+    // Call IC
+    ICToBool_Fallback::Compiler stubCompiler(cx);
+    if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+        return false;
+
+    masm.bind(&skipIC);
+    return true;
+}
+
+bool
+BaselineCompiler::emitTest(bool branchIfTrue)
+{
+    bool knownBoolean = frame.peek(-1)->isKnownBoolean();
+
+    // Keep top stack value in R0.
+    frame.popRegsAndSync(1);
+
+    if (!knownBoolean && !emitToBoolean())
+        return false;
+
+    // IC will leave a JSBool value (guaranteed) in R0, just need to branch on it.
+    masm.branchTestBooleanTruthy(branchIfTrue, R0, labelOf(pc + GET_JUMP_OFFSET(pc)));
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_IFEQ()
+{
+    return emitTest(false);
+}
+
+bool
+BaselineCompiler::emit_JSOP_IFNE()
+{
+    return emitTest(true);
+}
+
+bool
+BaselineCompiler::emitAndOr(bool branchIfTrue)
+{
+    bool knownBoolean = frame.peek(-1)->isKnownBoolean();
+
+    // AND and OR leave the original value on the stack.
+    frame.syncStack(0);
+
+    masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R0);
+    if (!knownBoolean && !emitToBoolean())
+        return false;
+
+    masm.branchTestBooleanTruthy(branchIfTrue, R0, labelOf(pc + GET_JUMP_OFFSET(pc)));
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_AND()
+{
+    return emitAndOr(false);
+}
+
+bool
+BaselineCompiler::emit_JSOP_OR()
+{
+    return emitAndOr(true);
+}
+
+bool
+BaselineCompiler::emit_JSOP_NOT()
+{
+    bool knownBoolean = frame.peek(-1)->isKnownBoolean();
+
+    // Keep top stack value in R0.
+    frame.popRegsAndSync(1);
+
+    if (!knownBoolean && !emitToBoolean())
+        return false;
+
+    masm.notBoolean(R0);
+
+    frame.push(R0, JSVAL_TYPE_BOOLEAN);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_POS()
+{
+    // Keep top stack value in R0.
+    frame.popRegsAndSync(1);
+
+    // Inline path for int32 and double.
+    Label done;
+    masm.branchTestNumber(Assembler::Equal, R0, &done);
+
+    // Call IC.
+    ICToNumber_Fallback::Compiler stubCompiler(cx);
+    if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+        return false;
+
+    masm.bind(&done);
+    frame.push(R0);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_LOOPHEAD()
+{
+    return emitInterruptCheck();
+}
+
+bool
+BaselineCompiler::emit_JSOP_LOOPENTRY()
+{
+    frame.syncStack(0);
+    return emitUseCountIncrement();
+}
+
+bool
+BaselineCompiler::emit_JSOP_VOID()
+{
+    frame.pop();
+    frame.push(UndefinedValue());
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_UNDEFINED()
+{
+    frame.push(UndefinedValue());
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_HOLE()
+{
+    frame.push(MagicValue(JS_ELEMENTS_HOLE));
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_NULL()
+{
+    frame.push(NullValue());
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_THIS()
+{
+    // Keep this value in R0
+    frame.pushThis();
+
+    // In strict mode functions or self-hosted functions, |this| is left alone.
+    if (!function() || function()->strict() || function()->isSelfHostedBuiltin())
+        return true;
+
+    Label skipIC;
+    // Keep |thisv| in R0
+    frame.popRegsAndSync(1);
+    // If |this| is already an object, skip the IC.
+    masm.branchTestObject(Assembler::Equal, R0, &skipIC);
+
+    // Call IC
+    ICThis_Fallback::Compiler stubCompiler(cx);
+    if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+        return false;
+
+    masm.storeValue(R0, frame.addressOfThis());
+
+    // R0 is new pushed |this| value.
+    masm.bind(&skipIC);
+    frame.push(R0);
+
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_TRUE()
+{
+    frame.push(BooleanValue(true));
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_FALSE()
+{
+    frame.push(BooleanValue(false));
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_ZERO()
+{
+    frame.push(Int32Value(0));
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_ONE()
+{
+    frame.push(Int32Value(1));
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_INT8()
+{
+    frame.push(Int32Value(GET_INT8(pc)));
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_INT32()
+{
+    frame.push(Int32Value(GET_INT32(pc)));
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_UINT16()
+{
+    frame.push(Int32Value(GET_UINT16(pc)));
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_UINT24()
+{
+    frame.push(Int32Value(GET_UINT24(pc)));
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_DOUBLE()
+{
+    frame.push(script->getConst(GET_UINT32_INDEX(pc)));
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_STRING()
+{
+    frame.push(StringValue(script->getAtom(pc)));
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_OBJECT()
+{
+    frame.push(ObjectValue(*script->getObject(pc)));
+    return true;
+}
+
+typedef JSObject *(*CloneRegExpObjectFn)(JSContext *, JSObject *, JSObject *);
+static const VMFunction CloneRegExpObjectInfo =
+    FunctionInfo<CloneRegExpObjectFn>(CloneRegExpObject);
+
+bool
+BaselineCompiler::emit_JSOP_REGEXP()
+{
+    RootedObject reObj(cx, script->getRegExp(GET_UINT32_INDEX(pc)));
+    RootedObject proto(cx, script->global().getOrCreateRegExpPrototype(cx));
+    if (!proto)
+        return false;
+
+    prepareVMCall();
+
+    pushArg(ImmGCPtr(proto));
+    pushArg(ImmGCPtr(reObj));
+
+    if (!callVM(CloneRegExpObjectInfo))
+        return false;
+
+    // Box and push return value.
+    masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0);
+    frame.push(R0);
+    return true;
+}
+
+typedef JSObject *(*LambdaFn)(JSContext *, HandleFunction, HandleObject);
+static const VMFunction LambdaInfo = FunctionInfo<LambdaFn>(js::Lambda);
+
+bool
+BaselineCompiler::emit_JSOP_LAMBDA()
+{
+    RootedFunction fun(cx, script->getFunction(GET_UINT32_INDEX(pc)));
+
+    prepareVMCall();
+    masm.loadPtr(frame.addressOfScopeChain(), R0.scratchReg());
+
+    pushArg(R0.scratchReg());
+    pushArg(ImmGCPtr(fun));
+
+    if (!callVM(LambdaInfo))
+        return false;
+
+    // Box and push return value.
+    masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0);
+    frame.push(R0);
+    return true;
+}
+
+void
+BaselineCompiler::storeValue(const StackValue *source, const Address &dest,
+                             const ValueOperand &scratch)
+{
+    switch (source->kind()) {
+      case StackValue::Constant:
+        masm.storeValue(source->constant(), dest);
+        break;
+      case StackValue::Register:
+        masm.storeValue(source->reg(), dest);
+        break;
+      case StackValue::LocalSlot:
+        masm.loadValue(frame.addressOfLocal(source->localSlot()), scratch);
+        masm.storeValue(scratch, dest);
+        break;
+      case StackValue::ArgSlot:
+        masm.loadValue(frame.addressOfArg(source->argSlot()), scratch);
+        masm.storeValue(scratch, dest);
+        break;
+      case StackValue::ThisSlot:
+        masm.loadValue(frame.addressOfThis(), scratch);
+        masm.storeValue(scratch, dest);
+        break;
+      case StackValue::Stack:
+        masm.loadValue(frame.addressOfStackValue(source), scratch);
+        masm.storeValue(scratch, dest);
+        break;
+      default:
+        JS_NOT_REACHED("Invalid kind");
+    }
+}
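+
+// As used below: JSOP_SETELEM and JSOP_INITELEM spill the top stack value to
+// the frame's scratch slot with
+// storeValue(frame.peek(-1), frame.addressOfScratchValue(), R2).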
+
+bool
+BaselineCompiler::emit_JSOP_BITOR()
+{
+    return emitBinaryArith();
+}
+
+bool
+BaselineCompiler::emit_JSOP_BITXOR()
+{
+    return emitBinaryArith();
+}
+
+bool
+BaselineCompiler::emit_JSOP_BITAND()
+{
+    return emitBinaryArith();
+}
+
+bool
+BaselineCompiler::emit_JSOP_LSH()
+{
+    return emitBinaryArith();
+}
+
+bool
+BaselineCompiler::emit_JSOP_RSH()
+{
+    return emitBinaryArith();
+}
+
+bool
+BaselineCompiler::emit_JSOP_URSH()
+{
+    return emitBinaryArith();
+}
+
+bool
+BaselineCompiler::emit_JSOP_ADD()
+{
+    return emitBinaryArith();
+}
+
+bool
+BaselineCompiler::emit_JSOP_SUB()
+{
+    return emitBinaryArith();
+}
+
+bool
+BaselineCompiler::emit_JSOP_MUL()
+{
+    return emitBinaryArith();
+}
+
+bool
+BaselineCompiler::emit_JSOP_DIV()
+{
+    return emitBinaryArith();
+}
+
+bool
+BaselineCompiler::emit_JSOP_MOD()
+{
+    return emitBinaryArith();
+}
+
+bool
+BaselineCompiler::emitBinaryArith()
+{
+    // Keep the top two JSStack values in R0 and R1.
+    frame.popRegsAndSync(2);
+
+    // Call IC
+    ICBinaryArith_Fallback::Compiler stubCompiler(cx);
+    if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+        return false;
+
+    // Mark R0 as pushed stack value.
+    frame.push(R0);
+    return true;
+}
+
+bool
+BaselineCompiler::emitUnaryArith()
+{
+    // Keep top stack value in R0.
+    frame.popRegsAndSync(1);
+
+    // Call IC
+    ICUnaryArith_Fallback::Compiler stubCompiler(cx);
+    if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+        return false;
+
+    // Mark R0 as pushed stack value.
+    frame.push(R0);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_BITNOT()
+{
+    return emitUnaryArith();
+}
+
+bool
+BaselineCompiler::emit_JSOP_NEG()
+{
+    return emitUnaryArith();
+}
+
+bool
+BaselineCompiler::emit_JSOP_LT()
+{
+    return emitCompare();
+}
+
+bool
+BaselineCompiler::emit_JSOP_LE()
+{
+    return emitCompare();
+}
+
+bool
+BaselineCompiler::emit_JSOP_GT()
+{
+    return emitCompare();
+}
+
+bool
+BaselineCompiler::emit_JSOP_GE()
+{
+    return emitCompare();
+}
+
+bool
+BaselineCompiler::emit_JSOP_EQ()
+{
+    return emitCompare();
+}
+
+bool
+BaselineCompiler::emit_JSOP_NE()
+{
+    return emitCompare();
+}
+
+bool
+BaselineCompiler::emitCompare()
+{
+    // Keep the top two JSStack values in R0 and R1.
+    frame.popRegsAndSync(2);
+
+    // Call IC.
+    ICCompare_Fallback::Compiler stubCompiler(cx);
+    if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+        return false;
+
+    // Mark R0 as pushed stack value.
+    frame.push(R0, JSVAL_TYPE_BOOLEAN);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_STRICTEQ()
+{
+    return emitCompare();
+}
+
+bool
+BaselineCompiler::emit_JSOP_STRICTNE()
+{
+    return emitCompare();
+}
+
+bool
+BaselineCompiler::emit_JSOP_CONDSWITCH()
+{
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_CASE()
+{
+    frame.popRegsAndSync(2);
+    frame.push(R0);
+    frame.syncStack(0);
+
+    // Call IC.
+    ICCompare_Fallback::Compiler stubCompiler(cx);
+    if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+        return false;
+
+    Register payload = masm.extractInt32(R0, R0.scratchReg());
+    jsbytecode *target = pc + GET_JUMP_OFFSET(pc);
+
+    Label done;
+    masm.branch32(Assembler::Equal, payload, Imm32(0), &done);
+    {
+        // Pop the switch value if the case matches.
+        masm.addPtr(Imm32(sizeof(Value)), StackPointer);
+        masm.jump(labelOf(target));
+    }
+    masm.bind(&done);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_DEFAULT()
+{
+    frame.pop();
+    return emit_JSOP_GOTO();
+}
+
+bool
+BaselineCompiler::emit_JSOP_LINENO()
+{
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_NEWARRAY()
+{
+    frame.syncStack(0);
+
+    uint32_t length = GET_UINT24(pc);
+    RootedTypeObject type(cx);
+    if (!types::UseNewTypeForInitializer(cx, script, pc, JSProto_Array)) {
+        type = types::TypeScript::InitObject(cx, script, pc, JSProto_Array);
+        if (!type)
+            return false;
+    }
+
+    // Pass length in R0, type in R1.
+    masm.move32(Imm32(length), R0.scratchReg());
+    masm.movePtr(ImmGCPtr(type), R1.scratchReg());
+
+    ICNewArray_Fallback::Compiler stubCompiler(cx);
+    if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+        return false;
+
+    frame.push(R0);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_INITELEM_ARRAY()
+{
+    // Keep the object and rhs on the stack.
+    frame.syncStack(0);
+
+    // Load object in R0, index in R1.
+    masm.loadValue(frame.addressOfStackValue(frame.peek(-2)), R0);
+    masm.moveValue(Int32Value(GET_UINT24(pc)), R1);
+
+    // Call IC.
+    ICSetElem_Fallback::Compiler stubCompiler(cx);
+    if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+        return false;
+
+    // Pop the rhs, so that the object is on the top of the stack.
+    frame.pop();
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_NEWOBJECT()
+{
+    frame.syncStack(0);
+
+    RootedTypeObject type(cx);
+    if (!types::UseNewTypeForInitializer(cx, script, pc, JSProto_Object)) {
+        type = types::TypeScript::InitObject(cx, script, pc, JSProto_Object);
+        if (!type)
+            return false;
+    }
+
+    RootedObject baseObject(cx, script->getObject(pc));
+    RootedObject templateObject(cx, CopyInitializerObject(cx, baseObject, MaybeSingletonObject));
+    if (!templateObject)
+        return false;
+
+    if (type) {
+        templateObject->setType(type);
+    } else {
+        if (!JSObject::setSingletonType(cx, templateObject))
+            return false;
+    }
+
+    // Pass base object in R0.
+    masm.movePtr(ImmGCPtr(templateObject), R0.scratchReg());
+
+    ICNewObject_Fallback::Compiler stubCompiler(cx);
+    if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+        return false;
+
+    frame.push(R0);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_NEWINIT()
+{
+    frame.syncStack(0);
+    JSProtoKey key = JSProtoKey(GET_UINT8(pc));
+
+    RootedTypeObject type(cx);
+    if (!types::UseNewTypeForInitializer(cx, script, pc, key)) {
+        type = types::TypeScript::InitObject(cx, script, pc, key);
+        if (!type)
+            return false;
+    }
+
+    if (key == JSProto_Array) {
+        // Pass length in R0, type in R1.
+        masm.move32(Imm32(0), R0.scratchReg());
+        masm.movePtr(ImmGCPtr(type), R1.scratchReg());
+
+        ICNewArray_Fallback::Compiler stubCompiler(cx);
+        if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+            return false;
+    } else {
+        JS_ASSERT(key == JSProto_Object);
+
+        RootedObject templateObject(cx);
+        templateObject = NewBuiltinClassInstance(cx, &ObjectClass, MaybeSingletonObject);
+        if (!templateObject)
+            return false;
+
+        if (type) {
+            templateObject->setType(type);
+        } else {
+            if (!JSObject::setSingletonType(cx, templateObject))
+                return false;
+        }
+
+        // Pass base object in R0.
+        masm.movePtr(ImmGCPtr(templateObject), R0.scratchReg());
+
+        ICNewObject_Fallback::Compiler stubCompiler(cx);
+        if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+            return false;
+    }
+
+    frame.push(R0);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_INITELEM()
+{
+    // Store RHS in the scratch slot.
+    storeValue(frame.peek(-1), frame.addressOfScratchValue(), R2);
+    frame.pop();
+
+    // Keep object and index in R0 and R1.
+    frame.popRegsAndSync(2);
+
+    // Push the object to store the result of the IC.
+    frame.push(R0);
+    frame.syncStack(0);
+
+    // Keep RHS on the stack.
+    frame.pushScratchValue();
+
+    // Call IC.
+    ICSetElem_Fallback::Compiler stubCompiler(cx);
+    if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+        return false;
+
+    // Pop the RHS so that the object is on top of the stack.
+    frame.pop();
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_INITPROP()
+{
+    // Keep lhs in R0, rhs in R1.
+    frame.popRegsAndSync(2);
+
+    // Push the object to store the result of the IC.
+    frame.push(R0);
+    frame.syncStack(0);
+
+    // Call IC.
+    ICSetProp_Fallback::Compiler compiler(cx);
+    return emitOpIC(compiler.getStub(&stubSpace_));
+}
+
+bool
+BaselineCompiler::emit_JSOP_ENDINIT()
+{
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_GETELEM()
+{
+    // Keep top two stack values in R0 and R1.
+    frame.popRegsAndSync(2);
+
+    // Call IC.
+    ICGetElem_Fallback::Compiler stubCompiler(cx);
+    if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+        return false;
+
+    // Mark R0 as pushed stack value.
+    frame.push(R0);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_CALLELEM()
+{
+    return emit_JSOP_GETELEM();
+}
+
+bool
+BaselineCompiler::emit_JSOP_SETELEM()
+{
+    // Store RHS in the scratch slot.
+    storeValue(frame.peek(-1), frame.addressOfScratchValue(), R2);
+    frame.pop();
+
+    // Keep object and index in R0 and R1.
+    frame.popRegsAndSync(2);
+
+    // Keep RHS on the stack.
+    frame.pushScratchValue();
+
+    // Call IC.
+    ICSetElem_Fallback::Compiler stubCompiler(cx);
+    if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+        return false;
+
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_ENUMELEM()
+{
+    // ENUMELEM is a SETELEM with a different stack arrangement.
+    // Instead of   OBJ ID RHS,
+    // the stack is RHS OBJ ID.
+
+    // Keep object and index in R0 and R1, and keep RHS on the stack.
+    frame.popRegsAndSync(2);
+
+    // Call IC.
+    ICSetElem_Fallback::Compiler stubCompiler(cx);
+    if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+        return false;
+
+    frame.pop();
+    return true;
+}
+
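+// FunctionInfo/VMFunction pairs like the ones below wrap C++ VM helpers so
+// that generated code can call them through prepareVMCall/pushArg/callVM.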
+typedef bool (*DeleteElementFn)(JSContext *, HandleValue, HandleValue, JSBool *);
+static const VMFunction DeleteElementStrictInfo = FunctionInfo<DeleteElementFn>(DeleteElement<true>);
+static const VMFunction DeleteElementNonStrictInfo = FunctionInfo<DeleteElementFn>(DeleteElement<false>);
+
+bool
+BaselineCompiler::emit_JSOP_DELELEM()
+{
+    // Keep values on the stack for the decompiler.
+    frame.syncStack(0);
+    masm.loadValue(frame.addressOfStackValue(frame.peek(-2)), R0);
+    masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R1);
+
+    prepareVMCall();
+
+    pushArg(R1);
+    pushArg(R0);
+
+    if (!callVM(script->strict ? DeleteElementStrictInfo : DeleteElementNonStrictInfo))
+        return false;
+
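+    // Box the JSBool result into a boolean Value in R1.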
+    masm.boxNonDouble(JSVAL_TYPE_BOOLEAN, ReturnReg, R1);
+    frame.popn(2);
+    frame.push(R1);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_IN()
+{
+    frame.popRegsAndSync(2);
+
+    ICIn_Fallback::Compiler stubCompiler(cx);
+    if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+        return false;
+
+    frame.push(R0);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_GETGNAME()
+{
+    RootedPropertyName name(cx, script->getName(pc));
+
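+    // |undefined|, |NaN| and |Infinity| are non-writable, non-configurable
+    // properties of the global object, so fold them to constants.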
+    if (name == cx->names().undefined) {
+        frame.push(UndefinedValue());
+        return true;
+    }
+    if (name == cx->names().NaN) {
+        frame.push(cx->runtime->NaNValue);
+        return true;
+    }
+    if (name == cx->names().Infinity) {
+        frame.push(cx->runtime->positiveInfinityValue);
+        return true;
+    }
+
+    frame.syncStack(0);
+
+    masm.movePtr(ImmGCPtr(&script->global()), R0.scratchReg());
+
+    // Call IC.
+    ICGetName_Fallback::Compiler stubCompiler(cx);
+    if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+        return false;
+
+    // Mark R0 as pushed stack value.
+    frame.push(R0);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_CALLGNAME()
+{
+    return emit_JSOP_GETGNAME();
+}
+
+bool
+BaselineCompiler::emit_JSOP_BINDGNAME()
+{
+    frame.push(ObjectValue(script->global()));
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_SETPROP()
+{
+    // Keep lhs in R0, rhs in R1.
+    frame.popRegsAndSync(2);
+
+    // Call IC.
+    ICSetProp_Fallback::Compiler compiler(cx);
+    if (!emitOpIC(compiler.getStub(&stubSpace_)))
+        return false;
+
+    // The IC will return the RHS value in R0; mark it as a pushed value.
+    frame.push(R0);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_SETNAME()
+{
+    return emit_JSOP_SETPROP();
+}
+
+bool
+BaselineCompiler::emit_JSOP_SETGNAME()
+{
+    return emit_JSOP_SETPROP();
+}
+
+bool
+BaselineCompiler::emit_JSOP_GETPROP()
+{
+    // Keep object in R0.
+    frame.popRegsAndSync(1);
+
+    // Call IC.
+    ICGetProp_Fallback::Compiler compiler(cx);
+    if (!emitOpIC(compiler.getStub(&stubSpace_)))
+        return false;
+
+    // Mark R0 as pushed stack value.
+    frame.push(R0);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_CALLPROP()
+{
+    return emit_JSOP_GETPROP();
+}
+
+bool
+BaselineCompiler::emit_JSOP_LENGTH()
+{
+    return emit_JSOP_GETPROP();
+}
+
+bool
+BaselineCompiler::emit_JSOP_GETXPROP()
+{
+    return emit_JSOP_GETPROP();
+}
+
+typedef bool (*DeletePropertyFn)(JSContext *, HandleValue, HandlePropertyName, JSBool *);
+static const VMFunction DeletePropertyStrictInfo = FunctionInfo<DeletePropertyFn>(DeleteProperty<true>);
+static const VMFunction DeletePropertyNonStrictInfo = FunctionInfo<DeletePropertyFn>(DeleteProperty<false>);
+
+bool
+BaselineCompiler::emit_JSOP_DELPROP()
+{
+    // Keep value on the stack for the decompiler.
+    frame.syncStack(0);
+    masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R0);
+
+    prepareVMCall();
+
+    pushArg(ImmGCPtr(script->getName(pc)));
+    pushArg(R0);
+
+    if (!callVM(script->strict ? DeletePropertyStrictInfo : DeletePropertyNonStrictInfo))
+        return false;
+
+    masm.boxNonDouble(JSVAL_TYPE_BOOLEAN, ReturnReg, R1);
+    frame.pop();
+    frame.push(R1);
+    return true;
+}
+
+Address
+BaselineCompiler::getScopeCoordinateAddress(Register reg)
+{
+    ScopeCoordinate sc(pc);
+
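+    // Walk sc.hops scope objects up the scope chain, leaving the target
+    // scope object in |reg|.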
+    masm.loadPtr(frame.addressOfScopeChain(), reg);
+    for (unsigned i = sc.hops; i; i--)
+        masm.extractObject(Address(reg, ScopeObject::offsetOfEnclosingScope()), reg);
+
+    RawShape shape = ScopeCoordinateToStaticScopeShape(cx, script, pc);
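+    // Slots beyond the shape's fixed-slot count live out of line in the
+    // dynamic slots array.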
+    if (shape->numFixedSlots() <= sc.slot) {
+        masm.loadPtr(Address(reg, JSObject::offsetOfSlots()), reg);
+        return Address(reg, (sc.slot - shape->numFixedSlots()) * sizeof(Value));
+    }
+
+    return Address(reg, JSObject::getFixedSlotOffset(sc.slot));
+}
+
+bool
+BaselineCompiler::emit_JSOP_GETALIASEDVAR()
+{
+    frame.syncStack(0);
+
+    Address address = getScopeCoordinateAddress(R0.scratchReg());
+    masm.loadValue(address, R0);
+
+    ICTypeMonitor_Fallback::Compiler compiler(cx, (ICMonitoredFallbackStub *) NULL);
+    if (!emitOpIC(compiler.getStub(&stubSpace_)))
+        return false;
+
+    frame.push(R0);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_CALLALIASEDVAR()
+{
+    return emit_JSOP_GETALIASEDVAR();
+}
+
+bool
+BaselineCompiler::emit_JSOP_SETALIASEDVAR()
+{
+    // Sync everything except the top value, so that we can use R0 as scratch
+    // (storeValue does not touch it if the top value is in R0).
+    frame.syncStack(1);
+
+    Address address = getScopeCoordinateAddress(R2.scratchReg());
+    masm.patchableCallPreBarrier(address, MIRType_Value);
+    storeValue(frame.peek(-1), address, R0);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_NAME()
+{
+    frame.syncStack(0);
+
+    masm.loadPtr(frame.addressOfScopeChain(), R0.scratchReg());
+
+    // Call IC.
+    ICGetName_Fallback::Compiler stubCompiler(cx);
+    if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+        return false;
+
+    // Mark R0 as pushed stack value.
+    frame.push(R0);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_CALLNAME()
+{
+    return emit_JSOP_NAME();
+}
+
+bool
+BaselineCompiler::emit_JSOP_BINDNAME()
+{
+    frame.syncStack(0);
+
+    masm.loadPtr(frame.addressOfScopeChain(), R0.scratchReg());
+
+    // Call IC.
+    ICBindName_Fallback::Compiler stubCompiler(cx);
+    if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+        return false;
+
+    // Mark R0 as pushed stack value.
+    frame.push(R0);
+    return true;
+}
+
+typedef bool (*DeleteNameFn)(JSContext *, HandlePropertyName, HandleObject,
+                             MutableHandleValue);
+static const VMFunction DeleteNameInfo = FunctionInfo<DeleteNameFn>(DeleteNameOperation);
+
+bool
+BaselineCompiler::emit_JSOP_DELNAME()
+{
+    frame.syncStack(0);
+    masm.loadPtr(frame.addressOfScopeChain(), R0.scratchReg());
+
+    prepareVMCall();
+
+    pushArg(R0.scratchReg());
+    pushArg(ImmGCPtr(script->getName(pc)));
+
+    if (!callVM(DeleteNameInfo))
+        return false;
+
+    frame.push(R0);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_GETINTRINSIC()
+{
+    frame.syncStack(0);
+
+    ICGetIntrinsic_Fallback::Compiler stubCompiler(cx);
+    if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+        return false;
+
+    frame.push(R0);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_CALLINTRINSIC()
+{
+    return emit_JSOP_GETINTRINSIC();
+}
+
+typedef bool (*DefVarOrConstFn)(JSContext *, HandlePropertyName, unsigned, HandleObject);
+static const VMFunction DefVarOrConstInfo = FunctionInfo<DefVarOrConstFn>(DefVarOrConst);
+
+bool
+BaselineCompiler::emit_JSOP_DEFVAR()
+{
+    frame.syncStack(0);
+
+    unsigned attrs = JSPROP_ENUMERATE;
+    if (!script->isForEval())
+        attrs |= JSPROP_PERMANENT;
+    if (JSOp(*pc) == JSOP_DEFCONST)
+        attrs |= JSPROP_READONLY;
+    JS_ASSERT(attrs <= UINT32_MAX);
+
+    masm.loadPtr(frame.addressOfScopeChain(), R0.scratchReg());
+
+    prepareVMCall();
+
+    pushArg(R0.scratchReg());
+    pushArg(Imm32(attrs));
+    pushArg(ImmGCPtr(script->getName(pc)));
+
+    return callVM(DefVarOrConstInfo);
+}
+
+bool
+BaselineCompiler::emit_JSOP_DEFCONST()
+{
+    return emit_JSOP_DEFVAR();
+}
+
+typedef bool (*SetConstFn)(JSContext *, HandlePropertyName, HandleObject, HandleValue);
+static const VMFunction SetConstInfo = FunctionInfo<SetConstFn>(SetConst);
+
+bool
+BaselineCompiler::emit_JSOP_SETCONST()
+{
+    frame.popRegsAndSync(1);
+    frame.push(R0);
+    frame.syncStack(0);
+
+    masm.loadPtr(frame.addressOfScopeChain(), R1.scratchReg());
+
+    prepareVMCall();
+
+    pushArg(R0);
+    pushArg(R1.scratchReg());
+    pushArg(ImmGCPtr(script->getName(pc)));
+
+    return callVM(SetConstInfo);
+}
+
+typedef bool (*DefFunOperationFn)(JSContext *, HandleScript, HandleObject, HandleFunction);
+static const VMFunction DefFunOperationInfo = FunctionInfo<DefFunOperationFn>(DefFunOperation);
+
+bool
+BaselineCompiler::emit_JSOP_DEFFUN()
+{
+    RootedFunction fun(cx, script->getFunction(GET_UINT32_INDEX(pc)));
+
+    frame.syncStack(0);
+    masm.loadPtr(frame.addressOfScopeChain(), R0.scratchReg());
+
+    prepareVMCall();
+
+    pushArg(ImmGCPtr(fun));
+    pushArg(R0.scratchReg());
+    pushArg(ImmGCPtr(script));
+
+    return callVM(DefFunOperationInfo);
+}
+
+bool
+BaselineCompiler::emit_JSOP_GETLOCAL()
+{
+    uint32_t local = GET_SLOTNO(pc);
+
+    if (local >= frame.nlocals()) {
+        // Destructuring assignments may use GETLOCAL to access stack values.
+        frame.syncStack(0);
+        masm.loadValue(Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfLocal(local)), R0);
+        frame.push(R0);
+        return true;
+    }
+
+    frame.pushLocal(local);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_CALLLOCAL()
+{
+    return emit_JSOP_GETLOCAL();
+}
+
+bool
+BaselineCompiler::emit_JSOP_SETLOCAL()
+{
+    // Ensure no other StackValue refers to the old value, for instance i + (i = 3).
+    // This also allows us to use R0 as scratch below.
+    frame.syncStack(1);
+
+    uint32_t local = GET_SLOTNO(pc);
+    storeValue(frame.peek(-1), frame.addressOfLocal(local), R0);
+    return true;
+}
+
+bool
+BaselineCompiler::emitFormalArgAccess(uint32_t arg, bool get)
+{
+    // Fast path: the script does not use |arguments|, or is strict. In strict
+    // mode, formals do not alias the arguments object.
+    if (!script->argumentsHasVarBinding() || script->strict) {
+        if (get) {
+            frame.pushArg(arg);
+        } else {
+            // See the comment in emit_JSOP_SETLOCAL.
+            frame.syncStack(1);
+            storeValue(frame.peek(-1), frame.addressOfArg(arg), R0);
+        }
+
+        return true;
+    }
+
+    // Sync so that we can use R0.
+    frame.syncStack(0);
+
+    // If the script is known to have an arguments object, we can just use it.
+    // Else, we *may* have an arguments object (because we can't invalidate
+    // when needsArgsObj becomes |true|), so we have to test HAS_ARGS_OBJ.
+    Label done;
+    if (!script->needsArgsObj()) {
+        Label hasArgsObj;
+        masm.branchTest32(Assembler::NonZero, frame.addressOfFlags(),
+                          Imm32(BaselineFrame::HAS_ARGS_OBJ), &hasArgsObj);
+        if (get)
+            masm.loadValue(frame.addressOfArg(arg), R0);
+        else
+            storeValue(frame.peek(-1), frame.addressOfArg(arg), R0);
+        masm.jump(&done);
+        masm.bind(&hasArgsObj);
+    }
+
+    // Load the arguments object data vector.
+    Register reg = R2.scratchReg();
+    masm.loadPtr(Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfArgsObj()), reg);
+    masm.loadPrivate(Address(reg, ArgumentsObject::getDataSlotOffset()), reg);
+
+    // Load/store the argument.
+    Address argAddr(reg, ArgumentsData::offsetOfArgs() + arg * sizeof(Value));
+    if (get) {
+        masm.loadValue(argAddr, R0);
+        frame.push(R0);
+    } else {
+        masm.patchableCallPreBarrier(argAddr, MIRType_Value);
+        storeValue(frame.peek(-1), argAddr, R0);
+    }
+
+    masm.bind(&done);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_GETARG()
+{
+    uint32_t arg = GET_SLOTNO(pc);
+    return emitFormalArgAccess(arg, /* get = */ true);
+}
+
+bool
+BaselineCompiler::emit_JSOP_CALLARG()
+{
+    return emit_JSOP_GETARG();
+}
+
+bool
+BaselineCompiler::emit_JSOP_SETARG()
+{
+    uint32_t arg = GET_SLOTNO(pc);
+    return emitFormalArgAccess(arg, /* get = */ false);
+}
+
+bool
+BaselineCompiler::emitCall()
+{
+    JS_ASSERT(js_CodeSpec[*pc].format & JOF_INVOKE);
+
+    uint32_t argc = GET_ARGC(pc);
+
+    frame.syncStack(0);
+    masm.mov(Imm32(argc), R0.scratchReg());
+
+    // Call IC.
+    ICCall_Fallback::Compiler stubCompiler(cx, /* isConstructing = */ JSOp(*pc) == JSOP_NEW);
+    if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+        return false;
+
+    // Update FrameInfo: pop the callee, |this| and the arguments, then push
+    // the return value.
+    frame.popn(argc + 2);
+    frame.push(R0);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_CALL()
+{
+    return emitCall();
+}
+
+bool
+BaselineCompiler::emit_JSOP_NEW()
+{
+    return emitCall();
+}
+
+bool
+BaselineCompiler::emit_JSOP_FUNCALL()
+{
+    return emitCall();
+}
+
+bool
+BaselineCompiler::emit_JSOP_FUNAPPLY()
+{
+    return emitCall();
+}
+
+bool
+BaselineCompiler::emit_JSOP_EVAL()
+{
+    return emitCall();
+}
+
+typedef bool (*ImplicitThisFn)(JSContext *, HandleObject, HandlePropertyName,
+                               MutableHandleValue);
+static const VMFunction ImplicitThisInfo = FunctionInfo<ImplicitThisFn>(ImplicitThisOperation);
+
+bool
+BaselineCompiler::emit_JSOP_IMPLICITTHIS()
+{
+    frame.syncStack(0);
+    masm.loadPtr(frame.addressOfScopeChain(), R0.scratchReg());
+
+    prepareVMCall();
+
+    pushArg(ImmGCPtr(script->getName(pc)));
+    pushArg(R0.scratchReg());
+
+    if (!callVM(ImplicitThisInfo))
+        return false;
+
+    frame.push(R0);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_INSTANCEOF()
+{
+    frame.popRegsAndSync(2);
+
+    ICInstanceOf_Fallback::Compiler stubCompiler(cx);
+    if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+        return false;
+
+    frame.push(R0);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_TYPEOF()
+{
+    frame.popRegsAndSync(1);
+
+    ICTypeOf_Fallback::Compiler stubCompiler(cx);
+    if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+        return false;
+
+    frame.push(R0);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_TYPEOFEXPR()
+{
+    return emit_JSOP_TYPEOF();
+}
+
+typedef bool (*ThrowFn)(JSContext *, HandleValue);
+static const VMFunction ThrowInfo = FunctionInfo<ThrowFn>(js::Throw);
+
+bool
+BaselineCompiler::emit_JSOP_THROW()
+{
+    // Keep value to throw in R0.
+    frame.popRegsAndSync(1);
+
+    prepareVMCall();
+    pushArg(R0);
+
+    return callVM(ThrowInfo);
+}
+
+bool
+BaselineCompiler::emit_JSOP_TRY()
+{
+    return true;
+}
+
+typedef bool (*EnterBlockFn)(JSContext *, BaselineFrame *, Handle<StaticBlockObject *>);
+static const VMFunction EnterBlockInfo = FunctionInfo<EnterBlockFn>(ion::EnterBlock);
+
+bool
+BaselineCompiler::emitEnterBlock()
+{
+    StaticBlockObject &blockObj = script->getObject(pc)->asStaticBlock();
+
+    if (JSOp(*pc) == JSOP_ENTERBLOCK) {
+        for (size_t i = 0; i < blockObj.slotCount(); i++)
+            frame.push(UndefinedValue());
+
+        // Pushed values will be accessed using GETLOCAL and SETLOCAL, so ensure
+        // they are synced.
+        frame.syncStack(0);
+    }
+
+    // Call a stub to push the block on the block chain.
+    prepareVMCall();
+    masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+
+    pushArg(ImmGCPtr(&blockObj));
+    pushArg(R0.scratchReg());
+
+    return callVM(EnterBlockInfo);
+}
+
+bool
+BaselineCompiler::emit_JSOP_ENTERBLOCK()
+{
+    return emitEnterBlock();
+}
+
+bool
+BaselineCompiler::emit_JSOP_ENTERLET0()
+{
+    return emitEnterBlock();
+}
+
+bool
+BaselineCompiler::emit_JSOP_ENTERLET1()
+{
+    return emitEnterBlock();
+}
+
+typedef bool (*LeaveBlockFn)(JSContext *, BaselineFrame *);
+static const VMFunction LeaveBlockInfo = FunctionInfo<LeaveBlockFn>(ion::LeaveBlock);
+
+bool
+BaselineCompiler::emit_JSOP_LEAVEBLOCK()
+{
+    // Call a stub to pop the block from the block chain.
+    prepareVMCall();
+
+    masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+    pushArg(R0.scratchReg());
+
+    if (!callVM(LeaveBlockInfo))
+        return false;
+
+    // Pop slots pushed by ENTERBLOCK.
+    size_t n = StackUses(script, pc);
+    frame.popn(n);
+    return true;
+}
+
+typedef bool (*GetAndClearExceptionFn)(JSContext *, MutableHandleValue);
+static const VMFunction GetAndClearExceptionInfo =
+    FunctionInfo<GetAndClearExceptionFn>(GetAndClearException);
+
+bool
+BaselineCompiler::emit_JSOP_EXCEPTION()
+{
+    prepareVMCall();
+
+    if (!callVM(GetAndClearExceptionInfo))
+        return false;
+
+    frame.push(R0);
+    return true;
+}
+
+typedef bool (*OnDebuggerStatementFn)(JSContext *, BaselineFrame *, jsbytecode *pc, JSBool *);
+static const VMFunction OnDebuggerStatementInfo =
+    FunctionInfo<OnDebuggerStatementFn>(ion::OnDebuggerStatement);
+
+bool
+BaselineCompiler::emit_JSOP_DEBUGGER()
+{
+    prepareVMCall();
+    pushArg(ImmWord(pc));
+
+    masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+    pushArg(R0.scratchReg());
+
+    if (!callVM(OnDebuggerStatementInfo))
+        return false;
+
+    // If the stub returns |true|, return the frame's return value.
+    Label done;
+    masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, &done);
+    {
+        masm.loadValue(frame.addressOfReturnValue(), JSReturnOperand);
+        masm.jump(return_);
+    }
+    masm.bind(&done);
+    return true;
+}
+
+typedef bool (*DebugEpilogueFn)(JSContext *, BaselineFrame *, JSBool);
+static const VMFunction DebugEpilogueInfo = FunctionInfo<DebugEpilogueFn>(ion::DebugEpilogue);
+
+bool
+BaselineCompiler::emitReturn()
+{
+    if (debugMode_) {
+        // Move return value into the frame's rval slot.
+        masm.storeValue(JSReturnOperand, frame.addressOfReturnValue());
+        masm.or32(Imm32(BaselineFrame::HAS_RVAL), frame.addressOfFlags());
+
+        // Load BaselineFrame pointer in R0.
+        frame.syncStack(0);
+        masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+
+        prepareVMCall();
+        pushArg(Imm32(1));
+        pushArg(R0.scratchReg());
+        if (!callVM(DebugEpilogueInfo))
+            return false;
+
+        masm.loadValue(frame.addressOfReturnValue(), JSReturnOperand);
+    }
+
+    if (JSOp(*pc) != JSOP_STOP) {
+        // JSOP_STOP is immediately followed by the return label, so we don't
+        // need a jump.
+        masm.jump(return_);
+    }
+
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_RETURN()
+{
+    JS_ASSERT(frame.stackDepth() == 1);
+
+    frame.popValue(JSReturnOperand);
+    return emitReturn();
+}
+
+bool
+BaselineCompiler::emit_JSOP_STOP()
+{
+    JS_ASSERT(frame.stackDepth() == 0);
+
+    masm.moveValue(UndefinedValue(), JSReturnOperand);
+
+    if (!script->noScriptRval) {
+        // Return the value in the return value slot, if any.
+        Label done;
+        Address flags = frame.addressOfFlags();
+        masm.branchTest32(Assembler::Zero, flags, Imm32(BaselineFrame::HAS_RVAL), &done);
+        masm.loadValue(frame.addressOfReturnValue(), JSReturnOperand);
+        masm.bind(&done);
+    }
+
+    return emitReturn();
+}
+
+bool
+BaselineCompiler::emit_JSOP_RETRVAL()
+{
+    return emit_JSOP_STOP();
+}
+
+typedef bool (*ToIdFn)(JSContext *, HandleScript, jsbytecode *, HandleValue, HandleValue,
+                       MutableHandleValue);
+static const VMFunction ToIdInfo = FunctionInfo<ToIdFn>(js::ToIdOperation);
+
+bool
+BaselineCompiler::emit_JSOP_TOID()
+{
+    // Load index in R0, but keep values on the stack for the decompiler.
+    frame.syncStack(0);
+    masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R0);
+
+    // No-op if index is int32.
+    Label done;
+    masm.branchTestInt32(Assembler::Equal, R0, &done);
+
+    prepareVMCall();
+
+    masm.loadValue(frame.addressOfStackValue(frame.peek(-2)), R1);
+
+    pushArg(R0);
+    pushArg(R1);
+    pushArg(ImmWord(pc));
+    pushArg(ImmGCPtr(script));
+
+    if (!callVM(ToIdInfo))
+        return false;
+
+    masm.bind(&done);
+    frame.pop(); // Pop index.
+    frame.push(R0);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_TABLESWITCH()
+{
+    frame.popRegsAndSync(1);
+
+    // Call IC.
+    ICTableSwitch::Compiler compiler(cx, pc);
+    return emitOpIC(compiler.getStub(&stubSpace_));
+}
+
+bool
+BaselineCompiler::emit_JSOP_ITER()
+{
+    frame.popRegsAndSync(1);
+
+    ICIteratorNew_Fallback::Compiler compiler(cx);
+    if (!emitOpIC(compiler.getStub(&stubSpace_)))
+        return false;
+
+    frame.push(R0);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_MOREITER()
+{
+    frame.syncStack(0);
+    masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R0);
+
+    ICIteratorMore_Fallback::Compiler compiler(cx);
+    if (!emitOpIC(compiler.getStub(&stubSpace_)))
+        return false;
+
+    frame.push(R0);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_ITERNEXT()
+{
+    frame.syncStack(0);
+    masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R0);
+
+    ICIteratorNext_Fallback::Compiler compiler(cx);
+    if (!emitOpIC(compiler.getStub(&stubSpace_)))
+        return false;
+
+    frame.push(R0);
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_ENDITER()
+{
+    frame.popRegsAndSync(1);
+
+    ICIteratorClose_Fallback::Compiler compiler(cx);
+    return emitOpIC(compiler.getStub(&stubSpace_));
+}
+
+bool
+BaselineCompiler::emit_JSOP_SETRVAL()
+{
+    // Store to the frame's return value slot.
+    storeValue(frame.peek(-1), frame.addressOfReturnValue(), R2);
+    masm.or32(Imm32(BaselineFrame::HAS_RVAL), frame.addressOfFlags());
+    frame.pop();
+    return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_POPV()
+{
+    return emit_JSOP_SETRVAL();
+}
+
+typedef bool (*NewArgumentsObjectFn)(JSContext *, BaselineFrame *, MutableHandleValue);
+static const VMFunction NewArgumentsObjectInfo =
+    FunctionInfo<NewArgumentsObjectFn>(ion::NewArgumentsObject);
+
+bool
+BaselineCompiler::emit_JSOP_ARGUMENTS()
+{
+    frame.syncStack(0);
+
+    Label done;
+    if (!script->needsArgsObj()) {
+        // We assume the script does not need an arguments object. However, this
+        // assumption can be invalidated later, see argumentsOptimizationFailed
+        // in JSScript. Because we can't invalidate baseline JIT code, we set a
+        // flag on BaselineScript when that happens and guard on it here.
+        masm.moveValue(MagicValue(JS_OPTIMIZED_ARGUMENTS), R0);
+
+        // Load script->baseline.
+        Register scratch = R1.scratchReg();
+        masm.movePtr(ImmGCPtr(script), scratch);
+        masm.loadPtr(Address(scratch, offsetof(JSScript, baseline)), scratch);
+
+        // If we don't need an arguments object, skip the VM call.
+        masm.branchTest32(Assembler::Zero, Address(scratch, BaselineScript::offsetOfFlags()),
+                          Imm32(BaselineScript::NEEDS_ARGS_OBJ), &done);
+    }
+
+    prepareVMCall();
+
+    masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+    pushArg(R0.scratchReg());
+
+    if (!callVM(NewArgumentsObjectInfo))
+        return false;
+
+    masm.bind(&done);
+    frame.push(R0);
+    return true;
+}
new file mode 100644
--- /dev/null
+++ b/js/src/ion/BaselineCompiler.h
@@ -0,0 +1,242 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=99:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(jsion_baseline_compiler_h__) && defined(JS_ION)
+#define jsion_baseline_compiler_h__
+
+#include "jscntxt.h"
+#include "jscompartment.h"
+#include "IonCode.h"
+#include "jsinfer.h"
+#include "jsinterp.h"
+
+#include "BaselineJIT.h"
+#include "BaselineIC.h"
+#include "FixedList.h"
+
+#if defined(JS_CPU_X86)
+# include "x86/BaselineCompiler-x86.h"
+#elif defined(JS_CPU_X64)
+# include "x64/BaselineCompiler-x64.h"
+#else
+# include "arm/BaselineCompiler-arm.h"
+#endif
+
+namespace js {
+namespace ion {
+
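+// Opcodes handled by the baseline compiler. The list is expanded with the
+// EMIT_OP macro below to declare one emit_JSOP_* method per opcode.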
+#define OPCODE_LIST(_)         \
+    _(JSOP_NOP)                \
+    _(JSOP_LABEL)              \
+    _(JSOP_NOTEARG)            \
+    _(JSOP_POP)                \
+    _(JSOP_DUP)                \
+    _(JSOP_DUP2)               \
+    _(JSOP_SWAP)               \
+    _(JSOP_PICK)               \
+    _(JSOP_GOTO)               \
+    _(JSOP_IFEQ)               \
+    _(JSOP_IFNE)               \
+    _(JSOP_AND)                \
+    _(JSOP_OR)                 \
+    _(JSOP_NOT)                \
+    _(JSOP_POS)                \
+    _(JSOP_LOOPHEAD)           \
+    _(JSOP_LOOPENTRY)          \
+    _(JSOP_VOID)               \
+    _(JSOP_UNDEFINED)          \
+    _(JSOP_HOLE)               \
+    _(JSOP_NULL)               \
+    _(JSOP_THIS)               \
+    _(JSOP_TRUE)               \
+    _(JSOP_FALSE)              \
+    _(JSOP_ZERO)               \
+    _(JSOP_ONE)                \
+    _(JSOP_INT8)               \
+    _(JSOP_INT32)              \
+    _(JSOP_UINT16)             \
+    _(JSOP_UINT24)             \
+    _(JSOP_DOUBLE)             \
+    _(JSOP_STRING)             \
+    _(JSOP_OBJECT)             \
+    _(JSOP_REGEXP)             \
+    _(JSOP_LAMBDA)             \
+    _(JSOP_BITOR)              \
+    _(JSOP_BITXOR)             \
+    _(JSOP_BITAND)             \
+    _(JSOP_LSH)                \
+    _(JSOP_RSH)                \
+    _(JSOP_URSH)               \
+    _(JSOP_ADD)                \
+    _(JSOP_SUB)                \
+    _(JSOP_MUL)                \
+    _(JSOP_DIV)                \
+    _(JSOP_MOD)                \
+    _(JSOP_LT)                 \
+    _(JSOP_LE)                 \
+    _(JSOP_GT)                 \
+    _(JSOP_GE)                 \
+    _(JSOP_EQ)                 \
+    _(JSOP_NE)                 \
+    _(JSOP_STRICTEQ)           \
+    _(JSOP_STRICTNE)           \
+    _(JSOP_CONDSWITCH)         \
+    _(JSOP_CASE)               \
+    _(JSOP_DEFAULT)            \
+    _(JSOP_LINENO)             \
+    _(JSOP_BITNOT)             \
+    _(JSOP_NEG)                \
+    _(JSOP_NEWARRAY)           \
+    _(JSOP_INITELEM_ARRAY)     \
+    _(JSOP_NEWOBJECT)          \
+    _(JSOP_NEWINIT)            \
+    _(JSOP_INITELEM)           \
+    _(JSOP_INITPROP)           \
+    _(JSOP_ENDINIT)            \
+    _(JSOP_GETELEM)            \
+    _(JSOP_SETELEM)            \
+    _(JSOP_CALLELEM)           \
+    _(JSOP_ENUMELEM)           \
+    _(JSOP_DELELEM)            \
+    _(JSOP_IN)                 \
+    _(JSOP_GETGNAME)           \
+    _(JSOP_CALLGNAME)          \
+    _(JSOP_BINDGNAME)          \
+    _(JSOP_SETGNAME)           \
+    _(JSOP_SETNAME)            \
+    _(JSOP_GETPROP)            \
+    _(JSOP_SETPROP)            \
+    _(JSOP_CALLPROP)           \
+    _(JSOP_DELPROP)            \
+    _(JSOP_LENGTH)             \
+    _(JSOP_GETXPROP)           \
+    _(JSOP_GETALIASEDVAR)      \
+    _(JSOP_CALLALIASEDVAR)     \
+    _(JSOP_SETALIASEDVAR)      \
+    _(JSOP_NAME)               \
+    _(JSOP_CALLNAME)           \
+    _(JSOP_BINDNAME)           \
+    _(JSOP_DELNAME)            \
+    _(JSOP_GETINTRINSIC)       \
+    _(JSOP_CALLINTRINSIC)      \
+    _(JSOP_DEFVAR)             \
+    _(JSOP_DEFCONST)           \
+    _(JSOP_SETCONST)           \
+    _(JSOP_DEFFUN)             \
+    _(JSOP_GETLOCAL)           \
+    _(JSOP_CALLLOCAL)          \
+    _(JSOP_SETLOCAL)           \
+    _(JSOP_GETARG)             \
+    _(JSOP_CALLARG)            \
+    _(JSOP_SETARG)             \
+    _(JSOP_CALL)               \
+    _(JSOP_FUNCALL)            \
+    _(JSOP_FUNAPPLY)           \
+    _(JSOP_NEW)                \
+    _(JSOP_EVAL)               \
+    _(JSOP_IMPLICITTHIS)       \
+    _(JSOP_INSTANCEOF)         \
+    _(JSOP_TYPEOF)             \
+    _(JSOP_TYPEOFEXPR)         \
+    _(JSOP_THROW)              \
+    _(JSOP_TRY)                \
+    _(JSOP_ENTERBLOCK)         \
+    _(JSOP_ENTERLET0)          \
+    _(JSOP_ENTERLET1)          \
+    _(JSOP_LEAVEBLOCK)         \
+    _(JSOP_EXCEPTION)          \
+    _(JSOP_DEBUGGER)           \
+    _(JSOP_ARGUMENTS)          \
+    _(JSOP_TOID)               \
+    _(JSOP_TABLESWITCH)        \
+    _(JSOP_ITER)               \
+    _(JSOP_MOREITER)           \
+    _(JSOP_ITERNEXT)           \
+    _(JSOP_ENDITER)            \
+    _(JSOP_POPV)               \
+    _(JSOP_SETRVAL)            \
+    _(JSOP_RETURN)             \
+    _(JSOP_STOP)               \
+    _(JSOP_RETRVAL)
+
+class BaselineCompiler : public BaselineCompilerSpecific
+{
+    FixedList<Label>            labels_;
+    HeapLabel *                 return_;
+
+    // Native code offset right before the scope chain is initialized.
+    CodeOffsetLabel prologueOffset_;
+
+    Label *labelOf(jsbytecode *pc) {
+        return &labels_[pc - script->code];
+    }
+
+  public:
+    BaselineCompiler(JSContext *cx, HandleScript script);
+    bool init();
+
+    MethodStatus compile();
+
+  private:
+    MethodStatus emitBody();
+
+    bool emitPrologue();
+    bool emitEpilogue();
+    bool emitIC(ICStub *stub, bool isForOp);
+    bool emitOpIC(ICStub *stub) {
+        return emitIC(stub, true);
+    }
+    bool emitNonOpIC(ICStub *stub) {
+        return emitIC(stub, false);
+    }
+
+    bool emitStackCheck();
+    bool emitInterruptCheck();
+    bool emitUseCountIncrement();
+    bool emitArgumentTypeChecks();
+    bool emitDebugPrologue();
+    bool emitDebugTrap();
+    bool emitSPSPush();
+    void emitSPSPop();
+
+    bool initScopeChain();
+
+    void storeValue(const StackValue *source, const Address &dest,
+                    const ValueOperand &scratch);
+
+#define EMIT_OP(op) bool emit_##op();
+    OPCODE_LIST(EMIT_OP)
+#undef EMIT_OP
+
+    // JSOP_NEG, JSOP_BITNOT
+    bool emitUnaryArith();
+
+    // JSOP_BITXOR, JSOP_LSH, JSOP_ADD etc.
+    bool emitBinaryArith();
+
+    // Handles JSOP_LT, JSOP_GT, and friends
+    bool emitCompare();
+
+    bool emitReturn();
+
+    bool emitToBoolean();
+    bool emitTest(bool branchIfTrue);
+    bool emitAndOr(bool branchIfTrue);
+    bool emitCall();
+
+    bool emitFormalArgAccess(uint32_t arg, bool get);
+
+    bool emitEnterBlock();
+
+    Address getScopeCoordinateAddress(Register reg);
+};
+
+} // namespace ion
+} // namespace js
+
+#endif
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/BaselineFrame-inl.h
@@ -0,0 +1,71 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=99:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(jsion_baseline_frame_inl_h__) && defined(JS_ION)
+#define jsion_baseline_frame_inl_h__
+
+#include "jscntxt.h"
+#include "jscompartment.h"
+
+#include "IonFrames.h"
+#include "vm/ScopeObject-inl.h"
+
+namespace js {
+namespace ion {
+
+inline void
+BaselineFrame::pushOnScopeChain(ScopeObject &scope)
+{
+    JS_ASSERT(*scopeChain() == scope.enclosingScope() ||
+              *scopeChain() == scope.asCall().enclosingScope().asDeclEnv().enclosingScope());
+    scopeChain_ = &scope;
+}
+
+inline void
+BaselineFrame::popOffScopeChain()
+{
+    scopeChain_ = &scopeChain_->asScope().enclosingScope();
+}
+
+inline bool
+BaselineFrame::pushBlock(JSContext *cx, Handle<StaticBlockObject *> block)
+{
+    JS_ASSERT_IF(hasBlockChain(), blockChain() == *block->enclosingBlock());
+
+    if (block->needsClone()) {
+        ClonedBlockObject *clone = ClonedBlockObject::create(cx, block, this);
+        if (!clone)
+            return false;
+
+        pushOnScopeChain(*clone);
+    }
+
+    setBlockChain(*block);
+    return true;
+}
+
+inline void
+BaselineFrame::popBlock(JSContext *cx)
+{
+    JS_ASSERT(hasBlockChain());
+
+    if (cx->compartment->debugMode())
+        DebugScopes::onPopBlock(cx, this);
+
+    if (blockChain_->needsClone()) {
+        JS_ASSERT(scopeChain_->asClonedBlock().staticBlock() == *blockChain_);
+        popOffScopeChain();
+    }
+
+    setBlockChain(*blockChain_->enclosingBlock());
+}
+
+} // namespace ion
+} // namespace js
+
+#endif
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/BaselineFrame.cpp
@@ -0,0 +1,168 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=99:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "BaselineFrame.h"
+#include "BaselineFrame-inl.h"
+#include "BaselineIC.h"
+#include "BaselineJIT.h"
+#include "Ion.h"
+#include "IonFrames-inl.h"
+
+#include "vm/Debugger.h"
+#include "vm/ScopeObject.h"
+
+using namespace js;
+using namespace js::ion;
+
+void
+BaselineFrame::trace(JSTracer *trc)
+{
+    MarkCalleeToken(trc, calleeToken());
+
+    gc::MarkValueRoot(trc, &thisValue(), "baseline-this");
+
+    // Mark actual and formal args.
+    if (isNonEvalFunctionFrame()) {
+        unsigned numArgs = js::Max(numActualArgs(), numFormalArgs());
+        JS_ASSERT(actuals() == formals());
+        gc::MarkValueRootRange(trc, numArgs, actuals(), "baseline-args");
+    }
+
+    // Mark scope chain.
+    gc::MarkObjectRoot(trc, &scopeChain_, "baseline-scopechain");
+
+    // Mark return value.
+    if (hasReturnValue())
+        gc::MarkValueRoot(trc, returnValue(), "baseline-rval");
+
+    if (isEvalFrame())
+        gc::MarkScriptRoot(trc, &evalScript_, "baseline-evalscript");
+
+    if (hasArgsObj())
+        gc::MarkObjectRoot(trc, &argsObj_, "baseline-args-obj");
+
+    // Mark locals and stack values.
+    size_t nvalues = numValueSlots();
+    if (nvalues > 0) {
+        // The stack grows down, so start at the last Value.
+        Value *last = valueSlot(nvalues - 1);
+        gc::MarkValueRootRange(trc, nvalues, last, "baseline-stack");
+    }
+}
+
+bool
+BaselineFrame::copyRawFrameSlots(AutoValueVector *vec) const
+{
+    unsigned nfixed = script()->nfixed;
+    unsigned nformals = numFormalArgs();
+
+    if (!vec->resize(nformals + nfixed))
+        return false;
+
+    mozilla::PodCopy(vec->begin(), formals(), nformals);
+    for (unsigned i = 0; i < nfixed; i++)
+        (*vec)[nformals + i] = *valueSlot(i);
+    return true;
+}
+
+bool
+BaselineFrame::strictEvalPrologue(JSContext *cx)
+{
+    JS_ASSERT(isStrictEvalFrame());
+
+    CallObject *callobj = CallObject::createForStrictEval(cx, this);
+    if (!callobj)
+        return false;
+
+    pushOnScopeChain(*callobj);
+    flags_ |= HAS_CALL_OBJ;
+    return true;
+}
+
+bool
+BaselineFrame::heavyweightFunPrologue(JSContext *cx)
+{
+    return initFunctionScopeObjects(cx);
+}
+
+bool
+BaselineFrame::initFunctionScopeObjects(JSContext *cx)
+{
+    JS_ASSERT(isNonEvalFunctionFrame());
+    JS_ASSERT(fun()->isHeavyweight());
+
+    CallObject *callobj = CallObject::createForFunction(cx, this);
+    if (!callobj)
+        return false;
+
+    pushOnScopeChain(*callobj);
+    flags_ |= HAS_CALL_OBJ;
+    return true;
+}
+
+bool
+BaselineFrame::initForOsr(StackFrame *fp, uint32_t numStackValues)
+{
+    mozilla::PodZero(this);
+
+    scopeChain_ = fp->scopeChain();
+
+    if (fp->hasCallObjUnchecked())
+        flags_ |= BaselineFrame::HAS_CALL_OBJ;
+
+    if (fp->hasBlockChain()) {
+        flags_ |= BaselineFrame::HAS_BLOCKCHAIN;
+        blockChain_ = &fp->blockChain();
+    }
+
+    if (fp->isEvalFrame()) {
+        flags_ |= BaselineFrame::EVAL;
+        evalScript_ = fp->script();
+    }
+
+    if (fp->script()->needsArgsObj() && fp->hasArgsObj()) {
+        flags_ |= BaselineFrame::HAS_ARGS_OBJ;
+        argsObj_ = &fp->argsObj();
+    }
+
+    if (fp->hasHookData()) {
+        flags_ |= BaselineFrame::HAS_HOOK_DATA;
+        hookData_ = fp->hookData();
+    }
+
+    if (fp->hasPushedSPSFrame())
+        flags_ |= BaselineFrame::HAS_PUSHED_SPS_FRAME;
+
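+    // The frame size must mirror numValueSlots(): frame pointer offset,
+    // BaselineFrame data and the copied stack values.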
+    frameSize_ = BaselineFrame::FramePointerOffset +
+        BaselineFrame::Size() +
+        numStackValues * sizeof(Value);
+
+    JS_ASSERT(numValueSlots() == numStackValues);
+
+    for (uint32_t i = 0; i < numStackValues; i++)
+        *valueSlot(i) = fp->slots()[i];
+
+    JSContext *cx = GetIonContext()->cx;
+    if (cx->compartment->debugMode()) {
+        // In debug mode, update any Debugger.Frame objects for the StackFrame to
+        // point to the BaselineFrame.
+
+        // The caller pushed a fake return address. ScriptFrameIter, used by the
+        // debugger, wants a valid return address, but it's okay to just pick one.
+        // In debug mode there's always at least 1 ICEntry (since there are always
+        // debug prologue/epilogue calls).
+        IonFrameIterator iter(cx->mainThread().ionTop);
+        JS_ASSERT(iter.returnAddress() == NULL);
+        BaselineScript *baseline = fp->script()->baseline;
+        iter.current()->setReturnAddress(baseline->returnAddressForIC(baseline->icEntry(0)));
+
+        if (!Debugger::handleBaselineOsr(cx, fp, this))
+            return false;
+    }
+
+    return true;
+}
new file mode 100644
--- /dev/null
+++ b/js/src/ion/BaselineFrame.h
@@ -0,0 +1,405 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=99:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(jsion_baseline_frame_h__) && defined(JS_ION)
+#define jsion_baseline_frame_h__
+
+#include "jscntxt.h"
+#include "jscompartment.h"
+
+#include "IonFrames.h"
+#include "vm/Stack.h"
+
+namespace js {
+namespace ion {
+
+// The stack looks like this, fp is the frame pointer:
+//
+// fp+y   arguments
+// fp+x   IonJSFrameLayout (frame header)
+// fp  => saved frame pointer
+// fp-x   BaselineFrame
+//        locals
+//        stack values
+
+// Eval frames
+//
+// Like js::StackFrame, every BaselineFrame is either a global frame
+// or a function frame. Both global and function frames can optionally
+// be "eval frames". The callee token for eval function frames is the
+// enclosing function. BaselineFrame::evalScript_ stores the eval script
+// itself.
+class BaselineFrame
+{
+  public:
+    enum Flags {
+        // The frame has a valid return value. See also StackFrame::HAS_RVAL.
+        HAS_RVAL         = 1 << 0,
+
+        // Frame has blockChain_ set.
+        HAS_BLOCKCHAIN   = 1 << 1,
+
+        // A call object has been pushed on the scope chain.
+        HAS_CALL_OBJ     = 1 << 2,
+
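+        // Note: bit 3 is unused.
+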
+        // Frame has an arguments object, argsObj_.
+        HAS_ARGS_OBJ     = 1 << 4,
+
+        // See StackFrame::PREV_UP_TO_DATE.
+        PREV_UP_TO_DATE  = 1 << 5,
+
+        // Eval frame, see the "eval frames" comment.
+        EVAL             = 1 << 6,
+
+        // Frame has hookData_ set.
+        HAS_HOOK_DATA    = 1 << 7,
+
+        // Frame has profiler entry pushed.
+        HAS_PUSHED_SPS_FRAME = 1 << 8
+    };
+
+  protected: // Silence Clang warning about unused private fields.
+    // We need to split the Value into 2 fields of 32 bits, otherwise the C++
+    // compiler may add some padding between the fields.
+    uint32_t loScratchValue_;
+    uint32_t hiScratchValue_;
+    uint32_t loReturnValue_;        // If HAS_RVAL, the frame's return value.
+    uint32_t hiReturnValue_;
+    uint32_t frameSize_;
+    JSObject *scopeChain_;          // Scope chain (always initialized).
+    StaticBlockObject *blockChain_; // If HAS_BLOCKCHAIN, the static block chain.
+    JSScript *evalScript_;          // If isEvalFrame(), the current eval script.
+    ArgumentsObject *argsObj_;      // If HAS_ARGS_OBJ, the arguments object.
+    void *hookData_;                // If HAS_HOOK_DATA, debugger call hook data.
+    uint32_t flags_;
+
+  public:
+    // Distance between the frame pointer and the frame header (return address).
+    // This is the old frame pointer saved in the prologue.
+    static const uint32_t FramePointerOffset = sizeof(void *);
+
+    bool initForOsr(StackFrame *fp, uint32_t numStackValues);
+
+    uint32_t frameSize() const {
+        return frameSize_;
+    }
+    void setFrameSize(uint32_t frameSize) {
+        frameSize_ = frameSize;
+    }
+    inline uint32_t *addressOfFrameSize() {
+        return &frameSize_;
+    }
+    JSObject *scopeChain() const {
+        return scopeChain_;
+    }
+    void setScopeChain(JSObject *scopeChain) {
+        scopeChain_ = scopeChain;
+    }
+    inline JSObject **addressOfScopeChain() {
+        return &scopeChain_;
+    }
+
+    inline Value *addressOfScratchValue() {
+        return reinterpret_cast<Value *>(&loScratchValue_);
+    }
+
+    inline void pushOnScopeChain(ScopeObject &scope);
+    inline void popOffScopeChain();
+
+    CalleeToken calleeToken() const {
+        uint8_t *pointer = (uint8_t *)this + Size() + offsetOfCalleeToken();
+        return *(CalleeToken *)pointer;
+    }
+    JSScript *script() const {
+        if (isEvalFrame())
+            return evalScript();
+        return ScriptFromCalleeToken(calleeToken());
+    }
+    JSFunction *fun() const {
+        return CalleeTokenToFunction(calleeToken());
+    }
+    JSFunction *maybeFun() const {
+        return isFunctionFrame() ? fun() : NULL;
+    }
+    JSFunction *callee() const {
+        return CalleeTokenToFunction(calleeToken());
+    }
+    Value calleev() const {
+        return ObjectValue(*callee());
+    }
+    size_t numValueSlots() const {
+        size_t size = frameSize();
+
+        JS_ASSERT(size >= BaselineFrame::FramePointerOffset + BaselineFrame::Size());
+        size -= BaselineFrame::FramePointerOffset + BaselineFrame::Size();
+
+        JS_ASSERT((size % sizeof(Value)) == 0);
+        return size / sizeof(Value);
+    }
+    Value *valueSlot(size_t slot) const {
+        JS_ASSERT(slot < numValueSlots());
+        return (Value *)this - (slot + 1);
+    }
+
+    Value &unaliasedVar(unsigned i, MaybeCheckAliasing checkAliasing) const {
+        JS_ASSERT_IF(checkAliasing, !script()->varIsAliased(i));
+        JS_ASSERT(i < script()->nfixed);
+        return *valueSlot(i);
+    }
+
+    Value &unaliasedFormal(unsigned i, MaybeCheckAliasing checkAliasing) const {
+        JS_ASSERT(i < numFormalArgs());
+        JS_ASSERT_IF(checkAliasing, !script()->argsObjAliasesFormals());
+        JS_ASSERT_IF(checkAliasing, !script()->formalIsAliased(i));
+        return formals()[i];
+    }
+
+    Value &unaliasedActual(unsigned i, MaybeCheckAliasing checkAliasing) const {
+        JS_ASSERT(i < numActualArgs());
+        JS_ASSERT_IF(checkAliasing, !script()->argsObjAliasesFormals());
+        JS_ASSERT_IF(checkAliasing && i < numFormalArgs(), !script()->formalIsAliased(i));
+        return actuals()[i];
+    }
+
+    Value &unaliasedLocal(unsigned i, MaybeCheckAliasing checkAliasing = CHECK_ALIASING) const {
+#ifdef DEBUG
+        CheckLocalUnaliased(checkAliasing, script(), maybeBlockChain(), i);
+#endif
+        return *valueSlot(i);
+    }
+
+    unsigned numActualArgs() const {
+        return *(size_t *)(reinterpret_cast<const uint8_t *>(this) +
+                           BaselineFrame::Size() +
+                           offsetOfNumActualArgs());
+    }
+    unsigned numFormalArgs() const {
+        return script()->function()->nargs;
+    }
+    Value &thisValue() const {
+        return *(Value *)(reinterpret_cast<const uint8_t *>(this) +
+                         BaselineFrame::Size() +
+                         offsetOfThis());
+    }
+    Value *formals() const {
+        return (Value *)(reinterpret_cast<const uint8_t *>(this) +
+                         BaselineFrame::Size() +
+                         offsetOfArg(0));
+    }
+    Value *actuals() const {
+        return formals();
+    }
+
+    bool copyRawFrameSlots(AutoValueVector *vec) const;
+
+    bool hasReturnValue() const {
+        return flags_ & HAS_RVAL;
+    }
+    Value *returnValue() {
+        return reinterpret_cast<Value *>(&loReturnValue_);
+    }
+    void setReturnValue(const Value &v) {
+        flags_ |= HAS_RVAL;
+        *returnValue() = v;
+    }
+    inline Value *addressOfReturnValue() {
+        return reinterpret_cast<Value *>(&loReturnValue_);
+    }
+
+    bool hasBlockChain() const {
+        return (flags_ & HAS_BLOCKCHAIN) && blockChain_;
+    }
+    StaticBlockObject &blockChain() const {
+        JS_ASSERT(hasBlockChain());
+        return *blockChain_;
+    }
+    StaticBlockObject *maybeBlockChain() const {
+        return hasBlockChain() ? blockChain_ : NULL;
+    }
+    void setBlockChain(StaticBlockObject &block) {
+        flags_ |= HAS_BLOCKCHAIN;
+        blockChain_ = &block;
+    }
+    void setBlockChainNull() {
+        JS_ASSERT(!hasBlockChain());
+        blockChain_ = NULL;
+    }
+    StaticBlockObject **addressOfBlockChain() {
+        return &blockChain_;
+    }
+
+    bool hasCallObj() const {
+        return flags_ & HAS_CALL_OBJ;
+    }
+
+    CallObject &callObj() const {
+        JS_ASSERT(hasCallObj());
+        JS_ASSERT(fun()->isHeavyweight());
+
+        JSObject *obj = scopeChain();
+        while (!obj->isCall())
+            obj = obj->enclosingScope();
+        return obj->asCall();
+    }
+
+    void setFlags(uint32_t flags) {
+        flags_ = flags;
+    }
+    uint32_t *addressOfFlags() {
+        return &flags_;
+    }
+
+    inline bool pushBlock(JSContext *cx, Handle<StaticBlockObject *> block);
+    inline void popBlock(JSContext *cx);
+
+    bool strictEvalPrologue(JSContext *cx);
+    bool heavyweightFunPrologue(JSContext *cx);
+    bool initFunctionScopeObjects(JSContext *cx);
+
+    void initArgsObj(ArgumentsObject &argsobj) {
+        JS_ASSERT(script()->needsArgsObj());
+        flags_ |= HAS_ARGS_OBJ;
+        argsObj_ = &argsobj;
+    }
+    bool hasArgsObj() const {
+        return flags_ & HAS_ARGS_OBJ;
+    }
+    ArgumentsObject &argsObj() const {
+        JS_ASSERT(hasArgsObj());
+        JS_ASSERT(script()->needsArgsObj());
+        return *argsObj_;
+    }
+
+    bool prevUpToDate() const {
+        return flags_ & PREV_UP_TO_DATE;
+    }
+    void setPrevUpToDate() {
+        flags_ |= PREV_UP_TO_DATE;
+    }
+
+    JSScript *evalScript() const {
+        JS_ASSERT(isEvalFrame());
+        return evalScript_;
+    }
+
+    bool hasHookData() const {
+        return flags_ & HAS_HOOK_DATA;
+    }
+
+    void *maybeHookData() const {
+        return hasHookData() ? hookData_ : NULL;
+    }
+
+    void setHookData(void *v) {
+        hookData_ = v;
+        flags_ |= HAS_HOOK_DATA;
+    }
+
+    bool hasPushedSPSFrame() const {
+        return flags_ & HAS_PUSHED_SPS_FRAME;
+    }
+
+    void setPushedSPSFrame() {
+        flags_ |= HAS_PUSHED_SPS_FRAME;
+    }
+
+    void unsetPushedSPSFrame() {
+        flags_ &= ~HAS_PUSHED_SPS_FRAME;
+    }
+
+    void trace(JSTracer *trc);
+
+    bool isFunctionFrame() const {
+        return CalleeTokenIsFunction(calleeToken());
+    }
+    bool isGlobalFrame() const {
+        return !CalleeTokenIsFunction(calleeToken());
+    }
+    bool isEvalFrame() const {
+        return flags_ & EVAL;
+    }
+    bool isStrictEvalFrame() const {
+        return isEvalFrame() && script()->strict;
+    }
+    bool isNonStrictEvalFrame() const {
+        return isEvalFrame() && !script()->strict;
+    }
+    bool isDirectEvalFrame() const {
+        return isEvalFrame() && script()->staticLevel > 0;
+    }
+    bool isNonStrictDirectEvalFrame() const {
+        return isNonStrictEvalFrame() && isDirectEvalFrame();
+    }
+    bool isNonEvalFunctionFrame() const {
+        return isFunctionFrame() && !isEvalFrame();
+    }
+    bool isDebuggerFrame() const {
+        return false;
+    }
+
+    IonJSFrameLayout *framePrefix() const {
+        uint8_t *fp = (uint8_t *)this + Size() + FramePointerOffset;
+        return (IonJSFrameLayout *)fp;
+    }
+
+    // Methods below are used by the compiler.
+    static size_t offsetOfCalleeToken() {
+        return FramePointerOffset + js::ion::IonJSFrameLayout::offsetOfCalleeToken();
+    }
+    static size_t offsetOfThis() {
+        return FramePointerOffset + js::ion::IonJSFrameLayout::offsetOfThis();
+    }
+    static size_t offsetOfArg(size_t index) {
+        return FramePointerOffset + js::ion::IonJSFrameLayout::offsetOfActualArg(index);
+    }
+    static size_t offsetOfNumActualArgs() {
+        return FramePointerOffset + js::ion::IonJSFrameLayout::offsetOfNumActualArgs();
+    }
+    static size_t Size() {
+        return sizeof(BaselineFrame);
+    }
+
+    // The reverseOffsetOf methods below compute the offset relative to the
+    // frame's base pointer. Since the stack grows down, these offsets are
+    // negative.
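+    // For example, reverseOffsetOfLocal(0) is -int(Size()) - sizeof(Value):
+    // the first local lives immediately below the BaselineFrame data.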
+    static int reverseOffsetOfFrameSize() {
+        return -int(Size()) + offsetof(BaselineFrame, frameSize_);
+    }
+    static int reverseOffsetOfScratchValue() {
+        return -int(Size()) + offsetof(BaselineFrame, loScratchValue_);
+    }
+    static int reverseOffsetOfScopeChain() {
+        return -int(Size()) + offsetof(BaselineFrame, scopeChain_);
+    }
+    static int reverseOffsetOfBlockChain() {
+        return -int(Size()) + offsetof(BaselineFrame, blockChain_);
+    }
+    static int reverseOffsetOfArgsObj() {
+        return -int(Size()) + offsetof(BaselineFrame, argsObj_);
+    }
+    static int reverseOffsetOfFlags() {
+        return -int(Size()) + offsetof(BaselineFrame, flags_);
+    }
+    static int reverseOffsetOfEvalScript() {
+        return -int(Size()) + offsetof(BaselineFrame, evalScript_);
+    }
+    static int reverseOffsetOfReturnValue() {
+        return -int(Size()) + offsetof(BaselineFrame, loReturnValue_);
+    }
+    static int reverseOffsetOfLocal(size_t index) {
+        return -int(Size()) - (index + 1) * sizeof(Value);
+    }
+};
+
+// Ensure the frame is 8-byte aligned (required on ARM).
+JS_STATIC_ASSERT(((sizeof(BaselineFrame) + BaselineFrame::FramePointerOffset) % 8) == 0);
+
+} // namespace ion
+} // namespace js
+
+#endif
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/BaselineFrameInfo.cpp
@@ -0,0 +1,185 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=99:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "BaselineFrameInfo.h"
+#include "IonSpewer.h"
+
+#include "jsanalyze.h"
+#include "jsinferinlines.h"
+
+using namespace js;
+using namespace js::ion;
+
+bool
+FrameInfo::init()
+{
+    // One slot is always needed for this/arguments type checks.
+    size_t nstack = Max(script->nslots - script->nfixed, size_t(1));
+    if (!stack.init(nstack))
+        return false;
+
+    return true;
+}
+
+void
+FrameInfo::sync(StackValue *val)
+{
+    switch (val->kind()) {
+      case StackValue::Stack:
+        break;
+      case StackValue::LocalSlot:
+        masm.pushValue(addressOfLocal(val->localSlot()));
+        break;
+      case StackValue::ArgSlot:
+        masm.pushValue(addressOfArg(val->argSlot()));
+        break;
+      case StackValue::ThisSlot:
+        masm.pushValue(addressOfThis());
+        break;
+      case StackValue::Register:
+        masm.pushValue(val->reg());
+        break;
+      case StackValue::Constant:
+        masm.pushValue(val->constant());
+        break;
+      default:
+        JS_NOT_REACHED("Invalid kind");
+        break;
+    }
+
+    val->setStack();
+}
+
+void
+FrameInfo::syncStack(uint32_t uses)
+{
+    JS_ASSERT(uses <= stackDepth());
+
+    uint32_t depth = stackDepth() - uses;
+
+    for (uint32_t i = 0; i < depth; i++) {
+        StackValue *current = &stack[i];
+        sync(current);
+    }
+}
+
+uint32_t
+FrameInfo::numUnsyncedSlots()
+{
+    // Start at the bottom, find the first value that's not synced.
+    uint32_t i = 0;
+    for (; i < stackDepth(); i++) {
+        if (peek(-int32_t(i + 1))->kind() == StackValue::Stack)
+            break;
+    }
+    return i;
+}
+
+void
+FrameInfo::popValue(ValueOperand dest)
+{
+    StackValue *val = peek(-1);
+
+    switch (val->kind()) {
+      case StackValue::Constant:
+        masm.moveValue(val->constant(), dest);
+        break;
+      case StackValue::LocalSlot:
+        masm.loadValue(addressOfLocal(val->localSlot()), dest);
+        break;
+      case StackValue::ArgSlot:
+        masm.loadValue(addressOfArg(val->argSlot()), dest);
+        break;
+      case StackValue::ThisSlot:
+        masm.loadValue(addressOfThis(), dest);
+        break;
+      case StackValue::Stack:
+        masm.popValue(dest);
+        break;
+      case StackValue::Register:
+        masm.moveValue(val->reg(), dest);
+        break;
+      default:
+        JS_NOT_REACHED("Invalid kind");
+    }
+
+    // masm.popValue already adjusted the stack pointer, don't do it twice.
+    pop(DontAdjustStack);
+}
+
+void
+FrameInfo::popRegsAndSync(uint32_t uses)
+{
+    // x86 has only 3 Value registers. Only support 2 regs here for now,
+    // so that there's always a scratch Value register for reg -> reg
+    // moves.
+    JS_ASSERT(uses > 0);
+    JS_ASSERT(uses <= 2);
+    JS_ASSERT(uses <= stackDepth());
+
+    syncStack(uses);
+
+    switch (uses) {
+      case 1:
+        popValue(R0);
+        break;
+      case 2: {
+        // If the second value is in R1, move it to R2 so that it's not
+        // clobbered by the first popValue.
+        StackValue *val = peek(-2);
+        if (val->kind() == StackValue::Register && val->reg() == R1) {
+            masm.moveValue(R1, R2);
+            val->setRegister(R2);
+        }
+        popValue(R1);
+        popValue(R0);
+        break;
+      }
+      default:
+        JS_NOT_REACHED("Invalid uses");
+    }
+}
+
+#ifdef DEBUG
+void
+FrameInfo::assertValidState(jsbytecode *pc)
+{
+    // Check stack depth.
+    analyze::Bytecode *code = script->analysis()->maybeCode(pc);
+    JS_ASSERT_IF(code, stackDepth() == code->stackDepth);
+
+    // Start at the bottom, find the first value that's not synced.
+    uint32_t i = 0;
+    for (; i < stackDepth(); i++) {
+        if (stack[i].kind() != StackValue::Stack)
+            break;
+    }
+
+    // Assert all values on top of it are also not synced.
+    for (; i < stackDepth(); i++)
+        JS_ASSERT(stack[i].kind() != StackValue::Stack);
+
+    // Assert every Value register is used by at most one StackValue.
+    // R2 is used as a scratch register by the compiler and FrameInfo,
+    // so it shouldn't be used for StackValues.
+    bool usedR0 = false, usedR1 = false;
+
+    for (i = 0; i < stackDepth(); i++) {
+        if (stack[i].kind() == StackValue::Register) {
+            ValueOperand reg = stack[i].reg();
+            if (reg == R0) {
+                JS_ASSERT(!usedR0);
+                usedR0 = true;
+            } else if (reg == R1) {
+                JS_ASSERT(!usedR1);
+                usedR1 = true;
+            } else {
+                JS_NOT_REACHED("Invalid register");
+            }
+        }
+    }
+}
+#endif
new file mode 100644
--- /dev/null
+++ b/js/src/ion/BaselineFrameInfo.h
@@ -0,0 +1,331 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=99:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(jsion_baseline_frameinfo_h__) && defined(JS_ION)
+#define jsion_baseline_frameinfo_h__
+
+#include "jscntxt.h"
+#include "jscompartment.h"
+
+#include "BaselineJIT.h"
+#include "BaselineFrame.h"
+#include "BaselineRegisters.h"
+#include "IonMacroAssembler.h"
+#include "FixedList.h"
+
+namespace js {
+namespace ion {
+
+// FrameInfo overview.
+//
+// FrameInfo is used by the compiler to track values stored in the frame. This
+// includes locals, arguments and stack values. Locals and arguments are always
+// fully synced. Stack values can either be synced, stored as a constant, stored
+// in a Value register, or refer to a local, argument or |this| slot. Syncing a
+// StackValue ensures it's stored on the stack, i.e. kind == Stack.
+//
+// To see how this works, consider the following statement:
+//
+//    var y = x + 9;
+//
+// Here two values are pushed: StackValue(LocalSlot(0)) and StackValue(Int32Value(9)).
+// Only when we reach the ADD op is code generated, to load the operands directly
+// into the right operand registers and to sync all other stack values.
+//
+// For stack values, the following invariants hold (and are checked between ops):
+//
+// (1) If a value is synced (kind == Stack), all values below it must also be synced.
+//     In other words, values with kind other than Stack can only appear on top of the
+//     abstract stack.
+//
+// (2) When we call a stub or IC, all values still on the stack must be synced.
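+//
+// As an illustrative sketch (the actual opcode handlers live in the baseline
+// compiler), the abstract stack for the example above evolves as:
+//
+//    frame.pushLocal(0);           // stack: [ LocalSlot(0) ]
+//    frame.push(Int32Value(9));    // stack: [ LocalSlot(0), Constant(9) ]
+//    frame.popRegsAndSync(2);      // R0 <- local 0, R1 <- Int32(9)
+//    ... emit the add, leaving the result in R0 ...
+//    frame.push(R0);               // stack: [ Register(R0) ]
+//
+// Note that no machine code is emitted for the two pushes themselves.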
+
+// Represents a value pushed on the stack. Note that StackValue is not used for
+// locals or arguments since these are always fully synced.
+class StackValue
+{
+  public:
+    enum Kind {
+        Constant,
+        Register,
+        Stack,
+        LocalSlot,
+        ArgSlot,
+        ThisSlot
+#ifdef DEBUG
+        // In debug builds, assert Kind is initialized.
+        , Uninitialized
+#endif
+    };
+
+  private:
+    Kind kind_;
+
+    union {
+        struct {
+            Value v;
+        } constant;
+        struct {
+            mozilla::AlignedStorage2<ValueOperand> reg;
+        } reg;
+        struct {
+            uint32_t slot;
+        } local;
+        struct {
+            uint32_t slot;
+        } arg;
+    } data;
+
+    JSValueType knownType_;
+
+  public:
+    StackValue() {
+        reset();
+    }
+
+    Kind kind() const {
+        return kind_;
+    }
+    bool hasKnownType() const {
+        return knownType_ != JSVAL_TYPE_UNKNOWN;
+    }
+    bool hasKnownType(JSValueType type) const {
+        JS_ASSERT(type != JSVAL_TYPE_UNKNOWN);
+        return knownType_ == type;
+    }
+    bool isKnownBoolean() const {
+        return hasKnownType(JSVAL_TYPE_BOOLEAN);
+    }
+    JSValueType knownType() const {
+        JS_ASSERT(hasKnownType());
+        return knownType_;
+    }
+    void reset() {
+#ifdef DEBUG
+        kind_ = Uninitialized;
+        knownType_ = JSVAL_TYPE_UNKNOWN;
+#endif
+    }
+    Value constant() const {
+        JS_ASSERT(kind_ == Constant);
+        return data.constant.v;
+    }
+    ValueOperand reg() const {
+        JS_ASSERT(kind_ == Register);
+        return *data.reg.reg.addr();
+    }
+    uint32_t localSlot() const {
+        JS_ASSERT(kind_ == LocalSlot);
+        return data.local.slot;
+    }
+    uint32_t argSlot() const {
+        JS_ASSERT(kind_ == ArgSlot);
+        return data.arg.slot;
+    }
+
+    void setConstant(const Value &v) {
+        kind_ = Constant;
+        data.constant.v = v;
+        knownType_ = v.isDouble() ? JSVAL_TYPE_DOUBLE : v.extractNonDoubleType();
+    }
+    void setRegister(const ValueOperand &val, JSValueType knownType = JSVAL_TYPE_UNKNOWN) {
+        kind_ = Register;
+        *data.reg.reg.addr() = val;
+        knownType_ = knownType;
+    }
+    void setLocalSlot(uint32_t slot) {
+        kind_ = LocalSlot;
+        data.local.slot = slot;
+        knownType_ = JSVAL_TYPE_UNKNOWN;
+    }
+    void setArgSlot(uint32_t slot) {
+        kind_ = ArgSlot;
+        data.arg.slot = slot;
+        knownType_ = JSVAL_TYPE_UNKNOWN;
+    }
+    void setThis() {
+        kind_ = ThisSlot;
+        knownType_ = JSVAL_TYPE_UNKNOWN;
+    }
+    void setStack() {
+        kind_ = Stack;
+        knownType_ = JSVAL_TYPE_UNKNOWN;
+    }
+};
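+
+// For example (a sketch): pushing a constant records its type,
+//
+//    StackValue v;
+//    v.setConstant(Int32Value(9));
+//    JS_ASSERT(v.hasKnownType(JSVAL_TYPE_INT32));
+//
+// which lets the compiler skip runtime type guards on such values later.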
+
+enum StackAdjustment { AdjustStack, DontAdjustStack };
+
+class FrameInfo
+{
+    RootedScript script;
+    MacroAssembler &masm;
+
+    FixedList<StackValue> stack;
+    size_t spIndex;
+
+  public:
+    FrameInfo(JSContext *cx, HandleScript script, MacroAssembler &masm)
+      : script(cx, script),
+        masm(masm),
+        stack(),
+        spIndex(0)
+    { }
+
+    bool init();
+
+    uint32_t nlocals() const {
+        return script->nfixed;
+    }
+    uint32_t nargs() const {
+        return script->function()->nargs;
+    }
+
+  private:
+    inline StackValue *rawPush() {
+        StackValue *val = &stack[spIndex++];
+        val->reset();
+        return val;
+    }
+
+  public:
+    inline size_t stackDepth() const {
+        return spIndex;
+    }
+    inline void setStackDepth(uint32_t newDepth) {
+        if (newDepth <= stackDepth()) {
+            spIndex = newDepth;
+        } else {
+            uint32_t diff = newDepth - stackDepth();
+            for (uint32_t i = 0; i < diff; i++) {
+                StackValue *val = rawPush();
+                val->setStack();
+            }
+
+            JS_ASSERT(spIndex == newDepth);
+        }
+    }
+    inline StackValue *peek(int32_t index) const {
+        JS_ASSERT(index < 0);
+        return const_cast<StackValue *>(&stack[spIndex + index]);
+    }
+
+    inline void pop(StackAdjustment adjust = AdjustStack) {
+        spIndex--;
+        StackValue *popped = &stack[spIndex];
+
+        if (adjust == AdjustStack && popped->kind() == StackValue::Stack)
+            masm.addPtr(Imm32(sizeof(Value)), BaselineStackReg);
+
+        // Reset so that debug builds assert if anything uses this popped value.
+        popped->reset();
+    }
+    inline void popn(uint32_t n, StackAdjustment adjust = AdjustStack) {
+        uint32_t poppedStack = 0;
+        for (uint32_t i = 0; i < n; i++) {
+            if (peek(-1)->kind() == StackValue::Stack)
+                poppedStack++;
+            pop(DontAdjustStack);
+        }
+        if (adjust == AdjustStack && poppedStack > 0)
+            masm.addPtr(Imm32(sizeof(Value) * poppedStack), BaselineStackReg);
+    }
+    inline void push(const Value &val) {
+        StackValue *sv = rawPush();
+        sv->setConstant(val);
+    }
+    inline void push(const ValueOperand &val, JSValueType knownType=JSVAL_TYPE_UNKNOWN) {
+        StackValue *sv = rawPush();
+        sv->setRegister(val, knownType);
+    }
+    inline void pushLocal(uint32_t local) {
+        StackValue *sv = rawPush();
+        sv->setLocalSlot(local);
+    }
+    inline void pushArg(uint32_t arg) {
+        StackValue *sv = rawPush();
+        sv->setArgSlot(arg);
+    }
+    inline void pushThis() {
+        StackValue *sv = rawPush();
+        sv->setThis();
+    }
+    inline void pushScratchValue() {
+        masm.pushValue(addressOfScratchValue());
+        StackValue *sv = rawPush();
+        sv->setStack();
+    }
+    inline Address addressOfLocal(size_t local) const {
+#ifdef DEBUG
+        if (local >= nlocals()) {
+            // GETLOCAL and SETLOCAL can be used to access stack values. This is
+            // fine, as long as they are synced.
+            size_t slot = local - nlocals();
+            JS_ASSERT(slot < stackDepth());
+            JS_ASSERT(stack[slot].kind() == StackValue::Stack);
+        }
+#endif
+        return Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfLocal(local));
+    }
+    Address addressOfArg(size_t arg) const {
+        JS_ASSERT(arg < nargs());
+        return Address(BaselineFrameReg, BaselineFrame::offsetOfArg(arg));
+    }
+    Address addressOfThis() const {
+        return Address(BaselineFrameReg, BaselineFrame::offsetOfThis());
+    }
+    Address addressOfCallee() const {
+        return Address(BaselineFrameReg, BaselineFrame::offsetOfCalleeToken());
+    }
+    Address addressOfScopeChain() const {
+        return Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfScopeChain());
+    }
+    Address addressOfBlockChain() const {
+        return Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfBlockChain());
+    }
+    Address addressOfFlags() const {
+        return Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFlags());
+    }
+    Address addressOfEvalScript() const {
+        return Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfEvalScript());
+    }
+    Address addressOfReturnValue() const {
+        return Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfReturnValue());
+    }
+    Address addressOfStackValue(const StackValue *value) const {
+        JS_ASSERT(value->kind() == StackValue::Stack);
+        size_t slot = value - &stack[0];
+        JS_ASSERT(slot < stackDepth());
+        return Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfLocal(nlocals() + slot));
+    }
+    Address addressOfScratchValue() const {
+        return Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfScratchValue());
+    }
+
+    void popValue(ValueOperand dest);
+
+    void sync(StackValue *val);
+    void syncStack(uint32_t uses);
+    uint32_t numUnsyncedSlots();
+    void popRegsAndSync(uint32_t uses);
+
+    inline void assertSyncedStack() const {
+        JS_ASSERT_IF(stackDepth() > 0, peek(-1)->kind() == StackValue::Stack);
+    }
+
+#ifdef DEBUG
+    // Assert the state is valid before executing "pc".
+    void assertValidState(jsbytecode *pc);
+#else
+    inline void assertValidState(jsbytecode *pc) {}
+#endif
+};
+
+} // namespace ion
+} // namespace js
+
+#endif
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/BaselineHelpers.h
@@ -0,0 +1,26 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=99:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(jsion_baseline_helpers_h__) && defined(JS_ION)
+#define jsion_baseline_helpers_h__
+
+#if defined(JS_CPU_X86)
+# include "x86/BaselineHelpers-x86.h"
+#elif defined(JS_CPU_X64)
+# include "x64/BaselineHelpers-x64.h"
+#else
+# include "arm/BaselineHelpers-arm.h"
+#endif
+
+namespace js {
+namespace ion {
+
+} // namespace ion
+} // namespace js
+
+#endif
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/BaselineIC.cpp
@@ -0,0 +1,6955 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=99:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "BaselineJIT.h"
+#include "BaselineCompiler.h"
+#include "BaselineHelpers.h"
+#include "BaselineIC.h"
+#include "IonLinker.h"
+#include "IonSpewer.h"
+#include "VMFunctions.h"
+#include "IonFrames-inl.h"
+
+#include "builtin/Eval.h"
+
+#include "jsinterpinlines.h"
+
+namespace js {
+namespace ion {
+
+#ifdef DEBUG
+void
+FallbackICSpew(JSContext *cx, ICFallbackStub *stub, const char *fmt, ...)
+{
+    if (IonSpewEnabled(IonSpew_BaselineICFallback)) {
+        RootedScript script(cx, GetTopIonJSScript(cx));
+        jsbytecode *pc = stub->icEntry()->pc(script);
+
+        char fmtbuf[100];
+        va_list args;
+        va_start(args, fmt);
+        vsnprintf(fmtbuf, 100, fmt, args);
+        va_end(args);
+
+        IonSpew(IonSpew_BaselineICFallback,
+                "Fallback hit for (%s:%d) (pc=%d,line=%d,uses=%d,stubs=%d): %s",
+                script->filename(),
+                script->lineno,
+                (int) (pc - script->code),
+                PCToLineNumber(script, pc),
+                script->getUseCount(),
+                (int) stub->numOptimizedStubs(),
+                fmtbuf);
+    }
+}
+
+void
+TypeFallbackICSpew(JSContext *cx, ICTypeMonitor_Fallback *stub, const char *fmt, ...)
+{
+    if (IonSpewEnabled(IonSpew_BaselineICFallback)) {
+        RootedScript script(cx, GetTopIonJSScript(cx));
+        jsbytecode *pc = stub->icEntry()->pc(script);
+
+        char fmtbuf[100];
+        va_list args;
+        va_start(args, fmt);
+        vsnprintf(fmtbuf, 100, fmt, args);
+        va_end(args);
+
+        IonSpew(IonSpew_BaselineICFallback,
+                "Type monitor fallback hit for (%s:%d) (pc=%d,line=%d,uses=%d,stubs=%d): %s",
+                script->filename(),
+                script->lineno,
+                (int) (pc - script->code),
+                PCToLineNumber(script, pc),
+                script->getUseCount(),
+                (int) stub->numOptimizedMonitorStubs(),
+                fmtbuf);
+    }
+}
+
+#else
+#define FallbackICSpew(...)
+#define TypeFallbackICSpew(...)
+#endif
+
+
+ICFallbackStub *
+ICEntry::fallbackStub() const
+{
+    return firstStub()->getChainFallback();
+}
+
+
+ICStubConstIterator &
+ICStubConstIterator::operator++()
+{
+    JS_ASSERT(currentStub_ != NULL);
+    currentStub_ = currentStub_->next();
+    return *this;
+}
+
+
+ICStubIterator::ICStubIterator(ICFallbackStub *fallbackStub, bool end)
+  : icEntry_(fallbackStub->icEntry()),
+    fallbackStub_(fallbackStub),
+    previousStub_(NULL),
+    currentStub_(end ? fallbackStub : icEntry_->firstStub()),
+    unlinked_(false)
+{ }
+
+ICStubIterator &
+ICStubIterator::operator++()
+{
+    JS_ASSERT(currentStub_->next() != NULL);
+    if (!unlinked_)
+        previousStub_ = currentStub_;
+    currentStub_ = currentStub_->next();
+    unlinked_ = false;
+    return *this;
+}
+
+void
+ICStubIterator::unlink(Zone *zone)
+{
+    JS_ASSERT(currentStub_->next() != NULL);
+    JS_ASSERT(currentStub_ != fallbackStub_);
+    JS_ASSERT(!unlinked_);
+    fallbackStub_->unlinkStub(zone, previousStub_, currentStub_);
+
+    // Mark the current iterator position as unlinked, so operator++ works properly.
+    unlinked_ = true;
+}
+
+
+void
+ICStub::markCode(JSTracer *trc, const char *name)
+{
+    IonCode *stubIonCode = ionCode();
+    MarkIonCodeUnbarriered(trc, &stubIonCode, name);
+}
+
+void
+ICStub::updateCode(IonCode *code)
+{
+    // Write barrier on the old code.
+#ifdef JSGC_INCREMENTAL
+    IonCode::writeBarrierPre(ionCode());
+#endif
+    stubCode_ = code->raw();
+}
+
+/* static */ void
+ICStub::trace(JSTracer *trc)
+{
+    markCode(trc, "baseline-stub-ioncode");
+
+    // If the stub is a monitored fallback stub, then mark the monitor ICs hanging
+    // off of that stub.  We don't need to worry about the regular monitored
+    // stubs, because they always have a monitored fallback stub that references
+    // the same stub chain.
+    if (isMonitoredFallback()) {
+        ICTypeMonitor_Fallback *lastMonStub = toMonitoredFallbackStub()->fallbackMonitorStub();
+        for (ICStubConstIterator iter = lastMonStub->firstMonitorStub(); !iter.atEnd(); iter++) {
+            JS_ASSERT_IF(iter->next() == NULL, *iter == lastMonStub);
+            iter->markCode(trc, "baseline-monitor-stub-ioncode");
+        }
+    }
+
+    if (isUpdated()) {
+        for (ICStubConstIterator iter = toUpdatedStub()->firstUpdateStub(); !iter.atEnd(); iter++) {
+            JS_ASSERT_IF(iter->next() == NULL, iter->isTypeUpdate_Fallback());
+            iter->markCode(trc, "baseline-update-stub-ioncode");
+        }
+    }
+
+    switch (kind()) {
+      case ICStub::Call_Scripted: {
+        ICCall_Scripted *callStub = toCall_Scripted();
+        MarkScript(trc, &callStub->calleeScript(), "baseline-callscripted-callee");
+        break;
+      }
+      case ICStub::Call_Native: {
+        ICCall_Native *callStub = toCall_Native();
+        MarkObject(trc, &callStub->callee(), "baseline-callnative-callee");
+        break;
+      }
+      case ICStub::GetElem_Native: {
+        ICGetElem_Native *getElemStub = toGetElem_Native();
+        MarkShape(trc, &getElemStub->shape(), "baseline-getelem-native-shape");
+        gc::MarkValue(trc, &getElemStub->idval(), "baseline-getelem-native-idval");
+        break;
+      }
+      case ICStub::GetElem_NativePrototype: {
+        ICGetElem_NativePrototype *getElemStub = toGetElem_NativePrototype();
+        MarkShape(trc, &getElemStub->shape(), "baseline-getelem-nativeproto-shape");
+        gc::MarkValue(trc, &getElemStub->idval(), "baseline-getelem-nativeproto-idval");
+        MarkObject(trc, &getElemStub->holder(), "baseline-getelem-nativeproto-holder");
+        MarkShape(trc, &getElemStub->holderShape(), "baseline-getelem-nativeproto-holdershape");
+        break;
+      }
+      case ICStub::GetElem_Dense: {
+        ICGetElem_Dense *getElemStub = toGetElem_Dense();
+        MarkShape(trc, &getElemStub->shape(), "baseline-getelem-dense-shape");
+        break;
+      }
+      case ICStub::GetElem_TypedArray: {
+        ICGetElem_TypedArray *getElemStub = toGetElem_TypedArray();
+        MarkShape(trc, &getElemStub->shape(), "baseline-getelem-typedarray-shape");
+        break;
+      }
+      case ICStub::SetElem_Dense: {
+        ICSetElem_Dense *setElemStub = toSetElem_Dense();
+        MarkShape(trc, &setElemStub->shape(), "baseline-setelem-dense-shape");
+        MarkTypeObject(trc, &setElemStub->type(), "baseline-setelem-dense-type");
+        break;
+      }
+      case ICStub::SetElem_DenseAdd: {
+        ICSetElem_DenseAdd *setElemStub = toSetElem_DenseAdd();
+        MarkTypeObject(trc, &setElemStub->type(), "baseline-setelem-denseadd-type");
+
+        JS_STATIC_ASSERT(ICSetElem_DenseAdd::MAX_PROTO_CHAIN_DEPTH == 4);
+
+        switch (setElemStub->protoChainDepth()) {
+          case 0: setElemStub->toImpl<0>()->traceShapes(trc); break;
+          case 1: setElemStub->toImpl<1>()->traceShapes(trc); break;
+          case 2: setElemStub->toImpl<2>()->traceShapes(trc); break;
+          case 3: setElemStub->toImpl<3>()->traceShapes(trc); break;
+          case 4: setElemStub->toImpl<4>()->traceShapes(trc); break;
+          default: JS_NOT_REACHED("Invalid proto stub.");
+        }
+        break;
+      }
+      case ICStub::SetElem_TypedArray: {
+        ICSetElem_TypedArray *setElemStub = toSetElem_TypedArray();
+        MarkShape(trc, &setElemStub->shape(), "baseline-setelem-typedarray-shape");
+        break;
+      }
+      case ICStub::TypeMonitor_SingleObject: {
+        ICTypeMonitor_SingleObject *monitorStub = toTypeMonitor_SingleObject();
+        MarkObject(trc, &monitorStub->object(), "baseline-monitor-singleobject");
+        break;
+      }
+      case ICStub::TypeMonitor_TypeObject: {
+        ICTypeMonitor_TypeObject *monitorStub = toTypeMonitor_TypeObject();
+        MarkTypeObject(trc, &monitorStub->type(), "baseline-monitor-typeobject");
+        break;
+      }
+      case ICStub::TypeUpdate_SingleObject: {
+        ICTypeUpdate_SingleObject *updateStub = toTypeUpdate_SingleObject();
+        MarkObject(trc, &updateStub->object(), "baseline-update-singleobject");
+        break;
+      }
+      case ICStub::TypeUpdate_TypeObject: {
+        ICTypeUpdate_TypeObject *updateStub = toTypeUpdate_TypeObject();
+        MarkTypeObject(trc, &updateStub->type(), "baseline-update-typeobject");
+        break;
+      }
+      case ICStub::Profiler_PushFunction: {
+        ICProfiler_PushFunction *pushFunStub = toProfiler_PushFunction();
+        MarkScript(trc, &pushFunStub->script(), "baseline-profilerpushfunction-stub-script");
+        break;
+      }
+      case ICStub::GetName_Global: {
+        ICGetName_Global *globalStub = toGetName_Global();
+        MarkShape(trc, &globalStub->shape(), "baseline-global-stub-shape");
+        break;
+      }
+      case ICStub::GetName_Scope0:
+        static_cast<ICGetName_Scope<0>*>(this)->traceScopes(trc);
+        break;
+      case ICStub::GetName_Scope1:
+        static_cast<ICGetName_Scope<1>*>(this)->traceScopes(trc);
+        break;
+      case ICStub::GetName_Scope2:
+        static_cast<ICGetName_Scope<2>*>(this)->traceScopes(trc);
+        break;
+      case ICStub::GetName_Scope3:
+        static_cast<ICGetName_Scope<3>*>(this)->traceScopes(trc);
+        break;
+      case ICStub::GetName_Scope4:
+        static_cast<ICGetName_Scope<4>*>(this)->traceScopes(trc);
+        break;
+      case ICStub::GetName_Scope5:
+        static_cast<ICGetName_Scope<5>*>(this)->traceScopes(trc);
+        break;
+      case ICStub::GetName_Scope6:
+        static_cast<ICGetName_Scope<6>*>(this)->traceScopes(trc);
+        break;
+      case ICStub::GetIntrinsic_Constant: {
+        ICGetIntrinsic_Constant *constantStub = toGetIntrinsic_Constant();
+        gc::MarkValue(trc, &constantStub->value(), "baseline-getintrinsic-constant-value");
+        break;
+      }
+      case ICStub::GetProp_String: {
+        ICGetProp_String *propStub = toGetProp_String();
+        MarkShape(trc, &propStub->stringProtoShape(), "baseline-getpropstring-stub-shape");
+        break;
+      }
+      case ICStub::GetProp_Native: {
+        ICGetProp_Native *propStub = toGetProp_Native();
+        MarkShape(trc, &propStub->shape(), "baseline-getpropnative-stub-shape");
+        break;
+      }
+      case ICStub::GetProp_NativePrototype: {
+        ICGetProp_NativePrototype *propStub = toGetProp_NativePrototype();
+        MarkShape(trc, &propStub->shape(), "baseline-getpropnativeproto-stub-shape");
+        MarkObject(trc, &propStub->holder(), "baseline-getpropnativeproto-stub-holder");
+        MarkShape(trc, &propStub->holderShape(), "baseline-getpropnativeproto-stub-holdershape");
+        break;
+      }
+      case ICStub::GetProp_CallScripted: {
+        ICGetProp_CallScripted *callStub = toGetProp_CallScripted();
+        MarkShape(trc, &callStub->shape(), "baseline-getpropcallscripted-stub-shape");
+        MarkObject(trc, &callStub->holder(), "baseline-getpropcallscripted-stub-holder");
+        MarkShape(trc, &callStub->holderShape(), "baseline-getpropcallscripted-stub-holdershape");
+        MarkObject(trc, &callStub->getter(), "baseline-getpropcallscripted-stub-getter");
+        break;
+      }
+      case ICStub::GetProp_CallNative: {
+        ICGetProp_CallNative *callStub = toGetProp_CallNative();
+        MarkShape(trc, &callStub->shape(), "baseline-getpropcallnative-stub-shape");
+        MarkObject(trc, &callStub->holder(), "baseline-getpropcallnative-stub-holder");
+        MarkShape(trc, &callStub->holderShape(), "baseline-getpropcallnative-stub-holdershape");
+        MarkObject(trc, &callStub->getter(), "baseline-getpropcallnative-stub-getter");
+        break;
+      }
+      case ICStub::SetProp_Native: {
+        ICSetProp_Native *propStub = toSetProp_Native();
+        MarkShape(trc, &propStub->shape(), "baseline-setpropnative-stub-shape");
+        MarkTypeObject(trc, &propStub->type(), "baseline-setpropnative-stub-type");
+        break;
+      }
+      case ICStub::SetProp_NativeAdd: {
+        ICSetProp_NativeAdd *propStub = toSetProp_NativeAdd();
+        MarkTypeObject(trc, &propStub->type(), "baseline-setpropnativeadd-stub-type");
+        MarkShape(trc, &propStub->newShape(), "baseline-setpropnativeadd-stub-newshape");
+        JS_STATIC_ASSERT(ICSetProp_NativeAdd::MAX_PROTO_CHAIN_DEPTH == 4);
+        switch (propStub->protoChainDepth()) {
+          case 0: propStub->toImpl<0>()->traceShapes(trc); break;
+          case 1: propStub->toImpl<1>()->traceShapes(trc); break;
+          case 2: propStub->toImpl<2>()->traceShapes(trc); break;
+          case 3: propStub->toImpl<3>()->traceShapes(trc); break;
+          case 4: propStub->toImpl<4>()->traceShapes(trc); break;
+          default: JS_NOT_REACHED("Invalid proto stub.");
+        }
+        break;
+      }
+      case ICStub::SetProp_CallScripted: {
+        ICSetProp_CallScripted *callStub = toSetProp_CallScripted();
+        MarkShape(trc, &callStub->shape(), "baseline-setpropcallscripted-stub-shape");
+        MarkObject(trc, &callStub->holder(), "baseline-setpropcallscripted-stub-holder");
+        MarkShape(trc, &callStub->holderShape(), "baseline-setpropcallscripted-stub-holdershape");
+        MarkObject(trc, &callStub->setter(), "baseline-setpropcallscripted-stub-setter");
+        break;
+      }
+      case ICStub::SetProp_CallNative: {
+        ICSetProp_CallNative *callStub = toSetProp_CallNative();
+        MarkShape(trc, &callStub->shape(), "baseline-setpropcallnative-stub-shape");
+        MarkObject(trc, &callStub->holder(), "baseline-setpropcallnative-stub-holder");
+        MarkShape(trc, &callStub->holderShape(), "baseline-setpropcallnative-stub-holdershape");
+        MarkObject(trc, &callStub->setter(), "baseline-setpropcallnative-stub-setter");
+        break;
+      }
+      default:
+        break;
+    }
+}
+
+void
+ICFallbackStub::unlinkStub(Zone *zone, ICStub *prev, ICStub *stub)
+{
+    JS_ASSERT(stub->next());
+
+    // If stub is the last optimized stub, update lastStubPtrAddr.
+    if (stub->next() == this) {
+        JS_ASSERT(lastStubPtrAddr_ == stub->addressOfNext());
+        if (prev)
+            lastStubPtrAddr_ = prev->addressOfNext();
+        else
+            lastStubPtrAddr_ = icEntry()->addressOfFirstStub();
+        *lastStubPtrAddr_ = this;
+    } else {
+        if (prev) {
+            JS_ASSERT(prev->next() == stub);
+            prev->setNext(stub->next());
+        } else {
+            JS_ASSERT(icEntry()->firstStub() == stub);
+            icEntry()->setFirstStub(stub->next());
+        }
+    }
+
+    JS_ASSERT(numOptimizedStubs_ > 0);
+    numOptimizedStubs_--;
+
+    if (zone->needsBarrier()) {
+        // We are removing edges from ICStub to gcthings. Perform one final trace
+        // of the stub for incremental GC, as it must know about those edges.
+        stub->trace(zone->barrierTracer());
+    }
+
+    if (ICStub::CanMakeCalls(stub->kind()) && stub->isMonitored()) {
+        // This stub can make calls so we can return to it if it's on the stack.
+        // We just have to reset its firstMonitorStub_ field to avoid a stale
+        // pointer when purgeOptimizedStubs destroys all optimized monitor
+        // stubs (unlinked stubs won't be updated).
+        ICTypeMonitor_Fallback *monitorFallback = toMonitoredFallbackStub()->fallbackMonitorStub();
+        stub->toMonitoredStub()->resetFirstMonitorStub(monitorFallback);
+    }
+
+#ifdef DEBUG
+    // Poison stub code to ensure we don't call this stub again. However, if this
+    // stub can make calls, a pointer to it may be stored in a stub frame on the
+    // stack, so we can't touch the stubCode_ or GC will crash when marking this
+    // pointer.
+    if (!ICStub::CanMakeCalls(stub->kind()))
+        stub->stubCode_ = (uint8_t *)0xbad;
+#endif
+}
+
+void
+ICFallbackStub::unlinkStubsWithKind(JSContext *cx, ICStub::Kind kind)
+{
+    for (ICStubIterator iter = beginChain(); !iter.atEnd(); iter++) {
+        if (iter->kind() == kind)
+            iter.unlink(cx->zone());
+    }
+}
+
+void
+ICTypeMonitor_Fallback::resetMonitorStubChain(Zone *zone)
+{
+    if (zone->needsBarrier()) {
+        // We are removing edges from monitored stubs to gcthings (IonCode).
+        // Perform one final trace of all monitor stubs for incremental GC,
+        // as it must know about those edges.
+        this->trace(zone->barrierTracer());
+    }
+
+    firstMonitorStub_ = this;
+    numOptimizedMonitorStubs_ = 0;
+
+    if (hasFallbackStub_) {
+        lastMonitorStubPtrAddr_ = NULL;
+
+        // Reset firstMonitorStub_ field of all monitored stubs.
+        for (ICStubConstIterator iter = mainFallbackStub_->beginChainConst();
+             !iter.atEnd(); iter++)
+        {
+            if (!iter->isMonitored())
+                continue;
+            iter->toMonitoredStub()->resetFirstMonitorStub(this);
+        }
+    } else {
+        icEntry_->setFirstStub(this);
+        lastMonitorStubPtrAddr_ = icEntry_->addressOfFirstStub();
+    }
+}
+
+ICMonitoredStub::ICMonitoredStub(Kind kind, IonCode *stubCode, ICStub *firstMonitorStub)
+  : ICStub(kind, ICStub::Monitored, stubCode),
+    firstMonitorStub_(firstMonitorStub)
+{
+    // If the first monitored stub is an ICTypeMonitor_Fallback stub, then
+    // double check that _its_ firstMonitorStub is the same as this one.
+    JS_ASSERT_IF(firstMonitorStub_->isTypeMonitor_Fallback(),
+                 firstMonitorStub_->toTypeMonitor_Fallback()->firstMonitorStub() ==
+                    firstMonitorStub_);
+}
+
+bool
+ICMonitoredFallbackStub::initMonitoringChain(JSContext *cx, ICStubSpace *space)
+{
+    JS_ASSERT(fallbackMonitorStub_ == NULL);
+
+    ICTypeMonitor_Fallback::Compiler compiler(cx, this);
+    ICTypeMonitor_Fallback *stub = compiler.getStub(space);
+    if (!stub)
+        return false;
+    fallbackMonitorStub_ = stub;
+    return true;
+}
+
+bool
+ICMonitoredFallbackStub::addMonitorStubForValue(JSContext *cx, HandleScript script, HandleValue val)
+{
+    return fallbackMonitorStub_->addMonitorStubForValue(cx, script, val);
+}
+
+bool
+ICUpdatedStub::initUpdatingChain(JSContext *cx, ICStubSpace *space)
+{
+    JS_ASSERT(firstUpdateStub_ == NULL);
+
+    ICTypeUpdate_Fallback::Compiler compiler(cx);
+    ICTypeUpdate_Fallback *stub = compiler.getStub(space);
+    if (!stub)
+        return false;
+
+    firstUpdateStub_ = stub;
+    return true;
+}
+
+IonCode *
+ICStubCompiler::getStubCode()
+{
+    IonCompartment *ion = cx->compartment->ionCompartment();
+
+    // Check for existing cached stubcode.
+    uint32_t stubKey = getKey();
+    IonCode *stubCode = ion->getStubCode(stubKey);
+    if (stubCode)
+        return stubCode;
+
+    // Compile new stubcode.
+    MacroAssembler masm;
+#ifdef JS_CPU_ARM
+    masm.setSecondScratchReg(BaselineSecondScratchReg);
+#endif
+
+    AutoFlushCache afc("ICStubCompiler::getStubCode", cx->runtime->ionRuntime());
+    if (!generateStubCode(masm))
+        return NULL;
+    Linker linker(masm);
+    Rooted<IonCode *> newStubCode(cx, linker.newCode(cx, JSC::BASELINE_CODE));
+    if (!newStubCode)
+        return NULL;
+
+    // After generating code, run postGenerateStubCode()
+    if (!postGenerateStubCode(masm, newStubCode))
+        return NULL;
+
+    // All barriers are emitted off by default; enable them if needed.
+    if (cx->zone()->needsBarrier())
+        newStubCode->togglePreBarriers(true);
+
+    // Cache newly compiled stubcode.
+    if (!ion->putStubCode(stubKey, newStubCode))
+        return NULL;
+
+    JS_ASSERT(entersStubFrame_ == ICStub::CanMakeCalls(kind));
+
+    return newStubCode;
+}
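+
+// Note on the caching model above (a sketch): generated stub code is keyed on
+// getKey() and shared by every stub instance of that shape, so it holds no
+// per-instance data.  Instead, the shared code reads each stub's state through
+// BaselineStubReg-relative loads, as the type-monitor stubs below do:
+//
+//    Address expected(BaselineStubReg, ICTypeMonitor_SingleObject::offsetOfObject());
+//    masm.branchPtr(Assembler::NotEqual, expected, obj, &failure);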
+
+bool
+ICStubCompiler::tailCallVM(const VMFunction &fun, MacroAssembler &masm)
+{
+    IonCompartment *ion = cx->compartment->ionCompartment();
+    IonCode *code = ion->getVMWrapper(fun);
+    if (!code)
+        return false;
+
+    uint32_t argSize = fun.explicitStackSlots() * sizeof(void *);
+    EmitTailCallVM(code, masm, argSize);
+    return true;
+}
+
+bool
+ICStubCompiler::callVM(const VMFunction &fun, MacroAssembler &masm)
+{
+    IonCompartment *ion = cx->compartment->ionCompartment();
+    IonCode *code = ion->getVMWrapper(fun);
+    if (!code)
+        return false;
+
+    EmitCallVM(code, masm);
+    return true;
+}
+
+bool
+ICStubCompiler::callTypeUpdateIC(MacroAssembler &masm, uint32_t objectOffset)
+{
+    IonCompartment *ion = cx->compartment->ionCompartment();
+    IonCode *code = ion->getVMWrapper(DoTypeUpdateFallbackInfo);
+    if (!code)
+        return false;
+
+    EmitCallTypeUpdateIC(masm, code, objectOffset);
+    return true;
+}
+
+void
+ICStubCompiler::enterStubFrame(MacroAssembler &masm, Register scratch)
+{
+    EmitEnterStubFrame(masm, scratch);
+#ifdef DEBUG
+    entersStubFrame_ = true;
+#endif
+}
+
+void
+ICStubCompiler::leaveStubFrame(MacroAssembler &masm, bool calledIntoIon)
+{
+    JS_ASSERT(entersStubFrame_);
+    EmitLeaveStubFrame(masm, calledIntoIon);
+}
+
+void
+ICStubCompiler::guardProfilingEnabled(MacroAssembler &masm, Register scratch, Label *skip)
+{
+    // This should only be called from the following stubs.
+    JS_ASSERT(kind == ICStub::Call_Scripted      || kind == ICStub::Call_AnyScripted     ||
+              kind == ICStub::Call_Native        || kind == ICStub::GetProp_CallScripted ||
+              kind == ICStub::GetProp_CallNative || kind == ICStub::SetProp_CallScripted ||
+              kind == ICStub::SetProp_CallNative);
+
+    // Guard on the bit in the frame that indicates whether the SPS frame was
+    // pushed in the first place.  This code is expected to be called from
+    // within a stub that has already entered a stub frame.
+    JS_ASSERT(entersStubFrame_);
+    masm.loadPtr(Address(BaselineFrameReg, 0), scratch);
+    masm.branchTest32(Assembler::Zero,
+                      Address(scratch, BaselineFrame::reverseOffsetOfFlags()),
+                      Imm32(BaselineFrame::HAS_PUSHED_SPS_FRAME),
+                      skip);
+
+    // Check if profiling is enabled
+    uint32_t *enabledAddr = cx->runtime->spsProfiler.addressOfEnabled();
+    masm.branch32(Assembler::Equal, AbsoluteAddress(enabledAddr), Imm32(0), skip);
+}
+
+//
+// UseCount_Fallback
+//
+static bool
+IsTopFrameConstructing(JSContext *cx)
+{
+    IonFrameIterator iter(cx);
+    JS_ASSERT(iter.type() == IonFrame_Exit);
+
+    ++iter;
+    JS_ASSERT(iter.type() == IonFrame_BaselineStub);
+
+    ++iter;
+    JS_ASSERT(iter.isBaselineJS());
+
+    return iter.isConstructing();
+}
+
+static bool
+EnsureCanEnterIon(JSContext *cx, ICUseCount_Fallback *stub, BaselineFrame *frame,
+                  HandleScript script, jsbytecode *pc, void **jitcodePtr)
+{
+    JS_ASSERT(jitcodePtr);
+    JS_ASSERT(!*jitcodePtr);
+
+    bool isLoopEntry = (JSOp(*pc) == JSOP_LOOPENTRY);
+
+    bool isConstructing = IsTopFrameConstructing(cx);
+    MethodStatus stat;
+    if (isLoopEntry) {
+        IonSpew(IonSpew_BaselineOSR, "  Compile at loop entry!");
+        stat = CanEnterAtBranch(cx, script, frame, pc, isConstructing);
+    } else if (frame->isFunctionFrame()) {
+        IonSpew(IonSpew_BaselineOSR, "  Compile function from top for later entry!");
+        stat = CompileFunctionForBaseline(cx, script, frame, isConstructing);
+    } else {
+        return true;
+    }
+
+    if (stat == Method_Error) {
+        IonSpew(IonSpew_BaselineOSR, "  Compile with Ion errored!");
+        return false;
+    }
+
+    if (stat == Method_CantCompile)
+        IonSpew(IonSpew_BaselineOSR, "  Can't compile with Ion!");
+    else if (stat == Method_Skipped)
+        IonSpew(IonSpew_BaselineOSR, "  Skipped compile with Ion!");
+    else if (stat == Method_Compiled)
+        IonSpew(IonSpew_BaselineOSR, "  Compiled with Ion!");
+    else
+        JS_NOT_REACHED("Invalid MethodStatus!");
+
+    // Failed to compile.  Reset use count and return.
+    if (stat != Method_Compiled) {
+        // TODO: If stat == Method_CantCompile, insert a stub that just skips the
+        // useCount check entirely, instead of resetting it.
+        if (stat == Method_CantCompile ||
+            (script->hasIonScript() && script->ion->bailoutExpected()))
+        {
+            IonSpew(IonSpew_BaselineOSR, "  Reset UseCount cantCompile=%s bailoutExpected=%s!",
+                    stat == Method_CantCompile ? "yes" : "no",
+                    (script->hasIonScript() && script->ion->bailoutExpected()) ? "yes" : "no");
+            script->resetUseCount();
+        }
+        return true;
+    }
+
+    if (isLoopEntry) {
+        IonSpew(IonSpew_BaselineOSR, "  OSR possible!");
+        IonScript *ion = script->ionScript();
+        *jitcodePtr = ion->method()->raw() + ion->osrEntryOffset();
+    }
+
+    return true;
+}
+
+//
+// The following data is kept in a temporary heap-allocated buffer, stored in
+// IonRuntime (high memory addresses at top, low at bottom):
+//
+//            +=================================+  --      <---- High Address
+//            |                                 |   |
+//            |     ...Locals/Stack...          |   |
+//            |                                 |   |
+//            +---------------------------------+   |
+//            |                                 |   |
+//            |     ...StackFrame...            |   |-- Fake StackFrame
+//            |                                 |   |
+//     +----> +---------------------------------+   |
+//     |      |                                 |   |
+//     |      |     ...Args/This...             |   |
+//     |      |                                 |   |
+//     |      +=================================+  --
+//     |      |     Padding(Maybe Empty)        |
+//     |      +=================================+  --
+//     +------|-- stackFrame                    |   |-- IonOsrTempData
+//            |   jitcode                       |   |
+//            +=================================+  --      <---- Low Address
+//
+// A pointer to the IonOsrTempData is returned.
+
+struct IonOsrTempData
+{
+    void *jitcode;
+    uint8_t *stackFrame;
+};
+
+static IonOsrTempData *
+PrepareOsrTempData(JSContext *cx, ICUseCount_Fallback *stub, BaselineFrame *frame,
+                   HandleScript script, jsbytecode *pc, void *jitcode)
+{
+    // Calculate (numLocals + numStackVals) and the number of formal args.
+    size_t numLocalsAndStackVals = frame->numValueSlots();
+    size_t numFormalArgs = frame->isFunctionFrame() ? frame->numFormalArgs() : 0;
+
+    // Calculate the amount of space to allocate:
+    //      StackFrame space:
+    //          (sizeof(Value) * (numLocals + numStackVals))
+    //        + sizeof(StackFrame)
+    //        + (sizeof(Value) * (numFormalArgs + 1))   // +1 for ThisV
+    //
+    //      IonOsrTempData space:
+    //          sizeof(IonOsrTempData)
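+    //
+    // Worked example (a sketch assuming a 64-bit build, so sizeof(Value) == 8):
+    // with 3 locals+stack values and 2 formal args,
+    //
+    //    stackFrameSpace = 8*3 + sizeof(StackFrame) + 8*(2 + 1)
+    //    totalSpace      = AlignBytes(stackFrameSpace, 8)
+    //                    + AlignBytes(sizeof(IonOsrTempData), 8)
+    //
+    // and info->stackFrame ends up (2 + 1) Values past the start of the
+    // fake-frame region, just past the copied args and thisv.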
+
+    size_t stackFrameSpace = (sizeof(Value) * numLocalsAndStackVals) + sizeof(StackFrame)
+                           + (sizeof(Value) * (numFormalArgs + 1));
+    size_t ionOsrTempDataSpace = sizeof(IonOsrTempData);
+
+    size_t totalSpace = AlignBytes(stackFrameSpace, sizeof(Value)) +
+                        AlignBytes(ionOsrTempDataSpace, sizeof(Value));
+
+    IonOsrTempData *info = (IonOsrTempData *)cx->runtime->getIonRuntime(cx)->allocateOsrTempData(totalSpace);
+    if (!info)
+        return NULL;
+
+    memset(info, 0, totalSpace);
+
+    info->jitcode = jitcode;
+
+    uint8_t *stackFrameStart = (uint8_t *)info + AlignBytes(ionOsrTempDataSpace, sizeof(Value));
+    info->stackFrame = stackFrameStart + (numFormalArgs * sizeof(Value)) + sizeof(Value);
+
+    //
+    // Initialize the fake StackFrame.
+    //
+
+    // Copy formal args and thisv.
+    memcpy(stackFrameStart, frame->formals() - 1, (numFormalArgs + 1) * sizeof(Value));
+
+    // Initialize ScopeChain, Exec, and Flags fields in StackFrame struct.
+    uint8_t *stackFrame = info->stackFrame;
+    *((JSObject **) (stackFrame + StackFrame::offsetOfScopeChain())) = frame->scopeChain();
+    if (frame->isFunctionFrame()) {
+        // Store the function in exec field, and StackFrame::FUNCTION for flags.
+        *((JSFunction **) (stackFrame + StackFrame::offsetOfExec())) = frame->fun();
+        *((uint32_t *) (stackFrame + StackFrame::offsetOfFlags())) = StackFrame::FUNCTION;
+    } else {
+        *((RawScript *) (stackFrame + StackFrame::offsetOfExec())) = frame->script();
+        *((uint32_t *) (stackFrame + StackFrame::offsetOfFlags())) = 0;
+    }
+
+    // Copy locals and stack values.  Note that in the fake StackFrame, these go from
+    // low to high addresses, while on the C stack, they go from high to low addresses.
+    // So we can't use memcpy on this, but must copy the values in reverse order.
+    Value *stackFrameLocalsStart = (Value *) (stackFrame + sizeof(StackFrame));
+    for (size_t i = 0; i < numLocalsAndStackVals; i++)
+        stackFrameLocalsStart[i] = *(frame->valueSlot(i));
+
+    IonSpew(IonSpew_BaselineOSR, "Allocated IonOsrTempData at %p", (void *) info);
+    IonSpew(IonSpew_BaselineOSR, "Jitcode is %p", info->jitcode);
+
+    // All done.
+    return info;
+}
+
+static bool
+DoUseCountFallback(JSContext *cx, ICUseCount_Fallback *stub, BaselineFrame *frame,
+                   IonOsrTempData **infoPtr)
+{
+    JS_ASSERT(infoPtr);
+    *infoPtr = NULL;
+
+    // A TI OOM will disable TI and Ion.
+    if (!ion::IsEnabled(cx))
+        return true;
+
+    RootedScript script(cx, frame->script());
+    jsbytecode *pc = stub->icEntry()->pc(script);
+    bool isLoopEntry = JSOp(*pc) == JSOP_LOOPENTRY;
+
+    FallbackICSpew(cx, stub, "UseCount(%d)", isLoopEntry ? int(pc - script->code) : int(-1));
+
+    if (!script->canIonCompile()) {
+        // TODO: ASSERT that an ion-compilation-disabled checker stub doesn't exist.
+        // TODO: Clear all optimized stubs.
+        // TODO: Add an ion-compilation-disabled checker IC stub.
+        script->resetUseCount();
+        return true;
+    }
+
+    JS_ASSERT(!script->isIonCompilingOffThread());
+
+    // If Ion script exists, but PC is not at a loop entry, then Ion will be entered for
+    // this script at an appropriate LOOPENTRY or the next time this function is called.
+    if (script->hasIonScript() && !isLoopEntry) {
+        IonSpew(IonSpew_BaselineOSR, "IonScript exists, but not at loop entry!");
+        // TODO: ASSERT that an ion-script-already-exists checker stub doesn't exist.
+        // TODO: Clear all optimized stubs.
+        // TODO: Add an ion-script-already-exists checker stub.
+        return true;
+    }
+
+    // Ensure that Ion-compiled code is available.
+    IonSpew(IonSpew_BaselineOSR,
+            "UseCount for %s:%d reached %d at pc %p, trying to switch to Ion!",
+            script->filename(), script->lineno, (int) script->getUseCount(), (void *) pc);
+    void *jitcode = NULL;
+    if (!EnsureCanEnterIon(cx, stub, frame, script, pc, &jitcode))
+        return false;
+
+    // Jitcode should only be set here when at a loop entry.
+    JS_ASSERT_IF(!isLoopEntry, !jitcode);
+    if (!jitcode)
+        return true;
+
+    // Prepare the temporary heap copy of the fake StackFrame and actual args list.
+    IonSpew(IonSpew_BaselineOSR, "Got jitcode.  Preparing for OSR into ion.");
+    IonOsrTempData *info = PrepareOsrTempData(cx, stub, frame, script, pc, jitcode);
+    if (!info)
+        return false;
+    *infoPtr = info;
+
+    return true;
+}
+
+typedef bool (*DoUseCountFallbackFn)(JSContext *, ICUseCount_Fallback *, BaselineFrame *frame,
+                                     IonOsrTempData **infoPtr);
+static const VMFunction DoUseCountFallbackInfo =
+    FunctionInfo<DoUseCountFallbackFn>(DoUseCountFallback);
+
+bool
+ICUseCount_Fallback::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    // enterStubFrame is going to clobber BaselineFrameReg, so save it in
+    // R0.scratchReg() first.
+    masm.movePtr(BaselineFrameReg, R0.scratchReg());
+
+    // Push a stub frame so that we can perform a non-tail call.
+    enterStubFrame(masm, R1.scratchReg());
+
+    Label noCompiledCode;
+    // Call DoUseCountFallback to compile or check for an Ion-compiled function.
+    {
+        // Push IonOsrTempData pointer storage
+        masm.subPtr(Imm32(sizeof(void *)), BaselineStackReg);
+        masm.push(BaselineStackReg);
+
+        // Push IonJSFrameLayout pointer.
+        masm.loadBaselineFramePtr(R0.scratchReg(), R0.scratchReg());
+        masm.push(R0.scratchReg());
+
+        // Push stub pointer.
+        masm.push(BaselineStubReg);
+
+        if (!callVM(DoUseCountFallbackInfo, masm))
+            return false;
+
+        // Pop IonOsrTempData pointer.
+        masm.pop(R0.scratchReg());
+
+        leaveStubFrame(masm);
+
+        // If no IonCode was found, just exit the IC.
+        masm.branchPtr(Assembler::Equal, R0.scratchReg(), ImmWord((void*) NULL), &noCompiledCode);
+    }
+
+    // Get a scratch register.
+    GeneralRegisterSet regs(availableGeneralRegs(0));
+    Register osrDataReg = R0.scratchReg();
+    regs.take(osrDataReg);
+    regs.takeUnchecked(OsrFrameReg);
+
+    Register scratchReg = regs.takeAny();
+
+    // At this point, stack looks like:
+    //  +-> [...Calling-Frame...]
+    //  |   [...Actual-Args/ThisV/ArgCount/Callee...]
+    //  |   [Descriptor]
+    //  |   [Return-Addr]
+    //  +---[Saved-FramePtr]            <-- BaselineFrameReg points here.
+    //      [...Baseline-Frame...]
+
+    // Restore the stack pointer to point to the saved frame pointer.
+    masm.movePtr(BaselineFrameReg, BaselineStackReg);
+
+    // Discard saved frame pointer, so that the return address is on top of
+    // the stack.
+    masm.pop(scratchReg);
+
+    // Jump into Ion.
+    masm.loadPtr(Address(osrDataReg, offsetof(IonOsrTempData, jitcode)), scratchReg);
+    masm.loadPtr(Address(osrDataReg, offsetof(IonOsrTempData, stackFrame)), OsrFrameReg);
+    masm.jump(scratchReg);
+
+    // No jitcode available, do nothing.
+    masm.bind(&noCompiledCode);
+    EmitReturnFromIC(masm);
+    return true;
+}
+
+//
+// ICProfiler_Fallback
+//
+
+static bool
+DoProfilerFallback(JSContext *cx, BaselineFrame *frame, ICProfiler_Fallback *stub)
+{
+    RootedScript script(cx, frame->script());
+    RootedFunction func(cx, frame->maybeFun());
+    mozilla::DebugOnly<ICEntry *> icEntry = stub->icEntry();
+
+    FallbackICSpew(cx, stub, "Profiler");
+
+    SPSProfiler *profiler = &cx->runtime->spsProfiler;
+
+    // Manually enter SPS this time.
+    JS_ASSERT(profiler->enabled());
+    if (!cx->runtime->spsProfiler.enter(cx, script, func))
+        return false;
+    frame->setPushedSPSFrame();
+
+    // Unlink any existing PushFunction stub (which may hold a stale
+    // 'const char *' to the profile string).
+    JS_ASSERT_IF(icEntry->firstStub() != stub,
+                 icEntry->firstStub()->isProfiler_PushFunction() &&
+                 icEntry->firstStub()->next() == stub);
+    stub->unlinkStubsWithKind(cx, ICStub::Profiler_PushFunction);
+    JS_ASSERT(icEntry->firstStub() == stub);
+
+    // Generate the string to use to identify this stack frame.
+    const char *string = profiler->profileString(cx, script, func);
+    if (string == NULL)
+        return false;
+
+    IonSpew(IonSpew_BaselineIC, "  Generating Profiler_PushFunction stub for %s:%d",
+            script->filename(), script->lineno);
+
+    // Create a new optimized stub.
+    ICProfiler_PushFunction::Compiler compiler(cx, string, script);
+    ICStub *optStub = compiler.getStub(compiler.getStubSpace(script));
+    if (!optStub)
+        return false;
+    stub->addNewStub(optStub);
+
+    return true;
+}
+
+typedef bool (*DoProfilerFallbackFn)(JSContext *, BaselineFrame *frame, ICProfiler_Fallback *);
+static const VMFunction DoProfilerFallbackInfo =
+    FunctionInfo<DoProfilerFallbackFn>(DoProfilerFallback);
+
+bool
+ICProfiler_Fallback::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    EmitRestoreTailCallReg(masm);
+
+    masm.push(BaselineStubReg);         // Push stub.
+    masm.pushBaselineFramePtr(BaselineFrameReg, R0.scratchReg()); // Push frame.
+
+    return tailCallVM(DoProfilerFallbackInfo, masm);
+}
+
+bool
+ICProfiler_PushFunction::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    Register scratch = R0.scratchReg();
+    Register scratch2 = R1.scratchReg();
+
+    // Profiling should be enabled if we ever reach here.
+#ifdef DEBUG
+    Label spsEnabled;
+    uint32_t *enabledAddr = cx->runtime->spsProfiler.addressOfEnabled();
+    masm.branch32(Assembler::NotEqual, AbsoluteAddress(enabledAddr), Imm32(0), &spsEnabled);
+    masm.breakpoint();
+    masm.bind(&spsEnabled);
+#endif
+
+    // Push SPS entry.
+    masm.spsPushFrame(&cx->runtime->spsProfiler,
+                      Address(BaselineStubReg, ICProfiler_PushFunction::offsetOfStr()),
+                      Address(BaselineStubReg, ICProfiler_PushFunction::offsetOfScript()),
+                      scratch,
+                      scratch2);
+
+    // Mark frame as having profiler entry pushed.
+    Address flagsOffset(BaselineFrameReg, BaselineFrame::reverseOffsetOfFlags());
+    masm.or32(Imm32(BaselineFrame::HAS_PUSHED_SPS_FRAME), flagsOffset);
+
+    EmitReturnFromIC(masm);
+
+    return true;
+}
+
+//
+// TypeMonitor_Fallback
+//
+
+bool
+ICTypeMonitor_Fallback::addMonitorStubForValue(JSContext *cx, HandleScript script, HandleValue val)
+{
+    bool wasDetachedMonitorChain = lastMonitorStubPtrAddr_ == NULL;
+    JS_ASSERT_IF(wasDetachedMonitorChain, numOptimizedMonitorStubs_ == 0);
+
+    if (numOptimizedMonitorStubs_ >= MAX_OPTIMIZED_STUBS) {
+        // TODO: if the TypeSet becomes unknown or has the AnyObject type,
+        // replace stubs with a single stub to handle these.
+        return true;
+    }
+
+    if (val.isPrimitive()) {
+        JS_ASSERT(!val.isMagic());
+        JSValueType type = val.isDouble() ? JSVAL_TYPE_DOUBLE : val.extractNonDoubleType();
+
+        // Check for existing TypeMonitor stub.
+        ICTypeMonitor_PrimitiveSet *existingStub = NULL;
+        for (ICStubConstIterator iter = firstMonitorStub(); !iter.atEnd(); iter++) {
+            if (iter->isTypeMonitor_PrimitiveSet()) {
+                existingStub = iter->toTypeMonitor_PrimitiveSet();
+                if (existingStub->containsType(type))
+                    return true;
+            }
+        }
+
+        ICTypeMonitor_PrimitiveSet::Compiler compiler(cx, existingStub, type);
+        ICStub *stub = existingStub ? compiler.updateStub()
+                                    : compiler.getStub(compiler.getStubSpace(script));
+        if (!stub)
+            return false;
+
+        IonSpew(IonSpew_BaselineIC, "  %s TypeMonitor stub %p for primitive type %d",
+                existingStub ? "Modified existing" : "Created new", stub, type);
+
+        if (!existingStub) {
+            JS_ASSERT(!hasStub(TypeMonitor_PrimitiveSet));
+            addOptimizedMonitorStub(stub);
+        }
+
+    } else if (val.toObject().hasSingletonType()) {
+        RootedObject obj(cx, &val.toObject());
+
+        // Check for existing TypeMonitor stub.
+        for (ICStubConstIterator iter = firstMonitorStub(); !iter.atEnd(); iter++) {
+            if (iter->isTypeMonitor_SingleObject() &&
+                iter->toTypeMonitor_SingleObject()->object() == obj)
+            {
+                return true;
+            }
+        }
+
+        ICTypeMonitor_SingleObject::Compiler compiler(cx, obj);
+        ICStub *stub = compiler.getStub(compiler.getStubSpace(script));
+        if (!stub)
+            return false;
+
+        IonSpew(IonSpew_BaselineIC, "  Added TypeMonitor stub %p for singleton %p",
+                stub, obj.get());
+
+        addOptimizedMonitorStub(stub);
+
+    } else {
+        RootedTypeObject type(cx, val.toObject().type());
+
+        // Check for existing TypeMonitor stub.
+        for (ICStubConstIterator iter = firstMonitorStub(); !iter.atEnd(); iter++) {
+            if (iter->isTypeMonitor_TypeObject() &&
+                iter->toTypeMonitor_TypeObject()->type() == type)
+            {
+                return true;
+            }
+        }
+
+        ICTypeMonitor_TypeObject::Compiler compiler(cx, type);
+        ICStub *stub = compiler.getStub(compiler.getStubSpace(script));
+        if (!stub)
+            return false;
+
+        IonSpew(IonSpew_BaselineIC, "  Added TypeMonitor stub %p for TypeObject %p",
+                stub, type.get());
+
+        addOptimizedMonitorStub(stub);
+    }
+
+    bool firstMonitorStubAdded = wasDetachedMonitorChain && (numOptimizedMonitorStubs_ > 0);
+
+    if (firstMonitorStubAdded) {
+        // The monitor chain was empty before, but a new stub was added.  This is the
+        // only time that any main stubs' firstMonitorStub fields need to be updated to
+        // refer to the newly added monitor stub.
+        ICStub *firstStub = mainFallbackStub_->icEntry()->firstStub();
+        for (ICStubConstIterator iter = firstStub; !iter.atEnd(); iter++) {
+            // Non-monitored stubs are used if the result always has the same type,
+            // e.g. a StringLength stub will always return int32.
+            if (!iter->isMonitored())
+                continue;
+
+            // Since we just added the first optimized monitoring stub, any
+            // existing main stub's |firstMonitorStub| MUST be pointing to the fallback
+            // monitor stub (i.e. this stub).
+            JS_ASSERT(iter->toMonitoredStub()->firstMonitorStub() == this);
+            iter->toMonitoredStub()->updateFirstMonitorStub(firstMonitorStub_);
+        }
+    }
+
+    return true;
+}
+
+static bool
+DoTypeMonitorFallback(JSContext *cx, BaselineFrame *frame, ICTypeMonitor_Fallback *stub,
+                      HandleValue value, MutableHandleValue res)
+{
+    RootedScript script(cx, frame->script());
+    jsbytecode *pc = stub->icEntry()->pc(script);
+    TypeFallbackICSpew(cx, stub, "TypeMonitor");
+
+    uint32_t argument;
+    if (stub->monitorsThis()) {
+        JS_ASSERT(pc == script->code);
+        types::TypeScript::SetThis(cx, script, value);
+    } else if (stub->monitorsArgument(&argument)) {
+        JS_ASSERT(pc == script->code);
+        types::TypeScript::SetArgument(cx, script, argument, value);
+    } else {
+        types::TypeScript::Monitor(cx, script, pc, value);
+    }
+
+    if (!stub->addMonitorStubForValue(cx, script, value))
+        return false;
+
+    // Copy input value to res.
+    res.set(value);
+    return true;
+}
+
+typedef bool (*DoTypeMonitorFallbackFn)(JSContext *, BaselineFrame *, ICTypeMonitor_Fallback *,
+                                        HandleValue, MutableHandleValue);
+static const VMFunction DoTypeMonitorFallbackInfo =
+    FunctionInfo<DoTypeMonitorFallbackFn>(DoTypeMonitorFallback);
+
+bool
+ICTypeMonitor_Fallback::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    JS_ASSERT(R0 == JSReturnOperand);
+
+    // Restore the tail call register.
+    EmitRestoreTailCallReg(masm);
+
+    masm.pushValue(R0);
+    masm.push(BaselineStubReg);
+    masm.pushBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+
+    return tailCallVM(DoTypeMonitorFallbackInfo, masm);
+}
+
+bool
+ICTypeMonitor_PrimitiveSet::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    Label success;
+    if ((flags_ & TypeToFlag(JSVAL_TYPE_INT32)) && !(flags_ & TypeToFlag(JSVAL_TYPE_DOUBLE)))
+        masm.branchTestInt32(Assembler::Equal, R0, &success);
+
+    if (flags_ & TypeToFlag(JSVAL_TYPE_DOUBLE))
+        masm.branchTestNumber(Assembler::Equal, R0, &success);
+
+    if (flags_ & TypeToFlag(JSVAL_TYPE_UNDEFINED))
+        masm.branchTestUndefined(Assembler::Equal, R0, &success);
+
+    if (flags_ & TypeToFlag(JSVAL_TYPE_BOOLEAN))
+        masm.branchTestBoolean(Assembler::Equal, R0, &success);
+
+    if (flags_ & TypeToFlag(JSVAL_TYPE_STRING))
+        masm.branchTestString(Assembler::Equal, R0, &success);
+
+    // Currently, we never generate primitive stub checks for object.  However,
+    // once we collapse overlong monitor chains of objects and singletons down
+    // to a generic "any object" check, in coordination with the typeset doing
+    // the same thing, this will need to be re-enabled.
+    /*
+    if (flags_ & TypeToFlag(JSVAL_TYPE_OBJECT))
+        masm.branchTestObject(Assembler::Equal, R0, &success);
+    */
+    JS_ASSERT(!(flags_ & TypeToFlag(JSVAL_TYPE_OBJECT)));
+
+    if (flags_ & TypeToFlag(JSVAL_TYPE_NULL))
+        masm.branchTestNull(Assembler::Equal, R0, &success);
+
+    EmitStubGuardFailure(masm);
+
+    masm.bind(&success);
+    EmitReturnFromIC(masm);
+    return true;
+}
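+
+// Example: a PrimitiveSet stub compiled with
+//   flags_ = TypeToFlag(JSVAL_TYPE_INT32) | TypeToFlag(JSVAL_TYPE_STRING)
+// emits a branchTestInt32 and a branchTestString guard; any other value type
+// falls through to EmitStubGuardFailure and on to the next stub in the chain.
+// Once the DOUBLE flag is set, branchTestNumber subsumes the int32 case,
+// which is why the separate int32 guard is skipped above.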
+
+bool
+ICTypeMonitor_SingleObject::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    Label failure;
+    masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+
+    // Guard on the object's identity.
+    Register obj = masm.extractObject(R0, ExtractTemp0);
+    Address expectedObject(BaselineStubReg, ICTypeMonitor_SingleObject::offsetOfObject());
+    masm.branchPtr(Assembler::NotEqual, expectedObject, obj, &failure);
+
+    EmitReturnFromIC(masm);
+
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
+
+bool
+ICTypeMonitor_TypeObject::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    Label failure;
+    masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+
+    // Guard on the object's TypeObject.
+    Register obj = masm.extractObject(R0, ExtractTemp0);
+    masm.loadPtr(Address(obj, JSObject::offsetOfType()), R1.scratchReg());
+
+    Address expectedType(BaselineStubReg, ICTypeMonitor_TypeObject::offsetOfType());
+    masm.branchPtr(Assembler::NotEqual, expectedType, R1.scratchReg(), &failure);
+
+    EmitReturnFromIC(masm);
+
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
+
+bool
+ICUpdatedStub::addUpdateStubForValue(JSContext *cx, HandleScript script, HandleObject obj,
+                                     RawId id, HandleValue val)
+{
+    if (numOptimizedStubs_ >= MAX_OPTIMIZED_STUBS) {
+        // TODO: if the TypeSet becomes unknown or has the AnyObject type,
+        // replace stubs with a single stub to handle these.
+        return true;
+    }
+
+    types::EnsureTrackPropertyTypes(cx, obj, id);
+
+    if (val.isPrimitive()) {
+        JSValueType type = val.isDouble() ? JSVAL_TYPE_DOUBLE : val.extractNonDoubleType();
+
+        // Check for existing TypeUpdate stub.
+        ICTypeUpdate_PrimitiveSet *existingStub = NULL;
+        for (ICStubConstIterator iter = firstUpdateStub_; !iter.atEnd(); iter++) {
+            if (iter->isTypeUpdate_PrimitiveSet()) {
+                existingStub = iter->toTypeUpdate_PrimitiveSet();
+                if (existingStub->containsType(type))
+                    return true;
+            }
+        }
+
+        ICTypeUpdate_PrimitiveSet::Compiler compiler(cx, existingStub, type);
+        ICStub *stub = existingStub ? compiler.updateStub()
+                                    : compiler.getStub(compiler.getStubSpace(script));
+        if (!stub)
+            return false;
+        if (!existingStub) {
+            JS_ASSERT(!hasTypeUpdateStub(TypeUpdate_PrimitiveSet));
+            addOptimizedUpdateStub(stub);
+        }
+
+        IonSpew(IonSpew_BaselineIC, "  %s TypeUpdate stub %p for primitive type %d",
+                existingStub ? "Modified existing" : "Created new", stub, type);
+
+    } else if (val.toObject().hasSingletonType()) {
+        RootedObject obj(cx, &val.toObject());
+
+        // Check for existing TypeUpdate stub.
+        for (ICStubConstIterator iter = firstUpdateStub_; !iter.atEnd(); iter++) {
+            if (iter->isTypeUpdate_SingleObject() &&
+                iter->toTypeUpdate_SingleObject()->object() == obj)
+            {
+                return true;
+            }
+        }
+
+        ICTypeUpdate_SingleObject::Compiler compiler(cx, obj);
+        ICStub *stub = compiler.getStub(compiler.getStubSpace(script));
+        if (!stub)
+            return false;
+
+        IonSpew(IonSpew_BaselineIC, "  Added TypeUpdate stub %p for singleton %p", stub, obj.get());
+
+        addOptimizedUpdateStub(stub);
+
+    } else {
+        RootedTypeObject type(cx, val.toObject().type());
+
+        // Check for existing TypeUpdate stub.
+        for (ICStubConstIterator iter = firstUpdateStub_; !iter.atEnd(); iter++) {
+            if (iter->isTypeUpdate_TypeObject() &&
+                iter->toTypeUpdate_TypeObject()->type() == type)
+            {
+                return true;
+            }
+        }
+
+        ICTypeUpdate_TypeObject::Compiler compiler(cx, type);
+        ICStub *stub = compiler.getStub(compiler.getStubSpace(script));
+        if (!stub)
+            return false;
+
+        IonSpew(IonSpew_BaselineIC, "  Added TypeUpdate stub %p for TypeObject %p",
+                stub, type.get());
+
+        addOptimizedUpdateStub(stub);
+    }
+
+    return true;
+}
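+
+// For example, a property-write site that only ever stores int32 values ends
+// up with a single TypeUpdate_PrimitiveSet(int32) stub, while storing objects
+// of two different TypeObjects yields two TypeUpdate_TypeObject stubs.  Once
+// MAX_OPTIMIZED_STUBS is reached, no further update stubs are attached and
+// new types are simply left to the fallback path.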
+
+//
+// TypeUpdate_Fallback
+//
+static bool
+DoTypeUpdateFallback(JSContext *cx, BaselineFrame *frame, ICUpdatedStub *stub, HandleValue objval,
+                     HandleValue value)
+{
+    FallbackICSpew(cx, stub->getChainFallback(), "TypeUpdate(%s)",
+                   ICStub::KindString(stub->kind()));
+
+    RootedScript script(cx, frame->script());
+    RootedObject obj(cx, &objval.toObject());
+    RootedId id(cx);
+
+    switch (stub->kind()) {
+      case ICStub::SetElem_Dense:
+      case ICStub::SetElem_DenseAdd: {
+        JS_ASSERT(obj->isNative());
+        id = JSID_VOID;
+        types::AddTypePropertyId(cx, obj, id, value);
+        break;
+      }
+      case ICStub::SetProp_Native:
+      case ICStub::SetProp_NativeAdd: {
+        JS_ASSERT(obj->isNative());
+        jsbytecode *pc = stub->getChainFallback()->icEntry()->pc(script);
+        id = NameToId(script->getName(pc));
+        types::AddTypePropertyId(cx, obj, id, value);
+        break;
+      }
+      default:
+        JS_NOT_REACHED("Invalid stub");
+        return false;
+    }
+
+    return stub->addUpdateStubForValue(cx, script, obj, id, value);
+}
+
+typedef bool (*DoTypeUpdateFallbackFn)(JSContext *, BaselineFrame *, ICUpdatedStub *, HandleValue,
+                                       HandleValue);
+const VMFunction DoTypeUpdateFallbackInfo =
+    FunctionInfo<DoTypeUpdateFallbackFn>(DoTypeUpdateFallback);
+
+bool
+ICTypeUpdate_Fallback::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    // Just store false into R1.scratchReg() and return.
+    masm.move32(Imm32(0), R1.scratchReg());
+    EmitReturnFromIC(masm);
+    return true;
+}
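+
+// The value in R1.scratchReg() acts as a "type already known" flag: the
+// optimized TypeUpdate stubs below load true when their guard matches, while
+// this fallback loads false, signalling the calling stub to take the
+// DoTypeUpdateFallback VM-call path so the new type gets recorded and a stub
+// attached.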
+
+bool
+ICTypeUpdate_PrimitiveSet::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    Label success;
+    if ((flags_ & TypeToFlag(JSVAL_TYPE_INT32)) && !(flags_ & TypeToFlag(JSVAL_TYPE_DOUBLE)))
+        masm.branchTestInt32(Assembler::Equal, R0, &success);
+
+    if (flags_ & TypeToFlag(JSVAL_TYPE_DOUBLE))
+        masm.branchTestNumber(Assembler::Equal, R0, &success);
+
+    if (flags_ & TypeToFlag(JSVAL_TYPE_UNDEFINED))
+        masm.branchTestUndefined(Assembler::Equal, R0, &success);
+
+    if (flags_ & TypeToFlag(JSVAL_TYPE_BOOLEAN))
+        masm.branchTestBoolean(Assembler::Equal, R0, &success);
+
+    if (flags_ & TypeToFlag(JSVAL_TYPE_STRING))
+        masm.branchTestString(Assembler::Equal, R0, &success);
+
+    // Currently, we never generate primitive stub checks for the object type.
+    // If we ever collapse long monitor chains of objects and singletons into a
+    // generic "any object" stub, in coordination with the typeset doing the
+    // same, this check will need to be re-enabled.
+    /*
+    if (flags_ & TypeToFlag(JSVAL_TYPE_OBJECT))
+        masm.branchTestObject(Assembler::Equal, R0, &success);
+    */
+    JS_ASSERT(!(flags_ & TypeToFlag(JSVAL_TYPE_OBJECT)));
+
+    if (flags_ & TypeToFlag(JSVAL_TYPE_NULL))
+        masm.branchTestNull(Assembler::Equal, R0, &success);
+
+    EmitStubGuardFailure(masm);
+
+    // Type matches, load true into R1.scratchReg() and return.
+    masm.bind(&success);
+    masm.mov(Imm32(1), R1.scratchReg());
+    EmitReturnFromIC(masm);
+
+    return true;
+}
+
+bool
+ICTypeUpdate_SingleObject::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    Label failure;
+    masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+
+    // Guard on the object's identity.
+    Register obj = masm.extractObject(R0, R1.scratchReg());
+    Address expectedObject(BaselineStubReg, ICTypeUpdate_SingleObject::offsetOfObject());
+    masm.branchPtr(Assembler::NotEqual, expectedObject, obj, &failure);
+
+    // Identity matches, load true into R1.scratchReg() and return.
+    masm.mov(Imm32(1), R1.scratchReg());
+    EmitReturnFromIC(masm);
+
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
+
+bool
+ICTypeUpdate_TypeObject::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    Label failure;
+    masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+
+    // Guard on the object's TypeObject.
+    Register obj = masm.extractObject(R0, R1.scratchReg());
+    masm.loadPtr(Address(obj, JSObject::offsetOfType()), R1.scratchReg());
+
+    Address expectedType(BaselineStubReg, ICTypeUpdate_TypeObject::offsetOfType());
+    masm.branchPtr(Assembler::NotEqual, expectedType, R1.scratchReg(), &failure);
+
+    // Type matches, load true into R1.scratchReg() and return.
+    masm.mov(Imm32(1), R1.scratchReg());
+    EmitReturnFromIC(masm);
+
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
+
+//
+// This_Fallback
+//
+
+static bool
+DoThisFallback(JSContext *cx, ICThis_Fallback *stub, HandleValue thisv, MutableHandleValue ret)
+{
+    FallbackICSpew(cx, stub, "This");
+
+    ret.set(thisv);
+    bool modified;
+    return BoxNonStrictThis(cx, ret, &modified);
+}
+
+typedef bool (*DoThisFallbackFn)(JSContext *, ICThis_Fallback *, HandleValue, MutableHandleValue);
+static const VMFunction DoThisFallbackInfo = FunctionInfo<DoThisFallbackFn>(DoThisFallback);
+
+bool
+ICThis_Fallback::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    JS_ASSERT(R0 == JSReturnOperand);
+
+    // Restore the tail call register.
+    EmitRestoreTailCallReg(masm);
+
+    masm.pushValue(R0);
+    masm.push(BaselineStubReg);
+
+    return tailCallVM(DoThisFallbackInfo, masm);
+}
+
+//
+// NewArray_Fallback
+//
+
+static bool
+DoNewArray(JSContext *cx, ICNewArray_Fallback *stub, uint32_t length,
+           HandleTypeObject type, MutableHandleValue res)
+{
+    FallbackICSpew(cx, stub, "NewArray");
+
+    RawObject obj = NewInitArray(cx, length, type);
+    if (!obj)
+        return false;
+
+    res.setObject(*obj);
+    return true;
+}
+
+typedef bool(*DoNewArrayFn)(JSContext *, ICNewArray_Fallback *, uint32_t, HandleTypeObject,
+                            MutableHandleValue);
+static const VMFunction DoNewArrayInfo = FunctionInfo<DoNewArrayFn>(DoNewArray);
+
+bool
+ICNewArray_Fallback::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    EmitRestoreTailCallReg(masm);
+
+    masm.push(R1.scratchReg()); // type
+    masm.push(R0.scratchReg()); // length
+    masm.push(BaselineStubReg); // stub.
+
+    return tailCallVM(DoNewArrayInfo, masm);
+}
+
+//
+// NewObject_Fallback
+//
+
+static bool
+DoNewObject(JSContext *cx, ICNewObject_Fallback *stub, HandleObject templateObject,
+            MutableHandleValue res)
+{
+    FallbackICSpew(cx, stub, "NewObject");
+
+    RawObject obj = NewInitObject(cx, templateObject);
+    if (!obj)
+        return false;
+
+    res.setObject(*obj);
+    return true;
+}
+
+typedef bool(*DoNewObjectFn)(JSContext *, ICNewObject_Fallback *, HandleObject,
+                             MutableHandleValue);
+static const VMFunction DoNewObjectInfo = FunctionInfo<DoNewObjectFn>(DoNewObject);
+
+bool
+ICNewObject_Fallback::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    EmitRestoreTailCallReg(masm);
+
+    masm.push(R0.scratchReg()); // template
+    masm.push(BaselineStubReg); // stub.
+
+    return tailCallVM(DoNewObjectInfo, masm);
+}
+
+//
+// Compare_Fallback
+//
+
+static bool
+DoCompareFallback(JSContext *cx, BaselineFrame *frame, ICCompare_Fallback *stub, HandleValue lhs,
+                  HandleValue rhs, MutableHandleValue ret)
+{
+    jsbytecode *pc = stub->icEntry()->pc(frame->script());
+    JSOp op = JSOp(*pc);
+
+    FallbackICSpew(cx, stub, "Compare(%s)", js_CodeName[op]);
+
+    // Case operations in a CONDSWITCH perform strict equality.
+    if (op == JSOP_CASE)
+        op = JSOP_STRICTEQ;
+
+    // Don't pass lhs/rhs directly; we need the original values when
+    // generating stubs.
+    RootedValue lhsCopy(cx, lhs);
+    RootedValue rhsCopy(cx, rhs);
+
+    // Perform the compare operation.
+    JSBool out;
+    switch (op) {
+      case JSOP_LT:
+        if (!LessThan(cx, &lhsCopy, &rhsCopy, &out))
+            return false;
+        break;
+      case JSOP_LE:
+        if (!LessThanOrEqual(cx, &lhsCopy, &rhsCopy, &out))
+            return false;
+        break;
+      case JSOP_GT:
+        if (!GreaterThan(cx, &lhsCopy, &rhsCopy, &out))
+            return false;
+        break;
+      case JSOP_GE:
+        if (!GreaterThanOrEqual(cx, &lhsCopy, &rhsCopy, &out))
+            return false;
+        break;
+      case JSOP_EQ:
+        if (!LooselyEqual<true>(cx, &lhsCopy, &rhsCopy, &out))
+            return false;
+        break;
+      case JSOP_NE:
+        if (!LooselyEqual<false>(cx, &lhsCopy, &rhsCopy, &out))
+            return false;
+        break;
+      case JSOP_STRICTEQ:
+        if (!StrictlyEqual<true>(cx, &lhsCopy, &rhsCopy, &out))
+            return false;
+        break;
+      case JSOP_STRICTNE:
+        if (!StrictlyEqual<false>(cx, &lhsCopy, &rhsCopy, &out))
+            return false;
+        break;
+      default:
+        JS_ASSERT(!"Unhandled baseline compare op");
+        return false;
+    }
+
+    ret.setBoolean(out);
+
+    // Check to see if a new stub should be generated.
+    if (stub->numOptimizedStubs() >= ICCompare_Fallback::MAX_OPTIMIZED_STUBS) {
+        // TODO: Discard all stubs in this IC and replace with inert megamorphic stub.
+        // But for now we just bail.
+        return true;
+    }
+
+    RawScript script = frame->script();
+
+    // Try to generate new stubs.
+    if (lhs.isInt32() && rhs.isInt32()) {
+        IonSpew(IonSpew_BaselineIC, "  Generating %s(Int32, Int32) stub", js_CodeName[op]);
+        ICCompare_Int32::Compiler compiler(cx, op);
+        ICStub *int32Stub = compiler.getStub(compiler.getStubSpace(script));
+        if (!int32Stub)
+            return false;
+
+        stub->addNewStub(int32Stub);
+        return true;
+    }
+
+    if (lhs.isNumber() && rhs.isNumber()) {
+        IonSpew(IonSpew_BaselineIC, "  Generating %s(Number, Number) stub", js_CodeName[op]);
+
+        // Unlink int32 stubs; it's faster to always use the double stub.
+        stub->unlinkStubsWithKind(cx, ICStub::Compare_Int32);
+
+        ICCompare_Double::Compiler compiler(cx, op);
+        ICStub *doubleStub = compiler.getStub(compiler.getStubSpace(script));
+        if (!doubleStub)
+            return false;
+
+        stub->addNewStub(doubleStub);
+        return true;
+    }
+
+    if ((lhs.isNumber() && rhs.isUndefined()) ||
+        (lhs.isUndefined() && rhs.isNumber()))
+    {
+        IonSpew(IonSpew_BaselineIC, "  Generating %s(%s, %s) stub", js_CodeName[op],
+                    rhs.isUndefined() ? "Number" : "Undefined",
+                    rhs.isUndefined() ? "Undefined" : "Number");
+        ICCompare_NumberWithUndefined::Compiler compiler(cx, op, lhs.isUndefined());
+        ICStub *doubleStub = compiler.getStub(compiler.getStubSpace(script));
+        if (!doubleStub)
+            return false;
+
+        stub->addNewStub(doubleStub);
+        return true;
+    }
+
+    if (lhs.isBoolean() && rhs.isBoolean()) {
+        IonSpew(IonSpew_BaselineIC, "  Generating %s(Boolean, Boolean) stub", js_CodeName[op]);
+        ICCompare_Boolean::Compiler compiler(cx, op);
+        ICStub *booleanStub = compiler.getStub(compiler.getStubSpace(script));
+        if (!booleanStub)
+            return false;
+
+        stub->addNewStub(booleanStub);
+        return true;
+    }
+
+    if ((lhs.isBoolean() && rhs.isInt32()) || (lhs.isInt32() && rhs.isBoolean())) {
+        IonSpew(IonSpew_BaselineIC, "  Generating %s(%s, %s) stub", js_CodeName[op],
+                    rhs.isInt32() ? "Boolean" : "Int32",
+                    rhs.isInt32() ? "Int32" : "Boolean");
+        ICCompare_Int32WithBoolean::Compiler compiler(cx, op, lhs.isInt32());
+        ICStub *optStub = compiler.getStub(compiler.getStubSpace(script));
+        if (!optStub)
+            return false;
+
+        stub->addNewStub(optStub);
+        return true;
+    }
+
+    if (IsEqualityOp(op)) {
+        if (lhs.isString() && rhs.isString() && !stub->hasStub(ICStub::Compare_String)) {
+            IonSpew(IonSpew_BaselineIC, "  Generating %s(String, String) stub", js_CodeName[op]);
+            ICCompare_String::Compiler compiler(cx, op);
+            ICStub *stringStub = compiler.getStub(compiler.getStubSpace(script));
+            if (!stringStub)
+                return false;
+
+            stub->addNewStub(stringStub);
+            return true;
+        }
+
+        if (lhs.isObject() && rhs.isObject()) {
+            JS_ASSERT(!stub->hasStub(ICStub::Compare_Object));
+            IonSpew(IonSpew_BaselineIC, "  Generating %s(Object, Object) stub", js_CodeName[op]);
+            ICCompare_Object::Compiler compiler(cx, op);
+            ICStub *objectStub = compiler.getStub(compiler.getStubSpace(script));
+            if (!objectStub)
+                return false;
+
+            stub->addNewStub(objectStub);
+            return true;
+        }
+
+        if ((lhs.isObject() || lhs.isNull() || lhs.isUndefined()) &&
+            (rhs.isObject() || rhs.isNull() || rhs.isUndefined()) &&
+            !stub->hasStub(ICStub::Compare_ObjectWithUndefined))
+        {
+            IonSpew(IonSpew_BaselineIC, "  Generating %s(Obj/Null/Undef, Obj/Null/Undef) stub",
+                    js_CodeName[op]);
+            bool lhsIsUndefined = lhs.isNull() || lhs.isUndefined();
+            bool compareWithNull = lhs.isNull() || rhs.isNull();
+            ICCompare_ObjectWithUndefined::Compiler compiler(cx, op,
+                                                             lhsIsUndefined, compareWithNull);
+            ICStub *objectStub = compiler.getStub(compiler.getStubSpace(script));
+            if (!objectStub)
+                return false;
+
+            stub->addNewStub(objectStub);
+            return true;
+        }
+    }
+
+    return true;
+}
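+
+// Stub-progression example: the first execution of |a < b| with two int32
+// operands attaches a Compare_Int32 stub.  If a later execution sees
+// |1.5 < 2|, the int32 stub is unlinked and a Compare_Double stub is attached
+// instead, since it handles both operand kinds.  After MAX_OPTIMIZED_STUBS
+// stubs the IC stops specializing and every comparison takes the fallback.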
+
+typedef bool (*DoCompareFallbackFn)(JSContext *, BaselineFrame *, ICCompare_Fallback *,
+                                    HandleValue, HandleValue, MutableHandleValue);
+static const VMFunction DoCompareFallbackInfo =
+    FunctionInfo<DoCompareFallbackFn>(DoCompareFallback, PopValues(2));
+
+bool
+ICCompare_Fallback::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    JS_ASSERT(R0 == JSReturnOperand);
+
+    // Restore the tail call register.
+    EmitRestoreTailCallReg(masm);
+
+    // Ensure stack is fully synced for the expression decompiler.
+    masm.pushValue(R0);
+    masm.pushValue(R1);
+
+    // Push arguments.
+    masm.pushValue(R1);
+    masm.pushValue(R0);
+    masm.push(BaselineStubReg);
+    masm.pushBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+
+    return tailCallVM(DoCompareFallbackInfo, masm);
+}
+
+//
+// Compare_String
+//
+
+bool
+ICCompare_String::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    Label failure;
+    masm.branchTestString(Assembler::NotEqual, R0, &failure);
+    masm.branchTestString(Assembler::NotEqual, R1, &failure);
+
+    JS_ASSERT(IsEqualityOp(op));
+
+    Register left = masm.extractString(R0, ExtractTemp0);
+    Register right = masm.extractString(R1, ExtractTemp1);
+
+    GeneralRegisterSet regs(availableGeneralRegs(2));
+    Register scratchReg = regs.takeAny();
+    // x86 doesn't have the luxury of a second scratch.
+    Register scratchReg2;
+    if (regs.empty()) {
+        scratchReg2 = BaselineStubReg;
+        masm.push(BaselineStubReg);
+    } else {
+        scratchReg2 = regs.takeAny();
+    }
+    JS_ASSERT(scratchReg2 != scratchReg);
+
+    Label inlineCompareFailed;
+    masm.compareStrings(op, left, right, scratchReg2, scratchReg, &inlineCompareFailed);
+    masm.tagValue(JSVAL_TYPE_BOOLEAN, scratchReg2, R0);
+    if (scratchReg2 == BaselineStubReg)
+        masm.pop(BaselineStubReg);
+    EmitReturnFromIC(masm);
+
+    masm.bind(&inlineCompareFailed);
+    if (scratchReg2 == BaselineStubReg)
+        masm.pop(BaselineStubReg);
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
+
+//
+// Compare_Boolean
+//
+
+bool
+ICCompare_Boolean::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    Label failure;
+    masm.branchTestBoolean(Assembler::NotEqual, R0, &failure);
+    masm.branchTestBoolean(Assembler::NotEqual, R1, &failure);
+
+    Register left = masm.extractInt32(R0, ExtractTemp0);
+    Register right = masm.extractInt32(R1, ExtractTemp1);
+
+    // Compare payload regs of R0 and R1.
+    Assembler::Condition cond = JSOpToCondition(op, /* signed = */true);
+    masm.cmp32(left, right);
+    masm.emitSet(cond, left);
+
+    // Box the result and return
+    masm.tagValue(JSVAL_TYPE_BOOLEAN, left, R0);
+    EmitReturnFromIC(masm);
+
+    // Failure case - jump to next stub
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
+
+//
+// Compare_NumberWithUndefined
+//
+
+bool
+ICCompare_NumberWithUndefined::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    ValueOperand numberOperand, undefinedOperand;
+    if (lhsIsUndefined) {
+        numberOperand = R1;
+        undefinedOperand = R0;
+    } else {
+        numberOperand = R0;
+        undefinedOperand = R1;
+    }
+
+    Label failure;
+    masm.branchTestNumber(Assembler::NotEqual, numberOperand, &failure);
+    masm.branchTestUndefined(Assembler::NotEqual, undefinedOperand, &failure);
+
+    // Comparing a number with undefined will always be true for NE/STRICTNE,
+    // and always be false for other compare ops.
+    masm.moveValue(BooleanValue(op == JSOP_NE || op == JSOP_STRICTNE), R0);
+
+    EmitReturnFromIC(masm);
+
+    // Failure case - jump to next stub
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
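+
+// JS semantics note: relational comparisons coerce undefined to NaN, so e.g.
+// |1 < undefined|, |1 > undefined| and |1 == undefined| are all false, while
+// |1 != undefined| and |1 !== undefined| are true -- hence the constant
+// answer loaded above.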
+
+//
+// Compare_Object
+//
+
+bool
+ICCompare_Object::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    Label failure;
+    masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+    masm.branchTestObject(Assembler::NotEqual, R1, &failure);
+
+    JS_ASSERT(IsEqualityOp(op));
+
+    Register left = masm.extractObject(R0, ExtractTemp0);
+    Register right = masm.extractObject(R1, ExtractTemp1);
+
+    Label ifTrue;
+    masm.branchPtr(JSOpToCondition(op, /* signed = */true), left, right, &ifTrue);
+
+    masm.moveValue(BooleanValue(false), R0);
+    EmitReturnFromIC(masm);
+
+    masm.bind(&ifTrue);
+    masm.moveValue(BooleanValue(true), R0);
+    EmitReturnFromIC(masm);
+
+    // Failure case - jump to next stub
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
+
+//
+// Compare_ObjectWithUndefined
+//
+
+bool
+ICCompare_ObjectWithUndefined::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    JS_ASSERT(IsEqualityOp(op));
+
+    ValueOperand objectOperand, undefinedOperand;
+    if (lhsIsUndefined) {
+        objectOperand = R1;
+        undefinedOperand = R0;
+    } else {
+        objectOperand = R0;
+        undefinedOperand = R1;
+    }
+
+    Label failure;
+    if (compareWithNull)
+        masm.branchTestNull(Assembler::NotEqual, undefinedOperand, &failure);
+    else
+        masm.branchTestUndefined(Assembler::NotEqual, undefinedOperand, &failure);
+
+    Label notObject;
+    masm.branchTestObject(Assembler::NotEqual, objectOperand, &notObject);
+
+    if (op == JSOP_STRICTEQ || op == JSOP_STRICTNE) {
+        // obj !== undefined for all objects.
+        masm.moveValue(BooleanValue(op == JSOP_STRICTNE), R0);
+        EmitReturnFromIC(masm);
+    } else {
+        // obj != undefined only where !obj->getClass()->emulatesUndefined()
+        Label emulatesUndefined;
+        Register obj = masm.extractObject(objectOperand, ExtractTemp0);
+        masm.loadPtr(Address(obj, JSObject::offsetOfType()), obj);
+        masm.loadPtr(Address(obj, offsetof(types::TypeObject, clasp)), obj);
+        masm.branchTest32(Assembler::NonZero,
+                          Address(obj, Class::offsetOfFlags()),
+                          Imm32(JSCLASS_EMULATES_UNDEFINED),
+                          &emulatesUndefined);
+        masm.moveValue(BooleanValue(op == JSOP_NE), R0);
+        EmitReturnFromIC(masm);
+        masm.bind(&emulatesUndefined);
+        masm.moveValue(BooleanValue(op == JSOP_EQ), R0);
+        EmitReturnFromIC(masm);
+    }
+
+    masm.bind(&notObject);
+
+    // Also support null == null or undefined == undefined comparisons.
+    if (compareWithNull)
+        masm.branchTestNull(Assembler::NotEqual, objectOperand, &failure);
+    else
+        masm.branchTestUndefined(Assembler::NotEqual, objectOperand, &failure);
+
+    masm.moveValue(BooleanValue(op == JSOP_STRICTEQ || op == JSOP_EQ), R0);
+    EmitReturnFromIC(masm);
+
+    // Failure case - jump to next stub
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
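+
+// The emulatesUndefined path exists for objects like document.all, whose
+// class has JSCLASS_EMULATES_UNDEFINED set: |document.all == undefined| is
+// true even though document.all is an object, whereas strict equality still
+// distinguishes the two.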
+
+//
+// Compare_Int32WithBoolean
+//
+
+bool
+ICCompare_Int32WithBoolean::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    Label failure;
+    ValueOperand int32Val;
+    ValueOperand boolVal;
+    if (lhsIsInt32_) {
+        int32Val = R0;
+        boolVal = R1;
+    } else {
+        boolVal = R0;
+        int32Val = R1;
+    }
+    masm.branchTestBoolean(Assembler::NotEqual, boolVal, &failure);
+    masm.branchTestInt32(Assembler::NotEqual, int32Val, &failure);
+
+    if (op_ == JSOP_STRICTEQ || op_ == JSOP_STRICTNE) {
+        // Ints and booleans are never strictly equal, always strictly not equal.
+        masm.moveValue(BooleanValue(op_ == JSOP_STRICTNE), R0);
+        EmitReturnFromIC(masm);
+    } else {
+        Register boolReg = masm.extractBoolean(boolVal, ExtractTemp0);
+        Register int32Reg = masm.extractInt32(int32Val, ExtractTemp1);
+
+        // Compare payload regs of R0 and R1.
+        Assembler::Condition cond = JSOpToCondition(op_, /* signed = */true);
+        masm.cmp32(lhsIsInt32_ ? int32Reg : boolReg,
+                   lhsIsInt32_ ? boolReg : int32Reg);
+        masm.emitSet(cond, R0.scratchReg());
+
+        // Box the result and return
+        masm.tagValue(JSVAL_TYPE_BOOLEAN, R0.scratchReg(), R0);
+        EmitReturnFromIC(masm);
+    }
+
+    // Failure case - jump to next stub
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
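+
+// Example: |true == 1| compares the boolean and int32 payloads directly and
+// yields true, whereas |true === 1| is answered with the constant
+// (op_ == JSOP_STRICTNE) result, because an int32 and a boolean can never be
+// strictly equal.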
+
+//
+// ToBool_Fallback
+//
+
+static bool
+DoToBoolFallback(JSContext *cx, BaselineFrame *frame, ICToBool_Fallback *stub, HandleValue arg,
+                 MutableHandleValue ret)
+{
+    FallbackICSpew(cx, stub, "ToBool");
+
+    bool cond = ToBoolean(arg);
+    ret.setBoolean(cond);
+
+    // Check to see if a new stub should be generated.
+    if (stub->numOptimizedStubs() >= ICToBool_Fallback::MAX_OPTIMIZED_STUBS) {
+        // TODO: Discard all stubs in this IC and replace with inert megamorphic stub.
+        // But for now we just bail.
+        return true;
+    }
+
+    JS_ASSERT(!arg.isBoolean());
+
+    RawScript script = frame->script();
+
+    // Try to generate new stubs.
+    if (arg.isInt32()) {
+        IonSpew(IonSpew_BaselineIC, "  Generating ToBool(Int32) stub.");
+        ICToBool_Int32::Compiler compiler(cx);
+        ICStub *int32Stub = compiler.getStub(compiler.getStubSpace(script));
+        if (!int32Stub)
+            return false;
+
+        stub->addNewStub(int32Stub);
+        return true;
+    }
+
+    if (arg.isDouble()) {
+        IonSpew(IonSpew_BaselineIC, "  Generating ToBool(Double) stub.");
+        ICToBool_Double::Compiler compiler(cx);
+        ICStub *doubleStub = compiler.getStub(compiler.getStubSpace(script));
+        if (!doubleStub)
+            return false;
+
+        stub->addNewStub(doubleStub);
+        return true;
+    }
+
+    if (arg.isString()) {
+        IonSpew(IonSpew_BaselineIC, "  Generating ToBool(String) stub");
+        ICToBool_String::Compiler compiler(cx);
+        ICStub *stringStub = compiler.getStub(compiler.getStubSpace(script));
+        if (!stringStub)
+            return false;
+
+        stub->addNewStub(stringStub);
+        return true;
+    }
+
+    if (arg.isNull() || arg.isUndefined()) {
+        IonSpew(IonSpew_BaselineIC, "  Generating ToBool(NullUndefined) stub.");
+        ICToBool_NullUndefined::Compiler compiler(cx);
+        ICStub *nilStub = compiler.getStub(compiler.getStubSpace(script));
+        if (!nilStub)
+            return false;
+
+        stub->addNewStub(nilStub);
+        return true;
+    }
+
+    if (arg.isObject()) {
+        IonSpew(IonSpew_BaselineIC, "  Generating ToBool(Object) stub.");
+        ICToBool_Object::Compiler compiler(cx);
+        ICStub *objStub = compiler.getStub(compiler.getStubSpace(script));
+        if (!objStub)
+            return false;
+
+        stub->addNewStub(objStub);
+        return true;
+    }
+
+    return true;
+}
+
+typedef bool (*DoToBoolFallbackFn)(JSContext *, BaselineFrame *, ICToBool_Fallback *,
+                                   HandleValue, MutableHandleValue);
+static const VMFunction DoToBoolFallbackInfo =
+    FunctionInfo<DoToBoolFallbackFn>(DoToBoolFallback);
+
+bool
+ICToBool_Fallback::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    JS_ASSERT(R0 == JSReturnOperand);
+
+    // Restore the tail call register.
+    EmitRestoreTailCallReg(masm);
+
+    // Push arguments.
+    masm.pushValue(R0);
+    masm.push(BaselineStubReg);
+    masm.pushBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+
+    return tailCallVM(DoToBoolFallbackInfo, masm);
+}
+
+//
+// ToBool_Int32
+//
+
+bool
+ICToBool_Int32::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    Label failure;
+    masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+
+    Label ifFalse;
+    Assembler::Condition cond = masm.testInt32Truthy(false, R0);
+    masm.j(cond, &ifFalse);
+
+    masm.moveValue(BooleanValue(true), R0);
+    EmitReturnFromIC(masm);
+
+    masm.bind(&ifFalse);
+    masm.moveValue(BooleanValue(false), R0);
+    EmitReturnFromIC(masm);
+
+    // Failure case - jump to next stub
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
+
+//
+// ToBool_String
+//
+
+bool
+ICToBool_String::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    Label failure;
+    masm.branchTestString(Assembler::NotEqual, R0, &failure);
+
+    Label ifFalse;
+    Assembler::Condition cond = masm.testStringTruthy(false, R0);
+    masm.j(cond, &ifFalse);
+
+    masm.moveValue(BooleanValue(true), R0);
+    EmitReturnFromIC(masm);
+
+    masm.bind(&ifFalse);
+    masm.moveValue(BooleanValue(false), R0);
+    EmitReturnFromIC(masm);
+
+    // Failure case - jump to next stub
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
+
+//
+// ToBool_NullUndefined
+//
+
+bool
+ICToBool_NullUndefined::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    Label failure, ifFalse;
+    masm.branchTestNull(Assembler::Equal, R0, &ifFalse);
+    masm.branchTestUndefined(Assembler::NotEqual, R0, &failure);
+
+    masm.bind(&ifFalse);
+    masm.moveValue(BooleanValue(false), R0);
+    EmitReturnFromIC(masm);
+
+    // Failure case - jump to next stub
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
+
+//
+// ToBool_Double
+//
+
+bool
+ICToBool_Double::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    Label failure, ifTrue;
+    masm.branchTestDouble(Assembler::NotEqual, R0, &failure);
+    masm.unboxDouble(R0, FloatReg0);
+    Assembler::Condition cond = masm.testDoubleTruthy(true, FloatReg0);
+    masm.j(cond, &ifTrue);
+
+    masm.moveValue(BooleanValue(false), R0);
+    EmitReturnFromIC(masm);
+
+    masm.bind(&ifTrue);
+    masm.moveValue(BooleanValue(true), R0);
+    EmitReturnFromIC(masm);
+
+    // Failure case - jump to next stub
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
+
+//
+// ToBool_Object
+//
+
+bool
+ICToBool_Object::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    Label failure, ifFalse, slowPath;
+    masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+
+    Register objReg = masm.extractObject(R0, ExtractTemp0);
+    Register scratch = R1.scratchReg();
+    Assembler::Condition cond = masm.branchTestObjectTruthy(false, objReg, scratch, &slowPath);
+    masm.j(cond, &ifFalse);
+
+    // If the object doesn't emulate undefined, it evaluates to true.
+    masm.moveValue(BooleanValue(true), R0);
+    EmitReturnFromIC(masm);
+
+    masm.bind(&ifFalse);
+    masm.moveValue(BooleanValue(false), R0);
+    EmitReturnFromIC(masm);
+
+    masm.bind(&slowPath);
+    masm.setupUnalignedABICall(1, scratch);
+    masm.passABIArg(objReg);
+    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ObjectEmulatesUndefined));
+    masm.xor32(Imm32(1), ReturnReg);
+    masm.tagValue(JSVAL_TYPE_BOOLEAN, ReturnReg, R0);
+    EmitReturnFromIC(masm);
+
+    // Failure case - jump to next stub
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
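+
+// On the slow path, ObjectEmulatesUndefined returns 1 when the object should
+// behave like undefined (and therefore be falsy), so the result is inverted
+// with xor32(Imm32(1)) before being boxed as the boolean answer.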
+
+//
+// ToNumber_Fallback
+//
+
+static bool
+DoToNumberFallback(JSContext *cx, ICToNumber_Fallback *stub, HandleValue arg, MutableHandleValue ret)
+{
+    FallbackICSpew(cx, stub, "ToNumber");
+    ret.set(arg);
+    return ToNumber(cx, ret.address());
+}
+
+typedef bool (*DoToNumberFallbackFn)(JSContext *, ICToNumber_Fallback *, HandleValue, MutableHandleValue);
+static const VMFunction DoToNumberFallbackInfo =
+    FunctionInfo<DoToNumberFallbackFn>(DoToNumberFallback, PopValues(1));
+
+bool
+ICToNumber_Fallback::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    JS_ASSERT(R0 == JSReturnOperand);
+
+    // Restore the tail call register.
+    EmitRestoreTailCallReg(masm);
+
+    // Ensure stack is fully synced for the expression decompiler.
+    masm.pushValue(R0);
+
+    // Push arguments.
+    masm.pushValue(R0);
+    masm.push(BaselineStubReg);
+
+    return tailCallVM(DoToNumberFallbackInfo, masm);
+}
+
+//
+// BinaryArith_Fallback
+//
+
+// Disable PGO (see bug 851490).
+#if defined(_MSC_VER)
+# pragma optimize("g", off)
+#endif
+static bool
+DoBinaryArithFallback(JSContext *cx, BaselineFrame *frame, ICBinaryArith_Fallback *stub,
+                      HandleValue lhs, HandleValue rhs, MutableHandleValue ret)
+{
+    RootedScript script(cx, frame->script());
+    jsbytecode *pc = stub->icEntry()->pc(script);
+    JSOp op = JSOp(*pc);
+    FallbackICSpew(cx, stub, "BinaryArith(%s,%d,%d)", js_CodeName[op],
+            int(lhs.isDouble() ? JSVAL_TYPE_DOUBLE : lhs.extractNonDoubleType()),
+            int(rhs.isDouble() ? JSVAL_TYPE_DOUBLE : rhs.extractNonDoubleType()));
+
+    // Don't pass lhs/rhs directly; we need the original values when
+    // generating stubs.
+    RootedValue lhsCopy(cx, lhs);
+    RootedValue rhsCopy(cx, rhs);
+
+    // Perform the arith operation.
+    switch (op) {
+      case JSOP_ADD:
+        // Do an add.
+        if (!AddValues(cx, script, pc, &lhsCopy, &rhsCopy, ret.address()))
+            return false;
+        break;
+      case JSOP_SUB:
+        if (!SubValues(cx, script, pc, &lhsCopy, &rhsCopy, ret.address()))
+            return false;
+        break;
+      case JSOP_MUL:
+        if (!MulValues(cx, script, pc, &lhsCopy, &rhsCopy, ret.address()))
+            return false;
+        break;
+      case JSOP_DIV:
+        if (!DivValues(cx, script, pc, &lhsCopy, &rhsCopy, ret.address()))
+            return false;
+        break;
+      case JSOP_MOD:
+        if (!ModValues(cx, script, pc, &lhsCopy, &rhsCopy, ret.address()))
+            return false;
+        break;
+      case JSOP_BITOR: {
+        int32_t result;
+        if (!BitOr(cx, lhs, rhs, &result))
+            return false;
+        ret.setInt32(result);
+        break;
+      }
+      case JSOP_BITXOR: {
+        int32_t result;
+        if (!BitXor(cx, lhs, rhs, &result))
+            return false;
+        ret.setInt32(result);
+        break;
+      }
+      case JSOP_BITAND: {
+        int32_t result;
+        if (!BitAnd(cx, lhs, rhs, &result))
+            return false;
+        ret.setInt32(result);
+        break;
+      }
+      case JSOP_LSH: {
+        int32_t result;
+        if (!BitLsh(cx, lhs, rhs, &result))
+            return false;
+        ret.setInt32(result);
+        break;
+      }
+      case JSOP_RSH: {
+        int32_t result;
+        if (!BitRsh(cx, lhs, rhs, &result))
+            return false;
+        ret.setInt32(result);
+        break;
+      }
+      case JSOP_URSH: {
+        if (!UrshOperation(cx, script, pc, lhs, rhs, ret.address()))
+            return false;
+        break;
+      }
+      default:
+        JS_NOT_REACHED("Unhandled baseline arith op");
+        return false;
+    }
+
+    // Check to see if a new stub should be generated.
+    if (stub->numOptimizedStubs() >= ICBinaryArith_Fallback::MAX_OPTIMIZED_STUBS) {
+        // TODO: Discard all stubs in this IC and replace with inert megamorphic stub.
+        // But for now we just bail.
+        return true;
+    }
+
+    // Handle string concat.
+    if (op == JSOP_ADD) {
+        if (lhs.isString() && rhs.isString()) {
+            IonSpew(IonSpew_BaselineIC, "  Generating %s(String, String) stub", js_CodeName[op]);
+            JS_ASSERT(ret.isString());
+            ICBinaryArith_StringConcat::Compiler compiler(cx);
+            ICStub *strcatStub = compiler.getStub(compiler.getStubSpace(script));
+            if (!strcatStub)
+                return false;
+            stub->addNewStub(strcatStub);
+            return true;
+        }
+
+        if ((lhs.isString() && rhs.isObject()) || (lhs.isObject() && rhs.isString())) {
+            IonSpew(IonSpew_BaselineIC, "  Generating %s(%s, %s) stub", js_CodeName[op],
+                    lhs.isString() ? "String" : "Object",
+                    lhs.isString() ? "Object" : "String");
+            JS_ASSERT(ret.isString());
+            ICBinaryArith_StringObjectConcat::Compiler compiler(cx, lhs.isString());
+            ICStub *strcatStub = compiler.getStub(compiler.getStubSpace(script));
+            if (!strcatStub)
+                return false;
+            stub->addNewStub(strcatStub);
+            return true;
+        }
+    }
+
+    if (((lhs.isBoolean() && (rhs.isBoolean() || rhs.isInt32())) ||
+         (rhs.isBoolean() && (lhs.isBoolean() || lhs.isInt32()))) &&
+        (op == JSOP_ADD || op == JSOP_SUB || op == JSOP_BITOR || op == JSOP_BITAND ||
+         op == JSOP_BITXOR))
+    {
+        IonSpew(IonSpew_BaselineIC, "  Generating %s(%s, %s) stub", js_CodeName[op],
+                lhs.isBoolean() ? "Boolean" : "Int32", rhs.isBoolean() ? "Boolean" : "Int32");
+        ICBinaryArith_BooleanWithInt32::Compiler compiler(cx, op, lhs.isBoolean(), rhs.isBoolean());
+        ICStub *arithStub = compiler.getStub(compiler.getStubSpace(script));
+        if (!arithStub)
+            return false;
+        stub->addNewStub(arithStub);
+        return true;
+    }
+
+    // Handle only int32 or double.
+    if (!lhs.isNumber() || !rhs.isNumber())
+        return true;
+
+    JS_ASSERT(ret.isNumber());
+
+    if (lhs.isDouble() || rhs.isDouble() || ret.isDouble()) {
+        switch (op) {
+          case JSOP_ADD:
+          case JSOP_SUB:
+          case JSOP_MUL:
+          case JSOP_DIV:
+          case JSOP_MOD: {
+            // Unlink int32 stubs; it's faster to always use the double stub.
+            stub->unlinkStubsWithKind(cx, ICStub::BinaryArith_Int32);
+            IonSpew(IonSpew_BaselineIC, "  Generating %s(Double, Double) stub", js_CodeName[op]);
+
+            ICBinaryArith_Double::Compiler compiler(cx, op);
+            ICStub *doubleStub = compiler.getStub(compiler.getStubSpace(script));
+            if (!doubleStub)
+                return false;
+            stub->addNewStub(doubleStub);
+            return true;
+          }
+          default:
+            break;
+        }
+    }
+
+    // TODO: unlink previous !allowDouble stub.
+    if (lhs.isInt32() && rhs.isInt32()) {
+        bool allowDouble = ret.isDouble();
+        IonSpew(IonSpew_BaselineIC, "  Generating %s(Int32, Int32) stub", js_CodeName[op]);
+        ICBinaryArith_Int32::Compiler compilerInt32(cx, op, allowDouble);
+        ICStub *int32Stub = compilerInt32.getStub(compilerInt32.getStubSpace(script));
+        if (!int32Stub)
+            return false;
+        stub->addNewStub(int32Stub);
+        return true;
+    }
+
+    // Handle Double <BITOP> Int32 or Int32 <BITOP> Double case.
+    if (((lhs.isDouble() && rhs.isInt32()) || (lhs.isInt32() && rhs.isDouble())) &&
+        ret.isInt32())
+    {
+        switch (op) {
+          case JSOP_BITOR:
+          case JSOP_BITXOR:
+          case JSOP_BITAND: {
+            IonSpew(IonSpew_BaselineIC, "  Generating %s(%s, %s) stub", js_CodeName[op],
+                        lhs.isDouble() ? "Double" : "Int32",
+                        lhs.isDouble() ? "Int32" : "Double");
+            ICBinaryArith_DoubleWithInt32::Compiler compiler(cx, op, lhs.isDouble());
+            ICStub *optStub = compiler.getStub(compiler.getStubSpace(script));
+            if (!optStub)
+                return false;
+            stub->addNewStub(optStub);
+            return true;
+          }
+          default:
+            break;
+        }
+    }
+
+    return true;
+}
+#if defined(_MSC_VER)
+# pragma optimize("g", on)
+#endif
+
+typedef bool (*DoBinaryArithFallbackFn)(JSContext *, BaselineFrame *, ICBinaryArith_Fallback *,
+                                        HandleValue, HandleValue, MutableHandleValue);
+static const VMFunction DoBinaryArithFallbackInfo =
+    FunctionInfo<DoBinaryArithFallbackFn>(DoBinaryArithFallback, PopValues(2));
+
+bool
+ICBinaryArith_Fallback::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    JS_ASSERT(R0 == JSReturnOperand);
+
+    // Restore the tail call register.
+    EmitRestoreTailCallReg(masm);
+
+    // Ensure stack is fully synced for the expression decompiler.
+    masm.pushValue(R0);
+    masm.pushValue(R1);
+
+    // Push arguments.
+    masm.pushValue(R1);
+    masm.pushValue(R0);
+    masm.push(BaselineStubReg);
+    masm.pushBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+
+    return tailCallVM(DoBinaryArithFallbackInfo, masm);
+}
+
+static bool
+DoConcatStrings(JSContext *cx, HandleValue lhs, HandleValue rhs, MutableHandleValue res)
+{
+    JS_ASSERT(lhs.isString());
+    JS_ASSERT(rhs.isString());
+    RawString lstr = lhs.toString();
+    RawString rstr = rhs.toString();
+    RawString result = ConcatStrings<NoGC>(cx, lstr, rstr);
+    if (result) {
+        res.set(StringValue(result));
+        return true;
+    }
+
+    RootedString rootedl(cx, lstr), rootedr(cx, rstr);
+    result = ConcatStrings<CanGC>(cx, rootedl, rootedr);
+    if (!result)
+        return false;
+
+    res.set(StringValue(result));
+    return true;
+}
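+
+// The NoGC attempt above may fail without reporting an error (e.g. when the
+// result string cannot be allocated without collecting); only then are the
+// operands rooted and the concatenation retried with the CanGC variant.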
+
+typedef bool (*DoConcatStringsFn)(JSContext *, HandleValue, HandleValue, MutableHandleValue);
+static const VMFunction DoConcatStringsInfo = FunctionInfo<DoConcatStringsFn>(DoConcatStrings);
+
+bool
+ICBinaryArith_StringConcat::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    Label failure;
+    masm.branchTestString(Assembler::NotEqual, R0, &failure);
+    masm.branchTestString(Assembler::NotEqual, R1, &failure);
+
+    // Restore the tail call register.
+    EmitRestoreTailCallReg(masm);
+
+    masm.pushValue(R1);
+    masm.pushValue(R0);
+    if (!tailCallVM(DoConcatStringsInfo, masm))
+        return false;
+
+    // Failure case - jump to next stub
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
+
+static RawString
+ConvertObjectToStringForConcat(JSContext *cx, HandleValue obj)
+{
+    JS_ASSERT(obj.isObject());
+    RootedValue rootedObj(cx, obj);
+    if (!ToPrimitive(cx, &rootedObj))
+        return NULL;
+    return ToString<CanGC>(cx, rootedObj);
+}
+
+static bool
+DoConcatStringObject(JSContext *cx, bool lhsIsString, HandleValue lhs, HandleValue rhs,
+                     MutableHandleValue res)
+{
+    RawString lstr = NULL;
+    RawString rstr = NULL;
+    if (lhsIsString) {
+        // Convert rhs first.
+        JS_ASSERT(lhs.isString() && rhs.isObject());
+        rstr = ConvertObjectToStringForConcat(cx, rhs);
+        if (!rstr)
+            return false;
+
+        // lhs is already string.
+        lstr = lhs.toString();
+    } else {
+        JS_ASSERT(rhs.isString() && lhs.isObject());
+        // Convert lhs first.
+        lstr = ConvertObjectToStringForConcat(cx, lhs);
+        if (!lstr)
+            return false;
+
+        // rhs is already string.
+        rstr = rhs.toString();
+    }
+
+    JSString *str = ConcatStrings<NoGC>(cx, lstr, rstr);
+    if (!str) {
+        RootedString nlstr(cx, lstr), nrstr(cx, rstr);
+        str = ConcatStrings<CanGC>(cx, nlstr, nrstr);
+        if (!str)
+            return false;
+    }
+
+    // Technically we need to call TypeScript::MonitorString for this PC;
+    // however, it was already called when this stub was attached, so it is
+    // safe to skip here.
+
+    res.setString(str);
+    return true;
+}
+
+typedef bool (*DoConcatStringObjectFn)(JSContext *, bool lhsIsString, HandleValue, HandleValue,
+                                       MutableHandleValue);
+static const VMFunction DoConcatStringObjectInfo =
+    FunctionInfo<DoConcatStringObjectFn>(DoConcatStringObject, PopValues(2));
+
+bool
+ICBinaryArith_StringObjectConcat::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    Label failure;
+    if (lhsIsString_) {
+        masm.branchTestString(Assembler::NotEqual, R0, &failure);
+        masm.branchTestObject(Assembler::NotEqual, R1, &failure);
+    } else {
+        masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+        masm.branchTestString(Assembler::NotEqual, R1, &failure);
+    }
+
+    // Restore the tail call register.
+    EmitRestoreTailCallReg(masm);
+
+    // Sync for the decompiler.
+    masm.pushValue(R0);
+    masm.pushValue(R1);
+
+    // Push arguments.
+    masm.pushValue(R1);
+    masm.pushValue(R0);
+    masm.push(Imm32(lhsIsString_));
+    if (!tailCallVM(DoConcatStringObjectInfo, masm))
+        return false;
+
+    // Failure case - jump to next stub
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
+
+bool
+ICBinaryArith_Double::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    Label failure;
+    masm.ensureDouble(R0, FloatReg0, &failure);
+    masm.ensureDouble(R1, FloatReg1, &failure);
+
+    switch (op) {
+      case JSOP_ADD:
+        masm.addDouble(FloatReg1, FloatReg0);
+        break;
+      case JSOP_SUB:
+        masm.subDouble(FloatReg1, FloatReg0);
+        break;
+      case JSOP_MUL:
+        masm.mulDouble(FloatReg1, FloatReg0);
+        break;
+      case JSOP_DIV:
+        masm.divDouble(FloatReg1, FloatReg0);
+        break;
+      case JSOP_MOD:
+        masm.setupUnalignedABICall(2, R0.scratchReg());
+        masm.passABIArg(FloatReg0);
+        masm.passABIArg(FloatReg1);
+        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, NumberMod), MacroAssembler::DOUBLE);
+        JS_ASSERT(ReturnFloatReg == FloatReg0);
+        break;
+      default:
+        JS_NOT_REACHED("Unexpected op");
+        return false;
+    }
+
+    masm.boxDouble(FloatReg0, R0);
+    EmitReturnFromIC(masm);
+
+    // Failure case - jump to next stub
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
+
+bool
+ICBinaryArith_BooleanWithInt32::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    Label failure;
+    if (lhsIsBool_)
+        masm.branchTestBoolean(Assembler::NotEqual, R0, &failure);
+    else
+        masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+
+    if (rhsIsBool_)
+        masm.branchTestBoolean(Assembler::NotEqual, R1, &failure);
+    else
+        masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+    Register lhsReg = lhsIsBool_ ? masm.extractBoolean(R0, ExtractTemp0)
+                                 : masm.extractInt32(R0, ExtractTemp0);
+    Register rhsReg = rhsIsBool_ ? masm.extractBoolean(R1, ExtractTemp1)
+                                 : masm.extractInt32(R1, ExtractTemp1);
+
+    JS_ASSERT(op_ == JSOP_ADD || op_ == JSOP_SUB ||
+              op_ == JSOP_BITOR || op_ == JSOP_BITXOR || op_ == JSOP_BITAND);
+
+    switch (op_) {
+      case JSOP_ADD: {
+        Label fixOverflow;
+
+        masm.add32(rhsReg, lhsReg);
+        masm.j(Assembler::Overflow, &fixOverflow);
+        masm.tagValue(JSVAL_TYPE_INT32, lhsReg, R0);
+        EmitReturnFromIC(masm);
+
+        masm.bind(&fixOverflow);
+        masm.sub32(rhsReg, lhsReg);
+        masm.jump(&failure);
+        break;
+      }
+      case JSOP_SUB: {
+        Label fixOverflow;
+
+        masm.sub32(rhsReg, lhsReg);
+        masm.j(Assembler::Overflow, &fixOverflow);
+        masm.tagValue(JSVAL_TYPE_INT32, lhsReg, R0);
+        EmitReturnFromIC(masm);
+
+        masm.bind(&fixOverflow);
+        masm.add32(rhsReg, lhsReg);
+        masm.jump(&failure);
+        break;
+      }
+      case JSOP_BITOR: {
+        masm.orPtr(rhsReg, lhsReg);
+        masm.tagValue(JSVAL_TYPE_INT32, lhsReg, R0);
+        EmitReturnFromIC(masm);
+        break;
+      }
+      case JSOP_BITXOR: {
+        masm.xorPtr(rhsReg, lhsReg);
+        masm.tagValue(JSVAL_TYPE_INT32, lhsReg, R0);
+        EmitReturnFromIC(masm);
+        break;
+      }
+      case JSOP_BITAND: {
+        masm.andPtr(rhsReg, lhsReg);
+        masm.tagValue(JSVAL_TYPE_INT32, lhsReg, R0);
+        EmitReturnFromIC(masm);
+        break;
+      }
+      default:
+        JS_NOT_REACHED("Unhandled op for BinaryArith_BooleanWithInt32.");
+        return false;
+    }
+
+    // Failure case - jump to next stub
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
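+
+// Note the overflow fix-up in the ADD/SUB cases above: if the 32-bit
+// operation overflows, it is undone (sub32 after add32, and vice versa) so
+// that the extracted payload register, which may alias the input Value
+// register, again holds the original operand before we jump to the failure
+// path and fall through to the next stub.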
+
+bool
+ICBinaryArith_DoubleWithInt32::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    JS_ASSERT(op == JSOP_BITOR || op == JSOP_BITAND || op == JSOP_BITXOR);
+
+    Label failure;
+    Register intReg;
+    Register scratchReg;
+    if (lhsIsDouble_) {
+        masm.branchTestDouble(Assembler::NotEqual, R0, &failure);
+        masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+        intReg = masm.extractInt32(R1, ExtractTemp0);
+        masm.unboxDouble(R0, FloatReg0);
+        scratchReg = R0.scratchReg();
+    } else {
+        masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+        masm.branchTestDouble(Assembler::NotEqual, R1, &failure);
+        intReg = masm.extractInt32(R0, ExtractTemp0);
+        masm.unboxDouble(R1, FloatReg0);
+        scratchReg = R1.scratchReg();
+    }
+
+    // Truncate the double to an int32.
+    {
+        Label doneTruncate;
+        Label truncateABICall;
+        masm.branchTruncateDouble(FloatReg0, scratchReg, &truncateABICall);
+        masm.jump(&doneTruncate);
+
+        masm.bind(&truncateABICall);
+        masm.push(intReg);
+        masm.setupUnalignedABICall(1, scratchReg);
+        masm.passABIArg(FloatReg0);
+        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, js::ToInt32));
+        masm.storeCallResult(scratchReg);
+        masm.pop(intReg);
+
+        masm.bind(&doneTruncate);
+    }
+
+    Register intReg2 = scratchReg;
+    // All handled ops commute, so no need to worry about ordering.
+    switch (op) {
+      case JSOP_BITOR:
+        masm.orPtr(intReg, intReg2);
+        break;
+      case JSOP_BITXOR:
+        masm.xorPtr(intReg, intReg2);
+        break;
+      case JSOP_BITAND:
+        masm.andPtr(intReg, intReg2);
+        break;
+      default:
+        JS_NOT_REACHED("Unhandled op for BinaryArith_DoubleWithInt32.");
+        return false;
+    }
+    masm.tagValue(JSVAL_TYPE_INT32, intReg2, R0);
+    EmitReturnFromIC(masm);
+
+    // Failure case - jump to next stub
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
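+
+// The truncation above first tries the inline branchTruncateDouble fast path
+// and only falls back to an ABI call to js::ToInt32 for doubles it cannot
+// truncate directly; intReg is pushed and popped around that call because the
+// call may clobber it.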
+
+//
+// UnaryArith_Fallback
+//
+
+// Disable PGO (see bug 851490).
+#if defined(_MSC_VER)
+# pragma optimize("g", off)
+#endif
+static bool
+DoUnaryArithFallback(JSContext *cx, BaselineFrame *frame, ICUnaryArith_Fallback *stub,
+                     HandleValue val, MutableHandleValue res)
+{
+    RootedScript script(cx, frame->script());
+    jsbytecode *pc = stub->icEntry()->pc(script);
+    JSOp op = JSOp(*pc);
+    FallbackICSpew(cx, stub, "UnaryArith(%s)", js_CodeName[op]);
+
+    switch (op) {
+      case JSOP_BITNOT: {
+        int32_t result;
+        if (!BitNot(cx, val, &result))
+            return false;
+        res.setInt32(result);
+        break;
+      }
+      case JSOP_NEG:
+        if (!NegOperation(cx, script, pc, val, res))
+            return false;
+        break;
+      default:
+        JS_NOT_REACHED("Unexpected op");
+        return false;
+    }
+
+    if (stub->numOptimizedStubs() >= ICUnaryArith_Fallback::MAX_OPTIMIZED_STUBS) {
+        // TODO: Discard/replace stubs.
+        return true;
+    }
+
+    if (val.isInt32() && res.isInt32()) {
+        IonSpew(IonSpew_BaselineIC, "  Generating %s(Int32 => Int32) stub", js_CodeName[op]);
+        ICUnaryArith_Int32::Compiler compiler(cx, op);
+        ICStub *int32Stub = compiler.getStub(compiler.getStubSpace(script));
+        if (!int32Stub)
+            return false;
+        stub->addNewStub(int32Stub);
+        return true;
+    }
+
+    if (val.isNumber() && res.isNumber() && op == JSOP_NEG) {
+        IonSpew(IonSpew_BaselineIC, "  Generating %s(Number => Number) stub", js_CodeName[op]);
+        // Unlink int32 stubs; the double stub handles both cases and TI specializes for both.
+        stub->unlinkStubsWithKind(cx, ICStub::UnaryArith_Int32);
+
+        ICUnaryArith_Double::Compiler compiler(cx, op);
+        ICStub *doubleStub = compiler.getStub(compiler.getStubSpace(script));
+        if (!doubleStub)
+            return false;
+        stub->addNewStub(doubleStub);
+        return true;
+    }
+
+    return true;
+}
+#if defined(_MSC_VER)
+# pragma optimize("g", on)
+#endif
+
+typedef bool (*DoUnaryArithFallbackFn)(JSContext *, BaselineFrame *, ICUnaryArith_Fallback *,
+                                       HandleValue, MutableHandleValue);
+static const VMFunction DoUnaryArithFallbackInfo =
+    FunctionInfo<DoUnaryArithFallbackFn>(DoUnaryArithFallback, PopValues(1));
+
+bool
+ICUnaryArith_Fallback::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    JS_ASSERT(R0 == JSReturnOperand);
+
+    // Restore the tail call register.
+    EmitRestoreTailCallReg(masm);
+
+    // Ensure stack is fully synced for the expression decompiler.
+    masm.pushValue(R0);
+
+    // Push arguments.
+    masm.pushValue(R0);
+    masm.push(BaselineStubReg);
+    masm.pushBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+
+    return tailCallVM(DoUnaryArithFallbackInfo, masm);
+}
+
+bool
+ICUnaryArith_Double::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    Label failure;
+    masm.ensureDouble(R0, FloatReg0, &failure);
+
+    JS_ASSERT(op == JSOP_NEG);
+    masm.negateDouble(FloatReg0);
+    masm.boxDouble(FloatReg0, R0);
+
+    EmitReturnFromIC(masm);
+
+    // Failure case - jump to next stub
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
+
+//
+// GetElem_Fallback
+//
+
+static void
+GetFixedOrDynamicSlotOffset(HandleObject obj, uint32_t slot,
+                            bool *isFixed, uint32_t *offset)
+{
+    JS_ASSERT(isFixed);
+    JS_ASSERT(offset);
+    *isFixed = obj->isFixedSlot(slot);
+    *offset = *isFixed ? JSObject::getFixedSlotOffset(slot)
+                       : obj->dynamicSlotIndex(slot) * sizeof(Value);
+}
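+
+// Fixed slots live inline in the JSObject, so the returned offset is relative
+// to the object pointer itself; dynamic slots live in a separately allocated
+// slots array, so the offset is relative to that array.  Callers use *isFixed
+// to decide which base pointer the generated load or store should use.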
+
+// Look up a property's shape on an object, taking care never to perform any
+// effectful operations.  If this procedure does not yield a shape, that must
+// not be taken as evidence that the property does not exist on the object.
+static bool
+EffectlesslyLookupProperty(JSContext *cx, HandleObject obj, HandlePropertyName name,
+                           MutableHandleObject holder, MutableHandleShape shape)
+{
+    if (obj->hasIdempotentProtoChain()) {
+        if (!JSObject::lookupProperty(cx, obj, name, holder, shape))
+            return false;
+    } else if (obj->isNative()) {
+        shape.set(obj->nativeLookup(cx, NameToId(name)));
+        if (shape)
+            holder.set(obj);
+    } else {
+        shape.set(NULL);
+        holder.set(NULL);
+    }
+    return true;
+}
+
+static bool
+IsCacheableProtoChain(JSObject *obj, JSObject *holder)
+{
+    JS_ASSERT(obj->isNative());
+
+    // Don't handle objects which require a prototype guard. This should
+    // be uncommon so handling it is likely not worth the complexity.
+    if (obj->hasUncacheableProto())
+        return false;
+
+    while (obj != holder) {
+        // We cannot assume that we find the holder object on the prototype
+        // chain and must check for null proto. The prototype chain can be
+        // altered during the lookupProperty call.
+        JSObject *proto = obj->getProto();
+        if (!proto || !proto->isNative())
+            return false;
+
+        if (proto->hasUncacheableProto())
+            return false;
+
+        obj = proto;
+    }
+    return true;
+}
+
+static bool
+IsCacheableGetPropReadSlot(JSObject *obj, JSObject *holder, Shape *shape)
+{
+    if (!shape || !IsCacheableProtoChain(obj, holder))
+        return false;
+
+    if (!shape->hasSlot() || !shape->hasDefaultGetter())
+        return false;
+
+    return true;
+}
+
+static bool
+IsCacheableGetPropCall(JSObject *obj, JSObject *holder, Shape *shape, bool *isScripted)
+{
+    JS_ASSERT(isScripted);
+
+    // Currently we only optimize getter calls for getters bound on prototypes.
+    if (obj == holder)
+        return false;
+
+    if (!shape || !IsCacheableProtoChain(obj, holder))
+        return false;
+
+    if (shape->hasSlot() || shape->hasDefaultGetter())
+        return false;
+
+    if (!shape->hasGetterValue())
+        return false;
+
+    if (!shape->getterValue().isObject() || !shape->getterObject()->isFunction())
+        return false;
+
+    JSFunction *func = shape->getterObject()->toFunction();
+    if (func->isNative()) {
+        *isScripted = false;
+        return true;
+    }
+
+    if (!func->hasScript())
+        return false;
+
+    JSScript *script = func->nonLazyScript();
+    if (!script->hasIonScript() && !script->hasBaselineScript())
+        return false;
+
+    *isScripted = true;
+    return true;
+}
+
+static bool
+IsCacheableSetPropWriteSlot(JSObject *obj, Shape *oldShape, JSObject *holder, Shape *shape)
+{
+    if (!shape)
+        return false;
+
+    // Object shape must not have changed during the property set.
+    if (obj->lastProperty() != oldShape)
+        return false;
+
+    // Currently we only optimize direct writes.
+    if (obj != holder)
+        return false;
+
+    if (!shape->hasSlot() || !shape->hasDefaultSetter() || !shape->writable())
+        return false;
+
+    return true;
+}
+
+static bool
+IsCacheableSetPropAddSlot(JSContext *cx, HandleObject obj, HandleShape oldShape, uint32_t oldSlots,
+                          HandleId id, HandleObject holder, HandleShape shape,
+                          size_t *protoChainDepth)
+{
+    if (!shape)
+        return false;
+
+    // Property must be set directly on the object and be its last added property.
+    if (obj != holder || shape != obj->lastProperty())
+        return false;
+
+    // The object must be extensible, and oldShape must be the immediate parent of the new shape.
+    if (!obj->isExtensible() || obj->lastProperty()->previous() != oldShape)
+        return false;
+
+    // Basic shape checks.
+    if (shape->inDictionary() || !shape->hasSlot() || !shape->hasDefaultSetter() ||
+        !shape->writable())
+    {
+        return false;
+    }
+
+    // If the object has a non-default resolve hook, don't inline.
+    if (obj->getClass()->resolve != JS_ResolveStub)
+        return false;
+
+    size_t chainDepth = 0;
+    // Walk up the prototype chain and ensure that all prototypes are native
+    // and that none defines this property with a non-default setter.
+    for (JSObject *proto = obj->getProto(); proto; proto = proto->getProto()) {
+        chainDepth++;
+        // If the prototype is non-native, don't optimize.
+        if (!proto->isNative())
+            return false;
+
+        // If the prototype defines this property with a non-default setter, don't optimize.
+        Shape *protoShape = proto->nativeLookup(cx, id);
+        if (protoShape && !protoShape->hasDefaultSetter())
+            return false;
+
+        // Otherwise, if there's no such property, watch out for a resolve hook
+        // that would need to be invoked and thus prevent inlining of property
+        // addition.
+        if (proto->getClass()->resolve != JS_ResolveStub)
+            return false;
+    }
+
+    // Only add an IC entry if the dynamic slots didn't change when the shapes
+    // changed.  We need to ensure that a shape change for a subsequent object
+    // won't involve reallocating the slot array.
+    if (obj->numDynamicSlots() != oldSlots)
+        return false;
+
+    *protoChainDepth = chainDepth;
+    return true;
+}
+
+static bool
+IsCacheableSetPropCall(JSObject *obj, JSObject *holder, Shape *shape, bool *isScripted)
+{
+    JS_ASSERT(isScripted);
+
+    // Currently we only optimize setter calls for setters bound on prototypes.
+    if (obj == holder)
+        return false;
+
+    if (!shape || !IsCacheableProtoChain(obj, holder))
+        return false;
+
+    if (shape->hasSlot() || shape->hasDefaultSetter())
+        return false;
+
+    if (!shape->hasSetterValue())
+        return false;
+
+    if (!shape->setterValue().isObject() || !shape->setterObject()->isFunction())
+        return false;
+
+    JSFunction *func = shape->setterObject()->toFunction();
+    if (func->isNative()) {
+        *isScripted = false;
+        return true;
+    }
+
+    if (!func->hasScript())
+        return false;
+
+    JSScript *script = func->nonLazyScript();
+    if (!script->hasIonScript() && !script->hasBaselineScript())
+        return false;
+
+    *isScripted = true;
+    return true;
+}
+
+static bool
+TypedArrayGetElemStubExists(ICGetElem_Fallback *stub, HandleObject obj)
+{
+    for (ICStubConstIterator iter = stub->beginChainConst(); !iter.atEnd(); iter++) {
+        if (!iter->isGetElem_TypedArray())
+            continue;
+        if (obj->lastProperty() == iter->toGetElem_TypedArray()->shape())
+            return true;
+    }
+    return false;
+}
+
+static bool
+TryAttachNativeGetElemStub(JSContext *cx, HandleScript script, ICGetElem_Fallback *stub,
+                           HandleObject obj, HandleValue key)
+{
+    RootedId id(cx);
+    RootedValue idval(cx);
+    if (!FetchElementId(cx, obj, key, &id, &idval))
+        return false;
+
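+    // Only attach a stub for non-index atom keys here; integer-like keys are
+    // handled by the dense and typed-array paths in the caller.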
+    uint32_t dummy;
+    if (!JSID_IS_ATOM(id) || JSID_TO_ATOM(id)->isIndex(&dummy))
+        return true;
+
+    RootedPropertyName propName(cx, JSID_TO_ATOM(id)->asPropertyName());
+
+    RootedShape shape(cx);
+    RootedObject holder(cx);
+    if (!EffectlesslyLookupProperty(cx, obj, propName, &holder, &shape))
+        return false;
+
+    if (!IsCacheableGetPropReadSlot(obj, holder, shape))
+        return true;
+
+    bool isFixedSlot;
+    uint32_t offset;
+    GetFixedOrDynamicSlotOffset(holder, shape->slot(), &isFixedSlot, &offset);
+
+    ICStub *monitorStub = stub->fallbackMonitorStub()->firstMonitorStub();
+    ICStub::Kind kind = (obj == holder) ? ICStub::GetElem_Native : ICStub::GetElem_NativePrototype;
+
+    IonSpew(IonSpew_BaselineIC, "  Generating GetElem(Native %s) stub (obj=%p, shape=%p, holder=%p, holderShape=%p)",
+                (obj == holder) ? "direct" : "prototype",
+                obj.get(), obj->lastProperty(), holder.get(), holder->lastProperty());
+
+    ICGetElemNativeCompiler compiler(cx, kind, monitorStub, obj, holder, key,
+                                     isFixedSlot, offset);
+    ICStub *newStub = compiler.getStub(compiler.getStubSpace(script));
+    if (!newStub)
+        return false;
+
+    stub->addNewStub(newStub);
+    return true;
+}
+
+static bool
+TryAttachGetElemStub(JSContext *cx, HandleScript script, ICGetElem_Fallback *stub,
+                     HandleValue lhs, HandleValue rhs, HandleValue res)
+{
+    // Check for String[i] => Char accesses.
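+    // e.g. "foo"[1], which yields the single-character string "o".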
+    if (lhs.isString() && rhs.isInt32() && res.isString() &&
+        !stub->hasStub(ICStub::GetElem_String))
+    {
+        IonSpew(IonSpew_BaselineIC, "  Generating GetElem(String[Int32]) stub");
+        ICGetElem_String::Compiler compiler(cx);
+        ICStub *stringStub = compiler.getStub(compiler.getStubSpace(script));
+        if (!stringStub)
+            return false;
+
+        stub->addNewStub(stringStub);
+        return true;
+    }
+
+    // Otherwise, GetElem is only optimized on objects.
+    if (!lhs.isObject())
+        return true;
+    RootedObject obj(cx, &lhs.toObject());
+
+    if (obj->isNative()) {
+        // Check for NativeObject[int] dense accesses.
+        if (rhs.isInt32()) {
+            IonSpew(IonSpew_BaselineIC, "  Generating GetElem(Native[Int32] dense) stub");
+            ICGetElem_Dense::Compiler compiler(cx, stub->fallbackMonitorStub()->firstMonitorStub(),
+                                               obj->lastProperty());
+            ICStub *denseStub = compiler.getStub(compiler.getStubSpace(script));
+            if (!denseStub)
+                return false;
+
+            stub->addNewStub(denseStub);
+            return true;
+        }
+
+        // Check for NativeObject[id] shape-optimizable accesses.
+        if (rhs.isString()) {
+            if (!TryAttachNativeGetElemStub(cx, script, stub, obj, rhs))
+                return false;
+        }
+    }
+
+    // Check for TypedArray[int] => Number accesses.
+    if (obj->isTypedArray() && rhs.isInt32() && res.isNumber() &&
+        !TypedArrayGetElemStubExists(stub, obj))
+    {
+        IonSpew(IonSpew_BaselineIC, "  Generating GetElem(TypedArray[Int32]) stub");
+        ICGetElem_TypedArray::Compiler compiler(cx, obj->lastProperty(), TypedArray::type(obj));
+        ICStub *typedArrayStub = compiler.getStub(compiler.getStubSpace(script));
+        if (!typedArrayStub)
+            return false;
+
+        stub->addNewStub(typedArrayStub);
+        return true;
+    }
+
+    return true;
+}
+
+static bool
+DoGetElemFallback(JSContext *cx, BaselineFrame *frame, ICGetElem_Fallback *stub, HandleValue lhs,
+                  HandleValue rhs, MutableHandleValue res)
+{
+    RootedScript script(cx, frame->script());
+    jsbytecode *pc = stub->icEntry()->pc(script);
+    JSOp op = JSOp(*pc);
+    FallbackICSpew(cx, stub, "GetElem(%s)", js_CodeName[op]);
+
+    JS_ASSERT(op == JSOP_GETELEM || op == JSOP_CALLELEM);
+
+    // Don't pass lhs directly; we need it when generating stubs.
+    RootedValue lhsCopy(cx, lhs);
+
+    bool isOptimizedArgs = false;
+    if (lhs.isMagic(JS_OPTIMIZED_ARGUMENTS)) {
+        // Handle optimized arguments[i] access.
+        if (!GetElemOptimizedArguments(cx, frame, &lhsCopy, rhs, res, &isOptimizedArgs))
+            return false;
+        if (isOptimizedArgs)
+            types::TypeScript::Monitor(cx, script, pc, res);
+    }
+
+    if (!isOptimizedArgs) {
+        if (!GetElementOperation(cx, op, &lhsCopy, rhs, res))
+            return false;
+        types::TypeScript::Monitor(cx, script, pc, res);
+    }
+
+    // Add a type monitor stub for the resulting value.
+    if (!stub->addMonitorStubForValue(cx, script, res))
+        return false;
+
+    if (stub->numOptimizedStubs() >= ICGetElem_Fallback::MAX_OPTIMIZED_STUBS) {
+        // TODO: Discard all stubs in this IC and replace with inert megamorphic stub.
+        // But for now we just bail.
+        return true;
+    }
+
+    // Try to attach an optimized stub.
+    if (!TryAttachGetElemStub(cx, script, stub, lhs, rhs, res))
+        return false;
+
+    return true;
+}
+
+typedef bool (*DoGetElemFallbackFn)(JSContext *, BaselineFrame *, ICGetElem_Fallback *,
+                                    HandleValue, HandleValue, MutableHandleValue);
+static const VMFunction DoGetElemFallbackInfo =
+    FunctionInfo<DoGetElemFallbackFn>(DoGetElemFallback, PopValues(2));
+
+bool
+ICGetElem_Fallback::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    JS_ASSERT(R0 == JSReturnOperand);
+
+    // Restore the tail call register.
+    EmitRestoreTailCallReg(masm);
+
+    // Ensure stack is fully synced for the expression decompiler.
+    masm.pushValue(R0);
+    masm.pushValue(R1);
+
+    // Push arguments.
+    masm.pushValue(R1);
+    masm.pushValue(R0);
+    masm.push(BaselineStubReg);
+    masm.pushBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+
+    return tailCallVM(DoGetElemFallbackInfo, masm);
+}
+
+//
+// GetElem_Native
+//
+
+bool
+ICGetElemNativeCompiler::generateStubCode(MacroAssembler &masm)
+{
+    Label failure;
+    Label failurePopR1;
+    bool popR1 = false;
+
+    masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+
+    Address idValAddr(BaselineStubReg, ICGetElemNativeStub::offsetOfIdval());
+    masm.branchTestValue(Assembler::NotEqual, idValAddr, R1, &failure);
+
+    GeneralRegisterSet regs(availableGeneralRegs(2));
+    Register scratchReg = regs.takeAny();
+
+    // Unbox object.
+    Register objReg = masm.extractObject(R0, ExtractTemp0);
+
+    // Check object shape.
+    masm.loadPtr(Address(objReg, JSObject::offsetOfShape()), scratchReg);
+    Address shapeAddr(BaselineStubReg, ICGetElemNativeStub::offsetOfShape());
+    masm.branchPtr(Assembler::NotEqual, shapeAddr, scratchReg, &failure);
+
+    Register holderReg;
+    if (obj_ == holder_) {
+        holderReg = objReg;
+    } else {
+        // Shape guard holder.
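+        // If no general registers are free, borrow R1's scratch register:
+        // save its contents on the stack, use it as holderReg, and restore it
+        // on both the success and failure paths below.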
+        if (regs.empty()) {
+            masm.push(R1.scratchReg());
+            popR1 = true;
+            holderReg = R1.scratchReg();
+        } else {
+            holderReg = regs.takeAny();
+        }
+        masm.loadPtr(Address(BaselineStubReg, ICGetElem_NativePrototype::offsetOfHolder()),
+                     holderReg);
+        masm.loadPtr(Address(BaselineStubReg, ICGetElem_NativePrototype::offsetOfHolderShape()),
+                     scratchReg);
+        masm.branchTestObjShape(Assembler::NotEqual, holderReg, scratchReg,
+                                popR1 ? &failurePopR1 : &failure);
+    }
+
+    // Load from object.
+    if (!isFixedSlot_)
+        masm.loadPtr(Address(holderReg, JSObject::offsetOfSlots()), holderReg);
+
+    masm.load32(Address(BaselineStubReg, ICGetElem_Native::offsetOfOffset()), scratchReg);
+    masm.loadValue(BaseIndex(holderReg, scratchReg, TimesOne), R0);
+
+    if (popR1)
+        masm.pop(R1.scratchReg());
+    // Enter type monitor IC to type-check result.
+    EmitEnterTypeMonitorIC(masm);
+
+    // Failure case - jump to next stub
+    if (popR1) {
+        masm.bind(&failurePopR1);
+        masm.pop(R1.scratchReg());
+    }
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
+
+//
+// GetElem_String
+//
+
+bool
+ICGetElem_String::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    Label failure;
+    masm.branchTestString(Assembler::NotEqual, R0, &failure);
+    masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+    GeneralRegisterSet regs(availableGeneralRegs(2));
+    Register scratchReg = regs.takeAny();
+
+    // Unbox string in R0.
+    Register str = masm.extractString(R0, ExtractTemp0);
+
+    // Load string lengthAndFlags
+    Address lengthAndFlagsAddr(str, JSString::offsetOfLengthAndFlags());
+    masm.loadPtr(lengthAndFlagsAddr, scratchReg);
+
+    // Check for non-linear strings.
+    masm.branchTest32(Assembler::Zero, scratchReg, Imm32(JSString::FLAGS_MASK), &failure);
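+    // (Ropes store zero in the flag bits; their characters are not contiguous
+    // in memory, so only linear strings can be handled here.)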
+
+    // Unbox key.
+    Register key = masm.extractInt32(R1, ExtractTemp1);
+
+    // Extract length and bounds check.
+    masm.rshiftPtr(Imm32(JSString::LENGTH_SHIFT), scratchReg);
+    masm.branch32(Assembler::BelowOrEqual, scratchReg, key, &failure);
+
+    // Get char code.
+    Address charsAddr(str, JSString::offsetOfChars());
+    masm.loadPtr(charsAddr, scratchReg);
+    masm.load16ZeroExtend(BaseIndex(scratchReg, key, TimesTwo, 0), scratchReg);
+
+    // Check if char code >= UNIT_STATIC_LIMIT.
+    masm.branch32(Assembler::AboveOrEqual, scratchReg, Imm32(StaticStrings::UNIT_STATIC_LIMIT),
+                  &failure);
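+    // Char codes below UNIT_STATIC_LIMIT have preallocated single-character
+    // strings, so the result can be fetched without allocating.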
+
+    // Load static string.
+    masm.movePtr(ImmWord(&cx->compartment->rt->staticStrings.unitStaticTable), str);
+    masm.loadPtr(BaseIndex(str, scratchReg, ScalePointer), str);
+
+    // Return.
+    masm.tagValue(JSVAL_TYPE_STRING, str, R0);
+    EmitReturnFromIC(masm);
+
+    // Failure case - jump to next stub
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
+
+//
+// GetElem_Dense
+//
+
+bool
+ICGetElem_Dense::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    Label failure;
+    masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+    masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+    GeneralRegisterSet regs(availableGeneralRegs(2));
+    Register scratchReg = regs.takeAny();
+
+    // Unbox R0 and shape guard.
+    Register obj = masm.extractObject(R0, ExtractTemp0);
+    masm.loadPtr(Address(BaselineStubReg, ICGetElem_Dense::offsetOfShape()), scratchReg);
+    masm.branchTestObjShape(Assembler::NotEqual, obj, scratchReg, &failure);
+
+    // Load obj->elements.
+    masm.loadPtr(Address(obj, JSObject::offsetOfElements()), scratchReg);
+
+    // Unbox key.
+    Register key = masm.extractInt32(R1, ExtractTemp1);
+
+    // Bounds check.
+    Address initLength(scratchReg, ObjectElements::offsetOfInitializedLength());
+    masm.branch32(Assembler::BelowOrEqual, initLength, key, &failure);
+
+    // Hole check and load value.
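+    // (Holes in dense elements are stored as JS_ELEMENTS_HOLE magic values.)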
+    BaseIndex element(scratchReg, key, TimesEight);
+    masm.branchTestMagic(Assembler::Equal, element, &failure);
+    masm.loadValue(element, R0);
+
+    // Enter type monitor IC to type-check result.
+    EmitEnterTypeMonitorIC(masm);
+
+    // Failure case - jump to next stub
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
+
+//
+// GetElem_TypedArray
+//
+
+bool
+ICGetElem_TypedArray::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    Label failure;
+    masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+    masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+    GeneralRegisterSet regs(availableGeneralRegs(2));
+    Register scratchReg = regs.takeAny();
+
+    // Unbox R0 and shape guard.
+    Register obj = masm.extractObject(R0, ExtractTemp0);
+    masm.loadPtr(Address(BaselineStubReg, ICGetElem_TypedArray::offsetOfShape()), scratchReg);
+    masm.branchTestObjShape(Assembler::NotEqual, obj, scratchReg, &failure);
+
+    // Unbox key.
+    Register key = masm.extractInt32(R1, ExtractTemp1);
+
+    // Bounds check.
+    masm.unboxInt32(Address(obj, TypedArray::lengthOffset()), scratchReg);
+    masm.branch32(Assembler::BelowOrEqual, scratchReg, key, &failure);
+
+    // Load the elements vector.
+    masm.loadPtr(Address(obj, TypedArray::dataOffset()), scratchReg);
+
+    // Load the value.
+    BaseIndex source(scratchReg, key, ScaleFromElemWidth(TypedArray::slotWidth(type_)));
+    masm.loadFromTypedArray(type_, source, R0, false, scratchReg, &failure);
+
+    // TODO: Allow loading doubles from uint32 arrays, but this requires monitoring.
+    EmitReturnFromIC(masm);
+
+    // Failure case - jump to next stub
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
+
+//
+// SetElem_Fallback
+//
+
+static bool
+DenseSetElemStubExists(JSContext *cx, ICStub::Kind kind, ICSetElem_Fallback *stub, HandleObject obj)
+{
+    JS_ASSERT(kind == ICStub::SetElem_Dense || kind == ICStub::SetElem_DenseAdd);
+
+    for (ICStubConstIterator iter = stub->beginChainConst(); !iter.atEnd(); iter++) {
+        if (kind == ICStub::SetElem_Dense && iter->isSetElem_Dense()) {
+            ICSetElem_Dense *dense = iter->toSetElem_Dense();
+            if (obj->lastProperty() == dense->shape() && obj->getType(cx) == dense->type())
+                return true;
+        }
+
+        if (kind == ICStub::SetElem_DenseAdd && iter->isSetElem_DenseAdd()) {
+            ICSetElem_DenseAdd *dense = iter->toSetElem_DenseAdd();
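+            // The object's own shape lives at the same offset in every
+            // proto-chain-depth specialization, so reading it through the
+            // depth-0 impl is safe regardless of the stub's actual depth.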
+            if (obj->lastProperty() == dense->toImplUnchecked<0>()->shape(0) &&
+                obj->getType(cx) == dense->type())
+            {
+                return true;
+            }
+        }
+    }
+    return false;
+}
+
+static bool
+TypedArraySetElemStubExists(ICSetElem_Fallback *stub, HandleObject obj, bool expectOOB)
+{
+    for (ICStubConstIterator iter = stub->beginChainConst(); !iter.atEnd(); iter++) {
+        if (!iter->isSetElem_TypedArray())
+            continue;
+        ICSetElem_TypedArray *taStub = iter->toSetElem_TypedArray();
+        if (obj->lastProperty() == taStub->shape() && taStub->expectOutOfBounds() == expectOOB)
+            return true;
+    }
+    return false;
+}
+
+static bool
+RemoveExistingTypedArraySetElemStub(JSContext *cx, ICSetElem_Fallback *stub, HandleObject obj)
+{
+    for (ICStubIterator iter = stub->beginChain(); !iter.atEnd(); iter++) {
+        if (!iter->isSetElem_TypedArray())
+            continue;
+
+        if (obj->lastProperty() != iter->toSetElem_TypedArray()->shape())
+            continue;
+
+        // TypedArraySetElem stubs are only removed via this procedure when
+        // they are being replaced with one that expects out-of-bounds indexes.
+        JS_ASSERT(!iter->toSetElem_TypedArray()->expectOutOfBounds());
+        iter.unlink(cx->zone());
+        return true;
+    }
+    return false;
+}
+
+static bool
+CanOptimizeDenseSetElem(JSContext *cx, HandleObject obj, uint32_t index,
+                        HandleShape oldShape, uint32_t oldCapacity, uint32_t oldInitLength,
+                        bool *isAddingCaseOut, size_t *protoDepthOut)
+{
+    uint32_t initLength = obj->getDenseInitializedLength();
+    uint32_t capacity = obj->getDenseCapacity();
+
+    *isAddingCaseOut = false;
+    *protoDepthOut = 0;
+
+    // Some initial sanity checks.
+    if (initLength < oldInitLength || capacity < oldCapacity)
+        return false;
+
+    RootedShape shape(cx, obj->lastProperty());
+
+    // Cannot optimize if the shape changed.
+    if (oldShape != shape)
+        return false;
+
+    // Cannot optimize if the capacity changed.
+    if (oldCapacity != capacity)
+        return false;
+
+    // Cannot optimize if the index doesn't fit within the new initialized length.
+    if (index >= initLength)
+        return false;
+
+    // Cannot optimize if the element at the target index is still a hole after the set.
+    if (!obj->containsDenseElement(index))
+        return false;
+
+    // At this point, if the initLength did not change, an optimized set is
+    // possible.
+    if (oldInitLength == initLength)
+        return true;
+
+    // If it did change, ensure that it changed specifically by incrementing by 1
+    // to accommodate this particular indexed set.
+    if (oldInitLength + 1 != initLength)
+        return false;
+    if (index != oldInitLength)
+        return false;
+
+    // The checks so far are not sufficient.  The object may have a setter
+    // definition that handles this particular integer write, either directly,
+    // via a prototype, or via the target object of a prototype that is a proxy.
+    // Scan the prototype and shape chain to make sure this is not the case.
+    RootedObject curObj(cx, obj);
+    while (curObj) {
+        // Ensure object is native.
+        if (!curObj->isNative())
+            return false;
+
+        // Ensure all indexed properties are stored in dense elements.
+        if (curObj->isIndexed())
+            return false;
+
+        curObj = curObj->getProto();
+        if (curObj)
+            ++*protoDepthOut;
+    }
+
+    if (*protoDepthOut > ICSetElem_DenseAdd::MAX_PROTO_CHAIN_DEPTH)
+        return false;
+
+    *isAddingCaseOut = true;
+
+    return true;
+}
+
+static bool
+DoSetElemFallback(JSContext *cx, BaselineFrame *frame, ICSetElem_Fallback *stub, Value *stack,
+                  HandleValue objv, HandleValue index, HandleValue rhs)
+{
+    RootedScript script(cx, frame->script());
+    jsbytecode *pc = stub->icEntry()->pc(script);
+    JSOp op = JSOp(*pc);
+    FallbackICSpew(cx, stub, "SetElem(%s)", js_CodeName[JSOp(*pc)]);
+
+    JS_ASSERT(op == JSOP_SETELEM ||
+              op == JSOP_ENUMELEM ||
+              op == JSOP_INITELEM ||
+              op == JSOP_INITELEM_ARRAY);
+
+    RootedObject obj(cx, ToObject(cx, objv));
+    if (!obj)
+        return false;
+
+    RootedShape oldShape(cx, obj->lastProperty());
+
+    // Record the old capacity and initialized length before the set.
+    uint32_t oldCapacity = 0;
+    uint32_t oldInitLength = 0;
+    if (obj->isNative() && index.isInt32() && index.toInt32() >= 0) {
+        oldCapacity = obj->getDenseCapacity();
+        oldInitLength = obj->getDenseInitializedLength();
+    }
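+    // These snapshots let CanOptimizeDenseSetElem detect, after the set,
+    // whether the operation simply appended one element (the DenseAdd case).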
+
+    if (op == JSOP_INITELEM) {
+        RootedValue nindex(cx, index);
+        if (!InitElemOperation(cx, obj, &nindex, rhs))
+            return false;
+    } else if (op == JSOP_INITELEM_ARRAY) {
+        JS_ASSERT(uint32_t(index.toInt32()) == GET_UINT24(pc));
+        if (!InitArrayElemOperation(cx, pc, obj, index.toInt32(), rhs))
+            return false;
+    } else {
+        if (!SetObjectElement(cx, obj, index, rhs, script->strict, script, pc))
+            return false;
+    }
+
+    // Overwrite the object on the stack (pushed for the decompiler) with the rhs.
+    JS_ASSERT(stack[2] == objv);
+    stack[2] = rhs;
+
+    if (stub->numOptimizedStubs() >= ICSetElem_Fallback::MAX_OPTIMIZED_STUBS) {
+        // TODO: Discard all stubs in this IC and replace with inert megamorphic stub.
+        // But for now we just bail.
+        return true;
+    }
+
+    // Try to generate new stubs.
+    if (obj->isNative() &&
+        index.isInt32() && index.toInt32() >= 0 &&
+        !rhs.isMagic(JS_ELEMENTS_HOLE))
+    {
+        JS_ASSERT(!obj->isTypedArray());
+
+        bool addingCase;
+        size_t protoDepth;
+
+        if (CanOptimizeDenseSetElem(cx, obj, index.toInt32(), oldShape, oldCapacity, oldInitLength,
+                                    &addingCase, &protoDepth))
+        {
+            RootedTypeObject type(cx, obj->getType(cx));
+            RootedShape shape(cx, obj->lastProperty());
+
+            if (addingCase && !DenseSetElemStubExists(cx, ICStub::SetElem_DenseAdd, stub, obj)) {
+                IonSpew(IonSpew_BaselineIC,
+                        "  Generating SetElem_DenseAdd stub "
+                        "(shape=%p, type=%p, protoDepth=%u)",
+                        obj->lastProperty(), type.get(), protoDepth);
+                ICSetElemDenseAddCompiler compiler(cx, obj, protoDepth);
+                ICUpdatedStub *denseStub = compiler.getStub(compiler.getStubSpace(script));
+                if (!denseStub)
+                    return false;
+                if (!denseStub->addUpdateStubForValue(cx, script, obj, JSID_VOID, rhs))
+                    return false;
+
+                stub->addNewStub(denseStub);
+            } else if (!addingCase &&
+                       !DenseSetElemStubExists(cx, ICStub::SetElem_Dense, stub, obj))
+            {
+                IonSpew(IonSpew_BaselineIC,
+                        "  Generating SetElem_Dense stub (shape=%p, type=%p)",
+                        obj->lastProperty(), type.get());
+                ICSetElem_Dense::Compiler compiler(cx, shape, type);
+                ICUpdatedStub *denseStub = compiler.getStub(compiler.getStubSpace(script));
+                if (!denseStub)
+                    return false;
+                if (!denseStub->addUpdateStubForValue(cx, script, obj, JSID_VOID, rhs))
+                    return false;
+
+                stub->addNewStub(denseStub);
+            }
+        }
+
+        return true;
+    }
+
+    if (obj->isTypedArray() && index.isInt32() && rhs.isNumber()) {
+        uint32_t len = TypedArray::length(obj);
+        int32_t idx = index.toInt32();
+        bool expectOutOfBounds = (idx < 0) || (static_cast<uint32_t>(idx) >= len);
+
+        if (!TypedArraySetElemStubExists(stub, obj, expectOutOfBounds)) {
+            // Remove any existing SetElem_TypedArray stub that doesn't handle
+            // out-of-bounds writes.
+            if (expectOutOfBounds)
+                RemoveExistingTypedArraySetElemStub(cx, stub, obj);
+
+            IonSpew(IonSpew_BaselineIC,
+                    "  Generating SetElem_TypedArray stub (shape=%p, type=%u, oob=%s)",
+                    obj->lastProperty(), TypedArray::type(obj),
+                    expectOutOfBounds ? "yes" : "no");
+            ICSetElem_TypedArray::Compiler compiler(cx, obj->lastProperty(), TypedArray::type(obj),
+                                                    expectOutOfBounds);
+            ICStub *typedArrayStub = compiler.getStub(compiler.getStubSpace(script));
+            if (!typedArrayStub)
+                return false;
+
+            stub->addNewStub(typedArrayStub);
+            return true;
+        }
+    }
+
+    return true;
+}
+
+typedef bool (*DoSetElemFallbackFn)(JSContext *, BaselineFrame *, ICSetElem_Fallback *, Value *,
+                                    HandleValue, HandleValue, HandleValue);
+static const VMFunction DoSetElemFallbackInfo =
+    FunctionInfo<DoSetElemFallbackFn>(DoSetElemFallback, PopValues(2));
+
+bool
+ICSetElem_Fallback::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    JS_ASSERT(R0 == JSReturnOperand);
+
+    EmitRestoreTailCallReg(masm);
+
+    // State: R0: object, R1: index, stack: rhs.
+    // For the decompiler, the stack has to be: object, index, rhs,
+    // so we push the index, overwrite the old rhs slot with the object in R0,
+    // and push the rhs value on top.
+    masm.pushValue(R1);
+    masm.loadValue(Address(BaselineStackReg, sizeof(Value)), R1);
+    masm.storeValue(R0, Address(BaselineStackReg, sizeof(Value)));
+    masm.pushValue(R1);
+
+    // Push arguments.
+    masm.pushValue(R1); // RHS
+
+    // Push index. On x86 and ARM two push instructions are emitted, so use a
+    // separate register to store the old stack pointer.
+    masm.mov(BaselineStackReg, R1.scratchReg());
+    masm.pushValue(Address(R1.scratchReg(), 2 * sizeof(Value)));
+    masm.pushValue(R0); // Object.
+
+    // Push pointer to stack values, so that the stub can overwrite the object
+    // (pushed for the decompiler) with the rhs.
+    masm.computeEffectiveAddress(Address(BaselineStackReg, 3 * sizeof(Value)), R0.scratchReg());
+    masm.push(R0.scratchReg());
+
+    masm.push(BaselineStubReg);
+    masm.pushBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+
+    return tailCallVM(DoSetElemFallbackInfo, masm);
+}
+
+//
+// SetElem_Dense
+//
+
+bool
+ICSetElem_Dense::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    // R0 = object
+    // R1 = key
+    // Stack = { ... rhs-value, <return-addr>? }
+    Label failure;
+    Label failureUnstow;
+    masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+    masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+    GeneralRegisterSet regs(availableGeneralRegs(2));
+    Register scratchReg = regs.takeAny();
+
+    // Unbox R0 and guard on its shape.
+    Register obj = masm.extractObject(R0, ExtractTemp0);
+    masm.loadPtr(Address(BaselineStubReg, ICSetElem_Dense::offsetOfShape()), scratchReg);
+    masm.branchTestObjShape(Assembler::NotEqual, obj, scratchReg, &failure);
+
+    // Stow both R0 and R1 (object and key); both registers still hold their
+    // values.
+    EmitStowICValues(masm, 2);
+
+    // We may need to free up some registers.
+    regs = availableGeneralRegs(0);
+    regs.take(R0);
+
+    // Guard that the type object matches.
+    Register typeReg = regs.takeAny();
+    masm.loadPtr(Address(BaselineStubReg, ICSetElem_Dense::offsetOfType()), typeReg);
+    masm.branchPtr(Assembler::NotEqual, Address(obj, JSObject::offsetOfType()), typeReg,
+                   &failureUnstow);
+    regs.add(typeReg);
+
+    // Stack is now: { ..., rhs-value, object-value, key-value, maybe?-RET-ADDR }
+    // Load rhs-value in to R0
+    masm.loadValue(Address(BaselineStackReg, 2 * sizeof(Value) + ICStackValueOffset), R0);
+
+    // Call the type-update stub.
+    if (!callTypeUpdateIC(masm, sizeof(Value)))
+        return false;
+
+    // Unstow R0 and R1 (object and key)
+    EmitUnstowICValues(masm, 2);
+
+    // Reset register set.
+    regs = availableGeneralRegs(2);
+    scratchReg = regs.takeAny();
+
+    // Load obj->elements in scratchReg.
+    masm.loadPtr(Address(obj, JSObject::offsetOfElements()), scratchReg);
+
+    // Unbox key.
+    Register key = masm.extractInt32(R1, ExtractTemp1);
+
+    // Bounds check.
+    Address initLength(scratchReg, ObjectElements::offsetOfInitializedLength());
+    masm.branch32(Assembler::BelowOrEqual, initLength, key, &failure);
+
+    // Hole check.
+    BaseIndex element(scratchReg, key, TimesEight);
+    masm.branchTestMagic(Assembler::Equal, element, &failure);
+
+    // Convert int32 values to double if convertDoubleElements is set. In this
+    // case the heap typeset is guaranteed to contain both int32 and double, so
+    // it's okay to store a double.
+    Label convertDoubles, convertDoublesDone;
+    Address elementsFlags(scratchReg, ObjectElements::offsetOfFlags());
+    masm.branchTest32(Assembler::NonZero, elementsFlags,
+                      Imm32(ObjectElements::CONVERT_DOUBLE_ELEMENTS),
+                      &convertDoubles);
+    masm.bind(&convertDoublesDone);
+
+    // It's safe to overwrite R0 now.
+    Address valueAddr(BaselineStackReg, ICStackValueOffset);
+    masm.loadValue(valueAddr, R0);
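+    // A GC pre-barrier is needed here because an existing element is being
+    // overwritten (the DenseAdd stub below writes a fresh slot and skips it).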
+    EmitPreBarrier(masm, element, MIRType_Value);
+    masm.storeValue(R0, element);
+    EmitReturnFromIC(masm);
+
+    // Convert to double and jump back.
+    masm.bind(&convertDoubles);
+    masm.convertInt32ValueToDouble(valueAddr, R0.scratchReg(), &convertDoublesDone);
+    masm.jump(&convertDoublesDone);
+
+    // Failure case - fail but first unstow R0 and R1
+    masm.bind(&failureUnstow);
+    EmitUnstowICValues(masm, 2);
+
+    // Failure case - jump to next stub
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
+
+static bool
+GetProtoShapes(JSObject *obj, size_t protoChainDepth, AutoShapeVector *shapes)
+{
+    JS_ASSERT(shapes->length() == 1);
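+    // The caller has already appended the object's own shape; append one shape
+    // per prototype so the stub can guard the entire chain.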
+    JSObject *curProto = obj->getProto();
+    for (size_t i = 0; i < protoChainDepth; i++) {
+        if (!shapes->append(curProto->lastProperty()))
+            return false;
+        curProto = curProto->getProto();
+    }
+    JS_ASSERT(!curProto);
+    return true;
+}
+
+//
+// SetElem_DenseAdd
+//
+
+ICUpdatedStub *
+ICSetElemDenseAddCompiler::getStub(ICStubSpace *space)
+{
+    AutoShapeVector shapes(cx);
+    if (!shapes.append(obj_->lastProperty()))
+        return NULL;
+
+    if (!GetProtoShapes(obj_, protoChainDepth_, &shapes))
+        return NULL;
+
+    JS_STATIC_ASSERT(ICSetElem_DenseAdd::MAX_PROTO_CHAIN_DEPTH == 4);
+
+    ICUpdatedStub *stub = NULL;
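+    // The stub is templated on the proto chain depth so the guarded shapes can
+    // be stored inline; instantiate the matching specialization.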
+    switch (protoChainDepth_) {
+      case 0: stub = getStubSpecific<0>(space, &shapes); break;
+      case 1: stub = getStubSpecific<1>(space, &shapes); break;
+      case 2: stub = getStubSpecific<2>(space, &shapes); break;
+      case 3: stub = getStubSpecific<3>(space, &shapes); break;
+      case 4: stub = getStubSpecific<4>(space, &shapes); break;
+      default: JS_NOT_REACHED("ProtoChainDepth too high.");
+    }
+    if (!stub || !stub->initUpdatingChain(cx, space))
+        return NULL;
+    return stub;
+}
+
+bool
+ICSetElemDenseAddCompiler::generateStubCode(MacroAssembler &masm)
+{
+    // R0 = object
+    // R1 = key
+    // Stack = { ... rhs-value, <return-addr>? }
+    Label failure;
+    Label failureUnstow;
+    masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+    masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+    GeneralRegisterSet regs(availableGeneralRegs(2));
+    Register scratchReg = regs.takeAny();
+
+    // Unbox R0 and guard on its shape.
+    Register obj = masm.extractObject(R0, ExtractTemp0);
+    masm.loadPtr(Address(BaselineStubReg, ICSetElem_DenseAddImpl<0>::offsetOfShape(0)),
+                 scratchReg);
+    masm.branchTestObjShape(Assembler::NotEqual, obj, scratchReg, &failure);
+
+    // Stow both R0 and R1 (object and key); both registers still hold their
+    // values.
+    EmitStowICValues(masm, 2);
+
+    // We may need to free up some registers.
+    regs = availableGeneralRegs(0);
+    regs.take(R0);
+
+    // Guard that the type object matches.
+    Register typeReg = regs.takeAny();
+    masm.loadPtr(Address(BaselineStubReg, ICSetElem_DenseAdd::offsetOfType()), typeReg);
+    masm.branchPtr(Assembler::NotEqual, Address(obj, JSObject::offsetOfType()), typeReg,
+                   &failureUnstow);
+    regs.add(typeReg);
+
+    // Shape guard objects on the proto chain.
+    scratchReg = regs.takeAny();
+    Register protoReg = regs.takeAny();
+    for (size_t i = 0; i < protoChainDepth_; i++) {
+        masm.loadObjProto(i == 0 ? obj : protoReg, protoReg);
+        masm.loadPtr(Address(BaselineStubReg, ICSetElem_DenseAddImpl<0>::offsetOfShape(i + 1)),
+                     scratchReg);
+        masm.branchTestObjShape(Assembler::NotEqual, protoReg, scratchReg, &failureUnstow);
+    }
+    regs.add(protoReg);
+    regs.add(scratchReg);
+
+    // Stack is now: { ..., rhs-value, object-value, key-value, maybe?-RET-ADDR }
+    // Load rhs-value in to R0
+    masm.loadValue(Address(BaselineStackReg, 2 * sizeof(Value) + ICStackValueOffset), R0);
+
+    // Call the type-update stub.
+    if (!callTypeUpdateIC(masm, sizeof(Value)))
+        return false;
+
+    // Unstow R0 and R1 (object and key)
+    EmitUnstowICValues(masm, 2);
+
+    // Reset register set.
+    regs = availableGeneralRegs(2);
+    scratchReg = regs.takeAny();
+
+    // Load obj->elements in scratchReg.
+    masm.loadPtr(Address(obj, JSObject::offsetOfElements()), scratchReg);
+
+    // Unbox key.
+    Register key = masm.extractInt32(R1, ExtractTemp1);
+
+    // Bounds check (key == initLength)
+    Address initLength(scratchReg, ObjectElements::offsetOfInitializedLength());
+    masm.branch32(Assembler::NotEqual, initLength, key, &failure);
+
+    // Capacity check.
+    Address capacity(scratchReg, ObjectElements::offsetOfCapacity());
+    masm.branch32(Assembler::BelowOrEqual, capacity, key, &failure);
+
+    // Increment initLength before write.
+    masm.add32(Imm32(1), initLength);
+
+    // If length is now <= key, increment length before write.
+    Label skipIncrementLength;
+    Address length(scratchReg, ObjectElements::offsetOfLength());
+    masm.branch32(Assembler::Above, length, key, &skipIncrementLength);
+    masm.add32(Imm32(1), length);
+    masm.bind(&skipIncrementLength);
+
+    // Convert int32 values to double if convertDoubleElements is set. In this
+    // case the heap typeset is guaranteed to contain both int32 and double, so
+    // it's okay to store a double.
+    Label convertDoubles, convertDoublesDone;
+    Address elementsFlags(scratchReg, ObjectElements::offsetOfFlags());
+    masm.branchTest32(Assembler::NonZero, elementsFlags,
+                      Imm32(ObjectElements::CONVERT_DOUBLE_ELEMENTS),
+                      &convertDoubles);
+    masm.bind(&convertDoublesDone);
+
+    // Write the value.  No need for write barrier since we're not overwriting an old value.
+    // It's safe to overwrite R0 now.
+    BaseIndex element(scratchReg, key, TimesEight);
+    Address valueAddr(BaselineStackReg, ICStackValueOffset);
+    masm.loadValue(valueAddr, R0);
+    masm.storeValue(R0, element);
+    EmitReturnFromIC(masm);
+
+    // Convert to double and jump back.
+    masm.bind(&convertDoubles);
+    masm.convertInt32ValueToDouble(valueAddr, R0.scratchReg(), &convertDoublesDone);
+    masm.jump(&convertDoublesDone);
+
+    // Failure case - fail but first unstow R0 and R1
+    masm.bind(&failureUnstow);
+    EmitUnstowICValues(masm, 2);
+
+    // Failure case - jump to next stub
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
+
+//
+// SetElem_TypedArray
+//
+
+bool
+ICSetElem_TypedArray::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    Label failure;
+    masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+    masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+    GeneralRegisterSet regs(availableGeneralRegs(2));
+    Register scratchReg = regs.takeAny();
+
+    // Unbox R0 and shape guard.
+    Register obj = masm.extractObject(R0, ExtractTemp0);
+    masm.loadPtr(Address(BaselineStubReg, ICSetElem_TypedArray::offsetOfShape()), scratchReg);
+    masm.branchTestObjShape(Assembler::NotEqual, obj, scratchReg, &failure);
+
+    // Unbox key.
+    Register key = masm.extractInt32(R1, ExtractTemp1);
+
+    // Bounds check.
+    Label oobWrite;
+    masm.unboxInt32(Address(obj, TypedArray::lengthOffset()), scratchReg);
+    masm.branch32(Assembler::BelowOrEqual, scratchReg, key,
+                  expectOutOfBounds_ ? &oobWrite : &failure);
+
+    // Load the elements vector.
+    masm.loadPtr(Address(obj, TypedArray::dataOffset()), scratchReg);
+
+    BaseIndex dest(scratchReg, key, ScaleFromElemWidth(TypedArray::slotWidth(type_)));
+    Address value(BaselineStackReg, ICStackValueOffset);
+
+    // We need a second scratch register. It's okay to clobber the type tag of
+    // R0 or R1, as long as it's restored before jumping to the next stub.
+    regs = availableGeneralRegs(0);
+    regs.takeUnchecked(obj);
+    regs.takeUnchecked(key);
+    regs.take(scratchReg);
+    Register secondScratch = regs.takeAny();
+
+    if (type_ == TypedArray::TYPE_FLOAT32 || type_ == TypedArray::TYPE_FLOAT64) {
+        masm.ensureDouble(value, FloatReg0, &failure);
+        masm.storeToTypedFloatArray(type_, FloatReg0, dest);
+        EmitReturnFromIC(masm);
+    } else if (type_ == TypedArray::TYPE_UINT8_CLAMPED) {