Merge from mozilla-central.
author: David Anderson <danderson@mozilla.com>
Tue, 20 Sep 2011 13:27:50 -0700
changeset 105260 e1d7da3830eabdf5f5ec92a46d7289f90841ff20
parent 77221 b15856d4b1148976cc05f09bf70d5faf46f99215 (current diff)
parent 105259 d498ff65f1e53901d44f29da966e0bc24a775714 (diff)
child 105261 c11c77e73480f4da194f8115ab67d2ffa63c7517
push id: 14706
push user: eakhgari@mozilla.com
push date: Tue, 11 Sep 2012 20:39:52 +0000
treeherder: mozilla-inbound@d50bf1edaabe [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
milestone: 9.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Merge from mozilla-central.
browser/base/content/browser.js
content/html/content/src/nsGenericHTMLElement.cpp
dom/base/nsDOMClassInfo.cpp
dom/base/nsGlobalWindow.cpp
dom/base/nsJSEnvironment.cpp
dom/base/nsJSEnvironment.h
js/src/Makefile.in
js/src/assembler/assembler/ARMAssembler.h
js/src/assembler/assembler/AssemblerBufferWithConstantPool.h
js/src/assembler/assembler/MacroAssemblerARM.h
js/src/config/autoconf.mk.in
js/src/configure.in
js/src/jsapi.cpp
js/src/jsapi.h
js/src/jsarray.cpp
js/src/jscompartment.cpp
js/src/jscompartment.h
js/src/jsdbgapi.cpp
js/src/jsfriendapi.cpp
js/src/jsfun.cpp
js/src/jsfun.h
js/src/jsgc.cpp
js/src/jsgc.h
js/src/jsgcmark.cpp
js/src/jsinfer.cpp
js/src/jsinterp.cpp
js/src/jsinterp.h
js/src/jsiter.cpp
js/src/jsobj.cpp
js/src/jsobj.h
js/src/jsobjinlines.h
js/src/jsopcode.cpp
js/src/jsopcode.tbl
js/src/jsproxy.cpp
js/src/jsprvtd.h
js/src/jsregexpinlines.h
js/src/jsstr.cpp
js/src/jstracer.cpp
js/src/jstypedarray.cpp
js/src/jsutil.h
js/src/jsxdrapi.h
js/src/jsxml.cpp
js/src/methodjit/BaseAssembler.h
js/src/methodjit/Compiler.cpp
js/src/methodjit/Compiler.h
js/src/methodjit/InvokeHelpers.cpp
js/src/methodjit/MethodJIT.h
js/src/methodjit/MonoIC.cpp
js/src/methodjit/PolyIC.cpp
js/src/methodjit/StubCalls.cpp
js/src/methodjit/StubCalls.h
js/src/shell/js.cpp
js/src/vm/GlobalObject.cpp
js/src/xpconnect/shell/xpcshell.cpp
js/src/xpconnect/src/nsXPConnect.cpp
layout/build/Makefile.in
layout/build/nsLayoutModule.cpp
layout/generic/nsFrame.cpp
modules/libpref/src/init/all.js
testing/mochitest/specialpowers/content/specialpowers.js
--- a/config/find_vanilla_new_calls
+++ b/config/find_vanilla_new_calls
@@ -55,20 +55,20 @@ if [ ! -f $file ] ; then
 fi
 
 tmpfile1=`mktemp`
 tmpfile2=`mktemp`
 nm -C $file > $tmpfile1
 
 # Need to double-escape '[' and ']' to stop grep from interpreting them
 # specially.
-grep 'operator new(unsigned int)'        $tmpfile1 >> $tmpfile2
-grep 'operator new(unsigned long)'       $tmpfile1 >> $tmpfile2
-grep 'operator new\\[\\](unsigned int)'  $tmpfile1 >> $tmpfile2
-grep 'operator new\\[\\](unsigned long)' $tmpfile1 >> $tmpfile2
+grep '^operator new(unsigned int)'        $tmpfile1 >> $tmpfile2
+grep '^operator new(unsigned long)'       $tmpfile1 >> $tmpfile2
+grep '^operator new\\[\\](unsigned int)'  $tmpfile1 >> $tmpfile2
+grep '^operator new\\[\\](unsigned long)' $tmpfile1 >> $tmpfile2
 rm -f $tmpfile1
 
 if [ -s $tmpfile2 ] ; then
     echo "TEST-UNEXPECTED-FAIL | find_vanilla_new_calls | found calls are listed below"
     cat $tmpfile2
     echo
     rm -f $tmpfile2
     exit 1
--- a/js/src/Makefile.in
+++ b/js/src/Makefile.in
@@ -337,40 +337,98 @@ CPPSRCS += 	MethodJIT.cpp \
 		MonoIC.cpp \
 		PolyIC.cpp \
 		ImmutableSync.cpp \
 		InvokeHelpers.cpp \
 		Retcon.cpp \
 		TrampolineCompiler.cpp \
 		$(NULL)
 
+# Ion
+ifdef ENABLE_ION
+VPATH +=	$(srcdir)/ion
+VPATH +=	$(srcdir)/ion/shared
+
+CPPSRCS +=	MIR.cpp \
+		IonBuilder.cpp \
+		MIRGraph.cpp \
+		IonAnalysis.cpp \
+		Lowering.cpp \
+		Ion.cpp \
+		BitSet.cpp \
+		IonLIR.cpp \
+		GreedyAllocator.cpp \
+		C1Spewer.cpp \
+		JSONSpewer.cpp \
+		IonSpewer.cpp \
+		LICM.cpp \
+		ValueNumbering.cpp \
+		LinearScan.cpp \
+		MoveResolver.cpp \
+		CodeGenerator-shared.cpp \
+		Lowering-shared.cpp \
+		TypeOracle.cpp \
+		TypePolicy.cpp \
+		CodeGenerator.cpp \
+		IonMacroAssembler.cpp \
+		Snapshots.cpp \
+		Bailouts.cpp \
+		$(NULL)
+endif #ENABLE_ION
 ifeq (86, $(findstring 86,$(TARGET_CPU)))
+CPPSRCS +=	CodeGenerator-x86-shared.cpp
+CPPSRCS +=	MoveEmitter-x86-shared.cpp
+CPPSRCS +=	Assembler-x86-shared.cpp
 ifeq (x86_64, $(TARGET_CPU))
 ifdef _MSC_VER
 ASFILES +=	TrampolineMasmX64.asm
 endif
 ifeq ($(OS_ARCH),WINNT)
 ifdef GNU_CC
 ASFILES +=	TrampolineMingwX64.s
 endif
 endif
 ifdef SOLARIS_SUNPRO_CXX
 ASFILES +=	TrampolineSUNWX64.s
 endif
 #CPPSRCS		+= only_on_x86_64.cpp
+VPATH +=	$(srcdir)/ion/x64
+CPPSRCS += 	Lowering-x64.cpp \
+		CodeGenerator-x64.cpp \
+		Trampoline-x64.cpp \
+		Assembler-x64.cpp \
+		Bailouts-x64.cpp \
+		$(NULL)
 else
 #CPPSRCS		+= only_on_x86.cpp
+VPATH +=	$(srcdir)/ion/x86
+CPPSRCS +=	Lowering-x86.cpp \
+		CodeGenerator-x86.cpp \
+		Trampoline-x86.cpp \
+		Assembler-x86.cpp \
+		Bailouts-x86.cpp \
+		$(NULL)
 ifdef SOLARIS_SUNPRO_CXX
 ASFILES +=	TrampolineSUNWX86.s
 endif
 endif
 endif
-ifeq (arm, $(TARGET_CPU))
+ifdef ENABLE_ION
+ifeq (arm, $(findstring arm, $(TARGET_CPU)))
 #CPPSRCS		+= only_on_arm.cpp
+VPATH +=	$(srcdir)/ion/arm
+CPPSRCS +=	Lowering-arm.cpp \
+		CodeGenerator-arm.cpp \
+		Trampoline-arm.cpp \
+		Assembler-arm.cpp \
+		Bailouts-arm.cpp \
+		MoveEmitter-arm.cpp \
+		$(NULL)
 endif
+endif #ENABLE_ION
 ifeq (sparc, $(findstring sparc,$(TARGET_CPU)))
 ASFILES +=	TrampolineSparc.s
 endif
 #
 # END enclude sources for the method JIT
 #############################################
 
 endif
@@ -459,17 +517,17 @@ endif
 
 ifeq (86, $(findstring 86,$(TARGET_CPU)))
 ifeq (x86_64, $(TARGET_CPU))
 #CPPSRCS		+= only_on_x86_64.cpp
 else
 #CPPSRCS		+= only_on_x86.cpp
 endif
 endif
-ifeq (arm, $(TARGET_CPU))
+ifeq (arm, $(findstring arm, $(TARGET_CPU)))
 #CPPSRCS		+= only_on_arm.cpp
 endif
 #
 # END enclude sources for the Nitro assembler
 #############################################
 
 endif
 
@@ -661,17 +719,17 @@ check-malloc-function-usage: $(filter-ou
 		"in Makefile.in" "cx->calloc_ or rt->calloc_" $^
 	$(srcdir)/config/check_source_count.py "\bjs_realloc\b" 0 \
 		"in Makefile.in" "cx->realloc_ or rt->realloc_" $^
 	$(srcdir)/config/check_source_count.py "\bjs_free\b" 0 \
 		"in Makefile.in" "cx->free_" $^
 
 	# We desire these numbers to go down, not up. See "User guide to memory
 	# management within SpiderMonkey" in jsutil.h.
-	$(srcdir)/config/check_source_count.py OffTheBooks:: 59 \
+	$(srcdir)/config/check_source_count.py OffTheBooks:: 60 \
 		"in Makefile.in" "{cx,rt}->{new_,array_new,malloc_,calloc_,realloc_}" $^
 	# This should go to zero, if possible.
 	$(srcdir)/config/check_source_count.py UnwantedForeground:: 31 \
 		"in Makefile.in" "{cx,rt}->{free_,delete_,array_delete}" $^
 
 ifneq ($(OS_ARCH),WINNT) # FIXME: this should be made work on Windows too.
 #check:: check-malloc-function-usage FIXME: disable on JM until closer to merge time.
 endif
--- a/js/src/assembler/assembler/ARMAssembler.h
+++ b/js/src/assembler/assembler/ARMAssembler.h
@@ -83,17 +83,18 @@ namespace JSC {
             r11,
             r12,
             ip = r12,
             r13,
             sp = r13,
             r14,
             lr = r14,
             r15,
-            pc = r15
+            pc = r15,
+            invalid_reg
         } RegisterID;
 
         typedef enum {
             d0,
             d1,
             d2,
             d3,
             SD0 = d3,
@@ -119,17 +120,18 @@ namespace JSC {
             d23,
             d24,
             d25,
             d26,
             d27,
             d28,
             d29,
             d30,
-            d31
+            d31,
+            invalid_freg
         } FPRegisterID;
 
         inline FPRegisterID floatShadow(FPRegisterID s)
         {
             return (FPRegisterID)(s*2);
         }
         inline FPRegisterID doubleShadow(FPRegisterID d)
         {
@@ -281,17 +283,17 @@ namespace JSC {
 
         class JmpSrc {
             friend class ARMAssembler;
         public:
             JmpSrc()
                 : m_offset(-1)
             {
             }
-
+            int offset() {return m_offset;}
         private:
             JmpSrc(int offset)
                 : m_offset(offset)
             {
             }
 
             int m_offset;
         };
@@ -581,17 +583,17 @@ namespace JSC {
                 mnemonic_size = "h";
                 break;
             case 4:
                 mnemonic_size = "";
                 break;
             }
             char const * off_sign = (posOffset) ? ("+") : ("-");
             js::JaegerSpew(js::JSpew_Insns, 
-                           IPFX "%sr%s%s %s, [%s, #%s%u]\n", 
+                           IPFX "%sr%s%s %s, [%s, #%s%u]\n",
                            MAYBE_PAD, mnemonic_act, mnemonic_sign, mnemonic_size,
                            nameGpReg(rd), nameGpReg(rb), off_sign, offset);
             if (size == 32 || (size == 8 && !isSigned)) {
                 /* All (the one) 32 bit ops and the unsigned 8 bit ops use the original encoding.*/
                 emitInst(static_cast<ARMWord>(cc) | DTR |
                          (isLoad ? DT_LOAD : 0) |
                          (size == 8 ? DT_BYTE : 0) |
                          (posOffset ? DT_UP : 0), rd, rb, offset);
@@ -922,19 +924,19 @@ namespace JSC {
 
         // General helpers
 
         void forceFlushConstantPool()
         {
             m_buffer.flushWithoutBarrier(true);
         }
 
-        int size()
+        size_t size() const
         {
-            return m_buffer.size();
+            return m_buffer.uncheckedSize();
         }
 
         void ensureSpace(int insnSpace, int constSpace)
         {
             m_buffer.ensureSpace(insnSpace, constSpace);
         }
 
         void ensureSpace(int space)
@@ -1036,17 +1038,17 @@ namespace JSC {
 
         static void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
 
         // Patch pointers
 
         static void linkPointer(void* code, JmpDst from, void* to)
         {
             js::JaegerSpew(js::JSpew_Insns,
-                           ISPFX "##linkPointer     ((%p + %#x)) points to ((%p))\n",
+                           "##linkPointer     ((%p + %#x)) points to ((%p))\n",
                            code, from.m_offset, to);
 
             patchPointerInternal(reinterpret_cast<intptr_t>(code) + from.m_offset, to);
         }
 
         static void repatchInt32(void* from, int32_t to)
         {
             js::JaegerSpew(js::JSpew_Insns,
@@ -1054,17 +1056,17 @@ namespace JSC {
                            from, to);
 
             patchPointerInternal(reinterpret_cast<intptr_t>(from), reinterpret_cast<void*>(to));
         }
 
         static void repatchPointer(void* from, void* to)
         {
             js::JaegerSpew(js::JSpew_Insns,
-                           ISPFX "##repatchPointer  ((%p)) points to ((%p))\n",
+                           "##repatchPointer  ((%p)) points to ((%p))\n",
                            from, to);
 
             patchPointerInternal(reinterpret_cast<intptr_t>(from), to);
         }
 
         static void repatchLoadPtrToLEA(void* from)
         {
             // On arm, this is a patch from LDR to ADD. It is restricted conversion,
@@ -1603,17 +1605,17 @@ namespace JSC {
             emitVFPInst(static_cast<ARMWord>(cc) | VFP_DXFER | VFP_MOV |
                         (fromFP ? DT_LOAD : 0) |
                         (isDbl ? VFP_DBL : 0), RD(r1), RN(r2), isDbl ? DM(rFP) : SM(rFP));
         }
 
         void fcpyd_r(int dd, int dm, Condition cc = AL)
         {
             js::JaegerSpew(js::JSpew_Insns,
-                    IPFX   "%-15s %s, %s\n", MAYBE_PAD, "vmov.f64", 
+                           IPFX   "%-15s %s, %s\n", MAYBE_PAD, "vmov.f64",
                            nameFpRegD(dd), nameFpRegD(dm));
             // TODO: emitInst doesn't work for VFP instructions, though it
             // seems to work for current usage.
             emitVFPInst(static_cast<ARMWord>(cc) | FCPYD, DD(dd), DM(dm), 0);
         }
 
         void faddd_r(int dd, int dn, int dm, Condition cc = AL)
         {
--- a/js/src/assembler/assembler/AssemblerBuffer.h
+++ b/js/src/assembler/assembler/AssemblerBuffer.h
@@ -30,16 +30,17 @@
 #ifndef AssemblerBuffer_h
 #define AssemblerBuffer_h
 
 #include "assembler/wtf/Platform.h"
 
 #if ENABLE_ASSEMBLER
 
 #include <string.h>
+#include <limits.h>
 #include "assembler/jit/ExecutableAllocator.h"
 #include "assembler/wtf/Assertions.h"
 #include "jsstdint.h"
 
 namespace JSC {
 
     class AssemblerBuffer {
         static const int inlineCapacity = 256;
@@ -193,16 +194,24 @@ namespace JSC {
         {
             /*
              * If |extraCapacity| is zero (as it almost always is) this is an
              * allocator-friendly doubling growth strategy.
              */
             int newCapacity = m_capacity + m_capacity + extraCapacity;
             char* newBuffer;
 
+            // Do not allow offsets to grow beyond INT_MAX / 2. This mirrors
+            // Assembler-shared.h.
+            if (newCapacity >= INT_MAX / 2) {
+                m_size = 0;
+                m_oom = true;
+                return;
+            }
+
             if (m_buffer == m_inlineBuffer) {
                 newBuffer = static_cast<char*>(malloc(newCapacity));
                 if (!newBuffer) {
                     m_size = 0;
                     m_oom = true;
                     return;
                 }
                 memcpy(newBuffer, m_buffer, m_size);
--- a/js/src/assembler/assembler/AssemblerBufferWithConstantPool.h
+++ b/js/src/assembler/assembler/AssemblerBufferWithConstantPool.h
@@ -182,17 +182,17 @@ public:
     }
 
     int size()
     {
         flushIfNoSpaceFor(maxInstructionSize, sizeof(uint64_t));
         return AssemblerBuffer::size();
     }
 
-    int uncheckedSize()
+    int uncheckedSize() const
     {
         return AssemblerBuffer::size();
     }
 
     void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool** poolp, CodeKind kind)
     {
         flushConstantPool(false);
         return AssemblerBuffer::executableAllocAndCopy(allocator, poolp, kind);
--- a/js/src/assembler/assembler/X86Assembler.h
+++ b/js/src/assembler/assembler/X86Assembler.h
@@ -78,36 +78,50 @@ namespace X86Registers {
         r9,
         r10,
         r11,
         r12,
         r13,
         r14,
         r15
 #endif
+        ,invalid_reg
     } RegisterID;
 
     typedef enum {
         xmm0,
         xmm1,
         xmm2,
         xmm3,
         xmm4,
         xmm5,
         xmm6,
         xmm7
+#if WTF_CPU_X86_64
+       ,xmm8,
+        xmm9,
+        xmm10,
+        xmm11,
+        xmm12,
+        xmm13,
+        xmm14,
+        xmm15
+#endif
+       ,invalid_xmm
     } XMMRegisterID;
 
     static const char* nameFPReg(XMMRegisterID fpreg)
     {
-        static const char* xmmnames[8]
+        static const char* xmmnames[16]
           = { "%xmm0", "%xmm1", "%xmm2", "%xmm3",
-              "%xmm4", "%xmm5", "%xmm6", "%xmm7" };
+              "%xmm4", "%xmm5", "%xmm6", "%xmm7",
+              "%xmm8", "%xmm9", "%xmm10", "%xmm11",
+              "%xmm12", "%xmm13", "%xmm14", "%xmm15" };
         int off = (XMMRegisterID)fpreg - (XMMRegisterID)xmm0;
-        return (off < 0 || off > 7) ? "%xmm?" : xmmnames[off];
+        return (off < 0 || off > 15) ? "%xmm?" : xmmnames[off];
     }
 
     static const char* nameIReg(int szB, RegisterID reg)
     {
         static const char* r64names[16]
           = { "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi",
               "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15" };
         static const char* r32names[16]
@@ -180,17 +194,16 @@ public:
 
 private:
     typedef enum {
         OP_ADD_EvGv                     = 0x01,
         OP_ADD_GvEv                     = 0x03,
         OP_OR_EvGv                      = 0x09,
         OP_OR_GvEv                      = 0x0B,
         OP_2BYTE_ESCAPE                 = 0x0F,
-        OP_3BYTE_ESCAPE                 = 0x3A,
         OP_AND_EvGv                     = 0x21,
         OP_AND_GvEv                     = 0x23,
         OP_SUB_EvGv                     = 0x29,
         OP_SUB_GvEv                     = 0x2B,
         PRE_PREDICT_BRANCH_NOT_TAKEN    = 0x2E,
         OP_XOR_EvGv                     = 0x31,
         OP_XOR_GvEv                     = 0x33,
         OP_CMP_EvGv                     = 0x39,
@@ -245,16 +258,17 @@ private:
 
     typedef enum {
         OP2_MOVSD_VsdWsd    = 0x10,
         OP2_MOVSD_WsdVsd    = 0x11,
         OP2_UNPCKLPS_VsdWsd = 0x14,
         OP2_CVTSI2SD_VsdEd  = 0x2A,
         OP2_CVTTSD2SI_GdWsd = 0x2C,
         OP2_UCOMISD_VsdWsd  = 0x2E,
+        OP2_MOVMSKPD_EdVd   = 0x50,
         OP2_ADDSD_VsdWsd    = 0x58,
         OP2_MULSD_VsdWsd    = 0x59,
         OP2_CVTSS2SD_VsdEd  = 0x5A,
         OP2_CVTSD2SS_VsdEd  = 0x5A,
         OP2_SUBSD_VsdWsd    = 0x5C,
         OP2_DIVSD_VsdWsd    = 0x5E,
         OP2_SQRTSD_VsdWsd   = 0x51,
         OP2_ANDPD_VpdWpd    = 0x54,
@@ -268,19 +282,25 @@ private:
         OP2_MOVSX_GvEb      = 0xBE,
         OP2_MOVSX_GvEw      = 0xBF,
         OP2_MOVZX_GvEb      = 0xB6,
         OP2_MOVZX_GvEw      = 0xB7,
         OP2_PEXTRW_GdUdIb   = 0xC5
     } TwoByteOpcodeID;
 
     typedef enum {
-        OP3_PINSRD_VsdWsd   = 0x22
+        OP3_PINSRD_VsdWsd   = 0x22,
+        OP3_PTEST_VdVd      = 0x17 
     } ThreeByteOpcodeID;
 
+    typedef enum {
+        ESCAPE_PTEST        = 0x38,
+        ESCAPE_PINSRD       = 0x3A 
+    } ThreeByteEscape;
+
     TwoByteOpcodeID jccRel32(Condition cond)
     {
         return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond);
     }
 
     TwoByteOpcodeID setccOpcode(Condition cond)
     {
         return (TwoByteOpcodeID)(OP_SETCC + cond);
@@ -324,46 +344,53 @@ public:
         friend class X86Assembler;
         friend class X86InstructionFormatter;
     public:
         JmpSrc()
             : m_offset(-1)
         {
         }
 
-    private:
         JmpSrc(int offset)
             : m_offset(offset)
         {
         }
 
+        int offset() const {
+            return m_offset;
+        }
+
+    private:
         int m_offset;
     };
     
     class JmpDst {
         friend class X86Assembler;
         friend class X86InstructionFormatter;
     public:
         JmpDst()
             : m_offset(-1)
             , m_used(false)
         {
         }
 
         bool isUsed() const { return m_used; }
         void used() { m_used = true; }
         bool isValid() const { return m_offset != -1; }
-    private:
+
         JmpDst(int offset)
             : m_offset(offset)
             , m_used(false)
         {
             ASSERT(m_offset == offset);
         }
-
+        int offset() const {
+            return m_offset;
+        }
+    private:
         signed int m_offset : 31;
         bool m_used : 1;
     };
 
     X86Assembler()
 #ifdef JS_METHODJIT_SPEW
       : isOOLPath(false)
 #endif
@@ -434,17 +461,19 @@ public:
         js::JaegerSpew(js::JSpew_Insns,
                        IPFX "addl       %s, %s\n", MAYBE_PAD,
                        nameIReg(4,src), nameIReg(4,dst));
         m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
     }
 
     void addl_mr(int offset, RegisterID base, RegisterID dst)
     {
-        FIXME_INSN_PRINTING;
+        js::JaegerSpew(js::JSpew_Insns,
+                       IPFX "addl       %s0x%x(%s), %s\n", MAYBE_PAD,
+                       PRETTY_PRINT_OFFSET(offset), nameIReg(4,base), nameIReg(4,dst));
         m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
     }
 
     void addl_rm(RegisterID src, int offset, RegisterID base)
     {
         FIXME_INSN_PRINTING;
         m_formatter.oneByteOp(OP_ADD_EvGv, src, base, offset);
     }
@@ -528,17 +557,19 @@ public:
         js::JaegerSpew(js::JSpew_Insns,
                        IPFX "andl       %s, %s\n", MAYBE_PAD,
                        nameIReg(4,src), nameIReg(4,dst));
         m_formatter.oneByteOp(OP_AND_EvGv, src, dst);
     }
 
     void andl_mr(int offset, RegisterID base, RegisterID dst)
     {
-        FIXME_INSN_PRINTING;
+        js::JaegerSpew(js::JSpew_Insns,
+                       IPFX "andl       %s0x%x(%s), %s\n", MAYBE_PAD,
+                       PRETTY_PRINT_OFFSET(offset), nameIReg(4,base), nameIReg(4,dst));
         m_formatter.oneByteOp(OP_AND_GvEv, dst, base, offset);
     }
 
     void andl_rm(RegisterID src, int offset, RegisterID base)
     {
         FIXME_INSN_PRINTING;
         m_formatter.oneByteOp(OP_AND_EvGv, src, base, offset);
     }
@@ -744,17 +775,19 @@ public:
         js::JaegerSpew(js::JSpew_Insns,
                        IPFX "subl       %s, %s\n", MAYBE_PAD,
                        nameIReg(4,src), nameIReg(4,dst));
         m_formatter.oneByteOp(OP_SUB_EvGv, src, dst);
     }
 
     void subl_mr(int offset, RegisterID base, RegisterID dst)
     {
-        FIXME_INSN_PRINTING;
+        js::JaegerSpew(js::JSpew_Insns,
+                       IPFX "subl        %s0x%x(%s), %s\n", MAYBE_PAD,
+                       PRETTY_PRINT_OFFSET(offset), nameIReg(4,base), nameIReg(4,dst));
         m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset);
     }
 
     void subl_rm(RegisterID src, int offset, RegisterID base)
     {
         FIXME_INSN_PRINTING;
         m_formatter.oneByteOp(OP_SUB_EvGv, src, base, offset);
     }
@@ -967,16 +1000,28 @@ public:
                        IPFX "shlq       $%d, %s\n", MAYBE_PAD, imm, nameIReg(8, dst));
         if (imm == 1)
             m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
         else {
             m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
             m_formatter.immediate8(imm);
         }
     }
+
+    void shrq_i8r(int imm, RegisterID dst)
+    {
+        js::JaegerSpew(js::JSpew_Insns,
+                       IPFX "shrq       $%d, %s\n", MAYBE_PAD, imm, nameIReg(8, dst));
+        if (imm == 1)
+            m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SHR, dst);
+        else {
+            m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SHR, dst);
+            m_formatter.immediate8(imm);
+        }
+    }
 #endif
 
     void imull_rr(RegisterID src, RegisterID dst)
     {
         js::JaegerSpew(js::JSpew_Insns,
                        IPFX "imull       %s, %s\n", MAYBE_PAD, nameIReg(4,src), nameIReg(4, dst));
         m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
     }
@@ -1784,17 +1829,19 @@ public:
         JmpSrc r = JmpSrc(m_formatter.size());
         js::JaegerSpew(js::JSpew_Insns,
                        IPFX "call       *%s\n", MAYBE_PAD, nameIReg(dst));
         return r;
     }
     
     void call_m(int offset, RegisterID base)
     {
-        FIXME_INSN_PRINTING;
+        js::JaegerSpew(js::JSpew_Insns,
+                       IPFX "call       %s0x%x(%s)\n", MAYBE_PAD,
+                       PRETTY_PRINT_OFFSET(offset), nameIReg(base));
         m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset);
     }
 
     JmpSrc jmp()
     {
         m_formatter.oneByteOp(OP_JMP_rel32);
         JmpSrc r = m_formatter.immediateRel32();
         js::JaegerSpew(js::JSpew_Insns,
@@ -1822,16 +1869,28 @@ public:
 
     void jmp_m(int offset, RegisterID base, RegisterID index, int scale) {
         js::JaegerSpew(js::JSpew_Insns,
                        IPFX "jmp       ((%d(%s,%s,%d)))\n", MAYBE_PAD,
                        offset, nameIReg(base), nameIReg(index), scale);
         m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, index, scale, offset);
     }
 
+#if WTF_CPU_X86_64
+    void jmp_rip(int ripOffset) {
+        // rip-relative addressing.
+        m_formatter.oneByteRipOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, ripOffset);
+    }
+
+    void immediate64(int64_t imm)
+    {
+        m_formatter.immediate64(imm);
+    }
+#endif
+
     JmpSrc jne()
     {
         return jCC(ConditionNE);
     }
     
     JmpSrc jnz()
     {
         // printing done by jne()
@@ -2018,16 +2077,27 @@ public:
     {
         js::JaegerSpew(js::JSpew_Insns,
                        IPFX "cvttsd2si  %s, %s\n", MAYBE_PAD,
                        nameFPReg(src), nameIReg(dst));
         m_formatter.prefix(PRE_SSE_F2);
         m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
     }
 
+#if WTF_CPU_X86_64
+    void cvttsd2sq_rr(XMMRegisterID src, RegisterID dst)
+    {
+        js::JaegerSpew(js::JSpew_Insns,
+                       IPFX "cvttsd2sq  %s, %s\n", MAYBE_PAD,
+                       nameFPReg(src), nameIReg(dst));
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp64(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
+    }
+#endif
+
     void unpcklps_rr(XMMRegisterID src, XMMRegisterID dst)
     {
         js::JaegerSpew(js::JSpew_Insns,
                        IPFX "unpcklps   %s, %s\n", MAYBE_PAD,
                        nameFPReg(src), nameFPReg(dst));
         m_formatter.twoByteOp(OP2_UNPCKLPS_VsdWsd, (RegisterID)dst, (RegisterID)src);
     }
 
@@ -2045,16 +2115,33 @@ public:
         js::JaegerSpew(js::JSpew_Insns,
                        IPFX "pslldq     %s, %d\n", MAYBE_PAD,
                        nameFPReg(dest), shift);
         m_formatter.prefix(PRE_SSE_66);
         m_formatter.twoByteOp(OP2_PSRLDQ_Vd, (RegisterID)3, (RegisterID)dest);
         m_formatter.immediate8(shift);
     }
 
+    void movmskpd_rr(XMMRegisterID src, RegisterID dst)
+    {
+        js::JaegerSpew(js::JSpew_Insns,
+                       IPFX "movmskpd   %s, %s\n", MAYBE_PAD,
+                       nameFPReg(src), nameIReg(dst));
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.twoByteOp(OP2_MOVMSKPD_EdVd, (RegisterID)src, dst);
+    }
+
+    void ptest_rr(XMMRegisterID lhs, XMMRegisterID rhs) {
+        js::JaegerSpew(js::JSpew_Insns,
+                       IPFX "ptest      %s, %s\n", MAYBE_PAD,
+                       nameFPReg(lhs), nameFPReg(rhs));
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.threeByteOp(OP3_PTEST_VdVd, ESCAPE_PTEST, (RegisterID)rhs, (RegisterID)lhs);
+    }
+
     void movd_rr(XMMRegisterID src, RegisterID dst)
     {
         js::JaegerSpew(js::JSpew_Insns,
                        IPFX "movd       %s, %s\n", MAYBE_PAD,
                        nameFPReg(src), nameIReg(dst));
         m_formatter.prefix(PRE_SSE_66);
         m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
     }
@@ -2279,17 +2366,28 @@ public:
     }
 
     void pinsrd_rr(RegisterID src, XMMRegisterID dst)
     {
         js::JaegerSpew(js::JSpew_Insns,
                        IPFX "pinsrd     $1, %s, %s\n", MAYBE_PAD,
                        nameIReg(src), nameFPReg(dst));
         m_formatter.prefix(PRE_SSE_66);
-        m_formatter.threeByteOp(OP3_PINSRD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+        m_formatter.threeByteOp(OP3_PINSRD_VsdWsd, ESCAPE_PINSRD, (RegisterID)dst, (RegisterID)src);
+        m_formatter.immediate8(0x01); // the $1
+    }
+
+    void pinsrd_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        js::JaegerSpew(js::JSpew_Insns,
+                       IPFX "pinsrd     $1, %s0x%x(%s), %s\n", MAYBE_PAD,
+                       PRETTY_PRINT_OFFSET(offset),
+                       nameIReg(base), nameFPReg(dst));
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.threeByteOp(OP3_PINSRD_VsdWsd, ESCAPE_PINSRD, (RegisterID)dst, base, offset);
         m_formatter.immediate8(0x01); // the $1
     }
 
     // Misc instructions:
 
     void int3()
     {
         js::JaegerSpew(js::JSpew_Insns, IPFX "int3\n", MAYBE_PAD);
@@ -2349,22 +2447,50 @@ public:
 
     // Linking & patching:
     //
     // 'link' and 'patch' methods are for use on unprotected code - such as the code
     // within the AssemblerBuffer, and code being patched by the patch buffer.  Once
     // code has been finalized it is (platform support permitting) within a non-
     // writable region of memory; to modify the code in an execute-only execuable
     // pool the 'repatch' and 'relink' methods should be used.
+    
+    // Like Lua's emitter, we thread jump lists through the unpatched target
+    // field, which will get fixed up when the label (which has a pointer to
+    // the head of the jump list) is bound.
+    bool nextJump(const JmpSrc& from, JmpSrc* next)
+    {
+        char* code = reinterpret_cast<char*>(m_formatter.data());
+        int32 offset = getInt32(code + from.m_offset);
+        if (offset == -1)
+            return false;
+        *next = JmpSrc(offset);
+        return true;
+    }
+    void setNextJump(const JmpSrc& from, const JmpSrc &to)
+    {
+        // Sanity check - if the assembler has OOM'd, it will start overwriting
+        // its internal buffer and thus our links could be garbage.
+        if (oom())
+            return;
+
+        char* code = reinterpret_cast<char*>(m_formatter.data());
+        setInt32(code + from.m_offset, to.m_offset);
+    }
 
     void linkJump(JmpSrc from, JmpDst to)
     {
         ASSERT(from.m_offset != -1);
         ASSERT(to.m_offset != -1);
 
+        // Sanity check - if the assembler has OOM'd, it will start overwriting
+        // its internal buffer and thus our links could be garbage.
+        if (oom())
+            return;
+
         js::JaegerSpew(js::JSpew_Insns,
                        IPFX "##link     ((%d)) jumps to ((%d))\n", MAYBE_PAD,
                        from.m_offset, to.m_offset);
         char* code = reinterpret_cast<char*>(m_formatter.data());
         setRel32(code + from.m_offset, code + to.m_offset);
     }
     
     static void linkJump(void* code, JmpSrc from, void* to)
@@ -2498,44 +2624,59 @@ public:
         return m_formatter.executableAllocAndCopy(allocator, poolp, kind);
     }
 
     void executableCopy(void* buffer)
     {
         memcpy(buffer, m_formatter.buffer(), size());
     }
 
-private:
-
-    static void setPointer(void* where, void* value)
-    {
-        js::JaegerSpew(js::JSpew_Insns,
-                       ISPFX "##setPtr     ((where=%p)) ((value=%p))\n", where, value);
-        reinterpret_cast<void**>(where)[-1] = value;
-    }
-
-    static void setInt32(void* where, int32_t value)
-    {
-        reinterpret_cast<int32_t*>(where)[-1] = value;
-    }
-
     static void setRel32(void* from, void* to)
     {
         intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
         ASSERT(offset == static_cast<int32_t>(offset));
 #define JS_CRASH(x) *(int *)x = 0
         if (offset != static_cast<int32_t>(offset))
             JS_CRASH(0xC0DE);
 #undef JS_CRASH
 
         js::JaegerSpew(js::JSpew_Insns,
                        ISPFX "##setRel32 ((from=%p)) ((to=%p))\n", from, to);
         setInt32(from, offset);
     }
 
+    static void *getRel32Target(void* where)
+    {
+        int32_t rel = getInt32(where);
+        return (char *)where + rel;
+    }
+
+    static void *getPointer(void* where)
+    {
+        return reinterpret_cast<void **>(where)[-1];
+    }
+
+    static void setPointer(void* where, const void* value)
+    {
+        js::JaegerSpew(js::JSpew_Insns,
+                       ISPFX "##setPtr     ((where=%p)) ((value=%p))\n", where, value);
+        reinterpret_cast<const void**>(where)[-1] = value;
+    }
+
+private:
+
+    static int32_t getInt32(void* where)
+    {
+        return reinterpret_cast<int32_t*>(where)[-1];
+    }
+    static void setInt32(void* where, int32_t value)
+    {
+        reinterpret_cast<int32_t*>(where)[-1] = value;
+    }
+
     class X86InstructionFormatter {
 
         static const int maxInstructionSize = 16;
 
     public:
 
         // Legacy prefix bytes:
         //
@@ -2607,16 +2748,24 @@ private:
 
 #if !WTF_CPU_X86_64
         void oneByteOp(OneByteOpcodeID opcode, int reg, void* address)
         {
             m_buffer.ensureSpace(maxInstructionSize);
             m_buffer.putByteUnchecked(opcode);
             memoryModRM(reg, address);
         }
+#else
+        // 64-bit only: plant a one-byte opcode with a no-base ModRM followed
+        // by a 32-bit offset — presumably RIP-relative addressing; confirm
+        // against putModRm's encoding of |noBase|.
+        void oneByteRipOp(OneByteOpcodeID opcode, int reg, int ripOffset)
+        {
+            m_buffer.ensureSpace(maxInstructionSize);
+            m_buffer.putByteUnchecked(opcode);
+            putModRm(ModRmMemoryNoDisp, reg, noBase);
+            m_buffer.putIntUnchecked(ripOffset);
+        }
 #endif
 
         void twoByteOp(TwoByteOpcodeID opcode)
         {
             m_buffer.ensureSpace(maxInstructionSize);
             m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
             m_buffer.putByteUnchecked(opcode);
         }
@@ -2653,26 +2802,36 @@ private:
         {
             m_buffer.ensureSpace(maxInstructionSize);
             m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
             m_buffer.putByteUnchecked(opcode);
             memoryModRM(reg, address);
         }
 #endif
 
-        void threeByteOp(ThreeByteOpcodeID opcode, int reg, RegisterID rm)
+        void threeByteOp(ThreeByteOpcodeID opcode, ThreeByteEscape escape, int reg, RegisterID rm)
         {
             m_buffer.ensureSpace(maxInstructionSize);
             emitRexIfNeeded(reg, 0, rm);
             m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
-            m_buffer.putByteUnchecked(OP_3BYTE_ESCAPE);
+            m_buffer.putByteUnchecked(escape);
             m_buffer.putByteUnchecked(opcode);
             registerModRM(reg, rm);
         }
 
+        // Memory-operand form of a three-byte opcode: emits an optional REX,
+        // the 0x0F escape, the given second escape byte, the opcode, and a
+        // [base + offset] ModRM for |reg|.
+        void threeByteOp(ThreeByteOpcodeID opcode, ThreeByteEscape escape, int reg, RegisterID base, int offset)
+        {
+            m_buffer.ensureSpace(maxInstructionSize);
+            emitRexIfNeeded(reg, 0, base);
+            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            m_buffer.putByteUnchecked(escape);
+            m_buffer.putByteUnchecked(opcode);
+            memoryModRM(reg, base, offset);
+        }
+
 #if WTF_CPU_X86_64
         // Quad-word-sized operands:
         //
        // Used to format 64-bit operations, planting a REX.w prefix.
         // When planting d64 or f64 instructions, not requiring a REX.w prefix,
         // the normal (non-'64'-postfixed) formatters should be used.
 
         void oneByteOp64(OneByteOpcodeID opcode)
--- a/js/src/config/autoconf.mk.in
+++ b/js/src/config/autoconf.mk.in
@@ -333,16 +333,17 @@ NEXT_ROOT	= @NEXT_ROOT@
 GCC_VERSION	= @GCC_VERSION@
 UNIVERSAL_BINARY= @UNIVERSAL_BINARY@
 MOZ_CAN_RUN_PROGRAMS = @MOZ_CAN_RUN_PROGRAMS@
 HAVE_DTRACE= @HAVE_DTRACE@
 
 VISIBILITY_FLAGS = @VISIBILITY_FLAGS@
 WRAP_SYSTEM_INCLUDES = @WRAP_SYSTEM_INCLUDES@
 
+ENABLE_ION = @ENABLE_ION@
 ENABLE_TRACEJIT = @ENABLE_TRACEJIT@
 ENABLE_METHODJIT = @ENABLE_METHODJIT@
 NANOJIT_ARCH = @NANOJIT_ARCH@
 HAVE_ARM_SIMD= @HAVE_ARM_SIMD@
 
 JS_SHARED_LIBRARY = @JS_SHARED_LIBRARY@
 HAVE_LINUX_PERF_EVENT_H = @HAVE_LINUX_PERF_EVENT_H@
 
--- a/js/src/config/find_vanilla_new_calls
+++ b/js/src/config/find_vanilla_new_calls
@@ -55,20 +55,20 @@ if [ ! -f $file ] ; then
 fi
 
 tmpfile1=`mktemp`
 tmpfile2=`mktemp`
 nm -C $file > $tmpfile1
 
 # Need to double-escape '[' and ']' to stop grep from interpreting them
 # specially.
-grep 'operator new(unsigned int)'        $tmpfile1 >> $tmpfile2
-grep 'operator new(unsigned long)'       $tmpfile1 >> $tmpfile2
-grep 'operator new\\[\\](unsigned int)'  $tmpfile1 >> $tmpfile2
-grep 'operator new\\[\\](unsigned long)' $tmpfile1 >> $tmpfile2
+grep '^operator new(unsigned int)'        $tmpfile1 >> $tmpfile2
+grep '^operator new(unsigned long)'       $tmpfile1 >> $tmpfile2
+grep '^operator new\\[\\](unsigned int)'  $tmpfile1 >> $tmpfile2
+grep '^operator new\\[\\](unsigned long)' $tmpfile1 >> $tmpfile2
 rm -f $tmpfile1
 
 if [ -s $tmpfile2 ] ; then
     echo "TEST-UNEXPECTED-FAIL | find_vanilla_new_calls | found calls are listed below"
     cat $tmpfile2
     echo
     rm -f $tmpfile2
     exit 1
--- a/js/src/configure.in
+++ b/js/src/configure.in
@@ -2480,49 +2480,54 @@ fi
 
 dnl Configure JIT support
 
 case "$target" in
 i?86-*)
     ENABLE_TRACEJIT=1
     NANOJIT_ARCH=i386
     ENABLE_METHODJIT=1
+    ENABLE_ION=1
     ENABLE_MONOIC=1
     ENABLE_POLYIC=1
     ENABLE_METHODJIT_TYPED_ARRAY=1
     AC_DEFINE(JS_CPU_X86)
     AC_DEFINE(JS_NUNBOX32)
     ;;
 x86_64*-*)
     ENABLE_TRACEJIT=1
     NANOJIT_ARCH=X64
     ENABLE_METHODJIT=1
+    ENABLE_ION=1
     ENABLE_MONOIC=1
     ENABLE_POLYIC=1
     ENABLE_METHODJIT_TYPED_ARRAY=1
     AC_DEFINE(JS_CPU_X64)
     AC_DEFINE(JS_PUNBOX64)
     ;;
 arm*-*)
     ENABLE_TRACEJIT=1
     NANOJIT_ARCH=ARM
     ENABLE_METHODJIT=1
+dnl    ENABLE_ION=0
     ENABLE_MONOIC=1
     ENABLE_POLYIC=1
     ENABLE_POLYIC_TYPED_ARRAY=1
     AC_DEFINE(JS_CPU_ARM)
+dnl    ENABLE_POLYIC_TYPED_ARRAY=0
     AC_DEFINE(JS_NUNBOX32)
     ;;
 sparc*-*)
     ENABLE_TRACEJIT=1
     NANOJIT_ARCH=Sparc
     ENABLE_METHODJIT=1
     ENABLE_MONOIC=1
     ENABLE_POLYIC=1
     ENABLE_METHODJIT_TYPED_ARRAY=1
+dnl ENABLE_ION=0
     AC_DEFINE(JS_CPU_SPARC)
     AC_DEFINE(JS_NUNBOX32)
     ;;
 esac
 
 MOZ_ARG_DISABLE_BOOL(methodjit,
 [  --disable-methodjit           Disable method JIT support],
   ENABLE_METHODJIT= )
@@ -2539,16 +2544,26 @@ MOZ_ARG_DISABLE_BOOL(tracejit,
 [  --disable-tracejit      Disable tracing JIT support],
   ENABLE_TRACEJIT=)
 
 MOZ_ARG_ENABLE_BOOL(methodjit-spew,
 [  --enable-methodjit-spew      Enable method JIT spew support],
   ENABLE_METHODJIT_SPEW=1,
   ENABLE_METHODJIT_SPEW= )
 
+MOZ_ARG_DISABLE_BOOL(polyic-typed-array,
+[  --disable-polyic-typed-array      Disable use of Typed Array PICs by JIT compiler],
+  ENABLE_POLYIC_TYPED_ARRAY=,
+  ENABLE_POLYIC_TYPED_ARRAY=1 )
+
+MOZ_ARG_DISABLE_BOOL(ion,
+[  --disable-ion      Disable use of IONMonkey compiler],
+  ENABLE_ION=,
+  ENABLE_ION=1 )
+
 AC_SUBST(ENABLE_METHODJIT)
 
 if test "$ENABLE_METHODJIT"; then
     AC_DEFINE(JS_METHODJIT)
 fi
 
 if test "$ENABLE_MONOIC"; then
     AC_DEFINE(JS_MONOIC)
@@ -2561,16 +2576,22 @@ fi
 if test "$ENABLE_METHODJIT_TYPED_ARRAY"; then
     AC_DEFINE(JS_METHODJIT_TYPED_ARRAY)
 fi
 
 if test "$ENABLE_METHODJIT_SPEW"; then
     AC_DEFINE(JS_METHODJIT_SPEW)
 fi
 
+AC_SUBST(ENABLE_ION)
+
+if test "$ENABLE_ION"; then
+    AC_DEFINE(JS_ION)
+fi
+
 if test "$ENABLE_TRACEJIT"; then
 
 AC_DEFINE(FEATURE_NANOJIT)
 AC_DEFINE(JS_TRACER)
 
 case "$target" in
 i?86-*)
     AC_DEFINE(AVMPLUS_IA32)
new file mode 100644
--- /dev/null
+++ b/js/src/ion/Bailouts.cpp
@@ -0,0 +1,301 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=79:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   David Anderson <dvander@alliedmods.net>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#include "jscntxt.h"
+#include "jscompartment.h"
+#include "Bailouts.h"
+#include "Snapshots.h"
+#include "Ion.h"
+#include "IonCompartment.h"
+
+using namespace js;
+using namespace js::ion;
+
+// Walks the snapshot for a bailed-out Ion frame, materializing each recorded
+// slot as a js::Value from registers/stack captured in the BailoutEnvironment.
+class IonFrameIterator
+{
+    IonScript *ionScript_;
+    BailoutEnvironment *env_;
+    SnapshotReader reader_;
+
+    // Box a raw payload word with a statically-known type tag.
+    static Value FromTypedPayload(JSValueType type, uintptr_t payload)
+    {
+        switch (type) {
+          case JSVAL_TYPE_INT32:
+            return Int32Value(payload);
+          case JSVAL_TYPE_BOOLEAN:
+            return BooleanValue(!!payload);
+          case JSVAL_TYPE_STRING:
+            return StringValue(reinterpret_cast<JSString *>(payload));
+          case JSVAL_TYPE_OBJECT:
+            return ObjectValue(*reinterpret_cast<JSObject *>(payload));
+          default:
+            JS_NOT_REACHED("unexpected type - needs payload");
+            return UndefinedValue();
+        }
+    }
+
+    // Fetch the raw word described by a snapshot Location, either from a
+    // spilled stack slot or a saved register.
+    uintptr_t fromLocation(const SnapshotReader::Location &loc) {
+        if (loc.isStackSlot())
+            return env_->readSlot(loc.stackSlot());
+        return env_->readReg(loc.reg());
+    }
+
+  public:
+    IonFrameIterator(IonScript *ionScript, BailoutEnvironment *env, const uint8 *start, const uint8 *end)
+      : ionScript_(ionScript),
+        env_(env),
+        reader_(start, end)
+    {
+    }
+
+    // Decode and return the next slot in the snapshot. Calls must follow the
+    // snapshot's recording order (see RestoreOneFrame).
+    Value read() {
+        SnapshotReader::Slot slot = reader_.readSlot();
+        switch (slot.mode()) {
+          case SnapshotReader::DOUBLE_REG:
+            return DoubleValue(env_->readFloatReg(slot.floatReg()));
+
+          case SnapshotReader::TYPED_REG:
+            return FromTypedPayload(slot.knownType(), env_->readReg(slot.reg()));
+
+          case SnapshotReader::TYPED_STACK:
+          {
+            JSValueType type = slot.knownType();
+            if (type == JSVAL_TYPE_DOUBLE)
+                return DoubleValue(env_->readDoubleSlot(slot.stackSlot()));
+            return FromTypedPayload(type, env_->readSlot(slot.stackSlot()));
+          }
+
+          case SnapshotReader::UNTYPED:
+          {
+              // Type unknown at compile time: reassemble the boxed value from
+              // its tag/payload (nunbox) or single word (punbox).
+              jsval_layout layout;
+#if defined(JS_NUNBOX32)
+              layout.s.tag = (JSValueTag)fromLocation(slot.type());
+              layout.s.payload.word = fromLocation(slot.payload());
+#elif defined(JS_PUNBOX64)
+              layout.asBits = fromLocation(slot.value());
+#endif
+              return Valueify(JSVAL_FROM_LAYOUT(layout));
+          }
+
+          case SnapshotReader::JS_UNDEFINED:
+            return UndefinedValue();
+
+          case SnapshotReader::JS_NULL:
+            return NullValue();
+
+          case SnapshotReader::JS_INT32:
+            return Int32Value(slot.int32Value());
+
+          case SnapshotReader::CONSTANT:
+            return ionScript_->getConstant(slot.constantIndex());
+
+          default:
+            JS_NOT_REACHED("huh?");
+            return UndefinedValue();
+        }
+    }
+
+    uint32 slots() const {
+        return reader_.slots();
+    }
+    uint32 pcOffset() const {
+        return reader_.pcOffset();
+    }
+
+    // Advance past the current frame. Always returns false for now: snapshots
+    // describe a single frame until inlining is supported.
+    bool nextFrame() {
+        reader_.finishReading();
+        return false;
+    }
+};
+
+// Populate an interpreter StackFrame from the current snapshot frame. The
+// read() calls below must mirror the snapshot's recording order exactly:
+// |this|, formals, fixed slots, then expression-stack slots, ending with the
+// bytecode pc offset.
+static void
+RestoreOneFrame(JSContext *cx, StackFrame *fp, IonFrameIterator &iter)
+{
+    uint32 exprStackSlots = iter.slots() - fp->script()->nfixed;
+
+    if (fp->isFunctionFrame()) {
+        Value thisv = iter.read();
+        fp->formalArgs()[-1] = thisv;
+
+        for (uint32 i = 0; i < fp->fun()->nargs; i++) {
+            Value arg = iter.read();
+            fp->formalArgs()[i] = arg;
+        }
+
+        // |this| plus the formals were consumed from the slot budget.
+        exprStackSlots -= (fp->fun()->nargs + 1);
+    }
+
+    for (uint32 i = 0; i < fp->script()->nfixed; i++) {
+        Value slot = iter.read();
+        fp->slots()[i] = slot;
+    }
+
+    // Rebuild the expression stack and point the interpreter pc at the
+    // bytecode where execution should resume.
+    FrameRegs &regs = cx->regs();
+    for (uint32 i = 0; i < exprStackSlots; i++) {
+        Value v = iter.read();
+        *regs.sp++ = v;
+    }
+    regs.pc = fp->script()->code + iter.pcOffset();
+}
+
+// Convert the bailed-out Ion frame(s) described by |env| into interpreter
+// frames pushed on cx's stack. Returns false on OOM or frame-push failure.
+static bool
+ConvertFrames(JSContext *cx, IonActivation *activation, BailoutEnvironment *env)
+{
+    IonFramePrefix *top = env->top();
+
+    // Recover information about the callee.
+    JSScript *script;
+    IonScript *ionScript;
+    JSFunction *fun = NULL;
+    JSObject *callee = NULL;
+    if (IsCalleeTokenFunction(top->calleeToken())) {
+        callee = CalleeTokenToFunction(top->calleeToken());
+        fun = callee->getFunctionPrivate();
+        script = fun->script();
+    } else {
+        script = CalleeTokenToScript(top->calleeToken());
+    }
+    ionScript = script->ion;
+
+    // Recover the snapshot: either indirectly through a bailout-table id, or
+    // directly when the frame class has no table (see Bailouts.h).
+    uint32 snapshotOffset;
+    if (env->frameClass() != FrameSizeClass::None()) {
+        BailoutId id = env->bailoutId();
+        snapshotOffset = ionScript->bailoutToSnapshot(id);
+    } else {
+        snapshotOffset = env->snapshotOffset();
+    }
+
+    JS_ASSERT(snapshotOffset < ionScript->snapshotsSize());
+    const uint8 *start = ionScript->snapshots() + snapshotOffset;
+    const uint8 *end = ionScript->snapshots() + ionScript->snapshotsSize();
+    IonFrameIterator iter(ionScript, env, start, end);
+
+    // It is critical to temporarily repoint the frame regs here, otherwise
+    // pushing a new frame could clobber existing frames, since the stack code
+    // cannot determine the real stack top. We unpoint the regs after the
+    // bailout completes.
+    cx->stack.repointRegs(&activation->oldFrameRegs());
+
+    BailoutClosure *br = cx->new_<BailoutClosure>();
+    if (!br)
+        return false;
+    activation->setBailout(br);
+
+    // Non-function frames are not supported yet. We don't compile or enter
+    // global scripts so this assert should not fire yet.
+    JS_ASSERT(callee);
+
+    // NOTE(review): on this failure path |br| has already been handed to the
+    // activation and the regs stay repointed — confirm the caller (or the
+    // activation's teardown) cleans both up.
+    StackFrame *fp = cx->stack.pushBailoutFrame(cx, callee, fun, script, br->frameGuard());
+    if (!fp)
+        return false;
+
+    br->setEntryFrame(fp);
+
+    if (callee)
+        fp->formalArgs()[-2].setObject(*callee);
+
+    for (;;) {
+        RestoreOneFrame(cx, fp, iter);
+        if (!iter.nextFrame())
+            break;
+
+        // Once we have method inlining, pushInlineFrame logic should go here.
+        JS_NOT_REACHED("NYI");
+    }
+
+    return true;
+}
+
+// Entry point called from the bailout trampoline with the stack pointer of
+// the spilled-register area (step 3/4 of the scheme described in Bailouts.h).
+// Returns a BAILOUT_RETURN_* code consumed by the trampoline.
+uint32
+ion::Bailout(void **sp)
+{
+    JSContext *cx = GetIonContext()->cx;
+    IonCompartment *ioncompartment = cx->compartment->ionCompartment();
+    IonActivation *activation = ioncompartment->activation();
+    BailoutEnvironment env(ioncompartment, sp);
+
+    if (!ConvertFrames(cx, activation, &env))
+        return BAILOUT_RETURN_FATAL_ERROR;
+
+    return BAILOUT_RETURN_OK;
+}
+
+// After a successful bailout conversion, run the restored frame(s) in the
+// interpreter and store the result into |*vp| on success.
+JSBool
+ion::ThunkToInterpreter(IonFramePrefix *top, Value *vp)
+{
+    JSContext *cx = GetIonContext()->cx;
+    IonActivation *activation = cx->compartment->ionCompartment()->activation();
+    BailoutClosure *br = activation->takeBailout();
+
+    bool ok = Interpret(cx, br->entryfp(), JSINTERP_BAILOUT);
+
+    if (ok)
+        *vp = br->entryfp()->returnValue();
+
+    // The BailoutFrameGuard's destructor will ensure that the frame is
+    // removed.
+    cx->delete_(br);
+
+    // Undo the repointing performed in ConvertFrames.
+    JS_ASSERT(&cx->regs() == &activation->oldFrameRegs());
+    cx->stack.repointRegs(NULL);
+
+    return ok ? JS_TRUE : JS_FALSE;
+}
+
+// Propagate an exception out of Ion code by redirecting the frame's return
+// address to the compartment's error-return stub. Returns the stack
+// adjustment to apply before returning (currently always 0).
+uint32
+ion::HandleException(IonFramePrefix *top)
+{
+    JSContext *cx = GetIonContext()->cx;
+    IonCompartment *ioncompartment = cx->compartment->ionCompartment();
+
+    // Currently, function calls are not supported.
+    JS_ASSERT(top->isEntryFrame());
+
+    // Currently, try blocks are not supported, so we don't have to implement
+    // logic to bailout a bunch o' frames.
+    if (BailoutClosure *closure = ioncompartment->activation()->maybeTakeBailout())
+        cx->delete_(closure);
+
+    top->setReturnAddress(ioncompartment->returnError()->raw());
+    return 0;
+}
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/Bailouts.h
@@ -0,0 +1,178 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=79:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   David Anderson <dvander@alliedmods.net>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsion_bailouts_h__
+#define jsion_bailouts_h__
+
+#include "jstypes.h"
+
+#if defined(JS_CPU_X86)
+# include "ion/x86/Bailouts-x86.h"
+#elif defined(JS_CPU_X64)
+# include "ion/x64/Bailouts-x64.h"
+#elif defined(JS_CPU_ARM)
+# include "ion/arm/Bailouts-arm.h"
+#else
+# error "CPU!"
+#endif
+
+namespace js {
+namespace ion {
+
+// A "bailout" is a condition in which we need to recover an interpreter frame
+// from an IonFrame. Bailouts can happen for the following reasons:
+//   (1) A deoptimization guard, for example, an add overflows or a type check
+//       fails.
+//   (2) A check or assumption held by the JIT is invalidated by the VM, and
+//       JIT code must be thrown away. This includes the GC possibly deciding
+//       to evict live JIT code, or a Type Inference reflow.
+//
+// Note that bailouts as described here do not include normal Ion frame
+// inspection, for example, if an exception must be built or the GC needs to
+// scan an Ion frame for gcthings.
+//
+// The second type of bailout needs a different name - "deoptimization" or
+// "deep bailout". Here we are concerned with eager (or maybe "shallow")
+// bailouts, that happen from JIT code. These happen from guards, like:
+//
+//  cmp [obj + shape], 0x50M37TH1NG
+//  jmp _bailout
+//
+// The bailout target needs to somehow translate the Ion frame (whose state
+// will differ at each program point) to an interpreter frame. This state is
+// captured into the IonScript's snapshot buffer, and for each bailout we know
+// which snapshot corresponds to its state.
+//
+// Roughly, the following needs to happen at the bailout target.
+//   (1) Move snapshot ID into a known stack location (registers cannot be
+//       mutated).
+//   (2) Spill all registers to the stack.
+//   (3) Call a Bailout() routine, whose argument is the stack pointer.
+//   (4) Bailout() will find the IonScript on the stack, use the snapshot ID
+//       to find the structure of the frame, and then use the stack and spilled
+//       registers to perform frame conversion.
+//   (5) Bailout() returns, and the JIT must immediately return to the
+//       interpreter (all frames are converted at once).
+//
+// (2) and (3) are implemented by a trampoline held in the compartment.
+// Naively, we could implement (1) like:
+//
+//   _bailout_ID_1:
+//     push 1
+//     jmp _global_bailout_handler
+//   _bailout_ID_2:
+//     push 2
+//     jmp _global_bailout_handler
+//
+// This takes about 10 extra bytes per guard. On some platforms, we can reduce
+// this overhead to 4 bytes by creating a global jump table, shared again in
+// the compartment:
+//
+//     call _global_bailout_handler
+//     call _global_bailout_handler
+//     call _global_bailout_handler
+//     call _global_bailout_handler
+//      ...
+//    _global_bailout_handler:
+//
+// In the bailout handler, we can recompute which entry in the table was
+// selected by subtracting the return address pushed by the call from the
+// start of the table, and then dividing by the size of a (call X) entry in the
+// table. This gives us a number in [0, TableSize), which we call a
+// "BailoutId".
+//
+// Then, we can provide a per-script mapping from BailoutIds to snapshots,
+// which takes only four bytes per entry.
+//
+// This strategy does not work as given, because the bailout handler has no way
+// to compute the location of an IonScript. Currently, we do not use frame
+// pointers. To account for this we segregate frames into a limited set of
+// "frame sizes", and create a table for each frame size. We also have the
+// option of not using bailout tables, for platforms or situations where the
+// 10 byte cost is more optimal than a bailout table. See IonFrames.h for more
+// detail.
+
+typedef uint32 BailoutId;
+static const BailoutId INVALID_BAILOUT_ID = BailoutId(-1);
+
+// Keep this arbitrarily small for now, for testing.
+static const uint32 BAILOUT_TABLE_SIZE = 16;
+
+// Bailout return codes.
+static const uint32 BAILOUT_RETURN_OK = 0;
+static const uint32 BAILOUT_RETURN_FATAL_ERROR = 1;
+
+// Attached to the compartment for easy passing through from ::Bailout to
+// ::ThunkToInterpreter.
+class BailoutClosure
+{
+    BailoutFrameGuard bfg_;
+    // Entry interpreter frame; NULL until setEntryFrame() is called.
+    StackFrame *entryfp_;
+
+  public:
+    // Initialize entryfp_ so a closure that is destroyed before
+    // setEntryFrame() never exposes an indeterminate pointer.
+    BailoutClosure()
+      : entryfp_(NULL)
+    {
+    }
+
+    BailoutFrameGuard *frameGuard() {
+        return &bfg_;
+    }
+    StackFrame *entryfp() const {
+        return entryfp_;
+    }
+    void setEntryFrame(StackFrame *fp) {
+        entryfp_ = fp;
+    }
+};
+
+// Called from a bailout thunk. Returns a BAILOUT_* error code.
+uint32 Bailout(void **esp);
+
+// Called from a bailout thunk. Interprets the frame(s) that have been bailed
+// out.
+JSBool ThunkToInterpreter(IonFramePrefix *top, Value *vp);
+
+// Called when an error occurs in Ion code. Normally, exceptions are bailouts,
+// and pop the frame. This is called to propagate an exception through multiple
+// frames. The return value is how much stack to adjust before returning.
+uint32 HandleException(IonFramePrefix *top);
+
+}
+}
+
+#endif // jsion_bailouts_h__
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/BitSet.cpp
@@ -0,0 +1,189 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=79:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   Andrew Drake <adrake@adrake.org>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#include "jsutil.h"
+#include "BitSet.h"
+
+using namespace js;
+using namespace js::ion;
+
+// Allocate a BitSet holding values in [0, max] from the temp arena.
+// Returns NULL on allocation failure.
+BitSet *
+BitSet::New(unsigned int max)
+{
+    BitSet *result = new BitSet(max);
+    // Check the allocation itself before dereferencing it; the original
+    // called result->init() unconditionally, a null-deref on OOM.
+    if (!result || !result->init())
+        return NULL;
+    return result;
+}
+
+// Allocate and zero the backing words from the Ion temp allocator.
+// Returns false on OOM, leaving bits_ NULL.
+bool
+BitSet::init()
+{
+    size_t sizeRequired = numWords() * sizeof(*bits_);
+
+    TempAllocator *alloc = GetIonContext()->temp;
+    bits_ = (unsigned long *)alloc->allocate(sizeRequired);
+    if (!bits_)
+        return false;
+
+    // Start with the empty set.
+    memset(bits_, 0, sizeRequired);
+
+    return true;
+}
+
+// Membership test: probe the single backing word that holds |value|'s bit.
+bool
+BitSet::contains(unsigned int value) const
+{
+    JS_ASSERT(bits_);
+    JS_ASSERT(value <= max_);
+
+    unsigned long word = bits_[wordForValue(value)];
+    return (word & bitForValue(value)) != 0;
+}
+
+// O(max): the set is empty iff every backing word is zero.
+bool
+BitSet::empty() const
+{
+    JS_ASSERT(bits_);
+    for (unsigned int i = 0; i < numWords(); i++) {
+        if (bits_[i])
+            return false;
+    }
+    return true;
+}
+
+// Set the bit for |value| in its backing word.
+void
+BitSet::insert(unsigned int value)
+{
+    JS_ASSERT(bits_);
+    JS_ASSERT(value <= max_);
+
+    bits_[wordForValue(value)] |= bitForValue(value);
+}
+
+// Union: OR every word of |other| into this set. Both sets must share max_.
+void
+BitSet::insertAll(const BitSet *other)
+{
+    JS_ASSERT(bits_);
+    JS_ASSERT(other->max_ == max_);
+    JS_ASSERT(other->bits_);
+
+    for (unsigned int i = 0; i < numWords(); i++)
+        bits_[i] |= other->bits_[i];
+}
+
+// Clear the bit for |value| in its backing word.
+void
+BitSet::remove(unsigned int value)
+{
+    JS_ASSERT(bits_);
+    JS_ASSERT(value <= max_);
+
+    bits_[wordForValue(value)] &= ~bitForValue(value);
+}
+
+// Difference: clear every bit that is set in |other|. Both sets must share max_.
+void
+BitSet::removeAll(const BitSet *other)
+{
+    JS_ASSERT(bits_);
+    JS_ASSERT(other->max_ == max_);
+    JS_ASSERT(other->bits_);
+
+    for (unsigned int i = 0; i < numWords(); i++)
+        bits_[i] &= ~other->bits_[i];
+}
+
+// Intersection: AND every word with |other|'s. Both sets must share max_.
+void
+BitSet::intersect(const BitSet *other)
+{
+    JS_ASSERT(bits_);
+    JS_ASSERT(other->max_ == max_);
+    JS_ASSERT(other->bits_);
+
+    for (unsigned int i = 0; i < numWords(); i++)
+        bits_[i] &= other->bits_[i];
+}
+
+// Intersect with |other|, returning true if the intersection changed this
+// set's contents (used to drive dataflow iteration to a fixed point).
+bool
+BitSet::fixedPointIntersect(const BitSet *other)
+{
+    JS_ASSERT(bits_);
+    JS_ASSERT(other->max_ == max_);
+    JS_ASSERT(other->bits_);
+
+    bool changed = false;
+
+    for (unsigned int i = 0; i < numWords(); i++) {
+        // Remember the word before masking so we can detect a change.
+        unsigned long old = bits_[i];
+        bits_[i] &= other->bits_[i];
+
+        if (!changed && old != bits_[i])
+            changed = true;
+    }
+    return changed;
+}
+
+// In-place complement of every backing word. Note this also flips the unused
+// padding bits beyond max_ in the last word.
+void
+BitSet::complement()
+{
+    JS_ASSERT(bits_);
+    for (unsigned int i = 0; i < numWords(); i++)
+        bits_[i] = ~bits_[i];
+}
+
+// Reset the set to empty by zeroing every backing word.
+void
+BitSet::clear()
+{
+    JS_ASSERT(bits_);
+    for (unsigned int i = 0; i < numWords(); i++)
+        bits_[i] = 0;
+}
+
+// Iterator positioned at the first set element (the Iterator constructor
+// advances past leading absent values).
+BitSet::Iterator
+BitSet::begin()
+{
+    return Iterator(*this, 0);
+}
+
+// One-past-the-end sentinel: index max_ + 1 is never a valid element.
+BitSet::Iterator
+BitSet::end()
+{
+    return Iterator(*this, max_ + 1);
+}
new file mode 100644
--- /dev/null
+++ b/js/src/ion/BitSet.h
@@ -0,0 +1,163 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=79:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   Andrew Drake <adrake@adrake.org>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsion_bitset_h__
+#define jsion_bitset_h__
+
+#include "IonAllocPolicy.h"
+
+namespace js {
+namespace ion {
+
+// Provides constant time set insertion and removal, and fast linear
+// set operations such as intersection, difference, and union.
+// N.B. All set operations must be performed on sets with the same maximum.
+class BitSet : private TempObject
+{
+  private:
+    BitSet(unsigned int max) :
+        max_(max),
+        bits_(NULL) {};
+
+    // Largest value the set can hold, inclusive.
+    unsigned int max_;
+
+    // Backing word array; NULL until init() succeeds.
+    unsigned long *bits_;
+
+    static inline unsigned long bitForValue(unsigned int value) {
+        // Use an unsigned one: the original "1l" left-shifted a signed long
+        // into the sign bit when value % wordbits == wordbits - 1, which is
+        // undefined behavior.
+        return 1ul << (unsigned long)(value % (8 * sizeof(unsigned long)));
+    }
+
+    static inline unsigned int wordForValue(unsigned int value) {
+        return value / (8 * sizeof(unsigned long));
+    }
+
+    inline unsigned int numWords() const {
+        return 1 + max_ / (8 * sizeof(*bits_));
+    }
+
+    bool init();
+
+  public:
+    class Iterator;
+
+    static BitSet *New(unsigned int max);
+
+    unsigned int getMax() const {
+        return max_;
+    }
+
+    // O(1): Check if this set contains the given value.
+    bool contains(unsigned int value) const;
+
+    // O(max): Check if this set contains any value.
+    bool empty() const;
+
+    // O(1): Insert the given value into this set.
+    void insert(unsigned int value);
+
+    // O(max): Insert every element of the given set into this set.
+    void insertAll(const BitSet *other);
+
+    // O(1): Remove the given value from this set.
+    void remove(unsigned int value);
+
+    // O(max): Remove every element of the given set from this set.
+    void removeAll(const BitSet *other);
+
+    // O(max): Intersect this set with the given set.
+    void intersect(const BitSet *other);
+
+    // O(max): Intersect this set with the given set; return whether the
+    // intersection caused the set to change.
+    bool fixedPointIntersect(const BitSet *other);
+
+    // O(max): Does inplace complement of the set.
+    void complement();
+
+    // O(max): Clear this set.
+    void clear();
+
+    // Iterator to the beginning of this set.
+    Iterator begin();
+
+    // Iterator to the end of this set.
+    Iterator end();
+
+};
+
+// Forward iterator over the members of a BitSet, yielding set indices in
+// ascending order. end() is represented by index max_ + 1.
+class BitSet::Iterator
+{
+  private:
+    BitSet &set_;
+    unsigned index_;
+
+  public:
+    Iterator(BitSet &set, unsigned int index) :
+      set_(set),
+      index_(index)
+    {
+        // Skip forward to the first value actually present in the set.
+        if (index_ <= set_.max_ && !set_.contains(index_))
+            (*this)++;
+    }
+
+    bool operator!=(const Iterator &other) const {
+        return index_ != other.index_;
+    }
+
+    // FIXME (668305): Use bit scan.
+    // NOTE(review): this is the postfix form but returns *this by reference
+    // (pre-increment semantics) — unconventional; confirm callers expect it.
+    Iterator& operator++(int dummy) {
+        JS_ASSERT(index_ <= set_.max_);
+        do {
+            index_++;
+        } while (index_ <= set_.max_ && !set_.contains(index_));
+        return *this;
+    }
+
+    unsigned int operator *() {
+        JS_ASSERT(index_ <= set_.max_);
+        return index_;
+    }
+};
+
+}
+}
+
+#endif
new file mode 100644
--- /dev/null
+++ b/js/src/ion/C1Spewer.cpp
@@ -0,0 +1,238 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=79:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   David Anderson <danderson@mozilla.com>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifdef DEBUG
+
+#include <stdarg.h>
+#include "IonBuilder.h"
+#include "Ion.h"
+#include "C1Spewer.h"
+#include "MIRGraph.h"
+#include "IonLIR.h"
+#include "jsscriptinlines.h"
+#include "LinearScan.h"
+
+using namespace js;
+using namespace js::ion;
+
+bool
+C1Spewer::init(const char *path)
+{
+    spewout_ = fopen(path, "w");
+    return (spewout_ != NULL);
+}
+
+void
+C1Spewer::beginFunction(MIRGraph *graph, JSScript *script)
+{
+    if (!spewout_)
+        return;
+
+    this->graph  = graph;
+    this->script = script;
+
+    fprintf(spewout_, "begin_compilation\n");
+    fprintf(spewout_, "  name \"%s:%d\"\n", script->filename, script->lineno);
+    fprintf(spewout_, "  method \"%s:%d\"\n", script->filename, script->lineno);
+    fprintf(spewout_, "  date %d\n", (int)time(NULL));
+    fprintf(spewout_, "end_compilation\n");
+}
+
+void
+C1Spewer::spewPass(const char *pass)
+{
+    if (!spewout_)
+        return;
+
+    fprintf(spewout_, "begin_cfg\n");
+    fprintf(spewout_, "  name \"%s\"\n", pass);
+
+    for (MBasicBlockIterator block(graph->begin()); block != graph->end(); block++)
+        spewPass(spewout_, *block);
+
+    fprintf(spewout_, "end_cfg\n");
+    fflush(spewout_);
+}
+
+void
+C1Spewer::spewIntervals(const char *pass, LinearScanAllocator *regalloc)
+{
+    if (!spewout_)
+        return;
+
+    fprintf(spewout_, "begin_intervals\n");
+    fprintf(spewout_, " name \"%s\"\n", pass);
+
+    size_t nextId = 0x4000;
+    for (MBasicBlockIterator block(graph->begin()); block != graph->end(); block++)
+        spewIntervals(spewout_, *block, regalloc, nextId);
+
+    fprintf(spewout_, "end_intervals\n");
+    fflush(spewout_);
+}
+
+void
+C1Spewer::endFunction()
+{
+    return;
+}
+
+void
+C1Spewer::finish()
+{
+    if (spewout_)
+        fclose(spewout_);
+}
+
+static void
+DumpDefinition(FILE *fp, MDefinition *def)
+{
+    fprintf(fp, "      ");
+    fprintf(fp, "%u %lu ", def->id(), def->useCount());
+    def->printName(fp);
+    fprintf(fp, " ");
+    def->printOpcode(fp);
+    fprintf(fp, " <|@\n");
+}
+
+static void
+DumpLIR(FILE *fp, LInstruction *ins)
+{
+    fprintf(fp, "      ");
+    fprintf(fp, "%d ", ins->id());
+    ins->print(fp);
+    fprintf(fp, " <|@\n");
+}
+
+void
+C1Spewer::spewIntervals(FILE *fp, MBasicBlock *block, LinearScanAllocator *regalloc, size_t &nextId)
+{
+    LBlock *lir = block->lir();
+
+    for (LInstructionIterator ins = lir->begin(); ins != lir->end(); ins++) {
+        for (size_t k = 0; k < ins->numDefs(); k++) {
+            VirtualRegister *vreg = &regalloc->vregs[ins->getDef(k)->virtualRegister()];
+
+            for (size_t i = 0; i < vreg->numIntervals(); i++) {
+                LiveInterval *live = vreg->getInterval(i);
+                if (live->numRanges()) {
+                    fprintf(fp, "%d object \"", (i == 0) ? vreg->reg() : int32(nextId++));
+                    LAllocation::PrintAllocation(fp, live->getAllocation());
+                    fprintf(fp, "\" %d -1", vreg->reg());
+                    for (size_t j = 0; j < live->numRanges(); j++) {
+                        fprintf(fp, " [%d, %d[", live->getRange(j)->from.pos(),
+                                live->getRange(j)->to.pos());
+                    }
+                    for (size_t j = 0; j < vreg->numUses(); j++)
+                        fprintf(fp, " %d M", vreg->getUse(j)->ins->id() * 2);
+                    fprintf(fp, " \"\"\n");
+                }
+            }
+        }
+    }
+}
+void
+C1Spewer::spewPass(FILE *fp, MBasicBlock *block)
+{
+    fprintf(fp, "  begin_block\n");
+    fprintf(fp, "    name \"B%d\"\n", block->id());
+    fprintf(fp, "    from_bci -1\n");
+    fprintf(fp, "    to_bci -1\n");
+
+    fprintf(fp, "    predecessors");
+    for (uint32 i = 0; i < block->numPredecessors(); i++) {
+        MBasicBlock *pred = block->getPredecessor(i);
+        fprintf(fp, " \"B%d\"", pred->id());
+    }
+    fprintf(fp, "\n");
+
+    fprintf(fp, "    successors");
+    for (uint32 i = 0; i < block->numSuccessors(); i++) {
+        MBasicBlock *successor = block->getSuccessor(i);
+        fprintf(fp, " \"B%d\"", successor->id());
+    }
+    fprintf(fp, "\n");
+
+    fprintf(fp, "    xhandlers\n");
+    fprintf(fp, "    flags\n");
+
+    if (block->lir() && block->lir()->begin() != block->lir()->end()) {
+        fprintf(fp, "    first_lir_id %d\n", block->lir()->firstId());
+        fprintf(fp, "    last_lir_id %d\n", block->lir()->lastId());
+    }
+
+    fprintf(fp, "    begin_states\n");
+
+    fprintf(fp, "      begin_locals\n");
+    fprintf(fp, "        size %d\n", (int)block->numEntrySlots());
+    fprintf(fp, "        method \"None\"\n");
+    for (uint32 i = 0; i < block->numEntrySlots(); i++) {
+        MDefinition *ins = block->getEntrySlot(i);
+        fprintf(fp, "        ");
+        fprintf(fp, "%d ", i);
+        ins->printName(fp);
+        fprintf(fp, "\n");
+    }
+    fprintf(fp, "      end_locals\n");
+
+    fprintf(fp, "    end_states\n");
+
+    fprintf(fp, "    begin_HIR\n");
+    for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++)
+        DumpDefinition(fp, *phi);
+    for (MInstructionIterator i(block->begin()); i != block->end(); i++)
+        DumpDefinition(fp, *i);
+    fprintf(fp, "    end_HIR\n");
+
+    if (block->lir()) {
+        fprintf(fp, "    begin_LIR\n");
+        for (size_t i = 0; i < block->lir()->numPhis(); i++)
+            DumpLIR(fp, block->lir()->getPhi(i));
+        for (LInstructionIterator i(block->lir()->begin()); i != block->lir()->end(); i++)
+            DumpLIR(fp, *i);
+        fprintf(fp, "    end_LIR\n");
+    }
+
+    fprintf(fp, "  end_block\n");
+}
+
+#endif /* DEBUG */
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/C1Spewer.h
@@ -0,0 +1,83 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=79:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   David Anderson <danderson@mozilla.com>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifdef DEBUG
+
+#ifndef jsion_c1spewer_h__
+#define jsion_c1spewer_h__
+
+#include "jscntxt.h"
+#include "MIR.h"
+#include "LinearScan.h"
+
+namespace js {
+namespace ion {
+
+class C1Spewer
+{
+    MIRGraph *graph;
+    JSScript *script;
+    FILE *spewout_;
+
+  public:
+    C1Spewer()
+      : graph(NULL), script(NULL), spewout_(NULL)
+    { }
+
+    bool init(const char *path);
+    void beginFunction(MIRGraph *graph, JSScript *script);
+    void spewPass(const char *pass);
+    void spewIntervals(const char *pass, LinearScanAllocator *regalloc);
+    void endFunction();
+    void finish();
+
+  private:
+    void spewPass(FILE *fp, MBasicBlock *block);
+    void spewIntervals(FILE *fp, MBasicBlock *block, LinearScanAllocator *regalloc, size_t &nextId);
+};
+
+} // namespace ion
+} // namespace js
+
+#endif // jsion_c1spewer_h__
+
+#endif /* DEBUG */
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/CodeGenerator.cpp
@@ -0,0 +1,314 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=79:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   David Anderson <dvander@alliedmods.net>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#include "CodeGenerator.h"
+#include "IonLinker.h"
+#include "MIRGenerator.h"
+#include "shared/CodeGenerator-shared-inl.h"
+#include "jsnum.h"
+
+using namespace js;
+using namespace js::ion;
+
+CodeGenerator::CodeGenerator(MIRGenerator *gen, LIRGraph &graph)
+  : CodeGeneratorSpecific(gen, graph)
+{
+}
+
+bool
+CodeGenerator::visitValueToInt32(LValueToInt32 *lir)
+{
+    ValueOperand operand = ToValue(lir, LValueToInt32::Input);
+    Register output = ToRegister(lir->output());
+
+    Assembler::Condition cond;
+    Label done, simple, isInt32, isBool, notDouble;
+
+    // Type-check switch.
+    cond = masm.testInt32(Assembler::Equal, operand);
+    masm.j(cond, &isInt32);
+    cond = masm.testBoolean(Assembler::Equal, operand);
+    masm.j(cond, &isBool);
+    cond = masm.testDouble(Assembler::NotEqual, operand);
+    masm.j(cond, &notDouble);
+
+    // If the value is a double, see if it fits in a 32-bit int. We need to
+    // ask the platform-specific code generator to do this.
+    FloatRegister temp = ToFloatRegister(lir->tempFloat());
+    masm.unboxDouble(operand, temp);
+
+    Label fails;
+    switch (lir->mode()) {
+      case LValueToInt32::TRUNCATE:
+        emitTruncateDouble(temp, output, &fails);
+        break;
+      default:
+        JS_ASSERT(lir->mode() == LValueToInt32::NORMAL);
+        emitDoubleToInt32(temp, output, &fails);
+        break;
+    }
+    if (!bailoutFrom(&fails, lir->snapshot()))
+        return false;
+    masm.jump(&done);
+
+    masm.bind(&notDouble);
+
+    if (lir->mode() == LValueToInt32::NORMAL) {
+        // If the value is not null, it's a string, object, or undefined,
+        // which we can't handle here.
+        cond = masm.testNull(Assembler::NotEqual, operand);
+        if (!bailoutIf(cond, lir->snapshot()))
+            return false;
+    } else {
+        // Test for string or object - then fallthrough to null, which will
+        // also handle undefined.
+        cond = masm.testObject(Assembler::Equal, operand);
+        if (!bailoutIf(cond, lir->snapshot()))
+            return false;
+        cond = masm.testString(Assembler::Equal, operand);
+        if (!bailoutIf(cond, lir->snapshot()))
+            return false;
+    }
+    
+    // The value is null - just emit 0.
+    masm.mov(Imm32(0), output);
+    masm.jump(&done);
+
+    // Just unbox a bool, the result is 0 or 1.
+    masm.bind(&isBool);
+    masm.unboxBoolean(operand, output);
+    masm.jump(&done);
+
+    // Integers can be unboxed.
+    masm.bind(&isInt32);
+    masm.unboxInt32(operand, output);
+
+    masm.bind(&done);
+
+    return true;
+}
+
+static const double DoubleZero = 0.0;
+
+bool
+CodeGenerator::visitValueToDouble(LValueToDouble *lir)
+{
+    ValueOperand operand = ToValue(lir, LValueToDouble::Input);
+    FloatRegister output = ToFloatRegister(lir->output());
+
+    Assembler::Condition cond;
+    Label isDouble, isInt32, isBool, isNull, done;
+
+    // Type-check switch.
+    cond = masm.testDouble(Assembler::Equal, operand);
+    masm.j(cond, &isDouble);
+    cond = masm.testInt32(Assembler::Equal, operand);
+    masm.j(cond, &isInt32);
+    cond = masm.testBoolean(Assembler::Equal, operand);
+    masm.j(cond, &isBool);
+    cond = masm.testNull(Assembler::Equal, operand);
+    masm.j(cond, &isNull);
+
+    cond = masm.testUndefined(Assembler::NotEqual, operand);
+    if (!bailoutIf(cond, lir->snapshot()))
+        return false;
+    masm.loadStaticDouble(&js_NaN, output);
+    masm.jump(&done);
+
+    masm.bind(&isNull);
+    masm.loadStaticDouble(&DoubleZero, output);
+    masm.jump(&done);
+
+    masm.bind(&isBool);
+    masm.boolValueToDouble(operand, output);
+    masm.jump(&done);
+
+    masm.bind(&isInt32);
+    masm.int32ValueToDouble(operand, output);
+    masm.jump(&done);
+
+    masm.bind(&isDouble);
+    masm.unboxDouble(operand, output);
+    masm.bind(&done);
+
+    return true;
+}
+
+bool
+CodeGenerator::visitInt32ToDouble(LInt32ToDouble *lir)
+{
+    masm.convertInt32ToDouble(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+    return true;
+}
+
+bool
+CodeGenerator::visitTestVAndBranch(LTestVAndBranch *lir)
+{
+    const ValueOperand value = ToValue(lir, LTestVAndBranch::Input);
+
+    Register tag = splitTagForTest(value);
+
+    Assembler::Condition cond;
+
+    // Eventually we will want some sort of type filter here. For now, just
+    // emit all easy cases. For speed we use the cached tag for all comparisons,
+    // except for doubles, which we test last (as the operation can clobber the
+    // tag, which may be in ScratchReg).
+    cond = masm.testUndefined(Assembler::Equal, tag);
+    masm.j(cond, lir->ifFalse());
+
+    cond = masm.testNull(Assembler::Equal, tag);
+    masm.j(cond, lir->ifFalse());
+
+    cond = masm.testObject(Assembler::Equal, tag);
+    masm.j(cond, lir->ifTrue());
+
+    Label notBoolean;
+    cond = masm.testBoolean(Assembler::NotEqual, tag);
+    masm.j(cond, &notBoolean);
+    cond = masm.testBooleanTruthy(false, value);
+    masm.j(cond, lir->ifFalse());
+    masm.jump(lir->ifTrue());
+    masm.bind(&notBoolean);
+
+    Label notInt32;
+    cond = masm.testInt32(Assembler::NotEqual, tag);
+    masm.j(cond, &notInt32);
+    cond = masm.testInt32Truthy(false, value);
+    masm.j(cond, lir->ifFalse());
+    masm.jump(lir->ifTrue());
+    masm.bind(&notInt32);
+
+    // Test if a string is non-empty.
+    Label notString;
+    cond = masm.testString(Assembler::NotEqual, tag);
+    masm.j(cond, &notString);
+    cond = testStringTruthy(false, value);
+    masm.j(cond, lir->ifFalse());
+    masm.jump(lir->ifTrue());
+    masm.bind(&notString);
+
+    // If we reach here the value is a double.
+    masm.unboxDouble(value, ToFloatRegister(lir->tempFloat()));
+    cond = masm.testDoubleTruthy(false, ToFloatRegister(lir->tempFloat()));
+    masm.j(cond, lir->ifFalse());
+    masm.jump(lir->ifTrue());
+
+    return true;
+}
+
+bool
+CodeGenerator::visitTruncateDToInt32(LTruncateDToInt32 *lir)
+{
+    Label fails;
+
+    emitTruncateDouble(ToFloatRegister(lir->input()), ToRegister(lir->output()), &fails);
+    if (!bailoutFrom(&fails, lir->snapshot()))
+        return false;
+
+    return true;
+}
+
+bool
+CodeGenerator::generateBody()
+{
+    for (size_t i = 0; i < graph.numBlocks(); i++) {
+        current = graph.getBlock(i);
+        masm.bind(current->label());
+        for (LInstructionIterator iter = current->begin(); iter != current->end(); iter++) {
+            if (!iter->accept(this))
+                return false;
+        }
+        if (masm.oom())
+            return false;
+    }
+    return true;
+}
+
+bool
+CodeGenerator::generate()
+{
+    JSContext *cx = gen->cx;
+
+    if (frameClass_ != FrameSizeClass::None()) {
+        deoptTable_ = cx->compartment->ionCompartment()->getBailoutTable(cx, frameClass_);
+        if (!deoptTable_)
+            return false;
+    }
+
+    if (!generatePrologue())
+        return false;
+    if (!generateBody())
+        return false;
+    if (!generateEpilogue())
+        return false;
+    if (!generateOutOfLineCode())
+        return false;
+
+    if (masm.oom())
+        return false;
+
+    Linker linker(masm);
+    IonCode *code = linker.newCode(cx);
+    if (!code)
+        return false;
+
+    JS_ASSERT(!gen->script->ion);
+
+    gen->script->ion = IonScript::New(cx, snapshots_.length(), bailouts_.length(),
+                                      graph.numConstants());
+    if (!gen->script->ion)
+        return false;
+
+    gen->script->ion->setMethod(code);
+    gen->script->ion->setDeoptTable(deoptTable_);
+    if (snapshots_.length())
+        gen->script->ion->copySnapshots(&snapshots_);
+    if (bailouts_.length())
+        gen->script->ion->copyBailoutTable(&bailouts_[0]);
+    if (graph.numConstants())
+        gen->script->ion->copyConstants(graph.constantPool());
+
+    linkAbsoluteLabels();
+
+    return true;
+}
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/CodeGenerator.h
@@ -0,0 +1,79 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=79:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   David Anderson <dvander@alliedmods.net>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsion_codegen_h__
+#define jsion_codegen_h__
+
+#if defined(JS_CPU_X86)
+# include "x86/CodeGenerator-x86.h"
+#elif defined(JS_CPU_X64)
+# include "x64/CodeGenerator-x64.h"
+#elif defined(JS_CPU_ARM)
+# include "arm/CodeGenerator-arm.h"
+#else
+#error "CPU Not Supported"
+#endif
+
+namespace js {
+namespace ion {
+
+class CodeGenerator : public CodeGeneratorSpecific
+{
+    bool generateBody();
+
+  public:
+    CodeGenerator(MIRGenerator *gen, LIRGraph &graph);
+
+  public:
+    bool generate();
+
+    virtual bool visitValueToInt32(LValueToInt32 *lir);
+    virtual bool visitValueToDouble(LValueToDouble *lir);
+    virtual bool visitInt32ToDouble(LInt32ToDouble *lir);
+    virtual bool visitTestVAndBranch(LTestVAndBranch *lir);
+    virtual bool visitTruncateDToInt32(LTruncateDToInt32 *lir);
+};
+
+} // namespace ion
+} // namespace js
+
+#endif // jsion_codegen_h__
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/CompactBuffer.h
@@ -0,0 +1,176 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=79:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   David Anderson <dvander@alliedmods.net>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsion_compact_buffer_h__
+#define jsion_compact_buffer_h__
+
+#include "jsvector.h"
+
+namespace js {
+namespace ion {
+
+// CompactBuffers are byte streams designed for compressible integers. They
+// have helper functions for writing bytes, fixed-size integers, and
+// variable-sized integers. Variable-sized integers are encoded in 1-5 bytes,
+// each byte containing 7 bits of the integer and a bit which specifies
+// whether the next byte is also part of the integer.
+//
+// Fixed-width integers are also available, in case the actual value will not
+// be known until later.
+
+class CompactBufferReader
+{
+    const uint8 *buffer_;
+    const uint8 *end_;
+
+    uint32 readVariableLength() {
+        uint32 val = 0;
+        uint32 shift = 0;
+        uint byte;
+        while (true) {
+            JS_ASSERT(shift < 32);
+            byte = readByte();
+            val |= (uint32(byte) >> 1) << shift;
+            shift += 7;
+            if (!(byte & 1))
+                return val;
+        }
+        JS_NOT_REACHED("unreachable");
+        return 0;
+    }
+
+  public:
+    CompactBufferReader(const uint8 *start, const uint8 *end)
+      : buffer_(start),
+        end_(end)
+    { }
+    uint8 readByte() {
+        JS_ASSERT(buffer_ < end_);
+        return *buffer_++;
+    }
+    uint32 readFixedUint32() {
+        uint32 b0 = readByte();
+        uint32 b1 = readByte();
+        uint32 b2 = readByte();
+        uint32 b3 = readByte();
+        return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
+    }
+    uint32 readUnsigned() {
+        return readVariableLength();
+    }
+    int32 readSigned() {
+        uint8 b = readByte();
+        bool isNegative = !!(b & (1 << 0));
+        bool more = !!(b & (1 << 1));
+        int32 result = b >> 2;
+        if (more)
+            result |= readUnsigned() << 6;
+        if (isNegative)
+            return -result;
+        return result;
+    }
+
+    bool more() const {
+        JS_ASSERT(buffer_ <= end_);
+        return buffer_ < end_;
+    }
+};
+
+class CompactBufferWriter
+{
+    js::Vector<uint8, 32, SystemAllocPolicy> buffer_;
+    bool enoughMemory_;
+
+  public:
+    CompactBufferWriter()
+      : enoughMemory_(true)
+    { }
+
+    // Note: writeByte() takes uint32 to catch implicit casts with a runtime
+    // assert.
+    void writeByte(uint32 byte) {
+        JS_ASSERT(byte <= 0xFF);
+        enoughMemory_ &= buffer_.append(byte);
+    }
+    void writeUnsigned(uint32 value) {
+        do {
+            uint8 byte = ((value & 0x7F) << 1) | (value > 0x7F);
+            writeByte(byte);
+            value >>= 7;
+        } while (value);
+    }
+    void writeSigned(int32 v) {
+        bool isNegative = v < 0;
+        uint32 value = isNegative ? -v : v;
+        uint8 byte = ((value & 0x3F) << 2) | ((value > 0x3F) << 1) | isNegative;
+        writeByte(byte);
+
+        // Write out the rest of the bytes, if needed.
+        value >>= 6;
+        if (value == 0)
+            return;
+        writeUnsigned(value);
+    }
+    void writeFixedUint32(uint32 value) {
+        writeByte(value & 0xFF);
+        writeByte((value >> 8) & 0xFF);
+        writeByte((value >> 16) & 0xFF);
+        writeByte((value >> 24) & 0xFF);
+    }
+    size_t length() const {
+        return buffer_.length();
+    }
+    uint8 *buffer() {
+        return &buffer_[0];
+    }
+    const uint8 *buffer() const {
+        return &buffer_[0];
+    }
+    bool oom() const {
+        return !enoughMemory_;
+    }
+};
+
+} // namespace ion
+} // namespace js
+
+#endif // jsion_compact_buffer_h__
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/FixedArityList.h
@@ -0,0 +1,124 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=79:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   David Anderson <danderson@mozilla.com>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsion_ion_fixed_arity_list_h__
+#define jsion_ion_fixed_arity_list_h__
+
+namespace js {
+namespace ion {
+
+template <typename T, size_t Arity>
+class FixedArityList
+{
+    T list_[Arity];
+
+  public:
+    T &operator [](size_t index) {
+        JS_ASSERT(index < Arity);
+        return list_[index];
+    }
+    const T &operator [](size_t index) const {
+        JS_ASSERT(index < Arity);
+        return list_[index];
+    }
+};
+
+template <typename T>
+class FixedArityList<T, 0>
+{
+  public:
+    T &operator [](size_t index) {
+        JS_NOT_REACHED("no items");
+        static T *operand = NULL;
+        return *operand;
+    }
+    const T &operator [](size_t index) const {
+        JS_NOT_REACHED("no items");
+        static T *operand = NULL;
+        return *operand;
+    }
+};
+
+// List of a fixed length, but the length is unknown until runtime.
+template <typename T>
+class FixedList
+{
+    size_t length_;
+    T *list_;
+
+  private:
+    FixedList(const FixedList&); // no copy definition.
+    void operator= (const FixedList*); // no assignment definition.
+
+  public:
+    FixedList()
+      : length_(0)
+    { }
+
+    // Dynamic memory allocation requires the ability to report failure.
+    bool init(size_t length) {
+        length_ = length;
+        if (length == 0)
+            return true;
+
+        list_ = (T *)GetIonContext()->temp->allocate(length * sizeof(T *));
+        return list_ != NULL;
+    }
+
+    size_t length() const {
+        return length_;
+    }
+
+    T &operator[](size_t index) {
+        JS_ASSERT(index < length_);
+        return list_[index];
+    }
+    const T &operator [](size_t index) const {
+        JS_ASSERT(index < length_);
+        return list_[index];
+    };
+};
+
+} // namespace ion
+} // namespace js
+
+#endif // jsion_ion_fixed_arity_list_h__
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/GreedyAllocator.cpp
@@ -0,0 +1,1061 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=79:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   David Anderson <danderson@mozilla.com>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#include "GreedyAllocator.h"
+
+using namespace js;
+using namespace js::ion;
+
+// Construct an allocator over |graph|; |gen| supplies arena allocation and
+// ballast. No work happens until allocate() is called.
+GreedyAllocator::GreedyAllocator(MIRGenerator *gen, LIRGraph &graph)
+  : gen(gen),
+    graph(graph)
+{
+}
+
+// Record, for each non-redefined def of |ins|, the owning LDefinition in the
+// per-virtual-register table (vars), keyed by virtual register id.
+void
+GreedyAllocator::findDefinitionsInLIR(LInstruction *ins)
+{
+    for (size_t i = 0; i < ins->numDefs(); i++) {
+        LDefinition *def = ins->getDef(i);
+        JS_ASSERT(def->virtualRegister() < graph.numVirtualRegisters());
+
+        // REDEFINED defs are pass-through aliases of an input; they do not
+        // own a virtual register slot.
+        if (def->policy() == LDefinition::REDEFINED)
+            continue;
+
+        vars[def->virtualRegister()].def = def;
+#ifdef DEBUG
+        vars[def->virtualRegister()].ins = ins;
+#endif
+    }
+}
+
+// Scan one block: phis first (they define virtual registers too), then every
+// instruction in order.
+void
+GreedyAllocator::findDefinitionsInBlock(LBlock *block)
+{
+    for (size_t i = 0; i < block->numPhis(); i++)
+        findDefinitionsInLIR(block->getPhi(i));
+    for (LInstructionIterator i = block->begin(); i != block->end(); i++)
+        findDefinitionsInLIR(*i);
+}
+
+// Populate the virtual-register table for the whole graph before allocation.
+void
+GreedyAllocator::findDefinitions()
+{
+    for (size_t i = 0; i < graph.numBlocks(); i++)
+        findDefinitionsInBlock(graph.getBlock(i));
+}
+
+// Evict |reg| only if it is currently occupied; a no-op for free registers.
+// Returns false on OOM while emitting the eviction moves.
+bool
+GreedyAllocator::maybeEvict(AnyRegister reg)
+{
+    if (!state.free.has(reg))
+        return evict(reg);
+    return true;
+}
+
+// Decode the fixed register named by |use|, interpreting the register code as
+// a float or general register depending on the def's type.
+static inline AnyRegister
+GetFixedRegister(LDefinition *def, LUse *use)
+{
+    return def->type() == LDefinition::DOUBLE
+           ? AnyRegister(FloatRegister::FromCode(use->registerCode()))
+           : AnyRegister(Register::FromCode(use->registerCode()));
+}
+
+// Wrap an already-allocated register LAllocation as an AnyRegister.
+// Precondition: |a| is a register allocation (asserted).
+static inline AnyRegister
+GetAllocatedRegister(const LAllocation *a)
+{
+    JS_ASSERT(a->isRegister());
+    return a->isFloatReg()
+           ? AnyRegister(a->toFloatReg()->reg())
+           : AnyRegister(a->toGeneralReg()->reg());
+}
+
+// Fetch the register demanded by a PRESET definition's output allocation.
+static inline AnyRegister
+GetPresetRegister(const LDefinition *def)
+{
+    JS_ASSERT(def->policy() == LDefinition::PRESET);
+    return GetAllocatedRegister(def->output());
+}
+
+// First pass over one definition (allocation proceeds bottom-up, so a def's
+// instruction is the *end* of its live range): free its resources, and mark
+// its register, if any, as untouchable for this instruction's inputs.
+bool
+GreedyAllocator::prescanDefinition(LDefinition *def)
+{
+    // If the definition is fakeo, a redefinition, ignore it entirely. It's not
+    // valid to kill it, and it doesn't matter if an input uses the same
+    // register (thus it does not go into the disallow set).
+    if (def->policy() == LDefinition::REDEFINED)
+        return true;
+
+    VirtualRegister *vr = getVirtualRegister(def);
+
+    // Add its stack slot and register to the free pool.
+    if (!kill(vr))
+        return false;
+
+    // If it has a register, prevent it from being allocated this round.
+    if (vr->hasRegister())
+        disallowed.add(vr->reg());
+
+    if (def->policy() == LDefinition::PRESET) {
+        const LAllocation *a = def->output();
+        if (a->isRegister()) {
+            // Evict fixed registers. Use the unchecked version of set-add
+            // because the register does not reflect any allocation state, so
+            // it may have already been added.
+            AnyRegister reg = GetPresetRegister(def);
+            disallowed.addUnchecked(reg);
+            if (!maybeEvict(reg))
+                return false;
+        }
+    }
+    return true;
+}
+
+// Run prescanDefinition over every def and every real (non-bogus) temp of
+// |ins|. Returns false on OOM.
+bool
+GreedyAllocator::prescanDefinitions(LInstruction *ins)
+{
+    for (size_t i = 0; i < ins->numDefs(); i++) {
+        if (!prescanDefinition(ins->getDef(i)))
+            return false;
+    }
+    for (size_t i = 0; i < ins->numTemps(); i++) {
+        LDefinition *temp = ins->getTemp(i);
+        if (temp->isBogusTemp())
+            continue;
+        if (!prescanDefinition(temp))
+            return false;
+    }
+    return true;
+}
+
+// Seed the disallow/discouraged sets from |ins|'s uses: FIXED uses pin their
+// register (disallowed); uses that already hold a register discourage other
+// allocations from taking it, to avoid needless shuffling.
+bool
+GreedyAllocator::prescanUses(LInstruction *ins)
+{
+    for (size_t i = 0; i < ins->numOperands(); i++) {
+        LAllocation *a = ins->getOperand(i);
+        if (!a->isUse()) {
+            // The only non-use operands expected here are constants.
+            JS_ASSERT(a->isConstant());
+            continue;
+        }
+
+        LUse *use = a->toUse();
+        VirtualRegister *vr = getVirtualRegister(use);
+
+        if (use->policy() == LUse::FIXED)
+            disallowed.add(GetFixedRegister(vr->def, use));
+        else if (vr->hasRegister())
+            discouraged.addUnchecked(vr->reg());
+    }
+    return true;
+}
+
+// Ensure |vr| has a backing stack location, allocating a (double-width if
+// needed) slot on first request. Idempotent; returns false on OOM.
+bool
+GreedyAllocator::allocateStack(VirtualRegister *vr)
+{
+    if (vr->hasBackingStack())
+        return true;
+
+    uint32 index;
+    if (vr->isDouble()) {
+        if (!stackSlots.allocateDoubleSlot(&index))
+            return false;
+    } else {
+        if (!stackSlots.allocateSlot(&index))
+            return false;
+    }
+
+    vr->setStackSlot(index);
+    return true;
+}
+
+// Pick a register of the right class for |type|, preferring (1) free and not
+// discouraged, then (2) free, then (3) any allowed register, evicting its
+// current occupant. Unless the caller asked for a TEMPORARY, the chosen
+// register is added to the disallow set so later picks this round can't
+// clobber it. Returns false on OOM during eviction.
+bool
+GreedyAllocator::allocate(LDefinition::Type type, Policy policy, AnyRegister *out)
+{
+    RegisterSet allowed = RegisterSet::Not(disallowed);
+    RegisterSet free = allocatableRegs();
+    RegisterSet tryme = RegisterSet::Intersect(free, RegisterSet::Not(discouraged));
+
+    if (tryme.empty(type == LDefinition::DOUBLE)) {
+        if (free.empty(type == LDefinition::DOUBLE)) {
+            *out = allowed.takeAny(type == LDefinition::DOUBLE);
+            if (!evict(*out))
+                return false;
+        } else {
+            *out = free.takeAny(type == LDefinition::DOUBLE);
+        }
+    } else {
+        *out = tryme.takeAny(type == LDefinition::DOUBLE);
+    }
+
+    if (policy != TEMPORARY)
+        disallowed.add(*out);
+
+    return true;
+}
+
+// Return |vr|'s stack slot to the slot pool (double-width pool for doubles).
+void
+GreedyAllocator::freeStack(VirtualRegister *vr)
+{
+    if (vr->isDouble())
+        stackSlots.freeDoubleSlot(vr->stackSlot());
+    else
+        stackSlots.freeSlot(vr->stackSlot());
+}
+
+// Clear the register-to-vr mapping and return |reg| to the free set.
+void
+GreedyAllocator::freeReg(AnyRegister reg)
+{
+    state[reg] = NULL;
+    state.free.add(reg);
+}
+
+// Release every resource held by |vr| (register and/or stack slot). Called
+// when walking past a def bottom-up, i.e. when the live range ends.
+// Always returns true; the bool return matches sibling fallible helpers.
+bool
+GreedyAllocator::kill(VirtualRegister *vr)
+{
+    if (vr->hasRegister()) {
+        AnyRegister reg = vr->reg();
+        JS_ASSERT(state[reg] == vr);
+
+        freeReg(reg);
+    }
+    if (vr->hasStackSlot())
+        freeStack(vr);
+    return true;
+}
+
+// Forcibly take |reg| from its current occupant: give the occupant a stack
+// slot and emit a restore so downstream code still sees its value, then mark
+// the register free. Precondition: |reg| is occupied. Returns false on OOM.
+bool
+GreedyAllocator::evict(AnyRegister reg)
+{
+    VirtualRegister *vr = state[reg];
+    JS_ASSERT(vr->reg() == reg);
+
+    // If the virtual register does not have a stack slot, allocate one now.
+    if (!allocateStack(vr))
+        return false;
+
+    // We're allocating bottom-up, so eviction *restores* a register, otherwise
+    // it could not be used downstream.
+    if (!restore(vr->backingStack(), reg))
+        return false;
+
+    freeReg(reg);
+    vr->unsetRegister();
+    return true;
+}
+
+// Bind |vr| to |reg|, which must currently be unoccupied (asserted), and
+// remove the register from the free set.
+void
+GreedyAllocator::assign(VirtualRegister *vr, AnyRegister reg)
+{
+    JS_ASSERT(!state[reg]);
+    state[reg] = vr;
+    vr->setRegister(reg);
+    state.free.take(reg);
+}
+
+// Satisfy a REGISTER-policy use: reuse the vr's existing register or allocate
+// a fresh one, writing the resulting register allocation into |*a|.
+bool
+GreedyAllocator::allocateRegisterOperand(LAllocation *a, VirtualRegister *vr)
+{
+    AnyRegister reg;
+
+    // Note that the disallow policy is required to prevent other allocations
+    // in later uses clobbering the register.
+    if (vr->hasRegister()) {
+        reg = vr->reg();
+        disallowed.add(reg);
+    } else {
+        // If it does not have a register, allocate one now.
+        if (!allocate(vr->type(), DISALLOW, &reg))
+            return false;
+        assign(vr, reg);
+    }
+
+    *a = LAllocation(reg);
+    return true;
+}
+
+// Satisfy a COPY-policy use: produce a register the instruction may clobber
+// without corrupting the vr's value elsewhere.
+// NOTE(review): in the else-branch, the two inner comments appear swapped
+// relative to the empty() test — the "registers free" comment sits on the
+// branch taken when NO allocatable register is free, and vice versa. Confirm
+// whether the condition polarity or the comments are wrong.
+bool
+GreedyAllocator::allocateWritableOperand(LAllocation *a, VirtualRegister *vr)
+{
+    AnyRegister reg;
+    if (!vr->hasRegister()) {
+        // If the vr has no register assigned, then we can assign a register and
+        // steal it knowing that it will be that register's last use.
+        if (!allocate(vr->type(), DISALLOW, &reg))
+            return false;
+        assign(vr, reg);
+    } else {
+        if (allocatableRegs().empty(vr->isDouble())) {
+            // If there are registers free, get one.
+            if (!allocate(vr->type(), DISALLOW, &reg))
+                return false;
+            align(vr->reg(), reg);
+        } else {
+            // Otherwise, just steal the register.
+            if (!evict(vr->reg()))
+                return false;
+            reg = vr->reg();
+        }
+    }
+
+    *a = LAllocation(reg);
+    return true;
+}
+
+// Satisfy an ANY-policy use: keep an existing register, otherwise prefer a
+// register when one is free (unless the value is a type tag and the caller
+// did not ask for a register), else fall back to the backing stack slot.
+// NOTE(review): callers in this file invoke this with two arguments, so
+// |preferReg| presumably defaults to false in the header — confirm there.
+bool
+GreedyAllocator::allocateAnyOperand(LAllocation *a, VirtualRegister *vr, bool preferReg)
+{
+    if (vr->hasRegister()) {
+        *a = LAllocation(vr->reg());
+        return true;
+    }
+
+    // Are any registers free? Don't bother if the requestee is a type tag.
+    if ((preferReg || vr->type() != LDefinition::TYPE) && !allocatableRegs().empty(vr->isDouble()))
+        return allocateRegisterOperand(a, vr);
+
+    // Otherwise, use a memory operand.
+    if (!allocateStack(vr))
+        return false;
+    *a = vr->backingStack();
+    return true;
+}
+
+// Satisfy a FIXED-policy use: force the value into its demanded register,
+// evicting the current occupant or emitting an aligning move as needed.
+bool
+GreedyAllocator::allocateFixedOperand(LAllocation *a, VirtualRegister *vr)
+{
+    // Note that this register is already in the disallow set.
+    AnyRegister needed = GetFixedRegister(vr->def, a->toUse());
+
+    *a = LAllocation(needed);
+
+    if (!vr->hasRegister()) {
+        if (!maybeEvict(needed))
+            return false;
+        assign(vr, needed);
+        return true;
+    }
+
+    if (vr->reg() == needed)
+        return true;
+
+    // Otherwise, we need to align the input.
+    return align(vr->reg(), needed);
+}
+
+// Handle a MUST_REUSE_INPUT def: choose the output register (fixed register
+// of the use, the def's existing register, or a fresh allocation), make the
+// input available in that register, and rewrite the input allocation to it.
+// |*out| receives the chosen register.
+bool
+GreedyAllocator::allocateSameAsInput(LDefinition *def, LAllocation *a, AnyRegister *out)
+{
+    LUse *use = a->toUse();
+    VirtualRegister *vdef = getVirtualRegister(def);
+    VirtualRegister *vuse = getVirtualRegister(use);
+
+    JS_ASSERT(vdef->isDouble() == vuse->isDouble());
+
+    AnyRegister reg;
+
+    // Find a suitable output register. For simplicity, we do not consider the
+    // current allocation of the input virtual register, which means it could
+    // be evicted.
+    if (use->isFixedRegister()) {
+        reg = GetFixedRegister(def, use);
+    } else if (vdef->hasRegister()) {
+        reg = vdef->reg();
+    } else {
+        if (!allocate(vdef->type(), DISALLOW, &reg))
+            return false;
+    }
+    JS_ASSERT(disallowed.has(reg));
+
+    if (vuse->hasRegister()) {
+        JS_ASSERT(vuse->reg() != reg);
+        if (!align(vuse->reg(), reg))
+            return false;
+    } else {
+        // If the input has no register, we can just re-use the output register
+        // directly, because nothing downstream could be clobbered by consuming
+        // the register.
+        assign(vuse, reg);
+    }
+
+    // Overwrite the input allocation now.
+    *a = LAllocation(reg);
+
+    *out = reg;
+    return true;
+}
+
+// Allocate an output location for each definition of |ins| according to its
+// policy, then emit moves so the state leaving the instruction matches what
+// downstream code (already allocated, since we run bottom-up) expects.
+// NOTE(review): the switch has no default case; if a new LDefinition policy
+// were added, |output| would be read uninitialized below — confirm the enum
+// is closed over these four values.
+bool
+GreedyAllocator::allocateDefinitions(LInstruction *ins)
+{
+    for (size_t i = 0; i < ins->numDefs(); i++) {
+        LDefinition *def = ins->getDef(i);
+        VirtualRegister *vr = getVirtualRegister(def);
+
+        LAllocation output;
+        switch (def->policy()) {
+          case LDefinition::REDEFINED:
+            // This is purely passthru, so ignore it.
+            continue;
+
+          case LDefinition::DEFAULT:
+          {
+            // Either take the register requested, or allocate a new one.
+            if (vr->hasRegister()) {
+                output = LAllocation(vr->reg());
+            } else {
+                AnyRegister reg;
+                if (!allocate(vr->type(), DISALLOW, &reg))
+                    return false;
+                output = LAllocation(reg);
+            }
+            break;
+          }
+
+          case LDefinition::PRESET:
+          {
+            // Eviction and disallowing occurred during the definition
+            // pre-scan pass.
+            output = *def->output();
+            break;
+          }
+
+          case LDefinition::MUST_REUSE_INPUT:
+          {
+            AnyRegister out_reg;
+            if (!allocateSameAsInput(def, ins->getOperand(0), &out_reg))
+                return false;
+            output = LAllocation(out_reg);
+            break;
+          }
+        }
+
+        if (output.isRegister()) {
+            JS_ASSERT_IF(output.isFloatReg(), disallowed.has(output.toFloatReg()->reg()));
+            JS_ASSERT_IF(output.isGeneralReg(), disallowed.has(output.toGeneralReg()->reg()));
+        }
+
+        // |output| is now the allocation state leaving the instruction.
+        // However, this is not necessarily the allocation state expected
+        // downstream, so emit moves where necessary.
+        if (output.isRegister()) {
+            if (vr->hasRegister()) {
+                // If the returned register is different from the output
+                // register, a move is required.
+                AnyRegister out = GetAllocatedRegister(&output);
+                if (out != vr->reg()) {
+                    if (!spill(output, vr->reg()))
+                        return false;
+                }
+            }
+
+            // Spill to the stack if needed.
+            if (vr->hasStackSlot() && !spill(output, vr->backingStack()))
+                return false;
+        } else if (vr->hasRegister()) {
+            // This definition has a canonical spill location, so make sure to
+            // load it to the resulting register, if any.
+            // NOTE(review): spill() is used here to emit the move even though
+            // the comment says "load" — presumably spill() is a generic
+            // move-emitter in this allocator; confirm against the header.
+            JS_ASSERT(!vr->hasStackSlot());
+            JS_ASSERT(vr->hasBackingStack());
+            if (!spill(output, vr->reg()))
+                return false;
+        }
+
+        // Finally, set the output.
+        *def = LDefinition(def->type(), output);
+    }
+
+    return true;
+}
+
+// Give every non-PRESET temp of |ins| a fresh register (temps always live in
+// registers) and rewrite the temp definition to that allocation.
+bool
+GreedyAllocator::allocateTemporaries(LInstruction *ins)
+{
+    for (size_t i = 0; i < ins->numTemps(); i++) {
+        LDefinition *def = ins->getTemp(i);
+        if (def->policy() == LDefinition::PRESET)
+            continue;
+
+        JS_ASSERT(def->policy() == LDefinition::DEFAULT);
+        AnyRegister reg;
+        if (!allocate(def->type(), DISALLOW, &reg))
+            return false;
+        *def = LDefinition(def->type(), LAllocation(reg));
+    }
+    return true;
+}
+
+// Allocate all inputs of |ins| in decreasing order of constraint strength:
+// register-demanding policies first (FIXED/REGISTER/COPY), then temporaries
+// (which also need registers), and finally ANY uses that tolerate memory.
+bool
+GreedyAllocator::allocateInputs(LInstruction *ins)
+{
+    // First deal with fixed-register policies and policies that require
+    // registers.
+    for (size_t i = 0; i < ins->numOperands(); i++) {
+        LAllocation *a = ins->getOperand(i);
+        if (!a->isUse())
+            continue;
+        LUse *use = a->toUse();
+        VirtualRegister *vr = getVirtualRegister(use);
+        if (use->policy() == LUse::FIXED) {
+            if (!allocateFixedOperand(a, vr))
+                return false;
+        } else if (use->policy() == LUse::REGISTER) {
+            if (!allocateRegisterOperand(a, vr))
+                return false;
+        } else if (use->policy() == LUse::COPY) {
+            if (!allocateWritableOperand(a, vr))
+                return false;
+        }
+    }
+
+    // Allocate temporaries before uses that accept memory operands, because
+    // temporaries require registers.
+    if (!allocateTemporaries(ins))
+        return false;
+
+    // Finally, deal with things that take either registers or memory.
+    for (size_t i = 0; i < ins->numOperands(); i++) {
+        LAllocation *a = ins->getOperand(i);
+        if (!a->isUse())
+            continue;
+
+        LUse *use = a->toUse();
+        // Constrained uses were already converted to register allocations
+        // above, so anything still a use must be ANY.
+        JS_ASSERT(use->policy() == LUse::ANY);
+
+        VirtualRegister *vr = getVirtualRegister(use);
+        if (!allocateAnyOperand(a, vr))
+            return false;
+    }
+
+    return true;
+}
+
+// Around a JS call, evict every register in the call-clobber masks so no
+// live value is held in a register the callee may overwrite.
+bool
+GreedyAllocator::spillForCall(LInstruction *ins)
+{
+    GeneralRegisterSet genset(Registers::JSCallClobberMask);
+    FloatRegisterSet floatset(FloatRegisters::JSCallClobberMask);
+    for (AnyRegisterIterator iter(genset, floatset); iter.more(); iter++) {
+        if (!maybeEvict(*iter))
+            return false;
+    }
+    return true;
+}
+
+// Resolve each use in the snapshot to its current location: the vr's register
+// if it has one, otherwise its (possibly newly allocated) backing stack slot.
+bool
+GreedyAllocator::informSnapshot(LSnapshot *snapshot)
+{
+    for (size_t i = 0; i < snapshot->numEntries(); i++) {
+        LAllocation *a = snapshot->getEntry(i);
+        if (!a->isUse())
+            continue;
+
+        LUse *use = a->toUse();
+        VirtualRegister *vr = getVirtualRegister(use);
+        if (vr->hasRegister()) {
+            *a = LAllocation(vr->reg());
+        } else {
+            if (!allocateStack(vr))
+                return false;
+            *a = vr->backingStack();
+        }
+    }
+    return true;
+}
+
+// Debug-only invariant check: a register is free iff it maps to no vr, and
+// every mapped vr agrees that it holds that register. No-op in release.
+void
+GreedyAllocator::assertValidRegisterState()
+{
+#ifdef DEBUG
+    // Assert that for each taken register in state.free, that it maps to a vr
+    // and that that vr has that register.
+    for (AnyRegisterIterator iter; iter.more(); iter++) {
+        AnyRegister reg = *iter;
+        VirtualRegister *vr = state[reg];
+        if (!reg.allocatable()) {
+            JS_ASSERT(!vr);
+            continue;
+        }
+        JS_ASSERT(!vr == state.free.has(reg));
+        JS_ASSERT_IF(vr, vr->reg() == reg);
+    }
+#endif
+}
+
+// Allocate one instruction end-to-end: reset per-instruction state, handle
+// call clobbers, prescan defs/uses, then allocate defs, inputs/temps, and
+// snapshot entries, and finally splice in any alignment moves before |ins|.
+bool
+GreedyAllocator::allocateInstruction(LBlock *block, LInstruction *ins)
+{
+    if (!gen->ensureBallast())
+        return false;
+
+    // Reset internal state used for evicting.
+    reset();
+    assertValidRegisterState();
+
+    // Step 1. Around a call, save all registers used downstream.
+    if (ins->isCallGeneric() && !spillForCall(ins))
+        return false;
+
+    // Step 2. Find all fixed writable registers, adding them to the
+    // disallow set.
+    if (!prescanDefinitions(ins))
+        return false;
+
+    // Step 3. For each use, add fixed policies to the disallow set and
+    // already allocated registers to the discouraged set.
+    if (!prescanUses(ins))
+        return false;
+
+    // Step 4. Allocate registers for each definition.
+    if (!allocateDefinitions(ins))
+        return false;
+
+    // Step 5. Allocate inputs and temporaries.
+    if (!allocateInputs(ins))
+        return false;
+
+    // Step 6. Assign fields of a snapshot.
+    if (ins->snapshot() && !informSnapshot(ins->snapshot()))
+        return false;
+
+    if (aligns)
+        block->insertBefore(ins, aligns);
+
+    return true;
+}
+
+// Allocate a block in reverse instruction order. The terminating control
+// instruction is handled first and specially (its restores may need to be
+// duplicated onto outgoing edges); loop backedges snapshot their exit state;
+// then the remaining instructions are walked bottom-up, splicing restore and
+// spill move groups around each.
+bool
+GreedyAllocator::allocateRegistersInBlock(LBlock *block)
+{
+    LInstructionReverseIterator ri = block->instructions().rbegin();
+
+    // Control instructions need to be handled specially. Since they have no
+    // outputs, we are guaranteed they do not spill. But restores may occur,
+    // and may need to be duplicated on each outgoing edge.
+    if (!allocateInstruction(block, *ri))
+        return false;
+    ri++;
+
+    JS_ASSERT(!spills);
+
+    if (restores) {
+        // For each successor that has already been allocated, duplicate the
+        // move group into the start of its block. We don't yet have the
+        // ability to detect whether the receiving blocks actually need this
+        // move.
+        for (size_t i = 0; i < block->mir()->numSuccessors(); i++) {
+            MBasicBlock *msuccessor = block->mir()->getSuccessor(i);
+            // Only successors later in RPO have been allocated already.
+            if (msuccessor->id() <= block->mir()->id())
+                continue;
+
+            Mover moves;
+            LBlock *successor = msuccessor->lir();
+            // NOTE(review): this inner |i| shadows the successor-loop |i|;
+            // harmless as written, but rename if either loop is touched.
+            for (size_t i = 0; i < restores->numMoves(); i++) {
+                const LMove &move = restores->getMove(i);
+                if (!moves.move(*move.from(), *move.to()))
+                    return false;
+            }
+
+            successor->insertBefore(*successor->begin(), moves.moves);
+        }
+    }
+
+    if (block->mir()->isLoopBackedge()) {
+        // If this is a loop backedge, save its exit allocation state at the
+        // loop header. Note this occurs after allocating the initial jump
+        // instruction, to avoid placing useless moves at the loop edge.
+        if (!prepareBackedge(block))
+            return false;
+    }
+    blockInfo(block)->out = state;
+
+    for (; ri != block->instructions().rend(); ri++) {
+        LInstruction *ins = *ri;
+
+        if (!allocateInstruction(block, ins))
+            return false;
+
+        // Step 6. Insert move instructions.
+        if (restores)
+            block->insertAfter(ins, restores);
+        if (spills) {
+            JS_ASSERT(ri != block->rbegin());
+            block->insertAfter(ins, spills);
+        }
+
+        assertValidRegisterState();
+    }
+    return true;
+}
+
+// Reconcile the occupancy of one register between the inherited state (left
+// successor, already in |state|) and another successor's entry state. Emits
+// restore moves on the right-hand side when the two sides disagree.
+bool
+GreedyAllocator::mergeRegisterState(const AnyRegister &reg, LBlock *left, LBlock *right)
+{
+    VirtualRegister *vleft = state[reg];
+    VirtualRegister *vright = blockInfo(right)->in[reg];
+
+    // If the input register is unused or occupied by the same vr, we're done.
+    if (vleft == vright)
+        return true;
+
+    // If the right-hand side has no allocation, then do nothing because the
+    // left-hand side has already propagated its value up.
+    if (!vright)
+        return true;
+
+    BlockInfo *rinfo = blockInfo(right);
+
+    if (!vleft && !vright->hasRegister()) {
+        // The left-hand side never assigned a register to |vright|, and has
+        // not assigned this register, so just inherit the right-hand side's
+        // allocation.
+        assign(vright, reg);
+        return true;
+    }
+
+    // Otherwise, we have reached one of two situations:
+    //  (1) The left-hand and right-hand sides have two different definitions
+    //      in the same register.
+    //  (2) The right-hand side expects a definition in a different register
+    //      than the left-hand side has assigned.
+    //
+    // In both cases, we emit a load or move on the right-hand side to ensure
+    // that the definition is in the expected register.
+    if (!vright->hasRegister() && !allocatableRegs().empty(vright->isDouble())) {
+        // NOTE(review): this local |reg| intentionally shadows the parameter
+        // so |vright| gets a *different* register; the checks below then
+        // read the outer parameter again. Fragile — rename if modified.
+        AnyRegister reg;
+        if (!allocate(vright->type(), DISALLOW, &reg))
+            return false;
+        assign(vright, reg);
+    }
+
+    if (vright->hasRegister()) {
+        JS_ASSERT(vright->reg() != reg);
+        if (!rinfo->restores.move(vright->reg(), reg))
+            return false;
+    } else {
+        if (!allocateStack(vright))
+            return false;
+        if (!rinfo->restores.move(vright->backingStack(), reg))
+            return false;
+    }
+
+    return true;
+}
+
+// At a loop backedge, give every phi input an allocation and stash it in the
+// corresponding phi's def output, so the loop header can later emit moves
+// from the input's exit location to the phi's final storage.
+bool
+GreedyAllocator::prepareBackedge(LBlock *block)
+{
+    MBasicBlock *msuccessor = block->mir()->successorWithPhis();
+    if (!msuccessor)
+        return true;
+
+    LBlock *successor = msuccessor->lir();
+
+    uint32 pos = block->mir()->positionInPhiSuccessor();
+    for (size_t i = 0; i < successor->numPhis(); i++) {
+        LPhi *phi = successor->getPhi(i);
+        LAllocation *a = phi->getOperand(pos);
+        if (!a->isUse())
+            continue;
+        VirtualRegister *vr = getVirtualRegister(a->toUse());
+
+        // We ensure a phi always has an allocation, because it's too early to
+        // tell whether something in the loop uses it.
+        LAllocation result;
+        if (!allocateAnyOperand(&result, vr, true))
+            return false;
+
+        // Store the def's exit allocation in the phi's output, as a cheap
+        // trick. At the loop header we'll see this and emit moves from the def
+        // to the phi's final storage.
+        phi->getDef(0)->setOutput(result);
+    }
+
+    return true;
+}
+
+// After allocating a loop, reconcile the header's entry state with the
+// backedge's exit state: reload loop-carried registers at the header, and
+// emit the phi moves (stashed by prepareBackedge) at the backedge.
+bool
+GreedyAllocator::mergeBackedgeState(LBlock *header, LBlock *backedge)
+{
+    BlockInfo *info = blockInfo(backedge);
+
+    // Handle loop-carried carried registers, making sure anything live at the
+    // backedge is also properly held live at the top of the loop.
+    Mover carried;
+    for (AnyRegisterIterator iter; iter.more(); iter++) {
+        AnyRegister reg = *iter;
+        VirtualRegister *inVr = state[reg];
+        if (!inVr)
+            continue;
+
+        VirtualRegister *outVr = info->out[reg];
+        if (inVr == outVr)
+            continue;
+
+        // A register is live coming into the loop, but has a different exit
+        // assignment. For this to work, we either need to insert a spill or a
+        // move. This may insert unnecessary moves, since it cannot tell if a
+        // register was clobbered in the loop. It only knows if the allocation
+        // states at the loop edges are different. Note that for the same
+        // reasons, we cannot assume a register allocated here will be
+        // preserved across the loop.
+        if (!allocateStack(inVr))
+            return false;
+        if (!carried.move(inVr->backingStack(), reg))
+            return false;
+    }
+    if (carried.moves) {
+        // Reloads go at the very top of the loop header.
+        LInstruction *ins = *header->instructions().begin();
+        header->insertBefore(ins, carried.moves);
+    }
+
+    Mover phis;
+
+    // Handle loop phis.
+    for (size_t i = 0; i < header->numPhis(); i++) {
+        LPhi *phi = header->getPhi(i);
+        LDefinition *def = phi->getDef(0);
+        VirtualRegister *vr = getVirtualRegister(def);
+
+        // prepareBackedge stored the input's exit allocation in the phi's
+        // output (PRESET); read it back here.
+        JS_ASSERT(def->policy() == LDefinition::PRESET);
+        const LAllocation *a = def->output();
+
+        if (vr->hasStackSlot() && !phis.move(*a, vr->backingStack()))
+            return false;
+
+        if (vr->hasRegister() && (!a->isRegister() || vr->reg() != a->toRegister())) {
+            if (!phis.move(*a, vr->reg()))
+                return false;
+        }
+    }
+
+    if (phis.moves) {
+        // Phi moves go just before the backedge's terminating jump.
+        LInstruction *ins = *backedge->instructions().rbegin();
+        backedge->insertBefore(ins, phis.moves);
+    }
+
+    return true;
+}
+
+// If this block feeds a phi-bearing successor, emit the moves that carry each
+// phi input into the phi def's register and/or stack slot, inserting them
+// (after any restores) just before the block's terminator.
+bool
+GreedyAllocator::mergePhiState(LBlock *block)
+{
+    MBasicBlock *mblock = block->mir();
+    if (!mblock->successorWithPhis())
+        return true;
+
+    // Reset state so evictions will work.
+    reset();
+
+    Mover phis;
+
+    uint32 pos = mblock->positionInPhiSuccessor();
+    LBlock *successor = mblock->successorWithPhis()->lir();
+    for (size_t i = 0; i < successor->numPhis(); i++) {
+        LPhi *phi = successor->getPhi(i);
+        VirtualRegister *def = getVirtualRegister(phi->getDef(0));
+
+        // Ignore non-loop phis with no uses.
+        if (!def->hasRegister() && !def->hasStackSlot())
+            continue;
+
+        LAllocation *a = phi->getOperand(pos);
+        VirtualRegister *use = getVirtualRegister(a->toUse());
+
+        // Try to give the use a register.
+        if (!use->hasRegister()) {
+            // Reuse the def's register when it is free in this block's state.
+            if (def->hasRegister() && !state[def->reg()]) {
+                assign(use, def->reg());
+            } else {
+                LAllocation unused;
+                if (!allocateAnyOperand(&unused, use, true))
+                    return false;
+            }
+        }
+
+        // Emit a move from the use to a def register.
+        if (def->hasRegister()) {
+            if (use->hasRegister()) {
+                if (use->reg() != def->reg() && !phis.move(use->reg(), def->reg()))
+                    return false;
+            } else {
+                if (!phis.move(use->backingStack(), def->reg()))
+                    return false;
+            }
+        }
+
+        // Emit a move from the use to a def stack slot.
+        if (def->hasStackSlot()) {
+            if (use->hasRegister()) {
+                if (!phis.move(use->reg(), def->backingStack()))
+                    return false;
+            } else if (use->backingStack() != def->backingStack()) {
+                if (!phis.move(use->backingStack(), def->backingStack()))
+                    return false;
+            }
+        }
+    }
+
+    // Now insert restores (if any) and phi moves.
+    JS_ASSERT(!aligns);
+    JS_ASSERT(!spills);
+    LInstruction *before = *block->instructions().rbegin();
+    if (restores)
+        block->insertBefore(before, restores);
+    if (phis.moves)
+        block->insertBefore(before, phis.moves);
+
+    return true;
+}
+
+// Build this block's exit allocation state from its (already-allocated)
+// successors: inherit successor 0's entry state, re-apply its register
+// bindings to the defs, then merge each additional successor register by
+// register, inserting reconciliation moves on the right-hand sides.
+bool
+GreedyAllocator::mergeAllocationState(LBlock *block)
+{
+    MBasicBlock *mblock = block->mir();
+
+    if (!mblock->numSuccessors()) {
+        // No successors (e.g. a return): start from an empty state.
+        state = AllocationState();
+        return true;
+    }
+
+    // Prefer the successor with phis as the baseline state
+    LBlock *leftblock = mblock->getSuccessor(0)->lir();
+    state = blockInfo(leftblock)->in;
+
+    // To complete inheriting our successor's state, make sure each taken
+    // register is applied to the def for which it was intended.
+    for (AnyRegisterIterator iter; iter.more(); iter++) {
+        AnyRegister reg = *iter;
+        if (VirtualRegister *vr = state[reg])
+            vr->setRegister(reg);
+    }
+
+    // Merge state from each additional successor.
+    for (size_t i = 1; i < mblock->numSuccessors(); i++) {
+        LBlock *rightblock = mblock->getSuccessor(i)->lir();
+
+        for (AnyRegisterIterator iter; iter.more(); iter++) {
+            AnyRegister reg = *iter;
+            if (!mergeRegisterState(reg, leftblock, rightblock))
+                return false;
+        }
+
+        // If there were parallel moves, append them now.
+        BlockInfo *info = blockInfo(rightblock);
+        if (info->restores.moves)
+            rightblock->insertBefore(*rightblock->begin(), info->restores.moves);
+    }
+
+    if (!mergePhiState(block))
+        return false;
+
+    return true;
+}
+
+// Main driver: walk blocks in reverse so every use is seen before its
+// definition, merging successor state into each block, allocating it, fixing
+// up loop backedges, killing phi defs, and snapshotting the entry state for
+// predecessors before clearing register bindings.
+bool
+GreedyAllocator::allocateRegisters()
+{
+    // Allocate registers bottom-up, such that we see all uses before their
+    // definitions.
+    // Note: size_t is unsigned, so the loop terminates when i wraps past 0
+    // to a huge value — the "i < numBlocks()" test is the wraparound check.
+    for (size_t i = graph.numBlocks() - 1; i < graph.numBlocks(); i--) {
+        LBlock *block = graph.getBlock(i);
+
+        // Merge allocation state from our successors.
+        if (!mergeAllocationState(block))
+            return false;
+
+        // Allocate registers.
+        if (!allocateRegistersInBlock(block))
+            return false;
+
+        // If this is a loop header, insert moves at the backedge from phi
+        // inputs to phi outputs.
+        if (block->mir()->isLoopHeader()) {
+            if (!mergeBackedgeState(block, block->mir()->backedge()->lir()))
+                return false;
+        }
+
+        // Kill phis.
+        for (size_t i = 0; i < block->numPhis(); i++) {
+            LPhi *phi = block->getPhi(i);
+            JS_ASSERT(phi->numDefs() == 1);
+
+            VirtualRegister *vr = getVirtualRegister(phi->getDef(0));
+            kill(vr);
+        }
+
+        // We've reached the top of the block. Save the mapping of registers to
+        // definitions, as our predecessors will need this to merge state.
+        // Then, clear the register assignments to all defs. This is necessary
+        // otherwise block A's state could be left around for completely
+        // independent block B, which never actually allocated for that def.
+        blockInfo(block)->in = state;
+        for (AnyRegisterIterator iter; iter.more(); iter++) {
+            AnyRegister reg = *iter;
+            VirtualRegister *vr = state[reg];
+            if (vr) {
+                JS_ASSERT(vr->reg() == reg);
+                vr->unsetRegister();
+            }
+        }
+    }
+    return true;
+}
+
+// Entry point: set up per-vr and per-block tables, run allocation, and record
+// the final stack height on the graph. Returns false on OOM.
+bool
+GreedyAllocator::allocate()
+{
+    vars = gen->allocate<VirtualRegister>(graph.numVirtualRegisters());
+    if (!vars)
+        return false;
+    memset(vars, 0, sizeof(VirtualRegister) * graph.numVirtualRegisters());
+
+    blocks = gen->allocate<BlockInfo>(graph.numBlockIds());
+    // Check for OOM before constructing: the original placement-new'd into a
+    // possibly-NULL buffer, which is undefined behavior on allocation failure.
+    if (!blocks)
+        return false;
+    for (size_t i = 0; i < graph.numBlockIds(); i++)
+        new (&blocks[i]) BlockInfo();
+
+    findDefinitions();
+    if (!allocateRegisters())
+        return false;
+    graph.setLocalSlotCount(stackSlots.stackHeight());
+
+    return true;
+}
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/GreedyAllocator.h
@@ -0,0 +1,322 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=79:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   David Anderson <danderson@mozilla.com>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsion_include_greedy_h__
+#define jsion_include_greedy_h__
+
+#include "MIR.h"
+#include "MIRGraph.h"
+#include "IonLIR.h"
+
+namespace js {
+namespace ion {
+
+class GreedyAllocator
+{
+    struct Mover {
+        LMoveGroup *moves;
+
+        Mover() : moves(NULL)
+        { }
+
+        template <typename From, typename To>
+        bool move(const From &from, const To &to) {
+            if (!moves)
+                moves = new LMoveGroup;
+            return moves->add(LAllocation::New(from), LAllocation::New(to));
+        }
+    };
+
+    struct VirtualRegister {
+        LDefinition *def;
+        uint32 stackSlot_;
+        union {
+            Registers::Code gprCode;
+            FloatRegisters::Code fpuCode;
+            uint32 registerCode;
+        };
+        bool hasRegister_;
+        bool hasStackSlot_;
+
+#ifdef DEBUG
+        LInstruction *ins;
+#endif
+
+        LDefinition::Type type() const {
+            return def->type();
+        }
+        bool isDouble() const {
+            return type() == LDefinition::DOUBLE;
+        }
+        Register gpr() const {
+            JS_ASSERT(!isDouble());
+            JS_ASSERT(hasRegister());
+            return Register::FromCode(gprCode);
+        }
+        FloatRegister fpu() const {
+            JS_ASSERT(isDouble());
+            JS_ASSERT(hasRegister());
+            return FloatRegister::FromCode(fpuCode);
+        }
+        AnyRegister reg() const {
+            return isDouble() ? AnyRegister(fpu()) : AnyRegister(gpr());
+        }
+        void setRegister(FloatRegister reg) {
+            JS_ASSERT(isDouble());
+            fpuCode = reg.code();
+            hasRegister_ = true;
+        }
+        void setRegister(Register reg) {
+            JS_ASSERT(!isDouble());
+            gprCode = reg.code();
+            hasRegister_ = true;
+        }
+        void setRegister(AnyRegister reg) {
+            if (reg.isFloat())
+                setRegister(reg.fpu());
+            else
+                setRegister(reg.gpr());
+        }
+        uint32 stackSlot() const {
+            return stackSlot_;
+        }
+        bool hasBackingStack() const {
+            return hasStackSlot() ||
+                   (def->isPreset() && def->output()->isMemory());
+        }
+        LAllocation backingStack() const {
+            if (hasStackSlot())
+                return LStackSlot(stackSlot_, isDouble());
+            JS_ASSERT(def->policy() == LDefinition::PRESET);
+            JS_ASSERT(def->output()->isMemory());
+            return *def->output();
+        }
+        void setStackSlot(uint32 index) {
+            JS_ASSERT(!hasStackSlot());
+            stackSlot_ = index;
+            hasStackSlot_ = true;
+        }
+        bool hasRegister() const {
+            return hasRegister_;
+        }
+        void unsetRegister() {
+            hasRegister_ = false;
+        }
+        bool hasSameRegister(uint32 code) const {
+            return hasRegister() && registerCode == code;
+        }
+        bool hasStackSlot() const {
+            return hasStackSlot_;
+        }
+    };
+
+    struct AllocationState {
+        RegisterSet free;
+        VirtualRegister *gprs[Registers::Total];
+        VirtualRegister *fpus[FloatRegisters::Total];
+
+        VirtualRegister *& operator[](const AnyRegister &reg) {
+            if (reg.isFloat())
+                return fpus[reg.fpu().code()];
+            return gprs[reg.gpr().code()];
+        }
+
+        AllocationState()
+          : free(RegisterSet::All()),
+            gprs(),
+            fpus()
+        { }
+    };
+
+    struct BlockInfo {
+        AllocationState in;
+        AllocationState out;
+        Mover restores;
+    };
+
+  private:
+    MIRGenerator *gen;
+    LIRGraph &graph;
+    VirtualRegister *vars;
+    RegisterSet disallowed;
+    RegisterSet discouraged;
+    AllocationState state;
+    StackAssignment stackSlots;
+    BlockInfo *blocks;
+
+    // Aligns: If a register shuffle must occur to align input parameters (for
+    //         example, ecx loading into fixed edx), it goes here.
+    // Spills: A definition may have to spill its result register to the stack,
+    //         if restore code lies downstream.
+    // Restores: If a register is evicted, an instruction will load it off the
+    //         stack for downstream uses.
+    //
+    // Moves happen in this order:
+    //   Aligns
+    //   <Instruction>
+    //   Spills
+    //   Restores
+    // 
+    LMoveGroup *aligns;
+    LMoveGroup *spills;
+    LMoveGroup *restores;
+
+    bool restore(const LAllocation &from, const AnyRegister &to) {
+        if (!restores)
+            restores = new LMoveGroup;
+        return restores->add(LAllocation::New(from), LAllocation::New(to));
+    }
+
+    template <typename LA, typename LB>
+    bool spill(const LA &from, const LB &to) {
+        if (!spills)
+            spills = new LMoveGroup;
+        return spills->add(LAllocation::New(from), LAllocation::New(to));
+    }
+
+    template <typename LA, typename LB>
+    bool align(const LA &from, const LB &to) {
+        if (!aligns)
+            aligns = new LMoveGroup;
+        return aligns->add(LAllocation::New(from), LAllocation::New(to));
+    }
+
+    void reset() {
+        aligns = NULL;
+        spills = NULL;
+        restores = NULL;
+        disallowed = RegisterSet();
+        discouraged = RegisterSet();
+    }
+
+  private:
+    void assertValidRegisterState();
+
+    void findDefinitionsInLIR(LInstruction *ins);
+    void findDefinitionsInBlock(LBlock *block);
+    void findDefinitions();
+
+    // Kills a definition, freeing its stack allocation and register.
+    bool kill(VirtualRegister *vr);
+
+    // Evicts a register, spilling it to the stack and allowing it to be
+    // allocated.
+    bool evict(AnyRegister reg);
+    bool maybeEvict(AnyRegister reg);
+
+    // Allocates or frees a stack slot.
+    bool allocateStack(VirtualRegister *vr);
+    void freeStack(VirtualRegister *vr);
+
+    // Marks a register as being free.
+    void freeReg(AnyRegister reg);
+
+    // Takes a free register and assigns it to a virtual register.
+    void assign(VirtualRegister *vr, AnyRegister reg);
+
+    enum Policy {
+        // A temporary register may be allocated again immediately. It is not
+        // added to the disallow or used set.
+        TEMPORARY,
+
+        // A disallowed register can be re-allocated next instruction, but is
+        // pinned for further allocations during this instruction.
+        DISALLOW
+    };
+
+    // Allocate a free register of a particular type, possibly evicting in the
+    // process.
+    bool allocate(LDefinition::Type type, Policy policy, AnyRegister *out);
+
+    // Allocate a physical register for a virtual register, possibly evicting
+    // in the process.
+    bool allocateRegisterOperand(LAllocation *a, VirtualRegister *vr);
+    bool allocateAnyOperand(LAllocation *a, VirtualRegister *vr, bool preferReg = false);
+    bool allocateFixedOperand(LAllocation *a, VirtualRegister *vr);
+    bool allocateWritableOperand(LAllocation *a, VirtualRegister *vr);
+
+    bool prescanDefinition(LDefinition *def);
+    bool prescanDefinitions(LInstruction *ins);
+    bool prescanUses(LInstruction *ins);
+    bool spillForCall(LInstruction *ins);
+    bool informSnapshot(LSnapshot *snapshot);
+    bool allocateSameAsInput(LDefinition *def, LAllocation *a, AnyRegister *out);
+    bool allocateDefinitions(LInstruction *ins);
+    bool allocateTemporaries(LInstruction *ins);
+    bool allocateInputs(LInstruction *ins);
+
+    bool allocateRegisters();
+    bool allocateRegistersInBlock(LBlock *block);
+    bool allocateInstruction(LBlock *block, LInstruction *ins);
+    bool mergePhiState(LBlock *block);
+    bool prepareBackedge(LBlock *block);
+    bool mergeAllocationState(LBlock *block);
+    bool mergeBackedgeState(LBlock *header, LBlock *backedge);
+    bool mergeRegisterState(const AnyRegister &reg, LBlock *left, LBlock *right);
+
+    VirtualRegister *getVirtualRegister(LDefinition *def) {
+        JS_ASSERT(def->virtualRegister() < graph.numVirtualRegisters());
+        return &vars[def->virtualRegister()];
+    }
+    VirtualRegister *getVirtualRegister(LUse *use) {
+        JS_ASSERT(use->virtualRegister() < graph.numVirtualRegisters());
+        JS_ASSERT(vars[use->virtualRegister()].def);
+        return &vars[use->virtualRegister()];
+    }
+    RegisterSet allocatableRegs() const {
+        return RegisterSet::Intersect(state.free, RegisterSet::Not(disallowed));
+    }
+    BlockInfo *blockInfo(LBlock *block) {
+        JS_ASSERT(block->mir()->id() < graph.numBlockIds());
+        return &blocks[block->mir()->id()];
+    }
+
+  public:
+    GreedyAllocator(MIRGenerator *gen, LIRGraph &graph);
+
+    bool allocate();
+};
+
+} // namespace ion
+} // namespace js
+
+#endif // jsion_include_greedy_h__
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/InlineList.h
@@ -0,0 +1,423 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=79:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   David Anderson <danderson@mozilla.com>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef js_inline_list_h__
+#define js_inline_list_h__
+
+namespace js {
+
+template <typename T> class InlineForwardList;
+template <typename T> class InlineForwardListIterator;
+
+template <typename T>
+class InlineForwardListNode
+{
+  public:
+    InlineForwardListNode() : next(NULL)
+    { }
+    InlineForwardListNode(InlineForwardListNode<T> *n) : next(n)
+    { }
+
+  protected:
+    friend class InlineForwardList<T>;
+    friend class InlineForwardListIterator<T>;
+
+    InlineForwardListNode<T> *next;
+};
+
+template <typename T>
+class InlineForwardList : protected InlineForwardListNode<T>
+{
+    friend class InlineForwardListIterator<T>;
+
+    typedef InlineForwardListNode<T> Node;
+
+    Node *tail_;
+#ifdef DEBUG
+    uintptr_t modifyCount_;
+#endif
+
+    InlineForwardList<T> *thisFromConstructor() {
+        return this;
+    }
+
+  public:
+    InlineForwardList()
+      : tail_(thisFromConstructor())
+#ifdef DEBUG
+      ,  modifyCount_(0)
+#endif
+    { }
+
+  public:
+    typedef InlineForwardListIterator<T> iterator;
+
+  public:
+    iterator begin() const {
+        return iterator(this);
+    }
+    iterator end() const {
+        return iterator(NULL);
+    }
+    iterator removeAt(iterator &where) {
+        iterator iter(where);
+        iter++;
+        iter.prev = where.prev;
+#ifdef DEBUG
+        iter.modifyCount++;
+#endif
+
+        // Once the element 'where' points at has been removed, it is no longer
+        // safe to do any operations that would touch 'iter', as the element
+        // may be added to another list, etc. This NULL ensures that any
+        // improper uses of this function will fail quickly and loudly.
+        removeAfter(where.prev, where.iter);
+        where.prev = where.iter = NULL;
+
+        return iter;
+    }
+    void pushFront(Node *t) {
+        insertAfter(this, t);
+    }
+    void pushBack(Node *t) {
+#ifdef DEBUG
+        modifyCount_++;
+#endif
+        tail_->next = t;
+        t->next = NULL;
+        tail_ = t;
+    }
+    T *popFront() {
+        JS_ASSERT(!empty());
+        T* result = static_cast<T *>(this->next);
+        removeAfter(this, result);
+        return result;
+    }
+    void insertAfter(Node *at, Node *item) {
+#ifdef DEBUG
+        modifyCount_++;
+#endif
+        if (at == tail_)
+            tail_ = item;
+        item->next = at->next;
+        at->next = item;
+    }
+    void removeAfter(Node *at, Node *item) {
+#ifdef DEBUG
+        modifyCount_++;
+#endif
+        if (item == tail_)
+            tail_ = at;
+        JS_ASSERT(at->next == item);
+        at->next = item->next;
+    }
+    bool empty() const {
+        return tail_ == this;
+    }
+};
+
+template <typename T>
+class InlineForwardListIterator
+{
+private:
+    friend class InlineForwardList<T>;
+
+    typedef InlineForwardListNode<T> Node;
+
+    InlineForwardListIterator<T>(const InlineForwardList<T> *owner)
+      : prev(const_cast<Node *>(static_cast<const Node *>(owner))),
+        iter(owner ? owner->next : NULL)
+#ifdef DEBUG
+      , owner(owner),
+        modifyCount(owner ? owner->modifyCount_ : 0)
+#endif
+    { }
+
+public:
+    InlineForwardListIterator<T> & operator ++() {
+        JS_ASSERT(modifyCount == owner->modifyCount_);
+        prev = iter;
+        iter = iter->next;
+        return *this;
+    }
+    InlineForwardListIterator<T> operator ++(int) {
+        JS_ASSERT(modifyCount == owner->modifyCount_);
+        InlineForwardListIterator<T> old(*this);
+        prev = iter;
+        iter = iter->next;
+        return old;
+    }
+    T * operator *() const {
+        JS_ASSERT(modifyCount == owner->modifyCount_);
+        return static_cast<T *>(iter);
+    }
+    T * operator ->() const {
+        JS_ASSERT(modifyCount == owner->modifyCount_);
+        return static_cast<T *>(iter);
+    }
+    bool operator !=(const InlineForwardListIterator<T> &where) const {
+        return iter != where.iter;
+    }
+    bool operator ==(const InlineForwardListIterator<T> &where) const {
+        return iter == where.iter;
+    }
+
+private:
+    Node *prev;
+    Node *iter;
+#ifdef DEBUG
+    const InlineForwardList<T> *owner;
+    uintptr_t modifyCount;
+#endif
+};
+
+template <typename T> class InlineList;
+template <typename T> class InlineListIterator;
+template <typename T> class InlineListReverseIterator;
+
+template <typename T>
+class InlineListNode : public InlineForwardListNode<T>
+{
+  public:
+    InlineListNode() : InlineForwardListNode<T>(NULL), prev(NULL)
+    { }
+    InlineListNode(InlineListNode<T> *n, InlineListNode<T> *p)
+      : InlineForwardListNode<T>(n),
+        prev(p)
+    { }
+
+  protected:
+    friend class InlineList<T>;
+    friend class InlineListIterator<T>;
+    friend class InlineListReverseIterator<T>;
+
+    InlineListNode<T> *prev;
+};
+
+template <typename T>
+class InlineList : protected InlineListNode<T>
+{
+    typedef InlineListNode<T> Node;
+
+    // Silence MSVC warning C4355
+    InlineList<T> *thisFromConstructor() {
+        return this;
+    }
+
+  public:
+    InlineList() : InlineListNode<T>(thisFromConstructor(), thisFromConstructor())
+    { }
+
+  public:
+    typedef InlineListIterator<T> iterator;
+    typedef InlineListReverseIterator<T> reverse_iterator;
+
+  public:
+    iterator begin() const {
+        return iterator(static_cast<Node *>(this->next));
+    }
+    iterator end() const {
+        return iterator(this);
+    }
+    reverse_iterator rbegin() {
+        return reverse_iterator(this->prev);
+    }
+    reverse_iterator rend() {
+        return reverse_iterator(this);
+    }
+    template <typename itertype>
+    itertype removeAt(itertype &where) {
+        itertype iter(where);
+        iter++;
+
+        // Once the element 'where' points at has been removed, it is no longer
+        // safe to do any operations that would touch 'iter', as the element
+        // may be added to another list, etc. This NULL ensures that any
+        // improper uses of this function will fail quickly and loudly.
+        remove(where.iter);
+        where.iter = NULL;
+
+        return iter;
+    }
+    void pushFront(Node *t) {
+        insertAfter(this, t);
+    }
+    void pushBack(Node *t) {
+        insertBefore(this, t);
+    }
+    T *popFront() {
+        JS_ASSERT(!empty());
+        T *t = static_cast<T *>(this->next);
+        remove(t);
+        return t;
+    }
+    T *popBack() {
+        JS_ASSERT(!empty());
+        T *t = static_cast<T *>(this->prev);
+        remove(t);
+        return t;
+    }
+    T *peekBack() const {
+        iterator iter = end();
+        iter--;
+        return *iter;
+    }
+    void insertBefore(Node *at, Node *item) {
+        item->next = at;
+        item->prev = at->prev;
+        at->prev->next = item;
+        at->prev = item;
+    }
+    void insertAfter(Node *at, Node *item) {
+        item->next = at->next;
+        item->prev = at;
+        static_cast<Node *>(at->next)->prev = item;
+        at->next = item;
+    }
+    void steal(InlineList<T> *from) {
+        Node *oldTail = this->prev;
+        Node *stealHead = from->next;
+        Node *stealTail = from->prev;
+        oldTail->next = stealHead;
+        stealHead->prev = oldTail;
+        stealTail->next = this;
+        this->prev = stealTail;
+        from->next = from->prev = from;
+    }
+    void remove(Node *t) {
+        t->prev->next = t->next;
+        static_cast<Node *>(t->next)->prev = t->prev;
+        t->next = t->prev = NULL;
+    }
+    void clear() {
+        this->next = this->prev = this;
+    }
+    bool empty() const {
+        return begin() == end();
+    }
+};
+
+template <typename T>
+class InlineListIterator
+{
+  private:
+    friend class InlineList<T>;
+
+    typedef InlineListNode<T> Node;
+
+    InlineListIterator(const Node *iter)
+      : iter(const_cast<Node *>(iter))
+    { }
+
+  public:
+    InlineListIterator<T> & operator ++() {
+        iter = static_cast<Node *>(iter->next);  // next is the forward-list base link; narrow it, as postfix ++ does.
+        return *this;  // Prefix ++ must yield the iterator, not the node (was `return *iter;`).
+    }
+    InlineListIterator<T> operator ++(int) {
+        InlineListIterator<T> old(*this);
+        iter = static_cast<Node *>(iter->next);
+        return old;
+    }
+    InlineListIterator<T> operator --(int) {
+        InlineListIterator<T> old(*this);
+        iter = iter->prev;
+        return old;
+    }
+    T * operator *() const {
+        return static_cast<T *>(iter);
+    }
+    T * operator ->() const {
+        return static_cast<T *>(iter);
+    }
+    bool operator !=(const InlineListIterator<T> &where) const {
+        return iter != where.iter;
+    }
+    bool operator ==(const InlineListIterator<T> &where) const {
+        return iter == where.iter;
+    }
+
+  private:
+    Node *iter;
+};
+
+template <typename T>
+class InlineListReverseIterator
+{
+  private:
+    friend class InlineList<T>;
+
+    typedef InlineListNode<T> Node;
+
+    InlineListReverseIterator(const Node *iter)
+      : iter(const_cast<Node *>(iter))
+    { }
+
+  public:
+    InlineListReverseIterator<T> & operator ++() {
+        iter = iter->prev;
+        return *this;  // Prefix ++ must yield the iterator, not the node (was `return *iter;`).
+    }
+    InlineListReverseIterator<T> operator ++(int) {
+        InlineListReverseIterator<T> old(*this);
+        iter = iter->prev;
+        return old;
+    }
+    T * operator *() {
+        return static_cast<T *>(iter);
+    }
+    T * operator ->() {
+        return static_cast<T *>(iter);
+    }
+    bool operator !=(const InlineListReverseIterator<T> &where) const {
+        return iter != where.iter;
+    }
+    bool operator ==(const InlineListReverseIterator<T> &where) const {
+        return iter == where.iter;
+    }
+
+  private:
+    Node *iter;
+};
+
+} // namespace js
+
+#endif // js_inline_list_h__
new file mode 100644
--- /dev/null
+++ b/js/src/ion/Ion.cpp
@@ -0,0 +1,636 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=79:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   Andrew Drake <adrake@adrake.org>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#include "Ion.h"
+#include "IonAnalysis.h"
+#include "IonBuilder.h"
+#include "IonSpewer.h"
+#include "IonLIR.h"
+#include "GreedyAllocator.h"
+#include "LICM.h"
+#include "ValueNumbering.h"
+#include "LinearScan.h"
+#include "jscompartment.h"
+#include "IonCompartment.h"
+#include "CodeGenerator.h"
+
+#if defined(JS_CPU_X86)
+# include "x86/Lowering-x86.h"
+#elif defined(JS_CPU_X64)
+# include "x64/Lowering-x64.h"
+#elif defined(JS_CPU_ARM)
+# include "arm/Lowering-arm.h"
+#endif
+#include "jsgcmark.h"
+#include "jsgcinlines.h"
+#include "jsinferinlines.h"
+#include "jsobjinlines.h"
+#include "vm/Stack-inl.h"
+
+using namespace js;
+using namespace js::ion;
+
+IonOptions ion::js_IonOptions;
+
+// Assert that IonCode is gc::Cell aligned.
+JS_STATIC_ASSERT(sizeof(IonCode) % gc::Cell::CellSize == 0);
+
+#ifdef JS_THREADSAFE
+static bool IonTLSInitialized = false;
+static PRUintn IonTLSIndex;
+#else
+static IonContext *GlobalIonContext;
+#endif
+
+IonContext::IonContext(JSContext *cx, TempAllocator *temp)
+  : cx(cx),
+    temp(temp)
+{
+    SetIonContext(this);
+}
+
+IonContext::~IonContext()
+{
+    SetIonContext(NULL);
+}
+
+bool
+ion::InitializeIon()
+{
+#ifdef JS_THREADSAFE
+    if (!IonTLSInitialized) {
+        PRStatus status = PR_NewThreadPrivateIndex(&IonTLSIndex, NULL);
+        if (status != PR_SUCCESS)
+            return false;
+        IonTLSInitialized = true;
+    }
+#endif
+    CheckLogging();
+    return true;
+}
+
+#ifdef JS_THREADSAFE
+IonContext *
+ion::GetIonContext()
+{
+    return (IonContext *)PR_GetThreadPrivate(IonTLSIndex);
+}
+
+bool
+ion::SetIonContext(IonContext *ctx)
+{
+    return PR_SetThreadPrivate(IonTLSIndex, ctx) == PR_SUCCESS;
+}
+#else
+IonContext *
+ion::GetIonContext()
+{
+    JS_ASSERT(GlobalIonContext);
+    return GlobalIonContext;
+}
+
+bool
+ion::SetIonContext(IonContext *ctx)
+{
+    GlobalIonContext = ctx;
+    return true;
+}
+#endif
+
+IonCompartment::IonCompartment()
+  : execAlloc_(NULL),
+    enterJIT_(NULL),
+    bailoutHandler_(NULL),
+    returnError_(NULL),
+    argumentsRectifier_(NULL)
+{
+}
+
+bool
+IonCompartment::initialize(JSContext *cx)
+{
+    execAlloc_ = js::OffTheBooks::new_<JSC::ExecutableAllocator>();
+    if (!execAlloc_)
+        return false;
+
+    return true;
+}
+
+void
+IonCompartment::mark(JSTracer *trc, JSCompartment *compartment)
+{
+    if (!compartment->active)
+        return;
+
+    // These must be available if we could be running JIT code.
+    if (enterJIT_)
+        MarkIonCode(trc, enterJIT_, "enterJIT");
+    if (returnError_)
+        MarkIonCode(trc, returnError_, "returnError");
+
+    // These need to be here until we can figure out how to make the GC
+    // scan these references inside the code generator itself.
+    if (bailoutHandler_)
+        MarkIonCode(trc, bailoutHandler_, "bailoutHandler");
+    for (size_t i = 0; i < bailoutTables_.length(); i++) {
+        if (bailoutTables_[i])
+            MarkIonCode(trc, bailoutTables_[i], "bailoutTable");
+    }
+}
+
+void
+IonCompartment::sweep(JSContext *cx)
+{
+    if (enterJIT_ && IsAboutToBeFinalized(cx, enterJIT_))
+        enterJIT_ = NULL;
+    if (bailoutHandler_ && IsAboutToBeFinalized(cx, bailoutHandler_))
+        bailoutHandler_ = NULL;
+    if (returnError_ && IsAboutToBeFinalized(cx, returnError_))
+        returnError_ = NULL;
+    if (argumentsRectifier_ && IsAboutToBeFinalized(cx, argumentsRectifier_))
+        argumentsRectifier_ = NULL;
+
+    for (size_t i = 0; i < bailoutTables_.length(); i++) {
+        if (bailoutTables_[i] && IsAboutToBeFinalized(cx, bailoutTables_[i]))
+            bailoutTables_[i] = NULL;
+    }
+}
+
+IonCode *
+IonCompartment::getBailoutTable(const FrameSizeClass &frameClass)
+{
+    JS_ASSERT(frameClass != FrameSizeClass::None());
+    return bailoutTables_[frameClass.classId()];
+}
+
+IonCode *
+IonCompartment::getBailoutTable(JSContext *cx, const FrameSizeClass &frameClass)
+{
+    uint32 id = frameClass.classId();
+
+    if (id >= bailoutTables_.length()) {
+        size_t numToPush = id - bailoutTables_.length() + 1;
+        if (!bailoutTables_.reserve(bailoutTables_.length() + numToPush))
+            return NULL;
+        for (size_t i = 0; i < numToPush; i++)
+            bailoutTables_.infallibleAppend(NULL);
+    }
+
+    if (!bailoutTables_[id])
+        bailoutTables_[id] = generateBailoutTable(cx, id);
+
+    return bailoutTables_[id];
+}
+
+IonCompartment::~IonCompartment()
+{
+    Foreground::delete_(execAlloc_);
+}
+
+IonActivation::IonActivation(JSContext *cx, StackFrame *fp)
+  : cx_(cx),
+    prev_(cx->compartment->ionCompartment()->activation()),
+    entryfp_(fp),
+    oldFrameRegs_(cx->regs()),
+    bailout_(NULL)
+{
+    cx->compartment->ionCompartment()->active_ = this;
+    cx->stack.repointRegs(NULL);
+}
+
+IonActivation::~IonActivation()
+{
+    JS_ASSERT(cx_->compartment->ionCompartment()->active_ == this);
+    JS_ASSERT(!bailout_);
+
+    cx_->compartment->ionCompartment()->active_ = prev();
+    cx_->stack.repointRegs(&oldFrameRegs_);
+}
+
+IonCode *
+IonCode::New(JSContext *cx, uint8 *code, uint32 bufferSize, JSC::ExecutablePool *pool)
+{
+    IonCode *codeObj = NewGCThing<IonCode>(cx, gc::FINALIZE_IONCODE, sizeof(IonCode));
+    if (!codeObj) {
+        pool->release();
+        return NULL;
+    }
+
+    new (codeObj) IonCode(code, bufferSize, pool);
+    return codeObj;
+}
+
+void
+IonCode::copyFrom(MacroAssembler &masm)
+{
+    // Store the IonCode pointer right before the code buffer, so we can
+    // recover the gcthing from relocation tables.
+    *(IonCode **)(code_ - sizeof(IonCode *)) = this;
+
+    insnSize_ = masm.instructionsSize();
+    masm.executableCopy(code_);
+
+    relocTableSize_ = masm.relocationTableSize();
+    masm.copyRelocationTable(code_ + relocTableOffset());
+
+    dataSize_ = masm.dataSize();
+    masm.processDeferredData(this, code_ + dataOffset());
+
+    masm.processCodeLabels(this);
+}
+
+void
+IonCode::trace(JSTracer *trc)
+{
+    if (relocTableSize_) {
+        uint8 *start = code_ + relocTableOffset();
+        CompactBufferReader reader(start, start + relocTableSize_);
+        MacroAssembler::TraceRelocations(trc, this, reader);
+    }
+}
+
+void
+IonCode::finalize(JSContext *cx)
+{
+    if (pool_)
+        pool_->release();
+}
+
+IonScript::IonScript()
+  : method_(NULL),
+    deoptTable_(NULL),
+    snapshots_(0),
+    snapshotsSize_(0),
+    bailoutTable_(0),
+    bailoutEntries_(0)
+{
+}
+
+IonScript *
+IonScript::New(JSContext *cx, size_t snapshotsSize, size_t bailoutEntries, size_t constants)
+{
+    // Bound every trailing table (constants was previously unchecked) so the
+    if (snapshotsSize >= MAX_BUFFER_SIZE ||
+        (bailoutEntries >= MAX_BUFFER_SIZE / sizeof(uint32)) ||
+        (constants >= MAX_BUFFER_SIZE / sizeof(Value)))
+    {
+        js_ReportOutOfMemory(cx);
+        return NULL;
+    }
+
+    // byte total below cannot overflow size_t on any supported platform.
+    size_t bytes = snapshotsSize +
+                   bailoutEntries * sizeof(uint32) +
+                   constants * sizeof(Value);
+    uint8 *buffer = (uint8 *)cx->malloc_(sizeof(IonScript) + bytes);
+    if (!buffer)
+        return NULL;
+
+    // Placement-construct the header, then record offsets of the tables
+    IonScript *script = reinterpret_cast<IonScript *>(buffer);
+    new (script) IonScript();
+
+    // that live immediately after it: snapshots, bailout table, constants.
+    script->snapshots_ = sizeof(IonScript);
+    script->snapshotsSize_ = snapshotsSize;
+
+    script->bailoutTable_ = script->snapshots_ + snapshotsSize;
+    script->bailoutEntries_ = bailoutEntries;
+    script->constantTable_ = script->bailoutTable_ + bailoutEntries * sizeof(uint32);
+    script->constantEntries_ = constants;
+
+    return script;
+}
+
+
+// Mark the IonCode objects this compiled script keeps alive. |script| is
+// currently unused; it is kept for symmetry with the static Trace() entry.
+void
+IonScript::trace(JSTracer *trc, JSScript *script)
+{
+    if (method_)
+        MarkIonCode(trc, method_, "method");
+    if (deoptTable_)
+        MarkIonCode(trc, deoptTable_, "deoptimizationTable");
+}
+
+// Copy the finished snapshot buffer into the region reserved by New();
+// the writer's length must match exactly what was reserved.
+void
+IonScript::copySnapshots(const SnapshotWriter *writer)
+{
+    JS_ASSERT(writer->length() == snapshotsSize_);
+    memcpy((uint8 *)this + snapshots_, writer->buffer(), snapshotsSize_);
+}
+
+// Copy bailoutEntries_ snapshot offsets into the bailout table reserved by
+// New().
+void
+IonScript::copyBailoutTable(const SnapshotOffset *table)
+{
+    memcpy(bailoutTable(), table, bailoutEntries_ * sizeof(uint32));
+}
+
+// Copy constantEntries_ Values into the constant table reserved by New().
+// This is a raw memcpy; no write barriers are performed here.
+void
+IonScript::copyConstants(const Value *vp)
+{
+    memcpy(constants(), vp, constantEntries_ * sizeof(Value));
+}
+
+// Static tracing entry: trace the script's Ion data unless compilation is
+// absent or the ION_DISABLED_SCRIPT sentinel marks it as disabled.
+void
+IonScript::Trace(JSTracer *trc, JSScript *script)
+{
+    if (script->ion && script->ion != ION_DISABLED_SCRIPT)
+        script->ion->trace(trc, script);
+}
+
+// Free the script's IonScript allocation (one buffer that includes the
+// trailing tables; see New()). ION_DISABLED_SCRIPT is a sentinel, not a
+// real pointer, and must not be freed.
+void
+IonScript::Destroy(JSContext *cx, JSScript *script)
+{
+    if (!script->ion || script->ion == ION_DISABLED_SCRIPT)
+        return;
+
+    cx->free_(script->ion);
+}
+
+// Drive the full compilation pipeline on a freshly built MIR graph:
+// SSA construction, CFG normalization (edge splitting, block reordering,
+// dominators, phi reverse mapping), type analysis, optional GVN and LICM,
+// lowering to LIR, register allocation, and finally code generation.
+// Pass order is significant; returns false on any pass failure.
+static bool
+TestCompiler(IonBuilder &builder, MIRGraph &graph)
+{
+    IonSpewNewFunction(&graph, builder.script);
+
+    if (!builder.build())
+        return false;
+    IonSpewPass("BuildSSA");
+
+    if (!SplitCriticalEdges(&builder, graph))
+        return false;
+    IonSpewPass("Split Critical Edges");
+
+    if (!ReorderBlocks(graph))
+        return false;
+    IonSpewPass("Reorder Blocks");
+
+    if (!BuildDominatorTree(graph))
+        return false;
+    // No spew: graph not changed.
+
+    if (!BuildPhiReverseMapping(graph))
+        return false;
+    // No spew: graph not changed.
+
+    if (!ApplyTypeInformation(graph))
+        return false;
+    IonSpewPass("Apply types");
+
+    // Global value numbering (optimistic or pessimistic per options).
+    if (js_IonOptions.gvn) {
+        ValueNumberer gvn(graph, js_IonOptions.gvnIsOptimistic);
+        if (!gvn.analyze())
+            return false;
+        IonSpewPass("GVN");
+    }
+
+    if (!EliminateDeadCode(graph))
+        return false;
+    IonSpewPass("DCE");
+
+    // Loop-invariant code motion.
+    if (js_IonOptions.licm) {
+        LICM licm(graph);
+        if (!licm.analyze())
+            return false;
+        IonSpewPass("LICM");
+    }
+
+    LIRGraph lir(graph);
+    LIRGenerator lirgen(&builder, graph, lir);
+    if (!lirgen.generate())
+        return false;
+    IonSpewPass("Generate LIR");
+
+    // Register allocation: linear-scan when enabled, greedy otherwise.
+    if (js_IonOptions.lsra) {
+        LinearScanAllocator regalloc(&lirgen, lir);
+        if (!regalloc.go())
+            return false;
+        IonSpewPass("Allocate Registers", &regalloc);
+    } else {
+        GreedyAllocator greedy(&builder, lir);
+        if (!greedy.allocate())
+            return false;
+        IonSpewPass("Allocate Registers");
+    }
+
+    CodeGenerator codegen(&builder, lir);
+    if (!codegen.generate())
+        return false;
+    // No spew: graph not changed.
+
+    IonSpewEndFunction();
+
+    return true;
+}
+
+// Set up per-compilation state (temp allocator, IonContext, Ion
+// compartment) and run TestCompiler, using the type inference oracle when
+// TI is enabled and a dummy oracle otherwise. The temp arena is released
+// when |temp| goes out of scope.
+static bool
+IonCompile(JSContext *cx, JSScript *script, StackFrame *fp)
+{
+    TempAllocator temp(&cx->tempPool);
+    IonContext ictx(cx, &temp);
+
+    if (!cx->compartment->ensureIonCompartmentExists(cx))
+        return false;
+
+    MIRGraph graph;
+    JSFunction *fun = fp->isFunctionFrame() ? fp->fun() : NULL;
+
+    if (cx->typeInferenceEnabled()) {
+        types::AutoEnterTypeInference enter(cx, true);
+        TypeInferenceOracle oracle;
+
+        if (!oracle.init(cx, script))
+            return false;
+
+        types::AutoEnterCompilation enterCompiler(cx, script);
+
+        IonBuilder builder(cx, script, fun, temp, graph, &oracle);
+        if (!TestCompiler(builder, graph))
+            return false;
+    } else {
+        // Without TI, the dummy oracle provides no type information.
+        DummyOracle oracle;
+        IonBuilder builder(cx, script, fun, temp, graph, &oracle);
+        if (!TestCompiler(builder, graph))
+            return false;
+    }
+
+    return true;
+}
+
+// Filter for frames Ion can currently compile and enter. Each rejected
+// case is an unimplemented feature rather than an error; the spew channel
+// records the reason so aborts can be diagnosed.
+static bool
+CheckFrame(StackFrame *fp)
+{
+    if (!fp->isFunctionFrame()) {
+        // Support for this is almost there - we would need a new
+        // pushBailoutFrame. For the most part we just don't support
+        // the opcodes in a global script yet.
+        IonSpew(IonSpew_Abort, "global frame");
+        return false;
+    }
+
+    if (fp->isEvalFrame()) {
+        // Eval frames are not yet supported. Supporting this will require new
+        // logic in pushBailoutFrame to deal with linking prev.
+        IonSpew(IonSpew_Abort, "eval frame");
+        return false;
+    }
+
+    if (fp->isConstructing()) {
+        // Constructors are not supported yet. We need a way to communicate the
+        // constructing bit through Ion frames.
+        IonSpew(IonSpew_Abort, "constructing frame");
+        return false;
+    }
+
+    if (fp->hasCallObj()) {
+        // Functions with call objects aren't supported yet. To support them,
+        // we need to fix bug 659577 which would prevent aliasing locals to
+        // stack slots.
+        IonSpew(IonSpew_Abort, "frame has callobj");
+        return false;
+    }
+
+    if (fp->script()->usesArguments) {
+        // Functions with arguments objects, or scripts that use arguments, are
+        // not supported yet.
+        IonSpew(IonSpew_Abort, "frame has argsobj");
+        return false;
+    }
+
+    if (fp->isGeneratorFrame()) {
+        // Err... no.
+        IonSpew(IonSpew_Abort, "generator frame");
+        return false;
+    }
+
+    if (fp->isDebuggerFrame()) {
+        IonSpew(IonSpew_Abort, "debugger frame");
+        return false;
+    }
+
+    // This check is to not overrun the stack. Eventually, we will want to
+    // handle this when we support JSOP_ARGUMENTS or function calls.
+    if (fp->numActualArgs() >= SNAPSHOT_MAX_NARGS) {
+        IonSpew(IonSpew_Abort, "too many actual args");
+        return false;
+    }
+
+    // Sanity: the cases above should have excluded these configurations.
+    JS_ASSERT(!fp->hasArgsObj());
+    JS_ASSERT_IF(fp->fun(), !fp->fun()->isHeavyweight());
+    return true;
+}
+
+// Decide whether Ion code is available for |script|: reuse a cached
+// result, skip while the script is still below the warm-up threshold, or
+// attempt compilation. A failed compile poisons script->ion with the
+// ION_DISABLED_SCRIPT sentinel so compilation is never retried.
+MethodStatus
+ion::Compile(JSContext *cx, JSScript *script, js::StackFrame *fp)
+{
+    JS_ASSERT(ion::IsEnabled());
+
+    if (cx->compartment->debugMode()) {
+        IonSpew(IonSpew_Abort, "debugging");
+        return Method_CantCompile;
+    }
+
+    if (!CheckFrame(fp))
+        return Method_CantCompile;
+
+    // A previous attempt already decided this script's fate.
+    if (script->ion) {
+        if (script->ion == ION_DISABLED_SCRIPT || !script->ion->method())
+            return Method_CantCompile;
+
+        return Method_Compiled;
+    }
+
+    // Warm-up: wait for enough invocations before spending compile time.
+    if (script->incUseCount() <= js_IonOptions.invokesBeforeCompile)
+        return Method_Skipped;
+
+    if (!IonCompile(cx, script, fp)) {
+        script->ion = ION_DISABLED_SCRIPT;
+        return Method_CantCompile;
+    }
+
+    return Method_Compiled;
+}
+
+// Enter Ion-compiled code for |fp| through the per-compartment enterJIT
+// trampoline, then copy the return value back onto the interpreter frame.
+// Returns false if the trampoline is unavailable or the call reports
+// failure.
+bool
+ion::Cannon(JSContext *cx, StackFrame *fp)
+{
+    JS_ASSERT(ion::IsEnabled());
+    JS_ASSERT(CheckFrame(fp));
+
+    EnterIonCode enterJIT = cx->compartment->ionCompartment()->enterJIT(cx);
+    if (!enterJIT)
+        return false;
+
+    int argc = 0;
+    Value *argv = NULL;
+
+    void *calleeToken;
+    if (fp->isFunctionFrame()) {
+        argc = CountArgSlots(fp->fun());
+        // NOTE(review): argv starts one slot before the formals — presumably
+        // the |this| slot; confirm against the trampoline's expectations.
+        argv = fp->formalArgs() - 1;
+        calleeToken = CalleeToToken(&fp->callee());
+    } else {
+        // Unreachable in practice: CheckFrame (asserted above) rejects
+        // non-function frames. Kept for future global-script support.
+        calleeToken = CalleeToToken(fp->script());
+    }
+
+    JSScript *script = fp->script();
+    IonScript *ion = script->ion;
+    IonCode *code = ion->method();
+    void *jitcode = code->raw();
+
+    JSBool ok;
+    Value result;
+    {
+        AssertCompartmentUnchanged pcc(cx);
+        IonContext ictx(cx, NULL);
+        IonActivation activation(cx, fp);
+        JSAutoResolveFlags rf(cx, RESOLVE_INFER);
+
+        ok = enterJIT(jitcode, argc, argv, &result, calleeToken);
+    }
+
+    JS_ASSERT(fp == cx->fp());
+
+    // The trampoline wrote the return value but did not set the HAS_RVAL flag.
+    fp->setReturnValue(result);
+    fp->markFunctionEpilogueDone();
+
+    return !!ok;
+}
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/Ion.h
@@ -0,0 +1,143 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=79:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   David Anderson <danderson@mozilla.com>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#if !defined(jsion_ion_h__) && defined(JS_ION)
+#define jsion_ion_h__
+
+#include "jscntxt.h"
+#include "IonCode.h"
+
+namespace js {
+namespace ion {
+
+struct TempAllocator;
+
+struct IonOptions
+{
+    // If Ion is supported, this toggles whether Ion is used.
+    //
+    // Default: false
+    bool enabled;
+
+    // Toggles whether global value numbering is used.
+    //
+    // Default: true
+    bool gvn;
+
+    // Toggles whether global value numbering is optimistic (true) or
+    // pessimistic (false).
+    //
+    // Default: true
+    bool gvnIsOptimistic;
+
+    // Toggles whether loop invariant code motion is performed.
+    //
+    // Default: true
+    bool licm;
+
+    // Toggles whether Linear Scan Register Allocation is used. If LSRA is not
+    // used, then Greedy Register Allocation is used instead.
+    //
+    // Default: true
+    bool lsra;
+
+    // How many invocations of a function are needed before the Ion compiler
+    // kicks in.
+    //
+    // Default: 40.
+    uint32 invokesBeforeCompile;
+
+    // Compile on the first invocation instead of waiting for warm-up.
+    void setEagerCompilation() {
+        invokesBeforeCompile = 0;
+    }
+
+    // Defaults as documented on each field above.
+    IonOptions()
+      : enabled(false),
+        gvn(true),
+        gvnIsOptimistic(true),
+        licm(true),
+        lsra(true),
+        invokesBeforeCompile(40)
+    { }
+};
+
+// Outcome of asking Ion for compiled code (see ion::Compile).
+enum MethodStatus
+{
+    Method_CantCompile,   // Compilation failed or this script/frame is unsupported.
+    Method_Skipped,       // Below the warm-up threshold; not attempted yet.
+    Method_Compiled       // Ion code exists and can be entered.
+};
+
+// An Ion context is needed to enter into either an Ion method or an instance
+// of the Ion compiler. It points to a temporary allocator and the active
+// JSContext.
+class IonContext
+{
+  public:
+    // NOTE(review): constructor/destructor bodies are elsewhere; presumably
+    // they install/restore the current context (see SetIonContext) — confirm.
+    IonContext(JSContext *cx, TempAllocator *temp);
+    ~IonContext();
+
+    JSContext *cx;          // Active JSContext for this compilation/entry.
+    TempAllocator *temp;    // Per-compilation arena allocator (may be NULL).
+};
+
+extern IonOptions js_IonOptions;
+
+// Initialize Ion statically for all JSRuntimes.
+bool InitializeIon();
+
+// Get and set the current Ion context.
+IonContext *GetIonContext();
+bool SetIonContext(IonContext *ctx);
+
+MethodStatus Compile(JSContext *cx, JSScript *script, js::StackFrame *fp);
+bool Cannon(JSContext *cx, StackFrame *fp);
+
+// Runtime toggle: reflects js_IonOptions.enabled (the build-time JS_ION
+// guard wraps this whole header).
+static inline bool IsEnabled()
+{
+    return js_IonOptions.enabled;
+}
+
+}
+}
+
+#endif // jsion_ion_h__
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/IonAllocPolicy.h
@@ -0,0 +1,138 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=79:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   David Anderson <danderson@mozilla.com>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsion_ion_alloc_policy_h__
+#define jsion_ion_alloc_policy_h__
+
+#include "jscntxt.h"
+#include "jsarena.h"
+
+#include "Ion.h"
+#include "InlineList.h"
+
+namespace js {
+namespace ion {
+
+// Allocation policy backed by the active IonContext's JSContext temp arena.
+// free_ is a no-op because the arena is released wholesale at the end of a
+// compilation.
+class IonAllocPolicy
+{
+  public:
+    void *malloc_(size_t bytes) {
+        JSContext *cx = GetIonContext()->cx;
+        void *p;
+        JS_ARENA_ALLOCATE(p, &cx->tempPool, bytes);
+        return p;
+    }
+    // Arena chunks cannot be resized in place: allocate fresh and copy.
+    // The old chunk is intentionally abandoned into the arena.
+    void *realloc_(void *p, size_t oldBytes, size_t bytes) {
+        void *n = malloc_(bytes);
+        if (!n)
+            return n;
+        // Guard the copy: memcpy from a NULL source is undefined behavior
+        // even when the length is zero.
+        if (p && oldBytes)
+            memcpy(n, p, Min(oldBytes, bytes));
+        return n;
+    }
+    void free_(void *p) {
+        // No-op: memory is reclaimed when the arena is released.
+    }
+    void reportAllocOverflow() const {
+    }
+};
+
+// Arena-backed bump allocator scoped to a single compilation: records the
+// arena position at construction and rewinds to it on destruction, freeing
+// everything allocated through it at once.
+struct TempAllocator
+{
+    JSArenaPool *arena;
+
+    TempAllocator(JSArenaPool *arena)
+      : arena(arena),
+        mark(JS_ARENA_MARK(arena))  // position to rewind to in ~TempAllocator
+    { }
+
+    ~TempAllocator()
+    {
+        JS_ARENA_RELEASE(arena, mark);
+    }
+
+    void *allocate(size_t bytes)
+    {
+        void *p;
+        JS_ARENA_ALLOCATE(p, arena, bytes);
+        // NOTE(review): ensureBallast() currently always succeeds, and p is
+        // returned unchecked — p may still be NULL if the arena failed.
+        if (!ensureBallast())
+            return NULL;
+        return p;
+    }
+
+    // Placeholder for reserving headroom; always succeeds for now.
+    bool ensureBallast() {
+        return true;
+    }
+
+  private:
+    void *mark;  // arena snapshot taken at construction time
+};
+
+// Base class for objects allocated out of the per-compilation TempAllocator.
+struct TempObject
+{
+    // Allocate from the active IonContext's temp arena. NOTE(review):
+    // allocate() can return NULL; callers are expected to null-check.
+    inline void *operator new(size_t nbytes) {
+        return GetIonContext()->temp->allocate(nbytes);
+    }
+public:
+    // Placement form: construct at a caller-provided address.
+    inline void *operator new(size_t nbytes, void *pos) {
+        return pos;
+    }
+};
+
+// Simple free-list recycler for temp-allocated objects of type T.
+template <typename T>
+class TempObjectPool
+{
+    InlineForwardList<T> freed_;  // objects returned via free(), ready for reuse
+
+  public:
+    // Pop a recycled object if one is available, else allocate a fresh T.
+    T *allocate() {
+        if (freed_.empty())
+            return new T();
+        return freed_.popFront();
+    }
+    // Push onto the free list; note no destructor is run here.
+    void free(T *obj) {
+        freed_.pushFront(obj);
+    }
+};
+
+} // namespace ion
+} // namespace js
+
+#endif // jsion_ion_alloc_policy_h__
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/IonAnalysis.cpp
@@ -0,0 +1,715 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=79:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   David Anderson <danderson@mozilla.com>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#include "IonBuilder.h"
+#include "MIRGraph.h"
+#include "Ion.h"
+#include "IonAnalysis.h"
+
+using namespace js;
+using namespace js::ion;
+
+// A critical edge is an edge which is neither its successor's only predecessor
+// nor its predecessor's only successor. Critical edges must be split to
+// prevent copy-insertion and code motion from affecting other edges.
+bool
+ion::SplitCriticalEdges(MIRGenerator *gen, MIRGraph &graph)
+{
+    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
+        // An edge is critical only if the source has multiple successors...
+        if (block->numSuccessors() < 2)
+            continue;
+        for (size_t i = 0; i < block->numSuccessors(); i++) {
+            // ...and the target has multiple predecessors.
+            MBasicBlock *target = block->getSuccessor(i);
+            if (target->numPredecessors() < 2)
+                continue;
+
+            // Create a new block inheriting from the predecessor.
+            // NOTE(review): NewSplitEdge/MGoto::New results are not
+            // null-checked here — confirm they are infallible in this arena.
+            MBasicBlock *split = MBasicBlock::NewSplitEdge(gen, *block);
+            graph.addBlock(split);
+            split->end(MGoto::New(target));
+
+            // Rewire both ends of the edge through the new block.
+            block->replaceSuccessor(i, split);
+            target->replacePredecessor(*block, split);
+        }
+    }
+    return true;
+}
+
+// Instructions are useless if they are idempotent and unused.
+// This pass eliminates useless instructions.
+// The graph itself is unchanged.
+bool
+ion::EliminateDeadCode(MIRGraph &graph)
+{
+    // Traverse in postorder so that we hit uses before definitions.
+    // Traverse instruction list backwards for the same reason.
+    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
+        // Remove unused instructions.
+        for (MInstructionReverseIterator inst = block->rbegin(); inst != block->rend(); ) {
+            if (inst->isIdempotent() && !inst->hasUses())
+                inst = block->removeAt(inst);  // removeAt yields the next iterator
+            else
+                inst++;
+        }
+
+        // FIXME: Bug 678273.
+        // All phi nodes currently have non-zero uses as determined
+        // by hasUses(). They are kept alive by resume points, and therefore
+        // cannot currently be eliminated.
+    }
+
+    return true;
+}
+
+// The type analysis algorithm inserts conversions and box/unbox instructions
+// to make the IR graph well-typed for future passes. Each definition has the
+// following type information:
+//
+//     * Actual type. This is the type the instruction will definitely return.
+//     * Specialization. Some instructions, like MAdd, may be specialized to a 
+//       particular type. This specialization directs the actual type.
+//     * Observed type. If the actual type of a node is not known (Value), then
+//       it may be annotated with the set of types it would be unboxed as
+//       (determined by specialization). This directs whether unbox operations
+//       can be hoisted to the definition versus placed near its uses.
+//
+// (1) Specialization.
+//     ------------------------
+//     All instructions and phis are added to a worklist, such that they are
+//     initially observed in postorder.
+//
+//     Each instruction looks at the types of its inputs and decides whether to
+//     respecialize (for example, prefer double inputs to int32). Instructions
+//     may also annotate untyped values with a preferred type.
+//
+//     Each phi looks at the effective types of its inputs. If all inputs have
+//     the same effective type, the phi specializes to that type.
+//
+//     If any definition's specialization changes, its uses are re-analyzed.
+//     If any definition's effective type changes, its phi uses are
+//     re-analyzed.
+//
+// (2) Conversions.
+//     ------------------------
+//     All instructions and phis are visited in reverse postorder.
+//
+//     (A) Output adjustment. If the definition's output is a Value, and has
+//         exactly one observed type, then an Unbox instruction is placed right
+//         after the definition. Each use is modified to take the narrowed
+//         type.
+//
+//     (B) Input adjustment. Each input is asked to apply conversion operations
+//         to its inputs. This may include Box, Unbox, or other
+//         instruction-specific type conversion operations.
+//
+// Implements the two-phase algorithm described above: (1) fixpoint
+// specialization driven by instruction and phi worklists, then (2) a
+// reverse-postorder conversion-insertion pass.
+class TypeAnalyzer : public TypeAnalysis
+{
+    MIRGraph &graph;
+    Vector<MInstruction *, 0, SystemAllocPolicy> worklist_;   // instructions to (re)analyze
+    Vector<MPhi *, 0, SystemAllocPolicy> phiWorklist_;        // phis to (re)analyze
+    bool phisHaveBeenAnalyzed_;   // set once specializePhis() first runs
+
+    MInstruction *popInstruction() {
+        MInstruction *ins = worklist_.popCopy();
+        ins->setNotInWorklist();
+        return ins;
+    }
+    MPhi *popPhi() {
+        MPhi *phi = phiWorklist_.popCopy();
+        phi->setNotInWorklist();
+        return phi;
+    }
+    // Infallible re-push: buildWorklist() already reserved worklist memory,
+    // so append cannot fail after the initial build (see comment below).
+    void repush(MDefinition *def) {
+#ifdef DEBUG
+        bool ok =
+#endif
+            push(def);
+        JS_ASSERT(ok);
+    }
+    // Queue |def| unless it is already queued or has no type policy (and is
+    // not a phi). Phis and instructions go to separate worklists.
+    bool push(MDefinition *def) {
+        if (def->isInWorklist())
+            return true;
+        if (!def->isPhi() && !def->typePolicy())
+            return true;
+        def->setInWorklist();
+        if (def->isPhi())
+            return phiWorklist_.append(def->toPhi());
+        return worklist_.append(def->toInstruction());
+    }
+
+    // After building the worklist, insertion is infallible because memory for
+    // all instructions has been reserved.
+    bool buildWorklist();
+
+    void addPreferredType(MDefinition *def, MIRType type);
+    void reanalyzePhiUses(MDefinition *def);
+    void reanalyzeUses(MDefinition *def);
+    void despecializePhi(MPhi *phi);
+    void specializePhi(MPhi *phi);
+    void specializePhis();
+    void specializeInstructions();
+    void determineSpecializations();
+    void replaceRedundantPhi(MPhi *phi);
+    void adjustPhiInputs(MPhi *phi);
+    bool adjustInputs(MDefinition *def);
+    void adjustOutput(MDefinition *def);
+    bool insertConversions();
+
+  public:
+    TypeAnalyzer(MIRGraph &graph)
+      : graph(graph),
+        phisHaveBeenAnalyzed_(false)
+    { }
+
+    bool analyze();
+};
+
+// Seed both worklists with every phi and instruction in the graph, and
+// eliminate MCopy nodes along the way by forwarding them to their operand.
+bool
+TypeAnalyzer::buildWorklist()
+{
+    // The worklist is LIFO. We add items in postorder to get reverse-postorder
+    // removal.
+    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
+        for (MPhiIterator iter = block->phisBegin(); iter != block->phisEnd(); iter++) {
+            if (!push(*iter))
+                return false;
+        }
+        MInstructionIterator iter = block->begin();
+        while (iter != block->end()) {
+            if (iter->isCopy()) {
+                // Remove copies here.
+                MCopy *copy = iter->toCopy();
+                copy->replaceAllUsesWith(copy->getOperand(0));
+                iter = block->removeAt(iter);
+                continue;
+            }
+            if (!push(*iter))
+                return false;
+            iter++;
+        }
+    }
+    return true;
+}
+
+// Re-queue only the phi uses of |def|, for when its *effective* (observed)
+// type changed; non-phi uses are unaffected by that change.
+void
+TypeAnalyzer::reanalyzePhiUses(MDefinition *def)
+{
+    // Only bother analyzing effective type changes if the phi queue has not
+    // yet been analyzed.
+    if (!phisHaveBeenAnalyzed_)
+        return;
+
+    for (MUseDefIterator uses(def); uses; uses++) {
+        if (uses.def()->isPhi())
+            repush(uses.def());
+    }
+}
+
+// Re-queue every use of |def| after its output type changed.
+void
+TypeAnalyzer::reanalyzeUses(MDefinition *def)
+{
+    // Reflow this definition's uses, since its output type changed.
+    // Policies must guarantee this terminates by never narrowing
+    // during a respecialization.
+    for (MUseDefIterator uses(def); uses; uses++)
+        repush(uses.def());
+}
+
+// Annotate |def| with a preferred unbox type; if that changes its observed
+// type, its phi uses must be reconsidered.
+void
+TypeAnalyzer::addPreferredType(MDefinition *def, MIRType type)
+{
+    MIRType usedAsType = def->usedAsType();
+    def->useAsType(type);
+    if (usedAsType != def->usedAsType())
+        reanalyzePhiUses(def);
+}
+
+// Drain the instruction worklist to a fixpoint (called repeatedly from
+// determineSpecializations()).
+void
+TypeAnalyzer::specializeInstructions()
+{
+    // For each instruction with a type policy, analyze its inputs to see if a
+    // respecialization is needed, which may change its output type. If such a
+    // change occurs, re-add each use of the instruction back to the worklist.
+    while (!worklist_.empty()) {
+        MInstruction *ins = popInstruction();
+
+        TypePolicy *policy = ins->typePolicy();
+        if (policy->respecialize(ins))
+            reanalyzeUses(ins);
+        policy->specializeInputs(ins, this);
+    }
+}
+
+// The effective type of |def|: its concrete type when known, otherwise the
+// single type it is observed to be used as (which may itself be Value).
+static inline MIRType
+GetObservedType(MDefinition *def)
+{
+    MIRType actual = def->type();
+    if (actual != MIRType_Value)
+        return actual;
+    return def->usedAsType();
+}
+
+// Fall back to the generic Value type for |phi| and reflow its uses.
+void
+TypeAnalyzer::despecializePhi(MPhi *phi)
+{
+    // If the phi is already despecialized, we're done.
+    if (phi->type() == MIRType_Value)
+        return;
+
+    phi->specialize(MIRType_Value);
+    reanalyzeUses(phi);
+}
+
+// Try to give |phi| a concrete type: propagate any expected type down to
+// its inputs, then specialize only if every input agrees on one observed
+// type; otherwise despecialize to Value.
+void
+TypeAnalyzer::specializePhi(MPhi *phi)
+{
+    // If this phi was despecialized, but we have already tried to specialize
+    // it, just give up.
+    if (phi->triedToSpecialize() && phi->type() == MIRType_Value)
+        return;
+
+    MIRType phiType = GetObservedType(phi);
+    if (phiType != MIRType_Value) {
+        // This phi is expected to be a certain type, so propagate this up to
+        // its uses. While doing so, prevent re-adding this phi to the phi
+        // worklist.
+        phi->setInWorklist();
+        for (size_t i = 0; i < phi->numOperands(); i++)
+            addPreferredType(phi->getOperand(i), phiType);
+        phi->setNotInWorklist();
+    }
+
+    // Find the type of the first phi input.
+    MDefinition *in = phi->getOperand(0);
+    MIRType first = GetObservedType(in);
+
+    // If it's a value, just give up and leave the phi unspecialized.
+    if (first == MIRType_Value) {
+        despecializePhi(phi);
+        return;
+    }
+
+    // Every remaining input must observe the same type as the first.
+    for (size_t i = 1; i < phi->numOperands(); i++) {
+        MDefinition *other = phi->getOperand(i);
+        if (GetObservedType(other) != first) {
+            despecializePhi(phi);
+            return;
+        }
+    }
+
+    // Already at that type: nothing to reflow.
+    if (phi->type() == first)
+        return;
+
+    // All inputs have the same type - specialize this phi!
+    phi->specialize(first);
+    reanalyzeUses(phi);
+}
+
+// Drain the phi worklist; after the first call, effective-type changes on
+// definitions will re-queue affected phis (see reanalyzePhiUses).
+void
+TypeAnalyzer::specializePhis()
+{
+    phisHaveBeenAnalyzed_ = true;
+
+    while (!phiWorklist_.empty()) {
+        MPhi *phi = popPhi();
+        specializePhi(phi);
+    }
+}
+
+// Part 1: Determine specializations.
+// Alternate between the two worklists until both drain: specializing phis
+// can re-queue instructions and vice versa.
+void
+TypeAnalyzer::determineSpecializations()
+{
+    do {
+        // First, specialize all non-phi instructions.
+        specializeInstructions();
+
+        // Now, go through phis, and try to specialize those. If any phis
+        // become specialized, their uses are re-added to the worklist.
+        specializePhis();
+    } while (!worklist_.empty());
+}
+
+// Decide whether |use| (a use of the boxed value |box|) should be rewritten
+// to consume the narrowed output of |unbox| instead.
+static inline bool
+ShouldSpecializeInput(MDefinition *box, MNode *use, MUnbox *unbox)
+{
+    // If the node is a resume point, always replace the input to avoid
+    // carrying around a wider type.
+    if (use->isResumePoint()) {
+        MResumePoint *resumePoint = use->toResumePoint();
+
+        // If this resume point is attached to the definition, being effectful,
+        // we *cannot* replace its use! The resume point comes in between the
+        // definition and the unbox.
+        //
+        // Initialize to NULL so the comparison below is well-defined even if
+        // |box| is neither an instruction nor a phi; previously this pointer
+        // was read uninitialized in that case.
+        MResumePoint *defResumePoint = NULL;
+        if (box->isInstruction())
+            defResumePoint = box->toInstruction()->resumePoint();
+        else if (box->isPhi())
+            defResumePoint = box->block()->entryResumePoint();
+        return (defResumePoint != resumePoint);
+    }
+
+    MDefinition *def = use->toDefinition();
+
+    // Phis do not have type policies, but if they are specialized need
+    // specialized inputs.
+    if (def->isPhi())
+        return def->type() != MIRType_Value;
+
+    // Otherwise, only replace nodes that have a type policy. Otherwise, we
+    // would replace an unbox into its own input.
+    if (def->typePolicy())
+        return true;
+
+    return false;
+}
+
+// For a Value-typed |def| that is observed as exactly one type, insert an
+// Unbox next to its definition and rewrite eligible uses to consume the
+// narrowed value.
+void
+TypeAnalyzer::adjustOutput(MDefinition *def)
+{
+    JS_ASSERT(def->type() == MIRType_Value);
+
+    MIRType usedAs = def->usedAsType();
+    if (usedAs == MIRType_Value) {
+        // This definition is used as more than one type, so give up on
+        // specializing its definition. Its uses instead will insert
+        // appropriate conversion operations.
+        return;
+    }
+
+    // Place the unbox as close to the definition as legally possible.
+    MBasicBlock *block = def->block();
+    MUnbox *unbox = MUnbox::New(def, usedAs);
+    if (def->isPhi()) {
+        // Insert at the beginning of the block.
+        block->insertBefore(*block->begin(), unbox);
+    } else if (block->start() && def->id() < block->start()->id()) {
+        // This definition comes before the start of the program, so insert
+        // the unbox after the start instruction.
+        block->insertAfter(block->start(), unbox);
+    } else {
+        // Insert directly after the instruction.
+        block->insertAfter(def->toInstruction(), unbox);
+    }
+
+    // The unbox we just added must be def's first use; skip it below so we
+    // do not rewrite the unbox's own input.
+    JS_ASSERT(def->usesBegin()->node() == unbox);
+
+    for (MUseIterator use(def->usesBegin()); use != def->usesEnd(); ) {
+        if (ShouldSpecializeInput(def, use->node(), unbox))
+            use = use->node()->replaceOperand(use, unbox);
+        else
+            use++;
+    }
+}
+
+// Ensure a phi's inputs match its type: typed phis assert their inputs
+// agree; Value phis get a Box inserted for every typed input.
+void
+TypeAnalyzer::adjustPhiInputs(MPhi *phi)
+{
+    // If the phi returns a specific type, assert that its inputs are correct.
+    if (phi->type() != MIRType_Value) {
+#ifdef DEBUG
+        for (size_t i = 0; i < phi->numOperands(); i++) {
+            MDefinition *in = phi->getOperand(i);
+            JS_ASSERT(GetObservedType(in) == phi->type());
+        }
+#endif
+        return;
+    }
+
+    // Box every typed input.
+    for (size_t i = 0; i < phi->numOperands(); i++) {
+        MDefinition *in = phi->getOperand(i);
+        if (in->type() == MIRType_Value)
+            continue;
+
+        // Box in the predecessor block, just before its terminator.
+        MBox *box = MBox::New(in);
+        in->block()->insertBefore(in->block()->lastIns(), box);
+        phi->replaceOperand(i, box);
+    }
+}
+
+bool
+TypeAnalyzer::adjustInputs(MDefinition *def)
+{
+    // The adjustOutput pass of our inputs' defs may not have have been
+    // satisfactory, so double check now, inserting conversions as necessary.
+    TypePolicy *policy = def->typePolicy();
+    if (policy && !policy->adjustInputs(def->toInstruction()))
+        return false;
+    return true;
+}
+
+void
+TypeAnalyzer::replaceRedundantPhi(MPhi *phi)
+{
+    MBasicBlock *block = phi->block();
+    js::Value v = (phi->type() == MIRType_Undefined) ? UndefinedValue() : NullValue();
+    MConstant *c = MConstant::New(v);
+    // The instruction pass will insert the box
+    block->insertBefore(*(block->begin()), c);
+    phi->replaceAllUsesWith(c);
+}
+
// Walks the graph replacing redundant phis, boxing phi inputs, satisfying
// instruction type policies, and specializing Value-typed outputs.
bool
TypeAnalyzer::insertConversions()
{
    // Instructions are processed in reverse postorder: all defs are
    // seen before uses. This ensures that output adjustment (which may rewrite
    // inputs of uses) does not conflict with input adjustment.
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
        for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd();) {
            // Undefined/Null phis are single-valued; swap them for a
            // constant. removePhiAt() yields the next phi, so the iterator
            // only advances manually in the else branch.
            if (phi->type() <= MIRType_Null) {
                replaceRedundantPhi(*phi);
                phi = block->removePhiAt(phi);
            } else {
                adjustPhiInputs(*phi);
                if (phi->type() == MIRType_Value)
                    adjustOutput(*phi);
                phi++;
            }
        }
        for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
            if (!adjustInputs(*iter))
                return false;
            if (iter->type() == MIRType_Value)
                adjustOutput(*iter);
        }
    }
    return true;
}
+
+bool
+TypeAnalyzer::analyze()
+{
+    if (!buildWorklist())
+        return false;
+    determineSpecializations();
+    if (!insertConversions())
+        return false;
+    return true;
+}
+
+bool
+ion::ApplyTypeInformation(MIRGraph &graph)
+{
+    TypeAnalyzer analyzer(graph);
+
+    if (!analyzer.analyze())
+        return false;
+
+    return true;
+}
+
// Rebuilds the graph's block list in reverse postorder using an explicit,
// non-recursive depth-first traversal of successor edges.
bool
ion::ReorderBlocks(MIRGraph &graph)
{
    // |pending| is the current DFS path; |successors| records, for each
    // pending block, which successor index to resume at; |done| collects
    // finished blocks in postorder (via pushFront).
    InlineList<MBasicBlock> pending;
    Vector<unsigned int, 0, IonAllocPolicy> successors;
    InlineList<MBasicBlock> done;

    MBasicBlock *current = *graph.begin();
    unsigned int nextSuccessor = 0;

    graph.clearBlockList();

    // Build up a postorder traversal non-recursively.
    while (true) {
        if (!current->isMarked()) {
            // Mark so back-edges and shared successors are not revisited.
            current->mark();

            if (nextSuccessor < current->lastIns()->numSuccessors()) {
                // Descend into the next successor, saving our position so
                // the walk can resume here afterwards.
                pending.pushFront(current);
                if (!successors.append(nextSuccessor))
                    return false;

                current = current->lastIns()->getSuccessor(nextSuccessor);
                nextSuccessor = 0;
                continue;
            }

            // All successors visited: this block is finished (postorder).
            done.pushFront(current);
        }

        if (pending.empty())
            break;

        // Return to the parent; unmarking lets the branch above re-enter
        // it with the next successor index.
        current = pending.popFront();
        current->unmark();
        nextSuccessor = successors.popCopy() + 1;
    }

    JS_ASSERT(pending.empty());
    JS_ASSERT(successors.empty());

    // Insert in reverse order so blocks are in RPO order in the graph
    while (!done.empty()) {
        current = done.popFront();
        current->unmark();
        graph.addBlock(current);
    }

    return true;
}
+
+// A Simple, Fast Dominance Algorithm by Cooper et al.
+static MBasicBlock *
+IntersectDominators(MBasicBlock *block1, MBasicBlock *block2)
+{
+    MBasicBlock *finger1 = block1;
+    MBasicBlock *finger2 = block2;
+
+    while (finger1->id() != finger2->id()) {
+        // In the original paper, the comparisons are on the postorder index.
+        // In this implementation, the id of the block is in reverse postorder,
+        // so we reverse the comparison.
+        while (finger1->id() > finger2->id())
+            finger1 = finger1->immediateDominator();
+
+        while (finger2->id() > finger1->id())
+            finger2 = finger2->immediateDominator();
+    }
+    return finger1;
+}
+
// Iteratively computes the immediate dominator of every reachable block,
// repeating until the assignment reaches a fixed point (Cooper et al.).
static void
ComputeImmediateDominators(MIRGraph &graph)
{
    // By convention the start block is its own immediate dominator.
    MBasicBlock *startBlock = *graph.begin();
    startBlock->setImmediateDominator(startBlock);

    bool changed = true;

    while (changed) {
        changed = false;
        // We intentionally exclude the start node.
        MBasicBlockIterator block(graph.begin());
        block++;
        for (; block != graph.end(); block++) {
            // Blocks with no predecessors are skipped.
            if (block->numPredecessors() == 0)
                continue;

            // Start from the first predecessor and intersect with every
            // other predecessor whose idom has already been computed.
            MBasicBlock *newIdom = block->getPredecessor(0);

            for (size_t i = 1; i < block->numPredecessors(); i++) {
                MBasicBlock *pred = block->getPredecessor(i);
                if (pred->immediateDominator() != NULL)
                    newIdom = IntersectDominators(pred, newIdom);
            }

            // Any refinement forces another pass over the graph.
            if (block->immediateDominator() != newIdom) {
                block->setImmediateDominator(newIdom);
                changed = true;
            }
        }
    }
}
+
// Builds the explicit dominator tree (child lists and dominated-block
// counts) from the immediate-dominator assignment.
bool
ion::BuildDominatorTree(MIRGraph &graph)
{
    ComputeImmediateDominators(graph);

    // Traversing the graph in postorder visits each block before its
    // immediate dominator, so by the time we reach a particular block we
    // have already processed everything it dominates, and
    // block->numDominated() is accurate.
    for (PostorderIterator i(graph.poBegin()); *i != *graph.begin(); i++) {
        MBasicBlock *child = *i;
        MBasicBlock *parent = child->immediateDominator();

        if (!parent->addImmediatelyDominatedBlock(child))
            return false;

        // an additional +1 because of this child block.
        parent->addNumDominated(child->numDominated() + 1);
    }
    // The start block must (transitively) dominate every other block.
    JS_ASSERT(graph.begin()->numDominated() == graph.numBlocks() - 1);
    return true;
}
+
// Records, on each predecessor of a phi-bearing block, which successor has
// the phis and which operand slot belongs to that predecessor.
bool
ion::BuildPhiReverseMapping(MIRGraph &graph)
{
    // Build a mapping such that given a basic block, whose successor has one or
    // more phis, we can find our specific input to that phi. To make this fast
    // mapping work we rely on a specific property of our structured control
    // flow graph: For a block with phis, its predecessors each have only one
    // successor with phis. Consider each case:
    //   * Blocks with less than two predecessors cannot have phis.
    //   * Breaks. A break always has exactly one successor, and the break
    //             catch block has exactly one predecessor for each break, as
    //             well as a final predecessor for the actual loop exit.
    //   * Continues. A continue always has exactly one successor, and the
    //             continue catch block has exactly one predecessor for each
    //             continue, as well as a final predecessor for the actual
    //             loop continuation. The continue itself has exactly one
    //             successor.
    //   * An if. Each branch has exactly one predecessor.
    //   * A switch. Each branch has exactly one predecessor.
    //   * Loop tail. A new block is always created for the exit, and if a
    //             break statement is present, the exit block will forward
    //             directly to the break block.
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        if (block->numPredecessors() < 2) {
            JS_ASSERT(block->phisEmpty());
            continue;
        }

        // Assert on the above.
        for (size_t j = 0; j < block->numPredecessors(); j++) {
            MBasicBlock *pred = block->getPredecessor(j);

#ifdef DEBUG
            // Verify the structured-CFG property: at most one successor of
            // each predecessor carries phis.
            size_t numSuccessorsWithPhis = 0;
            for (size_t k = 0; k < pred->numSuccessors(); k++) {
                MBasicBlock *successor = pred->getSuccessor(k);
                if (!successor->phisEmpty())
                    numSuccessorsWithPhis++;
            }
            JS_ASSERT(numSuccessorsWithPhis <= 1);
#endif

            // |j| is this predecessor's operand index into block's phis.
            pred->setSuccessorWithPhis(*block, j);
        }
    }

    return true;
}
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/IonAnalysis.h
@@ -0,0 +1,77 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=79:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   David Anderson <danderson@mozilla.com>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsion_ion_analysis_h__
+#define jsion_ion_analysis_h__
+
+// This file declares various analysis passes that operate on MIR.
+
+#include "IonAllocPolicy.h"
+
+namespace js {
+namespace ion {
+
+class MIRGenerator;
+class MIRGraph;
+
+bool
+SplitCriticalEdges(MIRGenerator *gen, MIRGraph &graph);
+
+bool
+EliminateDeadCode(MIRGraph &graph);
+
+bool
+ApplyTypeInformation(MIRGraph &graph);
+
+bool
+ReorderBlocks(MIRGraph &graph);
+
+bool
+BuildPhiReverseMapping(MIRGraph &graph);
+
+bool
+BuildDominatorTree(MIRGraph &graph);
+
} // namespace ion
} // namespace js
+
+#endif // jsion_ion_analysis_h__
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/IonBuilder.cpp
@@ -0,0 +1,1816 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=79:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   David Anderson <danderson@mozilla.com>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#include "IonAnalysis.h"
+#include "IonBuilder.h"
+#include "MIRGraph.h"
+#include "Ion.h"
+#include "IonAnalysis.h"
+#include "IonSpewer.h"
+#include "jsemit.h"
+#include "jsscriptinlines.h"
+
+#ifdef JS_THREADSAFE
+# include "prthread.h"
+#endif
+
+using namespace js;
+using namespace js::ion;
+
// Constructs a builder that translates |script|'s bytecode into MIR.
// |fun| is non-NULL when compiling a function frame; |oracle| supplies
// observed type information for parameters and |this|.
IonBuilder::IonBuilder(JSContext *cx, JSScript *script, JSFunction *fun, TempAllocator &temp,
                       MIRGraph &graph, TypeOracle *oracle)
  : MIRGenerator(cx, temp, script, fun, graph),
    oracle(oracle)
{
    // Begin decoding at the first opcode, with the script's base atom map.
    pc = script->code;
    atoms = script->atoms;
}
+
+static inline int32
+GetJumpOffset(jsbytecode *pc)
+{
+    JSOp op = JSOp(*pc);
+    JS_ASSERT(js_CodeSpec[op].type() == JOF_JUMP ||
+              js_CodeSpec[op].type() == JOF_JUMPX);
+    return (js_CodeSpec[op].type() == JOF_JUMP)
+           ? GET_JUMP_OFFSET(pc)
+           : GET_JUMPX_OFFSET(pc);
+}
+
+static inline jsbytecode *
+GetNextPc(jsbytecode *pc)
+{
+    return pc + js_CodeSpec[JSOp(*pc)].length;
+}
+
+uint32
+IonBuilder::readIndex(jsbytecode *pc)
+{
+    return (atoms - script->atoms) + GET_INDEX(pc);
+}
+
// Creates the CFG state for an if with no else: parsing of the true
// branch stops at |join|, where control merges into |ifFalse|.
IonBuilder::CFGState
IonBuilder::CFGState::If(jsbytecode *join, MBasicBlock *ifFalse)
{
    CFGState state;
    state.state = IF_TRUE;
    state.stopAt = join;
    state.branch.ifFalse = ifFalse;
    return state;
}
+
// Creates the CFG state for an if-else: the true branch is parsed up to
// |trueEnd| and the false branch up to |falseEnd|.
IonBuilder::CFGState
IonBuilder::CFGState::IfElse(jsbytecode *trueEnd, jsbytecode *falseEnd, MBasicBlock *ifFalse)
{
    CFGState state;
    // If the end of the false path is the same as the start of the
    // false path, then the "else" block is empty and we can devolve
    // this to the IF_TRUE case. We handle this here because there is
    // still an extra GOTO on the true path and we want stopAt to point
    // there, whereas the IF_TRUE case does not have the GOTO.
    state.state = (falseEnd == ifFalse->pc())
                  ? IF_TRUE_EMPTY_ELSE
                  : IF_ELSE_TRUE;
    state.stopAt = trueEnd;
    state.branch.falseEnd = falseEnd;
    state.branch.ifFalse = ifFalse;
    return state;
}
+
+void
+IonBuilder::popCfgStack()
+{
+    if (cfgStack_.back().isLoop())
+        loops_.popBack();
+    cfgStack_.popBack();
+}
+
// Pushes a loop onto both the CFG stack and the loop stack.
// |stopAt| ends parsing of the current region; |bodyStart|/|bodyEnd| bound
// the loop body; |exitpc| is where breaks resume; |continuepc| (defaulting
// to the entry block's pc) is where continues resume.
bool
IonBuilder::pushLoop(CFGState::State initial, jsbytecode *stopAt, MBasicBlock *entry,
                     jsbytecode *bodyStart, jsbytecode *bodyEnd, jsbytecode *exitpc,
                     jsbytecode *continuepc)
{
    if (!continuepc)
        continuepc = entry->pc();

    ControlFlowInfo loop(cfgStack_.length(), continuepc);
    if (!loops_.append(loop))
        return false;

    CFGState state;
    state.state = initial;
    state.stopAt = stopAt;
    state.loop.bodyStart = bodyStart;
    state.loop.bodyEnd = bodyEnd;
    state.loop.exitpc = exitpc;
    state.loop.entry = entry;
    // Successor and break/continue edges are filled in as the loop body
    // is parsed.
    state.loop.successor = NULL;
    state.loop.breaks = NULL;
    state.loop.continues = NULL;
    return cfgStack_.append(state);
}
+
+bool
+IonBuilder::build()
+{
+    current = newBlock(pc);
+    if (!current)
+        return false;
+
+    IonSpew(IonSpew_MIR, "Analying script %s:%d", script->filename, script->lineno);
+
+    // Initialize argument references if inside a function frame.
+    if (fun()) {
+        MParameter *param = MParameter::New(MParameter::THIS_SLOT, oracle->thisTypeSet(script));
+        current->add(param);
+        current->initSlot(thisSlot(), param);
+
+        for (uint32 i = 0; i < nargs(); i++) {
+            param = MParameter::New(int(i), oracle->parameterTypeSet(script, i));
+            current->add(param);
+            current->initSlot(argSlot(i), param);
+        }
+    }
+
+    // Initialize local variables.
+    for (uint32 i = 0; i < nlocals(); i++) {
+        MConstant *undef = MConstant::New(UndefinedValue());
+        current->add(undef);
+        current->initSlot(localSlot(i), undef);
+    }
+
+    current->makeStart(new MStart());
+
+    // Attach a resume point to each parameter, so the type analyzer doesn't
+    // replace its first use.
+    for (uint32 i = 0; i < CountArgSlots(fun()); i++) {
+        MParameter *param = current->getEntrySlot(i)->toInstruction()->toParameter();
+        param->setResumePoint(current->entryResumePoint());
+    }
+
+    if (!traverseBytecode())
+        return false;
+
+    return true;
+}
+
+// We try to build a control-flow graph in the order that it would be built as
+// if traversing the AST. This leads to a nice ordering and lets us build SSA
+// in one pass, since the bytecode is structured.
+//
+// We traverse the bytecode iteratively, maintaining a current basic block.
+// Each basic block has a mapping of local slots to instructions, as well as a
+// stack depth. As we encounter instructions we mutate this mapping in the
+// current block.
+//
+// Things get interesting when we encounter a control structure. This can be
+// either an IFEQ, downward GOTO, or a decompiler hint stashed away in source
+// notes. Once we encounter such an opcode, we recover the structure of the
+// control flow (its branches and bounds), and push it on a stack.
+//
+// As we continue traversing the bytecode, we look for points that would
+// terminate the topmost control flow path pushed on the stack. These are:
+//  (1) The bounds of the current structure (end of a loop or join/edge of a
+//      branch).
+//  (2) A "return", "break", or "continue" statement.
+//
+// For (1), we expect that there is a current block in the progress of being
+// built, and we complete the necessary edges in the CFG. For (2), we expect
+// that there is no active block.
+//
+// For normal diamond join points, we construct Phi nodes as we add
+// predecessors. For loops, care must be taken to propagate Phi nodes back
+// through uses in the loop body.
// Main bytecode-translation loop; see the overview comment above for the
// traversal strategy.
bool
IonBuilder::traverseBytecode()
{
    for (;;) {
        JS_ASSERT(pc < script->code + script->length);

        for (;;) {
            if (!temp().ensureBallast())
                return false;

            // Check if we've hit an expected join point or edge in the bytecode.
            // Leaving one control structure could place us at the edge of another,
            // thus |while| instead of |if| so we don't skip any opcodes.
            if (!cfgStack_.empty() && cfgStack_.back().stopAt == pc) {
                ControlStatus status = processCfgStack();
                if (status == ControlStatus_Error)
                    return false;
                // A NULL current block means control flow has ended.
                if (!current)
                    return true;
                continue;
            }

            // Some opcodes need to be handled early because they affect control
            // flow, terminating the current basic block and/or instructing the
            // traversal algorithm to continue from a new pc.
            //
            //   (1) If the opcode does not affect control flow, then the opcode
            //       is inspected and transformed to IR. This is the process_opcode
            //       label.
            //   (2) A loop could be detected via a forward GOTO. In this case,
            //       we don't want to process the GOTO, but the following
            //       instruction.
            //   (3) A RETURN, STOP, BREAK, or CONTINUE may require processing the
            //       CFG stack to terminate open branches.
            //
            // Similar to above, snooping control flow could land us at another
            // control flow point, so we iterate until it's time to inspect a real
            // opcode.
            ControlStatus status;
            if ((status = snoopControlFlow(JSOp(*pc))) == ControlStatus_None)
                break;
            if (status == ControlStatus_Error)
                return false;
            if (!current)
                return true;
        }

        // Nothing in inspectOpcode() is allowed to advance the pc.
        JSOp op = JSOp(*pc);
        if (!inspectOpcode(op))
            return false;

        pc += js_CodeSpec[op].length;
    }

    // Not reached: the loop above only exits via return.
    return true;
}
+
+IonBuilder::ControlStatus
+IonBuilder::snoopControlFlow(JSOp op)
+{
+    switch (op) {
+      case JSOP_NOP:
+        return maybeLoop(op, js_GetSrcNote(script, pc));
+
+      case JSOP_POP:
+        return maybeLoop(op, js_GetSrcNote(script, pc));
+
+      case JSOP_RETURN:
+      case JSOP_STOP:
+        return processReturn(op);
+
+      case JSOP_GOTO:
+      case JSOP_GOTOX:
+      {
+        jssrcnote *sn = js_GetSrcNote(script, pc);
+        switch (sn ? SN_TYPE(sn) : SRC_NULL) {
+          case SRC_BREAK:
+          case SRC_BREAK2LABEL:
+            return processBreak(op, sn);
+
+          case SRC_CONTINUE:
+          case SRC_CONT2LABEL:
+            return processContinue(op, sn);
+
+          case SRC_SWITCHBREAK:
+            return processSwitchBreak(op, sn);
+
+          case SRC_WHILE:
+            // while (cond) { }
+            if (!whileLoop(op, sn))
+              return ControlStatus_Error;
+            return ControlStatus_Jumped;
+
+          case SRC_FOR_IN:
+            // for (x in y) { }
+            if (!forInLoop(op, sn))
+              return ControlStatus_Error;
+            return ControlStatus_Jumped;
+
+          default:
+            // Hard assert for now - make an error later.
+            JS_NOT_REACHED("unknown goto case");
+            break;
+        }
+        break;
+      }
+
+      case JSOP_TABLESWITCH:
+        return tableSwitch(op, js_GetSrcNote(script, pc));
+
+      case JSOP_IFNE:
+      case JSOP_IFNEX:
+        // We should never reach an IFNE, it's a stopAt point, which will
+        // trigger closing the loop.
+        JS_NOT_REACHED("we should never reach an ifne!");
+        return ControlStatus_Error;
+
+      default:
+        break;
+    }
+    return ControlStatus_None;
+}
+
+bool
+IonBuilder::inspectOpcode(JSOp op)
+{
+    switch (op) {
+      case JSOP_NOP:
+        return true;
+
+      case JSOP_PUSH:
+        return pushConstant(UndefinedValue());
+
+      case JSOP_IFEQ:
+        return jsop_ifeq(JSOP_IFEQ);
+
+      case JSOP_BITNOT:
+        return jsop_bitnot();
+
+      case JSOP_BITAND:
+      case JSOP_BITOR:
+      case JSOP_BITXOR:
+      case JSOP_LSH:
+      case JSOP_RSH:
+      case JSOP_URSH:
+        return jsop_bitop(op);
+
+      case JSOP_ADD:
+      case JSOP_SUB:
+      case JSOP_MUL:
+      case JSOP_DIV:
+      	return jsop_binary(op);
+
+      case JSOP_NEG:
+        return jsop_neg();
+
+      case JSOP_LOCALINC:
+      case JSOP_INCLOCAL:
+      case JSOP_LOCALDEC:
+      case JSOP_DECLOCAL:
+        return jsop_localinc(op);
+
+      case JSOP_LT:
+      case JSOP_LE:
+      case JSOP_GT:
+      case JSOP_GE:
+        return jsop_compare(op);
+
+      case JSOP_ARGINC:
+      case JSOP_INCARG:
+      case JSOP_ARGDEC:
+      case JSOP_DECARG:
+        return jsop_arginc(op);
+
+      case JSOP_DOUBLE:
+        return pushConstant(script->getConst(readIndex(pc)));
+
+      case JSOP_STRING:
+        return pushConstant(StringValue(atoms[GET_INDEX(pc)]));
+
+      case JSOP_ZERO:
+        return pushConstant(Int32Value(0));
+
+      case JSOP_ONE:
+        return pushConstant(Int32Value(1));
+
+      case JSOP_NULL:
+        return pushConstant(NullValue());
+
+      case JSOP_VOID:
+        current->pop();
+        return pushConstant(UndefinedValue());
+
+      case JSOP_FALSE:
+        return pushConstant(BooleanValue(false));
+
+      case JSOP_TRUE:
+        return pushConstant(BooleanValue(true));
+
+      case JSOP_NOTEARG:
+        return jsop_notearg();
+
+      case JSOP_CALLARG:
+        current->pushArg(GET_SLOTNO(pc));
+        if (!pushConstant(UndefinedValue())) // Implicit |this|.
+            return false;
+        return jsop_notearg();
+
+      case JSOP_GETARG:
+        current->pushArg(GET_SLOTNO(pc));
+        return true;
+
+      case JSOP_SETARG:
+        current->setArg(GET_SLOTNO(pc));
+        return true;
+
+      case JSOP_GETLOCAL:
+        current->pushLocal(GET_SLOTNO(pc));
+        return true;
+
+      case JSOP_SETLOCAL:
+        return current->setLocal(GET_SLOTNO(pc));
+
+      case JSOP_POP:
+        current->pop();
+        return true;
+
+      case JSOP_IFEQX:
+        return jsop_ifeq(JSOP_IFEQX);
+
+      case JSOP_CALL:
+        return jsop_call(GET_ARGC(pc));
+
+      case JSOP_NULLBLOCKCHAIN:
+        return true;
+
+      case JSOP_INT8:
+        return pushConstant(Int32Value(GET_INT8(pc)));
+
+      case JSOP_UINT16:
+        return pushConstant(Int32Value(GET_UINT16(pc)));
+
+      case JSOP_UINT24:
+        return pushConstant(Int32Value(GET_UINT24(pc)));
+
+      case JSOP_INT32:
+        return pushConstant(Int32Value(GET_INT32(pc)));
+
+      case JSOP_TRACE:
+        assertValidTraceOp(op);
+        return true;
+
+      default:
+#ifdef DEBUG
+        return abort("Unsupported opcode: %s (line %d)", js_CodeName[op],
+                     js_PCToLineNumber(cx, script, pc));
+#else
+        return abort("Unsupported opcode: %d (line %d)", op, js_PCToLineNumber(cx, script, pc));
+#endif
+    }
+}
+
+// Given that the current control flow structure has ended forcefully,
+// via a return, break, or continue (rather than joining), propagate the
+// termination up. For example, a return nested 5 loops deep may terminate
+// every outer loop at once, if there are no intervening conditionals:
+//
+// for (...) {
+//   for (...) {
+//     return x;
+//   }
+// }
+//
+// If |current| is NULL when this function returns, then there is no more
+// control flow to be processed.
+IonBuilder::ControlStatus
+IonBuilder::processControlEnd()
+{
+    JS_ASSERT(!current);
+
+    if (cfgStack_.empty()) {
+        // If there is no more control flow to process, then this is the
+        // last return in the function.
+        return ControlStatus_Ended;
+    }
+
+    return processCfgStack();
+}
+
+// Processes the top of the CFG stack. This is used from two places:
+// (1) processControlEnd(), whereby a break, continue, or return may interrupt
+//     an in-progress CFG structure before reaching its actual termination
+//     point in the bytecode.
+// (2) traverseBytecode(), whereby we reach the last instruction in a CFG
+//     structure.
+IonBuilder::ControlStatus
+IonBuilder::processCfgStack()
+{
+    ControlStatus status = processCfgEntry(cfgStack_.back());
+
+    // If this terminated a CFG structure, act like processControlEnd() and
+    // keep propagating upward.
+    while (status == ControlStatus_Ended) {
+        popCfgStack();
+        if (cfgStack_.empty())
+            return status;
+        status = processCfgEntry(cfgStack_.back());
+    }
+
+    // If some join took place, the current structure is finished.
+    if (status == ControlStatus_Joined)
+        popCfgStack();
+
+    return status;
+}
+
// Dispatches to the handler for the CFG structure whose stopAt point has
// just been reached.
IonBuilder::ControlStatus
IonBuilder::processCfgEntry(CFGState &state)
{
    switch (state.state) {
      case CFGState::IF_TRUE:
      case CFGState::IF_TRUE_EMPTY_ELSE:
        return processIfEnd(state);

      case CFGState::IF_ELSE_TRUE:
        return processIfElseTrueEnd(state);

      case CFGState::IF_ELSE_FALSE:
        return processIfElseFalseEnd(state);

      case CFGState::DO_WHILE_LOOP_BODY:
        return processDoWhileBodyEnd(state);

      case CFGState::DO_WHILE_LOOP_COND:
        return processDoWhileCondEnd(state);

      case CFGState::WHILE_LOOP_COND:
        return processWhileCondEnd(state);

      case CFGState::WHILE_LOOP_BODY:
        return processWhileBodyEnd(state);

      case CFGState::FOR_LOOP_COND:
        return processForCondEnd(state);

      case CFGState::FOR_LOOP_BODY:
        return processForBodyEnd(state);

      case CFGState::FOR_LOOP_UPDATE:
        return processForUpdateEnd(state);

      case CFGState::TABLE_SWITCH:
        return processNextTableSwitchCase(state);

      default:
        JS_NOT_REACHED("unknown cfgstate");
    }
    return ControlStatus_Error;
}
+
+IonBuilder::ControlStatus
+IonBuilder::processIfEnd(CFGState &state)
+{
+    if (current) {
+        // Here, the false block is the join point. Create an edge from the
+        // current block to the false block. Note that a RETURN opcode
+        // could have already ended the block.
+        current->end(MGoto::New(state.branch.ifFalse));
+
+        if (!state.branch.ifFalse->addPredecessor(current))
+            return ControlStatus_Error;
+    }
+
+    current = state.branch.ifFalse;
+    pc = current->pc();
+    return ControlStatus_Joined;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processIfElseTrueEnd(CFGState &state)
+{
+    // We've reached the end of the true branch of an if-else. Don't
+    // create an edge yet, just transition to parsing the false branch.
+    state.state = CFGState::IF_ELSE_FALSE;
+    state.branch.ifTrue = current;
+    state.stopAt = state.branch.falseEnd;
+    pc = state.branch.ifFalse->pc();
+    current = state.branch.ifFalse;
+    return ControlStatus_Jumped;
+}
+
// Finishes an if-else by joining whichever arms are still live; either arm
// may be NULL if it ended in a return.
IonBuilder::ControlStatus
IonBuilder::processIfElseFalseEnd(CFGState &state)
{
    // Update the state to have the latest block from the false path.
    state.branch.ifFalse = current;

    // To create the join node, we need an incoming edge that has not been
    // terminated yet.
    MBasicBlock *pred = state.branch.ifTrue
                        ? state.branch.ifTrue
                        : state.branch.ifFalse;
    MBasicBlock *other = (pred == state.branch.ifTrue) ? state.branch.ifFalse : state.branch.ifTrue;

    // Both arms returned: there is nothing to join.
    if (!pred)
        return ControlStatus_Ended;

    // Create a new block to represent the join.
    MBasicBlock *join = newBlock(pred, state.branch.falseEnd);
    if (!join)
        return ControlStatus_Error;

    // Create edges from the true and false blocks as needed.
    pred->end(MGoto::New(join));

    if (other) {
        other->end(MGoto::New(join));
        if (!join->addPredecessor(other))
            return ControlStatus_Error;
    }

    // Ignore unreachable remainder of false block if existent.
    current = join;
    pc = current->pc();
    return ControlStatus_Joined;
}
+
// Handles a loop whose body terminated without producing a backedge (every
// path returned or broke): resumes at the loop's successor and/or joins
// any break edges.
IonBuilder::ControlStatus
IonBuilder::processBrokenLoop(CFGState &state)
{
    JS_ASSERT(!current);

    // If the loop started with a condition (while/for) then even if the
    // structure never actually loops, the condition itself can still fail and
    // thus we must resume at the successor, if one exists.
    current = state.loop.successor;

    // Join the breaks together and continue parsing.
    if (state.loop.breaks) {
        MBasicBlock *block = createBreakCatchBlock(state.loop.breaks, state.loop.exitpc);
        if (!block)
            return ControlStatus_Error;

        // The successor (if any) also flows into the break-catch block.
        if (current) {
            current->end(MGoto::New(block));
            if (!block->addPredecessor(current))
                return ControlStatus_Error;
        }

        current = block;
    }

    // If the loop is not gated on a condition, and has only returns, we'll
    // reach this case. For example:
    // do { ... return; } while ();
    if (!current)
        return ControlStatus_Ended;

    // Otherwise, the loop is gated on a condition and/or has breaks so keep
    // parsing at the successor.
    pc = current->pc();
    return ControlStatus_Joined;
}
+
+IonBuilder::ControlStatus
+IonBuilder::finishLoop(CFGState &state, MBasicBlock *successor)
+{
+    // Close a loop whose backedge is reachable: set the backedge on the
+    // pending loop header, propagate the header phis, and merge any break
+    // edges into the loop's successor.
+    JS_ASSERT(current);
+
+    // Compute phis in the loop header and propagate them throughout the loop,
+    // including the successor.
+    if (!state.loop.entry->setBackedge(current))
+        return ControlStatus_Error;
+    if (successor)
+        successor->inheritPhis(state.loop.entry);
+
+    if (state.loop.breaks) {
+        // Propagate phis placed in the header to individual break exit points.
+        DeferredEdge *edge = state.loop.breaks;
+        while (edge) {
+            edge->block->inheritPhis(state.loop.entry);
+            edge = edge->next;
+        }
+
+        // Create a catch block to join all break exits.
+        MBasicBlock *block = createBreakCatchBlock(state.loop.breaks, state.loop.exitpc);
+        if (!block)
+            return ControlStatus_Error;
+
+        if (successor) {
+            // Finally, create an unconditional edge from the successor to the
+            // catch block.
+            successor->end(MGoto::New(block));
+            if (!block->addPredecessor(successor))
+                return ControlStatus_Error;
+        }
+        // The catch block becomes the place where parsing resumes.
+        successor = block;
+    }
+
+    current = successor;
+
+    // An infinite loop (for (;;) { }) will not have a successor.
+    if (!current)
+        return ControlStatus_Ended;
+
+    pc = current->pc();
+    return ControlStatus_Joined;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processDoWhileBodyEnd(CFGState &state)
+{
+    // End of a do-while body: fold in any deferred continues, then start a
+    // new block for the loop condition and switch to parsing it.
+    if (!processDeferredContinues(state))
+        return ControlStatus_Error;
+
+    // No current means control flow cannot reach the condition, so this will
+    // never loop.
+    if (!current)
+        return processBrokenLoop(state);
+
+    MBasicBlock *header = newBlock(current, state.loop.updatepc);
+    if (!header)
+        return ControlStatus_Error;
+    current->end(MGoto::New(header));
+
+    // Resume parsing at the condition, stopping at its terminating IFNE.
+    state.state = CFGState::DO_WHILE_LOOP_COND;
+    state.stopAt = state.loop.updateEnd;
+    pc = state.loop.updatepc;
+    current = header;
+    return ControlStatus_Jumped;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processDoWhileCondEnd(CFGState &state)
+{
+    // End of a do-while condition: emit the conditional branch back to the
+    // loop entry and finish the loop.
+    JS_ASSERT(JSOp(*pc) == JSOP_IFNE || JSOp(*pc) == JSOP_IFNEX);
+
+    // We're guaranteed a |current|, it's impossible to break or return from
+    // inside the conditional expression.
+    JS_ASSERT(current);
+
+    // Pop the last value, and create the successor block.
+    MDefinition *vins = current->pop();
+    MBasicBlock *successor = newBlock(current, GetNextPc(pc));
+    if (!successor)
+        return ControlStatus_Error;
+
+    // Create the test instruction and end the current block.
+    MTest *test = MTest::New(vins, state.loop.entry, successor);
+    current->end(test);
+    return finishLoop(state, successor);
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processWhileCondEnd(CFGState &state)
+{
+    // End of a while condition: branch either into the body or to the loop
+    // exit, then switch to parsing the body.
+    JS_ASSERT(JSOp(*pc) == JSOP_IFNE || JSOp(*pc) == JSOP_IFNEX);
+
+    // Balance the stack past the IFNE.
+    MDefinition *ins = current->pop();
+
+    // Create the body and successor blocks.
+    MBasicBlock *body = newBlock(current, state.loop.bodyStart);
+    state.loop.successor = newBlock(current, state.loop.exitpc);
+    if (!body || !state.loop.successor)
+        return ControlStatus_Error;
+
+    MTest *test = MTest::New(ins, body, state.loop.successor);
+    current->end(test);
+
+    // Resume parsing at the body, stopping where the body ends.
+    state.state = CFGState::WHILE_LOOP_BODY;
+    state.stopAt = state.loop.bodyEnd;
+    pc = state.loop.bodyStart;
+    current = body;
+    return ControlStatus_Jumped;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processWhileBodyEnd(CFGState &state)
+{
+    // End of a while body: fold in deferred continues and close the loop
+    // with a backedge to the entry.
+    if (!processDeferredContinues(state))
+        return ControlStatus_Error;
+
+    // Body never reaches its end (always returns/breaks).
+    if (!current)
+        return processBrokenLoop(state);
+
+    current->end(MGoto::New(state.loop.entry));
+    return finishLoop(state, state.loop.successor);
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processForCondEnd(CFGState &state)
+{
+    // End of a for-loop condition; mirrors processWhileCondEnd, but
+    // transitions to FOR_LOOP_BODY instead.
+    JS_ASSERT(JSOp(*pc) == JSOP_IFNE || JSOp(*pc) == JSOP_IFNEX);
+
+    // Balance the stack past the IFNE.
+    MDefinition *ins = current->pop();
+
+    // Create the body and successor blocks.
+    MBasicBlock *body = newBlock(current, state.loop.bodyStart);
+    state.loop.successor = newBlock(current, state.loop.exitpc);
+    if (!body || !state.loop.successor)
+        return ControlStatus_Error;
+
+    MTest *test = MTest::New(ins, body, state.loop.successor);
+    current->end(test);
+
+    // Resume parsing at the body, stopping where the body ends.
+    state.state = CFGState::FOR_LOOP_BODY;
+    state.stopAt = state.loop.bodyEnd;
+    pc = state.loop.bodyStart;
+    current = body;
+    return ControlStatus_Jumped;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processForBodyEnd(CFGState &state)
+{
+    // End of a for-loop body: fold in deferred continues, then parse the
+    // update clause if one exists.
+    if (!processDeferredContinues(state))
+        return ControlStatus_Error;
+
+    // If there is no updatepc, just go right to processing what would be the
+    // end of the update clause. Otherwise, |current| might be NULL; if this is
+    // the case, the update is unreachable anyway.
+    if (!state.loop.updatepc || !current)
+        return processForUpdateEnd(state);
+
+    pc = state.loop.updatepc;
+
+    state.state = CFGState::FOR_LOOP_UPDATE;
+    state.stopAt = state.loop.updateEnd;
+    return ControlStatus_Jumped;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processForUpdateEnd(CFGState &state)
+{
+    // End of a for-loop update clause (or body, when no update exists):
+    // close the loop with a backedge to the entry.
+
+    // If there is no current, we couldn't reach the loop edge and there was no
+    // update clause.
+    if (!current)
+        return processBrokenLoop(state);
+
+    current->end(MGoto::New(state.loop.entry));
+    return finishLoop(state, state.loop.successor);
+}
+
+bool
+IonBuilder::processDeferredContinues(CFGState &state)
+{
+    // Join all deferred continue edges for this loop into a single block,
+    // which becomes |current|. Returns false on OOM.
+    //
+    // If there are any continues for this loop, and there is an update block,
+    // then we need to create a new basic block to house the update.
+    if (state.loop.continues) {
+        DeferredEdge *edge = state.loop.continues;
+
+        MBasicBlock *update = newBlock(edge->block, pc);
+        if (!update)
+            return false;
+
+        // The fall-through path, if still live, also flows into the update.
+        if (current) {
+            current->end(MGoto::New(update));
+            if (!update->addPredecessor(current))
+                return false;
+        }
+
+        // No need to use addPredecessor for first edge,
+        // because it is already predecessor.
+        edge->block->end(MGoto::New(update));
+        edge = edge->next;
+
+        // Remaining edges
+        while (edge) {
+            edge->block->end(MGoto::New(update));
+            if (!update->addPredecessor(edge->block))
+                return false;
+            edge = edge->next;
+        }
+        state.loop.continues = NULL;
+
+        current = update;
+    }
+
+    return true;
+}
+
+MBasicBlock *
+IonBuilder::createBreakCatchBlock(DeferredEdge *edge, jsbytecode *pc)
+{
+    // Join a list of deferred break edges into one catch block at |pc|.
+    // Returns NULL on OOM.
+
+    // Create block, using the first break statement as predecessor
+    MBasicBlock *successor = newBlock(edge->block, pc);
+    if (!successor)
+        return NULL;
+
+    // No need to use addPredecessor for first edge,
+    // because it is already predecessor.
+    edge->block->end(MGoto::New(successor));
+    edge = edge->next;
+
+    // Finish up remaining breaks.
+    while (edge) {
+        edge->block->end(MGoto::New(successor));
+        if (!successor->addPredecessor(edge->block))
+            return NULL;
+        edge = edge->next;
+    }
+
+    return successor;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processNextTableSwitchCase(CFGState &state)
+{
+    // Advance to the next tableswitch successor (case or default) and resume
+    // parsing there; falls through from the previous case when it had no
+    // break.
+    JS_ASSERT(state.state == CFGState::TABLE_SWITCH);
+
+    state.tableswitch.currentSuccessor++;
+
+    // Test if there are still unprocessed successors (cases/default)
+    if (state.tableswitch.currentSuccessor >= state.tableswitch.ins->numSuccessors())
+        return processTableSwitchEnd(state);
+
+    // Get the next successor
+    MBasicBlock *successor = state.tableswitch.ins->getSuccessor(state.tableswitch.currentSuccessor);
+
+    // Add current block as predecessor if available.
+    // This means the previous case didn't have a break statement.
+    // So flow will continue in this block.
+    if (current) {
+        current->end(MGoto::New(successor));
+        // addPredecessor can fail on OOM; propagate it like everywhere else.
+        if (!successor->addPredecessor(current))
+            return ControlStatus_Error;
+    }
+
+    // If this is the last successor the block should stop at the end of the tableswitch
+    // Else it should stop at the start of the next successor
+    if (state.tableswitch.currentSuccessor+1 < state.tableswitch.ins->numSuccessors())
+        state.stopAt = state.tableswitch.ins->getSuccessor(state.tableswitch.currentSuccessor+1)->pc();
+    else
+        state.stopAt = state.tableswitch.exitpc;
+
+    current = successor;
+    pc = current->pc();
+    return ControlStatus_Jumped;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processTableSwitchEnd(CFGState &state)
+{
+    // All tableswitch successors processed: build the join block out of
+    // break edges and/or the live fall-through block, and resume at exitpc.
+
+    // No break statements and no current
+    // This means that control flow is cut-off from this point
+    // (e.g. all cases have return statements).
+    if (!state.tableswitch.breaks && !current)
+        return ControlStatus_Ended;
+
+    // Create successor block.
+    // If there are breaks, create block with breaks as predecessor
+    // Else create a block with current as predecessor
+    MBasicBlock *successor = NULL;
+    if (state.tableswitch.breaks)
+        successor = createBreakCatchBlock(state.tableswitch.breaks, state.tableswitch.exitpc);
+    else
+        successor = newBlock(current, state.tableswitch.exitpc);
+
+    // Allocation failure is an error, not a normal end of control flow.
+    if (!successor)
+        return ControlStatus_Error;
+
+    // If there is current, the current block flows into this one.
+    // So current is also a predecessor to this block. When the block was
+    // created from the breaks list, |current| is not a predecessor yet and
+    // must be added explicitly.
+    if (current) {
+        current->end(MGoto::New(successor));
+        if (state.tableswitch.breaks) {
+            if (!successor->addPredecessor(current))
+                return ControlStatus_Error;
+        }
+    }
+
+    pc = state.tableswitch.exitpc;
+    current = successor;
+    return ControlStatus_Joined;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processBreak(JSOp op, jssrcnote *sn)
+{
+    // Handle a break statement: defer the edge on the target loop's break
+    // list and terminate the current block.
+    JS_ASSERT(op == JSOP_GOTO || op == JSOP_GOTOX);
+
+    // Find the target loop. Note: the loop iterates from the innermost loop
+    // outward; the unsigned index wraps past zero, making i >= loops_.length()
+    // and ending the loop.
+    CFGState *found = NULL;
+    jsbytecode *target = pc + GetJumpOffset(pc);
+    for (size_t i = loops_.length() - 1; i < loops_.length(); i--) {
+        CFGState &cfg = cfgStack_[loops_[i].cfgEntry];
+        if (cfg.loop.exitpc == target) {
+            found = &cfg;
+            break;
+        }
+    }
+
+    if (!found) {
+        // Sometimes, we can't determine the structure of a labeled break. For
+        // example:
+        //
+        // 0:    label: {
+        // 1:        for (;;) {
+        // 2:            break label;
+        // 3:        }
+        // 4:        stuff;
+        // 5:    }
+        //
+        // In this case, the successor of the block is 4, but the target of the
+        // single-level break is actually 5. To recognize this case we'd need
+        // to know about the label structure at 0,5 ahead of time - and lacking
+        // those source notes for now, we just abort instead.
+        abort("could not find the target of a break");
+        return ControlStatus_Error;
+    }
+
+    // There must always be a valid target loop structure. If not, there's
+    // probably an off-by-something error in which pc we track.
+    CFGState &state = *found;
+
+    // NOTE(review): |new| here is not checked for NULL — confirm whether this
+    // allocator is infallible in this codebase.
+    state.loop.breaks = new DeferredEdge(current, state.loop.breaks);
+
+    current = NULL;
+    pc += js_CodeSpec[op].length;
+    return processControlEnd();
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processContinue(JSOp op, jssrcnote *sn)
+{
+    // Handle a continue statement: defer the edge on the target loop's
+    // continue list and terminate the current block.
+    JS_ASSERT(op == JSOP_GOTO || op == JSOP_GOTOX);
+
+    // Find the target loop, scanning from the innermost outward (the
+    // unsigned index wraps past zero to end the loop).
+    CFGState *found = NULL;
+    jsbytecode *target = pc + GetJumpOffset(pc);
+    for (size_t i = loops_.length() - 1; i < loops_.length(); i--) {
+        if (loops_[i].continuepc == target) {
+            found = &cfgStack_[loops_[i].cfgEntry];
+            break;
+        }
+    }
+
+    // There must always be a valid target loop structure. If not, there's
+    // probably an off-by-something error in which pc we track.
+    JS_ASSERT(found);
+    CFGState &state = *found;
+
+    // NOTE(review): |new| here is not checked for NULL — confirm whether this
+    // allocator is infallible in this codebase.
+    state.loop.continues = new DeferredEdge(current, state.loop.continues);
+
+    current = NULL;
+    pc += js_CodeSpec[op].length;
+    return processControlEnd();
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processSwitchBreak(JSOp op, jssrcnote *sn)
+{
+    // Handle a break inside a switch: defer the edge on the tableswitch's
+    // break list and terminate the current block.
+    JS_ASSERT(op == JSOP_GOTO || op == JSOP_GOTOX);
+
+    // Find the target switch, scanning from the innermost outward (the
+    // unsigned index wraps past zero to end the loop).
+    CFGState *found = NULL;
+    jsbytecode *target = pc + GetJumpOffset(pc);
+    for (size_t i = switches_.length() - 1; i < switches_.length(); i--) {
+        if (switches_[i].continuepc == target) {
+            found = &cfgStack_[switches_[i].cfgEntry];
+            break;
+        }
+    }
+
+    // There must always be a valid target switch structure. If not, there's
+    // probably an off-by-something error in which pc we track.
+    JS_ASSERT(found);
+    CFGState &state = *found;
+
+    // NOTE(review): |new| here is not checked for NULL — confirm whether this
+    // allocator is infallible in this codebase.
+    state.tableswitch.breaks = new DeferredEdge(current, state.tableswitch.breaks);
+
+    current = NULL;
+    pc += js_CodeSpec[op].length;
+    return processControlEnd();
+}
+
+IonBuilder::ControlStatus
+IonBuilder::maybeLoop(JSOp op, jssrcnote *sn)
+{
+    // This function looks at the opcode and source note and tries to
+    // determine the structure of the loop. For some opcodes, like
+    // POP/NOP which are not explicitly control flow, this source note is
+    // optional. For opcodes with control flow, like GOTO, an unrecognized
+    // or not-present source note is a compilation failure.
+    switch (op) {
+      case JSOP_POP:
+        // for (init; ; update?) ...
+        if (sn && SN_TYPE(sn) == SRC_FOR) {
+            current->pop();
+            return forLoop(op, sn);
+        }
+        break;
+
+      case JSOP_NOP:
+        if (sn) {
+            // do { } while (cond)
+            if (SN_TYPE(sn) == SRC_WHILE)
+                return doWhileLoop(op, sn);
+
+            // for (; ; update?)
+            if (SN_TYPE(sn) == SRC_FOR)
+                return forLoop(op, sn);
+        }
+        break;
+
+      default:
+        JS_NOT_REACHED("unexpected opcode");
+        return ControlStatus_Error;
+    }
+
+    // Not a recognized loop shape; let the caller treat the op normally.
+    return ControlStatus_None;
+}
+
+void
+IonBuilder::assertValidTraceOp(JSOp op)
+{
+    // Debug-only sanity check: a JSOP_TRACE must sit immediately after a
+    // recognized loop header, and its src-note IFNE offset must match the
+    // one recorded for the enclosing CFG state.
+#ifdef DEBUG
+    jssrcnote *sn = js_GetSrcNote(script, pc);
+    jsbytecode *ifne = pc + js_GetSrcNoteOffset(sn, 0);
+    CFGState &state = cfgStack_.back();
+
+    // Make sure this is the next opcode after the loop header.
+    JS_ASSERT(GetNextPc(state.loop.entry->pc()) == pc);
+
+    jsbytecode *expected_ifne;
+    switch (state.state) {
+      case CFGState::DO_WHILE_LOOP_BODY:
+        expected_ifne = state.stopAt;
+        break;
+
+      default:
+        JS_NOT_REACHED("JSOP_TRACE appeared in unknown control flow construct");
+        return;
+    }
+
+    // Make sure this trace op goes to the same ifne as the loop header's
+    // source notes or GOTO.
+    JS_ASSERT(ifne == expected_ifne);
+#endif
+}
+
+IonBuilder::ControlStatus
+IonBuilder::doWhileLoop(JSOp op, jssrcnote *sn)
+{
+    // Set up CFG state for a do-while loop and start parsing its body.
+    //
+    // do { } while() loops have the following structure:
+    //    NOP         ; SRC_WHILE (offset to COND)
+    //    TRACE       ; SRC_WHILE (offset to IFNE)
+    //    ...         ; body
+    //    ...
+    //    COND        ; start of condition
+    //    ...
+    //    IFNE ->     ; goes to TRACE
+    int condition_offset = js_GetSrcNoteOffset(sn, 0);
+    jsbytecode *conditionpc = pc + condition_offset;
+
+    // Second src note hangs off the TRACE op (pc+1) and gives the IFNE.
+    jssrcnote *sn2 = js_GetSrcNote(script, pc+1);
+    int offset = js_GetSrcNoteOffset(sn2, 0);
+    jsbytecode *ifne = pc + offset + 1;
+    JS_ASSERT(ifne > pc);
+
+    // Verify that the IFNE goes back to a trace op.
+    JS_ASSERT(JSOp(*GetNextPc(pc)) == JSOP_TRACE);
+    JS_ASSERT(GetNextPc(pc) == ifne + GetJumpOffset(ifne));
+
+    MBasicBlock *header = newPendingLoopHeader(current, pc);
+    if (!header)
+        return ControlStatus_Error;
+    current->end(MGoto::New(header));
+
+    current = header;
+    jsbytecode *bodyStart = GetNextPc(GetNextPc(pc));
+    jsbytecode *bodyEnd = conditionpc;
+    jsbytecode *exitpc = GetNextPc(ifne);
+    if (!pushLoop(CFGState::DO_WHILE_LOOP_BODY, conditionpc, header, bodyStart, bodyEnd, exitpc, conditionpc))
+        return ControlStatus_Error;
+
+    // Record where the condition ("update") clause lives for
+    // processDoWhileBodyEnd.
+    CFGState &state = cfgStack_.back();
+    state.loop.updatepc = conditionpc;
+    state.loop.updateEnd = ifne;
+
+    pc = bodyStart;
+    return ControlStatus_Jumped;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::whileLoop(JSOp op, jssrcnote *sn)
+{
+    // Set up CFG state for a while loop; the condition is parsed first,
+    // then the body (see processWhileCondEnd).
+    //
+    // while (cond) { } loops have the following structure:
+    //    GOTO cond   ; SRC_WHILE (offset to IFNE)
+    //    TRACE       ; SRC_WHILE (offset to IFNE)
+    //    ...
+    //  cond:
+    //    ...
+    //    IFNE        ; goes to TRACE
+    int ifneOffset = js_GetSrcNoteOffset(sn, 0);
+    jsbytecode *ifne = pc + ifneOffset;
+    JS_ASSERT(ifne > pc);
+
+    // Verify that the IFNE goes back to a trace op.
+    JS_ASSERT(JSOp(*GetNextPc(pc)) == JSOP_TRACE);
+    JS_ASSERT(GetNextPc(pc) == ifne + GetJumpOffset(ifne));
+
+    MBasicBlock *header = newPendingLoopHeader(current, pc);
+    if (!header)
+        return ControlStatus_Error;
+    current->end(MGoto::New(header));
+
+    // Skip past the JSOP_TRACE for the body start.
+    jsbytecode *bodyStart = GetNextPc(GetNextPc(pc));
+    jsbytecode *bodyEnd = pc + GetJumpOffset(pc);
+    jsbytecode *exitpc = GetNextPc(ifne);
+    if (!pushLoop(CFGState::WHILE_LOOP_COND, ifne, header, bodyStart, bodyEnd, exitpc))
+        return ControlStatus_Error;
+
+    // Parse the condition first.
+    pc = bodyEnd;
+    current = header;
+    return ControlStatus_Jumped;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::forLoop(JSOp op, jssrcnote *sn)
+{
+    // Set up CFG state for a for loop; decodes the src-note offsets for the
+    // condition, update, and terminating IFNE, and starts parsing either the
+    // condition (if present) or the body.
+
+    // Skip the NOP or POP.
+    JS_ASSERT(op == JSOP_POP || op == JSOP_NOP);
+    pc = GetNextPc(pc);
+
+    jsbytecode *condpc = pc + js_GetSrcNoteOffset(sn, 0);
+    jsbytecode *updatepc = pc + js_GetSrcNoteOffset(sn, 1);
+    jsbytecode *ifne = pc + js_GetSrcNoteOffset(sn, 2);
+    jsbytecode *exitpc = GetNextPc(ifne);
+
+    // for loops have the following structures:
+    //
+    //   NOP or POP
+    //   [GOTO cond]
+    //   TRACE
+    // body:
+    //    ; [body]
+    // [increment:]
+    //    ; [increment]
+    // [cond:]
+    //   GOTO body
+    //
+    // If there is a condition (condpc != ifne), this acts similar to a while
+    // loop otherwise, it acts like a do-while loop.
+    jsbytecode *bodyStart = pc;
+    jsbytecode *bodyEnd = updatepc;
+    if (condpc != ifne) {
+        // Skip the initial GOTO that jumps to the condition.
+        JS_ASSERT(JSOp(*bodyStart) == JSOP_GOTO || JSOp(*bodyStart) == JSOP_GOTOX);
+        JS_ASSERT(bodyStart + GetJumpOffset(bodyStart) == condpc);
+        bodyStart = GetNextPc(bodyStart);
+    }
+    // Skip past the JSOP_TRACE.
+    JS_ASSERT(JSOp(*bodyStart) == JSOP_TRACE);
+    JS_ASSERT(ifne + GetJumpOffset(ifne) == bodyStart);
+    bodyStart = GetNextPc(bodyStart);
+
+    MBasicBlock *header = newPendingLoopHeader(current, pc);
+    if (!header)
+        return ControlStatus_Error;
+    current->end(MGoto::New(header));
+
+    // If there is no condition, we immediately parse the body. Otherwise, we
+    // parse the condition.
+    jsbytecode *stopAt;
+    CFGState::State initial;
+    if (condpc != ifne) {
+        pc = condpc;
+        stopAt = ifne;
+        initial = CFGState::FOR_LOOP_COND;
+    } else {
+        pc = bodyStart;
+        stopAt = bodyEnd;
+        initial = CFGState::FOR_LOOP_BODY;
+    }
+
+    if (!pushLoop(initial, stopAt, header, bodyStart, bodyEnd, exitpc, updatepc))
+        return ControlStatus_Error;
+
+    // Record condition/update locations; NULL when the clause is absent.
+    CFGState &state = cfgStack_.back();
+    state.loop.condpc = (condpc != ifne) ? condpc : NULL;
+    state.loop.updatepc = (updatepc != condpc) ? updatepc : NULL;
+    if (state.loop.updatepc)
+        state.loop.updateEnd = condpc;
+
+    current = header;
+    return ControlStatus_Jumped;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::tableSwitch(JSOp op, jssrcnote *sn)
+{
+    // Build the MTableSwitch instruction and one basic block per case plus
+    // a default, then begin parsing the first successor.
+    //
+    // TableSwitch op contains the following data
+    // (length between data is JUMP_OFFSET_LEN)
+    //
+    // 0: Offset of default case
+    // 1: Lowest number in tableswitch
+    // 2: Highest number in tableswitch
+    // 3: Offset of first case
+    // 4: Offset of second case
+    // .: ...
+    // .: Offset of last case
+
+    JS_ASSERT(op == JSOP_TABLESWITCH);
+
+    // Pop input.
+    MDefinition *ins = current->pop();
+
+    // Get the default and exit pc
+    jsbytecode *exitpc = pc + js_GetSrcNoteOffset(sn, 0);
+    jsbytecode *defaultpc = pc + GET_JUMP_OFFSET(pc);
+
+    JS_ASSERT(defaultpc > pc && defaultpc <= exitpc);
+
+    // Get the low and high from the tableswitch
+    jsbytecode *pc2 = pc;
+    pc2 += JUMP_OFFSET_LEN;
+    jsint low = GET_JUMP_OFFSET(pc2);
+    pc2 += JUMP_OFFSET_LEN;
+    jsint high = GET_JUMP_OFFSET(pc2);
+    pc2 += JUMP_OFFSET_LEN;
+
+    // Create MIR instruction
+    MTableSwitch *tableswitch = MTableSwitch::New(ins, low, high);
+
+    // Create default case
+    MBasicBlock *defaultcase = newBlock(current, defaultpc);
+    if (!defaultcase)
+        return ControlStatus_Error;
+
+    // Create cases. |prevcasepc| is only read after the first iteration
+    // assigns it (it starts as the NULL from casepc's initializer).
+    jsbytecode *casepc = NULL, *prevcasepc;
+    for (jsint i = 0; i < high-low+1; i++) {
+        prevcasepc = casepc;
+        casepc = pc + GET_JUMP_OFFSET(pc2);
+
+        JS_ASSERT(casepc >= pc && casepc <= exitpc);
+
+        // Test if the default case appears before this case.
+        // If it does add the default case
+        if (defaultpc >= prevcasepc && defaultpc < casepc)
+            tableswitch->addDefault(defaultcase);
+
+        // If the casepc equals the current pc, it is not a written case,
+        // but a filled gap. That way we can use a tableswitch instead of
+        // lookupswitch, even if not all numbers are consecutive.
+        if (casepc == pc) {
+            tableswitch->addCase(defaultcase, true);
+        } else {
+            MBasicBlock *caseblock = newBlock(current, casepc);
+            if (!caseblock)
+                return ControlStatus_Error;
+            tableswitch->addCase(caseblock);
+        }
+
+        pc2 += JUMP_OFFSET_LEN;
+    }
+
+    // Test if the default case comes behind all cases
+    // or if there are no case, add the default case now.
+    if (!casepc || defaultpc >= casepc)
+        tableswitch->addDefault(defaultcase);
+
+    JS_ASSERT(tableswitch->numCases() == (uint32)(high - low + 1));
+    JS_ASSERT(tableswitch->numSuccessors() > 0);
+
+    // Create info
+    ControlFlowInfo switchinfo(cfgStack_.length(), exitpc);
+    if (!switches_.append(switchinfo))
+        return ControlStatus_Error;
+
+    // Use a state to retrieve some information
+    CFGState state;
+    state.state = CFGState::TABLE_SWITCH;
+    state.tableswitch.exitpc = exitpc;
+    state.tableswitch.breaks = NULL;
+    state.tableswitch.ins = tableswitch;
+    state.tableswitch.currentSuccessor = 0;
+
+    // Save the MIR instruction as last instruction of this block.
+    current->end(tableswitch);
+
+    // If there is only one successor the block should stop at the end of the switch
+    // Else it should stop at the start of the next successor
+    if (tableswitch->numSuccessors() == 1)
+        state.stopAt = state.tableswitch.exitpc;
+    else
+        state.stopAt = tableswitch->getSuccessor(1)->pc();
+    current = tableswitch->getSuccessor(0);
+
+    if (!cfgStack_.append(state))
+        return ControlStatus_Error;
+
+    pc = current->pc();
+    return ControlStatus_Jumped;
+}
+
+
+bool
+IonBuilder::jsop_ifeq(JSOp op)
+{
+    // Handle JSOP_IFEQ: split the CFG into true/false branches and push a
+    // CFG state describing where the two arms rejoin.
+
+    // IFEQ always has a forward offset.
+    jsbytecode *trueStart = pc + js_CodeSpec[op].length;
+    jsbytecode *falseStart = pc + GetJumpOffset(pc);
+    JS_ASSERT(falseStart > pc);
+
+    // We only handle cases that emit source notes.
+    jssrcnote *sn = js_GetSrcNote(script, pc);
+    if (!sn) {
+        // :FIXME: log this.
+        return false;
+    }
+
+    MDefinition *ins = current->pop();
+
+    // Create true and false branches.
+    MBasicBlock *ifTrue = newBlock(current, trueStart);
+    MBasicBlock *ifFalse = newBlock(current, falseStart);
+    if (!ifTrue || !ifFalse)
+        return false;
+
+    current->end(MTest::New(ins, ifTrue, ifFalse));
+
+    // The bytecode for if/ternary gets emitted either like this:
+    //
+    //    IFEQ X  ; src note (IF_ELSE, COND) points to the GOTO
+    //    ...
+    //    GOTO Z
+    // X: ...     ; else/else if
+    //    ...
+    // Z:         ; join
+    //
+    // Or like this:
+    //
+    //    IFEQ X  ; src note (IF) has no offset
+    //    ...
+    // Z: ...     ; join
+    //
+    // We want to parse the bytecode as if we were parsing the AST, so for the
+    // IF_ELSE/COND cases, we use the source note and follow the GOTO. For the
+    // IF case, the IFEQ offset is the join point.
+    switch (SN_TYPE(sn)) {
+      case SRC_IF:
+        if (!cfgStack_.append(CFGState::If(falseStart, ifFalse)))
+            return false;
+        break;
+
+      case SRC_IF_ELSE:
+      case SRC_COND:
+      {
+        // Infer the join point from the JSOP_GOTO[X] sitting here, then
+        // assert as much as we can that this is the right GOTO.
+        jsbytecode *trueEnd = pc + js_GetSrcNoteOffset(sn, 0);
+        JS_ASSERT(trueEnd > pc);
+        JS_ASSERT(trueEnd < falseStart);
+        JS_ASSERT(JSOp(*trueEnd) == JSOP_GOTO || JSOp(*trueEnd) == JSOP_GOTOX);
+        JS_ASSERT(!js_GetSrcNote(script, trueEnd));
+
+        jsbytecode *falseEnd = trueEnd + GetJumpOffset(trueEnd);
+        JS_ASSERT(falseEnd > trueEnd);
+        JS_ASSERT(falseEnd >= falseStart);
+
+        if (!cfgStack_.append(CFGState::IfElse(trueEnd, falseEnd, ifFalse)))
+            return false;
+        break;
+      }
+
+      default:
+        JS_NOT_REACHED("unexpected source note type");
+        break;
+    }
+
+    // Switch to parsing the true branch. Note that no PC update is needed,
+    // it's the next instruction.
+    current = ifTrue;
+
+    return true;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processReturn(JSOp op)
+{
+    // Emit an MReturn for JSOP_RETURN (popped value) or JSOP_STOP (implicit
+    // undefined), then terminate the current block.
+    MDefinition *def;
+    switch (op) {
+      case JSOP_RETURN:
+        def = current->pop();
+        break;
+
+      case JSOP_STOP:
+      {
+        // Falling off the end of a script returns undefined.
+        MInstruction *ins = MConstant::New(UndefinedValue());
+        current->add(ins);
+        def = ins;
+        break;
+      }
+
+      default:
+        def = NULL;
+        JS_NOT_REACHED("unknown return op");
+        break;
+      }
+
+    MReturn *ret = MReturn::New(def);
+    current->end(ret);
+
+    // Make sure no one tries to use this block now.
+    current = NULL;
+    return processControlEnd();
+}
+
+bool
+IonBuilder::pushConstant(const Value &v)
+{
+    // Materialize |v| as an MConstant in the current block and leave it on
+    // the virtual stack.
+    MConstant *cst = MConstant::New(v);
+    current->add(cst);
+    current->push(cst);
+    return true;
+}
+
+bool
+IonBuilder::jsop_bitnot()
+{
+    // Emit MIR for JSOP_BITNOT: pop the operand, push ~operand, letting the
+    // type oracle refine the instruction's specialization.
+    MDefinition *input = current->pop();
+    MBitNot *ins = MBitNot::New(input);
+
+    current->add(ins);
+    ins->infer(oracle->unaryOp(script, pc));
+
+    current->push(ins);
+    return true;
+}
+bool
+IonBuilder::jsop_bitop(JSOp op)
+{
+    // Emit MIR for a binary bitwise or shift opcode: pop both operands,
+    // build the matching MIR node, let the type oracle specialize it, and
+    // push the result.
+    MDefinition *rhs = current->pop();
+    MDefinition *lhs = current->pop();
+
+    MBinaryBitwiseInstruction *ins = NULL;
+    switch (op) {
+      case JSOP_BITAND:
+        ins = MBitAnd::New(lhs, rhs);
+        break;
+      case JSOP_BITOR:
+        ins = MBitOr::New(lhs, rhs);
+        break;
+      case JSOP_BITXOR:
+        ins = MBitXor::New(lhs, rhs);
+        break;
+      case JSOP_LSH:
+        ins = MLsh::New(lhs, rhs);
+        break;
+      case JSOP_RSH:
+        ins = MRsh::New(lhs, rhs);
+        break;
+      case JSOP_URSH:
+        ins = MUrsh::New(lhs, rhs);
+        break;
+      default:
+        JS_NOT_REACHED("unexpected bitop");
+        return false;
+    }
+
+    current->add(ins);
+    ins->infer(oracle->binaryOp(script, pc));
+
+    current->push(ins);
+    return true;
+}
+
+bool
+IonBuilder::jsop_binary(JSOp op)
+{
+    // Emit MIR for a binary arithmetic opcode: pop both operands, build the
+    // matching MIR node, let the type oracle specialize it, and push the
+    // result.
+    MDefinition *rhs = current->pop();
+    MDefinition *lhs = current->pop();
+
+    MBinaryArithInstruction *ins = NULL;
+    switch (op) {
+      case JSOP_ADD:
+        ins = MAdd::New(lhs, rhs);
+        break;
+      case JSOP_SUB:
+        ins = MSub::New(lhs, rhs);
+        break;
+      case JSOP_MUL:
+        ins = MMul::New(lhs, rhs);
+        break;
+      case JSOP_DIV:
+        ins = MDiv::New(lhs, rhs);
+        break;
+      default:
+        JS_NOT_REACHED("unexpected binary opcode");
+        return false;
+    }
+
+    current->add(ins);
+    ins->infer(oracle->binaryOp(script, pc));
+
+    current->push(ins);
+    return true;
+}
+
+bool
+IonBuilder::jsop_neg()
+{
+    // Lower unary negation as multiplication by -1; the short-circuit
+    // preserves the original early-return-on-failure behavior.
+    return pushConstant(Int32Value(-1)) && jsop_binary(JSOP_MUL);
+}
+
+bool
+IonBuilder::jsop_notearg()
+{
+    // JSOP_NOTEARG notes that the value in current->pop() has just
+    // been pushed onto the stack for use in calling a function.
+    // Wrap it in an MPassArg so the call lowering can locate it.
+    MDefinition *def = current->pop();
+    MPassArg *arg = MPassArg::New(def);
+
+    current->add(arg);
+    current->push(arg);
+    return true;
+}
+
+bool
+IonBuilder::jsop_call(uint32 argc)
+{
+    // Emit MIR for a call with |argc| arguments: gather the MPassArg wrappers
+    // off the virtual stack, bracket them with an MPrepareCall, and attach a
+    // resume point after the (effectful) call.
+    MCall *ins = MCall::New(argc + 1); // +1 for implicit this.
+    if (!ins)
+        return false;
+
+    // Bytecode order: Function, This, Arg0, Arg1, ..., ArgN, Call.
+    for (int32 i = argc; i >= 0; i--)
+        ins->addArg(i, current->pop()->toPassArg());
+    ins->initFunction(current->pop());
+
+    // Insert an MPrepareCall immediately before the first argument is pushed.
+    MPrepareCall *start = new MPrepareCall;
+    MPassArg *arg = ins->getArg(0)->toPassArg();
+    current->insertBefore(arg, start);
+
+    ins->initPrepareCall(start);
+
+    current->add(ins);
+    current->push(ins);
+
+    // Calls are effectful; capture interpreter state after completion.
+    if (!resumeAfter(ins))
+        return false;
+    return true;
+}
+
+bool
+IonBuilder::jsop_localinc(JSOp op)
+{
+    // Emit MIR for local ++/-- (pre and post forms): load the slot, add the
+    // increment, store it back, and leave the correct value on the stack.
+    int32 amt = (js_CodeSpec[op].format & JOF_INC) ? 1 : -1;
+    bool post_incr = !!(js_CodeSpec[op].format & JOF_POST);
+
+    // Post-increment keeps an extra copy of the original value on the stack
+    // as the expression result.
+    if (post_incr)
+        current->pushLocal(GET_SLOTNO(pc));
+    
+    current->pushLocal(GET_SLOTNO(pc));
+
+    if (!pushConstant(Int32Value(amt)))
+        return false;
+
+    if (!jsop_binary(JSOP_ADD))
+        return false;
+
+    if (!current->setLocal(GET_SLOTNO(pc)))
+        return false;
+
+    // Drop the updated value; the pre-update copy is the result.
+    if (post_incr)
+        current->pop();
+
+    return true;
+}
+
+bool
+IonBuilder::jsop_arginc(JSOp op)
+{
+    // Emit MIR for argument ++/-- (pre and post forms); identical in shape
+    // to jsop_localinc but operating on argument slots.
+    int32 amt = (js_CodeSpec[op].format & JOF_INC) ? 1 : -1;
+    bool post_incr = !!(js_CodeSpec[op].format & JOF_POST);
+
+    // Post-increment keeps an extra copy of the original value on the stack
+    // as the expression result.
+    if (post_incr)
+        current->pushArg(GET_SLOTNO(pc));
+    
+    current->pushArg(GET_SLOTNO(pc));
+
+    if (!pushConstant(Int32Value(amt)))
+        return false;
+
+    if (!jsop_binary(JSOP_ADD))
+        return false;
+
+    if (!current->setArg(GET_SLOTNO(pc)))
+        return false;
+
+    // Drop the updated value; the pre-update copy is the result.
+    if (post_incr)
+        current->pop();
+
+    return true;
+}
+
+bool
+IonBuilder::jsop_compare(JSOp op)
+{
+    // Emit MIR for a comparison opcode: pop both operands, build an MCompare
+    // specialized by the type oracle, and push the boolean result.
+
+    // Pop inputs
+    MDefinition *right = current->pop();
+    MDefinition *left = current->pop();
+
+    // Add instruction to current block
+    MCompare *ins = MCompare::New(left, right, op);
+    current->add(ins);
+
+    ins->infer(oracle->binaryOp(script, pc));
+
+    // Push result
+    current->push(ins);
+    return true;
+}
+
+MBasicBlock *
+IonBuilder::newBlock(MBasicBlock *predecessor, jsbytecode *pc)
+{
+    // Allocate a NORMAL basic block starting at |pc| and register it with
+    // the graph. Returns NULL on OOM without touching the graph.
+    MBasicBlock *block = MBasicBlock::New(this, predecessor, pc, MBasicBlock::NORMAL);
+    if (!block)
+        return NULL;
+    graph().addBlock(block);
+    return block;
+}
+
+MBasicBlock *
+IonBuilder::newPendingLoopHeader(MBasicBlock *predecessor, jsbytecode *pc)
+{
+    // Allocate a pending loop header block (backedge filled in later by
+    // finishLoop) and register it with the graph. Returns NULL on OOM
+    // without touching the graph.
+    MBasicBlock *block = MBasicBlock::NewPendingLoopHeader(this, predecessor, pc);
+    if (!block)
+        return NULL;
+    graph().addBlock(block);
+    return block;
+}
+
+// A resume point is a mapping of stack slots to MDefinitions. It is used to
+// capture the environment such that if a guard fails, and IonMonkey needs
+// to exit back to the interpreter, the interpreter state can be
+// reconstructed.
+//
+// The resume model differs from TraceMonkey in that we do not need to
+// take snapshots for every guard. Instead, we capture stack state at
+// critical points:
+//   * (1) At the beginning of every basic block.
+//   * (2) After every non-idempotent operation.
+//
+// As long as these two properties are maintained, instructions can
+// be moved, hoisted, or, eliminated without problems, and ops without side
+// effects do not need to worry about capturing state at precisely the
+// right point in time.
+//
+// Effectful instructions, of course, need to capture state after completion,
+// where the interpreter will not attempt to repeat the operation. For this,
+// resumeAfter() must be used. The state is attached directly to the effectful
+// instruction to ensure that no intermediate instructions could be injected
+// in between by a future analysis pass.
+//
+// During LIR construction, if an instruction can bail back to the interpreter,
+// we create an LSnapshot, which uses the last known resume point to request
+// register/stack assignments for every live value.
+bool
+IonBuilder::resumeAfter(MInstruction *ins)
+{
+    // Capture interpreter state at the pc following |ins|, so a bailout
+    // resumes after the effectful operation rather than repeating it.
+    return resumeAt(ins, GetNextPc(pc));
+}
+
+bool
+IonBuilder::resumeAt(MInstruction *ins, jsbytecode *pc)
+{
+    // Attach a resume point at |pc| to |ins|. Only non-idempotent
+    // (effectful) instructions may carry resume points; see the comment
+    // above resumeAfter.
+    JS_ASSERT(!ins->isIdempotent());
+
+    MResumePoint *resumePoint = MResumePoint::New(current, pc);
+    if (!resumePoint)
+        return false;
+    ins->setResumePoint(resumePoint);
+    return true;
+}
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/IonBuilder.h
@@ -0,0 +1,268 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=79:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   David Anderson <danderson@mozilla.com>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsion_bytecode_analyzer_h__
+#define jsion_bytecode_analyzer_h__
+
+// This file declares the data structures for building a MIRGraph from a
+// JSScript.
+
+#include "MIR.h"
+#include "MIRGraph.h"
+
+namespace js {
+namespace ion {
+
+// IonBuilder abstractly interprets a script's bytecode to construct the MIR
+// graph. Instead of native recursion, nested control flow is tracked with an
+// explicit stack of small CFGState machines (see CFGState below).
+class IonBuilder : public MIRGenerator
+{
+    // Outcome of processing a control-flow construct or opcode.
+    enum ControlStatus {
+        ControlStatus_Error,
+        ControlStatus_Ended,        // There is no continuation/join point.
+        ControlStatus_Joined,       // Created a join node.
+        ControlStatus_Jumped,       // Parsing another branch at the same level.
+        ControlStatus_None          // No control flow.
+    };
+
+    // Singly-linked list node for a branch (break/continue) whose target
+    // block has not been created yet.
+    struct DeferredEdge : public TempObject
+    {
+        MBasicBlock *block;
+        DeferredEdge *next;
+
+        DeferredEdge(MBasicBlock *block, DeferredEdge *next)
+          : block(block), next(next)
+        { }
+    };
+
+    // Bookkeeping for an enclosing loop or switch (see loops_/switches_).
+    struct ControlFlowInfo {
+        // Entry in the cfgStack.
+        uint32 cfgEntry;
+
+        // Bytecode target that |continue| statements branch to.
+        jsbytecode *continuepc;
+
+        ControlFlowInfo(uint32 cfgEntry, jsbytecode *continuepc)
+          : cfgEntry(cfgEntry),
+            continuepc(continuepc)
+        { }
+    };
+
+    // To avoid recursion, the bytecode analyzer uses a stack where each entry
+    // is a small state machine. As we encounter branches or jumps in the
+    // bytecode, we push information about the edges on the stack so that the
+    // CFG can be built in a tree-like fashion.
+    struct CFGState {
+        enum State {
+            IF_TRUE,            // if() { }, no else.
+            IF_TRUE_EMPTY_ELSE, // if() { }, empty else
+            IF_ELSE_TRUE,       // if() { X } else { }
+            IF_ELSE_FALSE,      // if() { } else { X }
+            DO_WHILE_LOOP_BODY, // do { x } while ()
+            DO_WHILE_LOOP_COND, // do { } while (x)
+            WHILE_LOOP_COND,    // while (x) { }
+            WHILE_LOOP_BODY,    // while () { x }
+            FOR_LOOP_COND,      // for (; x;) { }
+            FOR_LOOP_BODY,      // for (; ;) { x }
+            FOR_LOOP_UPDATE,    // for (; ; x) { }
+            TABLE_SWITCH        // switch() { x }
+        };
+
+        State state;            // Current state of this control structure.
+        jsbytecode *stopAt;     // Bytecode at which to stop the processing loop.
+
+        // For if structures, this contains branch information.
+        union {
+            struct {
+                MBasicBlock *ifFalse;
+                jsbytecode *falseEnd;
+                MBasicBlock *ifTrue;    // Set when the end of the true path is reached.
+            } branch;
+            struct {
+                // Common entry point.
+                MBasicBlock *entry;
+
+                // Position of where the loop body starts and ends.
+                jsbytecode *bodyStart;
+                jsbytecode *bodyEnd;
+
+                // pc immediately after the loop exits.
+                jsbytecode *exitpc;
+
+                // Common exit point. Created lazily, so it may be NULL.
+                MBasicBlock *successor;
+
+                // Deferred break and continue targets.
+                DeferredEdge *breaks;
+                DeferredEdge *continues;
+
+                // For-loops only.
+                jsbytecode *condpc;
+                jsbytecode *updatepc;
+                jsbytecode *updateEnd;
+            } loop;
+            struct {
+                // pc immediately after the switch.
+                jsbytecode *exitpc;
+
+                // Deferred break targets.
+                DeferredEdge *breaks;
+
+                // MIR instruction
+                MTableSwitch *ins;
+
+                // Index of the next successor to be mapped onto a block.
+                uint32 currentSuccessor;
+
+            } tableswitch;
+        };
+
+        // True for while/do-while states. NOTE(review): the FOR_LOOP_* states
+        // fall through to false here — confirm that is intended.
+        inline bool isLoop() const {
+            switch (state) {
+              case DO_WHILE_LOOP_COND:
+              case DO_WHILE_LOOP_BODY:
+              case WHILE_LOOP_COND:
+              case WHILE_LOOP_BODY:
+                return true;
+              default:
+                return false;
+            }
+        }
+
+        // Factories for the if/if-else shapes of CFGState.
+        static CFGState If(jsbytecode *join, MBasicBlock *ifFalse);
+        static CFGState IfElse(jsbytecode *trueEnd, jsbytecode *falseEnd, MBasicBlock *ifFalse);
+    };
+
+  public:
+    IonBuilder(JSContext *cx, JSScript *script, JSFunction *fun, TempAllocator &temp,
+               MIRGraph &graph, TypeOracle *oracle);
+
+  public:
+    // Entry point: builds the MIR graph for the whole script.
+    bool build();
+
+  private:
+    // Main loop: walks the bytecode, dispatching each pc to
+    // snoopControlFlow() and inspectOpcode().
+    bool traverseBytecode();
+    ControlStatus snoopControlFlow(JSOp op);
+    bool inspectOpcode(JSOp op);
+    uint32 readIndex(jsbytecode *pc);
+
+    // CFG-stack management and per-construct finishers.
+    void popCfgStack();
+    bool processDeferredContinues(CFGState &state);
+    ControlStatus processControlEnd();
+    ControlStatus processCfgStack();
+    ControlStatus processCfgEntry(CFGState &state);
+    ControlStatus processIfEnd(CFGState &state);
+    ControlStatus processIfElseTrueEnd(CFGState &state);
+    ControlStatus processIfElseFalseEnd(CFGState &state);
+    ControlStatus processDoWhileBodyEnd(CFGState &state);
+    ControlStatus processDoWhileCondEnd(CFGState &state);
+    ControlStatus processWhileCondEnd(CFGState &state);
+    ControlStatus processWhileBodyEnd(CFGState &state);
+    ControlStatus processForCondEnd(CFGState &state);
+    ControlStatus processForBodyEnd(CFGState &state);
+    ControlStatus processForUpdateEnd(CFGState &state);
+    ControlStatus processNextTableSwitchCase(CFGState &state);
+    ControlStatus processTableSwitchEnd(CFGState &state);
+    ControlStatus processSwitchBreak(JSOp op, jssrcnote *sn);
+    ControlStatus processReturn(JSOp op);
+    ControlStatus processContinue(JSOp op, jssrcnote *sn);
+    ControlStatus processBreak(JSOp op, jssrcnote *sn);
+    ControlStatus maybeLoop(JSOp op, jssrcnote *sn);
+    bool pushLoop(CFGState::State state, jsbytecode *stopAt, MBasicBlock *entry,
+                  jsbytecode *bodyStart, jsbytecode *bodyEnd, jsbytecode *exitpc,
+                  jsbytecode *continuepc = NULL);
+
+    // Basic-block factories; newPendingLoopHeader creates a loop header whose
+    // backedge is filled in later.
+    MBasicBlock *newBlock(MBasicBlock *predecessor, jsbytecode *pc);
+    MBasicBlock *newPendingLoopHeader(MBasicBlock *predecessor, jsbytecode *pc);
+    MBasicBlock *newBlock(jsbytecode *pc) {
+        return newBlock(NULL, pc);
+    }
+
+    // Given a list of pending breaks, creates a new block and inserts a Goto
+    // linking each break to the new block.
+    MBasicBlock *createBreakCatchBlock(DeferredEdge *edge, jsbytecode *pc);
+
+    // Finishes loops that do not actually loop, containing only breaks or
+    // returns.
+    ControlStatus processBrokenLoop(CFGState &state);
+
+    // Computes loop phis, places them in all successors of a loop, then
+    // handles any pending breaks.
+    ControlStatus finishLoop(CFGState &state, MBasicBlock *successor);
+
+    void assertValidTraceOp(JSOp op);
+    // for-in loops are not supported yet; always refuses to compile them.
+    bool forInLoop(JSOp op, jssrcnote *sn) {
+        return false;
+    }
+    ControlStatus forLoop(JSOp op, jssrcnote *sn);
+    ControlStatus whileLoop(JSOp op, jssrcnote *sn);
+    ControlStatus doWhileLoop(JSOp op, jssrcnote *sn);
+    ControlStatus tableSwitch(JSOp op, jssrcnote *sn);
+
+    // Please see the Big Honkin' Comment about how resume points work in
+    // IonBuilder.cpp, near the definition for this function.
+    bool resumeAt(MInstruction *ins, jsbytecode *pc);
+    bool resumeAfter(MInstruction *ins);
+
+    // Opcode-specific MIR emitters.
+    bool pushConstant(const Value &v);
+    bool jsop_bitnot();
+    bool jsop_bitop(JSOp op);
+    bool jsop_binary(JSOp op);
+    bool jsop_neg();
+    bool jsop_notearg();
+    bool jsop_call(uint32 argc);
+    bool jsop_ifeq(JSOp op);
+    bool jsop_localinc(JSOp op);
+    bool jsop_arginc(JSOp op);
+    bool jsop_compare(JSOp op);
+
+  private:
+    // Atom table used while emitting ops — presumably the script's atom map;
+    // TODO confirm against the constructor definition.
+    JSAtom **atoms;
+    // Basic block that instructions are currently being appended to.
+    MBasicBlock *current;
+    // Stack of open control structures (see the CFGState comment above).
+    Vector<CFGState, 8, IonAllocPolicy> cfgStack_;
+    // Enclosing loops and switches, as indices into cfgStack_ plus their
+    // continue targets.
+    Vector<ControlFlowInfo, 4, IonAllocPolicy> loops_;
+    Vector<ControlFlowInfo, 0, IonAllocPolicy> switches_;
+    // Source of type information for the script being compiled.
+    TypeOracle *oracle;
+};
+
+} // namespace ion
+} // namespace js
+
+#endif // jsion_bytecode_analyzer_h__
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/IonCode.h
@@ -0,0 +1,207 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=79:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   David Anderson <danderson@mozilla.com>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsion_coderef_h__
+#define jsion_coderef_h__
+
+#include "jscell.h"
+
+namespace JSC {
+    class ExecutablePool;
+}
+
+class JSScript;
+
+namespace js {
+namespace ion {
+
+// The maximum size of any buffer associated with an assembler or code object.
+// This is chosen to not overflow a signed integer, leaving room for an extra
+// bit on offsets.
+static const uint32 MAX_BUFFER_SIZE = (1 << 30) - 1;
+
+// Byte offset of a snapshot within a snapshot buffer.
+typedef uint32 SnapshotOffset;
+
+class MacroAssembler;
+
+// IonCode wraps a chunk of executable memory allocated from an
+// ExecutablePool. It is a GC cell so generated code is traced and finalized
+// along with other engine data.
+class IonCode : public gc::Cell
+{
+  protected:
+    uint8 *code_;                   // Start of the executable buffer.
+    JSC::ExecutablePool *pool_;     // Pool the buffer was allocated from.
+    uint32 bufferSize_;             // Total buffer size.
+    uint32 insnSize_;               // Instruction stream size.
+    uint32 dataSize_;               // Size of the read-only data area.
+    uint32 relocTableSize_;         // Size of the relocation table.
+
+    // All size fields are zero-initialized so that the offset accessors
+    // below are well-defined even before copyFrom() fills in real values.
+    // (Previously the default constructor left them uninitialized.)
+    IonCode()
+      : code_(NULL),
+        pool_(NULL),
+        bufferSize_(0),
+        insnSize_(0),
+        dataSize_(0),
+        relocTableSize_(0)
+    { }
+    IonCode(uint8 *code, uint32 bufferSize, JSC::ExecutablePool *pool)
+      : code_(code),
+        pool_(pool),
+        bufferSize_(bufferSize),
+        insnSize_(0),
+        dataSize_(0),
+        relocTableSize_(0)
+    { }
+
+    // Offset of the read-only data area within the buffer.
+    uint32 dataOffset() const {
+        return insnSize_;
+    }
+    // Offset of the relocation table within the buffer.
+    uint32 relocTableOffset() const {
+        return dataOffset() + dataSize_;
+    }
+
+  public:
+    uint8 *raw() const {
+        return code_;
+    }
+    size_t instructionsSize() const {
+        return insnSize_;
+    }
+    void trace(JSTracer *trc);
+    void finalize(JSContext *cx);
+
+    // Reinterpret the code entry point as a callable of type T.
+    template <typename T> T as() const {
+        return JS_DATA_TO_FUNC_PTR(T, raw());
+    }
+
+    void copyFrom(MacroAssembler &masm);
+
+    // Recover the owning IonCode from a raw code pointer; relies on the
+    // IonCode* being stored in the word immediately preceding the buffer.
+    static IonCode *FromExecutable(uint8 *buffer) {
+        IonCode *code = *(IonCode **)(buffer - sizeof(IonCode *));
+        JS_ASSERT(code->raw() == buffer);
+        return code;
+    }
+
+    static size_t OffsetOfCode() {
+        return offsetof(IonCode, code_);
+    }
+
+    // Allocates a new IonCode object which will be managed by the GC. If no
+    // object can be allocated, NULL is returned. On failure, |pool| is
+    // automatically released, so the code may be freed.
+    static IonCode *New(JSContext *cx, uint8 *code, uint32 bufferSize, JSC::ExecutablePool *pool);
+};
+
+// Sentinel stored in place of a real IonScript* — presumably marks a script
+// for which Ion compilation is disabled; TODO confirm at the use sites.
+#define ION_DISABLED_SCRIPT ((IonScript *)0x1)
+
+class SnapshotWriter;
+
+// An IonScript attaches Ion-generated information to a JSScript. The
+// trailing tables (bailout table, constants, snapshots) live in memory
+// addressed relative to |this|; the uint32 fields below are byte offsets
+// used by the accessors.
+struct IonScript
+{
+    // Code pointer containing the actual method.
+    IonCode *method_;
+
+    // Deoptimization table used by this method.
+    IonCode *deoptTable_;
+
+    // Byte offset from |this| to the snapshot buffer (see snapshots()),
+    // and the buffer's size.
+    uint32 snapshots_;
+    uint32 snapshotsSize_;
+
+    // Table mapping bailout IDs to snapshot offsets.
+    uint32 bailoutTable_;
+    uint32 bailoutEntries_;
+
+    // Constant table for constants stored in snapshots.
+    uint32 constantTable_;
+    uint32 constantEntries_;
+
+    SnapshotOffset *bailoutTable() {
+        return (SnapshotOffset *)(reinterpret_cast<uint8 *>(this) + bailoutTable_);
+    }
+    Value *constants() {
+        return (Value *)(reinterpret_cast<uint8 *>(this) + constantTable_);
+    }
+
+  private:
+    void trace(JSTracer *trc, JSScript *script);
+
+  public:
+    // Do not call directly, use IonScript::New. This is public for cx->new_.
+    IonScript();
+
+    static IonScript *New(JSContext *cx, size_t snapshotsSize, size_t snapshotEntries,
+                          size_t constants);
+    static void Trace(JSTracer *trc, JSScript *script);
+    static void Destroy(JSContext *cx, JSScript *script);
+
+  public:
+    IonCode *method() const {
+        return method_;
+    }
+    void setMethod(IonCode *code) {
+        method_ = code;
+    }
+    void setDeoptTable(IonCode *code) {
+        deoptTable_ = code;
+    }
+    const uint8 *snapshots() const {
+        return reinterpret_cast<const uint8 *>(this) + snapshots_;
+    }
+    size_t snapshotsSize() const {
+        return snapshotsSize_;
+    }
+    Value &getConstant(size_t index) {
+        JS_ASSERT(index < numConstants());
+        return constants()[index];
+    }
+    size_t numConstants() const {
+        return constantEntries_;
+    }
+    // Maps a bailout ID to the offset of its snapshot within the buffer.
+    SnapshotOffset bailoutToSnapshot(uint32 bailoutId) {
+        JS_ASSERT(bailoutId < bailoutEntries_);
+        return bailoutTable()[bailoutId];
+    }
+    void copySnapshots(const SnapshotWriter *writer);
+    void copyBailoutTable(const SnapshotOffset *table);
+    void copyConstants(const Value *vp);
+};
+
+}
+}
+
+#endif // jsion_coderef_h__
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/IonCompartment.h
@@ -0,0 +1,191 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=79:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   David Anderson <danderson@mozilla.com>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsion_ion_compartment_h__
+#define jsion_ion_compartment_h__
+
+#include "IonCode.h"
+#include "jsvalue.h"
+#include "jsvector.h"
+#include "vm/Stack.h"
+#include "IonFrames.h"
+
+namespace js {
+namespace ion {
+
+class FrameSizeClass;
+
+// Signature of the generated trampoline used to enter Ion-compiled code
+// (see IonCompartment::enterJIT).
+typedef JSBool (*EnterIonCode)(void *code, int argc, Value *argv, Value *vp,
+                               CalleeToken calleeToken);
+
+class IonActivation;
+
+// Per-compartment Ion state: the executable allocator, the current
+// activation, and lazily generated trampolines/thunks shared by all
+// compiled scripts in the compartment.
+class IonCompartment
+{
+    friend class IonActivation;
+
+    // Allocator for all executable code owned by this compartment.
+    JSC::ExecutableAllocator *execAlloc_;
+
+    // Current activation of ion::Cannon.
+    IonActivation *active_;
+
+    // Trampoline for entering JIT code; generated on first use.
+    IonCode *enterJIT_;
+
+    // Bailout tables, indexed by frame size class.
+    js::Vector<IonCode *, 4, SystemAllocPolicy> bailoutTables_;
+
+    // Generic bailout table; used if the bailout table overflows.
+    IonCode *bailoutHandler_;
+
+    // Error-returning thunk.
+    IonCode *returnError_;
+
+    // Argument-rectifying thunk, in the case of insufficient arguments passed
+    // to a function call site. Pads with |undefined|.
+    IonCode *argumentsRectifier_;
+
+    IonCode *generateEnterJIT(JSContext *cx);
+    IonCode *generateReturnError(JSContext *cx);
+    IonCode *generateArgumentsRectifier(JSContext *cx);
+    IonCode *generateBailoutTable(JSContext *cx, uint32 frameClass);
+    IonCode *generateBailoutHandler(JSContext *cx);
+
+  public:
+    bool initialize(JSContext *cx);
+    IonCompartment();
+    ~IonCompartment();
+
+    void mark(JSTracer *trc, JSCompartment *compartment);
+    void sweep(JSContext *cx);
+
+    JSC::ExecutableAllocator *execAlloc() {
+        return execAlloc_;
+    }
+
+    IonCode *getBailoutTable(JSContext *cx, const FrameSizeClass &frameClass);
+    // Fallible: generates the handler on first use; NULL on failure.
+    IonCode *getGenericBailoutHandler(JSContext *cx) {
+        if (bailoutHandler_)
+            return bailoutHandler_;
+        bailoutHandler_ = generateBailoutHandler(cx);
+        return bailoutHandler_;
+    }
+
+    // Infallible; does not generate a table.
+    IonCode *getBailoutTable(const FrameSizeClass &frameClass);
+
+    // Fallible; generates a thunk and returns the target.
+    IonCode *getArgumentsRectifier(JSContext *cx) {
+        if (argumentsRectifier_)
+            return argumentsRectifier_;
+        argumentsRectifier_ = generateArgumentsRectifier(cx);
+        return argumentsRectifier_;
+    }
+
+    // Returns the entry trampoline, generating it (and the error-return
+    // thunk it depends on) on first use. NULL on generation failure.
+    EnterIonCode enterJIT(JSContext *cx) {
+        if (!enterJIT_ && !(enterJIT_ = generateEnterJIT(cx)))
+            return NULL;
+        if (!returnError_ && !(returnError_ = generateReturnError(cx)))
+            return NULL;
+        return enterJIT_->as<EnterIonCode>();
+    }
+    // Only valid after a successful enterJIT() call.
+    IonCode *returnError() const {
+        JS_ASSERT(returnError_);
+        return returnError_;
+    }
+
+    IonActivation *activation() const {
+        return active_;
+    }
+};
+
+class BailoutClosure;
+
+// Records one entry from the interpreter into Ion code: the entry frame,
+// a link to the previous activation, and any pending bailout produced
+// while this activation was running.
+class IonActivation
+{
+    JSContext *cx_;
+    // Next-outer activation (activations form a stack).
+    IonActivation *prev_;
+    // Interpreter frame that entered Ion code.
+    StackFrame *entryfp_;
+    // NOTE(review): reference member — the referenced FrameRegs must outlive
+    // this activation; confirm the constructor's caller guarantees that.
+    FrameRegs &oldFrameRegs_;
+    // Pending bailout, if any (see setBailout/takeBailout).
+    BailoutClosure *bailout_;
+
+  public:
+    IonActivation(JSContext *cx, StackFrame *fp);
+    ~IonActivation();
+    StackFrame *entryfp() const {
+        return entryfp_;
+    }
+    IonActivation *prev() const {
+        return prev_;
+    }
+    // Attach a pending bailout; at most one may be pending at a time.
+    void setBailout(BailoutClosure *bailout) {
+        JS_ASSERT(!bailout_);
+        bailout_ = bailout;
+    }
+    // Detach and return the pending bailout, or NULL if none.
+    BailoutClosure *maybeTakeBailout() {
+        BailoutClosure *br = bailout_;
+        bailout_ = NULL;
+        return br;
+    }
+    // Detach and return the pending bailout; asserts one exists.
+    BailoutClosure *takeBailout() {
+        JS_ASSERT(bailout_);
+        return maybeTakeBailout();
+    }
+    FrameRegs &oldFrameRegs() {
+        return oldFrameRegs_;
+    }
+};
+
+} // namespace ion
+} // namespace js
+
+#endif // jsion_ion_compartment_h__
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/IonFrames.h
@@ -0,0 +1,202 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=79:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   David Anderson <dvander@alliedmods.net>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsion_frames_h__
+#define jsion_frames_h__
+
+#include "jstypes.h"
+#include "jsutil.h"
+
+struct JSFunction;
+struct JSScript;
+
+namespace js {
+namespace ion {
+
+// The layout of an Ion frame on the C stack:
+//   argN     _ 
+//   ...       \ - These are jsvals
+//   arg0      /
+//   this    _/
+//   calleeToken - Encodes script or JSFunction
+//   sizeDescriptor  - Size of the parent frame, with lower bits for FrameType.
+//   returnAddr - Return address, entering into the next call.
+//   .. locals ..
+
+// Layout of the frame prefix. This assumes the stack architecture grows down.
+// If this is ever not the case, we'll have to refactor.
+struct IonFrameData
+{
+    void *returnAddress_;       // Return address, entering into the next call.
+    uintptr_t sizeDescriptor_;  // Parent frame size, with FrameType in the low bits.
+    void *calleeToken_;         // Encodes the callee's script or JSFunction.
+};
+
+// Accessor view over IonFrameData that decodes the packed sizeDescriptor.
+class IonFramePrefix : protected IonFrameData
+{
+  public:
+    enum FrameType {
+        JSFrame,
+        EntryFrame,
+        RectifierFrame
+    };
+
+    // The FrameType is packed into the sizeDescriptor by left-shifting the
+    // sizeDescriptor by FrameTypeBits, then ORing in the FrameType.
+    static const unsigned FrameTypeBits = 2;
+    // Mask covering the whole FrameType field in sizeDescriptor_.
+    static const uintptr_t FrameTypeMask = (uintptr_t(1) << FrameTypeBits) - 1;
+
+  public:
+    // True if this is the frame passed into EnterIonCode.
+    bool isEntryFrame() const {
+        // Compare the full type field rather than testing a single bit, so
+        // this stays correct for every FrameType value (RectifierFrame has
+        // bit 0 clear, but a future type with bit 0 set would have broken
+        // the old |& EntryFrame| test).
+        return (sizeDescriptor_ & FrameTypeMask) == EntryFrame;
+    }
+    // The depth of the parent frame.
+    size_t prevFrameDepth() const {
+        JS_ASSERT(!isEntryFrame());
+        return sizeDescriptor_ >> FrameTypeBits;
+    }
+    // Pointer to the parent frame's prefix, computed from our depth; the
+    // stack is assumed to grow down (see the comment above IonFrameData).
+    IonFramePrefix *prev() const {
+        JS_ASSERT(!isEntryFrame());
+        return (IonFramePrefix *)((uint8 *)this - prevFrameDepth());
+    }
+    void *calleeToken() const {
+        return calleeToken_;
+    }
+    void setReturnAddress(void *address) {
+        returnAddress_ = address;
+    }
+};
+
+static const uint32 ION_FRAME_PREFIX_SIZE = sizeof(IonFramePrefix);
+
+// Ion frames have a few important numbers associated with them:
+//      Local depth:    The number of bytes required to spill local variables.
+//      Argument depth: The number of bytes required to push arguments and make
+//                      a function call.
+//      Slack:          A frame may temporarily use extra stack to resolve cycles.
+//
+// The (local + argument) depth determines the "fixed frame size". The fixed
+// frame size is the distance between the stack pointer and the frame header.
+// Thus, fixed >= (local + argument).
+//
+// In order to compress guards, we create shared jump tables that recover the
+// script from the stack and recover a snapshot pointer based on which jump was
+// taken. Thus, we create a jump table for each fixed frame size.
+//
+// Jump tables are big. To control the amount of jump tables we generate, each
+// platform chooses how to segregate stack size classes based on its
+// architecture.
+//
+// On some architectures, these jump tables are not used at all, or frame
+// size segregation is not needed. Thus, there is an option for a frame to not
+// have any frame size class, and to be totally dynamic.
+static const uint32 NO_FRAME_SIZE_CLASS_ID = uint32(-1);
+
+// Small value type identifying a frame's size class (see the comment above).
+// A class of NO_FRAME_SIZE_CLASS_ID means the frame is totally dynamic.
+class FrameSizeClass
+{
+    uint32 class_;
+
+    explicit FrameSizeClass(uint32 class_) : class_(class_)
+    { }
+
+  public:
+    // NOTE(review): the default constructor leaves class_ uninitialized;
+    // callers appear expected to assign before use — confirm intentional.
+    FrameSizeClass()
+    { }
+
+    static FrameSizeClass None() {
+        return FrameSizeClass(NO_FRAME_SIZE_CLASS_ID);
+    }
+    static FrameSizeClass FromClass(uint32 class_) {
+        return FrameSizeClass(class_);
+    }
+
+    // These two functions are implemented in specific CodeGenerator-* files.
+    static FrameSizeClass FromDepth(uint32 frameDepth);
+    uint32 frameSize() const;
+
+    // Numeric class ID; must not be called on None().
+    uint32 classId() const {
+        JS_ASSERT(class_ != NO_FRAME_SIZE_CLASS_ID);
+        return class_;
+    }
+
+    bool operator ==(const FrameSizeClass &other) const {
+        return class_ == other.class_;
+    }
+    bool operator !=(const FrameSizeClass &other) const {
+        return class_ != other.class_;
+    }
+};
+
+// A CalleeToken packs either the callee JSObject (function frames) or the
+// JSScript (non-function frames) into one word, tagged in the low bit:
+// 0 => JSObject, 1 => JSScript.
+typedef void * CalleeToken;
+
+static inline CalleeToken
+CalleeToToken(JSObject *fun)
+{
+    // Functions are stored untagged. (Fixed: the old casts produced a
+    // CalleeToken* (void**) where a CalleeToken (void*) was meant; the value
+    // was unchanged but the type was wrong.)
+    return CalleeToken(fun);
+}
+static inline CalleeToken
+CalleeToToken(JSScript *script)
+{
+    // Scripts are tagged by setting the low bit.
+    return CalleeToken(uintptr_t(script) | 1);
+}
+static inline bool
+IsCalleeTokenFunction(CalleeToken token)
+{
+    return (uintptr_t(token) & 1) == 0;
+}
+static inline JSObject *
+CalleeTokenToFunction(CalleeToken token)
+{
+    JS_ASSERT(IsCalleeTokenFunction(token));
+    return (JSObject *)token;
+}
+static inline JSScript *
+CalleeTokenToScript(CalleeToken token)
+{
+    JS_ASSERT(!IsCalleeTokenFunction(token));
+    // Strip the tag bit to recover the script pointer.
+    return (JSScript *)(uintptr_t(token) & ~uintptr_t(1));
+}
+
+}
+}
+
+#endif // jsion_frames_h__
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/IonLIR-inl.h
@@ -0,0 +1,82 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=79:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   David Anderson <danderson@mozilla.com>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsion_lir_inl_h__
+#define jsion_lir_inl_h__
+
+namespace js {
+namespace ion {
+
+// Checked downcasts from LInstruction to each concrete opcode type,
+// instantiated for every opcode in LIR_OPCODE_LIST.
+#define LIROP(name)                                                         \
+    L##name *LInstruction::to##name()                                       \
+    {                                                                       \
+        JS_ASSERT(is##name());                                              \
+        return static_cast<L##name *>(this);                                \
+    }
+    LIR_OPCODE_LIST(LIROP)
+#undef LIROP
+
+// Checked downcasts from LAllocation to each allocation kind. Only LUse
+// gets a mutable variant; the rest are const-only.
+#define LALLOC_CAST(type)                                                   \
+    L##type *LAllocation::to##type() {                                      \
+        JS_ASSERT(is##type());                                              \
+        return static_cast<L##type *>(this);                                \
+    }
+#define LALLOC_CONST_CAST(type)                                             \
+    const L##type *LAllocation::to##type() const {                          \
+        JS_ASSERT(is##type());                                              \
+        return static_cast<const L##type *>(this);                          \
+    }
+
+LALLOC_CAST(Use)
+LALLOC_CONST_CAST(Use)
+LALLOC_CONST_CAST(GeneralReg)
+LALLOC_CONST_CAST(FloatReg)
+LALLOC_CONST_CAST(StackSlot)
+LALLOC_CONST_CAST(Argument)
+LALLOC_CONST_CAST(ConstantIndex)
+
+#undef LALLOC_CAST
+// Fixed: LALLOC_CONST_CAST previously leaked out of this inline header.
+#undef LALLOC_CONST_CAST
+
+} // namespace ion
+} // namespace js
+
+#endif // jsion_lir_inl_h__
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/IonLIR.cpp
@@ -0,0 +1,312 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=79:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   David Anderson <danderson@mozilla.com>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#include "MIR.h"
+#include "MIRGraph.h"
+#include "IonLIR.h"
+#include "IonLIR-inl.h"
+#include "IonSpewer.h"
+
+using namespace js;
+using namespace js::ion;
+
// Construct an empty LIR graph over the MIR graph |mir|. All counters start
// at zero; the MIR graph is held by reference, so it must outlive this graph.
LIRGraph::LIRGraph(MIRGraph &mir)
  : numVirtualRegisters_(0),
    localSlotCount_(0),
    argumentSlotCount_(0),
    mir_(mir)
{
}
+
+bool
+LIRGraph::addConstantToPool(double d, uint32 *index)
+{
+    *index = constantPool_.length();
+    return constantPool_.append(DoubleValue(d));
+}
+
+bool
+LIRGraph::addConstantToPool(MConstant *ins, uint32 *index)
+{
+    *index = constantPool_.length();
+    return constantPool_.append(ins->value());
+}
+
+uint32
+LBlock::firstId()
+{
+    if (phis_.length()) {
+        return phis_[0]->id();
+    } else {
+        for (LInstructionIterator i(instructions_.begin()); i != instructions_.end(); i++) {
+            if (i->id())
+                return i->id();
+        }
+    }
+    return 0;
+}
+uint32
+LBlock::lastId()
+{
+    LInstruction *last = *instructions_.rbegin();
+    JS_ASSERT(last->id());
+    if (last->numDefs())
+        return last->getDef(last->numDefs() - 1)->virtualRegister();
+    return last->id();
+}
+
// Size the snapshot at BOX_PIECES allocation slots per MIR operand. The
// slots_ array itself is allocated separately in init(), since that
// allocation can fail.
LSnapshot::LSnapshot(MResumePoint *mir)
  : numSlots_(mir->numOperands() * BOX_PIECES),
    slots_(NULL),
    mir_(mir),
    snapshotOffset_(INVALID_SNAPSHOT_OFFSET),
    bailoutId_(INVALID_BAILOUT_ID)
{ }
+
+bool
+LSnapshot::init(MIRGenerator *gen)
+{
+    slots_ = gen->allocate<LAllocation>(numSlots_);
+    return !!slots_;
+}
+
+LSnapshot *
+LSnapshot::New(MIRGenerator *gen, MResumePoint *mir)
+{
+    LSnapshot *snapshot = new LSnapshot(mir);
+    if (!snapshot->init(gen))
+        return NULL;
+
+    IonSpew(IonSpew_Snapshots, "Generating LIR snapshot %p from MIR (%p)",
+            (void *)snapshot, (void *)mir);
+
+    return snapshot;
+}
+
+bool
+LPhi::init(MIRGenerator *gen)
+{
+    inputs_ = gen->allocate<LAllocation>(numInputs_);
+    return !!inputs_;
+}
+
// Size the LIR phi after its MIR counterpart. The inputs_ array itself is
// allocated later in init(), which can fail.
LPhi::LPhi(MPhi *mir)
  : numInputs_(mir->numOperands())
{
}
+
+LPhi *
+LPhi::New(MIRGenerator *gen, MPhi *ins)
+{
+    LPhi *phi = new LPhi(ins);
+    if (!phi->init(gen))
+        return NULL;
+    return phi;
+}
+
+void
+LInstruction::printName(FILE *fp)
+{
+    static const char *names[] =
+    {
+#define LIROP(x) #x,
+        LIR_OPCODE_LIST(LIROP)
+#undef LIROP
+    };
+    const char *name = names[op()];
+    size_t len = strlen(name);
+    for (size_t i = 0; i < len; i++)
+        fprintf(fp, "%c", tolower(name[i]));
+}
+
// One-character mnemonics for definition types, printed by PrintDefinition
// below. Indexed by def.type(), so the order must match the LDefinition
// type enumeration (the trailing comments name the intended entries).
static const char *TypeChars[] =
{
    "i",            // INTEGER
    "p",            // POINTER
    "o",            // OBJECT
    "f",            // DOUBLE
    "t",            // TYPE
    "d",            // PAYLOAD
    "x"             // BOX
};
+
+static void
+PrintDefinition(FILE *fp, const LDefinition &def)
+{
+    fprintf(fp, "[%s", TypeChars[def.type()]);
+    if (def.virtualRegister())
+        fprintf(fp, ":%d", def.virtualRegister());
+    if (def.policy() == LDefinition::PRESET) {
+        fprintf(fp, " (");
+        LAllocation::PrintAllocation(fp, def.output());
+        fprintf(fp, ")");
+    } else if (def.policy() == LDefinition::MUST_REUSE_INPUT) {
+        fprintf(fp, " (!)");
+    } else if (def.policy() == LDefinition::REDEFINED) {
+        fprintf(fp, " (r)");
+    }
+    fprintf(fp, "]");
+}
+
+static void
+PrintUse(FILE *fp, const LUse *use)
+{
+    fprintf(fp, "v%d:", use->virtualRegister());
+    if (use->policy() == LUse::ANY) {
+        fprintf(fp, "*");
+    } else if (use->policy() == LUse::REGISTER) {
+        fprintf(fp, "r");
+    } else if (use->policy() == LUse::COPY) {
+        fprintf(fp, "c");
+    } else {
+        // Unfortunately, we don't know here whether the virtual register is a
+        // float or a double. Should we steal a bit in LUse for help? For now,
+        // nothing defines any fixed xmm registers.
+        fprintf(fp, "%s", Registers::GetName(Registers::Code(use->registerCode())));
+    }
+}
+
+void
+LAllocation::PrintAllocation(FILE *fp, const LAllocation *a)
+{
+    switch (a->kind()) {
+      case LAllocation::CONSTANT_VALUE:
+      case LAllocation::CONSTANT_INDEX:
+        fprintf(fp, "c");
+        break;
+      case LAllocation::GPR:
+        fprintf(fp, "=%s", a->toGeneralReg()->reg().name());
+        break;
+      case LAllocation::FPU:
+        fprintf(fp, "=%s", a->toFloatReg()->reg().name());
+        break;
+      case LAllocation::STACK_SLOT:
+        fprintf(fp, "stack:i%d", a->toStackSlot()->slot());
+        break;
+      case LAllocation::DOUBLE_SLOT:
+        fprintf(fp, "stack:d%d", a->toStackSlot()->slot());
+        break;
+      case LAllocation::ARGUMENT:
+        fprintf(fp, "arg:%d", a->toArgument()->index());
+        break;
+      case LAllocation::USE:
+        PrintUse(fp, a->toUse());
+        break;
+      default:
+        JS_NOT_REACHED("what?");
+        break;
+    }
+}
+
+void
+LInstruction::printOperands(FILE *fp)
+{
+    for (size_t i = 0; i < numOperands(); i++) {
+        fprintf(fp, " (");
+        LAllocation::PrintAllocation(fp, getOperand(i));
+        fprintf(fp, ")");
+        if (i != numOperands() - 1)
+            fprintf(fp, ",");
+    }
+}
+
// Attach |snapshot| to this instruction. An instruction carries at most one
// snapshot, hence the assert that none is set yet.
void
LInstruction::assignSnapshot(LSnapshot *snapshot)
{
    JS_ASSERT(!snapshot_);
    snapshot_ = snapshot;
    IonSpew(IonSpew_Snapshots, "Assigning snapshot %p to instruction %p",
            (void *)snapshot, (void *)this);
}
+
+void
+LInstruction::print(FILE *fp)
+{
+    printName(fp);
+
+    fprintf(fp, " (");
+    for (size_t i = 0; i < numDefs(); i++) {
+        PrintDefinition(fp, *getDef(i));
+        if (i != numDefs() - 1)
+            fprintf(fp, ", ");
+    }
+    fprintf(fp, ")");
+
+    printInfo(fp);
+
+    if (numTemps()) {
+        fprintf(fp, " t=(");
+        for (size_t i = 0; i < numTemps(); i++) {
+            PrintDefinition(fp, *getTemp(i));
+            if (i != numTemps() - 1)
+                fprintf(fp, ", ");
+        }
+        fprintf(fp, ")");
+    }
+}
+
+void
+LMoveGroup::printOperands(FILE *fp)
+{
+    for (size_t i = 0; i < numMoves(); i++) {
+        const LMove &move = getMove(i);
+        fprintf(fp, "[");
+        LAllocation::PrintAllocation(fp, move.from());
+        fprintf(fp, " -> ");
+        LAllocation::PrintAllocation(fp, move.to());
+        fprintf(fp, "]");
+        if (i != numMoves() - 1)
+            fprintf(fp, ", ");
+    }
+}
+
// Branch target for the true edge: the label of ifTrue_'s LIR counterpart.
// (NOTE(review): ifTrue_ is presumably the MIR successor block — confirm.)
Label *
LTestVAndBranch::ifTrue()
{
    return ifTrue_->lir()->label();
}
+
// Branch target for the false edge: the label of ifFalse_'s LIR counterpart.
// (NOTE(review): ifFalse_ is presumably the MIR successor block — confirm.)
Label *
LTestVAndBranch::ifFalse()
{
    return ifFalse_->lir()->label();
}
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/IonLIR.h
@@ -0,0 +1,966 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=79:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   David Anderson <danderson@mozilla.com>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsion_lir_h__
+#define jsion_lir_h__
+
+// This file declares the core data structures for LIR: storage allocations for
+// inputs and outputs, as well as the interface instructions must conform to.
+
+#include "jscntxt.h"
+#include "IonAllocPolicy.h"
+#include "InlineList.h"
+#include "FixedArityList.h"
+#include "LOpcodes.h"
+#include "TypeOracle.h"
+#include "IonRegisters.h"
+#include "MIR.h"
+#include "MIRGraph.h"
+#include "shared/Assembler-shared.h"
+#include "Snapshots.h"
+#include "Bailouts.h"
+
+#if defined(JS_CPU_X86)
+# include "x86/StackAssignment-x86.h"
+#elif defined(JS_CPU_X64)
+# include "x64/StackAssignment-x64.h"
+#elif defined(JS_CPU_ARM)
+# include "arm/StackAssignment-arm.h"
+#else
+#error "CPU Not Supported"
+#endif
+
+namespace js {
+namespace ion {
+
+class LUse;
+class LGeneralReg;
+class LFloatReg;
+class LStackSlot;
+class LArgument;
+class LConstantIndex;
+class MBasicBlock;
+class MTableSwitch;
+class MIRGenerator;
+class MSnapshot;
+
// Virtual register numbers must fit in 21 bits — presumably the width of a
// packed field elsewhere in the register allocator; TODO confirm.
static const uint32 MAX_VIRTUAL_REGISTERS = (1 << 21) - 1;
static const uint32 VREG_INCREMENT = 1;

// Frame slot 0 is reserved for |this| (per the name; confirm against users).
static const uint32 THIS_FRAME_SLOT = 0;

// A boxed js::Value occupies BOX_PIECES pieces:
//  - JS_NUNBOX32 (32-bit layouts): two pieces — presumably a type tag at
//    VREG_TYPE_OFFSET and a payload at VREG_DATA_OFFSET from the base
//    virtual register; TYPE_INDEX/PAYLOAD_INDEX select within a pair.
//  - JS_PUNBOX64 (64-bit layouts): a single piece holds the whole value.
#if defined(JS_NUNBOX32)
# define BOX_PIECES         2
static const uint32 VREG_TYPE_OFFSET = 0;
static const uint32 VREG_DATA_OFFSET = 1;
static const uint32 TYPE_INDEX = 0;
static const uint32 PAYLOAD_INDEX = 1;
#elif defined(JS_PUNBOX64)
# define BOX_PIECES         1
#else
# error "Unknown!"
#endif
+
+// Represents storage for an operand. For constants, the pointer is tagged
+// with a single bit, and the untagged pointer is a pointer to a Value.
+class LAllocation : public TempObject
+{
+    uintptr_t bits_;
+
+  protected:
+    static const uintptr_t TAG_BIT = 1;
+    static const uintptr_t TAG_SHIFT = 0;
+    static const uintptr_t TAG_MASK = 1 << TAG_SHIFT;
+    static const uintptr_t KIND_BITS = 3;
+    static const uintptr_t KIND_SHIFT = TAG_SHIFT + TAG_BIT;
+    static const uintptr_t KIND_MASK = (1 << KIND_BITS) - 1;
+    static const uintptr_t DATA_BITS = (sizeof(uint32) * 8) - KIND_BITS - TAG_BIT;
+    static const uintptr_t DATA_SHIFT = KIND_SHIFT + KIND_BITS;
+    static const uintptr_t DATA_MASK = (1 << DATA_BITS) - 1;
+
+  public:
+    enum Kind {
+        USE,            // Use of a virtual register, with physical allocation policy.
+        CONSTANT_VALUE, // Constant js::Value.
+        CONSTANT_INDEX, // Constant arbitrary index.
+        GPR,            // General purpose register.
+        FPU,            // Floating-point register.
+        STACK_SLOT,     // 32-bit stack slot.
+        DOUBLE_SLOT,    // 64-bit stack slot.
+        ARGUMENT        // Argument slot.
+    };
+
+  protected:
+    bool isTagged() const {
+        return !!(bits_ & TAG_MASK);
+    }
+
+    int32 data() const {
+        return int32(bits_) >> DATA_SHIFT;
+    }
+    void setData(int32 data) {
+        JS_ASSERT(int32(data) <= int32(DATA_MASK));
+        bits_ &= ~(DATA_MASK << DATA_SHIFT);
+        bits_ |= (data << DATA_SHIFT);
+    }
+    void setKindAndData(Kind kind, uint32 data) {
+        JS_ASSERT(int32(data) <= int32(DATA_MASK));
+        bits_ = (uint32(kind) << KIND_SHIFT) | data << DATA_SHIFT;