Bug 840282 - OdinMonkey (sr=dmandelin)
authorLuke Wagner <luke@mozilla.com>
Fri, 15 Mar 2013 02:29:02 -0700
changeset 124919 b3d85b68449d
parent 124918 0ba5f40a0340
child 124920 04544e876ce3
push id24673
push userlwagner@mozilla.com
push date2013-03-15 11:00 +0000
treeherdermozilla-inbound@b3d85b68449d [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersdmandelin
bugs840282
milestone22.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 840282 - OdinMonkey (sr=dmandelin)
dom/base/nsJSEnvironment.cpp
dom/workers/RuntimeService.cpp
js/public/MemoryMetrics.h
js/public/Utility.h
js/public/Vector.h
js/src/Makefile.in
js/src/assembler/assembler/X86Assembler.h
js/src/assembler/jit/ExecutableAllocator.cpp
js/src/assembler/jit/ExecutableAllocator.h
js/src/builtin/TestingFunctions.cpp
js/src/frontend/BytecodeCompiler.cpp
js/src/frontend/BytecodeEmitter.cpp
js/src/frontend/FoldConstants.cpp
js/src/frontend/FullParseHandler.h
js/src/frontend/ParseNode-inl.h
js/src/frontend/ParseNode.cpp
js/src/frontend/ParseNode.h
js/src/frontend/Parser-inl.h
js/src/frontend/Parser.cpp
js/src/frontend/Parser.h
js/src/frontend/SharedContext-inl.h
js/src/frontend/SharedContext.h
js/src/frontend/SyntaxParseHandler.h
js/src/frontend/TokenStream.cpp
js/src/frontend/TokenStream.h
js/src/gc/Barrier.h
js/src/gc/Marking.cpp
js/src/gc/Marking.h
js/src/ion/AsmJS.cpp
js/src/ion/AsmJS.h
js/src/ion/AsmJSLink.cpp
js/src/ion/AsmJSModule.h
js/src/ion/AsmJSSignalHandlers.cpp
js/src/ion/CodeGenerator.cpp
js/src/ion/CodeGenerator.h
js/src/ion/CompileInfo.h
js/src/ion/EffectiveAddressAnalysis.cpp
js/src/ion/EffectiveAddressAnalysis.h
js/src/ion/Ion.cpp
js/src/ion/Ion.h
js/src/ion/IonAllocPolicy.h
js/src/ion/IonAnalysis.cpp
js/src/ion/IonBuilder.cpp
js/src/ion/IonMacroAssembler.cpp
js/src/ion/IonMacroAssembler.h
js/src/ion/IonTypes.h
js/src/ion/LIR-Common.h
js/src/ion/LIR.cpp
js/src/ion/LIR.h
js/src/ion/LOpcodes.h
js/src/ion/Lowering.cpp
js/src/ion/Lowering.h
js/src/ion/MIR.cpp
js/src/ion/MIR.h
js/src/ion/MIRGenerator.h
js/src/ion/MIRGraph.cpp
js/src/ion/MIRGraph.h
js/src/ion/MOpcodes.h
js/src/ion/MoveResolver.h
js/src/ion/ParallelArrayAnalysis.cpp
js/src/ion/RangeAnalysis.cpp
js/src/ion/RegisterAllocator.h
js/src/ion/RegisterSets.h
js/src/ion/Safepoints.cpp
js/src/ion/StupidAllocator.cpp
js/src/ion/TypeOracle.h
js/src/ion/arm/Assembler-arm.cpp
js/src/ion/arm/Assembler-arm.h
js/src/ion/arm/CodeGenerator-arm.cpp
js/src/ion/arm/CodeGenerator-arm.h
js/src/ion/arm/LIR-arm.h
js/src/ion/arm/LOpcodes-arm.h
js/src/ion/arm/MacroAssembler-arm.h
js/src/ion/shared/Assembler-shared.h
js/src/ion/shared/Assembler-x86-shared.cpp
js/src/ion/shared/Assembler-x86-shared.h
js/src/ion/shared/CodeGenerator-shared-inl.h
js/src/ion/shared/CodeGenerator-shared.cpp
js/src/ion/shared/CodeGenerator-shared.h
js/src/ion/shared/CodeGenerator-x86-shared.cpp
js/src/ion/shared/CodeGenerator-x86-shared.h
js/src/ion/shared/LIR-x86-shared.h
js/src/ion/shared/Lowering-shared-inl.h
js/src/ion/shared/Lowering-shared.h
js/src/ion/shared/Lowering-x86-shared.cpp
js/src/ion/shared/Lowering-x86-shared.h
js/src/ion/shared/MacroAssembler-x86-shared.h
js/src/ion/x64/Assembler-x64.cpp
js/src/ion/x64/Assembler-x64.h
js/src/ion/x64/CodeGenerator-x64.cpp
js/src/ion/x64/CodeGenerator-x64.h
js/src/ion/x64/LIR-x64.h
js/src/ion/x64/LOpcodes-x64.h
js/src/ion/x64/Lowering-x64.cpp
js/src/ion/x64/Lowering-x64.h
js/src/ion/x64/MacroAssembler-x64.h
js/src/ion/x86/Architecture-x86.h
js/src/ion/x86/Assembler-x86.cpp
js/src/ion/x86/Assembler-x86.h
js/src/ion/x86/CodeGenerator-x86.cpp
js/src/ion/x86/CodeGenerator-x86.h
js/src/ion/x86/LIR-x86.h
js/src/ion/x86/LOpcodes-x86.h
js/src/ion/x86/Lowering-x86.cpp
js/src/ion/x86/Lowering-x86.h
js/src/ion/x86/MacroAssembler-x86.h
js/src/jit-test/lib/asm.js
js/src/jit-test/tests/asm.js/testBasic.js
js/src/jit-test/tests/asm.js/testCall.js
js/src/jit-test/tests/asm.js/testCompoundPlusMinus.js
js/src/jit-test/tests/asm.js/testControlFlow.js
js/src/jit-test/tests/asm.js/testDebugModeDisables.js
js/src/jit-test/tests/asm.js/testExpressions.js
js/src/jit-test/tests/asm.js/testFFI.js
js/src/jit-test/tests/asm.js/testFastHeapAccess.js
js/src/jit-test/tests/asm.js/testFloatingPoint.js
js/src/jit-test/tests/asm.js/testFunctionPtr.js
js/src/jit-test/tests/asm.js/testGlobals.js
js/src/jit-test/tests/asm.js/testHeapAccess.js
js/src/jit-test/tests/asm.js/testLiterals.js
js/src/jit-test/tests/asm.js/testMathLib.js
js/src/jit-test/tests/asm.js/testTimeout1.js
js/src/jit-test/tests/asm.js/testTimeout2.js
js/src/jit-test/tests/asm.js/testTimeout3.js
js/src/jit-test/tests/asm.js/testTimeout4.js
js/src/jit-test/tests/asm.js/testX86ByteStore.js
js/src/jit-test/tests/asm.js/testZOOB.js
js/src/jit-test/tests/auto-regress/bug759312.js
js/src/js.msg
js/src/jsanalyze.cpp
js/src/jsapi.cpp
js/src/jsapi.h
js/src/jscntxt.cpp
js/src/jscntxt.h
js/src/jsfun.h
js/src/jsinfer.cpp
js/src/jsinterp.cpp
js/src/jsmath.cpp
js/src/jsmath.h
js/src/jsobjinlines.h
js/src/jsopcode.tbl
js/src/jsscript.cpp
js/src/jsscript.h
js/src/jstypedarray.cpp
js/src/jstypedarray.h
js/src/jstypedarrayinlines.h
js/src/jswin.h
js/src/methodjit/FastOps.cpp
js/src/shell/js.cpp
js/src/vm/CommonPropertyNames.h
js/src/vm/ObjectImpl.h
js/src/vm/Xdr.h
js/xpconnect/shell/xpcshell.cpp
js/xpconnect/src/XPCJSRuntime.cpp
modules/libpref/src/init/all.js
--- a/dom/base/nsJSEnvironment.cpp
+++ b/dom/base/nsJSEnvironment.cpp
@@ -969,16 +969,17 @@ static const char js_typeinfer_str[]    
 static const char js_pccounts_content_str[]   = JS_OPTIONS_DOT_STR "pccounts.content";
 static const char js_pccounts_chrome_str[]    = JS_OPTIONS_DOT_STR "pccounts.chrome";
 static const char js_jit_hardening_str[]      = JS_OPTIONS_DOT_STR "jit_hardening";
 static const char js_memlog_option_str[]      = JS_OPTIONS_DOT_STR "mem.log";
 static const char js_memnotify_option_str[]   = JS_OPTIONS_DOT_STR "mem.notify";
 static const char js_disable_explicit_compartment_gc[] =
   JS_OPTIONS_DOT_STR "mem.disable_explicit_compartment_gc";
 static const char js_ion_content_str[]        = JS_OPTIONS_DOT_STR "ion.content";
+static const char js_asmjs_content_str[]      = JS_OPTIONS_DOT_STR "experimental_asmjs";
 static const char js_ion_parallel_compilation_str[] = JS_OPTIONS_DOT_STR "ion.parallel_compilation";
 
 int
 nsJSContext::JSOptionChangedCallback(const char *pref, void *data)
 {
   nsJSContext *context = reinterpret_cast<nsJSContext *>(data);
   uint32_t oldDefaultJSOptions = context->mDefaultJSOptions;
   uint32_t newDefaultJSOptions = oldDefaultJSOptions;
@@ -1009,28 +1010,30 @@ nsJSContext::JSOptionChangedCallback(con
                                                js_methodjit_content_str);
   bool usePCCounts = Preferences::GetBool(chromeWindow || !contentWindow ?
                                             js_pccounts_chrome_str :
                                             js_pccounts_content_str);
   bool useMethodJITAlways = Preferences::GetBool(js_methodjit_always_str);
   bool useTypeInference = !chromeWindow && contentWindow && Preferences::GetBool(js_typeinfer_str);
   bool useHardening = Preferences::GetBool(js_jit_hardening_str);
   bool useIon = Preferences::GetBool(js_ion_content_str);
+  bool useAsmJS = Preferences::GetBool(js_asmjs_content_str);
   bool parallelIonCompilation = Preferences::GetBool(js_ion_parallel_compilation_str);
   nsCOMPtr<nsIXULRuntime> xr = do_GetService(XULRUNTIME_SERVICE_CONTRACTID);
   if (xr) {
     bool safeMode = false;
     xr->GetInSafeMode(&safeMode);
     if (safeMode) {
       useMethodJIT = false;
       usePCCounts = false;
       useTypeInference = false;
       useMethodJITAlways = true;
       useHardening = false;
       useIon = false;
+      useAsmJS = false;
     }
   }
 
   if (useMethodJIT)
     newDefaultJSOptions |= JSOPTION_METHODJIT;
   else
     newDefaultJSOptions &= ~JSOPTION_METHODJIT;
 
@@ -1049,16 +1052,21 @@ nsJSContext::JSOptionChangedCallback(con
   else
     newDefaultJSOptions &= ~JSOPTION_TYPE_INFERENCE;
 
   if (useIon)
     newDefaultJSOptions |= JSOPTION_ION;
   else
     newDefaultJSOptions &= ~JSOPTION_ION;
 
+  if (useAsmJS)
+    newDefaultJSOptions |= JSOPTION_ASMJS;
+  else
+    newDefaultJSOptions &= ~JSOPTION_ASMJS;
+
 #ifdef DEBUG
   // In debug builds, warnings are enabled in chrome context if
   // javascript.options.strict.debug is true
   bool strictDebug = Preferences::GetBool(js_strict_debug_option_str);
   if (strictDebug && (newDefaultJSOptions & JSOPTION_STRICT) == 0) {
     if (chromeWindow || !contentWindow)
       newDefaultJSOptions |= JSOPTION_STRICT;
   }
--- a/dom/workers/RuntimeService.cpp
+++ b/dom/workers/RuntimeService.cpp
@@ -149,16 +149,17 @@ enum {
   PREF_strict = 0,
   PREF_werror,
   PREF_methodjit,
   PREF_methodjit_always,
   PREF_typeinference,
   PREF_jit_hardening,
   PREF_mem_max,
   PREF_ion,
+  PREF_asmjs,
   PREF_mem_gc_allocation_threshold_mb,
 
 #ifdef JS_GC_ZEAL
   PREF_gczeal,
 #endif
 
   PREF_COUNT
 };
@@ -169,16 +170,17 @@ const char* gPrefsToWatch[] = {
   JS_OPTIONS_DOT_STR "strict",
   JS_OPTIONS_DOT_STR "werror",
   JS_OPTIONS_DOT_STR "methodjit.content",
   JS_OPTIONS_DOT_STR "methodjit_always",
   JS_OPTIONS_DOT_STR "typeinference",
   JS_OPTIONS_DOT_STR "jit_hardening",
   JS_OPTIONS_DOT_STR "mem.max",
   JS_OPTIONS_DOT_STR "ion.content",
+  JS_OPTIONS_DOT_STR "experimental_asmjs",
   "dom.workers.mem.gc_allocation_threshold_mb"
 
 #ifdef JS_GC_ZEAL
   , PREF_WORKERS_GCZEAL
 #endif
 };
 
 MOZ_STATIC_ASSERT(NS_ARRAY_LENGTH(gPrefsToWatch) == PREF_COUNT,
@@ -223,16 +225,19 @@ PrefCallback(const char* aPrefName, void
       newOptions |= JSOPTION_METHODJIT_ALWAYS;
     }
     if (Preferences::GetBool(gPrefsToWatch[PREF_typeinference])) {
       newOptions |= JSOPTION_TYPE_INFERENCE;
     }
     if (Preferences::GetBool(gPrefsToWatch[PREF_ion])) {
       newOptions |= JSOPTION_ION;
     }
+    if (Preferences::GetBool(gPrefsToWatch[PREF_asmjs])) {
+      newOptions |= JSOPTION_ASMJS;
+    }
 
     RuntimeService::SetDefaultJSContextOptions(newOptions);
     rts->UpdateAllWorkerJSContextOptions();
   }
 #ifdef JS_GC_ZEAL
   else if (!strcmp(aPrefName, gPrefsToWatch[PREF_gczeal])) {
     int32_t gczeal = Preferences::GetInt(gPrefsToWatch[PREF_gczeal]);
     RuntimeService::SetDefaultGCZeal(uint8_t(clamped(gczeal, 0, 3)));
--- a/js/public/MemoryMetrics.h
+++ b/js/public/MemoryMetrics.h
@@ -124,16 +124,17 @@ struct RuntimeSizes
 
     size_t object;
     size_t atomsTable;
     size_t contexts;
     size_t dtoa;
     size_t temporary;
     size_t jaegerCode;
     size_t ionCode;
+    size_t asmJSCode;
     size_t regexpCode;
     size_t unusedCode;
     size_t regexpData;
     size_t stack;
     size_t gcMarker;
     size_t mathCache;
     size_t scriptData;
     size_t scriptSources;
--- a/js/public/Utility.h
+++ b/js/public/Utility.h
@@ -558,16 +558,23 @@ SCOPED_TEMPLATE(ScopedJSFreePtr, ScopedF
 
 template <typename T>
 struct ScopedDeletePtrTraits : public ScopedFreePtrTraits<T>
 {
     static void release(T *ptr) { js_delete(ptr); }
 };
 SCOPED_TEMPLATE(ScopedJSDeletePtr, ScopedDeletePtrTraits)
 
+template <typename T>
+struct ScopedReleasePtrTraits : public ScopedFreePtrTraits<T>
+{
+    static void release(T *ptr) { if (ptr) ptr->release(); }
+};
+SCOPED_TEMPLATE(ScopedReleasePtr, ScopedReleasePtrTraits)
+
 } /* namespace js */
 
 namespace js {
 
 /*
  * "Move" References
  *
  * Some types can be copied much more efficiently if we know the original's
--- a/js/public/Vector.h
+++ b/js/public/Vector.h
@@ -524,17 +524,17 @@ class Vector : private AllocPolicy
 /* Vector Implementation */
 
 template <class T, size_t N, class AllocPolicy>
 JS_ALWAYS_INLINE
 Vector<T,N,AllocPolicy>::Vector(AllocPolicy ap)
   : AllocPolicy(ap), mBegin((T *)storage.addr()), mLength(0),
     mCapacity(sInlineCapacity)
 #ifdef DEBUG
-  , mReserved(0), entered(false)
+  , mReserved(sInlineCapacity), entered(false)
 #endif
 {}
 
 /* Move constructor. */
 template <class T, size_t N, class AllocPolicy>
 JS_ALWAYS_INLINE
 Vector<T, N, AllocPolicy>::Vector(MoveRef<Vector> rhs)
     : AllocPolicy(rhs)
@@ -561,17 +561,17 @@ Vector<T, N, AllocPolicy>::Vector(MoveRe
          * Take src's buffer, and turn src into an empty vector using
          * in-line storage.
          */
         mBegin = rhs->mBegin;
         rhs->mBegin = (T *) rhs->storage.addr();
         rhs->mCapacity = sInlineCapacity;
         rhs->mLength = 0;
 #ifdef DEBUG
-        rhs->mReserved = 0;
+        rhs->mReserved = sInlineCapacity;
 #endif
     }
 }
 
 /* Move assignment. */
 template <class T, size_t N, class AP>
 JS_ALWAYS_INLINE
 Vector<T, N, AP> &
@@ -801,17 +801,17 @@ Vector<T,N,AP>::clearAndFree()
 
     if (usingInlineStorage())
         return;
 
     this->free_(beginNoCheck());
     mBegin = (T *)storage.addr();
     mCapacity = sInlineCapacity;
 #ifdef DEBUG
-    mReserved = 0;
+    mReserved = sInlineCapacity;
 #endif
 }
 
 template <class T, size_t N, class AP>
 inline bool
 Vector<T,N,AP>::canAppendWithoutRealloc(size_t needed) const
 {
     return mLength + needed <= mCapacity;
@@ -991,17 +991,17 @@ Vector<T,N,AP>::extractRawBuffer()
         /* mBegin, mCapacity are unchanged. */
         mLength = 0;
     } else {
         ret = mBegin;
         mBegin = (T *)storage.addr();
         mLength = 0;
         mCapacity = sInlineCapacity;
 #ifdef DEBUG
-        mReserved = 0;
+        mReserved = sInlineCapacity;
 #endif
     }
     return ret;
 }
 
 template <class T, size_t N, class AP>
 inline void
 Vector<T,N,AP>::replaceRawBuffer(T *p, size_t aLength)
--- a/js/src/Makefile.in
+++ b/js/src/Makefile.in
@@ -41,16 +41,17 @@ include $(MFBT_ROOT)/exported_headers.mk
 
 VPATH		+= \
 		$(srcdir) \
 		$(srcdir)/builtin \
 		$(srcdir)/devtools \
 		$(srcdir)/ds \
 		$(srcdir)/frontend \
 		$(srcdir)/gc \
+		$(srcdir)/ion \
 		$(srcdir)/vm \
 		$(NULL)
 
 CPPSRCS		= \
 		jsalloc.cpp \
 		jsanalyze.cpp \
 		jsapi.cpp \
 		jsarray.cpp \
@@ -138,16 +139,19 @@ CPPSRCS		= \
 		Statistics.cpp \
 		StoreBuffer.cpp \
 		Iteration.cpp \
 		Verifier.cpp \
 		StringBuffer.cpp \
 		Unicode.cpp \
 		Xdr.cpp \
 		Module.cpp \
+		AsmJS.cpp \
+		AsmJSLink.cpp \
+		AsmJSSignalHandlers.cpp \
 		$(NULL)
 
 # Changes to internal header files, used externally, massively slow down
 # browser builds.  Don't add new files here unless you know what you're
 # doing!
 INSTALLED_HEADERS = \
 		js-config.h \
 		jscpucfg.h \
@@ -294,16 +298,17 @@ CPPSRCS +=	MIR.cpp \
 		TypePolicy.cpp \
 		ValueNumbering.cpp \
 		RangeAnalysis.cpp \
 		VMFunctions.cpp \
 		ParallelFunctions.cpp \
 		AliasAnalysis.cpp \
 		ParallelArrayAnalysis.cpp \
 		UnreachableCodeElimination.cpp \
+		EffectiveAddressAnalysis.cpp \
 		$(NULL)
 endif #ENABLE_ION
 ifeq (86, $(findstring 86,$(TARGET_CPU)))
 ifdef ENABLE_ION
 CPPSRCS +=	CodeGenerator-x86-shared.cpp
 CPPSRCS +=	IonFrames-x86-shared.cpp
 CPPSRCS +=	MoveEmitter-x86-shared.cpp
 CPPSRCS +=	Assembler-x86-shared.cpp
--- a/js/src/assembler/assembler/X86Assembler.h
+++ b/js/src/assembler/assembler/X86Assembler.h
@@ -222,16 +222,18 @@ private:
         OP_TEST_EvGv                    = 0x85,
         OP_XCHG_EvGv                    = 0x87,
         OP_MOV_EbGv                     = 0x88,
         OP_MOV_EvGv                     = 0x89,
         OP_MOV_GvEv                     = 0x8B,
         OP_LEA                          = 0x8D,
         OP_GROUP1A_Ev                   = 0x8F,
         OP_NOP                          = 0x90,
+        OP_PUSHFLAGS                    = 0x9C,
+        OP_POPFLAGS                     = 0x9D,
         OP_CDQ                          = 0x99,
         OP_MOV_EAXOv                    = 0xA1,
         OP_MOV_OvEAX                    = 0xA3,
         OP_MOV_EAXIv                    = 0xB8,
         OP_GROUP2_EvIb                  = 0xC1,
         OP_RET_Iz                       = 0xC2,
         OP_RET                          = 0xC3,
         OP_GROUP11_EvIb                 = 0xC6,
@@ -320,16 +322,17 @@ private:
 
         GROUP2_OP_SHL = 4,
         GROUP2_OP_SHR = 5,
         GROUP2_OP_SAR = 7,
 
         GROUP3_OP_TEST = 0,
         GROUP3_OP_NOT  = 2,
         GROUP3_OP_NEG  = 3,
+        GROUP3_OP_DIV  = 6,
         GROUP3_OP_IDIV = 7,
 
         GROUP5_OP_CALLN = 2,
         GROUP5_OP_JMPN  = 4,
         GROUP5_OP_PUSH  = 6,
 
         FPU6_OP_FSTP = 3,
 
@@ -433,16 +436,28 @@ public:
     }
 
     void pop_m(int offset, RegisterID base)
     {
         FIXME_INSN_PRINTING;
         m_formatter.oneByteOp(OP_GROUP1A_Ev, GROUP1A_OP_POP, base, offset);
     }
 
+    void push_flags()
+    {
+        spew("push flags register");
+        m_formatter.oneByteOp(OP_PUSHFLAGS);
+    }
+
+    void pop_flags()
+    {
+        spew("pop flags register");
+        m_formatter.oneByteOp(OP_POPFLAGS);
+    }
+
     // Arithmetic operations:
 
 #if !WTF_CPU_X86_64
     void adcl_im(int imm, void* addr)
     {
         FIXME_INSN_PRINTING;
         if (CAN_SIGN_EXTEND_8_32(imm)) {
             m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADC, addr);
@@ -1019,21 +1034,28 @@ public:
     void imull_i32r(RegisterID src, int32_t value, RegisterID dst)
     {
         spew("imull      %d, %s, %s",
              value, nameIReg(4, src), nameIReg(4, dst));
         m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src);
         m_formatter.immediate32(value);
     }
 
-    void idivl_r(RegisterID dst)
+    void idivl_r(RegisterID divisor)
     {
         spew("idivl      %s", 
-             nameIReg(4, dst));
-        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
+             nameIReg(4, divisor));
+        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, divisor);
+    }
+
+    void divl_r(RegisterID divisor)
+    {
+        spew("div        %s",
+             nameIReg(4, divisor));
+        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_DIV, divisor);
     }
 
     // Comparisons:
 
     void cmpl_rr(RegisterID src, RegisterID dst)
     {
         spew("cmpl       %s, %s", 
              nameIReg(4, src), nameIReg(4, dst));
@@ -1379,16 +1401,24 @@ public:
     void movw_rm(RegisterID src, int offset, RegisterID base)
     {
         spew("movw       %s, %s0x%x(%s)",
              nameIReg(2,src), PRETTY_PRINT_OFFSET(offset), nameIReg(base));
         m_formatter.prefix(PRE_OPERAND_SIZE);
         m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
     }
     
+    void movw_rm_disp32(RegisterID src, int offset, RegisterID base)
+    {
+        spew("movw       %s, %s0x%x(%s)",
+             nameIReg(2,src), PRETTY_PRINT_OFFSET(offset), nameIReg(base));
+        m_formatter.prefix(PRE_OPERAND_SIZE);
+        m_formatter.oneByteOp_disp32(OP_MOV_EvGv, src, base, offset);
+    }
+    
     void movl_rm(RegisterID src, int offset, RegisterID base)
     {
         spew("movl       %s, %s0x%x(%s)",
              nameIReg(4,src), PRETTY_PRINT_OFFSET(offset), nameIReg(base));
         m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
     }
 
     void movl_rm_disp32(RegisterID src, int offset, RegisterID base)
@@ -1406,17 +1436,17 @@ public:
     }
 
     void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
     {
         spew("movl       %s, %d(%s,%s,%d)", 
              nameIReg(4, src), offset, nameIReg(base), nameIReg(index), scale);
         m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
     }
-    
+
     void movl_mEAX(void* addr)
     {
         FIXME_INSN_PRINTING;
         m_formatter.oneByteOp(OP_MOV_EAXOv);
 #if WTF_CPU_X86_64
         m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
 #else
         m_formatter.immediate32(reinterpret_cast<int>(addr));
@@ -1431,16 +1461,25 @@ public:
     }
 
     void movl_mr_disp32(int offset, RegisterID base, RegisterID dst)
     {
         FIXME_INSN_PRINTING;
         m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
     }
 
+#if WTF_CPU_X86
+    void movl_mr(void* base, RegisterID index, int scale, RegisterID dst)
+    {
+        spew("movl       %d(%s,%d), %s",
+             int(base), nameIReg(index), scale, nameIReg(dst));
+        m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, index, scale, int(base));
+    }
+#endif
+
     void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
     {
         spew("movl       %d(%s,%s,%d), %s",
              offset, nameIReg(base), nameIReg(index), scale, nameIReg(4, dst));
         m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, index, scale, offset);
     }
 
     void movl_i32r(int imm, RegisterID dst)
@@ -1612,17 +1651,39 @@ public:
     
     void movsxd_rr(RegisterID src, RegisterID dst)
     {
         spew("movsxd     %s, %s",
              nameIReg(4, src), nameIReg(8, dst));
         m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src);
     }
     
-    
+    JmpSrc movl_ripr(RegisterID dst)
+    {
+        spew("movl     \?(%%rip), %s",
+             nameIReg(dst));
+        m_formatter.oneByteRipOp(OP_MOV_GvEv, (RegisterID)dst, 0);
+        return JmpSrc(m_formatter.size());
+    }
+
+    JmpSrc movl_rrip(RegisterID src)
+    {
+        spew("movl     %s, \?(%%rip)",
+             nameIReg(src));
+        m_formatter.oneByteRipOp(OP_MOV_EvGv, (RegisterID)src, 0);
+        return JmpSrc(m_formatter.size());
+    }
+
+    JmpSrc movq_ripr(RegisterID dst)
+    {
+        spew("movl     \?(%%rip), %s",
+             nameIReg(dst));
+        m_formatter.oneByteRipOp64(OP_MOV_GvEv, dst, 0);
+        return JmpSrc(m_formatter.size());
+    }
 #else
     void movl_rm(RegisterID src, void* addr)
     {
         spew("movl       %s, 0(%p)",
              nameIReg(4, src), addr);
         if (src == X86Registers::eax)
             movl_EAXm(addr);
         else 
@@ -1649,72 +1710,107 @@ public:
 
     void movb_rm(RegisterID src, int offset, RegisterID base)
     {
         spew("movb       %s, %s0x%x(%s)",
              nameIReg(1, src), PRETTY_PRINT_OFFSET(offset), nameIReg(base));
         m_formatter.oneByteOp8(OP_MOV_EbGv, src, base, offset);
     }
 
+    void movb_rm_disp32(RegisterID src, int offset, RegisterID base)
+    {
+        spew("movb       %s, %s0x%x(%s)",
+             nameIReg(1, src), PRETTY_PRINT_OFFSET(offset), nameIReg(base));
+        m_formatter.oneByteOp8_disp32(OP_MOV_EbGv, src, base, offset);
+    }
+
     void movb_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
     {
         spew("movb       %s, %d(%s,%s,%d)",
              nameIReg(1, src), offset, nameIReg(base), nameIReg(index), scale);
         m_formatter.oneByteOp8(OP_MOV_EbGv, src, base, index, scale, offset);
     }
 
     void movzbl_mr(int offset, RegisterID base, RegisterID dst)
     {
         spew("movzbl     %s0x%x(%s), %s",
              PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameIReg(4, dst));
         m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, base, offset);
     }
 
+    void movzbl_mr_disp32(int offset, RegisterID base, RegisterID dst)
+    {
+        spew("movzbl     %s0x%x(%s), %s",
+             PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameIReg(4, dst));
+        m_formatter.twoByteOp_disp32(OP2_MOVZX_GvEb, dst, base, offset);
+    }
+
     void movzbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
     {
         spew("movzbl     %d(%s,%s,%d), %s",
              offset, nameIReg(base), nameIReg(index), scale, nameIReg(dst));
         m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, base, index, scale, offset);
     }
 
     void movxbl_mr(int offset, RegisterID base, RegisterID dst)
     {
         spew("movxbl     %s0x%x(%s), %s",
              PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameIReg(4, dst));
         m_formatter.twoByteOp(OP2_MOVSX_GvEb, dst, base, offset);
     }
 
+    void movxbl_mr_disp32(int offset, RegisterID base, RegisterID dst)
+    {
+        spew("movxbl     %s0x%x(%s), %s",
+             PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameIReg(4, dst));
+        m_formatter.twoByteOp_disp32(OP2_MOVSX_GvEb, dst, base, offset);
+    }
+
     void movxbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
     {
         spew("movxbl     %d(%s,%s,%d), %s",
              offset, nameIReg(base), nameIReg(index), scale, nameIReg(dst));
         m_formatter.twoByteOp(OP2_MOVSX_GvEb, dst, base, index, scale, offset);
     }
 
     void movzwl_mr(int offset, RegisterID base, RegisterID dst)
     {
         spew("movzwl     %s0x%x(%s), %s",
              PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameIReg(4, dst));
         m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, offset);
     }
 
+    void movzwl_mr_disp32(int offset, RegisterID base, RegisterID dst)
+    {
+        spew("movzwl     %s0x%x(%s), %s",
+             PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameIReg(4, dst));
+        m_formatter.twoByteOp_disp32(OP2_MOVZX_GvEw, dst, base, offset);
+    }
+
     void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
     {
         spew("movzwl     %d(%s,%s,%d), %s",
              offset, nameIReg(base), nameIReg(index), scale, nameIReg(dst));
         m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset);
     }
 
     void movxwl_mr(int offset, RegisterID base, RegisterID dst)
     {
         spew("movxwl     %s0x%x(%s), %s",
              PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameIReg(4, dst));
         m_formatter.twoByteOp(OP2_MOVSX_GvEw, dst, base, offset);
     }
 
+    void movxwl_mr_disp32(int offset, RegisterID base, RegisterID dst)
+    {
+        spew("movxwl     %s0x%x(%s), %s",
+             PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameIReg(4, dst));
+        m_formatter.twoByteOp_disp32(OP2_MOVSX_GvEw, dst, base, offset);
+    }
+
     void movxwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
     {
         spew("movxwl     %d(%s,%s,%d), %s",
              offset, nameIReg(base), nameIReg(index), scale, nameIReg(dst));
         m_formatter.twoByteOp(OP2_MOVSX_GvEw, dst, base, index, scale, offset);
     }
 
     void movzbl_rr(RegisterID src, RegisterID dst)
@@ -1742,16 +1838,24 @@ public:
     }
 #if WTF_CPU_X86_64
     void leaq_mr(int offset, RegisterID base, RegisterID dst)
     {
         spew("leaq       %s0x%x(%s), %s",
              PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameIReg(8,dst));
         m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
     }
+
+    JmpSrc leaq_rip(RegisterID dst)
+    {
+        spew("leaq       \?(%%rip), %s",
+             nameIReg(dst));
+        m_formatter.oneByteRipOp64(OP_LEA, dst, 0);
+        return JmpSrc(m_formatter.size());
+    }
 #endif
 
     // Flow control:
 
     JmpSrc call()
     {
         m_formatter.oneByteOp(OP_CALL_rel32);
         JmpSrc r = m_formatter.immediateRel32();
@@ -2136,32 +2240,56 @@ public:
     void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
     {
         spew("movsd      %s, %s0x%x(%s)",
              nameFPReg(src), PRETTY_PRINT_OFFSET(offset), nameIReg(base));
         m_formatter.prefix(PRE_SSE_F2);
         m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
     }
 
+    void movsd_rm_disp32(XMMRegisterID src, int offset, RegisterID base)
+    {
+        spew("movsd      %s, %s0x%x(%s)",
+             nameFPReg(src), PRETTY_PRINT_OFFSET(offset), nameIReg(base));
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp_disp32(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
+    }
+
     void movss_rm(XMMRegisterID src, int offset, RegisterID base)
     {
         spew("movss      %s, %s0x%x(%s)",
              nameFPReg(src), PRETTY_PRINT_OFFSET(offset), nameIReg(base));
         m_formatter.prefix(PRE_SSE_F3);
         m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
     }
 
+    void movss_rm_disp32(XMMRegisterID src, int offset, RegisterID base)
+    {
+        spew("movss      %s, %s0x%x(%s)",
+             nameFPReg(src), PRETTY_PRINT_OFFSET(offset), nameIReg(base));
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp_disp32(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
+    }
+
     void movss_mr(int offset, RegisterID base, XMMRegisterID dst)
     {
         spew("movss      %s0x%x(%s), %s",
              PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameFPReg(dst));
         m_formatter.prefix(PRE_SSE_F3);
         m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
     }
 
+    void movss_mr_disp32(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        spew("movss      %s0x%x(%s), %s",
+             PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameFPReg(dst));
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp_disp32(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
+    }
+
     void movsd_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
     {
         spew("movsd       %s, %d(%s,%s,%d)", 
              nameFPReg(src), offset, nameIReg(base), nameIReg(index), scale);
         m_formatter.prefix(PRE_SSE_F2);
         m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset);
     }
 
@@ -2184,16 +2312,24 @@ public:
     void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
     {
         spew("movsd      %s0x%x(%s), %s",
              PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameFPReg(dst));
         m_formatter.prefix(PRE_SSE_F2);
         m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
     }
 
+    void movsd_mr_disp32(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        spew("movsd      %s0x%x(%s), %s",
+             PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameFPReg(dst));
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp_disp32(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
+    }
+
     void movsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
     {
         spew("movsd      %d(%s,%s,%d), %s",
              offset, nameIReg(base), nameIReg(index), scale, nameFPReg(dst));
         m_formatter.prefix(PRE_SSE_F2);
         m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, index, scale, offset);
     }
 
@@ -2208,16 +2344,41 @@ public:
 #if !WTF_CPU_X86_64
     void movsd_mr(const void* address, XMMRegisterID dst)
     {
         spew("movsd      %p, %s",
              address, nameFPReg(dst));
         m_formatter.prefix(PRE_SSE_F2);
         m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, address);
     }
+
+    void movsd_rm(XMMRegisterID src, const void* address)
+    {
+        spew("movsd      %s, %p",
+             nameFPReg(src), address);
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, address);
+    }
+#else
+    JmpSrc movsd_ripr(XMMRegisterID dst)
+    {
+        spew("movsd     \?(%%rip), %s",
+             nameFPReg(dst));
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteRipOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, 0);
+        return JmpSrc(m_formatter.size());
+    }
+    JmpSrc movsd_rrip(XMMRegisterID src)
+    {
+        spew("movsd     %s, \?(%%rip)",
+             nameFPReg(src));
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteRipOp(OP2_MOVSD_WsdVsd, (RegisterID)src, 0);
+        return JmpSrc(m_formatter.size());
+    }
 #endif
 
     void movdqa_rm(XMMRegisterID src, int offset, RegisterID base)
     {
         spew("movdqa     %s, %s0x%x(%s)",
              nameFPReg(src), PRETTY_PRINT_OFFSET(offset), nameIReg(base));
         m_formatter.prefix(PRE_SSE_66);
         m_formatter.twoByteOp(OP2_MOVDQA_WsdVsd, (RegisterID)src, base, offset);
@@ -2453,16 +2614,21 @@ public:
         m_formatter.jumpTablePointer(ptr);
     }
 
     void doubleConstant(double d)
     {
         m_formatter.doubleConstant(d);
     }
 
+    void int64Constant(int64_t i)
+    {
+        m_formatter.int64Constant(i);
+    }
+
     // Linking & patching:
     //
     // 'link' and 'patch' methods are for use on unprotected code - such as the code
     // within the AssemblerBuffer, and code being patched by the patch buffer.  Once
     // code has been finalized it is (platform support permitting) within a non-
     // writable region of memory; to modify the code in an execute-only execuable
     // pool the 'repatch' and 'relink' methods should be used.
 
@@ -2749,27 +2915,55 @@ private:
         void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
         {
             m_buffer.ensureSpace(maxInstructionSize);
             emitRexIfNeeded(reg, index, base);
             m_buffer.putByteUnchecked(opcode);
             memoryModRM(reg, base, index, scale, offset);
         }
 
+        void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID index, int scale, int offset)
+        {
+            m_buffer.ensureSpace(maxInstructionSize);
+            emitRexIfNeeded(reg, index, 0);
+            m_buffer.putByteUnchecked(opcode);
+            memoryModRM_disp32(reg, index, scale, offset);
+        }
+
 #if !WTF_CPU_X86_64
         void oneByteOp(OneByteOpcodeID opcode, int reg, void* address)
         {
             m_buffer.ensureSpace(maxInstructionSize);
             m_buffer.putByteUnchecked(opcode);
             memoryModRM(reg, address);
         }
 #else
         void oneByteRipOp(OneByteOpcodeID opcode, int reg, int ripOffset)
         {
             m_buffer.ensureSpace(maxInstructionSize);
+            emitRexIfNeeded(reg, 0, 0);
+            m_buffer.putByteUnchecked(opcode);
+            putModRm(ModRmMemoryNoDisp, reg, noBase);
+            m_buffer.putIntUnchecked(ripOffset);
+        }
+
+        void oneByteRipOp64(OneByteOpcodeID opcode, int reg, int ripOffset)
+        {
+            m_buffer.ensureSpace(maxInstructionSize);
+            emitRexW(reg, 0, 0);
+            m_buffer.putByteUnchecked(opcode);
+            putModRm(ModRmMemoryNoDisp, reg, noBase);
+            m_buffer.putIntUnchecked(ripOffset);
+        }
+
+        void twoByteRipOp(TwoByteOpcodeID opcode, int reg, int ripOffset)
+        {
+            m_buffer.ensureSpace(maxInstructionSize);
+            emitRexIfNeeded(reg, 0, 0);
+            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
             m_buffer.putByteUnchecked(opcode);
             putModRm(ModRmMemoryNoDisp, reg, noBase);
             m_buffer.putIntUnchecked(ripOffset);
         }
 #endif
 
         void twoByteOp(TwoByteOpcodeID opcode)
         {
@@ -2791,16 +2985,25 @@ private:
         {
             m_buffer.ensureSpace(maxInstructionSize);
             emitRexIfNeeded(reg, 0, base);
             m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
             m_buffer.putByteUnchecked(opcode);
             memoryModRM(reg, base, offset);
         }
 
+        void twoByteOp_disp32(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
+        {
+            m_buffer.ensureSpace(maxInstructionSize);
+            emitRexIfNeeded(reg, 0, base);
+            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            m_buffer.putByteUnchecked(opcode);
+            memoryModRM_disp32(reg, base, offset);
+        }
+
         void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
         {
             m_buffer.ensureSpace(maxInstructionSize);
             emitRexIfNeeded(reg, index, base);
             m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
             m_buffer.putByteUnchecked(opcode);
             memoryModRM(reg, base, index, scale, offset);
         }
@@ -2940,16 +3143,27 @@ private:
             ASSERT(!byteRegRequiresRex(reg));
 #endif
             m_buffer.ensureSpace(maxInstructionSize);
             emitRexIf(byteRegRequiresRex(reg), reg, 0, base);
             m_buffer.putByteUnchecked(opcode);
             memoryModRM(reg, base, offset);
         }
 
+        void oneByteOp8_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+        {
+#if !WTF_CPU_X86_64
+            ASSERT(!byteRegRequiresRex(reg));
+#endif
+            m_buffer.ensureSpace(maxInstructionSize);
+            emitRexIf(byteRegRequiresRex(reg), reg, 0, base);
+            m_buffer.putByteUnchecked(opcode);
+            memoryModRM_disp32(reg, base, offset);
+        }
+
         void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
         {
 #if !WTF_CPU_X86_64
             ASSERT(!byteRegRequiresRex(reg));
 #endif
             m_buffer.ensureSpace(maxInstructionSize);
             emitRexIf(byteRegRequiresRex(reg), reg, index, base);
             m_buffer.putByteUnchecked(opcode);
@@ -3023,16 +3237,22 @@ private:
             union {
                 uint64_t u64;
                 double d;
             } u;
             u.d = d;
             m_buffer.putInt64Unchecked(u.u64);
         }
 
+        void int64Constant(int64_t i)
+        {
+            m_buffer.ensureSpace(sizeof(int64_t));
+            m_buffer.putInt64Unchecked(i);
+        }
+
         // Administrative methods:
 
         size_t size() const { return m_buffer.size(); }
         unsigned char *buffer() const { return m_buffer.buffer(); }
         bool oom() const { return m_buffer.oom(); }
         bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
         void* data() const { return m_buffer.data(); }
         void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool** poolp, CodeKind kind) {
@@ -3189,16 +3409,38 @@ private:
                 putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
                 m_buffer.putByteUnchecked(offset);
             } else {
                 putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
                 m_buffer.putIntUnchecked(offset);
             }
         }
 
+        void memoryModRM_disp32(int reg, RegisterID index, int scale, int offset)
+        {
+            ASSERT(index != noIndex);
+
+            // NB: the base-less memoryModRM overloads generate different code
+            // then the base-full memoryModRM overloads in the base == noBase
+            // case. The base-less overloads assume that the desired effective
+            // address is:
+            //
+            //   reg := [scaled index] + disp32
+            //
+            // which means the mod needs to be ModRmMemoryNoDisp. The base-full
+            // overloads pass ModRmMemoryDisp32 in all cases and thus, when
+            // base == noBase (== ebp), the effective address is:
+            //
+            //   reg := [scaled index] + disp32 + [ebp]
+            //
+            // See Intel developer manual, Vol 2, 2.1.5, Table 2-3.
+            putModRmSib(ModRmMemoryNoDisp, reg, noBase, index, scale);
+            m_buffer.putIntUnchecked(offset);
+        }
+
 #if !WTF_CPU_X86_64
         void memoryModRM(int reg, const void* address)
         {
             // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
             putModRm(ModRmMemoryNoDisp, reg, noBase);
             m_buffer.putIntUnchecked(reinterpret_cast<int32_t>(address));
         }
 #endif
--- a/js/src/assembler/jit/ExecutableAllocator.cpp
+++ b/js/src/assembler/jit/ExecutableAllocator.cpp
@@ -35,30 +35,32 @@ size_t ExecutableAllocator::pageSize = 0
 size_t ExecutableAllocator::largeAllocSize = 0;
 
 ExecutablePool::~ExecutablePool()
 {
     m_allocator->releasePoolPages(this);
 }
 
 void
-ExecutableAllocator::sizeOfCode(size_t *jaeger, size_t *ion, size_t *regexp, size_t *unused) const
+ExecutableAllocator::sizeOfCode(size_t *jaeger, size_t *ion, size_t *asmJS, size_t *regexp, size_t *unused) const
 {
     *jaeger = 0;
     *ion    = 0;
+    *asmJS  = 0;
     *regexp = 0;
     *unused = 0;
 
     if (m_pools.initialized()) {
         for (ExecPoolHashSet::Range r = m_pools.all(); !r.empty(); r.popFront()) {
             ExecutablePool* pool = r.front();
             *jaeger += pool->m_jaegerCodeBytes;
             *ion    += pool->m_ionCodeBytes;
+            *asmJS  += pool->m_asmJSCodeBytes;
             *regexp += pool->m_regexpCodeBytes;
             *unused += pool->m_allocation.size - pool->m_jaegerCodeBytes - pool->m_ionCodeBytes
-                                               - pool->m_regexpCodeBytes;
+                                               - pool->m_asmJSCodeBytes - pool->m_regexpCodeBytes;
         }
     }
 }
 
 }
 
 #endif // HAVE(ASSEMBLER)
--- a/js/src/assembler/jit/ExecutableAllocator.h
+++ b/js/src/assembler/jit/ExecutableAllocator.h
@@ -77,17 +77,17 @@ extern  "C" void sync_instruction_memory
 #if ENABLE_ASSEMBLER
 
 //#define DEBUG_STRESS_JSC_ALLOCATOR
 
 namespace JSC {
 
   class ExecutableAllocator;
 
-  enum CodeKind { JAEGER_CODE, ION_CODE, REGEXP_CODE };
+  enum CodeKind { JAEGER_CODE, ION_CODE, REGEXP_CODE, ASMJS_CODE };
 
   // These are reference-counted. A new one starts with a count of 1.
   class ExecutablePool {
 
     friend class ExecutableAllocator;
 private:
     struct Allocation {
         char* pages;
@@ -103,16 +103,17 @@ private:
     Allocation m_allocation;
 
     // Reference count for automatic reclamation.
     unsigned m_refCount;
 
     // Number of bytes currently used for Method and Regexp JIT code.
     size_t m_jaegerCodeBytes;
     size_t m_ionCodeBytes;
+    size_t m_asmJSCodeBytes;
     size_t m_regexpCodeBytes;
 
 public:
     // Flag for downstream use, whether to try to release references to this pool.
     bool m_destroy;
 
     // GC number in which the m_destroy flag was most recently set. Used downstream to
     // remember whether m_destroy was computed for the currently active GC.
@@ -124,17 +125,17 @@ public:
         // XXX: disabled, see bug 654820.
         //JS_ASSERT_IF(willDestroy, m_refCount == 1);
         if (--m_refCount == 0)
             js_delete(this);
     }
 
     ExecutablePool(ExecutableAllocator* allocator, Allocation a)
       : m_allocator(allocator), m_freePtr(a.pages), m_end(m_freePtr + a.size), m_allocation(a),
-        m_refCount(1), m_jaegerCodeBytes(0), m_ionCodeBytes(0), m_regexpCodeBytes(0),
+        m_refCount(1), m_jaegerCodeBytes(0), m_ionCodeBytes(0), m_asmJSCodeBytes(0), m_regexpCodeBytes(0),
         m_destroy(false), m_gcNumber(0)
     { }
 
     ~ExecutablePool();
 
 private:
     // It should be impossible for us to roll over, because only small
     // pools have multiple holders, and they have one holder per chunk
@@ -149,16 +150,17 @@ private:
     {
         JS_ASSERT(n <= available());
         void *result = m_freePtr;
         m_freePtr += n;
 
         switch (kind) {
           case JAEGER_CODE: m_jaegerCodeBytes += n;          break;
           case ION_CODE:    m_ionCodeBytes    += n;          break;
+          case ASMJS_CODE:  m_asmJSCodeBytes  += n;          break;
           case REGEXP_CODE: m_regexpCodeBytes += n;          break;
           default:          JS_NOT_REACHED("bad code kind"); break;
         }
         return result;
     }
 
     size_t available() const {
         JS_ASSERT(m_end >= m_freePtr);
@@ -248,17 +250,17 @@ public:
         JS_ASSERT(pool->m_allocation.pages);
         if (destroyCallback)
             destroyCallback(pool->m_allocation.pages, pool->m_allocation.size);
         systemRelease(pool->m_allocation);
         JS_ASSERT(m_pools.initialized());
         m_pools.remove(m_pools.lookup(pool));   // this asserts if |pool| is not in m_pools
     }
 
-    void sizeOfCode(size_t *jaeger, size_t *ion, size_t *regexp, size_t *unused) const;
+    void sizeOfCode(size_t *jaeger, size_t *ion, size_t *asmJS, size_t *regexp, size_t *unused) const;
 
     void setDestroyCallback(DestroyCallback destroyCallback) {
         this->destroyCallback = destroyCallback;
     }
 
     void setRandomize(bool enabled) {
         allocBehavior = enabled ? AllocationCanRandomize : AllocationDeterministic;
     }
--- a/js/src/builtin/TestingFunctions.cpp
+++ b/js/src/builtin/TestingFunctions.cpp
@@ -1035,16 +1035,21 @@ static JSFunctionSpecWithHelp TestingFun
 "  assertions are disabled."),
 
     JS_FN_HELP("displayName", DisplayName, 1, 0,
 "displayName(fn)",
 "  Gets the display name for a function, which can possibly be a guessed or\n"
 "  inferred name based on where the function was defined. This can be\n"
 "  different from the 'name' property on the function."),
 
+    JS_FN_HELP("isAsmJSCompilationAvailable", IsAsmJSCompilationAvailable, 0, 0,
+"isAsmJSCompilationAvailable",
+"  Returns whether asm.js compilation is currently available or whether it is disabled\n"
+"  (e.g., by the debugger)."),
+
     JS_FN_HELP("inParallelSection", testingFunc_inParallelSection, 0, 0,
 "inParallelSection()",
 "  True if this code is executing within a parallel section."),
 
     JS_FS_HELP_END
 };
 
 bool
--- a/js/src/frontend/BytecodeCompiler.cpp
+++ b/js/src/frontend/BytecodeCompiler.cpp
@@ -7,16 +7,17 @@
 
 #include "frontend/BytecodeCompiler.h"
 
 #include "jsprobes.h"
 
 #include "frontend/BytecodeEmitter.h"
 #include "frontend/FoldConstants.h"
 #include "frontend/NameFunctions.h"
+#include "ion/AsmJS.h"
 #include "vm/GlobalObject.h"
 
 #include "jsinferinlines.h"
 
 #include "frontend/ParseMaps-inl.h"
 #include "frontend/ParseNode-inl.h"
 #include "frontend/Parser-inl.h"
 #include "frontend/SharedContext-inl.h"
@@ -401,16 +402,23 @@ frontend::CompileFunctionBody(JSContext 
 
     if (fn->pn_body) {
         JS_ASSERT(fn->pn_body->isKind(PNK_ARGSBODY));
         fn->pn_body->append(pn);
         fn->pn_body->pn_pos = pn->pn_pos;
         pn = fn->pn_body;
     }
 
+    /*
+     * Do asm.js compilation once the parse tree has been fully assembled but
+     * before emitting since we need to know whether to emit JSOP_LINKASMJS.
+     */
+    if (fn->pn_funbox->useAsm && !CompileAsmJS(cx, parser.tokenStream, fn, script))
+        return false;
+
     if (!SetSourceMap(cx, parser.tokenStream, ss, script))
         return false;
 
     if (!EmitFunctionScript(cx, &funbce, pn))
         return false;
 
     if (!sct.complete())
         return false;
--- a/js/src/frontend/BytecodeEmitter.cpp
+++ b/js/src/frontend/BytecodeEmitter.cpp
@@ -29,16 +29,17 @@
 #include "jsopcode.h"
 #include "jsscript.h"
 #include "jsautooplen.h"        // generated headers last
 
 #include "ds/LifoAlloc.h"
 #include "frontend/BytecodeEmitter.h"
 #include "frontend/Parser.h"
 #include "frontend/TokenStream.h"
+#include "ion/AsmJS.h"
 #include "vm/Debugger.h"
 #include "vm/RegExpObject.h"
 #include "vm/Shape.h"
 
 #include "jsatominlines.h"
 #include "jsscriptinlines.h"
 
 #include "frontend/ParseMaps-inl.h"
@@ -2494,16 +2495,27 @@ frontend::EmitFunctionScript(JSContext *
 
     if (funbox->isGenerator()) {
         bce->switchToProlog();
         if (Emit1(cx, bce, JSOP_GENERATOR) < 0)
             return false;
         bce->switchToMain();
     }
 
+    if (bce->script->asmJS) {
+        /* asm.js means no funny stuff. */
+        JS_ASSERT(!funbox->argumentsHasLocalBinding());
+        JS_ASSERT(!funbox->isGenerator());
+
+        bce->switchToProlog();
+        if (Emit1(cx, bce, JSOP_LINKASMJS) < 0)
+            return false;
+        bce->switchToMain();
+    }
+
     if (!EmitTree(cx, bce, body))
         return false;
 
     /*
      * Always end the script with a JSOP_STOP. Some other parts of the codebase
      * depend on this opcode, e.g. js_InternalInterpret.
      */
     if (Emit1(cx, bce, JSOP_STOP) < 0)
@@ -4375,16 +4387,23 @@ EmitFunc(JSContext *cx, BytecodeEmitter 
                                                       parent->staticLevel + 1,
                                                       bce->script->scriptSource(),
                                                       funbox->bufStart, funbox->bufEnd));
         if (!script)
             return false;
 
         script->bindings = funbox->bindings;
 
+        // Do asm.js compilation at the beginning of emitting to avoid
+        // compiling twice when JS_BufferIsCompilableUnit and to know whether
+        // to emit JSOP_LINKASMJS. Don't fold constants as this will
+        // misrepresent the source JS as written to the type checker.
+        if (funbox->useAsm && !CompileAsmJS(cx, *bce->tokenStream(), pn, script))
+            return false;
+
         BytecodeEmitter bce2(bce, bce->parser, funbox, script, bce->evalCaller,
                              bce->hasGlobalScope, pn->pn_pos.begin.lineno, bce->selfHostingMode);
         if (!bce2.init())
             return false;
 
         /* We measured the max scope depth when we parsed the function. */
         if (!EmitFunctionScript(cx, &bce2, pn->pn_body))
             return false;
--- a/js/src/frontend/FoldConstants.cpp
+++ b/js/src/frontend/FoldConstants.cpp
@@ -11,16 +11,17 @@
 
 #include "frontend/FoldConstants.h"
 #include "frontend/ParseNode.h"
 #include "frontend/Parser.h"
 #include "vm/NumericConversions.h"
 
 #include "jsatominlines.h"
 
+#include "frontend/Parser-inl.h"
 #include "vm/String-inl.h"
 
 using namespace js;
 using namespace js::frontend;
 
 static ParseNode *
 ContainsVarOrConst(ParseNode *pn)
 {
@@ -249,18 +250,27 @@ FoldConstants<FullParseHandler>(JSContex
                                 Parser<FullParseHandler> *parser,
                                 bool inGenexpLambda, bool inCond)
 {
     ParseNode *pn = *pnp;
     ParseNode *pn1 = NULL, *pn2 = NULL, *pn3 = NULL;
 
     JS_CHECK_RECURSION(cx, return false);
 
+    // Don't fold constants if the code has requested "use asm" as
+    // constant-folding will misrepresent the source text for the purpose
+    // of type checking. (Also guard against entering a function containing
+    // "use asm", see PN_FUNC case below.)
+    if (parser->pc->useAsmOrInsideUseAsm())
+        return true;
+
     switch (pn->getArity()) {
       case PN_CODE:
+        if (pn->pn_funbox->useAsmOrInsideUseAsm())
+            return true;
         if (pn->getKind() == PNK_MODULE) {
             if (!FoldConstants(cx, &pn->pn_body, parser))
                 return false;
         } else {
             JS_ASSERT(pn->getKind() == PNK_FUNCTION);
             if (!FoldConstants(cx, &pn->pn_body, parser, pn->pn_funbox->inGenexpLambda))
                 return false;
         }
--- a/js/src/frontend/FullParseHandler.h
+++ b/js/src/frontend/FullParseHandler.h
@@ -115,18 +115,18 @@ class FullParseHandler
         return new_<BinaryNode>(kind, op, left->pn_pos, left, (ParseNode *) NULL);
     }
     ParseNode *newBinary(ParseNodeKind kind, ParseNode *left, ParseNode *right,
                          JSOp op = JSOP_NOP) {
         TokenPos pos = TokenPos::make(left->pn_pos.begin, right->pn_pos.end);
         return new_<BinaryNode>(kind, op, pos, left, right);
     }
     ParseNode *newBinaryOrAppend(ParseNodeKind kind, ParseNode *left, ParseNode *right,
-                                 JSOp op = JSOP_NOP) {
-        return ParseNode::newBinaryOrAppend(kind, op, left, right, this, foldConstants);
+                                 ParseContext<FullParseHandler> *pc, JSOp op = JSOP_NOP) {
+        return ParseNode::newBinaryOrAppend(kind, op, left, right, this, pc, foldConstants);
     }
     void setBinaryRHS(ParseNode *pn, ParseNode *rhs) {
         JS_ASSERT(pn->isArity(PN_BINARY));
         pn->pn_right = rhs;
         pn->pn_pos.end = rhs->pn_pos.end;
     }
 
     ParseNode *newTernary(ParseNodeKind kind,
--- a/js/src/frontend/ParseNode-inl.h
+++ b/js/src/frontend/ParseNode-inl.h
@@ -2,16 +2,17 @@
  * vim: set ts=8 sw=4 et tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef ParseNode_inl_h__
 #define ParseNode_inl_h__
 
+#include "frontend/Parser.h"
 #include "frontend/ParseNode.h"
 
 namespace js {
 namespace frontend {
 
 inline bool
 UpvarCookie::set(JSContext *cx, unsigned newLevel, uint16_t newSlot)
 {
--- a/js/src/frontend/ParseNode.cpp
+++ b/js/src/frontend/ParseNode.cpp
@@ -299,22 +299,30 @@ ParseNode::append(ParseNodeKind kind, JS
             list->pn_xflags |= PNX_CANTFOLD;
     }
 
     return list;
 }
 
 ParseNode *
 ParseNode::newBinaryOrAppend(ParseNodeKind kind, JSOp op, ParseNode *left, ParseNode *right,
-                             FullParseHandler *handler, bool foldConstants)
+                             FullParseHandler *handler, ParseContext<FullParseHandler> *pc,
+                             bool foldConstants)
 {
     if (!left || !right)
         return NULL;
 
     /*
+     * Ensure that the parse tree is faithful to the source when "use asm" (for
+     * the purpose of type checking).
+     */
+    if (pc->useAsmOrInsideUseAsm())
+        return handler->new_<BinaryNode>(kind, op, left, right);
+
+    /*
      * Flatten a left-associative (left-heavy) tree of a given operator into
      * a list to reduce js::FoldConstants and js::frontend::EmitTree recursion.
      */
     if (left->isKind(kind) && left->isOp(op) && (js_CodeSpec[op].format & JOF_LEFTASSOC))
         return append(kind, op, left, right, handler);
 
     /*
      * Fold constant addition immediately, to conserve node space and, what's
--- a/js/src/frontend/ParseNode.h
+++ b/js/src/frontend/ParseNode.h
@@ -585,17 +585,18 @@ struct ParseNode {
 
     /*
      * Either append right to left, if left meets the conditions necessary to
      * append (see append), or form a binary node whose children are right and
      * left.
      */
     static ParseNode *
     newBinaryOrAppend(ParseNodeKind kind, JSOp op, ParseNode *left, ParseNode *right,
-                      FullParseHandler *handler, bool foldConstants);
+                      FullParseHandler *handler, ParseContext<FullParseHandler> *pc,
+                      bool foldConstants);
 
     inline PropertyName *name() const;
     inline JSAtom *atom() const;
 
     /*
      * The pn_expr and lexdef members are arms of an unsafe union. Unless you
      * know exactly what you're doing, use only the following methods to access
      * them. For less overhead and assertions for protection, use pn->expr()
--- a/js/src/frontend/Parser-inl.h
+++ b/js/src/frontend/Parser-inl.h
@@ -5,16 +5,18 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef Parser_inl_h__
 #define Parser_inl_h__
 
 #include "frontend/Parser.h"
 
+#include "frontend/SharedContext-inl.h"
+
 namespace js {
 namespace frontend {
 
 template <typename ParseHandler>
 inline unsigned
 ParseContext<ParseHandler>::blockid()
 {
     return topStmt ? topStmt->blockid : bodyid;
--- a/js/src/frontend/Parser.cpp
+++ b/js/src/frontend/Parser.cpp
@@ -427,16 +427,18 @@ FunctionBox::FunctionBox(JSContext *cx, 
   : ObjectBox(fun, traceListHead),
     SharedContext(cx, strict),
     bindings(),
     bufStart(0),
     bufEnd(0),
     ndefaults(0),
     inWith(false),                  // initialized below
     inGenexpLambda(false),
+    useAsm(false),
+    insideUseAsm(outerpc && outerpc->useAsmOrInsideUseAsm()),
     funCxFlags()
 {
     if (!outerpc) {
         inWith = false;
 
     } else if (outerpc->parsingWith) {
         // This covers cases that don't involve eval().  For example:
         //
@@ -1280,16 +1282,24 @@ Parser<FullParseHandler>::leaveFunction(
                  * setter can either ignore the set (in non-strict mode) or
                  * produce an error (in strict mode).
                  */
                 if (dn->isClosed() || dn->isAssigned())
                     funbox->function()->setIsHeavyweight();
                 continue;
             }
 
+            /*
+             * If there are no uses of this placeholder (e.g., it was created
+             * for an identifierName that turned out to be a label), there is
+             * nothing left to do.
+             */
+            if (!dn->dn_uses)
+                continue;
+
             Definition *outer_dn = pc->decls().lookupFirst(atom);
 
             /*
              * Make sure to deoptimize lexical dependencies that are polluted
              * by eval and function statements (which both flag the function as
              * having an extensible scope) or any enclosing 'with'.
              */
             if (funbox->hasExtensibleScope() || pc->parsingWith)
@@ -2107,16 +2117,23 @@ Parser<ParseHandler>::maybeParseDirectiv
                     // directive prologue -- octal escapes -- and complain now.
                     if (tokenStream.sawOctalEscape()) {
                         report(ParseError, false, null(), JSMSG_DEPRECATED_OCTAL);
                         return false;
                     }
                     pc->sc->strict = true;
                 }
             }
+        } else if (directive == context->names().useAsm) {
+            if (pc->sc->isFunctionBox()) {
+                pc->sc->asFunctionBox()->useAsm = true;
+            } else {
+                if (!report(ParseWarning, false, pn, JSMSG_USE_ASM_DIRECTIVE_FAIL))
+                    return false;
+            }
         }
     }
     return true;
 }
 
 template <>
 void
 Parser<FullParseHandler>::addStatementToList(ParseNode *pn, ParseNode *kid, bool *hasFunctionStmt)
@@ -4481,17 +4498,17 @@ Parser<ParseHandler>::variables(ParseNod
 
             MUST_MATCH_TOKEN(TOK_ASSIGN, JSMSG_BAD_DESTRUCT_DECL);
             JS_ASSERT(tokenStream.currentToken().t_op == JSOP_NOP);
 
             Node init = assignExpr();
             if (!init)
                 return null();
 
-            pn2 = handler.newBinaryOrAppend(PNK_ASSIGN, pn2, init);
+            pn2 = handler.newBinaryOrAppend(PNK_ASSIGN, pn2, init, pc);
             if (!pn2)
                 return null();
             handler.addList(pn, pn2);
             continue;
         }
 #endif /* JS_HAS_DESTRUCTURING */
 
         if (tt != TOK_NAME) {
@@ -4604,30 +4621,30 @@ BEGIN_EXPR_PARSER(mulExpr1)
     TokenKind tt;
     while (pn && ((tt = tokenStream.getToken()) == TOK_STAR || tt == TOK_DIV || tt == TOK_MOD)) {
         ParseNodeKind kind = (tt == TOK_STAR)
                              ? PNK_STAR
                              : (tt == TOK_DIV)
                              ? PNK_DIV
                              : PNK_MOD;
         JSOp op = tokenStream.currentToken().t_op;
-        pn = handler.newBinaryOrAppend(kind, pn, unaryExpr(), op);
+        pn = handler.newBinaryOrAppend(kind, pn, unaryExpr(), pc, op);
     }
     return pn;
 }
 END_EXPR_PARSER(mulExpr1)
 
 BEGIN_EXPR_PARSER(addExpr1)
 {
     Node pn = mulExpr1i();
     while (pn && tokenStream.isCurrentTokenType(TOK_PLUS, TOK_MINUS)) {
         TokenKind tt = tokenStream.currentToken().type;
         JSOp op = (tt == TOK_PLUS) ? JSOP_ADD : JSOP_SUB;
         ParseNodeKind kind = (tt == TOK_PLUS) ? PNK_ADD : PNK_SUB;
-        pn = handler.newBinaryOrAppend(kind, pn, mulExpr1n(), op);
+        pn = handler.newBinaryOrAppend(kind, pn, mulExpr1n(), pc, op);
     }
     return pn;
 }
 END_EXPR_PARSER(addExpr1)
 
 inline ParseNodeKind
 ShiftTokenToParseNodeKind(const Token &token)
 {
@@ -4692,17 +4709,17 @@ BEGIN_EXPR_PARSER(relExpr1)
             /*
              * Recognize the 'in' token as an operator only if we're not
              * currently in the init expr of a for loop.
              */
             (oldParsingForInit == 0 && tokenStream.isCurrentTokenType(TOK_IN)) ||
             tokenStream.isCurrentTokenType(TOK_INSTANCEOF))) {
         ParseNodeKind kind = RelationalTokenToParseNodeKind(tokenStream.currentToken());
         JSOp op = tokenStream.currentToken().t_op;
-        pn = handler.newBinaryOrAppend(kind, pn, shiftExpr1n(), op);
+        pn = handler.newBinaryOrAppend(kind, pn, shiftExpr1n(), pc, op);
     }
     /* Restore previous state of parsingForInit flag. */
     pc->parsingForInit |= oldParsingForInit;
 
     return pn;
 }
 END_EXPR_PARSER(relExpr1)
 
@@ -4736,55 +4753,55 @@ BEGIN_EXPR_PARSER(eqExpr1)
     return left;
 }
 END_EXPR_PARSER(eqExpr1)
 
 BEGIN_EXPR_PARSER(bitAndExpr1)
 {
     Node pn = eqExpr1i();
     while (pn && tokenStream.isCurrentTokenType(TOK_BITAND))
-        pn = handler.newBinaryOrAppend(PNK_BITAND, pn, eqExpr1n(), JSOP_BITAND);
+        pn = handler.newBinaryOrAppend(PNK_BITAND, pn, eqExpr1n(), pc, JSOP_BITAND);
     return pn;
 }
 END_EXPR_PARSER(bitAndExpr1)
 
 BEGIN_EXPR_PARSER(bitXorExpr1)
 {
     Node pn = bitAndExpr1i();
     while (pn && tokenStream.isCurrentTokenType(TOK_BITXOR))
-        pn = handler.newBinaryOrAppend(PNK_BITXOR, pn, bitAndExpr1n(), JSOP_BITXOR);
+        pn = handler.newBinaryOrAppend(PNK_BITXOR, pn, bitAndExpr1n(), pc, JSOP_BITXOR);
     return pn;
 }
 END_EXPR_PARSER(bitXorExpr1)
 
 BEGIN_EXPR_PARSER(bitOrExpr1)
 {
     Node pn = bitXorExpr1i();
     while (pn && tokenStream.isCurrentTokenType(TOK_BITOR))
-        pn = handler.newBinaryOrAppend(PNK_BITOR, pn, bitXorExpr1n(), JSOP_BITOR);
+        pn = handler.newBinaryOrAppend(PNK_BITOR, pn, bitXorExpr1n(), pc, JSOP_BITOR);
     return pn;
 }
 END_EXPR_PARSER(bitOrExpr1)
 
 BEGIN_EXPR_PARSER(andExpr1)
 {
     Node pn = bitOrExpr1i();
     while (pn && tokenStream.isCurrentTokenType(TOK_AND))
-        pn = handler.newBinaryOrAppend(PNK_AND, pn, bitOrExpr1n(), JSOP_AND);
+        pn = handler.newBinaryOrAppend(PNK_AND, pn, bitOrExpr1n(), pc, JSOP_AND);
     return pn;
 }
 END_EXPR_PARSER(andExpr1)
 
 template <typename ParseHandler>
 JS_ALWAYS_INLINE typename ParseHandler::Node
 Parser<ParseHandler>::orExpr1()
 {
     Node pn = andExpr1i();
     while (pn && tokenStream.isCurrentTokenType(TOK_OR))
-        pn = handler.newBinaryOrAppend(PNK_OR, pn, andExpr1n(), JSOP_OR);
+        pn = handler.newBinaryOrAppend(PNK_OR, pn, andExpr1n(), pc, JSOP_OR);
     return pn;
 }
 
 template <typename ParseHandler>
 JS_ALWAYS_INLINE typename ParseHandler::Node
 Parser<ParseHandler>::condExpr1()
 {
     Node condition = orExpr1();
@@ -4902,17 +4919,17 @@ Parser<ParseHandler>::assignExpr()
     JSOp op = tokenStream.currentToken().t_op;
     if (!setAssignmentLhsOps(lhs, op))
         return null();
 
     Node rhs = assignExpr();
     if (!rhs)
         return null();
 
-    return handler.newBinaryOrAppend(kind, lhs, rhs, op);
+    return handler.newBinaryOrAppend(kind, lhs, rhs, pc, op);
 }
 
 template <> bool
 Parser<FullParseHandler>::setLvalKid(ParseNode *pn, ParseNode *kid, const char *name)
 {
     if (!kid->isKind(PNK_NAME) &&
         !kid->isKind(PNK_DOT) &&
         (!kid->isKind(PNK_CALL) ||
--- a/js/src/frontend/Parser.h
+++ b/js/src/frontend/Parser.h
@@ -212,16 +212,20 @@ struct ParseContext                 /* t
     // True if we are at the topmost level of a entire script or function body.
     // For example, while parsing this code we would encounter f1 and f2 at
     // body level, but we would not encounter f3 or f4 at body level:
     //
     //   function f1() { function f2() { } }
     //   if (cond) { function f3() { if (cond) { function f4() { } } } }
     //
     bool atBodyLevel();
+
+    inline bool useAsmOrInsideUseAsm() const {
+        return sc->isFunctionBox() && sc->asFunctionBox()->useAsmOrInsideUseAsm();
+    }
 };
 
 template <typename ParseHandler>
 bool
 GenerateBlockId(ParseContext<ParseHandler> *pc, uint32_t &blockid);
 
 template <typename ParseHandler>
 struct BindData;
--- a/js/src/frontend/SharedContext-inl.h
+++ b/js/src/frontend/SharedContext-inl.h
@@ -37,23 +37,16 @@ SharedContext::asGlobalSharedContext()
 
 inline ModuleBox *
 SharedContext::asModuleBox()
 {
     JS_ASSERT(isModuleBox());
     return static_cast<ModuleBox*>(this);
 }
 
-inline FunctionBox *
-SharedContext::asFunctionBox()
-{
-    JS_ASSERT(isFunctionBox());
-    return static_cast<FunctionBox*>(this);
-}
-
 GlobalSharedContext::GlobalSharedContext(JSContext *cx, JSObject *scopeChain, bool strict)
   : SharedContext(cx, strict),
     scopeChain_(cx, scopeChain)
 {
 }
 
 } /* namespace frontend */
 
--- a/js/src/frontend/SharedContext.h
+++ b/js/src/frontend/SharedContext.h
@@ -199,16 +199,18 @@ class FunctionBox : public ObjectBox, pu
 {
   public:
     Bindings        bindings;               /* bindings for this function */
     size_t          bufStart;
     size_t          bufEnd;
     uint16_t        ndefaults;
     bool            inWith:1;               /* some enclosing scope is a with-statement */
     bool            inGenexpLambda:1;       /* lambda from generator expression */
+    bool            useAsm:1;               /* function contains "use asm" directive */
+    bool            insideUseAsm:1;         /* nested function of a function with a "use asm" directive */
 
     FunctionContextFlags funCxFlags;
 
     template <typename ParseHandler>
     FunctionBox(JSContext *cx, ObjectBox* traceListHead, JSFunction *fun, ParseContext<ParseHandler> *pc,
                 bool strict);
 
     ObjectBox *toObjectBox() { return this; }
@@ -221,18 +223,31 @@ class FunctionBox : public ObjectBox, pu
     bool definitelyNeedsArgsObj()   const { return funCxFlags.definitelyNeedsArgsObj; }
 
     void setIsGenerator()                  { funCxFlags.isGenerator              = true; }
     void setMightAliasLocals()             { funCxFlags.mightAliasLocals         = true; }
     void setHasExtensibleScope()           { funCxFlags.hasExtensibleScope       = true; }
     void setArgumentsHasLocalBinding()     { funCxFlags.argumentsHasLocalBinding = true; }
     void setDefinitelyNeedsArgsObj()       { JS_ASSERT(funCxFlags.argumentsHasLocalBinding);
                                              funCxFlags.definitelyNeedsArgsObj   = true; }
+
+    // Return whether this function has either specified "use asm" or is
+    // (transitively) nested inside a function that has.
+    bool useAsmOrInsideUseAsm() const {
+        return useAsm || insideUseAsm;
+    }
 };
 
+inline FunctionBox *
+SharedContext::asFunctionBox()
+{
+    JS_ASSERT(isFunctionBox());
+    return static_cast<FunctionBox*>(this);
+}
+
 /*
  * NB: If you add a new type of statement that is a scope, add it between
  * STMT_WITH and STMT_CATCH, or you will break StmtInfoBase::linksScope. If you
  * add a non-looping statement type, add it before STMT_DO_LOOP or you will
  * break StmtInfoBase::isLoop().
  *
  * Also remember to keep the statementName array in BytecodeEmitter.cpp in
  * sync.
--- a/js/src/frontend/SyntaxParseHandler.h
+++ b/js/src/frontend/SyntaxParseHandler.h
@@ -67,17 +67,18 @@ class SyntaxParseHandler
     Node newUnary(ParseNodeKind kind, JSOp op = JSOP_NOP) { return NodeGeneric; }
     void setUnaryKid(Node pn, Node kid) {}
 
     Node newBinary(ParseNodeKind kind, JSOp op = JSOP_NOP) { return NodeGeneric; }
     Node newBinary(ParseNodeKind kind, Node left, JSOp op = JSOP_NOP) { return NodeGeneric; }
     Node newBinary(ParseNodeKind kind, Node left, Node right, JSOp op = JSOP_NOP) {
         return NodeGeneric;
     }
-    Node newBinaryOrAppend(ParseNodeKind kind, Node left, Node right, JSOp op = JSOP_NOP) {
+    Node newBinaryOrAppend(ParseNodeKind kind, Node left, Node right,
+                           ParseContext<SyntaxParseHandler> *pc, JSOp op = JSOP_NOP) {
         return NodeGeneric;
     }
     void setBinaryRHS(Node pn, Node rhs) {}
 
     Node newTernary(ParseNodeKind kind, Node first, Node second, Node third, JSOp op = JSOP_NOP) {
         return NodeGeneric;
     }
 
--- a/js/src/frontend/TokenStream.cpp
+++ b/js/src/frontend/TokenStream.cpp
@@ -607,16 +607,25 @@ bool
 TokenStream::reportStrictWarningErrorNumberVA(const TokenPos &pos, unsigned errorNumber, va_list args)
 {
     if (!cx->hasStrictOption())
         return true;
 
     return reportCompileErrorNumberVA(pos, JSREPORT_STRICT | JSREPORT_WARNING, errorNumber, args);
 }
 
+void
+TokenStream::reportAsmJSError(ParseNode *pn, unsigned errorNumber, ...)
+{
+    va_list args;
+    va_start(args, errorNumber);
+    reportCompileErrorNumberVA(pn->pn_pos, JSREPORT_WARNING, errorNumber, args);
+    va_end(args);
+}
+
 /*
  * We have encountered a '\': check for a Unicode escape sequence after it.
  * Return 'true' and the character code value (by value) if we found a
  * Unicode escape sequence.  Otherwise, return 'false'.  In both cases, do not
  * advance along the buffer.
  */
 bool
 TokenStream::peekUnicodeEscape(int *result)
--- a/js/src/frontend/TokenStream.h
+++ b/js/src/frontend/TokenStream.h
@@ -484,16 +484,19 @@ class TokenStream
     // reportError()) in TokenStream, Parser, and BytecodeEmitter.
     bool reportCompileErrorNumberVA(const TokenPos &pos, unsigned flags, unsigned errorNumber,
                                     va_list args);
     bool reportStrictModeErrorNumberVA(const TokenPos &pos, bool strictMode, unsigned errorNumber,
                                        va_list args);
     bool reportStrictWarningErrorNumberVA(const TokenPos &pos, unsigned errorNumber,
                                           va_list args);
 
+    // asm.js reporter
+    void reportAsmJSError(ParseNode *pn, unsigned errorNumber, ...);
+
   private:
     // These are private because they should only be called by the tokenizer
     // while tokenizing not by, for example, BytecodeEmitter.
     bool reportStrictModeError(unsigned errorNumber, ...);
     bool strictMode() const { return strictModeGetter && strictModeGetter->strictMode(); }
 
     void onError();
     static JSAtom *atomize(JSContext *cx, CharBuffer &cb);
--- a/js/src/gc/Barrier.h
+++ b/js/src/gc/Barrier.h
@@ -326,16 +326,17 @@ typedef EncapsulatedPtr<JSObject> Encaps
 typedef EncapsulatedPtr<JSScript> EncapsulatedPtrScript;
 
 typedef RelocatablePtr<JSObject> RelocatablePtrObject;
 typedef RelocatablePtr<JSScript> RelocatablePtrScript;
 
 typedef HeapPtr<JSObject> HeapPtrObject;
 typedef HeapPtr<JSFunction> HeapPtrFunction;
 typedef HeapPtr<JSString> HeapPtrString;
+typedef HeapPtr<PropertyName> HeapPtrPropertyName;
 typedef HeapPtr<JSScript> HeapPtrScript;
 typedef HeapPtr<Shape> HeapPtrShape;
 typedef HeapPtr<BaseShape> HeapPtrBaseShape;
 typedef HeapPtr<types::TypeObject> HeapPtrTypeObject;
 
 /* Useful for hashtables with a HeapPtr as key. */
 template<class T>
 struct HeapPtrHasher
--- a/js/src/gc/Marking.cpp
+++ b/js/src/gc/Marking.cpp
@@ -327,16 +327,17 @@ DeclMarkerImpl(BaseShape, BaseShape)
 DeclMarkerImpl(BaseShape, UnownedBaseShape)
 DeclMarkerImpl(IonCode, ion::IonCode)
 DeclMarkerImpl(Object, ArgumentsObject)
 DeclMarkerImpl(Object, DebugScopeObject)
 DeclMarkerImpl(Object, GlobalObject)
 DeclMarkerImpl(Object, JSObject)
 DeclMarkerImpl(Object, JSFunction)
 DeclMarkerImpl(Object, ScopeObject)
+DeclMarkerImpl(Object, ArrayBufferObject)
 DeclMarkerImpl(Script, JSScript)
 DeclMarkerImpl(Shape, Shape)
 DeclMarkerImpl(String, JSAtom)
 DeclMarkerImpl(String, JSString)
 DeclMarkerImpl(String, JSFlatString)
 DeclMarkerImpl(String, JSLinearString)
 DeclMarkerImpl(String, PropertyName)
 DeclMarkerImpl(TypeObject, js::types::TypeObject)
--- a/js/src/gc/Marking.h
+++ b/js/src/gc/Marking.h
@@ -93,16 +93,17 @@ DeclMarker(BaseShape, BaseShape)
 DeclMarker(BaseShape, UnownedBaseShape)
 DeclMarker(IonCode, ion::IonCode)
 DeclMarker(Object, ArgumentsObject)
 DeclMarker(Object, DebugScopeObject)
 DeclMarker(Object, GlobalObject)
 DeclMarker(Object, JSObject)
 DeclMarker(Object, JSFunction)
 DeclMarker(Object, ScopeObject)
+DeclMarker(Object, ArrayBufferObject)
 DeclMarker(Script, JSScript)
 DeclMarker(Shape, Shape)
 DeclMarker(String, JSAtom)
 DeclMarker(String, JSString)
 DeclMarker(String, JSFlatString)
 DeclMarker(String, JSLinearString)
 DeclMarker(String, PropertyName)
 DeclMarker(TypeObject, types::TypeObject)
new file mode 100644
--- /dev/null
+++ b/js/src/ion/AsmJS.cpp
@@ -0,0 +1,5007 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=99:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jsmath.h"
+#include "frontend/ParseNode.h"
+#include "ion/AsmJS.h"
+#include "ion/AsmJSModule.h"
+
+#include "frontend/ParseNode-inl.h"
+
+using namespace js;
+using namespace js::frontend;
+using namespace mozilla;
+
+#ifdef JS_ASMJS
+
+#include "ion/CodeGenerator.h"
+#include "ion/MIR.h"
+#include "ion/MIRGraph.h"
+
+using namespace js::ion;
+
+/*****************************************************************************/
+// ParseNode utilities
+
+static inline ParseNode *
+NextNode(ParseNode *pn)
+{
+    return pn->pn_next;
+}
+
+static inline ParseNode *
+UnaryKid(ParseNode *pn)
+{
+    JS_ASSERT(pn->isArity(PN_UNARY));
+    return pn->pn_kid;
+}
+
+static inline ParseNode *
+BinaryRight(ParseNode *pn)
+{
+    JS_ASSERT(pn->isArity(PN_BINARY));
+    return pn->pn_right;
+}
+
+static inline ParseNode *
+BinaryLeft(ParseNode *pn)
+{
+    JS_ASSERT(pn->isArity(PN_BINARY));
+    return pn->pn_left;
+}
+
+static inline ParseNode *
+TernaryKid1(ParseNode *pn)
+{
+    JS_ASSERT(pn->isArity(PN_TERNARY));
+    return pn->pn_kid1;
+}
+
+static inline ParseNode *
+TernaryKid2(ParseNode *pn)
+{
+    JS_ASSERT(pn->isArity(PN_TERNARY));
+    return pn->pn_kid2;
+}
+
+static inline ParseNode *
+TernaryKid3(ParseNode *pn)
+{
+    JS_ASSERT(pn->isArity(PN_TERNARY));
+    return pn->pn_kid3;
+}
+
+static inline ParseNode *
+ListHead(ParseNode *pn)
+{
+    JS_ASSERT(pn->isArity(PN_LIST));
+    return pn->pn_head;
+}
+
+static inline unsigned
+ListLength(ParseNode *pn)
+{
+    JS_ASSERT(pn->isArity(PN_LIST));
+    return pn->pn_count;
+}
+
+static inline ParseNode *
+CallCallee(ParseNode *pn)
+{
+    JS_ASSERT(pn->isKind(PNK_CALL));
+    return ListHead(pn);
+}
+
+static inline unsigned
+CallArgListLength(ParseNode *pn)
+{
+    JS_ASSERT(pn->isKind(PNK_CALL));
+    JS_ASSERT(ListLength(pn) >= 1);
+    return ListLength(pn) - 1;
+}
+
+static inline ParseNode *
+CallArgList(ParseNode *pn)
+{
+    JS_ASSERT(pn->isKind(PNK_CALL));
+    return NextNode(ListHead(pn));
+}
+
+static inline ParseNode *
+VarListHead(ParseNode *pn)
+{
+    JS_ASSERT(pn->isKind(PNK_VAR));
+    return ListHead(pn);
+}
+
+static inline ParseNode *
+CaseExpr(ParseNode *pn)
+{
+    JS_ASSERT(pn->isKind(PNK_CASE) || pn->isKind(PNK_DEFAULT));
+    return BinaryLeft(pn);
+}
+
+static inline ParseNode *
+CaseBody(ParseNode *pn)
+{
+    JS_ASSERT(pn->isKind(PNK_CASE) || pn->isKind(PNK_DEFAULT));
+    return BinaryRight(pn);
+}
+
+static inline JSAtom *
+StringAtom(ParseNode *pn)
+{
+    JS_ASSERT(pn->isKind(PNK_STRING));
+    return pn->pn_atom;
+}
+
+static inline bool
+IsExpressionStatement(ParseNode *pn)
+{
+    return pn->isKind(PNK_SEMI);
+}
+
+static inline ParseNode *
+ExpressionStatementExpr(ParseNode *pn)
+{
+    JS_ASSERT(pn->isKind(PNK_SEMI));
+    return UnaryKid(pn);
+}
+
+static inline PropertyName *
+LoopControlMaybeLabel(ParseNode *pn)
+{
+    JS_ASSERT(pn->isKind(PNK_BREAK) || pn->isKind(PNK_CONTINUE));
+    JS_ASSERT(pn->isArity(PN_NULLARY));
+    return pn->as<LoopControlStatement>().label();
+}
+
+static inline PropertyName *
+LabeledStatementLabel(ParseNode *pn)
+{
+    JS_ASSERT(pn->isKind(PNK_COLON));
+    return pn->pn_atom->asPropertyName();
+}
+
+static inline ParseNode *
+LabeledStatementStatement(ParseNode *pn)
+{
+    JS_ASSERT(pn->isKind(PNK_COLON));
+    return pn->expr();
+}
+
+static double
+NumberNodeValue(ParseNode *pn)
+{
+    JS_ASSERT(pn->isKind(PNK_NUMBER));
+    return pn->pn_dval;
+}
+
+static bool
+NumberNodeHasFrac(ParseNode *pn)
+{
+    JS_ASSERT(pn->isKind(PNK_NUMBER));
+    return pn->pn_u.number.decimalPoint == HasDecimal;
+}
+
+static ParseNode *
+DotBase(ParseNode *pn)
+{
+    JS_ASSERT(pn->isKind(PNK_DOT));
+    JS_ASSERT(pn->isArity(PN_NAME));
+    return pn->expr();
+}
+
+static PropertyName *
+DotMember(ParseNode *pn)
+{
+    JS_ASSERT(pn->isKind(PNK_DOT));
+    JS_ASSERT(pn->isArity(PN_NAME));
+    return pn->pn_atom->asPropertyName();
+}
+
+static ParseNode *
+ElemBase(ParseNode *pn)
+{
+    JS_ASSERT(pn->isKind(PNK_ELEM));
+    return BinaryLeft(pn);
+}
+
+static ParseNode *
+ElemIndex(ParseNode *pn)
+{
+    JS_ASSERT(pn->isKind(PNK_ELEM));
+    return BinaryRight(pn);
+}
+
+static inline JSFunction *
+FunctionObject(ParseNode *fn)
+{
+    JS_ASSERT(fn->isKind(PNK_FUNCTION));
+    JS_ASSERT(fn->isArity(PN_CODE));
+    return fn->pn_funbox->function();
+}
+
+static inline PropertyName *
+FunctionName(ParseNode *fn)
+{
+    if (JSAtom *atom = FunctionObject(fn)->atom())
+        return atom->asPropertyName();
+    return NULL;
+}
+
+static inline ParseNode *
+FunctionArgsList(ParseNode *fn, unsigned *numFormals)
+{
+    JS_ASSERT(fn->isKind(PNK_FUNCTION));
+    ParseNode *argsBody = fn->pn_body;
+    JS_ASSERT(argsBody->isKind(PNK_ARGSBODY));
+    *numFormals = argsBody->pn_count - 1;
+    return ListHead(argsBody);
+}
+
+static inline bool
+FunctionHasStatementList(ParseNode *fn)
+{
+    JS_ASSERT(fn->isKind(PNK_FUNCTION));
+    ParseNode *argsBody = fn->pn_body;
+    JS_ASSERT(argsBody->isKind(PNK_ARGSBODY));
+    ParseNode *body = argsBody->last();
+    return body->isKind(PNK_STATEMENTLIST);
+}
+
+static inline ParseNode *
+FunctionStatementList(ParseNode *fn)
+{
+    JS_ASSERT(FunctionHasStatementList(fn));
+    return fn->pn_body->last();
+}
+
+static inline ParseNode *
+FunctionLastStatementOrNull(ParseNode *fn)
+{
+    ParseNode *list = FunctionStatementList(fn);
+    return list->pn_count == 0 ? NULL : list->last();
+}
+
+static inline bool
+IsNormalObjectField(JSContext *cx, ParseNode *pn)
+{
+    JS_ASSERT(pn->isKind(PNK_COLON));
+    return pn->getOp() == JSOP_INITPROP &&
+           BinaryLeft(pn)->isKind(PNK_NAME) &&
+           BinaryLeft(pn)->name() != cx->names().proto;
+}
+
+static inline PropertyName *
+ObjectNormalFieldName(JSContext *cx, ParseNode *pn)
+{
+    JS_ASSERT(IsNormalObjectField(cx, pn));
+    return BinaryLeft(pn)->name();
+}
+
+static inline ParseNode *
+ObjectFieldInitializer(ParseNode *pn)
+{
+    JS_ASSERT(pn->isKind(PNK_COLON));
+    return BinaryRight(pn);
+}
+
+static inline bool
+IsDefinition(ParseNode *pn)
+{
+    return pn->isKind(PNK_NAME) && pn->isDefn();
+}
+
+static inline ParseNode *
+MaybeDefinitionInitializer(ParseNode *pn)
+{
+    JS_ASSERT(IsDefinition(pn));
+    return pn->expr();
+}
+
+static inline bool
+IsUseOfName(ParseNode *pn, PropertyName *name)
+{
+    return pn->isKind(PNK_NAME) && pn->name() == name;
+}
+
+static inline ParseNode *
+SkipEmptyStatements(ParseNode *pn)
+{
+    while (pn && pn->isKind(PNK_SEMI) && !UnaryKid(pn))
+        pn = pn->pn_next;
+    return pn;
+}
+
+static inline ParseNode *
+NextNonEmptyStatement(ParseNode *pn)
+{
+    return SkipEmptyStatements(pn->pn_next);
+}
+
+/*****************************************************************************/
+
+// Represents the type of a general asm.js expression.
+class Type
+{
+  public:
+    enum Which {
+        Double,
+        Doublish,
+        Fixnum,
+        Int,
+        Signed,
+        Unsigned,
+        Intish,
+        Void
+    };
+
+  private:
+    Which which_;
+
+  public:
+    Type() : which_(Which(-1)) {}
+    Type(Which w) : which_(w) {}
+
+    bool operator==(Type rhs) const { return which_ == rhs.which_; }
+    bool operator!=(Type rhs) const { return which_ != rhs.which_; }
+
+    bool isSigned() const {
+        return which_ == Signed || which_ == Fixnum;
+    }
+
+    bool isUnsigned() const {
+        return which_ == Unsigned || which_ == Fixnum;
+    }
+
+    bool isInt() const {
+        return isSigned() || isUnsigned() || which_ == Int;
+    }
+
+    bool isIntish() const {
+        return isInt() || which_ == Intish;
+    }
+
+    bool isDouble() const {
+        return which_ == Double;
+    }
+
+    bool isDoublish() const {
+        return isDouble() || which_ == Doublish;
+    }
+
+    bool isVoid() const {
+        return which_ == Void;
+    }
+
+    bool isExtern() const {
+        return isDouble() || isSigned() || isUnsigned();
+    }
+
+    MIRType toMIRType() const {
+        switch (which_) {
+          case Double:
+          case Doublish:
+            return MIRType_Double;
+          case Fixnum:
+          case Int:
+          case Signed:
+          case Unsigned:
+          case Intish:
+            return MIRType_Int32;
+          case Void:
+            return MIRType_None;
+        }
+        JS_NOT_REACHED("Invalid Type");
+        return MIRType_None;
+    }
+};
+
+// Represents the subset of Type that can be used as the return type of a
+// function.
+class RetType
+{
+  public:
+    enum Which {
+        Void = Type::Void,
+        Signed = Type::Signed,
+        Double = Type::Double
+    };
+
+  private:
+    Which which_;
+
+  public:
+    RetType() {}
+    RetType(Which w) : which_(w) {}
+    RetType(AsmJSCoercion coercion) {
+        switch (coercion) {
+          case AsmJS_ToInt32: which_ = Signed; break;
+          case AsmJS_ToNumber: which_ = Double; break;
+        }
+    }
+    Which which() const {
+        return which_;
+    }
+    Type toType() const {
+        return Type::Which(which_);
+    }
+    AsmJSModule::ReturnType toModuleReturnType() const {
+        switch (which_) {
+          case Void: return AsmJSModule::Return_Void;
+          case Signed: return AsmJSModule::Return_Int32;
+          case Double: return AsmJSModule::Return_Double;
+        }
+        JS_NOT_REACHED("Unexpected return type");
+        return AsmJSModule::Return_Void;
+    }
+    MIRType toMIRType() const {
+        switch (which_) {
+          case Void: return MIRType_None;
+          case Signed: return MIRType_Int32;
+          case Double: return MIRType_Double;
+        }
+        JS_NOT_REACHED("Unexpected return type");
+        return MIRType_None;
+    }
+    bool operator==(RetType rhs) const { return which_ == rhs.which_; }
+    bool operator!=(RetType rhs) const { return which_ != rhs.which_; }
+};
+
+// Implements <: (subtype) operator when the rhs is a RetType
+static inline bool
+operator<=(Type lhs, RetType rhs)
+{
+    switch (rhs.which()) {
+      case RetType::Signed: return lhs.isSigned();
+      case RetType::Double: return lhs == Type::Double;
+      case RetType::Void:   return lhs == Type::Void;
+    }
+    JS_NOT_REACHED("Unexpected rhs type");
+    return false;
+}
+
+// Represents the subset of Type that can be used as a variable or
+// argument's type. Note: AsmJSCoercion and VarType are kept separate to
+// make very clear the signed/int distinction: a coercion may explicitly sign
+// an *expression* but, when stored as a variable, this signedness information
+// is explicitly thrown away by the asm.js type system. E.g., in
+//
+//   function f(i) {
+//     i = i | 0;             (1)
+//     if (...)
+//         i = foo() >>> 0;
+//     else
+//         i = bar() | 0;
+//     return i | 0;          (2)
+//
+// the AsmJSCoercion of (1) is Signed (since | performs ToInt32) but, when
+// translated to a VarType, the result is a plain Int since, as shown, it
+// is legal to assign both Signed and Unsigned (or some other Int) values to
+// it. For (2), the AsmJSCoercion is also Signed but, when translated to a
+// RetType, the result is Signed since callers (asm.js and non-asm.js) can
+// rely on the return value being Signed.
+class VarType
+{
+  public:
+    enum Which {
+        Int = Type::Int,
+        Double = Type::Double
+    };
+
+  private:
+    Which which_;
+
+  public:
+    VarType()
+      : which_(Which(-1)) {}
+    VarType(Which w)
+      : which_(w) {}
+    VarType(AsmJSCoercion coercion) {
+        switch (coercion) {
+          case AsmJS_ToInt32: which_ = Int; break;
+          case AsmJS_ToNumber: which_ = Double; break;
+        }
+    }
+    Which which() const {
+        return which_;
+    }
+    Type toType() const {
+        return Type::Which(which_);
+    }
+    MIRType toMIRType() const {
+        return which_ == Int ? MIRType_Int32 : MIRType_Double;
+    }
+    AsmJSCoercion toCoercion() const {
+        return which_ == Int ? AsmJS_ToInt32 : AsmJS_ToNumber;
+    }
+    static VarType FromMIRType(MIRType type) {
+        JS_ASSERT(type == MIRType_Int32 || type == MIRType_Double);
+        return type == MIRType_Int32 ? Int : Double;
+    }
+    bool operator==(VarType rhs) const { return which_ == rhs.which_; }
+    bool operator!=(VarType rhs) const { return which_ != rhs.which_; }
+};
+
+// Implements <: (subtype) operator when the rhs is a VarType
+static inline bool
+operator<=(Type lhs, VarType rhs)
+{
+    switch (rhs.which()) {
+      case VarType::Int:    return lhs.isInt();
+      case VarType::Double: return lhs.isDouble();
+    }
+    JS_NOT_REACHED("Unexpected rhs type");
+    return false;
+}
+
+// Passed from parent expressions to child expressions to indicate if and how
+// the child expression's result will be coerced. While most type checking
+// occurs bottom-up (with child expressions returning the type of the result
+// and parents checking these types), FFI calls naturally want to know the
+// parent's context to determine the appropriate result type. If a parent
+// passes NoCoercion to an FFI call, then the FFI's return type will be "Void"
+// which will cause a type error if the result is used.
+//
+// The other application of Use is to support the asm.js type rule which
+// allows (a-b+c-d+e)|0 without intermediate conversions. The type system has
+// only binary +/- nodes so we simulate the n-ary expression by having the
+// outer parent +/- expression pass in Use::AddOrSub so that the inner
+// expression knows to return type Int instead of Intish.
+class Use
+{
+  public:
+    enum Which {
+        NoCoercion,
+        ToInt32,
+        ToNumber,
+        AddOrSub
+    };
+
+  private:
+    Which which_;
+    unsigned *pcount_;
+
+  public:
+    Use()
+      : which_(Which(-1)), pcount_(NULL) {}
+    Use(Which w)
+      : which_(w), pcount_(NULL) { JS_ASSERT(w != AddOrSub); }
+    Use(unsigned *pcount)
+      : which_(AddOrSub), pcount_(pcount) {}
+    Which which() const {
+        return which_;
+    }
+    unsigned &addOrSubCount() const {
+        JS_ASSERT(which_ == AddOrSub);
+        return *pcount_;
+    }
+    Type toFFIReturnType() const {
+        switch (which_) {
+          case NoCoercion: return Type::Void;
+          case ToInt32: return Type::Intish;
+          case ToNumber: return Type::Doublish;
+          case AddOrSub: return Type::Void;
+        }
+        JS_NOT_REACHED("unexpected use type");
+        return Type::Void;
+    }
+    MIRType toMIRType() const {
+        switch (which_) {
+          case NoCoercion: return MIRType_None;
+          case ToInt32: return MIRType_Int32;
+          case ToNumber: return MIRType_Double;
+          case AddOrSub: return MIRType_None;
+        }
+        JS_NOT_REACHED("unexpected use type");
+        return MIRType_None;
+    }
+    bool operator==(Use rhs) const { return which_ == rhs.which_; }
+    bool operator!=(Use rhs) const { return which_ != rhs.which_; }
+};
+
+/*****************************************************************************/
+// Numeric literal utilities
+
+// Represents the type and value of an asm.js numeric literal.
+//
+// A literal is a double iff the literal contains an exponent or decimal point
+// (even if the fractional part is 0). Otherwise, integers may be classified:
+//  fixnum: [0, 2^31)
+//  negative int: [-2^31, 0)
+//  big unsigned: [2^31, 2^32)
+//  out of range: otherwise
+class NumLit
+{
+  public:
+    enum Which {
+        Fixnum = Type::Fixnum,
+        NegativeInt = Type::Signed,
+        BigUnsigned = Type::Unsigned,
+        Double = Type::Double,
+        OutOfRangeInt = -1
+    };
+
+  private:
+    Which which_;
+    Value v_;
+
+  public:
+    NumLit(Which w, Value v)
+      : which_(w), v_(v)
+    {}
+
+    Which which() const {
+        return which_;
+    }
+
+    int32_t toInt32() const {
+        JS_ASSERT(which_ == Fixnum || which_ == NegativeInt || which_ == BigUnsigned);
+        return v_.toInt32();
+    }
+
+    double toDouble() const {
+        return v_.toDouble();
+    }
+
+    Type type() const {
+        JS_ASSERT(which_ != OutOfRangeInt);
+        return Type::Which(which_);
+    }
+
+    Value value() const {
+        JS_ASSERT(which_ != OutOfRangeInt);
+        return v_;
+    }
+};
+
+// Note: '-' is never rolled into the number; numbers are always positive and
+// negations must be applied manually.
+static bool
+IsNumericLiteral(ParseNode *pn)
+{
+    return pn->isKind(PNK_NUMBER) ||
+           (pn->isKind(PNK_NEG) && UnaryKid(pn)->isKind(PNK_NUMBER));
+}
+
+static NumLit
+ExtractNumericLiteral(ParseNode *pn)
+{
+    JS_ASSERT(IsNumericLiteral(pn));
+    ParseNode *numberNode;
+    double d;
+    if (pn->isKind(PNK_NEG)) {
+        numberNode = UnaryKid(pn);
+        d = -NumberNodeValue(numberNode);
+    } else {
+        numberNode = pn;
+        d = NumberNodeValue(numberNode);
+    }
+
+    if (NumberNodeHasFrac(numberNode))
+        return NumLit(NumLit::Double, DoubleValue(d));
+
+    int64_t i64 = int64_t(d);
+    if (d != double(i64))
+        return NumLit(NumLit::OutOfRangeInt, UndefinedValue());
+
+    if (i64 >= 0) {
+        if (i64 <= INT32_MAX)
+            return NumLit(NumLit::Fixnum, Int32Value(i64));
+        if (i64 <= UINT32_MAX)
+            return NumLit(NumLit::BigUnsigned, Int32Value(uint32_t(i64)));
+        return NumLit(NumLit::OutOfRangeInt, UndefinedValue());
+    }
+    if (i64 >= INT32_MIN)
+        return NumLit(NumLit::NegativeInt, Int32Value(i64));
+    return NumLit(NumLit::OutOfRangeInt, UndefinedValue());
+}
+
+static inline bool
+IsLiteralUint32(ParseNode *pn, uint32_t *u32)
+{
+    if (!IsNumericLiteral(pn))
+        return false;
+
+    NumLit literal = ExtractNumericLiteral(pn);
+    switch (literal.which()) {
+      case NumLit::Fixnum:
+      case NumLit::BigUnsigned:
+        *u32 = uint32_t(literal.toInt32());
+        return true;
+      case NumLit::NegativeInt:
+      case NumLit::Double:
+      case NumLit::OutOfRangeInt:
+        return false;
+    }
+
+    JS_NOT_REACHED("Bad literal type");
+}
+
+static inline bool
+IsBits32(ParseNode *pn, int32_t i)
+{
+    if (!IsNumericLiteral(pn))
+        return false;
+
+    NumLit literal = ExtractNumericLiteral(pn);
+    switch (literal.which()) {
+      case NumLit::Fixnum:
+      case NumLit::BigUnsigned:
+      case NumLit::NegativeInt:
+        return literal.toInt32() == i;
+      case NumLit::Double:
+      case NumLit::OutOfRangeInt:
+        return false;
+    }
+
+    JS_NOT_REACHED("Bad literal type");
+}
+
+/*****************************************************************************/
+// Typed array utilities
+
+static Type
+TypedArrayLoadType(ArrayBufferView::ViewType viewType)
+{
+    switch (viewType) {
+      case ArrayBufferView::TYPE_INT8:
+      case ArrayBufferView::TYPE_INT16:
+      case ArrayBufferView::TYPE_INT32:
+      case ArrayBufferView::TYPE_UINT8:
+      case ArrayBufferView::TYPE_UINT16:
+      case ArrayBufferView::TYPE_UINT32:
+        return Type::Intish;
+      case ArrayBufferView::TYPE_FLOAT32:
+      case ArrayBufferView::TYPE_FLOAT64:
+        return Type::Doublish;
+      default:;
+    }
+    JS_NOT_REACHED("Unexpected array type");
+    return Type();
+}
+
+enum ArrayStoreEnum {
+    ArrayStore_Intish,
+    ArrayStore_Double
+};
+
+static ArrayStoreEnum
+TypedArrayStoreType(ArrayBufferView::ViewType viewType)
+{
+    switch (viewType) {
+      case ArrayBufferView::TYPE_INT8:
+      case ArrayBufferView::TYPE_INT16:
+      case ArrayBufferView::TYPE_INT32:
+      case ArrayBufferView::TYPE_UINT8:
+      case ArrayBufferView::TYPE_UINT16:
+      case ArrayBufferView::TYPE_UINT32:
+        return ArrayStore_Intish;
+      case ArrayBufferView::TYPE_FLOAT32:
+      case ArrayBufferView::TYPE_FLOAT64:
+        return ArrayStore_Double;
+      default:;
+    }
+    JS_NOT_REACHED("Unexpected array type");
+    return ArrayStore_Double;
+}
+
+/*****************************************************************************/
+
+typedef Vector<PropertyName*,1> LabelVector;
+typedef Vector<MBasicBlock*,16> CaseVector;
+
+// ModuleCompiler encapsulates the compilation of an entire asm.js module. Over
+// the course of a ModuleCompiler object's lifetime, many FunctionCompiler
+// objects will be created and destroyed in sequence, one for each function in
+// the module.
+//
+// *** asm.js FFI calls ***
+//
+// asm.js allows calling out to non-asm.js via "FFI calls". The asm.js type
+// system does not place any constraints on the FFI call. In particular:
+//  - an FFI call's target is not known or speculated at module-compile time;
+//  - a single external function can be called with different signatures.
+//
+// If performance didn't matter, all FFI calls could simply box their arguments
+// and call js::Invoke. However, we'd like to be able to specialize FFI calls
+// to be more efficient in several cases:
+//
+//  - for calls to JS functions which have been jitted, we'd like to call
+//    directly into JIT code without going through C++.
+//
+//  - for calls to certain builtins, we'd like to call directly into the C++
+//    code for the builtin without going through the general call path.
+//
+// All of this requires dynamic specialization techniques which must happen
+// after module compilation. To support this, at module-compilation time, each
+// FFI call generates a call signature according to the system ABI, as if the
+// callee was a C++ function taking/returning the same types as the caller was
+// passing/expecting. The callee is loaded from a fixed offset in the global
+// data array which allows the callee to change at runtime. Initially, the
+// callee is a stub which boxes its arguments and calls js::Invoke.
+//
+// To do this, we need to generate a callee stub for each pairing of FFI callee
+// and signature. We call this pairing an "exit". For example, this code has
+// two external functions and three exits:
+//
+//  function f(global, imports) {
+//    "use asm";
+//    var foo = imports.foo;
+//    var bar = imports.bar;
+//    function g() {
+//      foo(1);      // Exit #1: (int) -> void
+//      foo(1.5);    // Exit #2: (double) -> void
+//      bar(1)|0;    // Exit #3: (int) -> int
+//      bar(2)|0;    // Exit #3: (int) -> int
+//    }
+//
+// The ModuleCompiler maintains a hash table (ExitMap) which allows a call site
+// to add a new exit or reuse an existing one. The key is an ExitDescriptor
+// (which holds the exit pairing) and the value is an index into the
+// Vector<Exit> stored in the AsmJSModule.
+class ModuleCompiler
+{
+  public:
+    // A single function being compiled: its parse nodes, its signature
+    // (argument MIR types and return type) and the label of its entry
+    // point in the generated code.
+    class Func
+    {
+        ParseNode *fn_;
+        ParseNode *body_;
+        MIRTypeVector argTypes_;
+        RetType returnType_;
+        // Mutable so that codeLabel() can hand out a bindable Label from
+        // a const Func.
+        mutable Label code_;
+
+      public:
+        Func(ParseNode *fn, ParseNode *body, MoveRef<MIRTypeVector> types, RetType returnType)
+          : fn_(fn),
+            body_(body),
+            argTypes_(types),
+            returnType_(returnType),
+            code_()
+        {}
+
+        Func(MoveRef<Func> rhs)
+          : fn_(rhs->fn_),
+            body_(rhs->body_),
+            argTypes_(Move(rhs->argTypes_)),
+            returnType_(rhs->returnType_),
+            code_(rhs->code_)
+        {}
+
+        ~Func()
+        {
+            // Avoid spurious Label assertions on compilation failure.
+            if (!code_.bound())
+                code_.bind(0);
+        }
+
+        ParseNode *fn() const { return fn_; }
+        ParseNode *body() const { return body_; }
+        unsigned numArgs() const { return argTypes_.length(); }
+        VarType argType(unsigned i) const { return VarType::FromMIRType(argTypes_[i]); }
+        const MIRTypeVector &argMIRTypes() const { return argTypes_; }
+        RetType returnType() const { return returnType_; }
+        Label *codeLabel() const { return &code_; }
+    };
+
+    // A module-level binding: a tagged union over every kind of name an
+    // asm.js module can declare (variables, functions, function-pointer
+    // tables, FFI imports, typed-array views, Math builtins, constants).
+    class Global
+    {
+      public:
+        enum Which { Variable, Function, FuncPtrTable, FFI, ArrayView, MathBuiltin, Constant };
+
+      private:
+        Which which_;
+        union {
+            struct {
+                uint32_t index_;
+                VarType::Which type_;
+            } var;
+            uint32_t funcIndex_;
+            uint32_t funcPtrTableIndex_;
+            uint32_t ffiIndex_;
+            ArrayBufferView::ViewType viewType_;
+            AsmJSMathBuiltin mathBuiltin_;
+            double constant_;
+        } u;
+
+        friend class ModuleCompiler;
+
+        Global(Which which) : which_(which) {}
+
+      public:
+        Which which() const {
+            return which_;
+        }
+        VarType varType() const {
+            JS_ASSERT(which_ == Variable);
+            return VarType(u.var.type_);
+        }
+        uint32_t varIndex() const {
+            JS_ASSERT(which_ == Variable);
+            return u.var.index_;
+        }
+        uint32_t funcIndex() const {
+            JS_ASSERT(which_ == Function);
+            return u.funcIndex_;
+        }
+        uint32_t funcPtrTableIndex() const {
+            JS_ASSERT(which_ == FuncPtrTable);
+            return u.funcPtrTableIndex_;
+        }
+        unsigned ffiIndex() const {
+            JS_ASSERT(which_ == FFI);
+            return u.ffiIndex_;
+        }
+        ArrayBufferView::ViewType viewType() const {
+            JS_ASSERT(which_ == ArrayView);
+            return u.viewType_;
+        }
+        AsmJSMathBuiltin mathBuiltin() const {
+            JS_ASSERT(which_ == MathBuiltin);
+            return u.mathBuiltin_;
+        }
+        double constant() const {
+            JS_ASSERT(which_ == Constant);
+            return u.constant_;
+        }
+    };
+
+    typedef Vector<const Func*> FuncPtrVector;
+
+    // A table of functions callable indirectly: a power-of-two-sized
+    // vector of Funcs plus its base index into the module's flattened
+    // table-element array.
+    class FuncPtrTable
+    {
+        FuncPtrVector elems_;
+        unsigned baseIndex_;
+
+      public:
+        FuncPtrTable(MoveRef<FuncPtrVector> elems, unsigned baseIndex)
+          : elems_(elems), baseIndex_(baseIndex) {}
+        FuncPtrTable(MoveRef<FuncPtrTable> rhs)
+          : elems_(Move(rhs->elems_)), baseIndex_(rhs->baseIndex_) {}
+
+        const Func &sig() const { return *elems_[0]; }
+        unsigned numElems() const { return elems_.length(); }
+        const Func &elem(unsigned i) const { return *elems_[i]; }
+        unsigned baseIndex() const { return baseIndex_; }
+        unsigned mask() const { JS_ASSERT(IsPowerOfTwo(numElems())); return numElems() - 1; }
+    };
+
+    typedef Vector<FuncPtrTable> FuncPtrTableVector;
+
+    class ExitDescriptor
+    {
+        PropertyName *name_;
+        MIRTypeVector argTypes_;
+        Use use_;
+
+      public:
+        ExitDescriptor(PropertyName *name, MoveRef<MIRTypeVector> argTypes, Use use)
+          : name_(name),
+            argTypes_(argTypes),
+            use_(use)
+        {}
+        ExitDescriptor(MoveRef<ExitDescriptor> rhs)
+          : name_(rhs->name_),
+            argTypes_(Move(rhs->argTypes_)),
+            use_(rhs->use_)
+        {}
+        const MIRTypeVector &argTypes() const {
+            return argTypes_;
+        }
+        Use use() const {
+            return use_;
+        }
+
+        // ExitDescriptor is a HashPolicy:
+        typedef ExitDescriptor Lookup;
+        static HashNumber hash(const ExitDescriptor &d) {
+            HashNumber hn = HashGeneric(d.name_, d.use_.which());
+            for (unsigned i = 0; i < d.argTypes_.length(); i++)
+                hn = AddToHash(hn, d.argTypes_[i]);
+            return hn;
+        }
+        static bool match(const ExitDescriptor &lhs, const ExitDescriptor &rhs) {
+            if (lhs.name_ != rhs.name_ ||
+                lhs.argTypes_.length() != rhs.argTypes_.length() ||
+                lhs.use_ != rhs.use_)
+            {
+                return false;
+            }
+            for (unsigned i = 0; i < lhs.argTypes_.length(); i++) {
+                if (lhs.argTypes_[i] != rhs.argTypes_[i])
+                    return false;
+            }
+            return true;
+        }
+    };
+
+    typedef HashMap<ExitDescriptor, unsigned, ExitDescriptor, ContextAllocPolicy> ExitMap;
+
+  private:
+    typedef HashMap<PropertyName*, AsmJSMathBuiltin> MathNameMap;
+    typedef HashMap<PropertyName*, Global> GlobalMap;
+    typedef Vector<Func> FuncVector;
+    typedef Vector<AsmJSGlobalAccess> GlobalAccessVector;
+
+    JSContext *                    cx_;
+    LifoAlloc                      lifo_;
+    TempAllocator                  alloc_;
+    IonContext                     ionContext_;
+    MacroAssembler                 masm_;
+
+    ScopedJSDeletePtr<AsmJSModule> module_;
+
+    PropertyName *                 moduleFunctionName_;
+    PropertyName *                 globalArgumentName_;
+    PropertyName *                 importArgumentName_;
+    PropertyName *                 bufferArgumentName_;
+
+    GlobalMap                      globals_;
+    FuncVector                     functions_;
+    FuncPtrTableVector             funcPtrTables_;
+    ExitMap                        exits_;
+    MathNameMap                    standardLibraryMathNames_;
+
+    GlobalAccessVector             globalAccesses_;
+
+    Label                          stackOverflowLabel_;
+    Label                          operationCallbackLabel_;
+
+    const char *                   errorString_;
+    ParseNode *                    errorNode_;
+    TokenStream &                  tokenStream_;
+
+    // Compilation proceeds in three ordered passes; each mutator below
+    // asserts the pass it belongs to. Debug-only bookkeeping.
+    DebugOnly<int>                 currentPass_;
+
+    bool addStandardLibraryMathName(const char *name, AsmJSMathBuiltin builtin) {
+        JSAtom *atom = Atomize(cx_, name, strlen(name));
+        if (!atom)
+            return false;
+        return standardLibraryMathNames_.putNew(atom->asPropertyName(), builtin);
+    }
+
+    static const size_t LIFO_ALLOC_PRIMARY_CHUNK_SIZE = 1 << 12;
+
+  public:
+    ModuleCompiler(JSContext *cx, TokenStream &ts)
+      : cx_(cx),
+        lifo_(LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
+        alloc_(&lifo_),
+        ionContext_(cx, cx->compartment, &alloc_),
+        masm_(),
+        moduleFunctionName_(NULL),
+        globalArgumentName_(NULL),
+        importArgumentName_(NULL),
+        bufferArgumentName_(NULL),
+        globals_(cx),
+        functions_(cx),
+        funcPtrTables_(cx),
+        exits_(cx),
+        standardLibraryMathNames_(cx),
+        globalAccesses_(cx),
+        errorString_(NULL),
+        errorNode_(NULL),
+        tokenStream_(ts),
+        currentPass_(1)
+    {}
+
+    ~ModuleCompiler() {
+        if (errorString_)
+            tokenStream_.reportAsmJSError(errorNode_, JSMSG_USE_ASM_TYPE_FAIL, errorString_);
+
+        // Avoid spurious Label assertions on compilation failure.
+        if (!stackOverflowLabel_.bound())
+            stackOverflowLabel_.bind(0);
+        if (!operationCallbackLabel_.bound())
+            operationCallbackLabel_.bind(0);
+    }
+
+    // Fallible second-stage initialization: set up the hash maps, register
+    // the recognized Math.* builtins and allocate the AsmJSModule. Returns
+    // false on failure (e.g. OOM).
+    bool init() {
+        if (!cx_->compartment->ensureIonCompartmentExists(cx_))
+            return false;
+
+        if (!globals_.init() || !exits_.init())
+            return false;
+
+        if (!standardLibraryMathNames_.init() ||
+            !addStandardLibraryMathName("sin", AsmJSMathBuiltin_sin) ||
+            !addStandardLibraryMathName("cos", AsmJSMathBuiltin_cos) ||
+            !addStandardLibraryMathName("tan", AsmJSMathBuiltin_tan) ||
+            !addStandardLibraryMathName("asin", AsmJSMathBuiltin_asin) ||
+            !addStandardLibraryMathName("acos", AsmJSMathBuiltin_acos) ||
+            !addStandardLibraryMathName("atan", AsmJSMathBuiltin_atan) ||
+            !addStandardLibraryMathName("ceil", AsmJSMathBuiltin_ceil) ||
+            !addStandardLibraryMathName("floor", AsmJSMathBuiltin_floor) ||
+            !addStandardLibraryMathName("exp", AsmJSMathBuiltin_exp) ||
+            !addStandardLibraryMathName("log", AsmJSMathBuiltin_log) ||
+            !addStandardLibraryMathName("pow", AsmJSMathBuiltin_pow) ||
+            !addStandardLibraryMathName("sqrt", AsmJSMathBuiltin_sqrt) ||
+            !addStandardLibraryMathName("abs", AsmJSMathBuiltin_abs) ||
+            !addStandardLibraryMathName("atan2", AsmJSMathBuiltin_atan2) ||
+            !addStandardLibraryMathName("imul", AsmJSMathBuiltin_imul))
+        {
+            return false;
+        }
+
+        module_ = cx_->new_<AsmJSModule>();
+        if (!module_)
+            return false;
+
+        return true;
+    }
+
+    // Record a validation error at 'pn'; the message is reported later, in
+    // ~ModuleCompiler. Always returns false so callers can 'return fail(...)'.
+    bool fail(const char *str, ParseNode *pn) {
+        JS_ASSERT(!errorString_);
+        JS_ASSERT(!errorNode_);
+        JS_ASSERT(str);
+        JS_ASSERT(pn);
+        errorString_ = str;
+        errorNode_ = pn;
+        return false;
+    }
+
+    /*************************************************** Read-only interface */
+
+    JSContext *cx() const { return cx_; }
+    LifoAlloc &lifo() { return lifo_; }
+    TempAllocator &alloc() { return alloc_; }
+    MacroAssembler &masm() { return masm_; }
+    Label &stackOverflowLabel() { return stackOverflowLabel_; }
+    Label &operationCallbackLabel() { return operationCallbackLabel_; }
+    bool hasError() const { return errorString_ != NULL; }
+    const AsmJSModule &module() const { return *module_.get(); }
+
+    PropertyName *moduleFunctionName() const { return moduleFunctionName_; }
+    PropertyName *globalArgumentName() const { return globalArgumentName_; }
+    PropertyName *importArgumentName() const { return importArgumentName_; }
+    PropertyName *bufferArgumentName() const { return bufferArgumentName_; }
+
+    const Global *lookupGlobal(PropertyName *name) const {
+        if (GlobalMap::Ptr p = globals_.lookup(name))
+            return &p->value;
+        return NULL;
+    }
+    const FuncPtrTable *lookupFuncPtrTable(PropertyName *name) const {
+        if (GlobalMap::Ptr p = globals_.lookup(name)) {
+            if (p->value.which() == Global::FuncPtrTable)
+                return &funcPtrTables_[p->value.funcPtrTableIndex()];
+        }
+        return NULL;
+    }
+    const Func *lookupFunction(PropertyName *name) const {
+        if (GlobalMap::Ptr p = globals_.lookup(name)) {
+            if (p->value.which() == Global::Function)
+                return &functions_[p->value.funcIndex()];
+        }
+        return NULL;
+    }
+    unsigned numFunctions() const {
+        return functions_.length();
+    }
+    const Func &function(unsigned i) const {
+        return functions_[i];
+    }
+    bool lookupStandardLibraryMathName(PropertyName *name, AsmJSMathBuiltin *mathBuiltin) const {
+        if (MathNameMap::Ptr p = standardLibraryMathNames_.lookup(name)) {
+            *mathBuiltin = p->value;
+            return true;
+        }
+        return false;
+    }
+    ExitMap::Range allExits() const {
+        return exits_.all();
+    }
+
+    /***************************************************** Mutable interface */
+
+    void initModuleFunctionName(PropertyName *n) { moduleFunctionName_ = n; }
+    void initGlobalArgumentName(PropertyName *n) { globalArgumentName_ = n; }
+    void initImportArgumentName(PropertyName *n) { importArgumentName_ = n; }
+    void initBufferArgumentName(PropertyName *n) { bufferArgumentName_ = n; }
+
+    bool addGlobalVarInitConstant(PropertyName *varName, VarType type, const Value &v) {
+        JS_ASSERT(currentPass_ == 1);
+        Global g(Global::Variable);
+        uint32_t index;
+        if (!module_->addGlobalVarInitConstant(v, &index))
+            return false;
+        g.u.var.index_ = index;
+        g.u.var.type_ = type.which();
+        return globals_.putNew(varName, g);
+    }
+    bool addGlobalVarImport(PropertyName *varName, PropertyName *fieldName, AsmJSCoercion coercion)
+    {
+        JS_ASSERT(currentPass_ == 1);
+        Global g(Global::Variable);
+        uint32_t index;
+        if (!module_->addGlobalVarImport(fieldName, coercion, &index))
+            return false;
+        g.u.var.index_ = index;
+        g.u.var.type_ = VarType(coercion).which();
+        return globals_.putNew(varName, g);
+    }
+    bool addFunction(MoveRef<Func> func) {
+        JS_ASSERT(currentPass_ == 1);
+        Global g(Global::Function);
+        g.u.funcIndex_ = functions_.length();
+        if (!globals_.putNew(FunctionName(func->fn()), g))
+            return false;
+        return functions_.append(func);
+    }
+    bool addFuncPtrTable(PropertyName *varName, MoveRef<FuncPtrVector> funcPtrs) {
+        JS_ASSERT(currentPass_ == 1);
+        Global g(Global::FuncPtrTable);
+        g.u.funcPtrTableIndex_ = funcPtrTables_.length();
+        if (!globals_.putNew(varName, g))
+            return false;
+        FuncPtrTable table(funcPtrs, module_->numFuncPtrTableElems());
+        if (!module_->incrementNumFuncPtrTableElems(table.numElems()))
+            return false;
+        return funcPtrTables_.append(Move(table));
+    }
+    bool addFFI(PropertyName *varName, PropertyName *field) {
+        JS_ASSERT(currentPass_ == 1);
+        Global g(Global::FFI);
+        uint32_t index;
+        if (!module_->addFFI(field, &index))
+            return false;
+        g.u.ffiIndex_ = index;
+        return globals_.putNew(varName, g);
+    }
+    bool addArrayView(PropertyName *varName, ArrayBufferView::ViewType vt, PropertyName *fieldName) {
+        JS_ASSERT(currentPass_ == 1);
+        Global g(Global::ArrayView);
+        if (!module_->addArrayView(vt, fieldName))
+            return false;
+        g.u.viewType_ = vt;
+        return globals_.putNew(varName, g);
+    }
+    bool addMathBuiltin(PropertyName *varName, AsmJSMathBuiltin mathBuiltin, PropertyName *fieldName) {
+        JS_ASSERT(currentPass_ == 1);
+        if (!module_->addMathBuiltin(mathBuiltin, fieldName))
+            return false;
+        Global g(Global::MathBuiltin);
+        g.u.mathBuiltin_ = mathBuiltin;
+        return globals_.putNew(varName, g);
+    }
+    bool addGlobalConstant(PropertyName *varName, double constant, PropertyName *fieldName) {
+        JS_ASSERT(currentPass_ == 1);
+        if (!module_->addGlobalConstant(constant, fieldName))
+            return false;
+        Global g(Global::Constant);
+        g.u.constant_ = constant;
+        return globals_.putNew(varName, g);
+    }
+    bool collectAccesses(MIRGenerator &gen) {
+        if (!module_->addHeapAccesses(gen.heapAccesses()))
+            return false;
+
+        for (unsigned i = 0; i < gen.globalAccesses().length(); i++) {
+            if (!globalAccesses_.append(gen.globalAccesses()[i]))
+                return false;
+        }
+        return true;
+    }
+    bool addGlobalAccess(AsmJSGlobalAccess access) {
+        return globalAccesses_.append(access);
+    }
+    bool addExportedFunction(const Func *func, PropertyName *maybeFieldName) {
+        JS_ASSERT(currentPass_ == 1);
+        AsmJSModule::ArgCoercionVector argCoercions;
+        if (!argCoercions.resize(func->numArgs()))
+            return false;
+        for (unsigned i = 0; i < func->numArgs(); i++)
+            argCoercions[i] = func->argType(i).toCoercion();
+        AsmJSModule::ReturnType returnType = func->returnType().toModuleReturnType();
+        return module_->addExportedFunction(FunctionObject(func->fn()), maybeFieldName,
+                                            Move(argCoercions), returnType);
+    }
+
+    void setFirstPassComplete() {
+        JS_ASSERT(currentPass_ == 1);
+        currentPass_ = 2;
+    }
+
+    Func &function(unsigned funcIndex) {
+        JS_ASSERT(currentPass_ == 2);
+        return functions_[funcIndex];
+    }
+    // Return (via *exitIndex) the exit for the given (name, signature, use)
+    // pairing, reusing an existing exit when an identical descriptor has
+    // already been registered.
+    bool addExit(unsigned ffiIndex, PropertyName *name, MoveRef<MIRTypeVector> argTypes, Use use,
+                 unsigned *exitIndex)
+    {
+        JS_ASSERT(currentPass_ == 2);
+        ExitDescriptor exitDescriptor(name, argTypes, use);
+        ExitMap::AddPtr p = exits_.lookupForAdd(exitDescriptor);
+        if (p) {
+            *exitIndex = p->value;
+            return true;
+        }
+        if (!module_->addExit(ffiIndex, exitIndex))
+            return false;
+        return exits_.add(p, Move(exitDescriptor), *exitIndex);
+    }
+
+
+    void setSecondPassComplete() {
+        JS_ASSERT(currentPass_ == 2);
+        masm_.align(gc::PageSize);
+        module_->setFunctionBytes(masm_.size());
+        currentPass_ = 3;
+    }
+
+    void setExitOffset(unsigned exitIndex) {
+        JS_ASSERT(currentPass_ == 3);
+        module_->exit(exitIndex).initCodeOffset(masm_.size());
+    }
+    void setEntryOffset(unsigned exportIndex) {
+        JS_ASSERT(currentPass_ == 3);
+        module_->exportedFunction(exportIndex).initCodeOffset(masm_.size());
+    }
+
+    bool finish(ScopedJSDeletePtr<AsmJSModule> *module) {
+        // After finishing, the only valid operation on an ModuleCompiler is
+        // destruction.
+        JS_ASSERT(currentPass_ == 3);
+        currentPass_ = -1;
+
+        // Finish the code section.
+        masm_.finish();
+        if (masm_.oom())
+            return false;
+
+        // The global data section sits immediately after the executable (and
+        // other) data allocated by the MacroAssembler. Round up bytesNeeded so
+        // that doubles/pointers stay aligned.
+        size_t codeBytes = AlignBytes(masm_.bytesNeeded(), sizeof(double));
+        size_t totalBytes = codeBytes + module_->globalDataBytes();
+
+        // The code must be page aligned, so include extra space so that we can
+        // AlignBytes the allocation result below.
+        size_t allocedBytes = totalBytes + gc::PageSize;
+
+        // Allocate the slab of memory.
+        JSC::ExecutableAllocator *execAlloc = cx_->compartment->ionCompartment()->execAlloc();
+        JSC::ExecutablePool *pool;
+        uint8_t *unalignedBytes = (uint8_t*)execAlloc->alloc(allocedBytes, &pool, JSC::ASMJS_CODE);
+        if (!unalignedBytes)
+            return false;
+        uint8_t *code = (uint8_t*)AlignBytes((uintptr_t)unalignedBytes, gc::PageSize);
+
+        // The ExecutablePool owns the memory and must be released by the AsmJSModule.
+        module_->takeOwnership(pool, code, codeBytes, totalBytes);
+
+        // Copy the buffer into executable memory (c.f. IonCode::copyFrom).
+        masm_.executableCopy(code);
+        masm_.processCodeLabels(code);
+        JS_ASSERT(masm_.jumpRelocationTableBytes() == 0);
+        JS_ASSERT(masm_.dataRelocationTableBytes() == 0);
+        JS_ASSERT(masm_.preBarrierTableBytes() == 0);
+        JS_ASSERT(!masm_.hasEnteredExitFrame());
+
+        // Patch everything that needs an absolute address:
+
+        // Entry points
+        for (unsigned i = 0; i < module_->numExportedFunctions(); i++)
+            module_->exportedFunction(i).patch(code);
+
+        // Exit points
+        for (unsigned i = 0; i < module_->numExits(); i++) {
+            module_->exit(i).patch(code);
+            module_->exitIndexToGlobalDatum(i).exit = module_->exit(i).code();
+            module_->exitIndexToGlobalDatum(i).fun = NULL;
+        }
+        module_->setOperationCallbackExit(code + masm_.actualOffset(operationCallbackLabel_.offset()));
+
+        // Function-pointer table entries
+        unsigned elemIndex = 0;
+        for (unsigned i = 0; i < funcPtrTables_.length(); i++) {
+            FuncPtrTable &table = funcPtrTables_[i];
+            JS_ASSERT(elemIndex == table.baseIndex());
+            for (unsigned j = 0; j < table.numElems(); j++) {
+                uint8_t *funcPtr = code + masm_.actualOffset(table.elem(j).codeLabel()->offset());
+                module_->funcPtrIndexToGlobalDatum(elemIndex++) = funcPtr;
+            }
+            JS_ASSERT(elemIndex == table.baseIndex() + table.numElems());
+        }
+        JS_ASSERT(elemIndex == module_->numFuncPtrTableElems());
+
+        // Global accesses in function bodies
+        for (unsigned i = 0; i < globalAccesses_.length(); i++) {
+            AsmJSGlobalAccess access = globalAccesses_[i];
+            masm_.patchAsmJSGlobalAccess(access.offset, code, codeBytes, access.globalDataOffset);
+        }
+
+        // The AsmJSHeapAccess offsets need to be updated to reflect the
+        // "actualOffset" (an ARM distinction).
+        for (unsigned i = 0; i < module_->numHeapAccesses(); i++) {
+            AsmJSHeapAccess &access = module_->heapAccess(i);
+            access.updateOffset(masm_.actualOffset(access.offset()));
+        }
+
+        *module = module_.forget();
+        return true;
+    }
+};
+
+/*****************************************************************************/
+
+// Encapsulates the compilation of a single function in an asm.js module. The
+// function compiler handles the creation and final backend compilation of the
+// MIR graph. Also see ModuleCompiler comment.
+class FunctionCompiler
+{
+  public:
+    // A named slot of the function: either a formal argument (Arg) or a
+    // 'var' declaration (Var). Vars carry the constant they are initialized
+    // to; args are materialized from the ABI in init(), so their
+    // initialValue is a magic placeholder.
+    struct Local
+    {
+        enum Which { Var, Arg } which;
+        VarType type;
+        unsigned slot;
+        Value initialValue;
+
+        Local(VarType t, unsigned slot)
+          : which(Arg), type(t), slot(slot), initialValue(MagicValue(JS_GENERIC_MAGIC)) {}
+        Local(VarType t, unsigned slot, const Value &init)
+          : which(Var), type(t), slot(slot), initialValue(init) {}
+    };
+    typedef HashMap<PropertyName*, Local> LocalMap;
+
+  private:
+    typedef Vector<MBasicBlock*, 2> BlockVector;
+    typedef HashMap<PropertyName*, BlockVector> LabeledBlockMap;
+    typedef HashMap<ParseNode*, BlockVector> UnlabeledBlockMap;
+    typedef Vector<ParseNode*, 4> NodeStack;
+
+    ModuleCompiler &       m_;
+    ModuleCompiler::Func & func_;
+    LocalMap               locals_;
+
+    LifoAllocScope         lifoAllocScope_;
+    MIRGraph               mirGraph_;
+    MIRGenerator           mirGen_;
+    CompileInfo            compileInfo_;
+    AutoFlushCache         autoFlushCache_;
+
+    // curBlock_ == NULL means the current position is unreachable (dead
+    // code); the expression/statement generators below then emit nothing.
+    MBasicBlock *          curBlock_;
+    NodeStack              loopStack_;
+    NodeStack              breakableStack_;
+    UnlabeledBlockMap      unlabeledBreaks_;
+    UnlabeledBlockMap      unlabeledContinues_;
+    LabeledBlockMap        labeledBreaks_;
+    LabeledBlockMap        labeledContinues_;
+
+  public:
+    FunctionCompiler(ModuleCompiler &m, ModuleCompiler::Func &func, MoveRef<LocalMap> locals)
+      : m_(m),
+        func_(func),
+        locals_(locals),
+        lifoAllocScope_(&m.lifo()),
+        mirGraph_(&m.alloc()),
+        // compileInfo_ is declared (and thus constructed) after mirGen_;
+        // only its address is taken here. NOTE(review): presumably
+        // MIRGenerator just stores the pointer -- confirm.
+        mirGen_(m.cx()->compartment, &m.alloc(), &mirGraph_, &compileInfo_),
+        compileInfo_(locals_.count()),
+        autoFlushCache_("asm.js"),
+        curBlock_(NULL),
+        loopStack_(m.cx()),
+        breakableStack_(m.cx()),
+        unlabeledBreaks_(m.cx()),
+        unlabeledContinues_(m.cx()),
+        labeledBreaks_(m.cx()),
+        labeledContinues_(m.cx())
+    {}
+
+    // Fallible initialization: set up the break/continue maps, create the
+    // entry block, emit the stack-overflow check, then bind every formal
+    // argument and 'var' to its initial definition. Returns false on OOM.
+    bool init()
+    {
+        if (!unlabeledBreaks_.init() ||
+            !unlabeledContinues_.init() ||
+            !labeledBreaks_.init() ||
+            !labeledContinues_.init())
+        {
+            return false;
+        }
+
+        if (!newBlock(/* pred = */ NULL, &curBlock_))
+            return false;
+
+        curBlock_->add(MAsmJSCheckOverRecursed::New(&m_.stackOverflowLabel()));
+
+        // Arguments are read out of their ABI locations (registers/stack).
+        for (ABIArgIter i(func_.argMIRTypes()); !i.done(); i++) {
+            MAsmJSParameter *ins = MAsmJSParameter::New(*i, i.mirType());
+            curBlock_->add(ins);
+            curBlock_->initSlot(compileInfo_.localSlot(i.index()), ins);
+        }
+
+        // 'var' declarations start out as their initializer constants.
+        for (LocalMap::Range r = locals_.all(); !r.empty(); r.popFront()) {
+            const Local &local = r.front().value;
+            if (local.which == Local::Var) {
+                MConstant *ins = MConstant::New(local.initialValue);
+                curBlock_->add(ins);
+                curBlock_->initSlot(compileInfo_.localSlot(local.slot), ins);
+            }
+        }
+
+        return true;
+    }
+
+    bool fail(const char *str, ParseNode *pn)
+    {
+        // Delegate error recording to the ModuleCompiler; its fail()
+        // always returns false, which we propagate to the caller.
+        return m_.fail(str, pn);
+    }
+
+    ~FunctionCompiler()
+    {
+        // On successful compilation all control-flow bookkeeping must have
+        // been consumed and the final block closed; skip these sanity
+        // assertions when an error or pending exception cut things short.
+        if (!m().hasError() && !cx()->isExceptionPending()) {
+            JS_ASSERT(loopStack_.empty());
+            JS_ASSERT(unlabeledBreaks_.empty());
+            JS_ASSERT(unlabeledContinues_.empty());
+            JS_ASSERT(labeledBreaks_.empty());
+            JS_ASSERT(labeledContinues_.empty());
+            JS_ASSERT(curBlock_ == NULL);
+        }
+    }
+
+    /*************************************************** Read-only interface */
+
+    // Trivial accessors over the compiler's own state.
+    JSContext *            cx() const { return m_.cx(); }
+    ModuleCompiler &       m() const { return m_; }
+    const AsmJSModule &    module() const { return m_.module(); }
+    ModuleCompiler::Func & func() const { return func_; }
+    MIRGraph &             mirGraph() { return mirGraph_; }
+    MIRGenerator &         mirGen() { return mirGen_; }
+
+    // Map a name to its argument/var entry, or NULL if the name does not
+    // denote a local of this function.
+    const Local *lookupLocal(PropertyName *name) const
+    {
+        LocalMap::Ptr p = locals_.lookup(name);
+        return p ? &p->value : NULL;
+    }
+
+    // Current SSA definition of a local in the current block, or NULL in
+    // dead code.
+    MDefinition *getLocalDef(const Local &local)
+    {
+        if (!curBlock_)
+            return NULL;
+        return curBlock_->getSlot(compileInfo_.localSlot(local.slot));
+    }
+
+    // Look up a module-level function, honoring shadowing: a local
+    // declaration hides any function of the same name.
+    const ModuleCompiler::Func *lookupFunction(PropertyName *name) const
+    {
+        if (locals_.has(name))
+            return NULL;
+        return m_.lookupFunction(name);
+    }
+
+    // Look up a module-level global, honoring shadowing by locals.
+    const ModuleCompiler::Global *lookupGlobal(PropertyName *name) const
+    {
+        return locals_.has(name) ? NULL : m_.lookupGlobal(name);
+    }
+
+    /************************************************* Expression generation */
+
+    // Emit a numeric constant in the current block; NULL in dead code.
+    MDefinition *constant(const Value &v)
+    {
+        if (!curBlock_)
+            return NULL;
+        JS_ASSERT(v.isNumber());
+        MConstant *ins = MConstant::New(v);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    // Emit a unary MIR node of type T (via T::NewAsmJS), optionally with
+    // an explicit result MIRType; returns NULL in dead code.
+    template <class T>
+    MDefinition *unary(MDefinition *op)
+    {
+        if (!curBlock_)
+            return NULL;
+        T *ins = T::NewAsmJS(op);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    template <class T>
+    MDefinition *unary(MDefinition *op, MIRType type)
+    {
+        if (!curBlock_)
+            return NULL;
+        T *ins = T::NewAsmJS(op, type);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    // Emit a binary MIR node of type T; returns NULL in dead code. Note
+    // the untyped overload uses T::New while the typed one uses
+    // T::NewAsmJS.
+    template <class T>
+    MDefinition *binary(MDefinition *lhs, MDefinition *rhs)
+    {
+        if (!curBlock_)
+            return NULL;
+        T *ins = T::New(lhs, rhs);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    template <class T>
+    MDefinition *binary(MDefinition *lhs, MDefinition *rhs, MIRType type)
+    {
+        if (!curBlock_)
+            return NULL;
+        T *ins = T::NewAsmJS(lhs, rhs, type);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    // Emit a multiplication with an explicit MMul::Mode (e.g. integer vs
+    // double semantics); returns NULL in dead code.
+    MDefinition *mul(MDefinition *lhs, MDefinition *rhs, MIRType type, MMul::Mode mode)
+    {
+        if (!curBlock_)
+            return NULL;
+        MMul *ins = MMul::New(lhs, rhs, type, mode);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    // Emit a bitwise MIR node (binary or unary form), always via
+    // T::NewAsmJS; returns NULL in dead code.
+    template <class T>
+    MDefinition *bitwise(MDefinition *lhs, MDefinition *rhs)
+    {
+        if (!curBlock_)
+            return NULL;
+        T *ins = T::NewAsmJS(lhs, rhs);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    template <class T>
+    MDefinition *bitwise(MDefinition *op)
+    {
+        if (!curBlock_)
+            return NULL;
+        T *ins = T::NewAsmJS(op);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    // Emit a typed comparison for JSOp 'op'; returns NULL in dead code.
+    MDefinition *compare(MDefinition *lhs, MDefinition *rhs, JSOp op, MCompare::CompareType type)
+    {
+        if (!curBlock_)
+            return NULL;
+        MCompare *ins = MCompare::NewAsmJS(lhs, rhs, op, type);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    // Rebind a local's slot to a new definition; no-op in dead code.
+    void assign(const Local &local, MDefinition *def)
+    {
+        if (!curBlock_)
+            return;
+        curBlock_->setSlot(compileInfo_.localSlot(local.slot), def);
+    }
+
+    // Emit a typed-array heap load; returns NULL in dead code.
+    MDefinition *loadHeap(ArrayBufferView::ViewType vt, MDefinition *ptr)
+    {
+        if (!curBlock_)
+            return NULL;
+        MAsmJSLoadHeap *load = MAsmJSLoadHeap::New(vt, ptr);
+        curBlock_->add(load);
+        return load;
+    }
+
+    // Emit a typed-array heap store; no-op in dead code.
+    void storeHeap(ArrayBufferView::ViewType vt, MDefinition *ptr, MDefinition *v)
+    {
+        if (!curBlock_)
+            return;
+        curBlock_->add(MAsmJSStoreHeap::New(vt, ptr, v));
+    }
+
+    // Emit a load of a module-level variable from the global data section;
+    // returns NULL in dead code.
+    MDefinition *loadGlobalVar(const ModuleCompiler::Global &global)
+    {
+        if (!curBlock_)
+            return NULL;
+        MIRType type = global.varType().toMIRType();
+        unsigned globalDataOffset = module().globalVarIndexToGlobalDataOffset(global.varIndex());
+        MAsmJSLoadGlobalVar *load = MAsmJSLoadGlobalVar::New(type, globalDataOffset);
+        curBlock_->add(load);
+        return load;
+    }
+
+    // Emit a store of a module-level variable into the global data
+    // section; no-op in dead code.
+    void storeGlobalVar(const ModuleCompiler::Global &global, MDefinition *v)
+    {
+        if (!curBlock_)
+            return;
+        unsigned globalDataOffset = module().globalVarIndexToGlobalDataOffset(global.varIndex());
+        curBlock_->add(MAsmJSStoreGlobalVar::New(globalDataOffset, v));
+    }
+
+    /***************************************************************** Calls */
+
+    // The IonMonkey backend maintains a single stack offset (from the stack
+    // pointer to the base of the frame) by adding the total amount of spill
+    // space required plus the maximum stack required for argument passing.
+    // Since we do not use IonMonkey's MPrepareCall/MPassArg/MCall, we must
+    // manually accumulate, for the entire function, the maximum required stack
+    // space for argument passing. (This is passed to the CodeGenerator via
+    // MIRGenerator::maxAsmJSStackArgBytes.) Naively, this would just be the
+    // maximum of the stack space required for each individual call (as
+    // determined by the call ABI). However, as an optimization, arguments are
+    // stored to the stack immediately after evaluation (to decrease live
+    // ranges and reduce spilling). This introduces the complexity that,
+    // between evaluating an argument and making the call, another argument
+    // evaluation could perform a call that also needs to store to the stack.
+    // When this occurs childClobbers_ = true and the parent expression's
+    // arguments are stored above the maximum depth clobbered by a child
+    // expression.
+
+    // Per-call argument bookkeeping, threaded through startCallArgs /
+    // passArg / finishCallArgs. See the stack-accounting comment above for
+    // the meaning of childClobbers_ and spIncrement_.
+    class Args
+    {
+        ABIArgGenerator abi_;
+        uint32_t prevMaxStackBytes_;
+        uint32_t maxChildStackBytes_;
+        uint32_t spIncrement_;
+        Vector<Type, 8> types_;
+        MAsmJSCall::Args regArgs_;
+        Vector<MAsmJSPassStackArg*> stackArgs_;
+        bool childClobbers_;
+
+        friend class FunctionCompiler;
+
+      public:
+        Args(FunctionCompiler &f)
+          : prevMaxStackBytes_(0),
+            maxChildStackBytes_(0),
+            spIncrement_(0),
+            types_(f.cx()),
+            regArgs_(f.cx()),
+            stackArgs_(f.cx()),
+            childClobbers_(false)
+        {}
+        unsigned length() const {
+            return types_.length();
+        }
+        Type type(unsigned i) const {
+            return types_[i];
+        }
+    };
+
+    // Begin a call: save the function-wide max-stack-arg accumulator and reset
+    // it so that calls made while evaluating this call's arguments can be
+    // measured independently. No-op in dead code (!curBlock_).
+    void startCallArgs(Args *args)
+    {
+        if (!curBlock_)
+            return;
+        args->prevMaxStackBytes_ = mirGen_.resetAsmJSMaxStackArgBytes();
+    }
+
+    // Add one argument to an in-progress call. The type is recorded even in
+    // dead code so that type checking of the argument list still proceeds.
+    bool passArg(MDefinition *argDef, Type type, Args *args)
+    {
+        if (!args->types_.append(type))
+            return false;
+
+        if (!curBlock_)
+            return true;
+
+        // Stack used by calls nested inside this argument's evaluation has
+        // accumulated since the last reset; fold it into this call's maximum.
+        uint32_t childStackBytes = mirGen_.resetAsmJSMaxStackArgBytes();
+        args->maxChildStackBytes_ = Max(args->maxChildStackBytes_, childStackBytes);
+        // If a nested call needed stack space after we already stored stack
+        // arguments, those stores could be clobbered; finishCallArgs will
+        // bump them above the clobbered region.
+        if (childStackBytes > 0 && !args->stackArgs_.empty())
+            args->childClobbers_ = true;
+
+        // Place the argument in a register or on the stack per the ABI.
+        ABIArg arg = args->abi_.next(type.toMIRType());
+        if (arg.kind() == ABIArg::Stack) {
+            MAsmJSPassStackArg *mir = MAsmJSPassStackArg::New(arg.offsetFromArgBase(), argDef);
+            curBlock_->add(mir);
+            if (!args->stackArgs_.append(mir))
+                return false;
+        } else {
+            if (!args->regArgs_.append(MAsmJSCall::Arg(arg.reg(), argDef)))
+                return false;
+        }
+        return true;
+    }
+
+    // Finish the argument list: if a child call clobbered our stack-argument
+    // area, shift our stores up by an aligned sp increment; then merge this
+    // call's stack requirement back into the function-wide maximum.
+    void finishCallArgs(Args *args)
+    {
+        if (!curBlock_)
+            return;
+        uint32_t parentStackBytes = args->abi_.stackBytesConsumedSoFar();
+        uint32_t newStackBytes;
+        if (args->childClobbers_) {
+            // Reserve the whole child region below our arguments.
+            args->spIncrement_ = AlignBytes(args->maxChildStackBytes_, StackAlignment);
+            for (unsigned i = 0; i < args->stackArgs_.length(); i++)
+                args->stackArgs_[i]->incrementOffset(args->spIncrement_);
+            newStackBytes = Max(args->prevMaxStackBytes_,
+                                args->spIncrement_ + parentStackBytes);
+        } else {
+            // Child and parent regions may share the same space.
+            args->spIncrement_ = 0;
+            newStackBytes = Max(args->prevMaxStackBytes_,
+                                Max(args->maxChildStackBytes_, parentStackBytes));
+        }
+        mirGen_.setAsmJSMaxStackArgBytes(newStackBytes);
+    }
+
+  private:
+    // Emit the MAsmJSCall instruction shared by all call flavors. In dead
+    // code, succeeds with *def set to NULL.
+    bool call(MAsmJSCall::Callee callee, const Args &args, MIRType returnType, MDefinition **def)
+    {
+        if (!curBlock_) {
+            *def = NULL;
+            return true;
+        }
+        MAsmJSCall *ins = MAsmJSCall::New(callee, args.regArgs_, returnType, args.spIncrement_);
+        if (!ins)
+            return false;
+        curBlock_->add(ins);
+        *def = ins;
+        return true;
+    }
+
+  public:
+    // Direct call to another asm.js function in this module via its code label.
+    bool internalCall(const ModuleCompiler::Func &func, const Args &args, MDefinition **def)
+    {
+        return call(MAsmJSCall::Callee(func.codeLabel()), args, func.returnType().toMIRType(), def);
+    }
+
+    // Indirect call through a function-pointer table: mask the index to the
+    // table size (tables are power-of-two sized, so masking keeps the index
+    // in bounds), load the callee from the module's global data, then call.
+    bool funcPtrCall(const ModuleCompiler::FuncPtrTable &funcPtrTable, MDefinition *index,
+                     const Args &args, MDefinition **def)
+    {
+        if (!curBlock_) {
+            *def = NULL;
+            return true;
+        }
+
+        MConstant *mask = MConstant::New(Int32Value(funcPtrTable.mask()));
+        curBlock_->add(mask);
+        MBitAnd *maskedIndex = MBitAnd::NewAsmJS(index, mask);
+        curBlock_->add(maskedIndex);
+        unsigned globalDataOffset = module().funcPtrIndexToGlobalDataOffset(funcPtrTable.baseIndex());
+        MAsmJSLoadFuncPtr *ptrFun = MAsmJSLoadFuncPtr::New(globalDataOffset, maskedIndex);
+        curBlock_->add(ptrFun);
+
+        MIRType returnType = funcPtrTable.sig().returnType().toMIRType();
+        return call(MAsmJSCall::Callee(ptrFun), args, returnType, def);
+    }
+
+    // Call out to an FFI (imported) function. The callee is loaded from the
+    // exit's slot in the module's global data; the static assert documents
+    // that loading at the ExitDatum's base offset yields the exit pointer.
+    bool ffiCall(unsigned exitIndex, const Args &args, MIRType returnType, MDefinition **def)
+    {
+        if (!curBlock_) {
+            *def = NULL;
+            return true;
+        }
+
+        JS_STATIC_ASSERT(offsetof(AsmJSModule::ExitDatum, exit) == 0);
+        unsigned globalDataOffset = module().exitIndexToGlobalDataOffset(exitIndex);
+
+        MAsmJSLoadFFIFunc *ptrFun = MAsmJSLoadFFIFunc::New(globalDataOffset);
+        curBlock_->add(ptrFun);
+
+        return call(MAsmJSCall::Callee(ptrFun), args, returnType, def);
+    }
+
+    // Call a C++ builtin at a fixed address (e.g. a Math library routine).
+    bool builtinCall(void *builtin, const Args &args, MIRType returnType, MDefinition **def)
+    {
+        MAsmJSCall::Callee callee(builtin);
+        return call(callee, args, returnType, def);
+    }
+
+    /*********************************************** Control flow generation */
+
+    // End the current block with a value return; code after this point is
+    // dead, which we represent by clearing curBlock_.
+    void returnExpr(MDefinition *expr)
+    {
+        if (!curBlock_)
+            return;
+        curBlock_->end(MAsmJSReturn::New(expr));
+        curBlock_ = NULL;
+    }
+
+    // End the current block with a void return; subsequent code is dead.
+    void returnVoid()
+    {
+        if (!curBlock_)
+            return;
+        curBlock_->end(MAsmJSVoidReturn::New());
+        curBlock_ = NULL;
+    }
+
+    // Begin an if-statement: create then/else blocks, end the current block
+    // with a test on cond, and continue emitting into the then block. In dead
+    // code both out-blocks are NULL.
+    bool branchAndStartThen(MDefinition *cond, MBasicBlock **thenBlock, MBasicBlock **elseBlock)
+    {
+        if (!curBlock_) {
+            *thenBlock = NULL;
+            *elseBlock = NULL;
+            return true;
+        }
+        if (!newBlock(curBlock_, thenBlock) || !newBlock(curBlock_, elseBlock))
+            return false;
+        curBlock_->end(MTest::New(cond, *thenBlock, *elseBlock));
+        curBlock_ = *thenBlock;
+        return true;
+    }
+
+    // Join the end of a then-branch back to joinBlock (the else block of an
+    // if without else). A NULL joinBlock means the whole if was in dead code.
+    // The then-branch itself may have ended in dead code (curBlock_ == NULL).
+    void joinIf(MBasicBlock *joinBlock)
+    {
+        if (!joinBlock)
+            return;
+        if (curBlock_) {
+            curBlock_->end(MGoto::New(joinBlock));
+            joinBlock->addPredecessor(curBlock_);
+        }
+        curBlock_ = joinBlock;
+        mirGraph_.moveBlockToEnd(curBlock_);
+    }
+
+    // Switch emission from the then-branch to the else-branch, returning the
+    // block at the end of the then-branch (NULL if the then-branch, or the
+    // whole if, ended in dead code) for the later joinIfElse.
+    MBasicBlock *switchToElse(MBasicBlock *elseBlock)
+    {
+        if (!elseBlock)
+            return NULL;
+        MBasicBlock *thenEnd = curBlock_;
+        curBlock_ = elseBlock;
+        mirGraph_.moveBlockToEnd(curBlock_);
+        return thenEnd;
+    }
+
+    // Join the ends of the then- and else-branches (either may be NULL if
+    // that branch ended in dead code). The join block is created with one
+    // predecessor; the other, if live, is added explicitly afterwards.
+    bool joinIfElse(MBasicBlock *thenEnd)
+    {
+        if (!curBlock_ && !thenEnd)
+            return true;
+        MBasicBlock *pred = curBlock_ ? curBlock_ : thenEnd;
+        MBasicBlock *join;
+        if (!newBlock(pred, &join))
+            return false;
+        if (curBlock_)
+            curBlock_->end(MGoto::New(join));
+        if (thenEnd)
+            thenEnd->end(MGoto::New(join));
+        if (curBlock_ && thenEnd)
+            join->addPredecessor(thenEnd);
+        curBlock_ = join;
+        return true;
+    }
+
+    // Push a value that will flow into a phi at an upcoming join point
+    // (used for expressions, like comma or conditional, that merge values).
+    void pushPhiInput(MDefinition *def)
+    {
+        if (curBlock_) {
+            JS_ASSERT(curBlock_->stackDepth() == compileInfo_.firstStackSlot());
+            curBlock_->push(def);
+        }
+    }
+
+    // Pop the merged value produced by phis at a join point; NULL in dead code.
+    MDefinition *popPhiOutput()
+    {
+        if (curBlock_) {
+            JS_ASSERT(curBlock_->stackDepth() == compileInfo_.firstStackSlot() + 1);
+            return curBlock_->pop();
+        }
+        return NULL;
+    }
+
+    // Begin a loop: push it on the loop/breakable stacks and create a pending
+    // loop header (its backedge is attached later by closeLoop /
+    // branchAndCloseDoWhileLoop). In dead code only the stacks are updated.
+    bool startPendingLoop(ParseNode *pn, MBasicBlock **loopEntry)
+    {
+        if (!loopStack_.append(pn) || !breakableStack_.append(pn))
+            return false;
+        JS_ASSERT_IF(curBlock_, curBlock_->loopDepth() == loopStack_.length() - 1);
+        if (!curBlock_) {
+            *loopEntry = NULL;
+            return true;
+        }
+        *loopEntry = MBasicBlock::NewPendingLoopHeader(mirGraph_, compileInfo_, curBlock_, NULL);
+        if (!*loopEntry)
+            return false;
+        mirGraph_.addBlock(*loopEntry);
+        // The header is inside the loop, hence one deeper than the stack was.
+        (*loopEntry)->setLoopDepth(loopStack_.length());
+        curBlock_->end(MGoto::New(*loopEntry));
+        curBlock_ = *loopEntry;
+        return true;
+    }
+
+    // Emit the condition test of a while/for loop and continue into the body.
+    // A constant-true condition (e.g. while(1)) gets an unconditional goto and
+    // no after-loop block (*afterLoop == NULL).
+    bool branchAndStartLoopBody(MDefinition *cond, MBasicBlock **afterLoop)
+    {
+        if (!curBlock_) {
+            *afterLoop = NULL;
+            return true;
+        }
+        JS_ASSERT(curBlock_->loopDepth() > 0);
+        MBasicBlock *body;
+        if (!newBlock(curBlock_, &body))
+            return false;
+        if (cond->isConstant() && ToBoolean(cond->toConstant()->value())) {
+            *afterLoop = NULL;
+            curBlock_->end(MGoto::New(body));
+        } else {
+            // The after-loop block is outside the loop, one level shallower.
+            if (!newBlockWithDepth(curBlock_, curBlock_->loopDepth() - 1, afterLoop))
+                return false;
+            curBlock_->end(MTest::New(cond, body, *afterLoop));
+        }
+        curBlock_ = body;
+        return true;
+    }
+
+  private:
+    // Pop the innermost loop from both stacks. All continues targeting it
+    // must already have been bound (asserted below).
+    ParseNode *popLoop()
+    {
+        ParseNode *pn = loopStack_.back();
+        JS_ASSERT(!unlabeledContinues_.has(pn));
+        loopStack_.popBack();
+        breakableStack_.popBack();
+        return pn;
+    }
+
+  public:
+    // Close a while/for loop: attach the backedge from the end of the body to
+    // the header, continue in the after-loop block (may be NULL for while(1)),
+    // and bind any unlabeled breaks to the current position.
+    bool closeLoop(MBasicBlock *loopEntry, MBasicBlock *afterLoop)
+    {
+        ParseNode *pn = popLoop();
+        if (!loopEntry) {
+            JS_ASSERT(!afterLoop);
+            JS_ASSERT(!curBlock_);
+            JS_ASSERT(!unlabeledBreaks_.has(pn));
+            return true;
+        }
+        JS_ASSERT(loopEntry->loopDepth() == loopStack_.length() + 1);
+        JS_ASSERT_IF(afterLoop, afterLoop->loopDepth() == loopStack_.length());
+        if (curBlock_) {
+            JS_ASSERT(curBlock_->loopDepth() == loopStack_.length() + 1);
+            curBlock_->end(MGoto::New(loopEntry));
+            loopEntry->setBackedge(curBlock_);
+        }
+        curBlock_ = afterLoop;
+        if (curBlock_)
+            mirGraph_.moveBlockToEnd(curBlock_);
+        return bindUnlabeledBreaks(pn);
+    }
+
+    // Close a do-while loop: test cond at the bottom of the body, adding the
+    // backedge to loopEntry when the condition can be true. Constant
+    // conditions are specialized: do-while(1) needs no after-loop block on
+    // this path; do-while(0) needs no backedge. Unlabeled breaks are then
+    // bound to the position after the loop.
+    bool branchAndCloseDoWhileLoop(MDefinition *cond, MBasicBlock *loopEntry)
+    {
+        ParseNode *pn = popLoop();
+        if (!loopEntry) {
+            JS_ASSERT(!curBlock_);
+            JS_ASSERT(!unlabeledBreaks_.has(pn));
+            return true;
+        }
+        JS_ASSERT(loopEntry->loopDepth() == loopStack_.length() + 1);
+        if (curBlock_) {
+            JS_ASSERT(curBlock_->loopDepth() == loopStack_.length() + 1);
+            if (cond->isConstant()) {
+                if (ToBoolean(cond->toConstant()->value())) {
+                    // Always loops back: code after the loop is dead.
+                    curBlock_->end(MGoto::New(loopEntry));
+                    loopEntry->setBackedge(curBlock_);
+                    curBlock_ = NULL;
+                } else {
+                    // Never loops back: fall through, no backedge.
+                    MBasicBlock *afterLoop;
+                    if (!newBlock(curBlock_, &afterLoop))
+                        return false;
+                    curBlock_->end(MGoto::New(afterLoop));
+                    curBlock_ = afterLoop;
+                }
+            } else {
+                MBasicBlock *afterLoop;
+                if (!newBlock(curBlock_, &afterLoop))
+                    return false;
+                curBlock_->end(MTest::New(cond, loopEntry, afterLoop));
+                loopEntry->setBackedge(curBlock_);
+                curBlock_ = afterLoop;
+            }
+        }
+        return bindUnlabeledBreaks(pn);
+    }
+
+    // Bind all continues targeting loop pn (unlabeled, plus any of the given
+    // labels) so they jump to the current position (the loop's update/test).
+    bool bindContinues(ParseNode *pn, const LabelVector *maybeLabels)
+    {
+        if (UnlabeledBlockMap::Ptr p = unlabeledContinues_.lookup(pn)) {
+            if (!bindBreaksOrContinues(&p->value))
+                return false;
+            unlabeledContinues_.remove(p);
+        }
+        return bindLabeledBreaksOrContinues(maybeLabels, &labeledContinues_);
+    }
+
+    // Bind breaks that targeted any of the given labels to the current position.
+    bool bindLabeledBreaks(const LabelVector *maybeLabels)
+    {
+        return bindLabeledBreaksOrContinues(maybeLabels, &labeledBreaks_);
+    }
+
+    // Record a break from the current block: labeled breaks key on the label,
+    // unlabeled ones on the innermost breakable statement.
+    bool addBreak(PropertyName *maybeLabel) {
+        if (!maybeLabel)
+            return addBreakOrContinue(breakableStack_.back(), &unlabeledBreaks_);
+        return addBreakOrContinue(maybeLabel, &labeledBreaks_);
+    }
+
+    // Record a continue from the current block: labeled continues key on the
+    // label, unlabeled ones on the innermost enclosing loop.
+    bool addContinue(PropertyName *maybeLabel) {
+        if (!maybeLabel)
+            return addBreakOrContinue(loopStack_.back(), &unlabeledContinues_);
+        return addBreakOrContinue(maybeLabel, &labeledContinues_);
+    }
+
+    // Begin a switch: end the current block with a table switch over
+    // [low, high]. Cases are attached later; until the first case starts,
+    // emission is suspended (curBlock_ == NULL).
+    bool startSwitch(ParseNode *pn, MDefinition *expr, int32_t low, int32_t high,
+                     MBasicBlock **switchBlock)
+    {
+        if (!breakableStack_.append(pn))
+            return false;
+        if (!curBlock_) {
+            *switchBlock = NULL;
+            return true;
+        }
+        curBlock_->end(MTableSwitch::New(expr, low, high));
+        *switchBlock = curBlock_;
+        curBlock_ = NULL;
+        return true;
+    }
+
+    // Begin a switch case: create its block as a successor of the switch.
+    // If the previous case fell off its end (curBlock_ != NULL), add a
+    // fall-through edge into the new case.
+    bool startSwitchCase(MBasicBlock *switchBlock, MBasicBlock **next)
+    {
+        if (!switchBlock) {
+            *next = NULL;
+            return true;
+        }
+        if (!newBlock(switchBlock, next))
+            return false;
+        if (curBlock_) {
+            curBlock_->end(MGoto::New(*next));
+            (*next)->addPredecessor(curBlock_);
+        }
+        curBlock_ = *next;
+        return true;
+    }
+
+    // Begin the default case. Any table entries with no explicit case get a
+    // trampoline block that jumps to the default, so every entry in the
+    // table has a target.
+    bool startSwitchDefault(MBasicBlock *switchBlock, CaseVector *cases, MBasicBlock **defaultBlock)
+    {
+        if (!startSwitchCase(switchBlock, defaultBlock))
+            return false;
+        if (!*defaultBlock)
+            return true;
+        for (unsigned i = 0; i < cases->length(); i++) {
+            if (!(*cases)[i]) {
+                MBasicBlock *bb;
+                if (!newBlock(switchBlock, &bb))
+                    return false;
+                bb->end(MGoto::New(*defaultBlock));
+                (*defaultBlock)->addPredecessor(bb);
+                (*cases)[i] = bb;
+            }
+        }
+        mirGraph_.moveBlockToEnd(*defaultBlock);
+        return true;
+    }
+
+    // Finish a switch: wire the table's default and case targets, continue
+    // after the last case if it fell through, and bind unlabeled breaks.
+    bool joinSwitch(MBasicBlock *switchBlock, const CaseVector &cases, MBasicBlock *defaultBlock)
+    {
+        ParseNode *pn = breakableStack_.popCopy();
+        if (!switchBlock)
+            return true;
+        MTableSwitch *mir = switchBlock->lastIns()->toTableSwitch();
+        mir->addDefault(defaultBlock);
+        for (unsigned i = 0; i < cases.length(); i++)
+            mir->addCase(cases[i]);
+        if (curBlock_) {
+            // The last case fell off its end; continue in a fresh block.
+            MBasicBlock *next;
+            if (!newBlock(curBlock_, &next))
+                return false;
+            curBlock_->end(MGoto::New(next));
+            curBlock_ = next;
+        }
+        return bindUnlabeledBreaks(pn);
+    }
+
+    /*************************************************************************/
+  private:
+    // Create a new basic block with pred as predecessor at the given loop
+    // depth and add it to the graph. asm.js MIR has no bytecode, so pc is NULL.
+    bool newBlockWithDepth(MBasicBlock *pred, unsigned loopDepth, MBasicBlock **block)
+    {
+        *block = MBasicBlock::New(mirGraph_, compileInfo_, pred, /* pc = */ NULL, MBasicBlock::NORMAL);
+        if (!*block)
+            return false;
+        mirGraph_.addBlock(*block);
+        (*block)->setLoopDepth(loopDepth);
+        return true;
+    }
+
+    // Create a block at the current loop nesting depth.
+    bool newBlock(MBasicBlock *pred, MBasicBlock **block)
+    {
+        return newBlockWithDepth(pred, loopStack_.length(), block);
+    }
+
+    // Route every recorded break/continue block in preds to the current
+    // position. If curBlock_ is still empty it can be reused as the target
+    // directly; otherwise a fresh (empty) join block is created and becomes
+    // the new current block, preserving the invariant asserted below.
+    bool bindBreaksOrContinues(BlockVector *preds)
+    {
+        for (unsigned i = 0; i < preds->length(); i++) {
+            MBasicBlock *pred = (*preds)[i];
+            if (curBlock_ && curBlock_->begin() == curBlock_->end()) {
+                pred->end(MGoto::New(curBlock_));
+                curBlock_->addPredecessor(pred);
+            } else {
+                MBasicBlock *next;
+                if (!newBlock(pred, &next))
+                    return false;
+                pred->end(MGoto::New(next));
+                if (curBlock_) {
+                    curBlock_->end(MGoto::New(next));
+                    next->addPredecessor(curBlock_);
+                }
+                curBlock_ = next;
+            }
+            JS_ASSERT(curBlock_->begin() == curBlock_->end());
+        }
+        preds->clear();
+        return true;
+    }
+
+    // For each label on the statement just finished, bind any breaks or
+    // continues (per the given map) that targeted that label.
+    bool bindLabeledBreaksOrContinues(const LabelVector *maybeLabels, LabeledBlockMap *map)
+    {
+        if (!maybeLabels)
+            return true;
+        const LabelVector &labels = *maybeLabels;
+        for (unsigned i = 0; i < labels.length(); i++) {
+            if (LabeledBlockMap::Ptr p = map->lookup(labels[i])) {
+                if (!bindBreaksOrContinues(&p->value))
+                    return false;
+                map->remove(p);
+            }
+        }
+        return true;
+    }
+
+    // Record that the current block ends with a break/continue keyed on
+    // 'key' (a label or the target statement's ParseNode). The block is
+    // stashed in the map for later binding; emission goes dead until the
+    // target position is reached.
+    template <class Key, class Map>
+    bool addBreakOrContinue(Key key, Map *map)
+    {
+        if (!curBlock_)
+            return true;
+        typename Map::AddPtr p = map->lookupForAdd(key);
+        if (!p) {
+            BlockVector empty(m().cx());
+            if (!map->add(p, key, Move(empty)))
+                return false;
+        }
+        if (!p->value.append(curBlock_))
+            return false;
+        curBlock_ = NULL;
+        return true;
+    }
+
+    // Bind all unlabeled breaks recorded for statement pn to the current
+    // position (called when the breakable statement is finished).
+    bool bindUnlabeledBreaks(ParseNode *pn)
+    {
+        if (UnlabeledBlockMap::Ptr p = unlabeledBreaks_.lookup(pn)) {
+            if (!bindBreaksOrContinues(&p->value))
+                return false;
+            unlabeledBreaks_.remove(p);
+        }
+        return true;
+    }
+};
+
+/*****************************************************************************/
+// An AsmJSModule contains the persistent results of asm.js module compilation,
+// viz., the jit code and dynamic link information.
+//
+// An AsmJSModule object is created at the end of module compilation and
+// subsequently owned by an AsmJSModuleClass JSObject.
+
+static void AsmJSModuleObject_finalize(FreeOp *fop, RawObject obj);
+static void AsmJSModuleObject_trace(JSTracer *trc, JSRawObject obj);
+
+// Reserved slot holding the AsmJSModule* as a private value.
+static const unsigned ASM_CODE_RESERVED_SLOT = 0;
+static const unsigned ASM_CODE_NUM_RESERVED_SLOTS = 1;
+
+// Class of the anonymous object that owns a compiled AsmJSModule: the
+// finalizer deletes the module and the trace hook marks its GC things.
+static Class AsmJSModuleClass = {
+    "AsmJSModuleObject",
+    JSCLASS_IS_ANONYMOUS | JSCLASS_IMPLEMENTS_BARRIERS |
+    JSCLASS_HAS_RESERVED_SLOTS(ASM_CODE_NUM_RESERVED_SLOTS),
+    JS_PropertyStub,         /* addProperty */
+    JS_PropertyStub,         /* delProperty */
+    JS_PropertyStub,         /* getProperty */
+    JS_StrictPropertyStub,   /* setProperty */
+    JS_EnumerateStub,
+    JS_ResolveStub,
+    NULL,                    /* convert     */
+    AsmJSModuleObject_finalize,
+    NULL,                    /* checkAccess */
+    NULL,                    /* call        */
+    NULL,                    /* hasInstance */
+    NULL,                    /* construct   */
+    AsmJSModuleObject_trace
+};
+
+// Recover the AsmJSModule owned by an AsmJSModuleClass object; the module
+// pointer lives in the object's reserved slot as a private value.
+AsmJSModule &
+js::AsmJSModuleObjectToModule(JSObject *obj)
+{
+    JS_ASSERT(obj->getClass() == &AsmJSModuleClass);
+    void *ptr = obj->getReservedSlot(ASM_CODE_RESERVED_SLOT).toPrivate();
+    return *static_cast<AsmJSModule *>(ptr);
+}
+
+// Finalizer: the object owns its AsmJSModule, so destroy it here.
+static void
+AsmJSModuleObject_finalize(FreeOp *fop, RawObject obj)
+{
+    AsmJSModule &module = AsmJSModuleObjectToModule(obj);
+    fop->delete_(&module);
+}
+
+// Trace hook: delegate marking of the module's GC things to the module.
+static void
+AsmJSModuleObject_trace(JSTracer *trc, JSRawObject obj)
+{
+    AsmJSModule &module = AsmJSModuleObjectToModule(obj);
+    module.trace(trc);
+}
+
+// Create the anonymous object that takes ownership of a compiled module.
+// On success, ownership is transferred out of the scoped pointer (forget())
+// into the object's reserved slot; on failure the scoped pointer still owns
+// the module and will delete it.
+static JSObject *
+NewAsmJSModuleObject(JSContext *cx, ScopedJSDeletePtr<AsmJSModule> *module)
+{
+    JSObject *obj = NewObjectWithGivenProto(cx, &AsmJSModuleClass, NULL, NULL);
+    if (!obj)
+        return NULL;
+
+    obj->setReservedSlot(ASM_CODE_RESERVED_SLOT, PrivateValue(module->forget()));
+    return obj;
+}
+
+/*****************************************************************************/
+// asm.js type-checking and code-generation algorithm
+
+// 'arguments' and 'eval' may not be used as names anywhere in asm.js code.
+static bool
+CheckIdentifier(ModuleCompiler &m, PropertyName *name, ParseNode *nameNode)
+{
+    JSAtomState &names = m.cx()->names();
+    if (name == names.arguments || name == names.eval)
+        return m.fail("disallowed asm.js parameter name", nameNode);
+    return true;
+}
+
+// A module-level name must be a legal identifier and must not collide with
+// the module function, any of its three parameters, or a previously-declared
+// global.
+static bool
+CheckModuleLevelName(ModuleCompiler &m, PropertyName *name, ParseNode *nameNode)
+{
+    if (!CheckIdentifier(m, name, nameNode))
+        return false;
+
+    bool taken = name == m.moduleFunctionName() ||
+                 name == m.globalArgumentName() ||
+                 name == m.importArgumentName() ||
+                 name == m.bufferArgumentName() ||
+                 m.lookupGlobal(name);
+    if (taken)
+        return m.fail("Duplicate names not allowed", nameNode);
+
+    return true;
+}
+
+// Validate the shape of a function: no rest parameter and a real statement
+// body (not an expression closure). On success, *stmtIter points at the
+// first statement of the body.
+static bool
+CheckFunctionHead(ModuleCompiler &m, ParseNode *fn, ParseNode **stmtIter)
+{
+    if (FunctionObject(fn)->hasRest())
+        return m.fail("rest args not allowed in asm.js", fn);
+    if (!FunctionHasStatementList(fn))
+        return m.fail("expression closures not allowed in asm.js", fn);
+
+    *stmtIter = ListHead(FunctionStatementList(fn));
+    return true;
+}
+
+// Validate one formal parameter: it must be a fresh binding (not shadowed),
+// have no default value, and be a legal identifier. On success, *name is the
+// argument's name.
+static bool
+CheckArgument(ModuleCompiler &m, ParseNode *arg, PropertyName **name)
+{
+    if (!IsDefinition(arg))
+        return m.fail("overlapping argument names not allowed", arg);
+
+    if (MaybeDefinitionInitializer(arg))
+        return m.fail("default arguments not allowed", arg);
+
+    if (!CheckIdentifier(m, arg->name(), arg))
+        return false;
+
+    *name = arg->name();
+    return true;
+}
+
+// A module parameter must pass the generic argument checks and also be a
+// unique module-level name.
+static bool
+CheckModuleArgument(ModuleCompiler &m, ParseNode *arg, PropertyName **name)
+{
+    return CheckArgument(m, arg, name) &&
+           CheckModuleLevelName(m, *name, arg);
+}
+
+// Validate the module function's formals and record their names: up to three
+// parameters, conventionally (global, import, buffer). Missing trailing
+// parameters are recorded as NULL names.
+static bool
+CheckModuleArguments(ModuleCompiler &m, ParseNode *fn)
+{
+    unsigned numFormals;
+    ParseNode *arg1 = FunctionArgsList(fn, &numFormals);
+    ParseNode *arg2 = arg1 ? NextNode(arg1) : NULL;
+    ParseNode *arg3 = arg2 ? NextNode(arg2) : NULL;
+
+    // Fixed diagnostic grammar (was "modules takes at most 3 argument.").
+    if (numFormals > 3)
+        return m.fail("asm.js modules take at most 3 arguments.", fn);
+
+    PropertyName *arg1Name = NULL;
+    if (numFormals >= 1 && !CheckModuleArgument(m, arg1, &arg1Name))
+        return false;
+    m.initGlobalArgumentName(arg1Name);
+
+    PropertyName *arg2Name = NULL;
+    if (numFormals >= 2 && !CheckModuleArgument(m, arg2, &arg2Name))
+        return false;
+    m.initImportArgumentName(arg2Name);
+
+    PropertyName *arg3Name = NULL;
+    if (numFormals >= 3 && !CheckModuleArgument(m, arg3, &arg3Name))
+        return false;
+    m.initBufferArgumentName(arg3Name);
+
+    return true;
+}
+
+// The first statement of an asm.js module must be exactly the 'use asm'
+// directive (an expression statement whose expression is that string
+// literal). On success, *stmtIter advances past the directive.
+static bool
+SkipUseAsmDirective(ModuleCompiler &m, ParseNode **stmtIter)
+{
+    ParseNode *firstStatement = *stmtIter;
+
+    if (!IsExpressionStatement(firstStatement))
+        return m.fail("No funny stuff before the 'use asm' directive", firstStatement);
+
+    ParseNode *expr = ExpressionStatementExpr(firstStatement);
+    if (!expr || !expr->isKind(PNK_STRING))
+        return m.fail("No funny stuff before the 'use asm' directive", firstStatement);
+
+    if (StringAtom(expr) != m.cx()->names().useAsm)
+        return m.fail("asm.js precludes other directives", firstStatement);
+
+    *stmtIter = NextNode(firstStatement);
+    return true;
+}
+
+// Handle 'var x = <literal>': the literal's numeric class determines whether
+// the global is an int or double variable.
+static bool
+CheckGlobalVariableInitConstant(ModuleCompiler &m, PropertyName *varName, ParseNode *initNode)
+{
+    NumLit literal = ExtractNumericLiteral(initNode);
+    VarType type;
+    switch (literal.which()) {
+      case NumLit::Fixnum:
+      case NumLit::NegativeInt:
+      case NumLit::BigUnsigned:
+        type = VarType::Int;
+        break;
+      case NumLit::Double:
+        type = VarType::Double;
+        break;
+      case NumLit::OutOfRangeInt:
+        return m.fail("Global initializer is out of representable integer range", initNode);
+    }
+    return m.addGlobalVarInitConstant(varName, type, literal.value());
+}
+
+// Parse a coercion expression: 'expr|0' denotes ToInt32, '+expr' denotes
+// ToNumber. On success, *coercion is set and, if requested, *coercedExpr is
+// the inner expression being coerced.
+static bool
+CheckTypeAnnotation(ModuleCompiler &m, ParseNode *coercionNode, AsmJSCoercion *coercion,
+                    ParseNode **coercedExpr = NULL)
+{
+    switch (coercionNode->getKind()) {
+      case PNK_BITOR: {
+        ParseNode *rhs = BinaryRight(coercionNode);
+
+        // The right operand must be the literal 0 exactly.
+        if (!IsNumericLiteral(rhs))
+            return m.fail("Must use |0 for argument/return coercion.", rhs);
+
+        NumLit rhsLiteral = ExtractNumericLiteral(rhs);
+        if (rhsLiteral.which() != NumLit::Fixnum || rhsLiteral.toInt32() != 0)
+            return m.fail("Must use |0 for argument/return coercion.", rhs);
+
+        *coercion = AsmJS_ToInt32;
+        if (coercedExpr)
+            *coercedExpr = BinaryLeft(coercionNode);
+        return true;
+      }
+      case PNK_POS: {
+        *coercion = AsmJS_ToNumber;
+        if (coercedExpr)
+            *coercedExpr = UnaryKid(coercionNode);
+        return true;
+      }
+      default:;
+    }
+
+    return m.fail("in coercion expression, the expression must be of the form +x or x|0", coercionNode);
+}
+
+// Handle 'var x = +import.y' or 'var x = import.y|0': a global variable
+// initialized by coercing a property of the import object.
+static bool
+CheckGlobalVariableInitImport(ModuleCompiler &m, PropertyName *varName, ParseNode *initNode)
+{
+    AsmJSCoercion coercion;
+    ParseNode *coercedExpr;
+    if (!CheckTypeAnnotation(m, initNode, &coercion, &coercedExpr))
+        return false;
+
+    if (!coercedExpr->isKind(PNK_DOT))
+        return m.fail("Bad global variable import expression", coercedExpr);
+
+    ParseNode *base = DotBase(coercedExpr);
+    PropertyName *field = DotMember(coercedExpr);
+
+    // The base must be exactly the module's import parameter.
+    if (!IsUseOfName(base, m.importArgumentName()))
+        return m.fail("Expecting c.y where c is the import parameter", coercedExpr);
+
+    return m.addGlobalVarImport(varName, field, coercion);
+}
+
+// Handle 'var x = new global.XYZArray(buffer)': declare a typed-array view
+// over the module's heap buffer. The constructor must be a typed-array
+// constructor property of the global parameter, applied to exactly the
+// buffer parameter.
+// NOTE(review): the 'first' parameter is unused here — confirm whether a
+// first-view-only restriction was intended at this call site.
+static bool
+CheckNewArrayView(ModuleCompiler &m, PropertyName *varName, ParseNode *newExpr, bool first)
+{
+    ParseNode *ctorExpr = ListHead(newExpr);
+    if (!ctorExpr->isKind(PNK_DOT))
+        return m.fail("Only valid 'new' import is 'new global.XYZArray(buf)'", ctorExpr);
+
+    ParseNode *base = DotBase(ctorExpr);
+    PropertyName *field = DotMember(ctorExpr);
+
+    if (!IsUseOfName(base, m.globalArgumentName()))
+        return m.fail("Expecting global.y", base);
+
+    ParseNode *bufArg = NextNode(ctorExpr);
+    if (!bufArg)
+        return m.fail("Constructor needs an argument", ctorExpr);
+
+    if (NextNode(bufArg) != NULL)
+        return m.fail("Only one argument may be passed to a typed array constructor", bufArg);
+
+    if (!IsUseOfName(bufArg, m.bufferArgumentName()))
+        return m.fail("Argument to typed array constructor must be ArrayBuffer name", bufArg);
+
+    // Map the constructor name to the corresponding view type.
+    JSAtomState &names = m.cx()->names();
+    ArrayBufferView::ViewType type;
+    if (field == names.Int8Array)
+        type = ArrayBufferView::TYPE_INT8;
+    else if (field == names.Uint8Array)
+        type = ArrayBufferView::TYPE_UINT8;
+    else if (field == names.Int16Array)
+        type = ArrayBufferView::TYPE_INT16;
+    else if (field == names.Uint16Array)
+        type = ArrayBufferView::TYPE_UINT16;
+    else if (field == names.Int32Array)
+        type = ArrayBufferView::TYPE_INT32;
+    else if (field == names.Uint32Array)
+        type = ArrayBufferView::TYPE_UINT32;
+    else if (field == names.Float32Array)
+        type = ArrayBufferView::TYPE_FLOAT32;
+    else if (field == names.Float64Array)
+        type = ArrayBufferView::TYPE_FLOAT64;
+    else
+        return m.fail("could not match typed array name", ctorExpr);
+
+    return m.addArrayView(varName, type, field);
+}
+
+// Handle 'var x = <dotted expression>': either global.Math.<fn> (a standard
+// Math builtin), global.NaN / global.Infinity (standard constants), or
+// import.<name> (an FFI function).
+static bool
+CheckGlobalDotImport(ModuleCompiler &m, PropertyName *varName, ParseNode *initNode)
+{
+    ParseNode *base = DotBase(initNode);
+    PropertyName *field = DotMember(initNode);
+
+    if (base->isKind(PNK_DOT)) {
+        // Two-level access: must be exactly global.Math.<field>.
+        ParseNode *global = DotBase(base);
+        PropertyName *math = DotMember(base);
+        if (!IsUseOfName(global, m.globalArgumentName()) || math != m.cx()->names().Math)
+            return m.fail("Expecting global.Math", base);
+
+        AsmJSMathBuiltin mathBuiltin;
+        if (!m.lookupStandardLibraryMathName(field, &mathBuiltin))
+            return m.fail("Does not match a standard Math builtin", initNode);
+
+        return m.addMathBuiltin(varName, mathBuiltin, field);
+    }
+
+    if (IsUseOfName(base, m.globalArgumentName())) {
+        if (field == m.cx()->names().NaN)
+            return m.addGlobalConstant(varName, js_NaN, field);
+        if (field == m.cx()->names().Infinity)
+            return m.addGlobalConstant(varName, js_PositiveInfinity, field);
+        return m.fail("Does not match a standard global constant", initNode);
+    }
+
+    if (IsUseOfName(base, m.importArgumentName()))
+        return m.addFFI(varName, field);
+
+    return m.fail("Expecting c.y where c is either the global or import parameter", initNode);
+}
+
+// Validate one module-level 'var' declarator, dispatching on the shape of
+// its initializer: numeric literal, coerced import, 'new' array view, or
+// dotted import. The numeric-literal test runs first since literals are not
+// distinguished by node kind alone.
+static bool
+CheckModuleGlobal(ModuleCompiler &m, ParseNode *var, bool first)
+{
+    if (!IsDefinition(var))
+        return m.fail("Import variable names must be unique", var);
+
+    if (!CheckModuleLevelName(m, var->name(), var))
+        return false;
+
+    ParseNode *initNode = MaybeDefinitionInitializer(var);
+    if (!initNode)
+        return m.fail("Module import needs initializer", var);
+
+    if (IsNumericLiteral(initNode))
+        return CheckGlobalVariableInitConstant(m, var->name(), initNode);
+
+    if (initNode->isKind(PNK_BITOR) || initNode->isKind(PNK_POS))
+        return CheckGlobalVariableInitImport(m, var->name(), initNode);
+
+    if (initNode->isKind(PNK_NEW))
+        return CheckNewArrayView(m, var->name(), initNode, first);
+
+    if (initNode->isKind(PNK_DOT))
+        return CheckGlobalDotImport(m, var->name(), initNode);
+
+    return m.fail("Unsupported import expression", initNode);
+
+}
+
+// Validate the run of leading 'var' statements in the module body; each
+// declarator in each statement is one global/import. On success, *stmtIter
+// points at the first non-'var' statement.
+static bool
+CheckModuleGlobals(ModuleCompiler &m, ParseNode **stmtIter)
+{
+    ParseNode *stmt = SkipEmptyStatements(*stmtIter);
+    bool first = true;
+
+    while (stmt && stmt->isKind(PNK_VAR)) {
+        for (ParseNode *var = VarListHead(stmt); var; var = NextNode(var)) {
+            if (!CheckModuleGlobal(m, var, first))
+                return false;
+            first = false;
+        }
+        stmt = NextNonEmptyStatement(stmt);
+    }
+
+    *stmtIter = stmt;
+    return true;
+}
+
+// Each formal parameter must be followed, in order, by a declaration
+// statement of the form 'arg = arg|0;' or 'arg = +arg;'; the coercion
+// determines the parameter's asm.js type, returned in *type.
+static bool
+CheckArgumentType(ModuleCompiler &m, ParseNode *fn, PropertyName *argName, ParseNode *stmt,
+                  VarType *type)
+{
+    if (!stmt)
+        return m.fail("Missing parameter type declaration statements", fn);
+
+    if (!IsExpressionStatement(stmt))
+        return m.fail("Expecting expression statement type of the form 'arg = coercion;'", stmt);
+
+    ParseNode *initNode = ExpressionStatementExpr(stmt);
+    if (!initNode || !initNode->isKind(PNK_ASSIGN))
+        return m.fail("Expecting expression statement type of the form 'arg = coercion;'", stmt);
+
+    ParseNode *argNode = BinaryLeft(initNode);
+    ParseNode *coercionNode = BinaryRight(initNode);
+
+    if (!IsUseOfName(argNode, argName))
+        return m.fail("left-hand side of 'arg = expr;' must be the name of an argument.", argNode);
+
+    ParseNode *coercedExpr;
+    AsmJSCoercion coercion;
+    if (!CheckTypeAnnotation(m, coercionNode, &coercion, &coercedExpr))
+        return false;
+
+    // Both sides of the assignment must name the same argument.
+    if (!IsUseOfName(coercedExpr, argName))
+        return m.fail("For argument type declaration, need 'x = coercion(x)'", coercedExpr);
+
+    *type = VarType(coercion);
+    return true;
+}
+
+// Walk the formals and the leading body statements in lockstep: each formal
+// must be distinct and have a matching type-declaration statement, whose
+// coercion is appended (as a MIRType) to argTypes. On success, *stmtIter
+// points past the last type declaration.
+static bool
+CheckArguments(ModuleCompiler &m, ParseNode *fn, MIRTypeVector *argTypes, ParseNode **stmtIter)
+{
+    ParseNode *stmt = *stmtIter;
+
+    unsigned numFormals;
+    ParseNode *argpn = FunctionArgsList(fn, &numFormals);
+
+    HashSet<PropertyName*> dupSet(m.cx());
+    if (!dupSet.init())
+        return false;
+
+    for (unsigned i = 0; i < numFormals; i++, argpn = NextNode(argpn), stmt = NextNode(stmt)) {
+        PropertyName *argName;
+        if (!CheckArgument(m, argpn, &argName))
+            return false;
+
+        if (dupSet.has(argName))
+            return m.fail("asm.js arguments must have distinct names", argpn);
+        if (!dupSet.putNew(argName))
+            return false;
+
+        VarType argType;
+        if (!CheckArgumentType(m, fn, argName, stmt, &argType))
+            return false;
+
+        if (!argTypes->append(argType.toMIRType()))
+            return false;
+    }
+
+    *stmtIter = stmt;
+    return true;
+}
+
+// Infer a function's return type from its final statement: no return (or a
+// bare 'return;') means void; 'return <literal>' gives signed/double per the
+// literal; otherwise the returned expression must carry a +/|0 coercion.
+static bool
+CheckReturnType(ModuleCompiler &m, ParseNode *fn, RetType *returnType)
+{
+    ParseNode *stmt = FunctionLastStatementOrNull(fn);
+    if (!stmt || !stmt->isKind(PNK_RETURN) || !UnaryKid(stmt)) {
+        *returnType = RetType::Void;
+        return true;
+    }
+
+    ParseNode *coercionNode = UnaryKid(stmt);
+
+    if (IsNumericLiteral(coercionNode)) {
+        switch (ExtractNumericLiteral(coercionNode).which()) {
+          case NumLit::BigUnsigned:
+          case NumLit::OutOfRangeInt:
+            return m.fail("Returned literal is out of integer range", coercionNode);
+          case NumLit::Fixnum:
+          case NumLit::NegativeInt:
+            *returnType = RetType::Signed;
+            break;
+          case NumLit::Double:
+            *returnType = RetType::Double;
+            break;
+        }
+    } else {
+        AsmJSCoercion coercion;
+        if (!CheckTypeAnnotation(m, coercionNode, &coercion))
+            return false;
+        *returnType = RetType(coercion);
+    }
+
+    // Return types must be representable at the JS boundary.
+    JS_ASSERT(returnType->toType().isExtern());
+    return true;
+}
+
+// Validates one module-level function's signature (name uniqueness, the
+// 'use asm'-style head, argument declarations, and return type) and
+// registers the resulting ModuleCompiler::Func with the module.
+static bool
+CheckFunctionSignature(ModuleCompiler &m, ParseNode *fn)
+{
+    PropertyName *name = FunctionName(fn);
+    if (!CheckModuleLevelName(m, name, fn))
+        return false;
+
+    // stmtIter is advanced through the function body as leading
+    // declaration statements are consumed.
+    ParseNode *stmtIter = NULL;
+
+    if (!CheckFunctionHead(m, fn, &stmtIter))
+        return false;
+
+    MIRTypeVector argTypes(m.cx());
+    if (!CheckArguments(m, fn, &argTypes, &stmtIter))
+        return false;
+
+    RetType returnType;
+    if (!CheckReturnType(m, fn, &returnType))
+        return false;
+
+    ModuleCompiler::Func func(fn, stmtIter, Move(argTypes), returnType);
+    if (!m.addFunction(Move(func)))
+        return false;
+
+    return true;
+}
+
+// Validates the signatures of the module's run of top-level function
+// definitions, advancing *stmtIter past them.
+static bool
+CheckFunctionSignatures(ModuleCompiler &m, ParseNode **stmtIter)
+{
+    ParseNode *fn = SkipEmptyStatements(*stmtIter);
+
+    for (; fn && fn->isKind(PNK_FUNCTION); fn = NextNonEmptyStatement(fn)) {
+        if (!CheckFunctionSignature(m, fn))
+            return false;
+    }
+
+    // NOTE(review): a PNK_NOP in this position appears to be what the parser
+    // leaves behind when a later function re-declares an earlier name —
+    // verify against Parser.cpp.
+    if (fn && fn->isKind(PNK_NOP))
+        return m.fail("duplicate function names are not allowed", fn);
+
+    *stmtIter = fn;
+    return true;
+}
+
+// Returns true iff |a| and |b| agree on return type, arity, and the type of
+// every formal argument (the requirement for sharing a function-pointer
+// table).
+static bool
+SameSignature(const ModuleCompiler::Func &a, const ModuleCompiler::Func &b)
+{
+    if (a.returnType() != b.returnType())
+        return false;
+    if (a.numArgs() != b.numArgs())
+        return false;
+    unsigned numArgs = a.numArgs();
+    for (unsigned argIndex = 0; argIndex < numArgs; argIndex++) {
+        if (a.argType(argIndex) != b.argType(argIndex))
+            return false;
+    }
+    return true;
+}
+
+// Validates one function-pointer table declaration:
+//   var tbl = [f, g, ...];
+// The initializer must be an array literal whose length is a power of two
+// and whose elements are previously-declared functions that all share one
+// signature.  On success the table is registered with the module.
+static bool
+CheckFuncPtrTable(ModuleCompiler &m, ParseNode *var)
+{
+    if (!IsDefinition(var))
+        return m.fail("Function-pointer table name must be unique", var);
+
+    PropertyName *name = var->name();
+
+    if (!CheckModuleLevelName(m, name, var))
+        return false;
+
+    ParseNode *arrayLiteral = MaybeDefinitionInitializer(var);
+    if (!arrayLiteral || !arrayLiteral->isKind(PNK_ARRAY))
+        return m.fail("Function-pointer table's initializer must be an array literal", var);
+
+    unsigned length = ListLength(arrayLiteral);
+
+    // Power-of-two length allows call sites to mask the index with
+    // 'length - 1' instead of bounds-checking.
+    if (!IsPowerOfTwo(length))
+        return m.fail("Function-pointer table's length must be a power of 2", arrayLiteral);
+
+    ModuleCompiler::FuncPtrVector funcPtrs(m.cx());
+    // First element's signature is the reference all others must match.
+    const ModuleCompiler::Func *firstFunction = NULL;
+
+    for (ParseNode *elem = ListHead(arrayLiteral); elem; elem = NextNode(elem)) {
+        if (!elem->isKind(PNK_NAME))
+            return m.fail("Function-pointer table's elements must be names of functions", elem);
+
+        PropertyName *funcName = elem->name();
+        const ModuleCompiler::Func *func = m.lookupFunction(funcName);
+        if (!func)
+            return m.fail("Function-pointer table's elements must be names of functions", elem);
+
+        if (firstFunction) {
+            if (!SameSignature(*firstFunction, *func))
+                return m.fail("All functions in table must have same signature", elem);
+        } else {
+            firstFunction = func;
+        }
+
+        if (!funcPtrs.append(func))
+            return false;
+    }
+
+    return m.addFuncPtrTable(name, Move(funcPtrs));
+}
+
+// Validates the module's run of function-pointer-table 'var' statements
+// (each statement may declare several tables), advancing *stmtIter past
+// them.
+static bool
+CheckFuncPtrTables(ModuleCompiler &m, ParseNode **stmtIter)
+{
+    ParseNode *stmt = SkipEmptyStatements(*stmtIter);
+
+    for (; stmt && stmt->isKind(PNK_VAR); stmt = NextNonEmptyStatement(stmt)) {
+        for (ParseNode *var = VarListHead(stmt); var; var = NextNode(var)) {
+            if (!CheckFuncPtrTable(m, var))
+                return false;
+        }
+    }
+
+    *stmtIter = stmt;
+    return true;
+}
+
+// Handles the 'return f;' export form: the returned name must be a declared
+// function, which is exported without a field name.
+static bool
+CheckModuleExportFunction(ModuleCompiler &m, ParseNode *returnExpr)
+{
+    if (!returnExpr->isKind(PNK_NAME))
+        return m.fail("an asm.js export statement must be of the form 'return name'", returnExpr);
+
+    PropertyName *funcName = returnExpr->name();
+
+    const ModuleCompiler::Func *func = m.lookupFunction(funcName);
+    if (!func)
+        return m.fail("exported function name not found", returnExpr);
+
+    return m.addExportedFunction(func, /* maybeFieldName = */ NULL);
+}
+
+// Handles the 'return {name: f, ...};' export form: each property must be a
+// normal (non-getter/setter) field whose initializer names a declared
+// function; each is exported under its field name.
+static bool
+CheckModuleExportObject(ModuleCompiler &m, ParseNode *object)
+{
+    JS_ASSERT(object->isKind(PNK_OBJECT));
+
+    for (ParseNode *pn = ListHead(object); pn; pn = NextNode(pn)) {
+        if (!IsNormalObjectField(m.cx(), pn))
+            return m.fail("Only normal object properties may be used in the export object literal", pn);
+
+        PropertyName *fieldName = ObjectNormalFieldName(m.cx(), pn);
+
+        ParseNode *initNode = ObjectFieldInitializer(pn);
+        if (!initNode->isKind(PNK_NAME))
+            return m.fail("Initializer of exported object literal must be name of function", initNode);
+
+        PropertyName *funcName = initNode->name();
+
+        const ModuleCompiler::Func *func = m.lookupFunction(funcName);
+        if (!func)
+            return m.fail("exported function name not found", initNode);
+
+        if (!m.addExportedFunction(func, fieldName))
+            return false;
+    }
+
+    return true;
+}
+
+// Validates the module's final export statement: a 'return' of either a
+// single function name or an object literal of functions.  Advances
+// *stmtIter past the return statement.
+static bool
+CheckModuleExports(ModuleCompiler &m, ParseNode *fn, ParseNode **stmtIter)
+{
+    ParseNode *returnNode = SkipEmptyStatements(*stmtIter);
+
+    if (!returnNode || !returnNode->isKind(PNK_RETURN))
+        return m.fail("asm.js must end with a return export statement", fn);
+
+    ParseNode *returnExpr = UnaryKid(returnNode);
+
+    if (!returnExpr)
+        return m.fail("an asm.js export statement must return something", returnNode);
+
+    // Two export forms: 'return {f1: f1, ...}' or 'return f'.
+    if (returnExpr->isKind(PNK_OBJECT)) {
+        if (!CheckModuleExportObject(m, returnExpr))
+            return false;
+    } else {
+        if (!CheckModuleExportFunction(m, returnExpr))
+            return false;
+    }
+
+    *stmtIter = NextNonEmptyStatement(returnNode);
+    return true;
+}
+
+static bool
+CheckExpr(FunctionCompiler &f, ParseNode *expr, Use use, MDefinition **def, Type *type);
+
+// Emits a MIR constant for a numeric literal and reports its asm.js type.
+// Only literals outside the representable integer range are rejected.
+static bool
+CheckNumericLiteral(FunctionCompiler &f, ParseNode *num, MDefinition **def, Type *type)
+{
+    JS_ASSERT(IsNumericLiteral(num));
+    NumLit literal = ExtractNumericLiteral(num);
+
+    switch (literal.which()) {
+      case NumLit::Fixnum:
+      case NumLit::NegativeInt:
+      case NumLit::BigUnsigned:
+      case NumLit::Double:
+        break;
+      case NumLit::OutOfRangeInt:
+        return f.fail("Numeric literal out of representable integer range", num);
+    }
+
+    *type = literal.type();
+    *def = f.constant(literal.value());
+    return true;
+}
+
+// Type-checks a bare name reference.  Locals shadow globals; of the global
+// kinds, only constants and variables may be read as expressions.
+static bool
+CheckVarRef(FunctionCompiler &f, ParseNode *varRef, MDefinition **def, Type *type)
+{
+    PropertyName *name = varRef->name();
+
+    // Locals are checked first so they shadow module-level globals.
+    if (const FunctionCompiler::Local *local = f.lookupLocal(name)) {
+        *def = f.getLocalDef(*local);
+        *type = local->type.toType();
+        return true;
+    }
+
+    if (const ModuleCompiler::Global *global = f.lookupGlobal(name)) {
+        switch (global->which()) {
+          case ModuleCompiler::Global::Constant:
+            // Constants are folded into the MIR graph as double literals.
+            *def = f.constant(DoubleValue(global->constant()));
+            *type = Type::Double;
+            break;
+          case ModuleCompiler::Global::Variable:
+            *def = f.loadGlobalVar(*global);
+            *type = global->varType().toType();
+            break;
+          case ModuleCompiler::Global::Function:
+          case ModuleCompiler::Global::FFI:
+          case ModuleCompiler::Global::MathBuiltin:
+          case ModuleCompiler::Global::FuncPtrTable:
+          case ModuleCompiler::Global::ArrayView:
+            return f.fail("Global may not be accessed by ordinary expressions", varRef);
+        }
+        return true;
+    }
+
+    return f.fail("Name not found in scope", varRef);
+}
+
+// Validates a heap access 'view[index]' and emits the byte-pointer
+// computation into *def.  The index must be either a constant, or
+// 'expr >> shift' where shift matches the view's element size (with the
+// plain-expression form allowed only for byte-sized views).
+static bool
+CheckArrayAccess(FunctionCompiler &f, ParseNode *elem, ArrayBufferView::ViewType *viewType,
+                 MDefinition **def)
+{
+    ParseNode *viewName = ElemBase(elem);
+    ParseNode *indexExpr = ElemIndex(elem);
+
+    if (!viewName->isKind(PNK_NAME))
+        return f.fail("Left-hand side of x[y] must be a name", viewName);
+
+    const ModuleCompiler::Global *global = f.lookupGlobal(viewName->name());
+    if (!global || global->which() != ModuleCompiler::Global::ArrayView)
+        return f.fail("Left-hand side of x[y] must be typed array view name", viewName);
+
+    *viewType = global->viewType();
+
+    // Constant index: fold the element-to-byte scaling at compile time.
+    uint32_t pointer;
+    if (IsLiteralUint32(indexExpr, &pointer)) {
+        pointer <<= TypedArrayShift(*viewType);
+        *def = f.constant(Int32Value(pointer));
+        return true;
+    }
+
+    MDefinition *pointerDef;
+    if (indexExpr->isKind(PNK_RSH)) {
+        ParseNode *shiftNode = BinaryRight(indexExpr);
+        ParseNode *pointerNode = BinaryLeft(indexExpr);
+
+        uint32_t shift;
+        if (!IsLiteralUint32(shiftNode, &shift) || shift != TypedArrayShift(*viewType))
+            return f.fail("The shift amount must be a constant matching the array "
+                          "element size", shiftNode);
+
+        Type pointerType;
+        if (!CheckExpr(f, pointerNode, Use::ToInt32, &pointerDef, &pointerType))
+            return false;
+
+        if (!pointerType.isIntish())
+            return f.fail("Pointer input must be intish", pointerNode);
+    } else {
+        // No explicit shift: only valid when the element size is one byte.
+        if (TypedArrayShift(*viewType) != 0)
+            return f.fail("The shift amount is 0 so this must be a Int8/Uint8 array", indexExpr);
+
+        Type pointerType;
+        if (!CheckExpr(f, indexExpr, Use::ToInt32, &pointerDef, &pointerType))
+            return false;
+
+        if (!pointerType.isInt())
+            return f.fail("Pointer input must be int", indexExpr);
+    }
+
+    // Mask off the low bits to account for clearing effect of a right shift
+    // followed by the left shift implicit in the array access. E.g., H32[i>>2]
+    // loses the low two bits.
+    int32_t mask = ~((uint32_t(1) << TypedArrayShift(*viewType)) - 1);
+    *def = f.bitwise<MBitAnd>(pointerDef, f.constant(Int32Value(mask)));
+    return true;
+}
+
+// Type-checks a heap load 'view[index]' and emits the load; the result type
+// is determined by the view's element type.
+static bool
+CheckArrayLoad(FunctionCompiler &f, ParseNode *elem, MDefinition **def, Type *type)
+{
+    ArrayBufferView::ViewType viewType;
+    MDefinition *pointerDef;
+    if (!CheckArrayAccess(f, elem, &viewType, &pointerDef))
+        return false;
+
+    *def = f.loadHeap(viewType, pointerDef);
+    *type = TypedArrayLoadType(viewType);
+    return true;
+}
+
+// Type-checks a heap store 'view[index] = expr' and emits the store.  The
+// stored value must be intish or double according to the view's element
+// type; the assignment expression's value/type is that of the RHS.
+static bool
+CheckStoreArray(FunctionCompiler &f, ParseNode *lhs, ParseNode *rhs, MDefinition **def, Type *type)
+{
+    ArrayBufferView::ViewType viewType;
+    MDefinition *pointerDef;
+    if (!CheckArrayAccess(f, lhs, &viewType, &pointerDef))
+        return false;
+
+    MDefinition *rhsDef;
+    Type rhsType;
+    if (!CheckExpr(f, rhs, Use::NoCoercion, &rhsDef, &rhsType))
+        return false;
+
+    switch (TypedArrayStoreType(viewType)) {
+      case ArrayStore_Intish:
+        if (!rhsType.isIntish())
+            return f.fail("Right-hand side of store must be intish", lhs);
+        break;
+      case ArrayStore_Double:
+        if (rhsType != Type::Double)
+            return f.fail("Right-hand side of store must be double", lhs);
+        break;
+    }
+
+    f.storeHeap(viewType, pointerDef, rhsDef);
+
+    // The assignment expression evaluates to the (unconverted) RHS value.
+    *def = rhsDef;
+    *type = rhsType;
+    return true;
+}
+
+// Type-checks 'name = expr' for a local or mutable global: the RHS type must
+// be a subtype of the target's declared type.  The assignment expression's
+// value/type is that of the RHS.
+static bool
+CheckAssignName(FunctionCompiler &f, ParseNode *lhs, ParseNode *rhs, MDefinition **def, Type *type)
+{
+    PropertyName *name = lhs->name();
+
+    MDefinition *rhsDef;
+    Type rhsType;
+    if (!CheckExpr(f, rhs, Use::NoCoercion, &rhsDef, &rhsType))
+        return false;
+
+    // Locals shadow globals, matching the lookup order in CheckVarRef.
+    if (const FunctionCompiler::Local *lhsVar = f.lookupLocal(name)) {
+        if (!(rhsType <= lhsVar->type))
+            return f.fail("Right-hand side of assignment must be subtype of left-hand side", lhs);
+        f.assign(*lhsVar, rhsDef);
+    } else if (const ModuleCompiler::Global *global = f.lookupGlobal(name)) {
+        if (global->which() != ModuleCompiler::Global::Variable)
+            return f.fail("Only global variables are mutable, not FFI functions etc", lhs);
+        if (!(rhsType <= global->varType()))
+            return f.fail("Right-hand side of assignment must be subtype of left-hand side", lhs);
+        f.storeGlobalVar(*global, rhsDef);
+    } else {
+        return f.fail("Variable name in left-hand side of assignment not found", lhs);
+    }
+
+    *def = rhsDef;
+    *type = rhsType;
+    return true;
+}
+
+// Dispatches an assignment expression on the shape of its left-hand side:
+// heap store ('view[i] = x') or variable assignment ('name = x').
+static bool
+CheckAssign(FunctionCompiler &f, ParseNode *assign, MDefinition **def, Type *type)
+{
+    JS_ASSERT(assign->isKind(PNK_ASSIGN));
+
+    ParseNode *lhs = BinaryLeft(assign);
+    ParseNode *rhs = BinaryRight(assign);
+
+    switch (lhs->getKind()) {
+      case PNK_ELEM:
+        return CheckStoreArray(f, lhs, rhs, def, type);
+      case PNK_NAME:
+        return CheckAssignName(f, lhs, rhs, def, type);
+      default:
+        return f.fail("Left-hand side of assignment must be a variable or heap", assign);
+    }
+}
+
+// Validates a Math.imul(x, y) call: exactly two intish arguments, producing
+// a truncating int32 multiply with signed result type.
+static bool
+CheckMathIMul(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type *type)
+{
+    if (CallArgListLength(call) != 2)
+        return f.fail("Math.imul must be passed 2 arguments", call);
+
+    ParseNode *lhs = CallArgList(call);
+    ParseNode *rhs = NextNode(lhs);
+
+    MDefinition *lhsDef;
+    Type lhsType;
+    if (!CheckExpr(f, lhs, Use::ToInt32, &lhsDef, &lhsType))
+        return false;
+
+    MDefinition *rhsDef;
+    Type rhsType;
+    if (!CheckExpr(f, rhs, Use::ToInt32, &rhsDef, &rhsType))
+        return false;
+
+    if (!lhsType.isIntish() || !rhsType.isIntish())
+        return f.fail("Math.imul calls must be passed 2 intish arguments", call);
+
+    *def = f.mul(lhsDef, rhsDef, MIRType_Int32, MMul::Integer);
+    *type = Type::Signed;
+    return true;
+}
+
+// Validates a Math.abs(x) call.  A signed argument yields an int32 abs with
+// unsigned result type (abs(INT32_MIN) stays INT32_MIN, which is only
+// representable as unsigned); a doublish argument yields a double abs.
+static bool
+CheckMathAbs(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type *type)
+{
+    if (CallArgListLength(call) != 1)
+        return f.fail("Math.abs must be passed 1 argument", call);
+
+    ParseNode *arg = CallArgList(call);
+
+    MDefinition *argDef;
+    Type argType;
+    if (!CheckExpr(f, arg, Use::ToNumber, &argDef, &argType))
+        return false;
+
+    if (argType.isSigned()) {
+        *def = f.unary<MAbs>(argDef, MIRType_Int32);
+        *type = Type::Unsigned;
+        return true;
+    }
+
+    if (argType.isDoublish()) {
+        *def = f.unary<MAbs>(argDef, MIRType_Double);
+        *type = Type::Double;
+        return true;
+    }
+
+    return f.fail("Math.abs must be passed either a signed or doublish argument", call);
+}
+
+// Type-checks every actual argument of a call under the given |use| and
+// marshals it into |args| via start/pass/finishCallArgs.  Void-typed
+// expressions may not be passed.
+static bool
+CheckCallArgs(FunctionCompiler &f, ParseNode *callNode, Use use, FunctionCompiler::Args *args)
+{
+    f.startCallArgs(args);
+
+    ParseNode *argNode = CallArgList(callNode);
+    for (unsigned i = 0; i < CallArgListLength(callNode); i++, argNode = NextNode(argNode)) {
+        MDefinition *argDef;
+        Type argType;
+        if (!CheckExpr(f, argNode, use, &argDef, &argType))
+            return false;
+
+        if (argType.isVoid())
+            return f.fail("Void cannot be passed as an argument", argNode);
+
+        if (!f.passArg(argDef, argType, args))
+            return false;
+    }
+
+    f.finishCallArgs(args);
+    return true;
+}
+
+// Validates a direct call to another asm.js function in the module: actual
+// argument count and types must match (be subtypes of) the callee's
+// signature; the result type is the callee's declared return type.
+static bool
+CheckInternalCall(FunctionCompiler &f, ParseNode *callNode, const ModuleCompiler::Func &callee,
+                  MDefinition **def, Type *type)
+{
+    FunctionCompiler::Args args(f);
+
+    if (!CheckCallArgs(f, callNode, Use::NoCoercion, &args))
+        return false;
+
+    if (args.length() != callee.numArgs())
+        return f.fail("Wrong number of arguments", callNode);
+
+    for (unsigned i = 0; i < args.length(); i++) {
+        if (!(args.type(i) <= callee.argType(i)))
+            return f.fail("actual arg type is not subtype of formal arg type", callNode);
+    }
+
+    if (!f.internalCall(callee, args, def))
+        return false;
+
+    *type = callee.returnType().toType();
+    return true;
+}
+
+// Validates an indirect call through a function-pointer table:
+//   tbl[index & mask](args...)
+// The mask must be the literal 'table length - 1', the index intish, and
+// the actuals must match the table's shared signature.
+static bool
+CheckFuncPtrCall(FunctionCompiler &f, ParseNode *callNode, MDefinition **def, Type *type)
+{
+    ParseNode *callee = CallCallee(callNode);
+    ParseNode *elemBase = ElemBase(callee);
+    ParseNode *indexExpr = ElemIndex(callee);
+
+    if (!elemBase->isKind(PNK_NAME))
+        return f.fail("Expecting name (of function-pointer array)", elemBase);
+
+    const ModuleCompiler::FuncPtrTable *table = f.m().lookupFuncPtrTable(elemBase->name());
+    if (!table)
+        return f.fail("Expecting name of function-pointer array", elemBase);
+
+    // The '& mask' makes the table access unconditionally in-bounds, so no
+    // runtime bounds check is needed.
+    if (!indexExpr->isKind(PNK_BITAND))
+        return f.fail("Function-pointer table index expression needs & mask", indexExpr);
+
+    ParseNode *indexNode = BinaryLeft(indexExpr);
+    ParseNode *maskNode = BinaryRight(indexExpr);
+
+    uint32_t mask;
+    if (!IsLiteralUint32(maskNode, &mask) || mask != table->mask())
+        return f.fail("Function-pointer table index mask must be the table length minus 1", maskNode);
+
+    MDefinition *indexDef;
+    Type indexType;
+    if (!CheckExpr(f, indexNode, Use::ToInt32, &indexDef, &indexType))
+        return false;
+
+    if (!indexType.isIntish())
+        return f.fail("Function-pointer table index expression must be intish", indexNode);
+
+    FunctionCompiler::Args args(f);
+
+    if (!CheckCallArgs(f, callNode, Use::NoCoercion, &args))
+        return false;
+
+    if (args.length() != table->sig().numArgs())
+        return f.fail("Wrong number of arguments", callNode);
+
+    for (unsigned i = 0; i < args.length(); i++) {
+        if (!(args.type(i) <= table->sig().argType(i)))
+            return f.fail("actual arg type is not subtype of formal arg type", callNode);
+    }
+
+    if (!f.funcPtrCall(*table, indexDef, args, def))
+        return false;
+
+    *type = table->sig().returnType().toType();
+    return true;
+}
+
+// Validates a call to an FFI (imported) function: every argument must be an
+// extern type, an exit record is registered for the (callee, signature, use)
+// combination, and the result type is derived from the call-site coercion.
+static bool
+CheckFFICall(FunctionCompiler &f, ParseNode *callNode, unsigned ffiIndex, Use use,
+             MDefinition **def, Type *type)
+{
+    FunctionCompiler::Args args(f);
+
+    if (!CheckCallArgs(f, callNode, Use::NoCoercion, &args))
+        return false;
+
+    MIRTypeVector argMIRTypes(f.cx());
+    for (unsigned i = 0; i < args.length(); i++) {
+        if (!args.type(i).isExtern())
+            return f.fail("args to FFI call must be <: extern", callNode);
+        if (!argMIRTypes.append(args.type(i).toMIRType()))
+            return false;
+    }
+
+    unsigned exitIndex;
+    if (!f.m().addExit(ffiIndex, CallCallee(callNode)->name(), Move(argMIRTypes), use, &exitIndex))
+        return false;
+
+    if (!f.ffiCall(exitIndex, args, use.toMIRType(), def))
+        return false;
+
+    *type = use.toFFIReturnType();
+    return true;
+}
+
+// Converts a unary double->double math function pointer to the void* form
+// expected by FunctionCompiler::builtinCall.
+static inline void *
+UnaryMathFunCast(double (*pf)(double))
+{
+    void *funcData = JS_FUNC_TO_DATA_PTR(void*, pf);
+    return funcData;
+}
+
+// Converts a binary (double, double)->double math function pointer to the
+// void* form expected by FunctionCompiler::builtinCall.
+static inline void *
+BinaryMathFunCast(double (*pf)(double, double))
+{
+    void *funcData = JS_FUNC_TO_DATA_PTR(void*, pf);
+    return funcData;
+}
+
+// Validates a call to a Math builtin.  imul and abs have bespoke typing
+// rules; the remaining builtins are uniform: |arity| doublish arguments,
+// double result, lowered to a direct call of the C math routine.
+static bool
+CheckMathBuiltinCall(FunctionCompiler &f, ParseNode *callNode, AsmJSMathBuiltin mathBuiltin,
+                     MDefinition **def, Type *type)
+{
+    unsigned arity = 0;
+    void *callee = NULL;
+    switch (mathBuiltin) {
+      case AsmJSMathBuiltin_imul:  return CheckMathIMul(f, callNode, def, type);
+      case AsmJSMathBuiltin_abs:   return CheckMathAbs(f, callNode, def, type);
+      case AsmJSMathBuiltin_sin:   arity = 1; callee = UnaryMathFunCast(sin);        break;
+      case AsmJSMathBuiltin_cos:   arity = 1; callee = UnaryMathFunCast(cos);        break;
+      case AsmJSMathBuiltin_tan:   arity = 1; callee = UnaryMathFunCast(tan);        break;
+      case AsmJSMathBuiltin_asin:  arity = 1; callee = UnaryMathFunCast(asin);       break;
+      case AsmJSMathBuiltin_acos:  arity = 1; callee = UnaryMathFunCast(acos);       break;
+      case AsmJSMathBuiltin_atan:  arity = 1; callee = UnaryMathFunCast(atan);       break;
+      case AsmJSMathBuiltin_ceil:  arity = 1; callee = UnaryMathFunCast(ceil);       break;
+      case AsmJSMathBuiltin_floor: arity = 1; callee = UnaryMathFunCast(floor);      break;
+      case AsmJSMathBuiltin_exp:   arity = 1; callee = UnaryMathFunCast(exp);        break;
+      case AsmJSMathBuiltin_log:   arity = 1; callee = UnaryMathFunCast(log);        break;
+      case AsmJSMathBuiltin_sqrt:  arity = 1; callee = UnaryMathFunCast(sqrt);       break;
+      case AsmJSMathBuiltin_pow:   arity = 2; callee = BinaryMathFunCast(ecmaPow);   break;
+      case AsmJSMathBuiltin_atan2: arity = 2; callee = BinaryMathFunCast(ecmaAtan2); break;
+    }
+
+    FunctionCompiler::Args args(f);
+
+    if (!CheckCallArgs(f, callNode, Use::ToNumber, &args))
+        return false;
+
+    // Fixed diagnostics: "argument" -> "arguments", and the per-argument
+    // message no longer claims "1 ... argument" (pow/atan2 take two).
+    if (args.length() != arity)
+        return f.fail("Math builtin call passed wrong number of arguments", callNode);
+
+    for (unsigned i = 0; i < args.length(); i++) {
+        if (!args.type(i).isDoublish())
+            return f.fail("Math builtin calls must be passed doublish arguments", callNode);
+    }
+
+    if (!f.builtinCall(callee, args, MIRType_Double, def))
+        return false;
+
+    *type = Type::Double;
+    return true;
+}
+
+// Dispatches a call expression on its callee: 'tbl[i & m](...)' goes to the
+// function-pointer path; a name is resolved to an internal function, FFI
+// import, or Math builtin.
+static bool
+CheckCall(FunctionCompiler &f, ParseNode *call, Use use, MDefinition **def, Type *type)
+{
+    ParseNode *callee = CallCallee(call);
+
+    if (callee->isKind(PNK_ELEM))
+        return CheckFuncPtrCall(f, call, def, type);
+
+    if (!callee->isKind(PNK_NAME))
+        return f.fail("Unexpected callee expression type", callee);
+
+    if (const ModuleCompiler::Global *global = f.lookupGlobal(callee->name())) {
+        switch (global->which()) {
+          case ModuleCompiler::Global::Function:
+            return CheckInternalCall(f, call, f.m().function(global->funcIndex()), def, type);
+          case ModuleCompiler::Global::FFI:
+            // Only FFI calls need |use|: the call-site coercion determines
+            // the exit's return type.
+            return CheckFFICall(f, call, global->ffiIndex(), use, def, type);
+          case ModuleCompiler::Global::MathBuiltin:
+            return CheckMathBuiltinCall(f, call, global->mathBuiltin(), def, type);
+          case ModuleCompiler::Global::Constant:
+          case ModuleCompiler::Global::Variable:
+          case ModuleCompiler::Global::FuncPtrTable:
+          case ModuleCompiler::Global::ArrayView:
+            return f.fail("Global is not callable function", callee);
+        }
+    }
+
+    return f.fail("Call target not found in global scope", callee);
+}
+
+// Validates unary '+expr', the double coercion: signed and unsigned operands
+// are converted to double (via the appropriate signedness-aware op) and
+// doublish operands pass through; the result is always double.
+static bool
+CheckPos(FunctionCompiler &f, ParseNode *pos, MDefinition **def, Type *type)
+{
+    JS_ASSERT(pos->isKind(PNK_POS));
+    ParseNode *operand = UnaryKid(pos);
+
+    MDefinition *operandDef;
+    Type operandType;
+    if (!CheckExpr(f, operand, Use::ToNumber, &operandDef, &operandType))
+        return false;
+
+    if (operandType.isSigned())
+        *def = f.unary<MToDouble>(operandDef);
+    else if (operandType.isUnsigned())
+        // Unsigned values need a dedicated conversion: MToDouble would treat
+        // the int32 bit pattern as signed.
+        *def = f.unary<MAsmJSUnsignedToDouble>(operandDef);
+    else if (operandType.isDoublish())
+        *def = operandDef;
+    else
+        // "doublish" (not "doubleish"): spelled consistently with the other
+        // diagnostics in this file.
+        return f.fail("Operand to unary + must be signed, unsigned or doublish", operand);
+
+    *type = Type::Double;
+    return true;
+}
+
+// Validates logical '!expr': the operand must be int; the result is int.
+static bool
+CheckNot(FunctionCompiler &f, ParseNode *expr, MDefinition **def, Type *type)
+{
+    JS_ASSERT(expr->isKind(PNK_NOT));
+
+    ParseNode *kid = UnaryKid(expr);
+
+    MDefinition *kidDef;
+    Type kidType;
+    if (!CheckExpr(f, kid, Use::NoCoercion, &kidDef, &kidType))
+        return false;
+
+    if (!kidType.isInt())
+        return f.fail("Operand to ! must be int", kid);
+
+    *type = Type::Int;
+    *def = f.unary<MNot>(kidDef);
+    return true;
+}
+
+// Validates unary '-expr': an int operand produces an intish int32 negation;
+// a doublish operand produces a double negation.
+static bool
+CheckNeg(FunctionCompiler &f, ParseNode *expr, MDefinition **def, Type *type)
+{
+    JS_ASSERT(expr->isKind(PNK_NEG));
+    ParseNode *operand = UnaryKid(expr);
+
+    MDefinition *operandDef;
+    Type operandType;
+    if (!CheckExpr(f, operand, Use::ToNumber, &operandDef, &operandType))
+        return false;
+
+    if (operandType.isInt()) {
+        // -INT32_MIN overflows int32, so the result is only intish and must
+        // be re-coerced before use as int.
+        *def = f.unary<MAsmJSNeg>(operandDef, MIRType_Int32);
+        *type = Type::Intish;
+        return true;
+    }
+
+    if (operandType.isDoublish()) {
+        *def = f.unary<MAsmJSNeg>(operandDef, MIRType_Double);
+        *type = Type::Double;
+        return true;
+    }
+
+    // NOTE(review): message understates the rule — doublish operands are
+    // accepted above as well.
+    return f.fail("Operand to unary - must be an int", operand);
+}
+
+// Validates '~expr'.  The double-tilde idiom '~~d' on a double is recognized
+// first and lowered to a single truncating double->int32 conversion;
+// otherwise the operand must be intish and a plain bitwise-not is emitted.
+static bool
+CheckBitNot(FunctionCompiler &f, ParseNode *neg, MDefinition **def, Type *type)
+{
+    JS_ASSERT(neg->isKind(PNK_BITNOT));
+    ParseNode *operand = UnaryKid(neg);
+
+    if (operand->isKind(PNK_BITNOT)) {
+        MDefinition *operandDef;
+        Type operandType;
+        if (!CheckExpr(f, UnaryKid(operand), Use::NoCoercion, &operandDef, &operandType))
+            return false;
+
+        if (operandType.isDouble()) {
+            // '~~double' is asm.js's double->signed truncation; both tildes
+            // are folded into one MTruncateToInt32.
+            *def = f.unary<MTruncateToInt32>(operandDef);
+            *type = Type::Signed;
+            return true;
+        }
+        // Not a double: fall through and re-check the inner '~' as an
+        // ordinary intish expression.
+    }
+
+    MDefinition *operandDef;
+    Type operandType;
+    if (!CheckExpr(f, operand, Use::ToInt32, &operandDef, &operandType))
+        return false;
+
+    if (!operandType.isIntish())
+        return f.fail("Operand to ~ must be intish", operand);
+
+    *def = f.bitwise<MBitNot>(operandDef);
+    *type = Type::Signed;
+    return true;
+}
+
+// Validates a comma expression: every operand is checked (and emitted) in
+// order, but only the last one — checked under the caller's |use| — supplies
+// the expression's value and type.
+static bool
+CheckComma(FunctionCompiler &f, ParseNode *comma, Use use, MDefinition **def, Type *type)
+{
+    JS_ASSERT(comma->isKind(PNK_COMMA));
+
+    for (ParseNode *operand = ListHead(comma); operand; operand = NextNode(operand)) {
+        Use operandUse = NextNode(operand) ? Use(Use::NoCoercion) : use;
+        if (!CheckExpr(f, operand, operandUse, def, type))
+            return false;
+    }
+
+    return true;
+}
+
+// Validates 'cond ? thenExpr : elseExpr'.  The condition must be int; both
+// arms are emitted in their own basic blocks and joined through a phi whose
+// value becomes the expression's result.  Both arms must be int, or both
+// double.
+static bool
+CheckConditional(FunctionCompiler &f, ParseNode *ternary, MDefinition **def, Type *type)
+{
+    JS_ASSERT(ternary->isKind(PNK_CONDITIONAL));
+    ParseNode *cond = TernaryKid1(ternary);
+    ParseNode *thenExpr = TernaryKid2(ternary);
+    ParseNode *elseExpr = TernaryKid3(ternary);
+
+    MDefinition *condDef;
+    Type condType;
+    if (!CheckExpr(f, cond, Use::NoCoercion, &condDef, &condType))
+        return false;
+
+    if (!condType.isInt())
+        return f.fail("Condition of if must be boolish", cond);
+
+    MBasicBlock *thenBlock, *elseBlock;
+    if (!f.branchAndStartThen(condDef, &thenBlock, &elseBlock))
+        return false;
+
+    MDefinition *thenDef;
+    Type thenType;
+    if (!CheckExpr(f, thenExpr, Use::NoCoercion, &thenDef, &thenType))
+        return false;
+
+    // Each arm pushes its value; the join below pops them as one phi.
+    f.pushPhiInput(thenDef);
+    MBasicBlock *thenEnd = f.switchToElse(elseBlock);
+
+    MDefinition *elseDef;
+    Type elseType;
+    if (!CheckExpr(f, elseExpr, Use::NoCoercion, &elseDef, &elseType))
+        return false;
+
+    f.pushPhiInput(elseDef);
+    if (!f.joinIfElse(thenEnd))
+        return false;
+    *def = f.popPhiOutput();
+
+    if (thenType.isInt() && elseType.isInt())
+        *type = Type::Int;
+    else if (thenType.isDouble() && elseType.isDouble())
+        *type = Type::Double;
+    else
+        return f.fail("Then/else branches of conditional must both be int or double", ternary);
+
+    return true;
+}
+
+// Returns whether |expr| is an int literal in the open interval
+// (-2^20, 2^20), small enough that an int32 multiply by it cannot lose
+// precision requirements assumed by CheckMultiply.
+static bool
+IsValidIntMultiplyConstant(ParseNode *expr)
+{
+    if (!IsNumericLiteral(expr))
+        return false;
+
+    NumLit literal = ExtractNumericLiteral(expr);
+    switch (literal.which()) {
+      case NumLit::Fixnum:
+      case NumLit::NegativeInt: {
+        // Range-check without abs(): abs(INT32_MIN) is undefined behavior
+        // and, on typical two's-complement implementations, yields a
+        // negative value that would wrongly satisfy 'abs(x) < (1<<20)'.
+        int32_t i = literal.toInt32();
+        return -(1<<20) < i && i < (1<<20);
+      }
+      case NumLit::BigUnsigned:
+      case NumLit::Double:
+      case NumLit::OutOfRangeInt:
+        return false;
+    }
+
+    JS_NOT_REACHED("Bad literal");
+    return false;
+}
+
+// Validates 'lhs * rhs'.  Int multiplication requires one operand to be a
+// small literal (so the 64-bit-exact product fits a double and matches
+// int32 semantics); otherwise both operands must be doublish.
+static bool
+CheckMultiply(FunctionCompiler &f, ParseNode *star, MDefinition **def, Type *type)
+{
+    JS_ASSERT(star->isKind(PNK_STAR));
+    ParseNode *lhs = BinaryLeft(star);
+    ParseNode *rhs = BinaryRight(star);
+
+    MDefinition *lhsDef;
+    Type lhsType;
+    if (!CheckExpr(f, lhs, Use::ToNumber, &lhsDef, &lhsType))
+        return false;
+
+    MDefinition *rhsDef;
+    Type rhsType;
+    if (!CheckExpr(f, rhs, Use::ToNumber, &rhsDef, &rhsType))
+        return false;
+
+    if (lhsType.isInt() && rhsType.isInt()) {
+        if (!IsValidIntMultiplyConstant(lhs) && !IsValidIntMultiplyConstant(rhs))
+            return f.fail("One arg to int multiply must be small (-2^20, 2^20) int literal", star);
+        *def = f.mul(lhsDef, rhsDef, MIRType_Int32, MMul::Integer);
+        *type = Type::Signed;
+        return true;
+    }
+
+    if (lhsType.isDoublish() && rhsType.isDoublish()) {
+        *def = f.mul(lhsDef, rhsDef, MIRType_Double, MMul::Normal);
+        *type = Type::Double;
+        return true;
+    }
+
+    // NOTE(review): message omits the int/int case accepted above.
+    return f.fail("Arguments to * must both be doubles", star);
+}
+
+// Validates 'lhs + rhs' / 'lhs - rhs'.  Chains of +/- on ints are counted
+// through the threaded Use::AddOrSub counter: up to 2^20 additions can
+// accumulate without int32 overflow escaping the double-exact range, so
+// interior nodes keep type Int while the chain's root is only Intish.
+static bool
+CheckAddOrSub(FunctionCompiler &f, ParseNode *expr, Use use, MDefinition **def, Type *type)
+{
+    JS_ASSERT(expr->isKind(PNK_ADD) || expr->isKind(PNK_SUB));
+    ParseNode *lhs = BinaryLeft(expr);
+    ParseNode *rhs = BinaryRight(expr);
+
+    Use argUse;
+    unsigned addOrSubCount = 1;
+    if (use.which() == Use::AddOrSub) {
+        // Already inside a +/- chain: bump the shared counter.
+        if (++use.addOrSubCount() > (1<<20))
+            return f.fail("Too many + or - without intervening coercion", expr);
+        argUse = use;
+    } else {
+        // Chain root: start a fresh counter threaded to the operands.
+        argUse = Use(&addOrSubCount);
+    }
+
+    MDefinition *lhsDef, *rhsDef;
+    Type lhsType, rhsType;
+    if (!CheckExpr(f, lhs, argUse, &lhsDef, &lhsType))
+        return false;
+    if (!CheckExpr(f, rhs, argUse, &rhsDef, &rhsType))
+        return false;
+
+    if (lhsType.isInt() && rhsType.isInt()) {
+        *def = expr->isKind(PNK_ADD)
+               ? f.binary<MAdd>(lhsDef, rhsDef, MIRType_Int32)
+               : f.binary<MSub>(lhsDef, rhsDef, MIRType_Int32);
+        if (use.which() == Use::AddOrSub)
+            *type = Type::Int;
+        else
+            *type = Type::Intish;
+        return true;
+    }
+
+    if (expr->isKind(PNK_ADD) && lhsType.isDouble() && rhsType.isDouble()) {
+        *def = f.binary<MAdd>(lhsDef, rhsDef, MIRType_Double);
+        *type = Type::Double;
+        return true;
+    }
+
+    // Subtraction is more permissive (doublish), since '-' cannot perform
+    // the string concatenation that '+' could on non-double values.
+    if (expr->isKind(PNK_SUB) && lhsType.isDoublish() && rhsType.isDoublish()) {
+        *def = f.binary<MSub>(lhsDef, rhsDef, MIRType_Double);
+        *type = Type::Double;
+        return true;
+    }
+
+    return f.fail("Arguments to + or - must both be ints or doubles", expr);
+}
+
+// Validates 'lhs / rhs' / 'lhs % rhs'.  Operands must agree in signedness
+// class: doublish/doublish lowers to double ops, signed/signed to int32 ops,
+// unsigned/unsigned to dedicated unsigned div/mod ops.
+static bool
+CheckDivOrMod(FunctionCompiler &f, ParseNode *expr, MDefinition **def, Type *type)
+{
+    JS_ASSERT(expr->isKind(PNK_DIV) || expr->isKind(PNK_MOD));
+    ParseNode *lhs = BinaryLeft(expr);
+    ParseNode *rhs = BinaryRight(expr);
+
+    MDefinition *lhsDef, *rhsDef;
+    Type lhsType, rhsType;
+    if (!CheckExpr(f, lhs, Use::ToNumber, &lhsDef, &lhsType))
+        return false;
+    if (!CheckExpr(f, rhs, Use::ToNumber, &rhsDef, &rhsType))
+        return false;
+
+    if (lhsType.isDoublish() && rhsType.isDoublish()) {
+        *def = expr->isKind(PNK_DIV)
+               ? f.binary<MDiv>(lhsDef, rhsDef, MIRType_Double)
+               : f.binary<MMod>(lhsDef, rhsDef, MIRType_Double);
+        *type = Type::Double;
+        return true;
+    }
+
+    if (lhsType.isSigned() && rhsType.isSigned()) {
+        if (expr->isKind(PNK_DIV)) {
+            // Division is only intish: INT32_MIN / -1 and x / 0 escape the
+            // signed range, so a coercion is required before use as int.
+            *def = f.binary<MDiv>(lhsDef, rhsDef, MIRType_Int32);
+            *type = Type::Intish;
+        } else {
+            *def = f.binary<MMod>(lhsDef, rhsDef, MIRType_Int32);
+            *type = Type::Int;
+        }
+        return true;
+    }
+
+    if (lhsType.isUnsigned() && rhsType.isUnsigned()) {
+        if (expr->isKind(PNK_DIV)) {
+            *def = f.binary<MAsmJSUDiv>(lhsDef, rhsDef);
+            *type = Type::Intish;
+        } else {
+            *def = f.binary<MAsmJSUMod>(lhsDef, rhsDef);
+            *type = Type::Int;
+        }
+        return true;
+    }
+
+    return f.fail("Arguments to / or % must both be double, signed, or unsigned", expr);
+}
+
+// Validate and compile a relational/equality comparison. Both operands
+// must be signed, both unsigned, or both double; the result is always
+// an Int (0 or 1).
+static bool
+CheckComparison(FunctionCompiler &f, ParseNode *comp, MDefinition **def, Type *type)
+{
+    JS_ASSERT(comp->isKind(PNK_LT) || comp->isKind(PNK_LE) || comp->isKind(PNK_GT) ||
+              comp->isKind(PNK_GE) || comp->isKind(PNK_EQ) || comp->isKind(PNK_NE));
+    ParseNode *lhs = BinaryLeft(comp);
+    ParseNode *rhs = BinaryRight(comp);
+
+    MDefinition *lhsDef, *rhsDef;
+    Type lhsType, rhsType;
+    if (!CheckExpr(f, lhs, Use::NoCoercion, &lhsDef, &lhsType))
+        return false;
+    if (!CheckExpr(f, rhs, Use::NoCoercion, &rhsDef, &rhsType))
+        return false;
+
+    // The operands' (matching) signedness selects the compare flavor.
+    if ((lhsType.isSigned() && rhsType.isSigned()) || (lhsType.isUnsigned() && rhsType.isUnsigned())) {
+        MCompare::CompareType compareType = lhsType.isUnsigned()
+                                            ? MCompare::Compare_UInt32
+                                            : MCompare::Compare_Int32;
+        *def = f.compare(lhsDef, rhsDef, comp->getOp(), compareType);
+        *type = Type::Int;
+        return true;
+    }
+
+    if (lhsType.isDouble() && rhsType.isDouble()) {
+        *def = f.compare(lhsDef, rhsDef, comp->getOp(), MCompare::Compare_Double);
+        *type = Type::Int;
+        return true;
+    }
+
+    return f.fail("The arguments to a comparison must both be signed, unsigned or doubles", comp);
+}
+
+// Validate and compile a bitwise/shift expression. In asm.js these ops
+// double as coercions: 'x|0', 'x&-1', 'x^0', 'x<<0', 'x>>0' coerce to
+// Signed and 'x>>>0' to Unsigned. When one operand is the op's identity
+// element, the operation itself is elided and only the other operand is
+// compiled.
+static bool
+CheckBitwise(FunctionCompiler &f, ParseNode *bitwise, MDefinition **def, Type *type)
+{
+    ParseNode *lhs = BinaryLeft(bitwise);
+    ParseNode *rhs = BinaryRight(bitwise);
+
+    // 'onlyOnRight' is true for shifts: an identity element on the left
+    // (e.g. 0 << x) is not an elidable coercion.
+    int32_t identityElement;
+    bool onlyOnRight;
+    switch (bitwise->getKind()) {
+      case PNK_BITOR:  identityElement = 0;  onlyOnRight = false; *type = Type::Signed;   break;
+      case PNK_BITAND: identityElement = -1; onlyOnRight = false; *type = Type::Signed;   break;
+      case PNK_BITXOR: identityElement = 0;  onlyOnRight = false; *type = Type::Signed;   break;
+      case PNK_LSH:    identityElement = 0;  onlyOnRight = true;  *type = Type::Signed;   break;
+      case PNK_RSH:    identityElement = 0;  onlyOnRight = true;  *type = Type::Signed;   break;
+      case PNK_URSH:   identityElement = 0;  onlyOnRight = true;  *type = Type::Unsigned; break;
+      default: JS_NOT_REACHED("not a bitwise op");
+    }
+
+    // Identity on the left (e.g. 0|x): compile only the rhs.
+    if (!onlyOnRight && IsBits32(lhs, identityElement)) {
+        Type rhsType;
+        if (!CheckExpr(f, rhs, Use::ToInt32, def, &rhsType))
+            return false;
+        if (!rhsType.isIntish())
+            return f.fail("Operands to bitwise ops must be intish", bitwise);
+        return true;
+    }
+
+    // Identity on the right (e.g. x|0): compile only the lhs.
+    if (IsBits32(rhs, identityElement)) {
+        Type lhsType;
+        if (!CheckExpr(f, lhs, Use::ToInt32, def, &lhsType))
+            return false;
+        if (!lhsType.isIntish())
+            return f.fail("Operands to bitwise ops must be intish", bitwise);
+        return true;
+    }
+
+    // General case: compile both operands and emit the actual MIR op.
+    MDefinition *lhsDef;
+    Type lhsType;
+    if (!CheckExpr(f, lhs, Use::ToInt32, &lhsDef, &lhsType))
+        return false;
+
+    MDefinition *rhsDef;
+    Type rhsType;
+    if (!CheckExpr(f, rhs, Use::ToInt32, &rhsDef, &rhsType))
+        return false;
+
+    if (!lhsType.isIntish() || !rhsType.isIntish())
+        return f.fail("Operands to bitwise ops must be intish", bitwise);
+
+    switch (bitwise->getKind()) {
+      case PNK_BITOR:  *def = f.bitwise<MBitOr>(lhsDef, rhsDef); break;
+      case PNK_BITAND: *def = f.bitwise<MBitAnd>(lhsDef, rhsDef); break;
+      case PNK_BITXOR: *def = f.bitwise<MBitXor>(lhsDef, rhsDef); break;
+      case PNK_LSH:    *def = f.bitwise<MLsh>(lhsDef, rhsDef); break;
+      case PNK_RSH:    *def = f.bitwise<MRsh>(lhsDef, rhsDef); break;
+      case PNK_URSH:   *def = f.bitwise<MUrsh>(lhsDef, rhsDef); break;
+      default: JS_NOT_REACHED("not a bitwise op");
+    }
+
+    return true;
+}
+
+// Central expression dispatcher: validates 'expr' as asm.js, compiles it
+// to MIR (*def), and reports its asm.js type (*type). 'use' describes the
+// context that will consume the value (e.g. a coercion already applied by
+// the parent). Guards against C-stack overflow and replenishes the LIFO
+// allocator ballast before recursing.
+static bool
+CheckExpr(FunctionCompiler &f, ParseNode *expr, Use use, MDefinition **def, Type *type)
+{
+    JS_CHECK_RECURSION(f.cx(), return false);
+
+    if (!f.m().alloc().ensureBallast())
+        return false;
+
+    // Numeric literals (including negated ones) are handled before the
+    // kind-based dispatch below.
+    if (IsNumericLiteral(expr))
+        return CheckNumericLiteral(f, expr, def, type);
+
+    switch (expr->getKind()) {
+      case PNK_NAME:        return CheckVarRef(f, expr, def, type);
+      case PNK_ELEM:        return CheckArrayLoad(f, expr, def, type);
+      case PNK_ASSIGN:      return CheckAssign(f, expr, def, type);
+      case PNK_CALL:        return CheckCall(f, expr, use, def, type);
+      case PNK_POS:         return CheckPos(f, expr, def, type);
+      case PNK_NOT:         return CheckNot(f, expr, def, type);
+      case PNK_NEG:         return CheckNeg(f, expr, def, type);
+      case PNK_BITNOT:      return CheckBitNot(f, expr, def, type);
+      case PNK_COMMA:       return CheckComma(f, expr, use, def, type);
+      case PNK_CONDITIONAL: return CheckConditional(f, expr, def, type);
+
+      case PNK_STAR:        return CheckMultiply(f, expr, def, type);
+
+      case PNK_ADD:
+      case PNK_SUB:         return CheckAddOrSub(f, expr, use, def, type);
+
+      case PNK_DIV:
+      case PNK_MOD:         return CheckDivOrMod(f, expr, def, type);
+
+      case PNK_LT:
+      case PNK_LE:
+      case PNK_GT:
+      case PNK_GE:
+      case PNK_EQ:
+      case PNK_NE:          return CheckComparison(f, expr, def, type);
+
+      case PNK_BITOR:
+      case PNK_BITAND:
+      case PNK_BITXOR:
+      case PNK_LSH:
+      case PNK_RSH:
+      case PNK_URSH:        return CheckBitwise(f, expr, def, type);
+
+      default:;
+    }
+
+    return f.fail("Unsupported expression", expr);
+}
+
+static bool
+CheckStatement(FunctionCompiler &f, ParseNode *stmt, LabelVector *maybeLabels = NULL);
+
+// Validate and compile an expression statement. An empty statement
+// (bare ';') has no kid and is trivially valid; otherwise the expression
+// is compiled and its value/type are discarded.
+static bool
+CheckExprStatement(FunctionCompiler &f, ParseNode *exprStmt)
+{
+    JS_ASSERT(exprStmt->isKind(PNK_SEMI));
+    ParseNode *expr = UnaryKid(exprStmt);
+
+    if (!expr)
+        return true;
+
+    MDefinition *_1;
+    Type _2;
+    // Use the already-extracted (and null-checked) 'expr' rather than
+    // re-reading UnaryKid(exprStmt).
+    if (!CheckExpr(f, expr, Use::NoCoercion, &_1, &_2))
+        return false;
+
+    return true;
+}
+
+// Validate and compile a while statement. The condition must be Int
+// ("boolish"). Loop structure is built via startPendingLoop /
+// branchAndStartLoopBody / closeLoop; 'continue' statements inside the
+// body are bound to the loop backedge via bindContinues.
+static bool
+CheckWhile(FunctionCompiler &f, ParseNode *whileStmt, const LabelVector *maybeLabels)
+{
+    JS_ASSERT(whileStmt->isKind(PNK_WHILE));
+    ParseNode *cond = BinaryLeft(whileStmt);
+    ParseNode *body = BinaryRight(whileStmt);
+
+    MBasicBlock *loopEntry;
+    if (!f.startPendingLoop(whileStmt, &loopEntry))
+        return false;
+
+    MDefinition *condDef;
+    Type condType;
+    if (!CheckExpr(f, cond, Use::NoCoercion, &condDef, &condType))
+        return false;
+
+    if (!condType.isInt())
+        return f.fail("Condition of while loop must be boolish", cond);
+
+    MBasicBlock *afterLoop;
+    if (!f.branchAndStartLoopBody(condDef, &afterLoop))
+        return false;
+
+    if (!CheckStatement(f, body))
+        return false;
+
+    if (!f.bindContinues(whileStmt, maybeLabels))
+        return false;
+
+    return f.closeLoop(loopEntry, afterLoop);
+}
+
+// Validate and compile a for(;;) statement. Only the plain three-part
+// for-head is accepted (no for-in/for-of). The init and increment clauses
+// are compiled with their values discarded; a missing condition is treated
+// as the constant 1 (always-taken branch).
+static bool
+CheckFor(FunctionCompiler &f, ParseNode *forStmt, const LabelVector *maybeLabels)
+{
+    JS_ASSERT(forStmt->isKind(PNK_FOR));
+    ParseNode *forHead = BinaryLeft(forStmt);
+    ParseNode *body = BinaryRight(forStmt);
+
+    if (!forHead->isKind(PNK_FORHEAD))
+        return f.fail("Unsupported for-loop statement", forHead);
+
+    ParseNode *maybeInit = TernaryKid1(forHead);
+    ParseNode *maybeCond = TernaryKid2(forHead);
+    ParseNode *maybeInc = TernaryKid3(forHead);
+
+    if (maybeInit) {
+        MDefinition *_1;
+        Type _2;
+        if (!CheckExpr(f, maybeInit, Use::NoCoercion, &_1, &_2))
+            return false;
+    }
+
+    MBasicBlock *loopEntry;
+    if (!f.startPendingLoop(forStmt, &loopEntry))
+        return false;
+
+    MDefinition *condDef;
+    if (maybeCond) {
+        Type condType;
+        if (!CheckExpr(f, maybeCond, Use::NoCoercion, &condDef, &condType))
+            return false;
+
+        // Fix: this diagnostic previously said "while loop" (copy/paste
+        // from CheckWhile), which was misleading for a for-loop condition.
+        if (!condType.isInt())
+            return f.fail("Condition of for loop must be boolish", maybeCond);
+    } else {
+        condDef = f.constant(Int32Value(1));
+    }
+
+    MBasicBlock *afterLoop;
+    if (!f.branchAndStartLoopBody(condDef, &afterLoop))
+        return false;
+
+    if (!CheckStatement(f, body))
+        return false;
+
+    // 'continue' jumps land here, before the increment clause runs.
+    if (!f.bindContinues(forStmt, maybeLabels))
+        return false;
+
+    if (maybeInc) {
+        MDefinition *_1;
+        Type _2;
+        if (!CheckExpr(f, maybeInc, Use::NoCoercion, &_1, &_2))
+            return false;
+    }
+
+    return f.closeLoop(loopEntry, afterLoop);
+}
+
+// Validate and compile a do-while statement. The body is compiled first,
+// then the condition (which must be Int), and the loop is closed with a
+// conditional backedge.
+static bool
+CheckDoWhile(FunctionCompiler &f, ParseNode *whileStmt, const LabelVector *maybeLabels)
+{
+    JS_ASSERT(whileStmt->isKind(PNK_DOWHILE));
+    ParseNode *body = BinaryLeft(whileStmt);
+    ParseNode *cond = BinaryRight(whileStmt);
+
+    MBasicBlock *loopEntry;
+    if (!f.startPendingLoop(whileStmt, &loopEntry))
+        return false;
+
+    if (!CheckStatement(f, body))
+        return false;
+
+    if (!f.bindContinues(whileStmt, maybeLabels))
+        return false;
+
+    MDefinition *condDef;
+    Type condType;
+    if (!CheckExpr(f, cond, Use::NoCoercion, &condDef, &condType))
+        return false;
+
+    // NOTE(review): message says "while loop" for a do-while condition —
+    // looks copy/pasted from CheckWhile; consider "do-while loop".
+    if (!condType.isInt())
+        return f.fail("Condition of while loop must be boolish", cond);
+
+    return f.branchAndCloseDoWhileLoop(condDef, loopEntry);
+}
+
+// Validate and compile a labeled statement. Consecutive labels on one
+// statement accumulate into the caller-provided vector; only the
+// outermost labeled statement (maybeLabels == NULL) creates the vector
+// and binds labeled breaks once the statement has been compiled.
+static bool
+CheckLabel(FunctionCompiler &f, ParseNode *labeledStmt, LabelVector *maybeLabels)
+{
+    JS_ASSERT(labeledStmt->isKind(PNK_COLON));
+    PropertyName *label = LabeledStatementLabel(labeledStmt);
+    ParseNode *stmt = LabeledStatementStatement(labeledStmt);
+
+    if (maybeLabels) {
+        if (!maybeLabels->append(label))
+            return false;
+        if (!CheckStatement(f, stmt, maybeLabels))
+            return false;
+        return true;
+    }
+
+    LabelVector labels(f.cx());
+    if (!labels.append(label))
+        return false;
+
+    if (!CheckStatement(f, stmt, &labels))
+        return false;
+
+    return f.bindLabeledBreaks(&labels);
+}
+
+// Validate and compile an if/else statement. The condition must be Int;
+// control flow is built with branchAndStartThen and joined with joinIf
+// (no else) or switchToElse/joinIfElse (with else).
+static bool
+CheckIf(FunctionCompiler &f, ParseNode *ifStmt)
+{
+    JS_ASSERT(ifStmt->isKind(PNK_IF));
+    ParseNode *cond = TernaryKid1(ifStmt);
+    ParseNode *thenStmt = TernaryKid2(ifStmt);
+    ParseNode *elseStmt = TernaryKid3(ifStmt);
+
+    MDefinition *condDef;
+    Type condType;
+    if (!CheckExpr(f, cond, Use::NoCoercion, &condDef, &condType))
+        return false;
+
+    if (!condType.isInt())
+        return f.fail("Condition of if must be boolish", cond);
+
+    MBasicBlock *thenBlock, *elseBlock;
+    if (!f.branchAndStartThen(condDef, &thenBlock, &elseBlock))
+        return false;
+
+    if (!CheckStatement(f, thenStmt))
+        return false;
+
+    if (!elseStmt) {
+        f.joinIf(elseBlock);
+    } else {
+        MBasicBlock *thenEnd = f.switchToElse(elseBlock);
+        if (!CheckStatement(f, elseStmt))
+            return false;
+        if (!f.joinIfElse(thenEnd))
+            return false;
+    }
+
+    return true;
+}
+
+// Validate a switch case label expression: it must be an int32-range
+// integer literal (Fixnum or NegativeInt). On success *value holds the
+// case's int32 value.
+static bool
+CheckCaseExpr(FunctionCompiler &f, ParseNode *caseExpr, int32_t *value)
+{
+    if (!IsNumericLiteral(caseExpr))
+        return f.fail("Switch case expression must be an integer literal", caseExpr);
+
+    NumLit literal = ExtractNumericLiteral(caseExpr);
+    switch (literal.which()) {
+      case NumLit::Fixnum:
+      case NumLit::NegativeInt:
+        *value = literal.toInt32();
+        break;
+      case NumLit::OutOfRangeInt:
+      case NumLit::BigUnsigned:
+        return f.fail("Switch case expression out of integer range", caseExpr);
+      case NumLit::Double:
+        return f.fail("Switch case expression must be an integer literal", caseExpr);
+    }
+
+    return true;
+}
+
+// Walk a switch's case list and verify that a 'default' clause, if
+// present, is the final entry.
+static bool
+CheckDefaultAtEnd(FunctionCompiler &f, ParseNode *stmt)
+{
+    while (stmt) {
+        JS_ASSERT(stmt->isKind(PNK_CASE) || stmt->isKind(PNK_DEFAULT));
+        ParseNode *next = NextNode(stmt);
+        if (stmt->isKind(PNK_DEFAULT) && next != NULL)
+            return f.fail("default label must be at the end", stmt);
+        stmt = next;
+    }
+
+    return true;
+}
+
+// Compute the [low, high] range and jump-table length for a switch's
+// run of consecutive case clauses, validating each case expression along
+// the way. A leading 'default' (i.e. no cases) yields an empty range.
+static bool
+CheckSwitchRange(FunctionCompiler &f, ParseNode *stmt, int32_t *low, int32_t *high,
+                 int32_t *tableLength)
+{
+    if (stmt->isKind(PNK_DEFAULT)) {
+        *low = 0;
+        *high = -1;
+        *tableLength = 0;
+        return true;
+    }
+
+    int32_t i = 0;
+    if (!CheckCaseExpr(f, CaseExpr(stmt), &i))
+        return false;
+
+    *low = *high = i;
+
+    for (stmt = NextNode(stmt); stmt && stmt->isKind(PNK_CASE); stmt = NextNode(stmt)) {
+        int32_t i = 0;
+        if (!CheckCaseExpr(f, CaseExpr(stmt), &i))
+            return false;
+
+        *low = Min(*low, i);
+        *high = Max(*high, i);
+    }
+
+    // 64-bit arithmetic so the span can't overflow int32 before the check.
+    // NOTE(review): at this point stmt may be NULL (no default clause) —
+    // confirm f.fail tolerates a null node for error location.
+    int64_t i64 = (int64_t(*high) - int64_t(*low)) + 1;
+    if (i64 > 512*1024*1024)
+        return f.fail("All switch statements generate tables; this table would be bigger than 512MiB", stmt);
+
+    *tableLength = int32_t(i64);
+    return true;
+}
+
+// Validate and compile a switch statement into a jump table. The switch
+// expression must be Signed, the body a plain statement list, cases must
+// be unique integer literals with any default last. MIR structure is
+// built via startSwitch / startSwitchCase / startSwitchDefault /
+// joinSwitch.
+static bool
+CheckSwitch(FunctionCompiler &f, ParseNode *switchStmt)
+{
+    JS_ASSERT(switchStmt->isKind(PNK_SWITCH));
+    ParseNode *switchExpr = BinaryLeft(switchStmt);
+    ParseNode *switchBody = BinaryRight(switchStmt);
+
+    if (!switchBody->isKind(PNK_STATEMENTLIST))
+        return f.fail("Switch body may not contain 'let' declarations", switchBody);
+
+    MDefinition *exprDef;
+    Type exprType;
+    if (!CheckExpr(f, switchExpr, Use::NoCoercion, &exprDef, &exprType))
+        return false;
+
+    if (!exprType.isSigned())
+        return f.fail("Switch expression must be a signed integer", switchExpr);
+
+    ParseNode *stmt = ListHead(switchBody);
+
+    if (!CheckDefaultAtEnd(f, stmt))
+        return false;
+
+    // Empty body: the switch expression has already been evaluated above.
+    if (!stmt)
+        return true;
+
+    int32_t low = 0, high = 0, tableLength = 0;
+    if (!CheckSwitchRange(f, stmt, &low, &high, &tableLength))
+        return false;
+
+    // One table slot per value in [low, high]; unused slots stay null.
+    CaseVector cases(f.cx());
+    if (!cases.resize(tableLength))
+        return false;
+
+    MBasicBlock *switchBlock;
+    if (!f.startSwitch(switchStmt, exprDef, low, high, &switchBlock))
+        return false;
+
+    for (; stmt && stmt->isKind(PNK_CASE); stmt = NextNode(stmt)) {
+        int32_t caseValue = ExtractNumericLiteral(CaseExpr(stmt)).toInt32();
+        unsigned caseIndex = caseValue - low;
+
+        if (cases[caseIndex])
+            return f.fail("No duplicate case labels", stmt);
+
+        if (!f.startSwitchCase(switchBlock, &cases[caseIndex]))
+            return false;
+
+        if (!CheckStatement(f, CaseBody(stmt)))
+            return false;
+    }
+
+    MBasicBlock *defaultBlock;
+    if (!f.startSwitchDefault(switchBlock, &cases, &defaultBlock))
+        return false;
+
+    if (stmt && stmt->isKind(PNK_DEFAULT)) {
+        if (!CheckStatement(f, CaseBody(stmt)))
+            return false;
+    }
+
+    return f.joinSwitch(switchBlock, cases, defaultBlock);
+}
+
+// Validate and compile a return statement. A bare 'return' is only legal
+// in void functions; otherwise the expression's type must be a subtype of
+// the function's (previously inferred) return type.
+static bool
+CheckReturn(FunctionCompiler &f, ParseNode *returnStmt)
+{
+    JS_ASSERT(returnStmt->isKind(PNK_RETURN));
+    ParseNode *expr = UnaryKid(returnStmt);
+
+    if (!expr) {
+        if (f.func().returnType().which() != RetType::Void)
+            return f.fail("All return statements must return void", returnStmt);
+
+        f.returnVoid();
+        return true;
+    }
+
+    MDefinition *def;
+    Type type;
+    if (!CheckExpr(f, expr, Use::NoCoercion, &def, &type))
+        return false;
+
+    if (!(type <= f.func().returnType()))
+        return f.fail("All returns must return the same type", expr);
+
+    // A void function may still 'return expr;' — the value is dropped.
+    if (f.func().returnType().which() == RetType::Void)
+        f.returnVoid();
+    else
+        f.returnExpr(def);
+    return true;
+}
+
+// Validate and compile each statement of a linked statement list, in
+// source order.
+static bool
+CheckStatements(FunctionCompiler &f, ParseNode *stmtHead)
+{
+    ParseNode *stmt = stmtHead;
+    while (stmt) {
+        if (!CheckStatement(f, stmt))
+            return false;
+        stmt = NextNode(stmt);
+    }
+
+    return true;
+}
+
+// A statement-list node simply wraps a linked list of child statements;
+// unwrap it and check each one.
+static bool
+CheckStatementList(FunctionCompiler &f, ParseNode *stmt)
+{
+    JS_ASSERT(stmt->isKind(PNK_STATEMENTLIST));
+    ParseNode *head = ListHead(stmt);
+    return CheckStatements(f, head);
+}
+
+// Central statement dispatcher. 'maybeLabels' carries any labels applied
+// to this statement (for labeled break/continue binding). Guards against
+// C-stack overflow and replenishes the LIFO allocator ballast.
+static bool
+CheckStatement(FunctionCompiler &f, ParseNode *stmt, LabelVector *maybeLabels)
+{
+    JS_CHECK_RECURSION(f.cx(), return false);
+
+    if (!f.m().alloc().ensureBallast())
+        return false;
+
+    switch (stmt->getKind()) {
+      case PNK_SEMI:          return CheckExprStatement(f, stmt);
+      case PNK_WHILE:         return CheckWhile(f, stmt, maybeLabels);
+      case PNK_FOR:           return CheckFor(f, stmt, maybeLabels);
+      case PNK_DOWHILE:       return CheckDoWhile(f, stmt, maybeLabels);
+      case PNK_COLON:         return CheckLabel(f, stmt, maybeLabels);
+      case PNK_IF:            return CheckIf(f, stmt);
+      case PNK_SWITCH:        return CheckSwitch(f, stmt);
+      case PNK_RETURN:        return CheckReturn(f, stmt);
+      case PNK_STATEMENTLIST: return CheckStatementList(f, stmt);
+      case PNK_BREAK:         return f.addBreak(LoopControlMaybeLabel(stmt));
+      case PNK_CONTINUE:      return f.addContinue(LoopControlMaybeLabel(stmt));
+      default:;
+    }
+
+    return f.fail("Unexpected statement kind", stmt);
+}
+
+// Validate a single local 'var' declaration and add it to the local map.
+// Every local must have a numeric-literal initializer, which determines
+// both its type (int vs double) and its initial value. The local's slot
+// is its insertion index.
+static bool
+CheckVariableDecl(ModuleCompiler &m, ParseNode *var, FunctionCompiler::LocalMap *locals)
+{
+    if (!IsDefinition(var))
+        return m.fail("Local variable names must not restate argument names", var);
+
+    PropertyName *name = var->name();
+
+    if (!CheckIdentifier(m, name, var))
+        return false;
+
+    ParseNode *initNode = MaybeDefinitionInitializer(var);
+    if (!initNode)
+        return m.fail("Variable needs explicit type declaration via an initial value", var);
+
+    if (!IsNumericLiteral(initNode))
+        return m.fail("Variable initialization value needs to be a numeric literal", initNode);
+
+    NumLit literal = ExtractNumericLiteral(initNode);
+    VarType type;
+    switch (literal.which()) {
+      case NumLit::Fixnum:
+      case NumLit::NegativeInt:
+      case NumLit::BigUnsigned:
+        // Note: a BigUnsigned initializer still declares an int variable.
+        type = VarType::Int;
+        break;
+      case NumLit::Double:
+        type = VarType::Double;
+        break;
+      case NumLit::OutOfRangeInt:
+        return m.fail("Variable initializer is out of representable integer range", initNode);
+    }
+
+    FunctionCompiler::LocalMap::AddPtr p = locals->lookupForAdd(name);
+    if (p)
+        return m.fail("Local names must be unique", initNode);
+
+    unsigned slot = locals->count();
+    if (!locals->add(p, name, FunctionCompiler::Local(type, slot, literal.value())))
+        return false;
+
+    return true;
+}
+
+// Consume the leading run of 'var' statements in a function body,
+// checking each declarator. On return, *stmtIter points at the first
+// non-var statement.
+static bool
+CheckVariableDecls(ModuleCompiler &m, FunctionCompiler::LocalMap *locals, ParseNode **stmtIter)
+{
+    ParseNode *stmt = *stmtIter;
+
+    for (; stmt && stmt->isKind(PNK_VAR); stmt = NextNode(stmt)) {
+        for (ParseNode *var = VarListHead(stmt); var; var = NextNode(var)) {
+            if (!CheckVariableDecl(m, var, locals))
+                return false;
+        }
+    }
+
+    *stmtIter = stmt;
+    return true;
+}
+
+// Type-check a single function's body, compile it to MIR, and run the Ion
+// backend to emit machine code into the module's shared MacroAssembler.
+static bool
+CheckFunctionBody(ModuleCompiler &m, ModuleCompiler::Func &func)
+{
+    // CheckFunctionSignature has already checked the
+    // function head as well as argument type declarations. The ParseNode*
+    // stored in f.body points to the first non-argument statement.
+    ParseNode *stmtIter = func.body();
+
+    FunctionCompiler::LocalMap locals(m.cx());
+    if (!locals.init())
+        return false;
+
+    // Formals occupy the first local slots, in declaration order.
+    unsigned numFormals;
+    ParseNode *arg = FunctionArgsList(func.fn(), &numFormals);
+    for (unsigned i = 0; i < numFormals; i++, arg = NextNode(arg)) {
+        if (!locals.putNew(arg->name(), FunctionCompiler::Local(func.argType(i), i)))
+            return false;
+    }
+
+    if (!CheckVariableDecls(m, &locals, &stmtIter))
+        return false;
+
+    FunctionCompiler f(m, func, Move(locals));
+    if (!f.init())
+        return false;
+
+    if (!CheckStatements(f, stmtIter))
+        return false;
+
+    // Implicit 'return;' at the end of the body.
+    f.returnVoid();
+
+    m.masm().bind(func.codeLabel());
+
+    ScopedJSDeletePtr<CodeGenerator> codegen(CompileBackEnd(&f.mirGen(), &m.masm()));
+    if (!codegen)
+        return m.fail("Internal compiler failure (probably out of memory)", func.fn());
+
+    if (!m.collectAccesses(f.mirGen()))
+        return false;
+
+    // Unlike regular IonMonkey which links and generates a new IonCode for
+    // every function, we accumulate all the functions in the module in a
+    // single MacroAssembler and link at end. Linking asm.js doesn't require a
+    // CodeGenerator so we can destroy it now.
+    return true;
+}
+
+// Byte alignment applied to each internal function's code header.
+static const unsigned CodeAlignment = 8;
+
+// Compile every function in the module, in order, into the shared
+// MacroAssembler.
+static bool
+CheckFunctionBodies(ModuleCompiler &m)
+{
+    for (unsigned i = 0; i < m.numFunctions(); i++) {
+        if (!CheckFunctionBody(m, m.function(i)))
+            return false;
+
+        // A single MacroAssembler is reused for all function compilations so
+        // that there is a single linear code segment for each module. To avoid
+        // spiking memory, each FunctionCompiler creates a LifoAllocScope so
+        // that all MIR/LIR nodes are freed after each function is compiled.
+        // This method is responsible for cleaning out any dangling pointers
+        // that the MacroAssembler may have kept.
+        m.masm().resetForNewCodeGenerator();
+
+        // Align internal function headers.
+        m.masm().align(CodeAlignment);
+    }
+
+    return true;
+}
+
+// Register sets used by the stub generators below: AllRegs covers every
+// GPR/FPU; NonVolatileRegs is the subset the system ABI requires callees
+// to preserve.
+static RegisterSet AllRegs = RegisterSet(GeneralRegisterSet(Registers::AllMask),
+                                         FloatRegisterSet(FloatRegisters::AllMask));
+static RegisterSet NonVolatileRegs = RegisterSet(GeneralRegisterSet(Registers::NonVolatileMask),
+                                                 FloatRegisterSet(FloatRegisters::NonVolatileMask));
+
+// Emit code to load the current AsmJSActivation* into 'reg': the runtime
+// address is baked in as an immediate, then the activation is loaded from
+// the main thread's asm.js activation slot.
+static void
+LoadAsmJSActivationIntoRegister(MacroAssembler &masm, Register reg)
+{
+    masm.movePtr(ImmWord(GetIonContext()->compartment->rt), reg);
+    size_t offset = offsetof(JSRuntime, mainThread) +
+                    PerThreadData::offsetOfAsmJSActivationStackReadOnly();
+    masm.loadPtr(Address(reg, offset), reg);
+}
+
+// Emit code to load the JSContext* stored in the given AsmJSActivation
+// into 'dest'.
+static void
+LoadJSContextFromActivation(MacroAssembler &masm, Register activation, Register dest)
+{
+    masm.loadPtr(Address(activation, AsmJSActivation::offsetOfContext()), dest);
+}
+
+// Assert that the stack is aligned to StackAlignment at this emission
+// point: statically via the tracked framePushed, and (in DEBUG builds)
+// with an emitted runtime check that breakpoints on misalignment.
+static void
+AssertStackAlignment(MacroAssembler &masm)
+{
+    JS_ASSERT((AlignmentAtPrologue + masm.framePushed()) % StackAlignment == 0);
+#ifdef DEBUG
+    Label ok;
+    JS_ASSERT(IsPowerOfTwo(StackAlignment));
+    masm.branchTestPtr(Assembler::Zero, StackPointer, Imm32(StackAlignment - 1), &ok);
+    masm.breakpoint();
+    masm.bind(&ok);
+#endif
+}
+
+// Run the ABI argument allocator over all argument types; the iterator
+// tracks how many bytes of stack (beyond registers) the call will need.
+static unsigned
+StackArgBytes(const MIRTypeVector &argTypes)
+{
+    ABIArgIter iter(argTypes);
+    for (; !iter.done(); iter++)
+        continue;
+    return iter.stackBytesConsumedSoFar();
+}
+
+// Compute how much to decrement the stack pointer before a call so that
+// the stack ends up aligned for the callee.
+static unsigned
+StackDecrementForCall(MacroAssembler &masm, const MIRTypeVector &argTypes, unsigned extraBytes = 0)
+{
+    // Include extra padding so that, after pushing the arguments and
+    // extraBytes, the stack is aligned for a call instruction.
+    unsigned argBytes = StackArgBytes(argTypes);
+    unsigned alreadyPushed = AlignmentAtPrologue + masm.framePushed();
+    return AlignBytes(alreadyPushed + extraBytes + argBytes, StackAlignment) - alreadyPushed;
+}
+
+// Size in bytes of the non-volatile register save area pushed by
+// GenerateEntry's PushRegsInMask(NonVolatileRegs) prologue.
+static const unsigned FramePushedAfterSave = NonVolatileRegs.gprs().size() * STACK_SLOT_SIZE +
+                                             NonVolatileRegs.fpus().size() * sizeof(double);
+
+// Generate the C++-to-asm.js entry trampoline for one exported function:
+// save non-volatile registers, unpack the boxed argument array into the
+// asm.js calling convention, call the function's code, box the return
+// value back into argv[0], and return a success flag in ReturnReg.
+static bool
+GenerateEntry(ModuleCompiler &m, const AsmJSModule::ExportedFunction &exportedFunc)
+{
+    MacroAssembler &masm = m.masm();
+
+    // In contrast to the system ABI, the Ion convention is that all registers
+    // are clobbered by calls. Thus, we must save the caller's non-volatile
+    // registers.
+    //
+    // NB: GenerateExits assumes that masm.framePushed() == 0 before
+    // PushRegsInMask(NonVolatileRegs).
+    masm.setFramePushed(0);
+    masm.PushRegsInMask(NonVolatileRegs);
+
+    // Remember the stack pointer in the current AsmJSActivation. This will be
+    // used by error exit paths to set the stack pointer back to what it was
+    // right after the (C++) caller's non-volatile registers were saved so that
+    // they can be restored.
+    JS_ASSERT(masm.framePushed() == FramePushedAfterSave);
+    Register activation = ABIArgGenerator::NonArgReturnVolatileReg1;
+    LoadAsmJSActivationIntoRegister(masm, activation);
+    masm.movePtr(StackPointer, Operand(activation, AsmJSActivation::offsetOfErrorRejoinSP()));
+
+#if defined(JS_CPU_X64)
+    // Install the heap pointer into the globally-pinned HeapReg. The heap
+    // pointer is stored in the global data section and is patched at dynamic
+    // link time.
+    CodeOffsetLabel label = masm.loadRipRelativeInt64(HeapReg);
+    m.addGlobalAccess(AsmJSGlobalAccess(label.offset(), m.module().heapOffset()));
+#endif
+
+    // Fetch the entry's argv pointer: on x86 it is passed on the caller's
+    // stack; on x64 it arrives in IntArgReg0 and is spilled across the call.
+    Register argv = ABIArgGenerator::NonArgReturnVolatileReg1;
+    Register scratch = ABIArgGenerator::NonArgReturnVolatileReg2;
+#if defined(JS_CPU_X86)
+    masm.movl(Operand(StackPointer, NativeFrameSize + masm.framePushed()), argv);
+#elif defined(JS_CPU_X64)
+    masm.movq(IntArgReg0, argv);
+    masm.Push(argv);
+#endif
+
+    // Bump the stack for the call.
+    const ModuleCompiler::Func &func = *m.lookupFunction(exportedFunc.name());
+    unsigned stackDec = StackDecrementForCall(masm, func.argMIRTypes());
+    masm.reserveStack(stackDec);
+
+    // Copy each argument out of the 64-bit-per-slot argv array into its
+    // ABI location (register or outgoing stack slot).
+    for (ABIArgIter iter(func.argMIRTypes()); !iter.done(); iter++) {
+        Operand src(argv, iter.index() * sizeof(uint64_t));
+        switch (iter->kind()) {
+          case ABIArg::GPR:
+            masm.load32(src, iter->gpr());
+            break;
+          case ABIArg::FPU:
+            masm.loadDouble(src, iter->fpu());
+            break;
+          case ABIArg::Stack:
+            if (iter.mirType() == MIRType_Int32) {
+                masm.load32(src, scratch);
+                masm.storePtr(scratch, Operand(StackPointer, iter->offsetFromArgBase()));
+            } else {
+                JS_ASSERT(iter.mirType() == MIRType_Double);
+                masm.loadDouble(src, ScratchFloatReg);
+                masm.storeDouble(ScratchFloatReg, Operand(StackPointer, iter->offsetFromArgBase()));
+            }
+            break;
+        }
+    }
+
+    AssertStackAlignment(masm);
+    masm.call(func.codeLabel());
+
+    masm.freeStack(stackDec);
+
+    // Reload argv — its register was clobbered by the call.
+#if defined(JS_CPU_X86)
+    masm.movl(Operand(StackPointer, NativeFrameSize + masm.framePushed()), argv);
+#elif defined(JS_CPU_X64)
+    masm.Pop(argv);
+#endif
+
+    // Store return value in argv[0]
+    switch (func.returnType().which()) {
+      case RetType::Void:
+        break;
+      case RetType::Signed:
+        masm.storeValue(JSVAL_TYPE_INT32, ReturnReg, Address(argv, 0));
+        break;
+      case RetType::Double:
+        masm.canonicalizeDouble(ReturnFloatReg);
+        masm.storeDouble(ReturnFloatReg, Address(argv, 0));
+        break;
+    }
+
+    // Restore clobbered registers.
+    masm.PopRegsInMask(NonVolatileRegs);
+    JS_ASSERT(masm.framePushed() == 0);
+
+    masm.move32(Imm32(true), ReturnReg);
+    masm.ret();
+    return true;
+}
+
+// Emit one C++-to-asm.js entry trampoline per exported function,
+// recording each trampoline's offset in the module.
+static bool
+GenerateEntries(ModuleCompiler &m)
+{
+    for (unsigned i = 0; i < m.module().numExportedFunctions(); i++) {
+        m.setEntryOffset(i);
+        const AsmJSModule::ExportedFunction &exported = m.module().exportedFunction(i);
+        if (!GenerateEntry(m, exported))
+            return false;
+    }
+
+    return true;
+}
+
+// FFI exit helper: invoke the JS function stored in the exit datum and
+// discard its result. Returns a C-compatible success flag (int32_t 0/1).
+static int32_t
+InvokeFromAsmJS_Ignore(JSContext *cx, AsmJSModule::ExitDatum *exitDatum, int32_t argc, Value *argv)
+{
+    RootedValue fval(cx, ObjectValue(*exitDatum->fun));
+    RootedValue rval(cx);
+    return Invoke(cx, UndefinedValue(), fval, argc, argv, rval.address());
+}
+
+// FFI exit helper: invoke the JS function, coerce its result via ToInt32,
+// and store the coerced value back into argv[0] for the stub to unbox.
+// Returns a C-compatible success flag (int32_t 0/1).
+static int32_t
+InvokeFromAsmJS_ToInt32(JSContext *cx, AsmJSModule::ExitDatum *exitDatum, int32_t argc, Value *argv)
+{
+    RootedValue fval(cx, ObjectValue(*exitDatum->fun));
+    RootedValue rval(cx);
+    if (!Invoke(cx, UndefinedValue(), fval, argc, argv, rval.address()))
+        return false;
+
+    int32_t i32;
+    if (!ToInt32(cx, rval, &i32))
+        return false;
+
+    argv[0] = Int32Value(i32);
+    return true;
+}
+
+// FFI exit helper: invoke the JS function, coerce its result via ToNumber,
+// and store the coerced double back into argv[0] for the stub to load.
+// Returns a C-compatible success flag (int32_t 0/1).
+static int32_t
+InvokeFromAsmJS_ToNumber(JSContext *cx, AsmJSModule::ExitDatum *exitDatum, int32_t argc, Value *argv)
+{
+    RootedValue fval(cx, ObjectValue(*exitDatum->fun));
+    RootedValue rval(cx);
+    if (!Invoke(cx, UndefinedValue(), fval, argc, argv, rval.address()))
+        return false;
+
+    double dbl;
+    if (!ToNumber(cx, rval, &dbl))
+        return false;
+    argv[0] = DoubleValue(dbl);
+
+    return true;
+}
+
+// See "asm.js FFI calls" comment above.
+static void
+GenerateFFIExit(ModuleCompiler &m, const ModuleCompiler::ExitDescriptor &exit, unsigned exitIndex,
+                Label *throwLabel)
+{
+    MacroAssembler &masm = m.masm();
+    masm.align(CodeAlignment);
+    m.setExitOffset(exitIndex);
+
+    MIRType typeArray[] = { MIRType_Pointer,   // cx
+                            MIRType_Pointer,   // exitDatum
+                            MIRType_Int32,     // argc
+                            MIRType_Pointer }; // argv
+    MIRTypeVector invokeArgTypes(m.cx());
+    invokeArgTypes.infallibleAppend(typeArray, ArrayLength(typeArray));
+
+    // Reserve space for a call to InvokeFromAsmJS_* and an array of values
+    // passed to this FFI call.
+    unsigned arraySize = Max<size_t>(1, exit.argTypes().length()) * sizeof(Value);
+    unsigned stackDec = StackDecrementForCall(masm, invokeArgTypes, arraySize);
+    masm.setFramePushed(0);
+    masm.reserveStack(stackDec);
+
+    // Fill the argument array.
+    unsigned offsetToCallerStackArgs = NativeFrameSize + masm.framePushed();
+    unsigned offsetToArgv = StackArgBytes(invokeArgTypes);
+    for (ABIArgIter i(exit.argTypes()); !i.done(); i++) {
+        Address dstAddr = Address(StackPointer, offsetToArgv + i.index() * sizeof(Value));
+        switch (i->kind()) {
+          case ABIArg::GPR:
+            masm.storeValue(JSVAL_TYPE_INT32, i->gpr(), dstAddr);
+            break;
+          case ABIArg::FPU:
+            masm.canonicalizeDouble(i->fpu());
+            masm.storeDouble(i->fpu(), dstAddr);
+            break;
+          case ABIArg::Stack:
+            if (i.mirType() == MIRType_Int32) {
+                Address src(StackPointer, offsetToCallerStackArgs + i->offsetFromArgBase());
+                Register scratch = ABIArgGenerator::NonArgReturnVolatileReg1;
+                masm.load32(src, scratch);
+                masm.storeValue(JSVAL_TYPE_INT32, scratch, dstAddr);
+            } else {
+                JS_ASSERT(i.mirType() == MIRType_Double);
+                Address src(StackPointer, offsetToCallerStackArgs + i->offsetFromArgBase());
+                masm.loadDouble(src, ScratchFloatReg);
+                masm.canonicalizeDouble(ScratchFloatReg);
+                masm.storeDouble(ScratchFloatReg, dstAddr);
+            }
+            break;
+        }
+    }
+
+    // Prepare the arguments for the call to InvokeFromAsmJS_*.
+    ABIArgIter i(invokeArgTypes);
+    Register scratch = ABIArgGenerator::NonArgReturnVolatileReg1;
+    Register activation = ABIArgGenerator::NonArgReturnVolatileReg2;
+    LoadAsmJSActivationIntoRegister(masm, activation);
+
+    // argument 0: cx
+    if (i->kind() == ABIArg::GPR) {
+        LoadJSContextFromActivation(masm, activation, i->gpr());
+    } else {
+        LoadJSContextFromActivation(masm, activation, scratch);
+        masm.movePtr(scratch, Operand(StackPointer, i->offsetFromArgBase()));
+    }
+    i++;
+
+    // argument 1: exitDatum
+    CodeOffsetLabel label;
+#if defined(JS_CPU_X64)
+    label = masm.leaRipRelative(i->gpr());
+#else
+    if (i->kind() == ABIArg::GPR) {
+        label = masm.movlWithPatch(Imm32(0), i->gpr());
+    } else {
+        label = masm.movlWithPatch(Imm32(0), scratch);
+        masm.movl(scratch, Operand(StackPointer, i->offsetFromArgBase()));
+    }
+#endif
+    unsigned globalDataOffset = m.module().exitIndexToGlobalDataOffset(exitIndex);
+    m.addGlobalAccess(AsmJSGlobalAccess(label.offset(), globalDataOffset));
+    i++;
+
+    // argument 2: argc
+    unsigned argc = exit.argTypes().length();
+    if (i->kind() == ABIArg::GPR)
+        masm.mov(Imm32(argc), i->gpr());
+    else
+        masm.move32(Imm32(argc), Operand(StackPointer, i->offsetFromArgBase()));
+    i++;
+
+    // argument 3: argv
+    Address argv(StackPointer, offsetToArgv);
+    if (i->kind() == ABIArg::GPR) {
+        masm.computeEffectiveAddress(argv, i->gpr());
+    } else {
+        masm.computeEffectiveAddress(argv, scratch);
+        masm.movePtr(scratch, Operand(StackPointer, i->offsetFromArgBase()));
+    }
+    i++;
+    JS_ASSERT(i.done());
+
+    // Make the call, test whether it succeeded, and extract the return value.
+    AssertStackAlignment(masm);
+    switch (exit.use().which()) {
+      case Use::NoCoercion:
+        masm.call(ImmWord(JS_FUNC_TO_DATA_PTR(void*, &InvokeFromAsmJS_Ignore)));
+        masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+        break;
+      case Use::ToInt32:
+        masm.call(ImmWord(JS_FUNC_TO_DATA_PTR(void*, &InvokeFromAsmJS_ToInt32)));
+        masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+        masm.unboxInt32(argv, ReturnReg);
+        break;
+      case Use::ToNumber:
+        masm.call(ImmWord(JS_FUNC_TO_DATA_PTR(void*, &InvokeFromAsmJS_ToNumber)));
+        masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+        masm.loadDouble(argv, ReturnFloatReg);
+        break;
+      case Use::AddOrSub:
+        JS_NOT_REACHED("Should have been a type error");
+    }
+
+    // Note: the caller is IonMonkey code which means there are no non-volatile
+    // registers to restore.
+    masm.freeStack(stackDec);
+    masm.ret();
+}
+
+// The stack-overflow exit is called when the stack limit has definitely been
+// exceeded. In this case, we can clobber everything since we are about to pop
+// all the frames.
+static void
+GenerateStackOverflowExit(ModuleCompiler &m, Label *throwLabel)
+{
+    MacroAssembler &masm = m.masm();
+    masm.align(CodeAlignment);
+    masm.bind(&m.stackOverflowLabel());
+
+#if defined(JS_CPU_X86)
+    // Ensure that at least one slot is pushed for passing 'cx' below.
+    masm.push(Imm32(0));
+#endif
+
+    // We know that StackPointer is word-aligned, but nothing past that. Thus,
+    // we must align StackPointer dynamically. Don't worry about restoring
+    // StackPointer since throwLabel will clobber StackPointer immediately.
+    masm.andPtr(Imm32(~(StackAlignment - 1)), StackPointer);
+    if (ShadowStackSpace)
+        masm.subPtr(Imm32(ShadowStackSpace), StackPointer);
+
+    // Prepare the arguments for the call to js_ReportOverRecursed.
+#if defined(JS_CPU_X86)
+    // x86: the single argument is passed on the stack (slot pushed above).
+    LoadAsmJSActivationIntoRegister(masm, eax);
+    LoadJSContextFromActivation(masm, eax, eax);
+    masm.storePtr(eax, Address(StackPointer, 0));
+#elif defined(JS_CPU_X64)
+    // x64: the single argument is passed in the first integer argument register.
+    LoadAsmJSActivationIntoRegister(masm, IntArgReg0);
+    LoadJSContextFromActivation(masm, IntArgReg0, IntArgReg0);
+#else
+# error "ARM here"
+#endif
+
+    // Assigning through a typed function pointer lets the compiler check the
+    // signature before the address is erased to void* for the call.
+    void (*pf)(JSContext*) = js_ReportOverRecursed;
+    masm.call(ImmWord(JS_FUNC_TO_DATA_PTR(void*, pf)));
+    // js_ReportOverRecursed reports the error; unwind via the throw exit.
+    masm.jmp(throwLabel);
+}
+
+// The operation-callback exit is called from arbitrarily-interrupted asm.js
+// code. That means we must first save *all* registers and restore *all*
+// registers when we resume. The address to resume to (assuming that
+// js_HandleExecutionInterrupt doesn't indicate that the execution should be
+// aborted) is stored in AsmJSActivation::resumePC_. Unfortunately, loading
+// this requires a scratch register which we don't have after restoring all
+// registers. To hack around this, push the resumePC on the stack so that it
+// can be popped directly into PC.
+static void
+GenerateOperationCallbackExit(ModuleCompiler &m, Label *throwLabel)
+{
+    MacroAssembler &masm = m.masm();
+    masm.align(CodeAlignment);
+    masm.bind(&m.operationCallbackLabel());
+
+    // Be very careful here not to perturb the machine state before saving it
+    // to the stack. In particular, add/sub instructions may set conditions in
+    // the flags register.
+    masm.push(Imm32(0));            // space for resumePC
+    masm.pushFlags();               // after this we are safe to use sub
+    masm.setFramePushed(0);         // set to zero so we can use masm.framePushed() below
+    masm.PushRegsInMask(AllRegs);   // save all GP/FP registers
+
+    Register activation = ABIArgGenerator::NonArgReturnVolatileReg1;
+    Register scratch = ABIArgGenerator::NonArgReturnVolatileReg2;
+
+    // Store resumePC into the reserved space (the slot pushed first above,
+    // which sits just above the saved flags at framePushed() + sizeof(void*)).
+    LoadAsmJSActivationIntoRegister(masm, activation);
+    masm.loadPtr(Address(activation, AsmJSActivation::offsetOfResumePC()), scratch);
+    masm.storePtr(scratch, Address(StackPointer, masm.framePushed() + sizeof(void*)));
+
+    // We know that StackPointer is word-aligned, but not necessarily
+    // stack-aligned, so we need to align it dynamically. Save the original
+    // StackPointer in a non-volatile register so it can be restored after the
+    // call.
+    masm.mov(StackPointer, ABIArgGenerator::NonVolatileReg);
+#if defined(JS_CPU_X86)
+    // Ensure that at least one slot is pushed for passing 'cx' below.
+    masm.push(Imm32(0));
+#endif
+    masm.andPtr(Imm32(~(StackAlignment - 1)), StackPointer);
+    if (ShadowStackSpace)
+        masm.subPtr(Imm32(ShadowStackSpace), StackPointer);
+
+    // argument 0: cx
+#if defined(JS_CPU_X86)
+    LoadJSContextFromActivation(masm, activation, scratch);
+    masm.storePtr(scratch, Address(StackPointer, 0));
+#elif defined(JS_CPU_X64)
+    LoadJSContextFromActivation(masm, activation, IntArgReg0);
+#endif
+
+    // Typed function pointer: the compiler checks the signature before the
+    // address is erased to void* for the call.
+    JSBool (*pf)(JSContext*) = js_HandleExecutionInterrupt;
+    masm.call(ImmWord(JS_FUNC_TO_DATA_PTR(void*, pf)));
+    // A false return means execution should be aborted: unwind via throwLabel.
+    masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+
+    // Restore the StackPointer to its position before the call.
+    masm.mov(ABIArgGenerator::NonVolatileReg, StackPointer);
+
+    // Restore the machine state to before the interrupt.
+    masm.PopRegsInMask(AllRegs);  // restore all GP/FP registers
+    masm.popFlags();              // after this, nothing that sets conditions
+    masm.ret();                   // pop resumePC into PC
+}
+
+// If an exception is thrown, simply pop all frames (since asm.js does not
+// contain try/catch). To do this:
+//  1. Restore 'sp' to its value right after the PushRegsInMask in GenerateEntry.
+//  2. PopRegsInMask to restore the caller's non-volatile registers.
+//  3. Return (to CallAsmJS).
+static void
+GenerateThrowExit(ModuleCompiler &m, Label *throwLabel)
+{
+    MacroAssembler &masm = m.masm();
+    masm.align(CodeAlignment);
+    masm.bind(throwLabel);
+
+    Register activation = ABIArgGenerator::NonArgReturnVolatileReg1;
+    LoadAsmJSActivationIntoRegister(masm, activation);
+
+    // errorRejoinSP_ records the stack pointer right after the entry prologue
+    // saved the non-volatile registers; jumping the stack there discards all
+    // intervening asm.js frames in one step.
+    masm.setFramePushed(FramePushedAfterSave);
+    masm.mov(Operand(activation, AsmJSActivation::offsetOfErrorRejoinSP()), StackPointer);
+    masm.PopRegsInMask(NonVolatileRegs);
+    JS_ASSERT(masm.framePushed() == 0);
+
+    // Return 'false' to the entry's caller to signal that an error is pending.
+    masm.mov(Imm32(0), ReturnReg);
+    masm.ret();
+}
+
+// Emit all out-of-line exit stubs for the module: one FFI exit per distinct
+// exit signature, plus the (shared) stack-overflow, operation-callback and
+// throw exits. All stubs unwind through the single 'throwLabel' bound by
+// GenerateThrowExit. Returns false on assembler OOM.
+static bool
+GenerateExits(ModuleCompiler &m)
+{
+    Label throwLabel;
+
+    for (ModuleCompiler::ExitMap::Range r = m.allExits(); !r.empty(); r.popFront()) {
+        GenerateFFIExit(m, r.front().key, r.front().value, &throwLabel);
+        if (m.masm().oom())
+            return false;
+    }
+
+    // Only emit the stack-overflow stub if some generated code referenced it.
+    if (m.stackOverflowLabel().used())
+        GenerateStackOverflowExit(m, &throwLabel);
+
+    GenerateOperationCallbackExit(m, &throwLabel);
+
+    GenerateThrowExit(m, &throwLabel);
+    return true;
+}
+
+// Top-level driver of asm.js validation and compilation for the module
+// function 'fn'. On success, *module holds the finished AsmJSModule. The
+// checks run in source order over the module's statements (tracked by
+// 'stmtIter'), followed by function-body compilation and stub generation.
+static bool
+CheckModule(JSContext *cx, TokenStream &ts, ParseNode *fn, ScopedJSDeletePtr<AsmJSModule> *module)
+{
+    ModuleCompiler m(cx, ts);
+    if (!m.init())
+        return false;
+
+    if (PropertyName *moduleFunctionName = FunctionName(fn)) {
+        if (!CheckModuleLevelName(m, moduleFunctionName, fn))
+            return false;
+        m.initModuleFunctionName(moduleFunctionName);
+    }
+
+    ParseNode *stmtIter = NULL;
+
+    if (!CheckFunctionHead(m, fn, &stmtIter))
+        return false;
+
+    if (!CheckModuleArguments(m, fn))
+        return false;
+
+    if (!SkipUseAsmDirective(m, &stmtIter))
+        return false;
+
+    if (!CheckModuleGlobals(m, &stmtIter))
+        return false;
+
+    if (!CheckFunctionSignatures(m, &stmtIter))
+        return false;
+
+    if (!CheckFuncPtrTables(m, &stmtIter))
+        return false;
+
+    if (!CheckModuleExports(m, fn, &stmtIter))
+        return false;
+
+    // Nothing may follow the export statement.
+    if (stmtIter)
+        return m.fail("The top-level export (return) must be the last statement.", stmtIter);
+
+    m.setFirstPassComplete();
+
+    // Second pass: type-check and compile every function body.
+    if (!CheckFunctionBodies(m))
+        return false;
+
+    m.setSecondPassComplete();
+
+    // Emit the entry trampolines and exit stubs around the compiled bodies.
+    if (!GenerateEntries(m))
+        return false;
+
+    if (!GenerateExits(m))
+        return false;
+
+    return m.finish(module);
+}
+
+#endif // defined(JS_ASMJS)
+
+// Report a non-fatal warning with the given error-message number (and
+// optional detail string). Returns the result of the report call, so callers
+// can 'return Warn(...)' directly from a bool-returning path.
+static bool
+Warn(JSContext *cx, int code, const char *str = NULL)
+{
+    return JS_ReportErrorFlagsAndNumber(cx, JSREPORT_WARNING, js_GetErrorMessage,
+                                        NULL, code, str);
+}
+
+extern bool
+EnsureAsmJSSignalHandlersInstalled();
+
+// Entry point called after parsing a "use asm" function. Checks the various
+// preconditions (FP support, pref enabled, not debugging, signal handlers),
+// then validates and compiles the module. On type-check failure a warning is
+// emitted and compilation falls back to normal JS; 'false' is returned only
+// for a real pending error (e.g. OOM).
+bool
+js::CompileAsmJS(JSContext *cx, TokenStream &ts, ParseNode *fn, HandleScript script)
+{
+    if (!JSC::MacroAssembler().supportsFloatingPoint())
+        return Warn(cx, JSMSG_USE_ASM_TYPE_FAIL, "Disabled by lack of floating point support");
+
+    if (!cx->hasOption(JSOPTION_ASMJS))
+        return Warn(cx, JSMSG_USE_ASM_TYPE_FAIL, "Disabled by javascript.options.experimental_asmjs "
+                                                 "in about:config");
+
+    if (cx->compartment->debugMode())
+        return Warn(cx, JSMSG_USE_ASM_TYPE_FAIL, "Disabled by debugger");
+
+#ifdef JS_ASMJS
+    if (!EnsureAsmJSSignalHandlersInstalled())
+        return Warn(cx, JSMSG_USE_ASM_TYPE_FAIL, "Platform missing signal handler support");
+
+    ScopedJSDeletePtr<AsmJSModule> module;
+    if (!CheckModule(cx, ts, fn, &module))
+        return !cx->isExceptionPending();
+
+    // Wrap the module in a GC-thing so it is kept alive by the script.
+    RootedObject moduleObj(cx, NewAsmJSModuleObject(cx, &module));
+    if (!moduleObj)
+        return false;
+
+    JS_ASSERT(!script->asmJS);
+    script->asmJS.init(moduleObj);
+
+    return Warn(cx, JSMSG_USE_ASM_TYPE_OK);
+#else
+    return Warn(cx, JSMSG_USE_ASM_TYPE_FAIL, "Platform not supported");
+#endif
+}
+
+// JSNative exposed for shell testing: reports whether asm.js compilation
+// could succeed in this runtime (FP support present and debugger disabled).
+// Always returns true (the JS-level answer is the boolean rval).
+JSBool
+js::IsAsmJSCompilationAvailable(JSContext *cx, unsigned argc, Value *vp)
+{
+    CallArgs args = CallArgsFromVp(argc, vp);
+
+#ifdef JS_ASMJS
+    bool available = JSC::MacroAssembler().supportsFloatingPoint() &&
+                     !cx->compartment->debugMode();
+#else
+    bool available = false;
+#endif
+
+    args.rval().set(BooleanValue(available));
+    return true;
+}
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/AsmJS.h
@@ -0,0 +1,105 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=99:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(jsion_asmjs_h__)
+#define jsion_asmjs_h__
+
+// asm.js compilation is only available on desktop x86/x64 at the moment.
+// Don't panic, mobile support is coming soon.
+#if defined(JS_ION) && \
+    !defined(ANDROID) && \
+    (defined(JS_CPU_X86) || defined(JS_CPU_X64)) &&  \
+    (defined(__linux__) || defined(XP_WIN) || defined(XP_MACOSX))
+# define JS_ASMJS
+#endif
+
+namespace js {
+
+class SPSProfiler;
+class AsmJSModule;
+namespace frontend { struct TokenStream; struct ParseNode; }
+
+// Return whether asm.js optimization is inhibited by the platform or
+// dynamically disabled. (Exposed as JSNative for shell testing.)
+extern JSBool
+IsAsmJSCompilationAvailable(JSContext *cx, unsigned argc, Value *vp);
+
+// Called after parsing a function 'fn' which contains the "use asm" directive.
+// This function performs type-checking and code-generation. If type-checking
+// succeeds, the generated module is assigned to script->asmJS. Otherwise, a
+// warning will be emitted and script->asmJS is left null. The function returns
+// 'false' only if a real JS semantic error (probably OOM) is pending.
+extern bool
+CompileAsmJS(JSContext *cx, frontend::TokenStream &ts, frontend::ParseNode *fn, HandleScript s);
+
+// Called by the JSOP_LINKASMJS opcode (which is emitted as the first opcode of
+// a "use asm" function which successfully typechecks). This function performs
+// the validation and dynamic linking of a module to its given arguments. If
+// validation succeeds, the module's return value (its exports) are returned
+// as an object in 'rval' and the interpreter should return 'rval' immediately.
+// Otherwise, there was a validation error and execution should continue
+// normally in the interpreter. The function returns 'false' only if a real JS
+// semantic error (OOM or exception thrown when executing GetProperty on the
+// arguments) is pending.
+extern bool
+LinkAsmJS(JSContext *cx, StackFrame *fp, MutableHandleValue rval);
+
+// Force any currently-executing asm.js code to call
+// js_HandleExecutionInterrupt.
+void
+TriggerOperationCallbackForAsmJSCode(JSRuntime *rt);
+
+// The JSRuntime maintains a stack of AsmJSModule activations. An "activation"
+// of module A is an initial call from outside A into a function inside A,
+// followed by a sequence of calls inside A, and terminated by a call that
+// leaves A. The AsmJSActivation stack serves three purposes:
+//  - record the correct cx to pass to VM calls from asm.js;
+//  - record enough information to pop all the frames of an activation if an
+//    exception is thrown;
+//  - record the information necessary for asm.js signal handlers to safely
+//    recover from (expected) out-of-bounds access, the operation callback,
+//    stack overflow, division by zero, etc.
+class AsmJSActivation
+{
+    JSContext *cx_;                 // cx of the thread that entered the module
+    const AsmJSModule &module_;     // module being activated
+    unsigned entryIndex_;           // index of the exported function called
+    AsmJSActivation *prev_;         // next-older activation on this runtime
+    void *errorRejoinSP_;           // sp to unwind to on a thrown exception
+    SPSProfiler *profiler_;         // non-null iff SPS profiling was enabled
+    void *resumePC_;                // pc to resume at after an interrupt
+
+  public:
+    AsmJSActivation(JSContext *cx, const AsmJSModule &module, unsigned entryIndex);
+    ~AsmJSActivation();
+
+    const AsmJSModule &module() const { return module_; }
+
+    // Read by JIT code:
+    static unsigned offsetOfContext() { return offsetof(AsmJSActivation, cx_); }
+    static unsigned offsetOfResumePC() { return offsetof(AsmJSActivation, resumePC_); }
+
+    // Initialized by JIT code:
+    static unsigned offsetOfErrorRejoinSP() { return offsetof(AsmJSActivation, errorRejoinSP_); }
+
+    // Set from SIGSEGV handler:
+    void setResumePC(void *pc) { resumePC_ = pc; }
+};
+
+// The asm.js spec requires that the ArrayBuffer's byteLength be a multiple of 4096.
+static const size_t AsmJSAllocationGranularity = 4096;
+
+// On x64, the internal ArrayBuffer data array is inflated to 4GiB (only the
+// byteLength portion of which is accessible) so that out-of-bounds accesses
+// (made using a uint32 index) are guaranteed to raise a SIGSEGV.
+# ifdef JS_CPU_X64
+static const size_t AsmJSBufferProtectedSize = 4 * 1024ULL * 1024ULL * 1024ULL;
+# endif
+
+} // namespace js
+
+#endif // jsion_asmjs_h__
new file mode 100644
--- /dev/null
+++ b/js/src/ion/AsmJSLink.cpp
@@ -0,0 +1,403 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=99:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jsmath.h"
+#include "jscntxt.h"
+
+#include "jstypedarrayinlines.h"
+
+#include "AsmJS.h"
+#include "AsmJSModule.h"
+
+using namespace js;
+using namespace js::ion;
+using namespace mozilla;
+
+// Emit a link-failure warning with detail string 'str' and return false so
+// callers can 'return LinkFail(...)'. Link failure is not a JS error: the
+// caller falls back to normal interpretation of the module function.
+static bool
+LinkFail(JSContext *cx, const char *str)
+{
+    JS_ReportErrorFlagsAndNumber(cx, JSREPORT_WARNING, js_GetErrorMessage,
+                                 NULL, JSMSG_USE_ASM_LINK_FAIL, str);
+    return false;
+}
+
+// Initialize the global-data slot of one module-level variable: either copy
+// its compile-time constant initializer, or read the named property off the
+// import object and coerce it per the declared coercion. Returns false only
+// on a real pending error (GetProperty/coercion can run arbitrary JS).
+static bool
+ValidateGlobalVariable(JSContext *cx, const AsmJSModule &module, AsmJSModule::Global global,
+                       HandleValue importVal)
+{
+    JS_ASSERT(global.which() == AsmJSModule::Global::Variable);
+
+    // Raw pointer into the module's global data area for this variable.
+    void *datum = module.globalVarIndexToGlobalDatum(global.varIndex());
+
+    switch (global.varInitKind()) {
+      case AsmJSModule::Global::InitConstant: {
+        const Value &v = global.varInitConstant();
+        if (v.isInt32())
+            *(int32_t *)datum = v.toInt32();
+        else
+            *(double *)datum = v.toDouble();
+        break;
+      }
+      case AsmJSModule::Global::InitImport: {
+        RootedPropertyName field(cx, global.varImportField());
+        RootedValue v(cx);
+        if (!GetProperty(cx, importVal, field, &v))
+            return false;
+
+        switch (global.varImportCoercion()) {
+          case AsmJS_ToInt32:
+            if (!ToInt32(cx, v, (int32_t *)datum))
+                return false;
+            break;
+          case AsmJS_ToNumber:
+            if (!ToNumber(cx, v, (double *)datum))
+                return false;
+            break;
+        }
+        break;
+      }
+    }
+
+    return true;
+}
+
+// Validate one FFI import: the named property of the import object must be a
+// function. On success the function is recorded at its FFI index in 'ffis'
+// for later wiring into the module's exit data. Returns false on either a
+// link failure (warning emitted) or a pending error from GetProperty.
+static bool
+ValidateFFI(JSContext *cx, AsmJSModule::Global global, HandleValue importVal,
+            AutoObjectVector *ffis)
+{
+    RootedPropertyName field(cx, global.ffiField());
+    RootedValue v(cx);
+    if (!GetProperty(cx, importVal, field, &v))
+        return false;
+
+    if (!v.isObject() || !v.toObject().isFunction())
+        return LinkFail(cx, "FFI imports must be functions");
+
+    (*ffis)[global.ffiIndex()] = v.toObject().toFunction();
+    return true;
+}
+
+// Validate one typed-array view: the named property of the global object must
+// be the standard constructor for the view type used by the module.
+// NOTE(review): 'bufferVal' is accepted but unused here — the buffer itself is
+// checked in DynamicallyLinkModule; confirm the parameter is intentional.
+static bool
+ValidateArrayView(JSContext *cx, AsmJSModule::Global global, HandleValue globalVal,
+                  HandleValue bufferVal)
+{
+    RootedPropertyName field(cx, global.viewName());
+    RootedValue v(cx);
+    if (!GetProperty(cx, globalVal, field, &v))
+        return false;
+
+    if (!IsTypedArrayConstructor(v, global.viewType()))
+        return LinkFail(cx, "bad typed array constructor");
+
+    return true;
+}
+
+// Validate one Math builtin: global.Math.<name> must still be the original
+// native function the compiler assumed (so calls can be specialized safely).
+static bool
+ValidateMathBuiltin(JSContext *cx, AsmJSModule::Global global, HandleValue globalVal)
+{
+    RootedValue v(cx);
+    if (!GetProperty(cx, globalVal, cx->names().Math, &v))
+        return false;
+    RootedPropertyName field(cx, global.mathName());
+    if (!GetProperty(cx, v, field, &v))
+        return false;
+
+    // Map the recorded builtin enum to the expected native implementation.
+    Native native = NULL;
+    switch (global.mathBuiltin()) {
+      case AsmJSMathBuiltin_sin: native = math_sin; break;
+      case AsmJSMathBuiltin_cos: native = math_cos; break;
+      case AsmJSMathBuiltin_tan: native = math_tan; break;
+      case AsmJSMathBuiltin_asin: native = math_asin; break;
+      case AsmJSMathBuiltin_acos: native = math_acos; break;
+      case AsmJSMathBuiltin_atan: native = math_atan; break;
+      case AsmJSMathBuiltin_ceil: native = js_math_ceil; break;
+      case AsmJSMathBuiltin_floor: native = js_math_floor; break;
+      case AsmJSMathBuiltin_exp: native = math_exp; break;
+      case AsmJSMathBuiltin_log: native = math_log; break;
+      case AsmJSMathBuiltin_pow: native = js_math_pow; break;
+      case AsmJSMathBuiltin_sqrt: native = js_math_sqrt; break;
+      case AsmJSMathBuiltin_abs: native = js_math_abs; break;
+      case AsmJSMathBuiltin_atan2: native = math_atan2; break;
+      case AsmJSMathBuiltin_imul: native = math_imul; break;
+    }
+
+    if (!IsNativeFunction(v, native))
+        return LinkFail(cx, "bad Math.* builtin");
+
+    return true;
+}
+
+// Validate one global constant (e.g. Infinity, NaN): the named property of
+// the global object must be a number equal to the value baked into the
+// compiled code. NaN needs a separate check since NaN != NaN.
+static bool
+ValidateGlobalConstant(JSContext *cx, AsmJSModule::Global global, HandleValue globalVal)
+{
+    RootedPropertyName field(cx, global.constantName());
+    RootedValue v(cx);
+    if (!GetProperty(cx, globalVal, field, &v))
+        return false;
+
+    if (!v.isNumber())
+        return LinkFail(cx, "global constant value needs to be a number");
+
+    // NaN != NaN
+    if (MOZ_DOUBLE_IS_NaN(global.constantValue())) {
+        if (!MOZ_DOUBLE_IS_NaN(v.toNumber()))
+            return LinkFail(cx, "global constant value needs to be NaN");
+    } else {
+        if (v.toNumber() != global.constantValue())
+            return LinkFail(cx, "global constant value mismatch");
+    }
+
+    return true;
+}
+
+// Perform the dynamic-link step for a compiled module against the actual
+// (global, import, buffer) arguments of this call: validate the heap buffer,
+// patch heap accesses (x86), and validate/initialize every recorded global.
+// Returns false on link failure (warning emitted) or a real pending error.
+static bool
+DynamicallyLinkModule(JSContext *cx, StackFrame *fp, HandleObject moduleObj)
+{
+    AsmJSModule &module = AsmJSModuleObjectToModule(moduleObj);
+    if (module.isLinked())
+        return LinkFail(cx, "As a temporary limitation, modules cannot be linked more than "
+                            "once. This limitation should be removed in a future release. To "
+                            "work around this, compile a second module (e.g., using the "
+                            "Function constructor).");
+
+    // The three optional call arguments: stdlib global, FFI imports, heap.
+    RootedValue globalVal(cx, UndefinedValue());
+    if (fp->numActualArgs() > 0)
+        globalVal = fp->unaliasedActual(0);
+
+    RootedValue importVal(cx, UndefinedValue());
+    if (fp->numActualArgs() > 1)
+        importVal = fp->unaliasedActual(1);
+
+    RootedValue bufferVal(cx, UndefinedValue());
+    if (fp->numActualArgs() > 2)
+        bufferVal = fp->unaliasedActual(2);
+
+    Rooted<ArrayBufferObject*> heap(cx);
+    if (module.hasArrayView()) {
+        if (!IsTypedArrayBuffer(bufferVal))
+            return LinkFail(cx, "bad ArrayBuffer argument");
+
+        heap = &bufferVal.toObject().asArrayBuffer();
+
+        // The guard below accepts byteLength == AsmJSAllocationGranularity
+        // (4096), so the message must say "greater than or equal to".
+        if (!IsPowerOfTwo(heap->byteLength()) || heap->byteLength() < AsmJSAllocationGranularity)
+            return LinkFail(cx, "ArrayBuffer byteLength must be a power of two greater than or equal to 4096");
+
+        if (!ArrayBufferObject::prepareForAsmJS(cx, heap))
+            return LinkFail(cx, "Unable to prepare ArrayBuffer for asm.js use");
+
+#if defined(JS_CPU_X86)
+        // x86 has no signal-handler OOB protection: patch the base address and
+        // length into every recorded heap access in the generated code.
+        void *heapOffset = (void*)heap->dataPointer();
+        void *heapLength = (void*)heap->byteLength();
+        uint8_t *code = module.functionCode();
+        for (unsigned i = 0; i < module.numHeapAccesses(); i++) {
+            const AsmJSHeapAccess &access = module.heapAccess(i);
+            JSC::X86Assembler::setPointer(access.patchLengthAt(code), heapLength);
+            JSC::X86Assembler::setPointer(access.patchOffsetAt(code), heapOffset);
+        }
+#endif
+    }
+
+    AutoObjectVector ffis(cx);
+    if (!ffis.resize(module.numFFIs()))
+        return false;
+
+    // Validate every global recorded at compile time against the arguments.
+    for (unsigned i = 0; i < module.numGlobals(); i++) {
+        AsmJSModule::Global global = module.global(i);
+        switch (global.which()) {
+          case AsmJSModule::Global::Variable:
+            if (!ValidateGlobalVariable(cx, module, global, importVal))
+                return false;
+            break;
+          case AsmJSModule::Global::FFI:
+            if (!ValidateFFI(cx, global, importVal, &ffis))
+                return false;
+            break;
+          case AsmJSModule::Global::ArrayView:
+            if (!ValidateArrayView(cx, global, globalVal, bufferVal))
+                return false;
+            break;
+          case AsmJSModule::Global::MathBuiltin:
+            if (!ValidateMathBuiltin(cx, global, globalVal))
+                return false;
+            break;
+          case AsmJSModule::Global::Constant:
+            if (!ValidateGlobalConstant(cx, global, globalVal))
+                return false;
+            break;
+        }
+    }
+
+    // Wire each exit's global datum to its validated FFI function.
+    for (unsigned i = 0; i < module.numExits(); i++)
+        module.exitIndexToGlobalDatum(i).fun = ffis[module.exit(i).ffiIndex()]->toFunction();
+
+    module.setIsLinked(heap);
+    return true;
+}
+
+// Push this activation onto the runtime's activation stack and, if SPS
+// profiling is enabled, enter the profiler frame for the exported function.
+AsmJSActivation::AsmJSActivation(JSContext *cx, const AsmJSModule &module, unsigned entryIndex)
+  : cx_(cx),
+    module_(module),
+    entryIndex_(entryIndex),
+    errorRejoinSP_(NULL),
+    profiler_(NULL),
+    resumePC_(NULL)
+{
+    if (cx->runtime->spsProfiler.enabled()) {
+        profiler_ = &cx->runtime->spsProfiler;
+        JSFunction *fun = module_.exportedFunction(entryIndex_).unclonedFunObj();
+        profiler_->enter(cx_, fun->nonLazyScript(), fun);
+    }
+
+    // NOTE(review): prev_ is read before the AsmJSActivationStackLock below is
+    // taken — confirm the lock only needs to guard the write of the stack head.
+    prev_ = cx_->runtime->mainThread.asmJSActivationStack_;
+
+    PerThreadData::AsmJSActivationStackLock lock(cx_->runtime->mainThread);
+    cx_->runtime->mainThread.asmJSActivationStack_ = this;
+}
+
+// Pop this activation (which must be the top of the stack) and exit the SPS
+// profiler frame if one was entered in the constructor.
+AsmJSActivation::~AsmJSActivation()
+{
+    if (profiler_) {
+        JSFunction *fun = module_.exportedFunction(entryIndex_).unclonedFunObj();
+        profiler_->exit(cx_, fun->nonLazyScript(), fun);
+    }
+
+    JS_ASSERT(cx_->runtime->mainThread.asmJSActivationStack_ == this);
+
+    PerThreadData::AsmJSActivationStackLock lock(cx_->runtime->mainThread);
+    cx_->runtime->mainThread.asmJSActivationStack_ = prev_;
+}
+
+// Extended-slot indices used by exported-function JSFunctions (see
+// NewExportedFunction below).
+static const unsigned ASM_MODULE_SLOT = 0;
+static const unsigned ASM_EXPORT_INDEX_SLOT = 1;
+
+// JSNative behind every exported asm.js function: coerce the actual JS
+// arguments per the export's signature, call into the generated code through
+// the module's entry trampoline, and box the declared return type.
+static JSBool
+CallAsmJS(JSContext *cx, unsigned argc, Value *vp)
+{
+    CallArgs callArgs = CallArgsFromVp(argc, vp);
+    RootedFunction callee(cx, callArgs.callee().toFunction());
+
+    // An asm.js function stores, in its extended slots:
+    //  - a pointer to the module from which it was returned
+    //  - its index in the ordered list of exported functions
+    RootedObject moduleObj(cx, &callee->getExtendedSlot(ASM_MODULE_SLOT).toObject());
+    const AsmJSModule &module = AsmJSModuleObjectToModule(moduleObj);
+
+    // An exported function points to the code as well as the exported
+    // function's signature, which implies the dynamic coercions performed on
+    // the arguments.
+    unsigned exportIndex = callee->getExtendedSlot(ASM_EXPORT_INDEX_SLOT).toInt32();
+    const AsmJSModule::ExportedFunction &func = module.exportedFunction(exportIndex);
+
+    // The calling convention for an external call into asm.js is to pass an
+    // array of 8-byte values where each value contains either a coerced int32
+    // (in the low word) or double value, with the coercions specified by the
+    // asm.js signature. The external entry point unpacks this array into the
+    // system-ABI-specified registers and stack memory and then calls into the
+    // internal entry point. The return value is stored in the first element of
+    // the array (which, therefore, must have length >= 1).
+
+    Vector<uint64_t, 8> coercedArgs(cx);
+    if (!coercedArgs.resize(Max<size_t>(1, func.numArgs())))
+        return false;
+
+    // Missing actuals are treated as undefined, per normal JS call semantics.
+    RootedValue v(cx);
+    for (unsigned i = 0; i < func.numArgs(); ++i) {
+        v = i < callArgs.length() ? callArgs[i] : UndefinedValue();
+        switch (func.argCoercion(i)) {
+          case AsmJS_ToInt32:
+            if (!ToInt32(cx, v, (int32_t*)&coercedArgs[i]))
+                return false;
+            break;
+          case AsmJS_ToNumber:
+            if (!ToNumber(cx, v, (double*)&coercedArgs[i]))
+                return false;
+            break;
+        }
+    }
+
+    {
+        // The activation must span exactly the execution of the generated code.
+        AsmJSActivation activation(cx, module, exportIndex);
+
+        // Call into generated code.
+        if (!func.code()(coercedArgs.begin()))
+            return false;
+    }
+
+    // Box the raw return value (written into coercedArgs[0] by the entry).
+    switch (func.returnType()) {
+      case AsmJSModule::Return_Void:
+        callArgs.rval().set(UndefinedValue());
+        break;
+      case AsmJSModule::Return_Int32:
+        callArgs.rval().set(Int32Value(*(int32_t*)&coercedArgs[0]));
+        break;
+      case AsmJSModule::Return_Double:
+        callArgs.rval().set(NumberValue(*(double*)&coercedArgs[0]));
+        break;
+    }
+
+    return true;
+}
+
+// Create the JSFunction that JS callers see for one exported asm.js function.
+// It is a native (CallAsmJS) carrying the module object and export index in
+// its extended slots. Returns NULL on OOM.
+static JSFunction *
+NewExportedFunction(JSContext *cx, const AsmJSModule::ExportedFunction &func,
+                    HandleObject moduleObj, unsigned exportIndex)
+{
+    RootedPropertyName name(cx, func.name());
+    JSFunction *fun = NewFunction(cx, NullPtr(), CallAsmJS, func.numArgs(),
+                                  JSFunction::NATIVE_FUN, cx->global(), name,
+                                  JSFunction::ExtendedFinalizeKind);
+    if (!fun)
+        return NULL;
+
+    fun->setExtendedSlot(ASM_MODULE_SLOT, ObjectValue(*moduleObj));
+    fun->setExtendedSlot(ASM_EXPORT_INDEX_SLOT, Int32Value(exportIndex));
+    return fun;
+}
+
+// Implementation of JSOP_LINKASMJS (see AsmJS.h): dynamically link the
+// script's module against the call's arguments and build the export value —
+// either the single unnamed exported function, or an object mapping field
+// names to exported functions. On link failure, returns true with the
+// interpreter expected to fall back to normal execution.
+bool
+js::LinkAsmJS(JSContext *cx, StackFrame *fp, MutableHandleValue rval)
+{
+    RootedObject moduleObj(cx, fp->fun()->nonLazyScript()->asmJS);
+    const AsmJSModule &module = AsmJSModuleObjectToModule(moduleObj);
+
+    if (!DynamicallyLinkModule(cx, fp, moduleObj))
+        return !cx->isExceptionPending();
+
+    // A single export with no field name means "return f" (not "return {...}"):
+    // the export value is the function itself.
+    if (module.numExportedFunctions() == 1) {
+        const AsmJSModule::ExportedFunction &func = module.exportedFunction(0);
+        if (!func.maybeFieldName()) {
+            RootedFunction fun(cx, NewExportedFunction(cx, func, moduleObj, 0));
+            if (!fun)
+                return false;
+
+            rval.set(ObjectValue(*fun));
+            return true;
+        }
+    }
+
+    // Otherwise, build an object with one enumerable property per export.
+    gc::AllocKind allocKind = gc::GetGCObjectKind(module.numExportedFunctions());
+    RootedObject obj(cx, NewBuiltinClassInstance(cx, &ObjectClass, allocKind));
+    if (!obj)
+        return false;
+
+    for (unsigned i = 0; i < module.numExportedFunctions(); i++) {
+        const AsmJSModule::ExportedFunction &func = module.exportedFunction(i);
+
+        RootedFunction fun(cx, NewExportedFunction(cx, func, moduleObj, i));
+        if (!fun)
+            return false;
+
+        JS_ASSERT(func.maybeFieldName() != NULL);
+        RootedId id(cx, NameToId(func.maybeFieldName()));
+        RootedValue val(cx, ObjectValue(*fun));
+        if (!DefineNativeProperty(cx, obj, id, val, NULL, NULL, JSPROP_ENUMERATE, 0, 0))
+            return false;
+    }
+
+    rval.set(ObjectValue(*obj));
+    return true;
+}
new file mode 100644
--- /dev/null
+++ b/js/src/ion/AsmJSModule.h
@@ -0,0 +1,549 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=99:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(jsion_asmjsmodule_h__)
+#define jsion_asmjsmodule_h__
+
+#include "gc/Marking.h"
+#include "ion/RegisterSets.h"
+
+#include "jstypedarrayinlines.h"
+
+namespace js {
+
+// The basis of the asm.js type system is the EcmaScript-defined coercions
+// ToInt32 and ToNumber.
+enum AsmJSCoercion
+{
+    AsmJS_ToInt32,   // coerce the value with ToInt32
+    AsmJS_ToNumber   // coerce the value with ToNumber
+};
+
+// The asm.js spec recognizes this set of builtin Math functions. Each value
+// identifies one standard-library Math function that an asm.js module may
+// import and call.
+enum AsmJSMathBuiltin
+{
+    AsmJSMathBuiltin_sin, AsmJSMathBuiltin_cos, AsmJSMathBuiltin_tan,
+    AsmJSMathBuiltin_asin, AsmJSMathBuiltin_acos, AsmJSMathBuiltin_atan,
+    AsmJSMathBuiltin_ceil, AsmJSMathBuiltin_floor, AsmJSMathBuiltin_exp,
+    AsmJSMathBuiltin_log, AsmJSMathBuiltin_pow, AsmJSMathBuiltin_sqrt,
+    AsmJSMathBuiltin_abs, AsmJSMathBuiltin_atan2, AsmJSMathBuiltin_imul
+};
+
+// An asm.js module represents the collection of functions nested inside a
+// single outer "use asm" function. For example, this asm.js module:
+//   function() { "use asm"; function f() {} function g() {} return f }
+// contains the functions 'f' and 'g'.
+//
+// An asm.js module contains both the jit-code produced by compiling all the
+// functions in the module as well all the data required to perform the
+// link-time validation step in the asm.js spec.
+//
+// NB: this means that AsmJSModule must be GC-safe.
+class AsmJSModule
+{
+  public:
+    // A Global records one entry of the module's global scope that must be
+    // validated/initialized at link time: a global variable, an FFI import,
+    // a typed-array view, a Math builtin or a global constant.
+    class Global
+    {
+      public:
+        enum Which { Variable, FFI, ArrayView, MathBuiltin, Constant };
+        enum VarInitKind { InitConstant, InitImport };
+
+      private:
+        Which which_;
+        union {
+            struct {
+                uint32_t index_;
+                VarInitKind initKind_;
+                union {
+                    Value constant_;
+                    AsmJSCoercion coercion_;
+                } init;
+            } var;
+            uint32_t ffiIndex_;
+            ArrayBufferView::ViewType viewType_;
+            AsmJSMathBuiltin mathBuiltin_;
+            double constantValue_;
+        } u;
+        // Import-object field name; its meaning depends on which_ and it is
+        // null for globals that have no associated field (hence the null
+        // check in trace()).
+        HeapPtrPropertyName name_;
+
+        friend class AsmJSModule;
+        Global(Which which) : which_(which) {}
+
+        void trace(JSTracer *trc) {
+            if (name_)
+                MarkString(trc, &name_, "asm.js global name");
+        }
+
+      public:
+        Which which() const {
+            return which_;
+        }
+        uint32_t varIndex() const {
+            JS_ASSERT(which_ == Variable);
+            return u.var.index_;
+        }
+        VarInitKind varInitKind() const {
+            JS_ASSERT(which_ == Variable);
+            return u.var.initKind_;
+        }
+        const Value &varInitConstant() const {
+            JS_ASSERT(which_ == Variable);
+            JS_ASSERT(u.var.initKind_ == InitConstant);
+            return u.var.init.constant_;
+        }
+        AsmJSCoercion varImportCoercion() const {
+            JS_ASSERT(which_ == Variable);
+            JS_ASSERT(u.var.initKind_ == InitImport);
+            return u.var.init.coercion_;
+        }
+        PropertyName *varImportField() const {
+            JS_ASSERT(which_ == Variable);
+            JS_ASSERT(u.var.initKind_ == InitImport);
+            return name_;
+        }
+        PropertyName *ffiField() const {
+            JS_ASSERT(which_ == FFI);
+            return name_;
+        }
+        uint32_t ffiIndex() const {
+            JS_ASSERT(which_ == FFI);
+            return u.ffiIndex_;
+        }
+        PropertyName *viewName() const {
+            JS_ASSERT(which_ == ArrayView);
+            return name_;
+        }
+        ArrayBufferView::ViewType viewType() const {
+            JS_ASSERT(which_ == ArrayView);
+            return u.viewType_;
+        }
+        PropertyName *mathName() const {
+            JS_ASSERT(which_ == MathBuiltin);
+            return name_;
+        }
+        AsmJSMathBuiltin mathBuiltin() const {
+            JS_ASSERT(which_ == MathBuiltin);
+            return u.mathBuiltin_;
+        }
+        PropertyName *constantName() const {
+            JS_ASSERT(which_ == Constant);
+            return name_;
+        }
+        double constantValue() const {
+            JS_ASSERT(which_ == Constant);
+            return u.constantValue_;
+        }
+    };
+
+    // An Exit records a call out of asm.js code to an FFI function. Before
+    // the module's code is allocated, the exit trampoline is identified by
+    // its offset; patch() turns the offset into an absolute code pointer.
+    class Exit
+    {
+        unsigned ffiIndex_;
+        union {
+            unsigned codeOffset_;
+            uint8_t *code_;
+        } u;
+
+      public:
+        Exit(unsigned ffiIndex)
+          : ffiIndex_(ffiIndex)
+        {
+          u.codeOffset_ = 0;
+        }
+        unsigned ffiIndex() const {
+            return ffiIndex_;
+        }
+        void initCodeOffset(unsigned off) {
+            JS_ASSERT(!u.codeOffset_);
+            u.codeOffset_ = off;
+        }
+        void patch(uint8_t *baseAddress) {
+            u.code_ = baseAddress + u.codeOffset_;
+        }
+        uint8_t *code() const {
+            return u.code_;
+        }
+    };
+
+    // Signature of an exported function's entry trampoline: the caller packs
+    // the arguments into a uint64_t array.
+    typedef int32_t (*CodePtr)(uint64_t *args);
+
+    typedef Vector<AsmJSCoercion, 0, SystemAllocPolicy> ArgCoercionVector;
+
+    enum ReturnType { Return_Int32, Return_Double, Return_Void };
+
+    // An ExportedFunction records one function made callable from JS by the
+    // module's return statement, together with the coercions to apply to its
+    // arguments and result.
+    class ExportedFunction
+    {
+      private:
+        HeapPtrFunction fun_;
+        HeapPtrPropertyName maybeFieldName_;
+        ArgCoercionVector argCoercions_;
+        ReturnType returnType_;
+        bool hasCodePtr_;
+        union {
+            unsigned codeOffset_;
+            CodePtr code_;
+        } u;
+
+        friend class AsmJSModule;
+
+        ExportedFunction(JSFunction *fun,
+                         PropertyName *maybeFieldName,
+                         MoveRef<ArgCoercionVector> argCoercions,
+                         ReturnType returnType)
+          : fun_(fun),
+            maybeFieldName_(maybeFieldName),
+            argCoercions_(argCoercions),
+            returnType_(returnType),
+            hasCodePtr_(false)
+        {
+            u.codeOffset_ = 0;
+        }
+
+        void trace(JSTracer *trc) {
+            MarkObject(trc, &fun_, "asm.js export name");
+            if (maybeFieldName_)
+                MarkString(trc, &maybeFieldName_, "asm.js export field");
+        }
+
+      public:
+        ExportedFunction(MoveRef<ExportedFunction> rhs)
+          : fun_(rhs->fun_),
+            maybeFieldName_(rhs->maybeFieldName_),
+            argCoercions_(Move(rhs->argCoercions_)),
+            returnType_(rhs->returnType_),
+            hasCodePtr_(rhs->hasCodePtr_),
+            u(rhs->u)
+        {}
+
+        void initCodeOffset(unsigned off) {
+            JS_ASSERT(!hasCodePtr_);
+            JS_ASSERT(!u.codeOffset_);
+            u.codeOffset_ = off;
+        }
+        // Convert the recorded code offset into an absolute entry point once
+        // the module's code has been allocated at 'baseAddress'.
+        void patch(uint8_t *baseAddress) {
+            JS_ASSERT(!hasCodePtr_);
+            JS_ASSERT(u.codeOffset_);
+            hasCodePtr_ = true;
+            u.code_ = JS_DATA_TO_FUNC_PTR(CodePtr, baseAddress + u.codeOffset_);
+        }
+
+        PropertyName *name() const {
+            return fun_->name();
+        }
+        JSFunction *unclonedFunObj() const {
+            return fun_;
+        }
+        PropertyName *maybeFieldName() const {
+            return maybeFieldName_;
+        }
+        unsigned numArgs() const {
+            return argCoercions_.length();
+        }
+        AsmJSCoercion argCoercion(unsigned i) const {
+            return argCoercions_[i];
+        }
+        ReturnType returnType() const {
+            return returnType_;
+        }
+        CodePtr code() const {
+            JS_ASSERT(hasCodePtr_);
+            return u.code_;
+        }
+    };
+
+  private:
+    typedef Vector<ExportedFunction, 0, SystemAllocPolicy> ExportedFunctionVector;
+    typedef Vector<Global, 0, SystemAllocPolicy> GlobalVector;
+    typedef Vector<Exit, 0, SystemAllocPolicy> ExitVector;
+    typedef Vector<ion::AsmJSHeapAccess, 0, SystemAllocPolicy> HeapAccessVector;
+
+    GlobalVector                          globals_;
+    ExitVector                            exits_;
+    ExportedFunctionVector                exports_;
+    HeapAccessVector                      heapAccesses_;
+    uint32_t                              numGlobalVars_;
+    uint32_t                              numFFIs_;
+    uint32_t                              numFuncPtrTableElems_;
+    bool                                  hasArrayView_;
+
+    ScopedReleasePtr<JSC::ExecutablePool> codePool_;
+    uint8_t *                             code_;
+    uint8_t *                             operationCallbackExit_;
+    size_t                                functionBytes_;
+    size_t                                codeBytes_;
+    size_t                                totalBytes_;
+
+    bool                                  linked_;
+    HeapPtr<ArrayBufferObject>            maybeHeap_;
+
+    // The global data section lives immediately after the executable code in
+    // the module's linear allocation (see the layout comment below).
+    uint8_t *globalData() const {
+        JS_ASSERT(code_);
+        return code_ + codeBytes_;
+    }
+
+  public:
+    AsmJSModule()
+      : numGlobalVars_(0),
+        numFFIs_(0),
+        numFuncPtrTableElems_(0),
+        hasArrayView_(false),
+        code_(NULL),
+        operationCallbackExit_(NULL),
+        functionBytes_(0),
+        codeBytes_(0),
+        totalBytes_(0),
+        linked_(false)
+    {}
+
+    void trace(JSTracer *trc) {
+        for (unsigned i = 0; i < globals_.length(); i++)
+            globals_[i].trace(trc);
+        for (unsigned i = 0; i < exports_.length(); i++)
+            exports_[i].trace(trc);
+        for (unsigned i = 0; i < exits_.length(); i++) {
+            if (exitIndexToGlobalDatum(i).fun)
+                MarkObject(trc, &exitIndexToGlobalDatum(i).fun, "asm.js imported function");
+        }
+        if (maybeHeap_)
+            MarkObject(trc, &maybeHeap_, "asm.js heap");
+    }
+
+    bool addGlobalVarInitConstant(const Value &v, uint32_t *globalIndex) {
+        if (numGlobalVars_ == UINT32_MAX)
+            return false;
+        Global g(Global::Variable);
+        g.u.var.initKind_ = Global::InitConstant;
+        g.u.var.init.constant_ = v;
+        g.u.var.index_ = *globalIndex = numGlobalVars_++;
+        return globals_.append(g);
+    }
+    bool addGlobalVarImport(PropertyName *fieldName, AsmJSCoercion coercion, uint32_t *globalIndex) {
+        // Guard against index overflow, mirroring addGlobalVarInitConstant.
+        if (numGlobalVars_ == UINT32_MAX)
+            return false;
+        Global g(Global::Variable);
+        g.u.var.initKind_ = Global::InitImport;
+        g.u.var.init.coercion_ = coercion;
+        g.u.var.index_ = *globalIndex = numGlobalVars_++;
+        g.name_ = fieldName;
+        return globals_.append(g);
+    }
+    bool incrementNumFuncPtrTableElems(uint32_t numElems) {
+        if (UINT32_MAX - numFuncPtrTableElems_ < numElems)
+            return false;
+        numFuncPtrTableElems_ += numElems;
+        return true;
+    }
+    bool addFFI(PropertyName *field, uint32_t *ffiIndex) {
+        if (numFFIs_ == UINT32_MAX)
+            return false;
+        Global g(Global::FFI);
+        g.u.ffiIndex_ = *ffiIndex = numFFIs_++;
+        g.name_ = field;
+        return globals_.append(g);
+    }
+    bool addArrayView(ArrayBufferView::ViewType vt, PropertyName *field) {
+        hasArrayView_ = true;
+        Global g(Global::ArrayView);
+        g.u.viewType_ = vt;
+        g.name_ = field;
+        return globals_.append(g);
+    }
+    bool addMathBuiltin(AsmJSMathBuiltin mathBuiltin, PropertyName *field) {
+        Global g(Global::MathBuiltin);
+        g.u.mathBuiltin_ = mathBuiltin;
+        g.name_ = field;
+        return globals_.append(g);
+    }
+    bool addGlobalConstant(double value, PropertyName *fieldName) {
+        Global g(Global::Constant);
+        g.u.constantValue_ = value;
+        g.name_ = fieldName;
+        return globals_.append(g);
+    }
+    bool addExit(unsigned ffiIndex, unsigned *exitIndex) {
+        *exitIndex = unsigned(exits_.length());
+        return exits_.append(Exit(ffiIndex));
+    }
+
+    bool addExportedFunction(RawFunction fun, PropertyName *maybeFieldName,
+                             MoveRef<ArgCoercionVector> argCoercions, ReturnType returnType)
+    {
+        ExportedFunction func(fun, maybeFieldName, argCoercions, returnType);
+        return exports_.append(Move(func));
+    }
+    unsigned numExportedFunctions() const {
+        return exports_.length();
+    }
+    const ExportedFunction &exportedFunction(unsigned i) const {
+        return exports_[i];
+    }
+    ExportedFunction &exportedFunction(unsigned i) {
+        return exports_[i];
+    }
+    bool hasArrayView() const {
+        return hasArrayView_;
+    }
+    unsigned numFFIs() const {
+        return numFFIs_;
+    }
+    unsigned numGlobalVars() const {
+        return numGlobalVars_;
+    }
+    unsigned numGlobals() const {
+        return globals_.length();
+    }
+    const Global &global(unsigned i) const {
+        return globals_[i];
+    }
+    unsigned numFuncPtrTableElems() const {
+        return numFuncPtrTableElems_;
+    }
+    unsigned numExits() const {
+        return exits_.length();
+    }
+    Exit &exit(unsigned i) {
+        return exits_[i];
+    }
+    const Exit &exit(unsigned i) const {
+        return exits_[i];
+    }
+
+    // An Exit holds bookkeeping information about an exit; the ExitDatum
+    // struct overlays the actual runtime data stored in the global data
+    // section.
+    struct ExitDatum
+    {
+        uint8_t *exit;
+        HeapPtrFunction fun;
+    };
+
+    // Global data section
+    //
+    // The global data section is placed after the executable code (i.e., at
+    // offset codeBytes_) in the module's linear allocation. The global data
+    // are laid out in this order:
+    //   0. a pointer/descriptor for the heap that was linked to the module
+    //   1. global variable state (elements are sizeof(uint64_t))
+    //   2. function-pointer table elements (elements are sizeof(void*))
+    //   3. exits (elements are sizeof(ExitDatum))
+    //
+    // NB: The list of exits is extended while emitting function bodies and
+    // thus exits must be at the end of the list to avoid invalidating indices.
+    size_t globalDataBytes() const {
+        return sizeof(void*) +
+               numGlobalVars_ * sizeof(uint64_t) +
+               numFuncPtrTableElems_ * sizeof(void*) +
+               exits_.length() * sizeof(ExitDatum);
+    }
+    unsigned heapOffset() const {
+        return 0;
+    }
+    uint8_t *&heapDatum() const {
+        return *(uint8_t**)(globalData() + heapOffset());
+    }
+    unsigned globalVarIndexToGlobalDataOffset(unsigned i) const {
+        JS_ASSERT(i < numGlobalVars_);
+        return sizeof(void*) +
+               i * sizeof(uint64_t);
+    }
+    void *globalVarIndexToGlobalDatum(unsigned i) const {
+        return (void *)(globalData() + globalVarIndexToGlobalDataOffset(i));
+    }
+    unsigned funcPtrIndexToGlobalDataOffset(unsigned i) const {
+        return sizeof(void*) +
+               numGlobalVars_ * sizeof(uint64_t) +
+               i * sizeof(void*);
+    }
+    void *&funcPtrIndexToGlobalDatum(unsigned i) const {
+        return *(void **)(globalData() + funcPtrIndexToGlobalDataOffset(i));
+    }
+    unsigned exitIndexToGlobalDataOffset(unsigned exitIndex) const {
+        JS_ASSERT(exitIndex < exits_.length());
+        return sizeof(void*) +
+               numGlobalVars_ * sizeof(uint64_t) +
+               numFuncPtrTableElems_ * sizeof(void*) +
+               exitIndex * sizeof(ExitDatum);
+    }
+    ExitDatum &exitIndexToGlobalDatum(unsigned exitIndex) const {
+        return *(ExitDatum *)(globalData() + exitIndexToGlobalDataOffset(exitIndex));
+    }
+
+    void setFunctionBytes(size_t functionBytes) {
+        JS_ASSERT(functionBytes % gc::PageSize == 0);
+        functionBytes_ = functionBytes;
+    }
+    size_t functionBytes() const {
+        JS_ASSERT(functionBytes_);
+        JS_ASSERT(functionBytes_ % gc::PageSize == 0);
+        return functionBytes_;
+    }
+
+    bool addHeapAccesses(const Vector<ion::AsmJSHeapAccess> &accesses) {
+        if (!heapAccesses_.reserve(heapAccesses_.length() + accesses.length()))
+            return false;
+        for (size_t i = 0; i < accesses.length(); i++)
+            heapAccesses_.infallibleAppend(accesses[i]);
+        return true;
+    }
+    unsigned numHeapAccesses() const {
+        return heapAccesses_.length();
+    }
+    ion::AsmJSHeapAccess &heapAccess(unsigned i) {
+        return heapAccesses_[i];
+    }
+    const ion::AsmJSHeapAccess &heapAccess(unsigned i) const {
+        return heapAccesses_[i];
+    }
+
+    // Assume ownership of the executable pool holding the module's code.
+    // 'code' must be page-aligned; the global data section begins at
+    // code + codeBytes.
+    void takeOwnership(JSC::ExecutablePool *pool, uint8_t *code, size_t codeBytes, size_t totalBytes) {
+        JS_ASSERT(uintptr_t(code) % gc::PageSize == 0);
+        codePool_ = pool;
+        code_ = code;
+        codeBytes_ = codeBytes;
+        totalBytes_ = totalBytes;
+    }
+    uint8_t *functionCode() const {
+        JS_ASSERT(code_);
+        JS_ASSERT(uintptr_t(code_) % gc::PageSize == 0);
+        return code_;
+    }
+
+    void setOperationCallbackExit(uint8_t *ptr) {
+        operationCallbackExit_ = ptr;
+    }
+    uint8_t *operationCallbackExit() const {
+        return operationCallbackExit_;
+    }
+
+    // Record the result of dynamic linking; 'maybeHeap' may be null for
+    // modules that do not use a heap.
+    void setIsLinked(Handle<ArrayBufferObject*> maybeHeap) {
+        JS_ASSERT(!linked_);
+        linked_ = true;
+        maybeHeap_ = maybeHeap;
+        heapDatum() = maybeHeap_ ? maybeHeap_->dataPointer() : NULL;
+    }
+    bool isLinked() const {
+        return linked_;
+    }
+    uint8_t *maybeHeap() const {
+        JS_ASSERT(linked_);
+        return heapDatum();
+    }
+    size_t heapLength() const {
+        JS_ASSERT(linked_);
+        return maybeHeap_ ? maybeHeap_->byteLength() : 0;
+    }
+};
+
+// The AsmJSModule C++ object is held by a JSObject that takes care of calling
+// 'trace' and the destructor on finalization.
+extern AsmJSModule &
+AsmJSModuleObjectToModule(JSObject *obj);
+
+}  // namespace js
+
+#endif  // jsion_asmjsmodule_h__
+
new file mode 100644
--- /dev/null
+++ b/js/src/ion/AsmJSSignalHandlers.cpp
@@ -0,0 +1,563 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=99:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jscntxt.h"
+
+#include "jstypedarrayinlines.h"
+
+#include "ion/AsmJS.h"
+#include "ion/AsmJSModule.h"
+#include "assembler/assembler/MacroAssembler.h"
+
+using namespace js;
+using namespace js::ion;
+
+#ifdef JS_ASMJS
+
+// Prevent races trying to install the signal handlers.
+#ifdef JS_THREADSAFE
+# include "jslock.h"
+
+// Global mutex guarding one-time installation of the fault handlers.
+// Construction happens at static-init time; failure to allocate the lock is
+// fatal (MOZ_CRASH).
+class SignalMutex
+{
+    PRLock *mutex_;
+
+  public:
+    SignalMutex() {
+        mutex_ = PR_NewLock();
+        if (!mutex_)
+            MOZ_CRASH();
+    }
+    ~SignalMutex() {
+        PR_DestroyLock(mutex_);
+    }
+    // RAII guard: holds the mutex for its lifetime; the installed-flag
+    // accessors must only be used while a Lock is alive.
+    class Lock {
+        static bool sHandlersInstalled;
+      public:
+        Lock();
+        ~Lock();
+        bool handlersInstalled() const { return sHandlersInstalled; }
+        void setHandlersInstalled() { sHandlersInstalled = true; }
+    };
+} signalMutex;
+
+bool SignalMutex::Lock::sHandlersInstalled = false;
+
+SignalMutex::Lock::Lock()
+{
+    PR_Lock(signalMutex.mutex_);
+}
+
+SignalMutex::Lock::~Lock()
+{
+    PR_Unlock(signalMutex.mutex_);
+}
+#else
+// Single-threaded build: same interface as the locking version above, but
+// Lock is a no-op since there is nothing to race with.
+struct SignalMutex
+{
+    class Lock {
+        static bool sHandlersInstalled;
+      public:
+        Lock() { (void)this; }
+        bool handlersInstalled() const { return sHandlersInstalled; }
+        void setHandlersInstalled() { sHandlersInstalled = true; }
+    };
+};
+
+bool SignalMutex::Lock::sHandlersInstalled = false;
+#endif
+
+// Return the innermost asm.js activation of the current thread, or NULL if
+// there is none (including when no per-thread data exists at all).
+static AsmJSActivation *
+InnermostAsmJSActivation()
+{
+    PerThreadData *data = TlsPerThreadData.get();
+    return data ? data->asmJSActivationStackFromOwnerThread() : NULL;
+}
+
+// Test whether 'pc' falls inside the module's function-code segment.
+static bool
+PCIsInModule(const AsmJSModule &module, void *pc)
+{
+    uint8_t *begin = module.functionCode();
+    uint8_t *end = begin + module.functionBytes();
+    return pc >= begin && pc < end;
+}
+
+# if defined(JS_CPU_X64)
+// Store NaN into the first lane of an XMM register image (the OS-specific
+// register struct T) and zero the remaining lanes. 'isFloat32' selects
+// whether the register is viewed as four floats or two doubles; the static
+// asserts pin T's size to the 128-bit register width.
+template <class T>
+static void
+SetXMMRegToNaN(bool isFloat32, T *xmm_reg)
+{
+    if (isFloat32) {
+        JS_STATIC_ASSERT(sizeof(T) == 4 * sizeof(float));
+        float *floats = reinterpret_cast<float*>(xmm_reg);
+        floats[0] = js_NaN;
+        floats[1] = 0;
+        floats[2] = 0;
+        floats[3] = 0;
+    } else {
+        JS_STATIC_ASSERT(sizeof(T) == 2 * sizeof(double));
+        double *dbls = reinterpret_cast<double*>(xmm_reg);
+        dbls[0] = js_NaN;
+        dbls[1] = 0;
+    }
+}
+
+// Perform a binary search on the projected offsets of the known heap accesses
+// in the module. Returns the AsmJSHeapAccess whose offset exactly matches
+// 'pc', or NULL if there is none.
+// NOTE(review): correctness relies on heapAccesses_ being sorted by offset —
+// presumably guaranteed because accesses are recorded in code-emission order;
+// confirm at the call sites that append them.
+static const AsmJSHeapAccess *
+LookupHeapAccess(const AsmJSModule &module, uint8_t *pc)
+{
+    JS_ASSERT(PCIsInModule(module, pc));
+    size_t targetOffset = pc - module.functionCode();
+
+    if (module.numHeapAccesses() == 0)
+        return NULL;
+
+    // Narrow [low, high] until at most two candidates remain, then check
+    // both explicitly.
+    size_t low = 0;
+    size_t high = module.numHeapAccesses() - 1;
+    while (high - low >= 2) {
+        size_t mid = low + (high - low) / 2;
+        uint32_t midOffset = module.heapAccess(mid).offset();
+        if (targetOffset == midOffset)
+            return &module.heapAccess(mid);
+        if (targetOffset < midOffset)
+            high = mid;
+        else
+            low = mid;
+    }
+    if (targetOffset == module.heapAccess(low).offset())
+        return &module.heapAccess(low);
+    if (targetOffset == module.heapAccess(high).offset())
+        return &module.heapAccess(high);
+
+    return NULL;
+}
+# endif
+
+# if defined(XP_WIN)
+#  include "jswin.h"
+
+// Return a pointer to the program-counter slot inside the Windows thread
+// CONTEXT, so the handler can both read the faulting PC and redirect it.
+static uint8_t **
+ContextToPC(PCONTEXT context)
+{
+#  if defined(JS_CPU_X64)
+    JS_STATIC_ASSERT(sizeof(context->Rip) == sizeof(void*));
+    return reinterpret_cast<uint8_t**>(&context->Rip);
+#  else
+    JS_STATIC_ASSERT(sizeof(context->Eip) == sizeof(void*));
+    return reinterpret_cast<uint8_t**>(&context->Eip);
+#  endif
+}
+
+#  if defined(JS_CPU_X64)
+// Write the asm.js value of a coerced 'undefined' into the destination
+// register of a faulted load: NaN for float registers (see SetXMMRegToNaN),
+// zero (ToInt32(undefined)) for general-purpose registers.
+static void
+SetRegisterToCoercedUndefined(CONTEXT *context, bool isFloat32, AnyRegister reg)
+{
+    if (reg.isFloat()) {
+        switch (reg.fpu().code()) {
+          case JSC::X86Registers::xmm0:  SetXMMRegToNaN(isFloat32, &context->Xmm0); break;
+          case JSC::X86Registers::xmm1:  SetXMMRegToNaN(isFloat32, &context->Xmm1); break;
+          case JSC::X86Registers::xmm2:  SetXMMRegToNaN(isFloat32, &context->Xmm2); break;
+          case JSC::X86Registers::xmm3:  SetXMMRegToNaN(isFloat32, &context->Xmm3); break;
+          case JSC::X86Registers::xmm4:  SetXMMRegToNaN(isFloat32, &context->Xmm4); break;
+          case JSC::X86Registers::xmm5:  SetXMMRegToNaN(isFloat32, &context->Xmm5); break;
+          case JSC::X86Registers::xmm6:  SetXMMRegToNaN(isFloat32, &context->Xmm6); break;
+          case JSC::X86Registers::xmm7:  SetXMMRegToNaN(isFloat32, &context->Xmm7); break;
+          case JSC::X86Registers::xmm8:  SetXMMRegToNaN(isFloat32, &context->Xmm8); break;
+          case JSC::X86Registers::xmm9:  SetXMMRegToNaN(isFloat32, &context->Xmm9); break;
+          case JSC::X86Registers::xmm10: SetXMMRegToNaN(isFloat32, &context->Xmm10); break;
+          case JSC::X86Registers::xmm11: SetXMMRegToNaN(isFloat32, &context->Xmm11); break;
+          case JSC::X86Registers::xmm12: SetXMMRegToNaN(isFloat32, &context->Xmm12); break;
+          case JSC::X86Registers::xmm13: SetXMMRegToNaN(isFloat32, &context->Xmm13); break;
+          case JSC::X86Registers::xmm14: SetXMMRegToNaN(isFloat32, &context->Xmm14); break;
+          case JSC::X86Registers::xmm15: SetXMMRegToNaN(isFloat32, &context->Xmm15); break;
+          default: MOZ_CRASH();
+        }
+    } else {
+        // Register codes are the x86 encodings; on x64 they map onto the
+        // 64-bit registers of the same encoding (eax -> Rax, r8 -> R8, ...).
+        switch (reg.gpr().code()) {
+          case JSC::X86Registers::eax: context->Rax = 0; break;
+          case JSC::X86Registers::ecx: context->Rcx = 0; break;
+          case JSC::X86Registers::edx: context->Rdx = 0; break;
+          case JSC::X86Registers::ebx: context->Rbx = 0; break;
+          case JSC::X86Registers::esp: context->Rsp = 0; break;
+          case JSC::X86Registers::ebp: context->Rbp = 0; break;
+          case JSC::X86Registers::esi: context->Rsi = 0; break;
+          case JSC::X86Registers::edi: context->Rdi = 0; break;
+          case JSC::X86Registers::r8:  context->R8  = 0; break;
+          case JSC::X86Registers::r9:  context->R9  = 0; break;
+          case JSC::X86Registers::r10: context->R10 = 0; break;
+          case JSC::X86Registers::r11: context->R11 = 0; break;
+          case JSC::X86Registers::r12: context->R12 = 0; break;
+          case JSC::X86Registers::r13: context->R13 = 0; break;
+          case JSC::X86Registers::r14: context->R14 = 0; break;
+          case JSC::X86Registers::r15: context->R15 = 0; break;
+          default: MOZ_CRASH();
+        }
+    }
+}
+#  endif
+
+// Decide whether an access violation was caused by asm.js code and, if so,
+// recover: either redirect the PC to the operation-callback exit (interrupt
+// request) or emulate the out-of-bounds heap access. Returns true iff the
+// exception was handled and execution may resume.
+static bool
+HandleException(PEXCEPTION_POINTERS exception)
+{
+    EXCEPTION_RECORD *record = exception->ExceptionRecord;
+    CONTEXT *context = exception->ContextRecord;
+
+    if (record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION)
+        return false;
+
+    AsmJSActivation *activation = InnermostAsmJSActivation();
+    if (!activation)
+        return false;
+
+    uint8_t **ppc = ContextToPC(context);
+    uint8_t *pc = *ppc;
+    JS_ASSERT(pc == record->ExceptionAddress);
+
+    const AsmJSModule &module = activation->module();
+    if (!PCIsInModule(module, pc))
+        return false;
+
+    // For access violations, ExceptionInformation[0] is the read/write flag
+    // and [1] the faulting address; bail if the OS supplied fewer parameters.
+    if (record->NumberParameters < 2)
+        return false;
+
+    void *faultingAddress = (void*)record->ExceptionInformation[1];
+
+    // If we faulted trying to execute code in 'module', this must be an
+    // operation callback (see TriggerOperationCallbackForAsmJSCode). Redirect
+    // execution to a trampoline which will call js_HandleExecutionInterrupt.
+    // The trampoline will jump to activation->resumePC if execution isn't
+    // interrupted.
+    if (PCIsInModule(module, faultingAddress)) {
+        activation->setResumePC(pc);
+        *ppc = module.operationCallbackExit();
+        // Restore the execute permission that was revoked to trigger this
+        // fault; crashing is the only sane response if that fails.
+        DWORD oldProtect;
+        if (!VirtualProtect(module.functionCode(), module.functionBytes(), PAGE_EXECUTE, &oldProtect))
+            MOZ_CRASH();
+        return true;
+    }
+
+#  if defined(JS_CPU_X64)
+    // These checks aren't necessary, but, since we can, check anyway to make
+    // sure we aren't covering up a real bug.
+    if (!module.maybeHeap() ||
+        faultingAddress < module.maybeHeap() ||
+        faultingAddress >= module.maybeHeap() + AsmJSBufferProtectedSize)
+    {
+        return false;
+    }
+
+    const AsmJSHeapAccess *heapAccess = LookupHeapAccess(module, pc);
+    if (!heapAccess)
+        return false;
+
+    // Also not necessary, but, since we can, do: [0] is 0 for a read fault
+    // and 1 for a write fault, which must agree with the recorded access.
+    if (heapAccess->isLoad() != !record->ExceptionInformation[0])
+        return false;
+
+    // We now know that this is an out-of-bounds access made by an asm.js
+    // load/store that we should handle. If this is a load, assign the
+    // JS-defined result value to the destination register (ToInt32(undefined)
+    // or ToNumber(undefined), determined by the type of the destination
+    // register) and set the PC to the next op. Upon return from the handler,
+    // execution will resume at this next PC.
+    if (heapAccess->isLoad())
+        SetRegisterToCoercedUndefined(context, heapAccess->isFloat32Load(), heapAccess->loadedReg());
+    *ppc += heapAccess->opLength();
+    return true;
+#  else
+    return false;
+#  endif
+}
+
+// Vectored exception handler entry point: resume execution when the fault
+// was handled by asm.js recovery; otherwise let the OS continue its normal
+// handler search (no manual chaining needed).
+static LONG WINAPI
+AsmJSExceptionHandler(LPEXCEPTION_POINTERS exception)
+{
+    return HandleException(exception) ? EXCEPTION_CONTINUE_EXECUTION
+                                      : EXCEPTION_CONTINUE_SEARCH;
+}
+
+# else  // If not Windows, assume Unix
+#  include <signal.h>
+#  include <sys/mman.h>
+
+// Unfortunately, we still need OS-specific code to read/write to the thread
+// state via the mcontext_t.
+#  if defined(__linux__)
+// Return a pointer to the program-counter slot inside the Linux mcontext_t,
+// so the handler can both read the faulting PC and redirect it.
+static uint8_t **
+ContextToPC(mcontext_t &context)
+{
+#   if defined(JS_CPU_X86)
+    JS_STATIC_ASSERT(sizeof(context.gregs[REG_EIP]) == sizeof(void*));
+    return reinterpret_cast<uint8_t**>(&context.gregs[REG_EIP]);
+#   else
+    JS_STATIC_ASSERT(sizeof(context.gregs[REG_RIP]) == sizeof(void*));
+    return reinterpret_cast<uint8_t**>(&context.gregs[REG_RIP]);
+#   endif
+}
+
+#   if defined(JS_CPU_X64)
+// Linux counterpart of the Windows version above: write the asm.js value of
+// a coerced 'undefined' (NaN for float registers, zero for GPRs) into the
+// faulted load's destination register via the signal mcontext.
+static void
+SetRegisterToCoercedUndefined(mcontext_t &context, bool isFloat32, AnyRegister reg)
+{
+    if (reg.isFloat()) {
+        switch (reg.fpu().code()) {
+          case JSC::X86Registers::xmm0:  SetXMMRegToNaN(isFloat32, &context.fpregs->_xmm[0]); break;
+          case JSC::X86Registers::xmm1:  SetXMMRegToNaN(isFloat32, &context.fpregs->_xmm[1]); break;
+          case JSC::X86Registers::xmm2:  SetXMMRegToNaN(isFloat32, &context.fpregs->_xmm[2]); break;
+          case JSC::X86Registers::xmm3:  SetXMMRegToNaN(isFloat32, &context.fpregs->_xmm[3]); break;
+          case JSC::X86Registers::xmm4:  SetXMMRegToNaN(isFloat32, &context.fpregs->_xmm[4]); break;
+          case JSC::X86Registers::xmm5:  SetXMMRegToNaN(isFloat32, &context.fpregs->_xmm[5]); break;
+          case JSC::X86Registers::xmm6:  SetXMMRegToNaN(isFloat32, &context.fpregs->_xmm[6]); break;
+          case JSC::X86Registers::xmm7:  SetXMMRegToNaN(isFloat32, &context.fpregs->_xmm[7]); break;
+          case JSC::X86Registers::xmm8:  SetXMMRegToNaN(isFloat32, &context.fpregs->_xmm[8]); break;
+          case JSC::X86Registers::xmm9:  SetXMMRegToNaN(isFloat32, &context.fpregs->_xmm[9]); break;
+          case JSC::X86Registers::xmm10: SetXMMRegToNaN(isFloat32, &context.fpregs->_xmm[10]); break;
+          case JSC::X86Registers::xmm11: SetXMMRegToNaN(isFloat32, &context.fpregs->_xmm[11]); break;
+          case JSC::X86Registers::xmm12: SetXMMRegToNaN(isFloat32, &context.fpregs->_xmm[12]); break;
+          case JSC::X86Registers::xmm13: SetXMMRegToNaN(isFloat32, &context.fpregs->_xmm[13]); break;
+          case JSC::X86Registers::xmm14: SetXMMRegToNaN(isFloat32, &context.fpregs->_xmm[14]); break;
+          case JSC::X86Registers::xmm15: SetXMMRegToNaN(isFloat32, &context.fpregs->_xmm[15]); break;
+          default: MOZ_CRASH();
+        }
+    } else {
+        // x86 register codes map onto the 64-bit gregs entries of the same
+        // encoding (eax -> REG_RAX, r8 -> REG_R8, ...).
+        switch (reg.gpr().code()) {
+          case JSC::X86Registers::eax: context.gregs[REG_RAX] = 0; break;
+          case JSC::X86Registers::ecx: context.gregs[REG_RCX] = 0; break;
+          case JSC::X86Registers::edx: context.gregs[REG_RDX] = 0; break;
+          case JSC::X86Registers::ebx: context.gregs[REG_RBX] = 0; break;
+          case JSC::X86Registers::esp: context.gregs[REG_RSP] = 0; break;
+          case JSC::X86Registers::ebp: context.gregs[REG_RBP] = 0; break;
+          case JSC::X86Registers::esi: context.gregs[REG_RSI] = 0; break;
+          case JSC::X86Registers::edi: context.gregs[REG_RDI] = 0; break;
+          case JSC::X86Registers::r8:  context.gregs[REG_R8]  = 0; break;
+          case JSC::X86Registers::r9:  context.gregs[REG_R9]  = 0; break;
+          case JSC::X86Registers::r10: context.gregs[REG_R10] = 0; break;
+          case JSC::X86Registers::r11: context.gregs[REG_R11] = 0; break;
+          case JSC::X86Registers::r12: context.gregs[REG_R12] = 0; break;
+          case JSC::X86Registers::r13: context.gregs[REG_R13] = 0; break;
+          case JSC::X86Registers::r14: context.gregs[REG_R14] = 0; break;
+          case JSC::X86Registers::r15: context.gregs[REG_R15] = 0; break;
+          default: MOZ_CRASH();
+        }
+    }
+}
+#   endif
+#  elif defined(XP_MACOSX)
+static uint8_t **
+ContextToPC(mcontext_t context)
+{
+#   if defined(JS_CPU_X86)
+    JS_STATIC_ASSERT(sizeof(context->__ss.__eip) == sizeof(void*));
+    return reinterpret_cast<uint8_t **>(&context->__ss.__eip);
+#   else
+    JS_STATIC_ASSERT(sizeof(context->__ss.__rip) == sizeof(void*));
+    return reinterpret_cast<uint8_t **>(&context->__ss.__rip);
+#   endif
+}
+
+#   if defined(JS_CPU_X64)
+static void
+SetRegisterToCoercedUndefined(mcontext_t &context, bool isFloat32, AnyRegister reg)
+{
+    if (reg.isFloat()) {
+        switch (reg.fpu().code()) {
+          case JSC::X86Registers::xmm0:  SetXMMRegToNaN(isFloat32, &context->__fs.__fpu_xmm0); break;
+          case JSC::X86Registers::xmm1:  SetXMMRegToNaN(isFloat32, &context->__fs.__fpu_xmm1); break;
+          case JSC::X86Registers::xmm2:  SetXMMRegToNaN(isFloat32, &context->__fs.__fpu_xmm2); break;
+          case JSC::X86Registers::xmm3:  SetXMMRegToNaN(isFloat32, &context->__fs.__fpu_xmm3); break;
+          case JSC::X86Registers::xmm4:  SetXMMRegToNaN(isFloat32, &context->__fs.__fpu_xmm4); break;
+          case JSC::X86Registers::xmm5:  SetXMMRegToNaN(isFloat32, &context->__fs.__fpu_xmm5); break;
+          case JSC::X86Registers::xmm6:  SetXMMRegToNaN(isFloat32, &context->__fs.__fpu_xmm6); break;
+          case JSC::X86Registers::xmm7:  SetXMMRegToNaN(isFloat32, &context->__fs.__fpu_xmm7); break;
+          case JSC::X86Registers::xmm8:  SetXMMRegToNaN(isFloat32, &context->__fs.__fpu_xmm8); break;
+          case JSC::X86Registers::xmm9:  SetXMMRegToNaN(isFloat32, &context->__fs.__fpu_xmm9); break;
+          case JSC::X86Registers::xmm10: SetXMMRegToNaN(isFloat32, &context->__fs.__fpu_xmm10); break;
+          case JSC::X86Registers::xmm11: SetXMMRegToNaN(isFloat32, &context->__fs.__fpu_xmm11); break;
+          case JSC::X86Registers::xmm12: SetXMMRegToNaN(isFloat32, &context->__fs.__fpu_xmm12); break;
+          case JSC::X86Registers::xmm13: SetXMMRegToNaN(isFloat32, &context->__fs.__fpu_xmm13); break;
+          case JSC::X86Registers::xmm14: SetXMMRegToNaN(isFloat32, &context->__fs.__fpu_xmm14); break;
+          case JSC::X86Registers::xmm15: SetXMMRegToNaN(isFloat32, &context->__fs.__fpu_xmm15); break;
+          default: MOZ_CRASH();
+        }
+    } else {
+        switch (reg.gpr().code()) {
+          case JSC::X86Registers::eax: context->__ss.__rax = 0; break;
+          case JSC::X86Registers::ecx: context->__ss.__rcx = 0; break;
+          case JSC::X86Registers::edx: context->__ss.__rdx = 0; break;
+          case JSC::X86Registers::ebx: context->__ss.__rbx = 0; break;
+          case JSC::X86Registers::esp: context->__ss.__rsp = 0; break;
+          case JSC::X86Registers::ebp: context->__ss.__rbp = 0; break;
+          case JSC::X86Registers::esi: context->__ss.__rsi = 0; break;
+          case JSC::X86Registers::edi: context->__ss.__rdi = 0; break;
+          case JSC::X86Registers::r8:  context->__ss.__r8  = 0; break;
+          case JSC::X86Registers::r9:  context->__ss.__r9  = 0; break;
+          case JSC::X86Registers::r10: context->__ss.__r10 = 0; break;
+          case JSC::X86Registers::r11: context->__ss.__r11 = 0; break;
+          case JSC::X86Registers::r12: context->__ss.__r12 = 0; break;
+          case JSC::X86Registers::r13: context->__ss.__r13 = 0; break;
+          case JSC::X86Registers::r14: context->__ss.__r14 = 0; break;
+          case JSC::X86Registers::r15: context->__ss.__r15 = 0; break;
+          default: MOZ_CRASH();
+        }
+    }
+}
+#   endif
+#  endif  // end of OS-specific mcontext accessors
+
+// Be very cautious and default to not handling; we don't want to accidentally
+// silence real crashes from real bugs.
+static bool
+HandleSignal(int signum, siginfo_t *info, void *ctx)
+{
+    AsmJSActivation *activation = InnermostAsmJSActivation();
+    if (!activation)
+        return false;
+
+    mcontext_t &context = reinterpret_cast<ucontext_t*>(ctx)->uc_mcontext;
+    uint8_t **ppc = ContextToPC(context);
+    uint8_t *pc = *ppc;
+
+    const AsmJSModule &module = activation->module();
+    if (!PCIsInModule(module, pc))
+        return false;
+
+    void *faultingAddress = info->si_addr;
+
+    // If we faulted trying to execute code in 'module', this must be an
+    // operation callback (see TriggerOperationCallbackForAsmJSCode). Redirect
+    // execution to a trampoline which will call js_HandleExecutionInterrupt.
+    // The trampoline will jump to activation->resumePC if execution isn't
+    // interrupted.
+    if (PCIsInModule(module, faultingAddress)) {
+        activation->setResumePC(pc);
+        *ppc = module.operationCallbackExit();
+        mprotect(module.functionCode(), module.functionBytes(), PROT_EXEC);
+        return true;
+    }
+
+#  if defined(JS_CPU_X64)
+    // These checks aren't necessary, but, since we can, check anyway to make
+    // sure we aren't covering up a real bug.
+    if (!module.maybeHeap() ||
+        faultingAddress < module.maybeHeap() ||
+        faultingAddress >= module.maybeHeap() + AsmJSBufferProtectedSize)
+    {
+        return false;
+    }
+
+    const AsmJSHeapAccess *heapAccess = LookupHeapAccess(module, pc);
+    if (!heapAccess)
+        return false;
+
+    // We now know that this is an out-of-bounds access made by an asm.js
+    // load/store that we should handle. If this is a load, assign the
+    // JS-defined result value to the destination register (ToInt32(undefined)
+    // or ToNumber(undefined), determined by the type of the destination
+    // register) and set the PC to the next op. Upon return from the handler,
+    // execution will resume at this next PC.
+    if (heapAccess->isLoad())
+        SetRegisterToCoercedUndefined(context, heapAccess->isFloat32Load(), heapAccess->loadedReg());
+    *ppc += heapAccess->opLength();
+    return true;
+#  else
+    return false;
+#  endif
+}
+
+static struct sigaction sPrevHandler;
+
+static void
+AsmJSFaultHandler(int signum, siginfo_t *info, void *context)
+{
+    if (HandleSignal(signum, info, context))
+        return;
+
+    // This signal is not for any asm.js code we expect, so we need to forward
+    // the signal to the next handler. If there is no next handler (SIG_IGN or
+    // SIG_DFL), then it's time to crash. To do this, we set the signal back to
+    // its previous disposition and return. This will cause the faulting op to
+    // be re-executed which will crash in the normal way. The advantage to
+    // doing this is that we remove ourselves from the crash stack which
+    // simplifies crash reports. Note: the order of these tests matters.
+    if (sPrevHandler.sa_flags & SA_SIGINFO) {
+        sPrevHandler.sa_sigaction(signum, info, context);
+        exit(signum);  // backstop
+    } else if (sPrevHandler.sa_handler == SIG_DFL || sPrevHandler.sa_handler == SIG_IGN) {
+        sigaction(signum, &sPrevHandler, NULL);
+    } else {
+        sPrevHandler.sa_handler(signum);
+        exit(signum);  // backstop
+    }
+}
+# endif
+#endif // JS_ASMJS
+
+bool
+EnsureAsmJSSignalHandlersInstalled()
+{
+#if defined(JS_ASMJS)
+    SignalMutex::Lock lock;
+    if (lock.handlersInstalled())
+        return true;
+
+#if defined(XP_WIN)
+    if (!AddVectoredExceptionHandler(/* FirstHandler = */true, AsmJSExceptionHandler))
+        return false;
+#else
+    struct sigaction sigAction;
+    sigAction.sa_sigaction = &AsmJSFaultHandler;
+    sigemptyset(&sigAction.sa_mask);
+    sigAction.sa_flags = SA_SIGINFO;
+    if (sigaction(SIGSEGV, &sigAction, &sPrevHandler))
+        return false;
+    if (sigaction(SIGBUS, &sigAction, &sPrevHandler))
+        return false;
+#endif
+
+    lock.setHandlersInstalled();
+#endif
+    return true;
+}
+
+// To interrupt execution of a JSRuntime, any thread may call
+// JS_TriggerOperationCallback (JSRuntime::triggerOperationCallback from inside
+// the engine). Normally, this sets some state that is polled at regular
+// intervals (function prologues, loop headers), even from jit-code. For tight
+// loops, this poses non-trivial overhead. For asm.js, we can do better: when
+// another thread triggers the operation callback, we simply mprotect all of
+// the innermost asm.js module activation's code. This will trigger a SIGSEGV,
+// taking us into AsmJSFaultHandler. From there, we can manually redirect
+// execution to call js_HandleExecutionInterrupt. The memory is un-protected
+// from the signal handler after control flow is redirected.
+void
+js::TriggerOperationCallbackForAsmJSCode(JSRuntime *rt)
+{
+#if defined(JS_ASMJS)
+    PerThreadData::AsmJSActivationStackLock lock(rt->mainThread);
+
+    AsmJSActivation *activation = rt->mainThread.asmJSActivationStackFromAnyThread();
+    if (!activation)
+        return;
+
+    const AsmJSModule &module = activation->module();
+
+# if defined(XP_WIN)
+    DWORD oldProtect;
+    if (!VirtualProtect(module.functionCode(), 4096, PAGE_NOACCESS, &oldProtect))
+        MOZ_CRASH();
+# else
+    if (mprotect(module.functionCode(), module.functionBytes(), PROT_NONE))
+        MOZ_CRASH();
+# endif
+#endif
+}
--- a/js/src/ion/CodeGenerator.cpp
+++ b/js/src/ion/CodeGenerator.cpp
@@ -119,18 +119,18 @@ CodeGenerator::visitOutOfLineCache(OutOf
     return cache->accept(this, ool);
 }
 
 StringObject *
 MNewStringObject::templateObj() const {
     return &templateObj_->asString();
 }
 
-CodeGenerator::CodeGenerator(MIRGenerator *gen, LIRGraph *graph)
-  : CodeGeneratorSpecific(gen, graph)
+CodeGenerator::CodeGenerator(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm)
+  : CodeGeneratorSpecific(gen, graph, masm)
 {
 }
 
 bool
 CodeGenerator::visitValueToInt32(LValueToInt32 *lir)
 {
     ValueOperand operand = ToValue(lir, LValueToInt32::Input);
     Register output = ToRegister(lir->output());
@@ -2041,16 +2041,19 @@ CodeGenerator::maybeCreateScriptCounts()
     if (!cx)
         return NULL;
 
     IonScriptCounts *counts = NULL;
 
     CompileInfo *outerInfo = &gen->info();
     RawScript script = outerInfo->script();
 
+    if (!script)
+        return NULL;
+
     if (cx->runtime->profilingScripts && !script->hasScriptCounts) {
         if (!script->initScriptCounts(cx))
             return NULL;
     }
 
     if (!script->hasScriptCounts)
         return NULL;
 
@@ -2854,16 +2857,26 @@ CodeGenerator::visitPowD(LPowD *ins)
     masm.passABIArg(power);
     masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ecmaPow), MacroAssembler::DOUBLE);
 
     JS_ASSERT(ToFloatRegister(ins->output()) == ReturnFloatReg);
     return true;
 }
 
 bool
+CodeGenerator::visitNegI(LNegI *ins)
+{
+    Register input = ToRegister(ins->input());
+    JS_ASSERT(input == ToRegister(ins->output()));
+
+    masm.neg32(input);
+    return true;
+}
+
+bool
 CodeGenerator::visitNegD(LNegD *ins)
 {
     FloatRegister input = ToFloatRegister(ins->input());
     JS_ASSERT(input == ToFloatRegister(ins->output()));
 
     masm.negateDouble(input);
     return true;
 }
@@ -3196,17 +3209,17 @@ CodeGenerator::visitIsNullOrLikeUndefine
 
         // Both branches meet here.
         masm.bind(&done);
         return true;
     }
 
     JS_ASSERT(op == JSOP_STRICTEQ || op == JSOP_STRICTNE);
 
-    Assembler::Condition cond = JSOpToCondition(op);
+    Assembler::Condition cond = JSOpToCondition(compareType, op);
     if (compareType == MCompare::Compare_Null)
         cond = masm.testNull(cond, value);
     else
         cond = masm.testUndefined(cond, value);
 
     masm.emitSet(cond, output);
     return true;
 }
@@ -3262,17 +3275,17 @@ CodeGenerator::visitIsNullOrLikeUndefine
         } else {
             masm.jump(ifFalseLabel);
         }
         return true;
     }
 
     JS_ASSERT(op == JSOP_STRICTEQ || op == JSOP_STRICTNE);
 
-    Assembler::Condition cond = JSOpToCondition(op);
+    Assembler::Condition cond = JSOpToCondition(compareType, op);
     if (compareType == MCompare::Compare_Null)
         cond = masm.testNull(cond, value);
     else
         cond = masm.testUndefined(cond, value);
 
     emitBranch(cond, lir->ifTrue(), lir->ifFalse());
     return true;
 }
@@ -4322,16 +4335,53 @@ CodeGenerator::visitGetArgument(LGetArgu
         Register i = ToRegister(index);
         BaseIndex argPtr(StackPointer, i, ScaleFromElemWidth(sizeof(Value)), argvOffset);
         masm.loadValue(argPtr, result);
     }
     return true;
 }
 
 bool
+CodeGenerator::generateAsmJS()
+{
+    // The caller (either another asm.js function or the external-entry
+    // trampoline) has placed all arguments in registers and on the stack
+    // according to the system ABI. The MAsmJSParameters which represent these
+    // parameters have been useFixed()ed to these ABI-specified positions.
+    // Thus, there is nothing special to do in the prologue except (possibly)
+    // bump the stack.
+    if (!generatePrologue())
+        return false;
+    if (!generateBody())
+        return false;
+    if (!generateEpilogue())
+        return false;
+    if (!generateOutOfLineCode())
+        return false;
+
+    // The only remaining work needed to compile this function is to patch the
+    // switch-statement jump tables (the entries of the table need the absolute
+    // address of the cases). These table entries are accumulated as CodeLabels
+    // in the MacroAssembler's codeLabels_ list and processed all at once in
+    // the "static-link" phase of module compilation. It is critical that there
+    // is nothing else to do after this point since the LifoAlloc memory
+    // holding the MIR graph is about to be popped and reused. In particular,
+    // every step in CodeGenerator::link must be a nop, as asserted here:
+    JS_ASSERT(snapshots_.size() == 0);
+    JS_ASSERT(bailouts_.empty());
+    JS_ASSERT(graph.numConstants() == 0);
+    JS_ASSERT(safepointIndices_.empty());
+    JS_ASSERT(osiIndices_.empty());
+    JS_ASSERT(cacheList_.empty());
+    JS_ASSERT(safepoints_.size() == 0);
+    JS_ASSERT(graph.mir().numScripts() == 0);
+    return true;
+}
+
+bool
 CodeGenerator::generate()
 {
     if (!safepoints_.init(graph.totalSlotCount()))
         return false;
 
     // Before generating any code, we generate type checks for all parameters.
     // This comes before deoptTable_, because we can't use deopt tables without
     // creating the actual frame.
@@ -5680,11 +5730,83 @@ CodeGenerator::visitOutOfLineParallelAbo
     masm.passABIArg(CallTempReg0);
     masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParallelAbort));
 
     masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
     masm.jump(returnLabel_);
     return true;
 }
 
+bool
+CodeGenerator::visitAsmJSCall(LAsmJSCall *ins)
+{
+    MAsmJSCall *mir = ins->mir();
+
+    if (mir->spIncrement())
+        masm.freeStack(mir->spIncrement());
+
+    JS_ASSERT((AlignmentAtPrologue + masm.framePushed()) % StackAlignment == 0);
+#ifdef DEBUG
+    Label ok;
+    JS_ASSERT(IsPowerOfTwo(StackAlignment));
+    masm.branchTestPtr(Assembler::Zero, StackPointer, Imm32(StackAlignment - 1), &ok);
+    masm.breakpoint();
+    masm.bind(&ok);
+#endif
+
+    MAsmJSCall::Callee callee = mir->callee();
+    switch (callee.which()) {
+      case MAsmJSCall::Callee::Internal:
+        masm.call(callee.internal());
+        break;
+      case MAsmJSCall::Callee::Dynamic:
+        masm.call(ToRegister(ins->getOperand(mir->dynamicCalleeOperandIndex())));
+        break;
+      case MAsmJSCall::Callee::Builtin:
+        masm.call(ImmWord(callee.builtin()));
+        break;
+    }
+
+    if (mir->spIncrement())
+        masm.reserveStack(mir->spIncrement());
+
+    postAsmJSCall(ins);
+    return true;
+}
+
+bool
+CodeGenerator::visitAsmJSParameter(LAsmJSParameter *lir)
+{
+    return true;
+}
+
+bool
+CodeGenerator::visitAsmJSReturn(LAsmJSReturn *lir)
+{
+    // Don't emit a jump to the return label if this is the last block.
+    if (current->mir() != *gen->graph().poBegin())
+        masm.jump(returnLabel_);
+    return true;
+}
+
+bool
+CodeGenerator::visitAsmJSVoidReturn(LAsmJSVoidReturn *lir)
+{
+    // Don't emit a jump to the return label if this is the last block.
+    if (current->mir() != *gen->graph().poBegin())
+        masm.jump(returnLabel_);
+    return true;
+}
+
+bool
+CodeGenerator::visitAsmJSCheckOverRecursed(LAsmJSCheckOverRecursed *lir)
+{
+    uintptr_t *limitAddr = &gen->compartment->rt->mainThread.nativeStackLimit;
+    masm.branchPtr(Assembler::AboveOrEqual,
+                   AbsoluteAddress(limitAddr),
+                   StackPointer,
+                   lir->mir()->onError());
+    return true;
+}
+
 } // namespace ion
 } // namespace js
 
--- a/js/src/ion/CodeGenerator.h
+++ b/js/src/ion/CodeGenerator.h
@@ -36,20 +36,21 @@ class OutOfLineParNewGCThing;
 class OutOfLineUpdateCache;
 
 class CodeGenerator : public CodeGeneratorSpecific
 {
     bool generateArgumentsChecks();
     bool generateBody();
 
   public:
-    CodeGenerator(MIRGenerator *gen, LIRGraph *graph);
+    CodeGenerator(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm = NULL);
 
   public:
     bool generate();
+    bool generateAsmJS();
     bool link();
 
     bool visitLabel(LLabel *lir);
     bool visitNop(LNop *lir);
     bool visitOsiPoint(LOsiPoint *lir);
     bool visitGoto(LGoto *lir);
     bool visitTableSwitch(LTableSwitch *ins);
     bool visitTableSwitchV(LTableSwitchV *ins);
@@ -130,16 +131,17 @@ class CodeGenerator : public CodeGenerat
     bool visitBoundsCheckLower(LBoundsCheckLower *lir);
     bool visitLoadFixedSlotV(LLoadFixedSlotV *ins);
     bool visitLoadFixedSlotT(LLoadFixedSlotT *ins);
     bool visitStoreFixedSlotV(LStoreFixedSlotV *ins);
     bool visitStoreFixedSlotT(LStoreFixedSlotT *ins);
     bool visitAbsI(LAbsI *lir);
     bool visitPowI(LPowI *lir);
     bool visitPowD(LPowD *lir);
+    bool visitNegI(LNegI *lir);
     bool visitNegD(LNegD *lir);
     bool visitRandom(LRandom *lir);
     bool visitMathFunctionD(LMathFunctionD *ins);
     bool visitModD(LModD *ins);
     bool visitMinMaxI(LMinMaxI *lir);
     bool visitBinaryV(LBinaryV *lir);
     bool emitCompareS(LInstruction *lir, JSOp op, Register left, Register right,
                       Register output, Register temp);
@@ -204,19 +206,24 @@ class CodeGenerator : public CodeGenerat
     bool visitInstanceOfO(LInstanceOfO *ins);
     bool visitInstanceOfV(LInstanceOfV *ins);
     bool visitCallInstanceOf(LCallInstanceOf *ins);
     bool visitFunctionBoundary(LFunctionBoundary *lir);
     bool visitGetDOMProperty(LGetDOMProperty *lir);
     bool visitSetDOMProperty(LSetDOMProperty *lir);
     bool visitCallDOMNative(LCallDOMNative *lir);
     bool visitCallGetIntrinsicValue(LCallGetIntrinsicValue *lir);
+    bool visitAsmJSCall(LAsmJSCall *lir);
+    bool visitAsmJSParameter(LAsmJSParameter *lir);
+    bool visitAsmJSReturn(LAsmJSReturn *ret);
+    bool visitAsmJSVoidReturn(LAsmJSVoidReturn *ret);
 
     bool visitCheckOverRecursed(LCheckOverRecursed *lir);
     bool visitCheckOverRecursedFailure(CheckOverRecursedFailure *ool);
+    bool visitAsmJSCheckOverRecursed(LAsmJSCheckOverRecursed *lir);
 
     bool visitParCheckOverRecursed(LParCheckOverRecursed *lir);
     bool visitParCheckOverRecursedFailure(ParCheckOverRecursedFailure *ool);
 
     bool visitParCheckInterrupt(LParCheckInterrupt *lir);
     bool visitOutOfLineParCheckInterrupt(OutOfLineParCheckInterrupt *ool);
 
     bool visitUnboxDouble(LUnboxDouble *lir);
--- a/js/src/ion/CompileInfo.h
+++ b/js/src/ion/CompileInfo.h
@@ -3,16 +3,18 @@
  *
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jsion_compileinfo_h__
 #define jsion_compileinfo_h__
 
+#include "Registers.h"
+
 namespace js {
 namespace ion {
 
 inline unsigned
 CountArgSlots(JSFunction *fun)
 {
     return fun ? fun->nargs + 2 : 1; // +2 for |scopeChain| and |this|, or +1 for |scopeChain|
 }
@@ -31,17 +33,31 @@ class CompileInfo
 {
   public:
     CompileInfo(RawScript script, JSFunction *fun, jsbytecode *osrPc, bool constructing,
                 ExecutionMode executionMode)
       : script_(script), fun_(fun), osrPc_(osrPc), constructing_(constructing),
         executionMode_(executionMode)
     {
         JS_ASSERT_IF(osrPc, JSOp(*osrPc) == JSOP_LOOPENTRY);
-        nslots_ = script->nslots + CountArgSlots(fun);
+        nimplicit_ = 1 /* scope chain */ + (fun ? 1 /* this */: 0);
+        nargs_ = fun ? fun->nargs : 0;
+        nlocals_ = script->nfixed;
+        nstack_ = script->nslots - script->nfixed;
+        nslots_ = nimplicit_ + nargs_ + nlocals_ + nstack_;
+    }
+
+    CompileInfo(unsigned nlocals)
+      : script_(NULL), fun_(NULL), osrPc_(NULL), constructing_(false)
+    {
+        nimplicit_ = 0;
+        nargs_ = 0;
+        nlocals_ = nlocals;
+        nstack_ = 1;  /* For FunctionCompiler::pushPhiInput/popPhiOutput */
+        nslots_ = nlocals_ + nstack_;
     }
 
     RawScript script() const {
         return script_;
     }
     JSFunction *fun() const {
         return fun_;
     }
@@ -85,41 +101,42 @@ class CompileInfo
     inline jssrcnote *getNote(JSContext *cx, jsbytecode *pc) const;
 
     // Total number of slots: args, locals, and stack.
     unsigned nslots() const {
         return nslots_;
     }
 
     unsigned nargs() const {
-        return fun()->nargs;
+        return nargs_;
     }
     unsigned nlocals() const {
-        return script()->nfixed;
+        return nlocals_;
     }
     unsigned ninvoke() const {
-        return nlocals() + CountArgSlots(fun());
+        return nslots_ - nstack_;
     }
 
     uint32_t scopeChainSlot() const {
+        JS_ASSERT(script());
         return 0;
     }
     uint32_t thisSlot() const {
         JS_ASSERT(fun());
         return 1;
     }
     uint32_t firstArgSlot() const {
-        JS_ASSERT(fun());
-        return 2;
+        return nimplicit_;
     }
     uint32_t argSlot(uint32_t i) const {
-        return firstArgSlot() + i;
+        JS_ASSERT(i < nargs_);
+        return nimplicit_ + i;
     }
     uint32_t firstLocalSlot() const {
-        return CountArgSlots(fun());
+        return nimplicit_ + nargs_;
     }
     uint32_t localSlot(uint32_t i) const {
         return firstLocalSlot() + i;
     }
     uint32_t firstStackSlot() const {
         return firstLocalSlot() + nlocals();
     }
     uint32_t stackSlot(uint32_t i) const {
@@ -134,19 +151,23 @@ class CompileInfo
         return executionMode_;
     }
 
     bool isParallelExecution() const {
         return executionMode_ == ParallelExecution;
     }
 
   private:
+    unsigned nimplicit_;
+    unsigned nargs_;
+    unsigned nlocals_;
+    unsigned nstack_;
+    unsigned nslots_;
     JSScript *script_;
     JSFunction *fun_;
-    unsigned nslots_;
     jsbytecode *osrPc_;
     bool constructing_;
     ExecutionMode executionMode_;
 };
 
 } // namespace ion
 } // namespace js
 
new file mode 100644
--- /dev/null
+++ b/js/src/ion/EffectiveAddressAnalysis.cpp
@@ -0,0 +1,119 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=99:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "EffectiveAddressAnalysis.h"
+
+using namespace js;
+using namespace ion;
+
+#ifdef JS_ASMJS
+static void
+AnalyzeLsh(MBasicBlock *block, MLsh *lsh)
+{
+    if (lsh->specialization() != MIRType_Int32)
+        return;
+
+    MDefinition *index = lsh->lhs();
+    JS_ASSERT(index->type() == MIRType_Int32);
+
+    MDefinition *shift = lsh->rhs();
+    if (!shift->isConstant())
+        return;
+
+    Value shiftValue = shift->toConstant()->value();
+    if (!shiftValue.isInt32() || !IsShiftInScaleRange(shiftValue.toInt32()))
+        return;
+
+    Scale scale = ShiftToScale(shiftValue.toInt32());
+
+    int32_t displacement = 0;
+    MInstruction *last = lsh;
+    MDefinition *base = NULL;
+    while (true) {
+        if (last->useCount() != 1)
+            break;
+
+        MUseIterator use = last->usesBegin();
+        if (!use->consumer()->isDefinition() || !use->consumer()->toDefinition()->isAdd())
+            break;
+
+        MAdd *add = use->consumer()->toDefinition()->toAdd();
+        if (add->specialization() != MIRType_Int32 || !add->isTruncated())
+            break;
+
+        MDefinition *other = add->getOperand(1 - use->index());
+
+        if (other->isConstant()) {
+            displacement += other->toConstant()->value().toInt32();
+        } else {
+            if (base)
+                break;
+            base = other;
+        }
+
+        last = add;
+    }
+
+    if (!base) {
+        uint32_t elemSize = 1 << ScaleToShift(scale);
+        if (displacement % elemSize != 0)
+            return;
+
+        if (last->useCount() != 1)
+            return;
+
+        MUseIterator use = last->usesBegin();
+        if (!use->consumer()->isDefinition() || !use->consumer()->toDefinition()->isBitAnd())
+            return;
+
+        MBitAnd *bitAnd = use->consumer()->toDefinition()->toBitAnd();
+        MDefinition *other = bitAnd->getOperand(1 - use->index());
+        if (!other->isConstant() || !other->toConstant()->value().isInt32())
+            return;
+
+        uint32_t bitsClearedByShift = elemSize - 1;
+        uint32_t bitsClearedByMask = ~uint32_t(other->toConstant()->value().toInt32());
+        if ((bitsClearedByShift & bitsClearedByMask) != bitsClearedByMask)
+            return;
+
+        bitAnd->replaceAllUsesWith(last);
+        return;
+    }
+
+    MEffectiveAddress *eaddr = MEffectiveAddress::New(base, index, scale, displacement);
+    last->replaceAllUsesWith(eaddr);
+    block->insertAfter(last, eaddr);
+}
+#endif
+
+// This analysis converts patterns of the form:
+//   truncate(x + (y << {0,1,2,3}))
+//   truncate(x + (y << {0,1,2,3}) + imm32)
+// into a single lea instruction, and patterns of the form:
+//   asmload(x + imm32)
+//   asmload(x << {0,1,2,3})
+//   asmload((x << {0,1,2,3}) + imm32)
+//   asmload((x << {0,1,2,3}) & mask)            (where mask is redundant with shift)
+//   asmload(((x << {0,1,2,3}) + imm32) & mask)  (where mask is redundant with shift + imm32)
+// into a single asmload instruction (and for asmstore too).
+//
+// Additionally, we should consider the general forms:
+//   truncate(x + y + imm32)
+//   truncate((y << {0,1,2,3}) + imm32)
+bool
+EffectiveAddressAnalysis::analyze()
+{
+#if defined(JS_ASMJS)
+    for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
+        for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
+            if (i->isLsh())
+                AnalyzeLsh(*block, i->toLsh());
+        }
+    }
+#endif
+    return true;
+}
new file mode 100644
--- /dev/null
+++ b/js/src/ion/EffectiveAddressAnalysis.h
@@ -0,0 +1,32 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=99:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jsion_effective_address_analysis_h__
+#define jsion_effective_address_analysis_h__
+
+#include "MIR.h"
+#include "MIRGraph.h"
+