Bug 900669 - OdinMonkey: avoid all uses of ImmPtr/AbsoluteAddress from asm.js code (r=bbouvier,mjrosenb,sr=jandem)
author Luke Wagner <luke@mozilla.com>
date Mon, 12 Aug 2013 18:41:24 -0500
changeset 148295 ac62fceb9362b22c2b19873ae02f1ef17b58ff42
parent 148294 df368eed61f9f0f3cf3e47dcd16caaedc273af3f
child 148296 a906226a3865fc5cc38b8467cc9a776076a9c318
push id 34177
push user lwagner@mozilla.com
push date Mon, 23 Sep 2013 16:50:08 +0000
treeherder mozilla-inbound@ac62fceb9362
reviewers bbouvier, mjrosenb, jandem
bugs 900669
milestone 27.0a1
Bug 900669 - OdinMonkey: avoid all uses of ImmPtr/AbsoluteAddress from asm.js code (r=bbouvier,mjrosenb,sr=jandem)
js/src/assembler/assembler/X86Assembler.h
js/src/jit/AsmJS.cpp
js/src/jit/AsmJSModule.cpp
js/src/jit/AsmJSModule.h
js/src/jit/CodeGenerator.cpp
js/src/jit/IonLinker.h
js/src/jit/MIR.h
js/src/jit/RegisterSets.h
js/src/jit/arm/Assembler-arm.cpp
js/src/jit/arm/Assembler-arm.h
js/src/jit/arm/CodeGenerator-arm.cpp
js/src/jit/arm/MacroAssembler-arm.cpp
js/src/jit/arm/MacroAssembler-arm.h
js/src/jit/shared/Assembler-shared.h
js/src/jit/shared/Assembler-x86-shared.h
js/src/jit/shared/CodeGenerator-shared.cpp
js/src/jit/x64/Assembler-x64.h
js/src/jit/x64/MacroAssembler-x64.cpp
js/src/jit/x64/MacroAssembler-x64.h
js/src/jit/x86/Assembler-x86.h
js/src/jit/x86/CodeGenerator-x86.cpp
js/src/jit/x86/MacroAssembler-x86.cpp
js/src/jit/x86/MacroAssembler-x86.h
js/src/jscntxt.h
js/src/vm/Runtime.h
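
In outline, the patch replaces every raw pointer that asm.js code used to embed
via ImmPtr/AbsoluteAddress with a symbolic AsmJSImmKind: at compile time the
MacroAssembler emits a placeholder immediate and records an AsmJSAbsoluteLink
(patch offset plus kind), and AsmJSModule::staticallyLink() later resolves each
kind to a concrete address through AddressOf() and patches it into the code, so
the generated machine code no longer bakes in addresses of the current process.
A minimal sketch of that two-phase scheme, using hypothetical stand-in types
(SketchImmKind, SketchAbsoluteLink, EmitPatchableImm) rather than the real
SpiderMonkey classes:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <vector>

// Hypothetical, simplified stand-ins for AsmJSImmKind and
// AsmJSStaticLinkData::AbsoluteLink.
enum SketchImmKind { Sketch_SinD, Sketch_CosD };

struct SketchAbsoluteLink {
    size_t patchAtOffset;   // byte offset of the pointer-sized immediate
    SketchImmKind target;   // symbolic pointee, resolved at link time
};

// Phase 1 (compilation): emit a placeholder immediate and record a link.
static void EmitPatchableImm(std::vector<uint8_t> &code,
                             std::vector<SketchAbsoluteLink> &links,
                             SketchImmKind kind)
{
    links.push_back({ code.size(), kind });
    uintptr_t placeholder = uintptr_t(-1);   // same check value the patch uses
    const uint8_t *p = reinterpret_cast<const uint8_t *>(&placeholder);
    code.insert(code.end(), p, p + sizeof(placeholder));
}

// Phase 2 (static linking): map each symbolic kind to a live address,
// analogous to AddressOf() in AsmJSModule.cpp, and patch the code buffer.
static void *ResolveAddress(SketchImmKind kind)
{
    switch (kind) {
      case Sketch_SinD:
        return reinterpret_cast<void *>(static_cast<double (*)(double)>(std::sin));
      case Sketch_CosD:
        return reinterpret_cast<void *>(static_cast<double (*)(double)>(std::cos));
    }
    return nullptr;
}

static void StaticallyLink(std::vector<uint8_t> &code,
                           const std::vector<SketchAbsoluteLink> &links)
{
    for (const SketchAbsoluteLink &link : links) {
        uintptr_t addr = reinterpret_cast<uintptr_t>(ResolveAddress(link.target));
        std::memcpy(&code[link.patchAtOffset], &addr, sizeof(addr));
    }
}

int main()
{
    std::vector<uint8_t> code;
    std::vector<SketchAbsoluteLink> links;
    EmitPatchableImm(code, links, Sketch_SinD);
    StaticallyLink(code, links);

    uintptr_t patched;
    std::memcpy(&patched, &code[0], sizeof(patched));
    assert(patched == reinterpret_cast<uintptr_t>(
                          static_cast<double (*)(double)>(std::sin)));
    return 0;
}

In the real patch the placeholder is (void*)-1, which staticallyLink() verifies
with Assembler::patchDataWithValueCheck before overwriting, and the links are
carried in AsmJSStaticLinkData so the pointees can be re-resolved after
deserialization.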
--- a/js/src/assembler/assembler/X86Assembler.h
+++ b/js/src/assembler/assembler/X86Assembler.h
@@ -1307,16 +1307,23 @@ public:
 #endif
     void cmpl_rm(RegisterID reg, const void* addr)
     {
         spew("cmpl       %s, %p",
              nameIReg(4, reg), addr);
         m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr);
     }
 
+    void cmpl_rm_force32(RegisterID reg, const void* addr)
+    {
+        spew("cmpl       %s, %p",
+             nameIReg(4, reg), addr);
+        m_formatter.oneByteOp_disp32(OP_CMP_EvGv, reg, addr);
+    }
+
     void cmpl_im(int imm, const void* addr)
     {
         spew("cmpl       $0x%x, %p", imm, addr);
         if (CAN_SIGN_EXTEND_8_32(imm)) {
             m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr);
             m_formatter.immediate8(imm);
         } else {
             m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, addr);
@@ -3354,16 +3361,23 @@ private:
         }
 
         void oneByteOp(OneByteOpcodeID opcode, int reg, const void* address)
         {
             m_buffer.ensureSpace(maxInstructionSize);
             m_buffer.putByteUnchecked(opcode);
             memoryModRM(reg, address);
         }
+
+        void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, const void* address)
+        {
+            m_buffer.ensureSpace(maxInstructionSize);
+            m_buffer.putByteUnchecked(opcode);
+            memoryModRM_disp32(reg, address);
+        }
 #if WTF_CPU_X86_64
         void oneByteRipOp(OneByteOpcodeID opcode, int reg, int ripOffset)
         {
             m_buffer.ensureSpace(maxInstructionSize);
             emitRexIfNeeded(reg, 0, 0);
             m_buffer.putByteUnchecked(opcode);
             putModRm(ModRmMemoryNoDisp, reg, noBase);
             m_buffer.putIntUnchecked(ripOffset);
@@ -3882,30 +3896,35 @@ private:
             //
             //   reg := [scaled index] + disp32 + [ebp]
             //
             // See Intel developer manual, Vol 2, 2.1.5, Table 2-3.
             putModRmSib(ModRmMemoryNoDisp, reg, noBase, index, scale);
             m_buffer.putIntUnchecked(offset);
         }
 
-        void memoryModRM(int reg, const void* address)
+        void memoryModRM_disp32(int reg, const void* address)
         {
             int32_t disp = addressImmediate(address);
 
 #if WTF_CPU_X86_64
             // On x86-64, non-RIP-relative absolute mode requires a SIB.
             putModRmSib(ModRmMemoryNoDisp, reg, noBase, noIndex, 0);
 #else
             // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
             putModRm(ModRmMemoryNoDisp, reg, noBase);
 #endif
             m_buffer.putIntUnchecked(disp);
         }
 
+        void memoryModRM(int reg, const void* address)
+        {
+            memoryModRM_disp32(reg, address);
+        }
+
         AssemblerBuffer m_buffer;
     } m_formatter;
 };
 
 } // namespace JSC
 
 #endif // ENABLE(ASSEMBLER) && CPU(X86)
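
The new cmpl_rm_force32/oneByteOp_disp32/memoryModRM_disp32 helpers make the
encoding guarantee explicit: an absolute address is always emitted as a full
4-byte displacement at a known position inside the instruction, the same
property AsmJSModule::initHeap() (further down) relies on when it reads back
and rewrites heap offsets and lengths with JSC::X86Assembler::getPointer and
setPointer. A rough sketch of that read-back-and-patch idiom, with hypothetical
GetDisp32/SetDisp32 helpers standing in for the real getPointer/setPointer
(which operate on the four bytes just before the code pointer they are handed):

#include <cstdint>
#include <cstring>

// Read the 32-bit displacement that ends at 'afterDisp'.
static void *GetDisp32(const uint8_t *afterDisp)
{
    uint32_t disp;
    std::memcpy(&disp, afterDisp - 4, sizeof(disp));
    return reinterpret_cast<void *>(uintptr_t(disp));
}

// Overwrite that displacement in place. Assumes the new value fits in 32 bits,
// which holds for x86-32 addresses and for the patched heap lengths.
static void SetDisp32(uint8_t *afterDisp, void *value)
{
    uint32_t disp = uint32_t(reinterpret_cast<uintptr_t>(value));
    std::memcpy(afterDisp - 4, &disp, sizeof(disp));
}

int main()
{
    // 39 05 dd dd dd dd: cmp [disp32], eax -- the shape cmpl_rm_force32 emits
    // on x86-32 (hand-assembled here purely for illustration).
    uint8_t insn[6] = { 0x39, 0x05, 0, 0, 0, 0 };
    SetDisp32(insn + sizeof(insn), reinterpret_cast<void *>(uintptr_t(0x1000)));
    return GetDisp32(insn + sizeof(insn)) == reinterpret_cast<void *>(uintptr_t(0x1000)) ? 0 : 1;
}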
 
--- a/js/src/jit/AsmJS.cpp
+++ b/js/src/jit/AsmJS.cpp
@@ -7,17 +7,16 @@
 #include "jit/AsmJS.h"
 
 #include "mozilla/Move.h"
 
 #ifdef MOZ_VTUNE
 # include "vtune/VTuneWrapper.h"
 #endif
 
-#include "jsmath.h"
 #include "jsprf.h"
 #include "jsworkers.h"
 #include "prmjtime.h"
 
 #include "frontend/Parser.h"
 #include "jit/AsmJSLink.h"
 #include "jit/AsmJSModule.h"
 #include "jit/AsmJSSignalHandlers.h"
@@ -1767,16 +1766,26 @@ class MOZ_STACK_CLASS ModuleCompiler
         // not need patching after deserialization.
         uint8_t *code = module_->codeBase();
         for (unsigned i = 0; i < globalAccesses_.length(); i++) {
             AsmJSGlobalAccess a = globalAccesses_[i];
             masm_.patchAsmJSGlobalAccess(a.patchAt, code, module_->globalData(), a.globalDataOffset);
         }
 #endif
 
+        // Absolute links
+        for (size_t i = 0; i < masm_.numAsmJSAbsoluteLinks(); i++) {
+            AsmJSAbsoluteLink src = masm_.asmJSAbsoluteLink(i);
+            AsmJSStaticLinkData::AbsoluteLink link;
+            link.patchAt = masm_.actualOffset(src.patchAt.offset());
+            link.target = src.target;
+            if (!linkData->absoluteLinks.append(link))
+                return false;
+        }
+
         *module = module_.forget();
         return true;
     }
 };
 
 } /* anonymous namespace */
 
 /*****************************************************************************/
@@ -2280,17 +2289,17 @@ class FunctionCompiler
         unsigned globalDataOffset = module().exitIndexToGlobalDataOffset(exitIndex);
 
         MAsmJSLoadFFIFunc *ptrFun = MAsmJSLoadFFIFunc::New(globalDataOffset);
         curBlock_->add(ptrFun);
 
         return callPrivate(MAsmJSCall::Callee(ptrFun), call, returnType, def);
     }
 
-    bool builtinCall(void *builtin, const Call &call, MIRType returnType, MDefinition **def)
+    bool builtinCall(AsmJSImmKind builtin, const Call &call, MIRType returnType, MDefinition **def)
     {
         return callPrivate(MAsmJSCall::Callee(builtin), call, returnType, def);
     }
 
     /*********************************************** Control flow generation */
 
     void returnExpr(MDefinition *expr)
     {
@@ -3763,58 +3772,46 @@ CheckFFICall(FunctionCompiler &f, ParseN
 
     if (!f.ffiCall(exitIndex, call, retType.toMIRType(), def))
         return false;
 
     *type = retType.toType();
     return true;
 }
 
-static inline void *
-UnaryMathFunCast(double (*pf)(double))
-{
-    return JS_FUNC_TO_DATA_PTR(void*, pf);
-}
-
-static inline void *
-BinaryMathFunCast(double (*pf)(double, double))
-{
-    return JS_FUNC_TO_DATA_PTR(void*, pf);
-}
-
 static bool
 CheckIsDoublish(FunctionCompiler &f, ParseNode *argNode, Type type)
 {
     if (!type.isDoublish())
         return f.failf(argNode, "%s is not a subtype of doublish", type.toChars());
     return true;
 }
 
 static bool
 CheckMathBuiltinCall(FunctionCompiler &f, ParseNode *callNode, AsmJSMathBuiltin mathBuiltin,
                      RetType retType, MDefinition **def, Type *type)
 {
     unsigned arity = 0;
-    void *callee = NULL;
+    AsmJSImmKind callee;
     switch (mathBuiltin) {
       case AsmJSMathBuiltin_imul:  return CheckMathIMul(f, callNode, retType, def, type);
       case AsmJSMathBuiltin_abs:   return CheckMathAbs(f, callNode, retType, def, type);
-      case AsmJSMathBuiltin_sin:   arity = 1; callee = UnaryMathFunCast(sin);        break;
-      case AsmJSMathBuiltin_cos:   arity = 1; callee = UnaryMathFunCast(cos);        break;
-      case AsmJSMathBuiltin_tan:   arity = 1; callee = UnaryMathFunCast(tan);        break;
-      case AsmJSMathBuiltin_asin:  arity = 1; callee = UnaryMathFunCast(asin);       break;
-      case AsmJSMathBuiltin_acos:  arity = 1; callee = UnaryMathFunCast(acos);       break;
-      case AsmJSMathBuiltin_atan:  arity = 1; callee = UnaryMathFunCast(atan);       break;
-      case AsmJSMathBuiltin_ceil:  arity = 1; callee = UnaryMathFunCast(ceil);       break;
-      case AsmJSMathBuiltin_floor: arity = 1; callee = UnaryMathFunCast(floor);      break;
-      case AsmJSMathBuiltin_exp:   arity = 1; callee = UnaryMathFunCast(exp);        break;
-      case AsmJSMathBuiltin_log:   arity = 1; callee = UnaryMathFunCast(log);        break;
       case AsmJSMathBuiltin_sqrt:  return CheckMathSqrt(f, callNode, retType, def, type);
-      case AsmJSMathBuiltin_pow:   arity = 2; callee = BinaryMathFunCast(ecmaPow);   break;
-      case AsmJSMathBuiltin_atan2: arity = 2; callee = BinaryMathFunCast(ecmaAtan2); break;
+      case AsmJSMathBuiltin_sin:   arity = 1; callee = AsmJSImm_SinD;   break;
+      case AsmJSMathBuiltin_cos:   arity = 1; callee = AsmJSImm_CosD;   break;
+      case AsmJSMathBuiltin_tan:   arity = 1; callee = AsmJSImm_TanD;   break;
+      case AsmJSMathBuiltin_asin:  arity = 1; callee = AsmJSImm_ASinD;  break;
+      case AsmJSMathBuiltin_acos:  arity = 1; callee = AsmJSImm_ACosD;  break;
+      case AsmJSMathBuiltin_atan:  arity = 1; callee = AsmJSImm_ATanD;  break;
+      case AsmJSMathBuiltin_ceil:  arity = 1; callee = AsmJSImm_CeilD;  break;
+      case AsmJSMathBuiltin_floor: arity = 1; callee = AsmJSImm_FloorD; break;
+      case AsmJSMathBuiltin_exp:   arity = 1; callee = AsmJSImm_ExpD;   break;
+      case AsmJSMathBuiltin_log:   arity = 1; callee = AsmJSImm_LogD;   break;
+      case AsmJSMathBuiltin_pow:   arity = 2; callee = AsmJSImm_PowD;   break;
+      case AsmJSMathBuiltin_atan2: arity = 2; callee = AsmJSImm_ATan2D; break;
     }
 
     FunctionCompiler::Call call(f, retType);
     if (!CheckCallArgs(f, callNode, CheckIsDoublish, &call))
         return false;
 
     if (call.sig().args().length() != arity)
         return f.failf(callNode, "call passed %u arguments, expected %u", call.sig().args().length(), arity);
@@ -5439,17 +5436,20 @@ static const RegisterSet AllRegsExceptSP
                 FloatRegisterSet(FloatRegisters::AllMask));
 static const RegisterSet NonVolatileRegs =
     RegisterSet(GeneralRegisterSet(Registers::NonVolatileMask),
                 FloatRegisterSet(FloatRegisters::NonVolatileMask));
 
 static void
 LoadAsmJSActivationIntoRegister(MacroAssembler &masm, Register reg)
 {
-    masm.loadPtr(AbsoluteAddress(GetIonContext()->runtime->mainThread.addressOfAsmJSActivationStackReadOnly()), reg);
+    masm.movePtr(AsmJSImm_Runtime, reg);
+    size_t offset = offsetof(JSRuntime, mainThread) +
+                    PerThreadData::offsetOfAsmJSActivationStackReadOnly();
+    masm.loadPtr(Address(reg, offset), reg);
 }
 
 static void
 LoadJSContextFromActivation(MacroAssembler &masm, Register activation, Register dest)
 {
     masm.loadPtr(Address(activation, AsmJSActivation::offsetOfContext()), dest);
 }
 
@@ -5640,34 +5640,36 @@ TryEnablingIon(JSContext *cx, AsmJSModul
     IonScript *ionScript = script->ionScript();
     if (!ionScript->addDependentAsmJSModule(cx, DependentAsmJSModuleExit(&module, exitIndex)))
         return false;
 
     module.exitIndexToGlobalDatum(exitIndex).exit = module.ionExitTrampoline(module.exit(exitIndex));
     return true;
 }
 
-static int32_t
+namespace js {
+
+int32_t
 InvokeFromAsmJS_Ignore(JSContext *cx, int32_t exitIndex, int32_t argc, Value *argv)
 {
     AsmJSModule &module = cx->mainThread().asmJSActivationStackFromOwnerThread()->module();
 
     RootedFunction fun(cx, module.exitIndexToGlobalDatum(exitIndex).fun);
     RootedValue fval(cx, ObjectValue(*fun));
     RootedValue rval(cx);
     if (!Invoke(cx, UndefinedValue(), fval, argc, argv, &rval))
         return false;
 
     if (!TryEnablingIon(cx, module, fun, exitIndex, argc, argv))
         return false;
 
     return true;
 }
 
-static int32_t
+int32_t
 InvokeFromAsmJS_ToInt32(JSContext *cx, int32_t exitIndex, int32_t argc, Value *argv)
 {
     AsmJSModule &module = cx->mainThread().asmJSActivationStackFromOwnerThread()->module();
 
     RootedFunction fun(cx, module.exitIndexToGlobalDatum(exitIndex).fun);
     RootedValue fval(cx, ObjectValue(*fun));
     RootedValue rval(cx);
     if (!Invoke(cx, UndefinedValue(), fval, argc, argv, &rval))
@@ -5679,17 +5681,17 @@ InvokeFromAsmJS_ToInt32(JSContext *cx, i
     int32_t i32;
     if (!ToInt32(cx, rval, &i32))
         return false;
     argv[0] = Int32Value(i32);
 
     return true;
 }
 
-static int32_t
+int32_t
 InvokeFromAsmJS_ToNumber(JSContext *cx, int32_t exitIndex, int32_t argc, Value *argv)
 {
     AsmJSModule &module = cx->mainThread().asmJSActivationStackFromOwnerThread()->module();
 
     RootedFunction fun(cx, module.exitIndexToGlobalDatum(exitIndex).fun);
     RootedValue fval(cx, ObjectValue(*fun));
     RootedValue rval(cx);
     if (!Invoke(cx, UndefinedValue(), fval, argc, argv, &rval))
@@ -5701,16 +5703,18 @@ InvokeFromAsmJS_ToNumber(JSContext *cx, 
     double dbl;
     if (!ToNumber(cx, rval, &dbl))
         return false;
     argv[0] = DoubleValue(dbl);
 
     return true;
 }
 
+}  // namespace js
+
 static void
 FillArgumentArray(ModuleCompiler &m, const VarTypeVector &argTypes,
                   unsigned offsetToArgs, unsigned offsetToCallerStackArgs,
                   Register scratch)
 {
     MacroAssembler &masm = m.masm();
 
     for (ABIArgTypeIter i(argTypes); !i.done(); i++) {
@@ -5818,26 +5822,26 @@ GenerateFFIInterpreterExit(ModuleCompile
     }
     i++;
     JS_ASSERT(i.done());
 
     // Make the call, test whether it succeeded, and extract the return value.
     AssertStackAlignment(masm);
     switch (exit.sig().retType().which()) {
       case RetType::Void:
-        masm.call(ImmPtr(InvokeFromAsmJS_Ignore));
+        masm.call(AsmJSImm_InvokeFromAsmJS_Ignore);
         masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
         break;
       case RetType::Signed:
-        masm.call(ImmPtr(InvokeFromAsmJS_ToInt32));
+        masm.call(AsmJSImm_InvokeFromAsmJS_ToInt32);
         masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
         masm.unboxInt32(argv, ReturnReg);
         break;
       case RetType::Double:
-        masm.call(ImmPtr(InvokeFromAsmJS_ToNumber));
+        masm.call(AsmJSImm_InvokeFromAsmJS_ToNumber);
         masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
         masm.loadDouble(argv, ReturnFloatReg);
         break;
     }
 
     // Note: the caller is IonMonkey code which means there are no non-volatile
     // registers to restore.
     masm.freeStack(stackDec);
@@ -5869,62 +5873,40 @@ GenerateFFIInterpreterExit(ModuleCompile
 
     // argument 3: argv
     Address argv(StackPointer, ShadowStackSpace);
     masm.lea(Operand(argv), IntArgReg3);
 
     AssertStackAlignment(masm);
     switch (exit.sig().retType().which()) {
       case RetType::Void:
-        masm.call(ImmPtr(InvokeFromAsmJS_Ignore));
+        masm.call(AsmJSImm_InvokeFromAsmJS_Ignore);
         masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
         break;
       case RetType::Signed:
-        masm.call(ImmPtr(InvokeFromAsmJS_ToInt32));
+        masm.call(AsmJSImm_InvokeFromAsmJS_ToInt32);
         masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
         masm.unboxInt32(argv, ReturnReg);
         break;
       case RetType::Double:
-        masm.call(ImmPtr(InvokeFromAsmJS_ToNumber));
+        masm.call(AsmJSImm_InvokeFromAsmJS_ToNumber);
         masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
 #if defined(JS_CPU_ARM) && !defined(JS_CPU_ARM_HARDFP)
         masm.loadValue(argv, softfpReturnOperand);
 #else
         masm.loadDouble(argv, ReturnFloatReg);
 #endif
         break;
     }
 
     masm.freeStack(reserveSize + sizeof(int32_t));
     masm.ret();
 #endif
 }
 
-static int32_t
-ValueToInt32(JSContext *cx, MutableHandleValue val)
-{
-    int32_t i32;
-    if (!ToInt32(cx, val, &i32))
-        return false;
-    val.set(Int32Value(i32));
-
-    return true;
-}
-
-static int32_t
-ValueToNumber(JSContext *cx, MutableHandleValue val)
-{
-    double dbl;
-    if (!ToNumber(cx, val, &dbl))
-        return false;
-    val.set(DoubleValue(dbl));
-
-    return true;
-}
-
 static void
 GenerateOOLConvert(ModuleCompiler &m, RetType retType, Label *throwLabel)
 {
     MacroAssembler &masm = m.masm();
 
     MIRType typeArray[] = { MIRType_Pointer,   // cx
                             MIRType_Pointer }; // argv
     MIRTypeVector callArgTypes(m.cx());
@@ -5965,55 +5947,37 @@ GenerateOOLConvert(ModuleCompiler &m, Re
         masm.storePtr(scratch, Address(StackPointer, i->offsetFromArgBase()));
     }
     i++;
     JS_ASSERT(i.done());
 
     // Call
     switch (retType.which()) {
       case RetType::Signed:
-          masm.call(ImmPtr(ValueToInt32));
+          masm.call(AsmJSImm_CoerceInPlace_ToInt32);
           masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
           masm.unboxInt32(Address(StackPointer, offsetToArgv), ReturnReg);
           break;
       case RetType::Double:
-          masm.call(ImmPtr(ValueToNumber));
+          masm.call(AsmJSImm_CoerceInPlace_ToNumber);
           masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
 #if defined(JS_CPU_ARM) && !defined(JS_CPU_ARM_HARDFP)
           masm.loadValue(Address(StackPointer, offsetToArgv), softfpReturnOperand);
 #else
           masm.loadDouble(Address(StackPointer, offsetToArgv), ReturnFloatReg);
 #endif
           break;
       default:
           MOZ_ASSUME_UNREACHABLE("Unsupported convert type");
     }
 
     masm.freeStack(stackDec);
 }
 
 static void
-EnableActivation(AsmJSActivation *activation)
-{
-    JSContext *cx = activation->cx();
-    Activation *act = cx->mainThread().activation();
-    JS_ASSERT(act->isJit());
-    act->asJit()->setActive(cx);
-}
-
-static void
-DisableActivation(AsmJSActivation *activation)
-{
-    JSContext *cx = activation->cx();
-    Activation *act = cx->mainThread().activation();
-    JS_ASSERT(act->isJit());
-    act->asJit()->setActive(cx, false);
-}
-
-static void
 GenerateFFIIonExit(ModuleCompiler &m, const ModuleCompiler::ExitDescriptor &exit,
                          unsigned exitIndex, Label *throwLabel)
 {
     MacroAssembler &masm = m.masm();
     masm.align(CodeAlignment);
     m.setIonExitOffset(exitIndex);
     masm.setFramePushed(0);
 
@@ -6092,17 +6056,17 @@ GenerateFFIIonExit(ModuleCompiler &m, co
 
     masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), scratch);
     masm.loadBaselineOrIonNoArgCheck(scratch, scratch, SequentialExecution, maybeDebugBreakpoint);
 
     LoadAsmJSActivationIntoRegister(masm, callee);
     masm.push(scratch);
     masm.setupUnalignedABICall(1, scratch);
     masm.passABIArg(callee);
-    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, EnableActivation));
+    masm.callWithABI(AsmJSImm_EnableActivationFromAsmJS);
     masm.pop(scratch);
 
     // 2. Call
 #if defined(JS_CPU_ARM) && defined(DEBUG)
     // ARM still needs to push, before stack is aligned
     masm.Push(scratch);
 #endif
     AssertStackAlignment(masm);
@@ -6112,17 +6076,17 @@ GenerateFFIIonExit(ModuleCompiler &m, co
     masm.callIon(scratch);
     masm.freeStack(stackDec - extraBytes);
 
     masm.push(JSReturnReg_Type);
     masm.push(JSReturnReg_Data);
     LoadAsmJSActivationIntoRegister(masm, callee);
     masm.setupUnalignedABICall(1, scratch);
     masm.passABIArg(callee);
-    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, DisableActivation));
+    masm.callWithABI(AsmJSImm_DisableActivationFromAsmJS);
     masm.pop(JSReturnReg_Data);
     masm.pop(JSReturnReg_Type);
 
 #ifdef DEBUG
     masm.branchTestMagicValue(Assembler::Equal, JSReturnOperand, JS_ION_ERROR, throwLabel);
     masm.branchTestMagic(Assembler::Equal, JSReturnOperand, &ionFailed);
 #else
     masm.branchTestMagic(Assembler::Equal, JSReturnOperand, throwLabel);
@@ -6198,18 +6162,17 @@ GenerateStackOverflowExit(ModuleCompiler
 #if defined(JS_CPU_X86)
     LoadAsmJSActivationIntoRegister(masm, eax);
     LoadJSContextFromActivation(masm, eax, eax);
     masm.storePtr(eax, Address(StackPointer, 0));
 #else
     LoadAsmJSActivationIntoRegister(masm, IntArgReg0);
     LoadJSContextFromActivation(masm, IntArgReg0, IntArgReg0);
 #endif
-    void (*reportOverRecursed)(JSContext*) = js_ReportOverRecursed;
-    masm.call(ImmPtr(reportOverRecursed));
+    masm.call(AsmJSImm_ReportOverRecursed);
     masm.jump(throwLabel);
 
     return !masm.oom();
 }
 
 // The operation-callback exit is called from arbitrarily-interrupted asm.js
 // code. That means we must first save *all* registers and restore *all*
 // registers (except the stack pointer) when we resume. The address to resume to
@@ -6256,17 +6219,17 @@ GenerateOperationCallbackExit(ModuleComp
     // argument 0: cx
 #if defined(JS_CPU_X86)
     LoadJSContextFromActivation(masm, activation, scratch);
     masm.storePtr(scratch, Address(StackPointer, 0));
 #elif defined(JS_CPU_X64)
     LoadJSContextFromActivation(masm, activation, IntArgReg0);
 #endif
 
-    masm.call(ImmPtr(js_HandleExecutionInterrupt));
+    masm.call(AsmJSImm_HandleExecutionInterrupt);
     masm.branchIfFalseBool(ReturnReg, throwLabel);
 
     // Restore the StackPointer to its position before the call.
     masm.mov(ABIArgGenerator::NonVolatileReg, StackPointer);
 
     // Restore the machine state to before the interrupt.
     masm.PopRegsInMask(AllRegsExceptSP); // restore all GP/FP registers (except SP)
     masm.popFlags();              // after this, nothing that sets conditions
@@ -6287,17 +6250,17 @@ GenerateOperationCallbackExit(ModuleComp
     LoadAsmJSActivationIntoRegister(masm, IntArgReg0);
     masm.loadPtr(Address(IntArgReg0, AsmJSActivation::offsetOfResumePC()), IntArgReg1);
     masm.storePtr(IntArgReg1, Address(r6, 14 * sizeof(uint32_t*)));
 
     // argument 0: cx
     masm.loadPtr(Address(IntArgReg0, AsmJSActivation::offsetOfContext()), IntArgReg0);
 
     masm.PushRegsInMask(RegisterSet(GeneralRegisterSet(0), FloatRegisterSet(FloatRegisters::AllMask)));   // save all FP registers
-    masm.call(ImmPtr(js_HandleExecutionInterrupt));
+    masm.call(AsmJSImm_HandleExecutionInterrupt);
     masm.branchIfFalseBool(ReturnReg, throwLabel);
 
     // Restore the machine state to before the interrupt. This will set the pc!
     masm.PopRegsInMask(RegisterSet(GeneralRegisterSet(0), FloatRegisterSet(FloatRegisters::AllMask)));   // restore all FP registers
     masm.mov(r6,sp);
     masm.as_vmsr(r5);
     masm.as_msr(r4);
     // Restore all GP registers
@@ -6450,17 +6413,17 @@ CheckModule(ExclusiveContext *cx, AsmJSP
     TokenKind tk = PeekToken(m.parser());
     if (tk != TOK_EOF && tk != TOK_RC)
         return m.fail(NULL, "top-level export (return) must be the last statement");
 
     AsmJSStaticLinkData linkData(cx);
     if (!FinishModule(m, module, &linkData))
         return false;
 
-    (*module)->staticallyLink(linkData);
+    (*module)->staticallyLink(linkData, cx);
 
     m.buildCompilationTimeReport(compilationTimeReport);
     return true;
 }
 
 static bool
 Warn(AsmJSParser &parser, int errorNumber, const char *str)
 {
@@ -6484,22 +6447,22 @@ EstablishPreconditions(ExclusiveContext 
         return Warn(parser, JSMSG_USE_ASM_TYPE_FAIL, "Disabled by javascript.options.asmjs in about:config");
 
     if (!parser.options().compileAndGo)
         return Warn(parser, JSMSG_USE_ASM_TYPE_FAIL, "Temporarily disabled for event-handler and other cloneable scripts");
 
     if (cx->compartment()->debugMode())
         return Warn(parser, JSMSG_USE_ASM_TYPE_FAIL, "Disabled by debugger");
 
-# ifdef JS_WORKER_THREADS
+#ifdef JS_WORKER_THREADS
     if (ParallelCompilationEnabled(cx)) {
         if (!EnsureWorkerThreadsInitialized(cx))
             return Warn(parser, JSMSG_USE_ASM_TYPE_FAIL, "Failed compilation thread initialization");
     }
-# endif
+#endif
 
     return true;
 }
 
 static bool
 NoExceptionPending(ExclusiveContext *cx)
 {
     return !cx->isJSContext() || !cx->asJSContext()->isExceptionPending();
--- a/js/src/jit/AsmJSModule.cpp
+++ b/js/src/jit/AsmJSModule.cpp
@@ -6,16 +6,18 @@
 
 #include "jit/AsmJSModule.h"
 #include "jit/IonCode.h"
 
 #ifndef XP_WIN
 # include <sys/mman.h>
 #endif
 
+#include "jslibmath.h"
+#include "jsmath.h"
 #ifdef XP_WIN
 # include "jswin.h"
 #endif
 
 #include "js/MemoryMetrics.h"
 
 #include "jsobjinlines.h"
 
@@ -35,16 +37,17 @@ AsmJSModule::initHeap(Handle<ArrayBuffer
     uint8_t *heapOffset = heap->dataPointer();
     void *heapLength = (void*)heap->byteLength();
     for (unsigned i = 0; i < heapAccesses_.length(); i++) {
         const jit::AsmJSHeapAccess &access = heapAccesses_[i];
         if (access.hasLengthCheck())
             JSC::X86Assembler::setPointer(access.patchLengthAt(code_), heapLength);
         void *addr = access.patchOffsetAt(code_);
         uint32_t disp = reinterpret_cast<uint32_t>(JSC::X86Assembler::getPointer(addr));
+        JS_ASSERT(disp <= INT32_MAX);
         JSC::X86Assembler::setPointer(addr, (void *)(heapOffset + disp));
     }
 #elif defined(JS_CPU_ARM)
     uint32_t heapLength = heap->byteLength();
     for (unsigned i = 0; i < heapAccesses_.length(); i++) {
         jit::Assembler::updateBoundsCheck(heapLength,
                                           (jit::Instruction*)(heapAccesses_[i].offset() + code_));
     }
@@ -104,28 +107,175 @@ AsmJSModule::allocateAndCopyCode(Exclusi
     if (!code_)
         return false;
 
     JS_ASSERT(uintptr_t(code_) % AsmJSPageSize == 0);
     masm.executableCopy(code_);
     return true;
 }
 
+static int32_t
+CoerceInPlace_ToInt32(JSContext *cx, MutableHandleValue val)
+{
+    int32_t i32;
+    if (!ToInt32(cx, val, &i32))
+        return false;
+    val.set(Int32Value(i32));
+
+    return true;
+}
+
+static int32_t
+CoerceInPlace_ToNumber(JSContext *cx, MutableHandleValue val)
+{
+    double dbl;
+    if (!ToNumber(cx, val, &dbl))
+        return false;
+    val.set(DoubleValue(dbl));
+
+    return true;
+}
+
+static void
+EnableActivationFromAsmJS(AsmJSActivation *activation)
+{
+    JSContext *cx = activation->cx();
+    Activation *act = cx->mainThread().activation();
+    JS_ASSERT(act->isJit());
+    act->asJit()->setActive(cx);
+}
+
+static void
+DisableActivationFromAsmJS(AsmJSActivation *activation)
+{
+    JSContext *cx = activation->cx();
+    Activation *act = cx->mainThread().activation();
+    JS_ASSERT(act->isJit());
+    act->asJit()->setActive(cx, false);
+}
+
+namespace js {
+
+// Defined in AsmJS.cpp:
+
+int32_t
+InvokeFromAsmJS_Ignore(JSContext *cx, int32_t exitIndex, int32_t argc, Value *argv);
+
+int32_t
+InvokeFromAsmJS_ToInt32(JSContext *cx, int32_t exitIndex, int32_t argc, Value *argv);
+
+int32_t
+InvokeFromAsmJS_ToNumber(JSContext *cx, int32_t exitIndex, int32_t argc, Value *argv);
+
+}
+
+#if defined(JS_CPU_ARM)
+extern "C" {
+
+extern int
+__aeabi_idivmod(int, int);
+
+extern int
+__aeabi_uidivmod(int, int);
+
+}
+#endif
+
+template <class F>
+static inline void *
+FuncCast(F *pf)
+{
+    return JS_FUNC_TO_DATA_PTR(void *, pf);
+}
+
+static void *
+AddressOf(AsmJSImmKind kind, ExclusiveContext *cx)
+{
+    switch (kind) {
+      case AsmJSImm_Runtime:
+        return cx->runtimeAddressForJit();
+      case AsmJSImm_StackLimit:
+        return cx->stackLimitAddress(StackForUntrustedScript);
+      case AsmJSImm_ReportOverRecursed:
+        return FuncCast<void (JSContext*)>(js_ReportOverRecursed);
+      case AsmJSImm_HandleExecutionInterrupt:
+        return FuncCast(js_HandleExecutionInterrupt);
+      case AsmJSImm_InvokeFromAsmJS_Ignore:
+        return FuncCast(InvokeFromAsmJS_Ignore);
+      case AsmJSImm_InvokeFromAsmJS_ToInt32:
+        return FuncCast(InvokeFromAsmJS_ToInt32);
+      case AsmJSImm_InvokeFromAsmJS_ToNumber:
+        return FuncCast(InvokeFromAsmJS_ToNumber);
+      case AsmJSImm_CoerceInPlace_ToInt32:
+        return FuncCast(CoerceInPlace_ToInt32);
+      case AsmJSImm_CoerceInPlace_ToNumber:
+        return FuncCast(CoerceInPlace_ToNumber);
+      case AsmJSImm_ToInt32:
+        return FuncCast<int32_t (double)>(js::ToInt32);
+      case AsmJSImm_EnableActivationFromAsmJS:
+        return FuncCast(EnableActivationFromAsmJS);
+      case AsmJSImm_DisableActivationFromAsmJS:
+        return FuncCast(DisableActivationFromAsmJS);
+#if defined(JS_CPU_ARM)
+      case AsmJSImm_aeabi_idivmod:
+        return FuncCast(__aeabi_idivmod);
+      case AsmJSImm_aeabi_uidivmod:
+        return FuncCast(__aeabi_uidivmod);
+#endif
+      case AsmJSImm_ModD:
+        return FuncCast(NumberMod);
+      case AsmJSImm_SinD:
+        return FuncCast<double (double)>(sin);
+      case AsmJSImm_CosD:
+        return FuncCast<double (double)>(cos);
+      case AsmJSImm_TanD:
+        return FuncCast<double (double)>(tan);
+      case AsmJSImm_ASinD:
+        return FuncCast<double (double)>(asin);
+      case AsmJSImm_ACosD:
+        return FuncCast<double (double)>(acos);
+      case AsmJSImm_ATanD:
+        return FuncCast<double (double)>(atan);
+      case AsmJSImm_CeilD:
+        return FuncCast<double (double)>(ceil);
+      case AsmJSImm_FloorD:
+        return FuncCast<double (double)>(floor);
+      case AsmJSImm_ExpD:
+        return FuncCast<double (double)>(exp);
+      case AsmJSImm_LogD:
+        return FuncCast<double (double)>(log);
+      case AsmJSImm_PowD:
+        return FuncCast(ecmaPow);
+      case AsmJSImm_ATan2D:
+        return FuncCast(ecmaAtan2);
+    }
+
+    MOZ_ASSUME_UNREACHABLE("Bad AsmJSImmKind");
+    return NULL;
+}
+
 void
-AsmJSModule::staticallyLink(const AsmJSStaticLinkData &linkData)
+AsmJSModule::staticallyLink(const AsmJSStaticLinkData &linkData, ExclusiveContext *cx)
 {
     // Process AsmJSStaticLinkData:
 
     operationCallbackExit_ = code_ + linkData.operationCallbackExitOffset;
 
     for (size_t i = 0; i < linkData.relativeLinks.length(); i++) {
         AsmJSStaticLinkData::RelativeLink link = linkData.relativeLinks[i];
         *(void **)(code_ + link.patchAtOffset) = code_ + link.targetOffset;
     }
 
+    for (size_t i = 0; i < linkData.absoluteLinks.length(); i++) {
+        AsmJSStaticLinkData::AbsoluteLink link = linkData.absoluteLinks[i];
+        Assembler::patchDataWithValueCheck(code_ + link.patchAt.offset(),
+                                           PatchedImmPtr(AddressOf(link.target, cx)),
+                                           PatchedImmPtr((void*)-1));
+    }
+
     // Initialize global data segment
 
     for (size_t i = 0; i < exits_.length(); i++) {
         exitIndexToGlobalDatum(i).exit = interpExitTrampoline(exits_[i]);
         exitIndexToGlobalDatum(i).fun = NULL;
     }
 }
 
--- a/js/src/jit/AsmJSModule.h
+++ b/js/src/jit/AsmJSModule.h
@@ -49,21 +49,30 @@ struct AsmJSStaticLinkData
     struct RelativeLink
     {
         uint32_t patchAtOffset;
         uint32_t targetOffset;
     };
 
     typedef Vector<RelativeLink> RelativeLinkVector;
 
+    struct AbsoluteLink
+    {
+        jit::CodeOffsetLabel patchAt;
+        jit::AsmJSImmKind target;
+    };
+
+    typedef Vector<AbsoluteLink> AbsoluteLinkVector;
+
     size_t operationCallbackExitOffset;
     RelativeLinkVector relativeLinks;
+    AbsoluteLinkVector absoluteLinks;
 
     AsmJSStaticLinkData(ExclusiveContext *cx)
-      : relativeLinks(cx)
+      : relativeLinks(cx), absoluteLinks(cx)
     {}
 };
 
 // An asm.js module represents the collection of functions nested inside a
 // single outer "use asm" function. For example, this asm.js module:
 //   function() { "use asm"; function f() {} function g() {} return f }
 // contains the functions 'f' and 'g'.
 //
@@ -668,17 +677,17 @@ class AsmJSModule
         if (len > minHeapLength_)
             minHeapLength_ = len;
     }
     uint32_t minHeapLength() const {
         return minHeapLength_;
     }
 
     bool allocateAndCopyCode(ExclusiveContext *cx, jit::MacroAssembler &masm);
-    void staticallyLink(const AsmJSStaticLinkData &linkData);
+    void staticallyLink(const AsmJSStaticLinkData &linkData, ExclusiveContext *cx);
 
     uint8_t *codeBase() const {
         JS_ASSERT(code_);
         JS_ASSERT(uintptr_t(code_) % AsmJSPageSize == 0);
         return code_;
     }
 
     uint8_t *operationCallbackExit() const {
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -141,16 +141,17 @@ MNewStringObject::templateObj() const {
 CodeGenerator::CodeGenerator(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm)
   : CodeGeneratorSpecific(gen, graph, masm),
     unassociatedScriptCounts_(NULL)
 {
 }
 
 CodeGenerator::~CodeGenerator()
 {
+    JS_ASSERT_IF(!gen->compilingAsmJS(), masm.numAsmJSAbsoluteLinks() == 0);
     js_delete(unassociatedScriptCounts_);
 }
 
 typedef bool (*StringToNumberFn)(ThreadSafeContext *, JSString *, double *);
 typedef ParallelResult (*StringToNumberParFn)(ForkJoinSlice *, JSString *, double *);
 static const VMFunctionsModal StringToNumberInfo = VMFunctionsModal(
     FunctionInfo<StringToNumberFn>(StringToNumber),
     FunctionInfo<StringToNumberParFn>(StringToNumberPar));
@@ -3827,17 +3828,20 @@ CodeGenerator::visitModD(LModD *ins)
     Register temp = ToRegister(ins->temp());
 
     JS_ASSERT(ToFloatRegister(ins->output()) == ReturnFloatReg);
 
     masm.setupUnalignedABICall(2, temp);
     masm.passABIArg(lhs);
     masm.passABIArg(rhs);
 
-    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, NumberMod), MacroAssembler::DOUBLE);
+    if (gen->compilingAsmJS())
+        masm.callWithABI(AsmJSImm_ModD, MacroAssembler::DOUBLE);
+    else
+        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, NumberMod), MacroAssembler::DOUBLE);
     return true;
 }
 
 typedef bool (*BinaryFn)(JSContext *, HandleScript, jsbytecode *,
                          MutableHandleValue, MutableHandleValue, Value *);
 typedef ParallelResult (*BinaryParFn)(ForkJoinSlice *, HandleValue, HandleValue,
                                       Value *);
 
@@ -7388,17 +7392,17 @@ CodeGenerator::visitAsmJSCall(LAsmJSCall
     switch (callee.which()) {
       case MAsmJSCall::Callee::Internal:
         masm.call(callee.internal());
         break;
       case MAsmJSCall::Callee::Dynamic:
         masm.call(ToRegister(ins->getOperand(mir->dynamicCalleeOperandIndex())));
         break;
       case MAsmJSCall::Callee::Builtin:
-        masm.call(ImmPtr(callee.builtin()));
+        masm.call(callee.builtin());
         break;
     }
 
     if (mir->spIncrement())
         masm.reserveStack(mir->spIncrement());
 
     postAsmJSCall(ins);
     return true;
@@ -7440,19 +7444,18 @@ CodeGenerator::visitAsmJSVoidReturn(LAsm
     if (current->mir() != *gen->graph().poBegin())
         masm.jump(&returnLabel_);
     return true;
 }
 
 bool
 CodeGenerator::visitAsmJSCheckOverRecursed(LAsmJSCheckOverRecursed *lir)
 {
-    uintptr_t *limitAddr = &GetIonContext()->runtime->mainThread.nativeStackLimit[StackForUntrustedScript];
     masm.branchPtr(Assembler::AboveOrEqual,
-                   AbsoluteAddress(limitAddr),
+                   AsmJSAbsoluteAddress(AsmJSImm_StackLimit),
                    StackPointer,
                    lir->mir()->onError());
     return true;
 }
 
 bool
 CodeGenerator::emitAssertRangeI(const Range *r, Register input)
 {
--- a/js/src/jit/IonLinker.h
+++ b/js/src/jit/IonLinker.h
@@ -27,16 +27,18 @@ class Linker
         js_ReportOutOfMemory(cx);
         return NULL;
     }
 
     IonCode *newCode(JSContext *cx, JSC::ExecutableAllocator *execAlloc, JSC::CodeKind kind) {
         JS_ASSERT(kind == JSC::ION_CODE ||
                   kind == JSC::BASELINE_CODE ||
                   kind == JSC::OTHER_CODE);
+        JS_ASSERT(masm.numAsmJSAbsoluteLinks() == 0);
+
         gc::AutoSuppressGC suppressGC(cx);
         if (masm.oom())
             return fail(cx);
 
         JSC::ExecutablePool *pool;
         size_t bytesNeeded = masm.bytesNeeded() + sizeof(IonCode *) + CodeAlignment;
         if (bytesNeeded >= MAX_BUFFER_SIZE)
             return fail(cx);
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -8784,27 +8784,27 @@ class MAsmJSCall MOZ_FINAL : public MIns
     class Callee {
       public:
         enum Which { Internal, Dynamic, Builtin };
       private:
         Which which_;
         union {
             Label *internal_;
             MDefinition *dynamic_;
-            const void *builtin_;
+            AsmJSImmKind builtin_;
         } u;
       public:
         Callee() {}
         Callee(Label *callee) : which_(Internal) { u.internal_ = callee; }
         Callee(MDefinition *callee) : which_(Dynamic) { u.dynamic_ = callee; }
-        Callee(const void *callee) : which_(Builtin) { u.builtin_ = callee; }
+        Callee(AsmJSImmKind callee) : which_(Builtin) { u.builtin_ = callee; }
         Which which() const { return which_; }
         Label *internal() const { JS_ASSERT(which_ == Internal); return u.internal_; }
         MDefinition *dynamic() const { JS_ASSERT(which_ == Dynamic); return u.dynamic_; }
-        const void *builtin() const { JS_ASSERT(which_ == Builtin); return u.builtin_; }
+        AsmJSImmKind builtin() const { JS_ASSERT(which_ == Builtin); return u.builtin_; }
     };
 
   private:
     struct Operand {
         AnyRegister reg;
         MUse use;
     };
 
--- a/js/src/jit/RegisterSets.h
+++ b/js/src/jit/RegisterSets.h
@@ -780,29 +780,34 @@ class ABIArg
     Register gpr() const { JS_ASSERT(kind() == GPR); return Register::FromCode(u.gpr_); }
     FloatRegister fpu() const { JS_ASSERT(kind() == FPU); return FloatRegister::FromCode(u.fpu_); }
     uint32_t offsetFromArgBase() const { JS_ASSERT(kind() == Stack); return u.offset_; }
 
     bool argInRegister() const { return kind() != Stack; }
     AnyRegister reg() const { return kind_ == GPR ? AnyRegister(gpr()) : AnyRegister(fpu()); }
 };
 
+// Summarizes a heap access made by asm.js code that needs to be patched later
+// and/or looked up by the asm.js signal handlers. Different architectures need
+// to know different things (x64: offset and length, ARM: where to patch in
+// heap length, x86: where to patch in heap length and base) hence the massive
+// #ifdefery.
 class AsmJSHeapAccess
 {
     uint32_t offset_;
 #if defined(JS_CPU_X86)
     uint8_t cmpDelta_;  // the number of bytes from the cmp to the load/store instruction
 #endif
 #if defined(JS_CPU_X86) || defined(JS_CPU_X64)
     uint8_t opLength_;  // the length of the load/store instruction
     uint8_t isFloat32Load_;
-    jit::AnyRegister::Code loadedReg_ : 8;
+    AnyRegister::Code loadedReg_ : 8;
 #endif
 
-    JS_STATIC_ASSERT(jit::AnyRegister::Total < UINT8_MAX);
+    JS_STATIC_ASSERT(AnyRegister::Total < UINT8_MAX);
 
   public:
 #if defined(JS_CPU_X86) || defined(JS_CPU_X64)
     // If 'cmp' equals 'offset' or if it is not supplied then the
     // cmpDelta_ is zero indicating that there is no length to patch.
     AsmJSHeapAccess(uint32_t offset, uint32_t after, ArrayBufferView::ViewType vt,
                     AnyRegister loadedReg, uint32_t cmp = UINT32_MAX)
       : offset_(offset),
@@ -834,17 +839,17 @@ class AsmJSHeapAccess
     bool hasLengthCheck() const { return cmpDelta_ > 0; }
     void *patchLengthAt(uint8_t *code) const { return code + (offset_ - cmpDelta_); }
     void *patchOffsetAt(uint8_t *code) const { return code + (offset_ + opLength_); }
 #endif
 #if defined(JS_CPU_X86) || defined(JS_CPU_X64)
     unsigned opLength() const { return opLength_; }
     bool isLoad() const { return loadedReg_ != UINT8_MAX; }
     bool isFloat32Load() const { return isFloat32Load_; }
-    jit::AnyRegister loadedReg() const { return jit::AnyRegister::FromCode(loadedReg_); }
+    AnyRegister loadedReg() const { return AnyRegister::FromCode(loadedReg_); }
 #endif
 };
 
 typedef Vector<AsmJSHeapAccess, 0, IonAllocPolicy> AsmJSHeapAccessVector;
 
 } // namespace jit
 } // namespace js
 
--- a/js/src/jit/arm/Assembler-arm.cpp
+++ b/js/src/jit/arm/Assembler-arm.cpp
@@ -2557,33 +2557,40 @@ Assembler::patchWrite_NearCall(CodeLocat
     uint8_t *dest = toCall.raw();
     new (inst) InstBLImm(BOffImm(dest - (uint8_t*)inst) , Always);
     // Ensure everyone sees the code that was just written into memory.
 
     AutoFlushCache::updateTop(uintptr_t(inst), 4);
 
 }
 void
-Assembler::patchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, ImmPtr expectedValue)
+Assembler::patchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
+                                   PatchedImmPtr expectedValue)
 {
     Instruction *ptr = (Instruction *) label.raw();
     InstructionIterator iter(ptr);
     Register dest;
     Assembler::RelocStyle rs;
     DebugOnly<const uint32_t *> val = getPtr32Target(&iter, &dest, &rs);
     JS_ASSERT((uint32_t)(const uint32_t *)val == uint32_t(expectedValue.value));
     reinterpret_cast<MacroAssemblerARM*>(dummy)->ma_movPatchable(Imm32(int32_t(newValue.value)),
                                                                  dest, Always, rs, ptr);
     // L_LDR won't cause any instructions to be updated.
     if (rs != L_LDR) {
         AutoFlushCache::updateTop(uintptr_t(ptr), 4);
         AutoFlushCache::updateTop(uintptr_t(ptr->next()), 4);
     }
 }
 
+void
+Assembler::patchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, ImmPtr expectedValue)
+{
+    patchDataWithValueCheck(label, PatchedImmPtr(newValue.value), PatchedImmPtr(expectedValue.value));
+}
+
 // This just stomps over memory with 32 bits of raw data. Its purpose is to
 // overwrite the call of JITed code with 32 bits worth of an offset. This
 // is only meant to function on code that has been invalidated, so it should
 // be totally safe. Since that instruction will never be executed again, an
 // ICache flush should not be necessary.
 void
 Assembler::patchWrite_Imm32(CodeLocationLabel label, Imm32 imm) {
     // Raw is going to be the return address.
--- a/js/src/jit/arm/Assembler-arm.h
+++ b/js/src/jit/arm/Assembler-arm.h
@@ -1256,16 +1256,17 @@ class Assembler
 
     // TODO: this should actually be a pool-like object
     //       It is currently a big hack, and probably shouldn't exist
     js::Vector<CodeLabel, 0, SystemAllocPolicy> codeLabels_;
     js::Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
     js::Vector<BufferOffset, 0, SystemAllocPolicy> tmpJumpRelocations_;
     js::Vector<BufferOffset, 0, SystemAllocPolicy> tmpDataRelocations_;
     js::Vector<BufferOffset, 0, SystemAllocPolicy> tmpPreBarriers_;
+    AsmJSAbsoluteLinkVector asmJSAbsoluteLinks_;
 
     CompactBufferWriter jumpRelocations_;
     CompactBufferWriter dataRelocations_;
     CompactBufferWriter relocations_;
     CompactBufferWriter preBarriers_;
 
     bool enoughMemory_;
 
@@ -1377,16 +1378,23 @@ class Assembler
     bool addCodeLabel(CodeLabel label);
     size_t numCodeLabels() const {
         return codeLabels_.length();
     }
     CodeLabel codeLabel(size_t i) {
         return codeLabels_[i];
     }
 
+    size_t numAsmJSAbsoluteLinks() const {
+        return asmJSAbsoluteLinks_.length();
+    }
+    AsmJSAbsoluteLink asmJSAbsoluteLink(size_t i) const {
+        return asmJSAbsoluteLinks_[i];
+    }
+
     // Size of the instruction stream, in bytes.
     size_t size() const;
     // Size of the jump relocation table, in bytes.
     size_t jumpRelocationTableBytes() const;
     size_t dataRelocationTableBytes() const;
     size_t preBarrierTableBytes() const;
 
     // Size of the data table, in bytes.
@@ -1790,16 +1798,18 @@ class Assembler
     static void writePoolHeader(uint8_t *start, Pool *p, bool isNatural);
     static void writePoolFooter(uint8_t *start, Pool *p, bool isNatural);
     static void writePoolGuard(BufferOffset branch, Instruction *inst, BufferOffset dest);
 
 
     static uint32_t patchWrite_NearCallSize();
     static uint32_t nopSize() { return 4; }
     static void patchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall);
+    static void patchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
+                                        PatchedImmPtr expectedValue);
     static void patchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
                                         ImmPtr expectedValue);
     static void patchWrite_Imm32(CodeLocationLabel label, Imm32 imm);
     static uint32_t alignDoubleArg(uint32_t offset) {
         return (offset+1)&~1;
     }
     static uint8_t *nextInstruction(uint8_t *instruction, uint32_t *count = NULL);
     // Toggle a jmp or cmp emitted by toggledJump().
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -604,17 +604,20 @@ CodeGeneratorARM::visitSoftDivI(LSoftDiv
 
     Label done;
     if (!divICommon(mir, lhs, rhs, output, ins->snapshot(), done))
         return false;
 
     masm.setupAlignedABICall(2);
     masm.passABIArg(lhs);
     masm.passABIArg(rhs);
-    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, __aeabi_idivmod));
+    if (gen->compilingAsmJS())
+        masm.callWithABI(AsmJSImm_aeabi_idivmod);
+    else
+        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, __aeabi_idivmod));
     // idivmod returns the quotient in r0, and the remainder in r1.
     if (!mir->isTruncated()) {
         JS_ASSERT(mir->fallible());
         masm.ma_cmp(r1, Imm32(0));
         if (!bailoutIf(Assembler::NonZero, ins->snapshot()))
             return false;
     }
 
@@ -762,17 +765,20 @@ CodeGeneratorARM::visitSoftModI(LSoftMod
     }
 
     if (!modICommon(mir, lhs, rhs, output, ins->snapshot(), done))
         return false;
 
     masm.setupAlignedABICall(2);
     masm.passABIArg(lhs);
     masm.passABIArg(rhs);
-    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, __aeabi_idivmod));
+    if (gen->compilingAsmJS())
+        masm.callWithABI(AsmJSImm_aeabi_idivmod);
+    else
+        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, __aeabi_idivmod));
 
     // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
     if (mir->isTruncated()) {
         // -0.0|0 == 0
     } else {
         JS_ASSERT(mir->fallible());
         // See if X < 0
         masm.ma_cmp(r1, Imm32(0));
@@ -2032,17 +2038,20 @@ CodeGeneratorARM::visitSoftUDivOrMod(LSo
     masm.ma_b(&notzero, Assembler::NonZero);
     masm.ma_mov(Imm32(0), output);
     masm.ma_b(&afterDiv);
     masm.bind(&notzero);
 
     masm.setupAlignedABICall(2);
     masm.passABIArg(lhs);
     masm.passABIArg(rhs);
-    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, __aeabi_uidivmod));
+    if (gen->compilingAsmJS())
+        masm.callWithABI(AsmJSImm_aeabi_uidivmod);
+    else
+        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, __aeabi_uidivmod));
 
     masm.bind(&afterDiv);
     return true;
 }
 
 bool
 CodeGeneratorARM::visitEffectiveAddress(LEffectiveAddress *ins)
 {
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -1890,16 +1890,29 @@ MacroAssemblerARMCompat::movePtr(const I
     ma_mov(imm, dest);
 }
 void
 MacroAssemblerARMCompat::movePtr(const ImmPtr &imm, const Register &dest)
 {
     movePtr(ImmWord(uintptr_t(imm.value)), dest);
 }
 void
+MacroAssemblerARMCompat::movePtr(const AsmJSImmPtr &imm, const Register &dest)
+{
+    RelocStyle rs;
+    if (hasMOVWT())
+        rs = L_MOVWT;
+    else
+        rs = L_LDR;
+
+    AsmJSAbsoluteLink link(nextOffset().getOffset(), imm.kind());
+    enoughMemory_ &= asmJSAbsoluteLinks_.append(link);
+    ma_movPatchable(Imm32(-1), dest, Always, rs);
+}
+void
 MacroAssemblerARMCompat::load8ZeroExtend(const Address &address, const Register &dest)
 {
     ma_dataTransferN(IsLoad, 8, false, address.base, Imm32(address.offset), dest);
 }
 
 void
 MacroAssemblerARMCompat::load8ZeroExtend(const BaseIndex &src, const Register &dest)
 {
@@ -2033,16 +2046,22 @@ MacroAssemblerARMCompat::loadPtr(const B
     ma_ldr(DTRAddr(base, DtrRegImmShift(src.index, LSL, scale)), dest);
 }
 void
 MacroAssemblerARMCompat::loadPtr(const AbsoluteAddress &address, const Register &dest)
 {
     movePtr(ImmWord(uintptr_t(address.addr)), ScratchRegister);
     loadPtr(Address(ScratchRegister, 0x0), dest);
 }
+void
+MacroAssemblerARMCompat::loadPtr(const AsmJSAbsoluteAddress &address, const Register &dest)
+{
+    movePtr(AsmJSImmPtr(address.kind()), ScratchRegister);
+    loadPtr(Address(ScratchRegister, 0x0), dest);
+}
 
 Operand payloadOf(const Address &address) {
     return Operand(address.base, address.offset);
 }
 Operand tagOf(const Address &address) {
     return Operand(address.base, address.offset + 4);
 }
 
@@ -3561,16 +3580,25 @@ MacroAssemblerARMCompat::callWithABI(voi
 {
     uint32_t stackAdjust;
     callWithABIPre(&stackAdjust);
     ma_call(ImmPtr(fun));
     callWithABIPost(stackAdjust, result);
 }
 
 void
+MacroAssemblerARMCompat::callWithABI(AsmJSImmPtr imm, Result result)
+{
+    uint32_t stackAdjust;
+    callWithABIPre(&stackAdjust);
+    call(imm);
+    callWithABIPost(stackAdjust, result);
+}
+
+void
 MacroAssemblerARMCompat::callWithABI(const Address &fun, Result result)
 {
     // Load the callee in r12, no instruction between the ldr and call
     // should clobber it. Note that we can't use fun.base because it may
     // be one of the IntArg registers clobbered before the call.
     ma_ldr(fun, r12);
     uint32_t stackAdjust;
     callWithABIPre(&stackAdjust);
--- a/js/src/jit/arm/MacroAssembler-arm.h
+++ b/js/src/jit/arm/MacroAssembler-arm.h
@@ -539,16 +539,20 @@ class MacroAssemblerARMCompat : public M
     void call(ImmWord imm) {
         call(ImmPtr((void*)imm.value));
     }
     void call(ImmPtr imm) {
         BufferOffset bo = m_buffer.nextOffset();
         addPendingJump(bo, imm, Relocation::HARDCODED);
         ma_call(imm);
     }
+    void call(AsmJSImmPtr imm) {
+        movePtr(imm, CallReg);
+        call(CallReg);
+    }
     void call(IonCode *c) {
         BufferOffset bo = m_buffer.nextOffset();
         addPendingJump(bo, ImmPtr(c->raw()), Relocation::IONCODE);
         RelocStyle rs;
         if (hasMOVWT())
             rs = L_MOVWT;
         else
             rs = L_LDR;
@@ -930,16 +934,20 @@ class MacroAssemblerARMCompat : public M
         branchPtr(cond, lhs, ScratchRegister, label);
     }
     void branchPtr(Condition cond, Register lhs, ImmWord imm, Label *label) {
         branch32(cond, lhs, Imm32(imm.value), label);
     }
     void branchPtr(Condition cond, Register lhs, ImmPtr imm, Label *label) {
         branchPtr(cond, lhs, ImmWord(uintptr_t(imm.value)), label);
     }
+    void branchPtr(Condition cond, Register lhs, AsmJSImmPtr imm, Label *label) {
+        movePtr(imm, ScratchRegister);
+        branchPtr(cond, lhs, ScratchRegister, label);
+    }
     void decBranchPtr(Condition cond, const Register &lhs, Imm32 imm, Label *label) {
         subPtr(imm, lhs);
         branch32(cond, lhs, Imm32(0), label);
     }
     void moveValue(const Value &val, Register type, Register data);
 
     CodeOffsetJump jumpWithPatch(RepatchLabel *label, Condition cond = Always);
     template <typename T>
@@ -962,18 +970,23 @@ class MacroAssemblerARMCompat : public M
         ma_ldr(addr, secondScratchReg_);
         ma_cmp(secondScratchReg_, ptr);
         ma_b(label, cond);
     }
     void branchPtr(Condition cond, Address addr, ImmPtr ptr, Label *label) {
         branchPtr(cond, addr, ImmWord(uintptr_t(ptr.value)), label);
     }
     void branchPtr(Condition cond, const AbsoluteAddress &addr, const Register &ptr, Label *label) {
-        loadPtr(addr, secondScratchReg_); // ma_cmp will use the scratch register.
-        ma_cmp(secondScratchReg_, ptr);
+        loadPtr(addr, ScratchRegister);
+        ma_cmp(ScratchRegister, ptr);
+        ma_b(label, cond);
+    }
+    void branchPtr(Condition cond, const AsmJSAbsoluteAddress &addr, const Register &ptr, Label *label) {
+        loadPtr(addr, ScratchRegister);
+        ma_cmp(ScratchRegister, ptr);
         ma_b(label, cond);
     }
     void branch32(Condition cond, const AbsoluteAddress &lhs, Imm32 rhs, Label *label) {
         loadPtr(lhs, secondScratchReg_); // ma_cmp will use the scratch register.
         ma_cmp(secondScratchReg_, rhs);
         ma_b(label, cond);
     }
     void branch32(Condition cond, const AbsoluteAddress &lhs, const Register &rhs, Label *label) {
@@ -1203,16 +1216,17 @@ class MacroAssemblerARMCompat : public M
     void addPtr(const Address &src, Register dest);
 
     void move32(const Imm32 &imm, const Register &dest);
     void move32(const Register &src, const Register &dest);
 
     void movePtr(const Register &src, const Register &dest);
     void movePtr(const ImmWord &imm, const Register &dest);
     void movePtr(const ImmPtr &imm, const Register &dest);
+    void movePtr(const AsmJSImmPtr &imm, const Register &dest);
     void movePtr(const ImmGCPtr &imm, const Register &dest);
 
     void load8SignExtend(const Address &address, const Register &dest);
     void load8SignExtend(const BaseIndex &src, const Register &dest);
 
     void load8ZeroExtend(const Address &address, const Register &dest);
     void load8ZeroExtend(const BaseIndex &src, const Register &dest);
 
@@ -1224,16 +1238,17 @@ class MacroAssemblerARMCompat : public M
 
     void load32(const Address &address, const Register &dest);
     void load32(const BaseIndex &address, const Register &dest);
     void load32(const AbsoluteAddress &address, const Register &dest);
 
     void loadPtr(const Address &address, const Register &dest);
     void loadPtr(const BaseIndex &src, const Register &dest);
     void loadPtr(const AbsoluteAddress &address, const Register &dest);
+    void loadPtr(const AsmJSAbsoluteAddress &address, const Register &dest);
 
     void loadPrivate(const Address &address, const Register &dest);
 
     void loadDouble(const Address &addr, const FloatRegister &dest);
     void loadDouble(const BaseIndex &src, const FloatRegister &dest);
 
     // Load a float value into a register, then expand it to a double.
     void loadFloatAsDouble(const Address &addr, const FloatRegister &dest);
@@ -1378,16 +1393,17 @@ class MacroAssemblerARMCompat : public M
 
   private:
     void callWithABIPre(uint32_t *stackAdjust);
     void callWithABIPost(uint32_t stackAdjust, Result result);
 
   public:
     // Emits a call to a C/C++ function, resolving all argument moves.
     void callWithABI(void *fun, Result result = GENERAL);
+    void callWithABI(AsmJSImmPtr imm, Result result = GENERAL);
     void callWithABI(const Address &fun, Result result = GENERAL);
 
     CodeOffsetLabel labelForPatch() {
         return CodeOffsetLabel(nextOffset().getOffset());
     }
 
     void computeEffectiveAddress(const Address &address, Register dest) {
         ma_add(address.base, Imm32(address.offset), dest, NoSetCond);
--- a/js/src/jit/shared/Assembler-shared.h
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -99,84 +99,131 @@ struct Imm32
 struct ImmWord
 {
     uintptr_t value;
 
     explicit ImmWord(uintptr_t value) : value(value)
     { }
 };
 
+#ifdef DEBUG
+static inline bool
+IsCompilingAsmJS()
+{
+    // asm.js compilation pushes an IonContext with a null JSCompartment.
+    IonContext *ictx = MaybeGetIonContext();
+    return ictx && ictx->compartment == NULL;
+}
+#endif
+
 // Pointer to be embedded as an immediate in an instruction.
 struct ImmPtr
 {
     void *value;
 
     explicit ImmPtr(const void *value) : value(const_cast<void*>(value))
-    { }
+    {
+        // To make code serialization-safe, asm.js compilation should only
+        // compile pointer immediates using AsmJSImmPtr.
+        JS_ASSERT(!IsCompilingAsmJS());
+    }
 
     template <class R>
     explicit ImmPtr(R (*pf)())
       : value(JS_FUNC_TO_DATA_PTR(void *, pf))
-    { }
+    {
+        JS_ASSERT(!IsCompilingAsmJS());
+    }
 
     template <class R, class A1>
     explicit ImmPtr(R (*pf)(A1))
       : value(JS_FUNC_TO_DATA_PTR(void *, pf))
-    { }
+    {
+        JS_ASSERT(!IsCompilingAsmJS());
+    }
 
     template <class R, class A1, class A2>
     explicit ImmPtr(R (*pf)(A1, A2))
       : value(JS_FUNC_TO_DATA_PTR(void *, pf))
-    { }
+    {
+        JS_ASSERT(!IsCompilingAsmJS());
+    }
 
     template <class R, class A1, class A2, class A3>
     explicit ImmPtr(R (*pf)(A1, A2, A3))
       : value(JS_FUNC_TO_DATA_PTR(void *, pf))
-    { }
+    {
+        JS_ASSERT(!IsCompilingAsmJS());
+    }
 
     template <class R, class A1, class A2, class A3, class A4>
     explicit ImmPtr(R (*pf)(A1, A2, A3, A4))
       : value(JS_FUNC_TO_DATA_PTR(void *, pf))
+    {
+        JS_ASSERT(!IsCompilingAsmJS());
+    }
+
+};
+
+// The same as ImmPtr except that the intention is to patch this
+// instruction. The initial value of the immediate is 'value' and this value
+// is either clobbered or used in the patching process.
+struct PatchedImmPtr {
+    void *value;
+
+    explicit PatchedImmPtr()
+      : value(NULL)
     { }
-
+    explicit PatchedImmPtr(const void *value)
+      : value(const_cast<void*>(value))
+    { }
 };
 
 // Used for immediates which require relocation.
 struct ImmGCPtr
 {
     uintptr_t value;
 
     explicit ImmGCPtr(const gc::Cell *ptr) : value(reinterpret_cast<uintptr_t>(ptr))
     {
         JS_ASSERT(!IsPoisonedPtr(ptr));
         JS_ASSERT_IF(ptr, ptr->isTenured());
+
+        // asm.js shouldn't be creating GC things
+        JS_ASSERT(!IsCompilingAsmJS());
     }
 
   protected:
     ImmGCPtr() : value(0) {}
 };
 
 // Used for immediates which require relocation and may be traced during minor GC.
 struct ImmMaybeNurseryPtr : public ImmGCPtr
 {
     explicit ImmMaybeNurseryPtr(gc::Cell *ptr)
     {
         this->value = reinterpret_cast<uintptr_t>(ptr);
         JS_ASSERT(!IsPoisonedPtr(ptr));
+
+        // asm.js shouldn't be creating GC things
+        JS_ASSERT(!IsCompilingAsmJS());
     }
 };
 
 // Pointer to be embedded as an immediate that is loaded/stored from by an
 // instruction.
 struct AbsoluteAddress {
     void *addr;
 
     explicit AbsoluteAddress(const void *addr)
       : addr(const_cast<void*>(addr))
-    { }
+    {
+        // asm.js code must use AsmJSAbsoluteAddress, not raw absolute addresses
+        JS_ASSERT(!IsCompilingAsmJS());
+    }
 
     AbsoluteAddress offset(ptrdiff_t delta) {
         return AbsoluteAddress(((uint8_t *) addr) + delta);
     }
 };
 
 // The same as AbsoluteAddress except that the intention is to patch this
 // instruction. The initial value of the immediate is 'addr' and this value is
@@ -620,13 +667,82 @@ struct AsmJSGlobalAccess
 
     AsmJSGlobalAccess(CodeOffsetLabel patchAt, unsigned globalDataOffset)
       : patchAt(patchAt), globalDataOffset(globalDataOffset)
     {}
 };
 
 typedef Vector<AsmJSGlobalAccess, 0, IonAllocPolicy> AsmJSGlobalAccessVector;
 
+// Describes the intended pointee of an immediate to be embedded in asm.js
+// code. Because the pointee is a symbolic enum value rather than a raw address,
+// it can be re-patched after deserialization, when global addresses may differ.
+enum AsmJSImmKind
+{
+    AsmJSImm_Runtime,
+    AsmJSImm_StackLimit,
+    AsmJSImm_ReportOverRecursed,
+    AsmJSImm_HandleExecutionInterrupt,
+    AsmJSImm_InvokeFromAsmJS_Ignore,
+    AsmJSImm_InvokeFromAsmJS_ToInt32,
+    AsmJSImm_InvokeFromAsmJS_ToNumber,
+    AsmJSImm_CoerceInPlace_ToInt32,
+    AsmJSImm_CoerceInPlace_ToNumber,
+    AsmJSImm_ToInt32,
+    AsmJSImm_EnableActivationFromAsmJS,
+    AsmJSImm_DisableActivationFromAsmJS,
+#if defined(JS_CPU_ARM)
+    AsmJSImm_aeabi_idivmod,
+    AsmJSImm_aeabi_uidivmod,
+#endif
+    AsmJSImm_ModD,
+    AsmJSImm_SinD,
+    AsmJSImm_CosD,
+    AsmJSImm_TanD,
+    AsmJSImm_ASinD,
+    AsmJSImm_ACosD,
+    AsmJSImm_ATanD,
+    AsmJSImm_CeilD,
+    AsmJSImm_FloorD,
+    AsmJSImm_ExpD,
+    AsmJSImm_LogD,
+    AsmJSImm_PowD,
+    AsmJSImm_ATan2D
+};
+
+// Pointer to be embedded as an immediate in asm.js code.
+class AsmJSImmPtr
+{
+    AsmJSImmKind kind_;
+  public:
+    AsmJSImmKind kind() const { return kind_; }
+    AsmJSImmPtr(AsmJSImmKind kind) : kind_(kind) { JS_ASSERT(IsCompilingAsmJS()); }
+    AsmJSImmPtr() {}
+};
+
+// Pointer to be embedded as an immediate that an instruction in asm.js code
+// loads from or stores to.
+class AsmJSAbsoluteAddress
+{
+    AsmJSImmKind kind_;
+  public:
+    AsmJSImmKind kind() const { return kind_; }
+    AsmJSAbsoluteAddress(AsmJSImmKind kind) : kind_(kind) { JS_ASSERT(IsCompilingAsmJS()); }
+    AsmJSAbsoluteAddress() {}
+};
+
+// Represents an instruction to be patched and the intended pointee. These
+// links are accumulated in the MacroAssembler, but patching is done outside
+// the MacroAssembler (in AsmJSModule::staticallyLink).
+struct AsmJSAbsoluteLink
+{
+    AsmJSAbsoluteLink(CodeOffsetLabel patchAt, AsmJSImmKind target)
+      : patchAt(patchAt), target(target) {}
+    CodeOffsetLabel patchAt;
+    AsmJSImmKind target;
+};
+
+typedef Vector<AsmJSAbsoluteLink, 0, SystemAllocPolicy> AsmJSAbsoluteLinkVector;
 
 } // namespace jit
 } // namespace js
 
 #endif /* jit_shared_Assembler_shared_h */
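
The record-then-patch scheme these new types implement is easiest to see end to end: the assembler writes a placeholder where the pointer belongs and records an (offset, kind) AsmJSAbsoluteLink; later, outside the MacroAssembler, AsmJSModule::staticallyLink maps each kind to a live address and overwrites the placeholder. Below is a minimal, self-contained sketch of that idea; every name in it (ToyImmKind, ToyLink, ResolveKind) is hypothetical, and a plain byte vector stands in for generated machine code.

    // Toy model of the AsmJSAbsoluteLink record-then-patch scheme (illustrative
    // only; the real code patches machine instructions, not a byte vector).
    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <vector>

    enum ToyImmKind { Toy_Runtime, Toy_StackLimit };

    struct ToyLink {
        size_t patchAt;     // offset just *after* the embedded immediate
        ToyImmKind target;  // symbolic pointee, resolved at link time
    };

    // Stand-in for mapping a symbolic kind to a live address after deserialization.
    static void *ResolveKind(ToyImmKind kind, void *runtime, void *stackLimit) {
        return kind == Toy_Runtime ? runtime : stackLimit;
    }

    int main() {
        std::vector<uint8_t> code(64, 0);
        std::vector<ToyLink> links;

        // "Assembly": emit a placeholder immediate and remember where it ends.
        size_t immStart = 8;
        uintptr_t placeholder = uintptr_t(-1);
        memcpy(&code[immStart], &placeholder, sizeof placeholder);
        links.push_back(ToyLink{immStart + sizeof placeholder, Toy_StackLimit});

        // "Static linking": overwrite each placeholder with its resolved address.
        uintptr_t runtime = 0, stackLimit = 0;
        for (const ToyLink &link : links) {
            void *addr = ResolveKind(link.target, &runtime, &stackLimit);
            memcpy(&code[link.patchAt - sizeof addr], &addr, sizeof addr);
        }

        void *patched;
        memcpy(&patched, &code[immStart], sizeof patched);
        printf("patched immediate: %p (expected %p)\n", patched, (void *)&stackLimit);
        return 0;
    }

The patchAt-minus-pointer-size arithmetic mirrors the convention, visible in patchDataWithValueCheck below, that a patch label points just past the embedded data.
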
--- a/js/src/jit/shared/Assembler-x86-shared.h
+++ b/js/src/jit/shared/Assembler-x86-shared.h
@@ -126,16 +126,17 @@ class AssemblerX86Shared
           : offset(offset),
             target(target),
             kind(kind)
         { }
     };
 
     Vector<CodeLabel, 0, SystemAllocPolicy> codeLabels_;
     Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
+    AsmJSAbsoluteLinkVector asmJSAbsoluteLinks_;
     CompactBufferWriter jumpRelocations_;
     CompactBufferWriter dataRelocations_;
     CompactBufferWriter preBarriers_;
     bool enoughMemory_;
 
     void writeDataRelocation(const Value &val) {
         if (val.isMarkable()) {
             JS_ASSERT(static_cast<gc::Cell*>(val.toGCThing())->isTenured());
@@ -287,16 +288,23 @@ class AssemblerX86Shared
     }
     size_t numCodeLabels() const {
         return codeLabels_.length();
     }
     CodeLabel codeLabel(size_t i) {
         return codeLabels_[i];
     }
 
+    size_t numAsmJSAbsoluteLinks() const {
+        return asmJSAbsoluteLinks_.length();
+    }
+    const AsmJSAbsoluteLink &asmJSAbsoluteLink(size_t i) const {
+        return asmJSAbsoluteLinks_[i];
+    }
+
     // Size of the instruction stream, in bytes.
     size_t size() const {
         return masm.size();
     }
     // Size of the jump relocation table, in bytes.
     size_t jumpRelocationTableBytes() const {
         return jumpRelocations_.length();
     }
@@ -1577,23 +1585,26 @@ class AssemblerX86Shared
         JS_ASSERT(int32_t(offset) == offset);
         *((int32_t *) (start + 1)) = offset;
     }
 
     static void patchWrite_Imm32(CodeLocationLabel dataLabel, Imm32 toWrite) {
         *((int32_t *) dataLabel.raw() - 1) = toWrite.value;
     }
 
-    static void patchDataWithValueCheck(CodeLocationLabel data, ImmPtr newData,
-                                        ImmPtr expectedData) {
+    static void patchDataWithValueCheck(CodeLocationLabel data, PatchedImmPtr newData,
+                                        PatchedImmPtr expectedData) {
         // The pointer given is a pointer to *after* the data.
         uintptr_t *ptr = ((uintptr_t *) data.raw()) - 1;
         JS_ASSERT(*ptr == (uintptr_t)expectedData.value);
         *ptr = (uintptr_t)newData.value;
     }
+    static void patchDataWithValueCheck(CodeLocationLabel data, ImmPtr newData, ImmPtr expectedData) {
+        patchDataWithValueCheck(data, PatchedImmPtr(newData.value), PatchedImmPtr(expectedData.value));
+    }
     static uint32_t nopSize() {
         return 1;
     }
     static uint8_t *nextInstruction(uint8_t *cur, uint32_t *count) {
         MOZ_ASSUME_UNREACHABLE("nextInstruction NYI on x86");
     }
 
     // Toggle a jmp or cmp emitted by toggledJump().
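
The patchDataWithValueCheck variants above rely on the convention that the CodeLocationLabel points just *after* the embedded data, which is why they step one word back before checking the expected value and writing the new one. A tiny self-contained illustration of that back-patching arithmetic (hypothetical names, byte buffer instead of real code):

    // Toy version of the "label points past the data" patching convention.
    #include <cassert>
    #include <cstdint>

    static void PatchWordBeforeLabel(uint8_t *labelAfterData, uintptr_t expected, uintptr_t newValue) {
        uintptr_t *slot = reinterpret_cast<uintptr_t *>(labelAfterData) - 1;
        assert(*slot == expected);  // mirrors the JS_ASSERT value check
        *slot = newValue;
    }

    int main() {
        alignas(uintptr_t) uint8_t buf[2 * sizeof(uintptr_t)] = {};
        uintptr_t *imm = reinterpret_cast<uintptr_t *>(buf);
        *imm = uintptr_t(-1);                          // placeholder written by the assembler
        PatchWordBeforeLabel(buf + sizeof(uintptr_t),  // "label" just past the immediate
                             uintptr_t(-1), 0x1234);
        assert(*imm == 0x1234);
        return 0;
    }
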
--- a/js/src/jit/shared/CodeGenerator-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -701,17 +701,20 @@ CodeGeneratorShared::visitOutOfLineTrunc
 {
     FloatRegister src = ool->src();
     Register dest = ool->dest();
 
     saveVolatile(dest);
 
     masm.setupUnalignedABICall(1, dest);
     masm.passABIArg(src);
-    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, js::ToInt32));
+    if (gen->compilingAsmJS())
+        masm.callWithABI(AsmJSImm_ToInt32);
+    else
+        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, js::ToInt32));
     masm.storeCallResult(dest);
 
     restoreVolatile(dest);
 
     masm.jump(ool->rejoin());
     return true;
 }
 
--- a/js/src/jit/x64/Assembler-x64.h
+++ b/js/src/jit/x64/Assembler-x64.h
@@ -480,16 +480,21 @@ class Assembler : public AssemblerX86Sha
     }
 
     void mov(ImmWord word, const Register &dest) {
         movq(word, dest);
     }
     void mov(ImmPtr imm, const Register &dest) {
         movq(imm, dest);
     }
+    void mov(AsmJSImmPtr imm, const Register &dest) {
+        masm.movq_i64r(-1, dest.code());
+        AsmJSAbsoluteLink link(masm.currentOffset(), imm.kind());
+        enoughMemory_ &= asmJSAbsoluteLinks_.append(link);
+    }
     void mov(const Imm32 &imm32, const Register &dest) {
         movl(imm32, dest);
     }
     void mov(const Operand &src, const Register &dest) {
         movq(src, dest);
     }
     void mov(const Register &src, const Operand &dest) {
         movq(src, dest);
--- a/js/src/jit/x64/MacroAssembler-x64.cpp
+++ b/js/src/jit/x64/MacroAssembler-x64.cpp
@@ -232,16 +232,25 @@ void
 MacroAssemblerX64::callWithABI(void *fun, Result result)
 {
     uint32_t stackAdjust;
     callWithABIPre(&stackAdjust);
     call(ImmPtr(fun));
     callWithABIPost(stackAdjust, result);
 }
 
+void
+MacroAssemblerX64::callWithABI(AsmJSImmPtr imm, Result result)
+{
+    uint32_t stackAdjust;
+    callWithABIPre(&stackAdjust);
+    call(imm);
+    callWithABIPost(stackAdjust, result);
+}
+
 static bool
 IsIntArgReg(Register reg)
 {
     for (uint32_t i = 0; i < NumIntArgRegs; i++) {
         if (IntArgRegs[i] == reg)
             return true;
     }
 
--- a/js/src/jit/x64/MacroAssembler-x64.h
+++ b/js/src/jit/x64/MacroAssembler-x64.h
@@ -106,16 +106,20 @@ class MacroAssemblerX64 : public MacroAs
     /////////////////////////////////////////////////////////////////
     void call(ImmWord target) {
         mov(target, rax);
         call(rax);
     }
     void call(ImmPtr target) {
         call(ImmWord(uintptr_t(target.value)));
     }
+    void call(AsmJSImmPtr target) {
+        mov(target, rax);
+        call(rax);
+    }
 
     // Refers to the upper 32 bits of a 64-bit Value operand.
     // On x86_64, the upper 32 bits do not necessarily only contain the type.
     Operand ToUpper32(Operand base) {
         switch (base.kind()) {
           case Operand::MEM_REG_DISP:
             return Operand(Register::FromCode(base.base()), base.disp() + 4);
 
@@ -540,16 +544,21 @@ class MacroAssemblerX64 : public MacroAs
         JS_ASSERT(ptr != ScratchReg);
         if (JSC::X86Assembler::isAddressImmediate(addr.addr)) {
             branchPtr(cond, Operand(addr), ptr, label);
         } else {
             mov(ImmPtr(addr.addr), ScratchReg);
             branchPtr(cond, Operand(ScratchReg, 0x0), ptr, label);
         }
     }
+    void branchPtr(Condition cond, const AsmJSAbsoluteAddress &addr, const Register &ptr, Label *label) {
+        JS_ASSERT(ptr != ScratchReg);
+        mov(AsmJSImmPtr(addr.kind()), ScratchReg);
+        branchPtr(cond, Operand(ScratchReg, 0x0), ptr, label);
+    }
 
     void branchPrivatePtr(Condition cond, Address lhs, ImmPtr ptr, Label *label) {
         branchPtr(cond, lhs, ImmWord(uintptr_t(ptr.value) >> 1), label);
     }
 
     void branchPrivatePtr(Condition cond, Address lhs, Register ptr, Label *label) {
         if (ptr != ScratchReg)
             movePtr(ptr, ScratchReg);
@@ -606,16 +615,19 @@ class MacroAssemblerX64 : public MacroAs
         movq(src, dest);
     }
     void movePtr(ImmWord imm, Register dest) {
         mov(imm, dest);
     }
     void movePtr(ImmPtr imm, Register dest) {
         mov(imm, dest);
     }
+    void movePtr(AsmJSImmPtr imm, const Register &dest) {
+        mov(imm, dest);
+    }
     void movePtr(ImmGCPtr imm, Register dest) {
         movq(imm, dest);
     }
     void loadPtr(const AbsoluteAddress &address, Register dest) {
         if (JSC::X86Assembler::isAddressImmediate(address.addr)) {
             movq(Operand(address), dest);
         } else {
             mov(ImmPtr(address.addr), ScratchReg);
@@ -1132,16 +1144,17 @@ class MacroAssemblerX64 : public MacroAs
 
   private:
     void callWithABIPre(uint32_t *stackAdjust);
     void callWithABIPost(uint32_t stackAdjust, Result result);
 
   public:
     // Emits a call to a C/C++ function, resolving all argument moves.
     void callWithABI(void *fun, Result result = GENERAL);
+    void callWithABI(AsmJSImmPtr imm, Result result = GENERAL);
     void callWithABI(Address fun, Result result = GENERAL);
 
     void handleFailureWithHandler(void *handler);
     void handleFailureWithHandlerTail();
 
     void makeFrameDescriptor(Register frameSizeReg, FrameType type) {
         shlq(Imm32(FRAMESIZE_SHIFT), frameSizeReg);
         orq(Imm32(type), frameSizeReg);
--- a/js/src/jit/x86/Assembler-x86.h
+++ b/js/src/jit/x86/Assembler-x86.h
@@ -226,16 +226,21 @@ class Assembler : public AssemblerX86Sha
         movl(ImmWord(uintptr_t(imm.value)), dest);
     }
     void mov(ImmWord imm, Register dest) {
         movl(imm, dest);
     }
     void mov(ImmPtr imm, Register dest) {
         movl(imm, dest);
     }
+    void mov(AsmJSImmPtr imm, Register dest) {
+        masm.movl_i32r(-1, dest.code());
+        AsmJSAbsoluteLink link(masm.currentOffset(), imm.kind());
+        enoughMemory_ &= asmJSAbsoluteLinks_.append(link);
+    }
     void mov(Imm32 imm, Register dest) {
         movl(imm, dest);
     }
     void mov(const Operand &src, const Register &dest) {
         movl(src, dest);
     }
     void mov(const Register &src, const Operand &dest) {
         movl(src, dest);
@@ -286,16 +291,21 @@ class Assembler : public AssemblerX86Sha
           case Operand::MEM_ADDRESS32:
             masm.cmpl_im(imm.value, op.address());
             writeDataRelocation(imm);
             break;
           default:
             MOZ_ASSUME_UNREACHABLE("unexpected operand kind");
         }
     }
+    void cmpl(const AsmJSAbsoluteAddress &lhs, const Register &rhs) {
+        masm.cmpl_rm_force32(rhs.code(), (void*)-1);
+        AsmJSAbsoluteLink link(masm.currentOffset(), lhs.kind());
+        enoughMemory_ &= asmJSAbsoluteLinks_.append(link);
+    }
     CodeOffsetLabel cmplWithPatch(const Register &lhs, Imm32 rhs) {
         masm.cmpl_ir_force32(rhs.value, lhs.code());
         return masm.currentOffset();
     }
 
     void jmp(ImmPtr target, Relocation::Kind reloc = Relocation::HARDCODED) {
         JmpSrc src = masm.jmp();
         addPendingJump(src, target, reloc);
@@ -318,16 +328,23 @@ class Assembler : public AssemblerX86Sha
     }
     void call(ImmWord target) {
         call(ImmPtr((void*)target.value));
     }
     void call(ImmPtr target) {
         JmpSrc src = masm.call();
         addPendingJump(src, target, Relocation::HARDCODED);
     }
+    void call(AsmJSImmPtr target) {
+        // Moving the target into a register first is suboptimal. To fix this
+        // (and use a single call-immediate instruction), AsmJSAbsoluteLink
+        // would need a new kind of patch: a relative offset to an absolute address.
+        mov(target, eax);
+        call(eax);
+    }
 
     // Emit a CALL or CMP (nop) instruction. ToggleCall can be used to patch
     // this instruction.
     CodeOffsetLabel toggledCall(IonCode *target, bool enabled) {
         CodeOffsetLabel offset(size());
         JmpSrc src = enabled ? masm.call() : masm.cmp_eax();
         addPendingJump(src, ImmPtr(target->raw()), Relocation::IONCODE);
         JS_ASSERT(size() - offset.offset() == ToggledCallSize());
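
Regarding the suboptimality note in call(AsmJSImmPtr) above: an x86 call with an immediate (opcode E8) encodes a rel32 displacement relative to the end of the instruction, so patching such a call to reach an absolute, link-time-resolved target means computing the displacement at patch time instead of writing a pointer. A hypothetical sketch of the arithmetic that new kind of link record would need (not part of this patch):

    // Illustrative rel32 computation for a patched direct call (hypothetical).
    #include <cassert>
    #include <cstdint>

    static int32_t Rel32For(uintptr_t callee, uintptr_t endOfCallInstruction) {
        intptr_t delta = intptr_t(callee) - intptr_t(endOfCallInstruction);
        assert(delta == int32_t(delta));  // must fit in 32 bits; always true on x86-32
        return int32_t(delta);
    }

    int main() {
        uintptr_t callSiteEnd = 0x1000;  // hypothetical address just after the call insn
        uintptr_t callee      = 0x3000;  // hypothetical absolute target from the linker
        assert(callSiteEnd + Rel32For(callee, callSiteEnd) == callee);
        return 0;
    }
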
--- a/js/src/jit/x86/CodeGenerator-x86.cpp
+++ b/js/src/jit/x86/CodeGenerator-x86.cpp
@@ -854,17 +854,20 @@ CodeGeneratorX86::visitOutOfLineTruncate
     }
 
     masm.bind(&fail);
     {
         saveVolatile(output);
 
         masm.setupUnalignedABICall(1, output);
         masm.passABIArg(input);
-        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, js::ToInt32));
+        if (gen->compilingAsmJS())
+            masm.callWithABI(AsmJSImm_ToInt32);
+        else
+            masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, js::ToInt32));
         masm.storeCallResult(output);
 
         restoreVolatile(output);
     }
 
     masm.jump(ool->rejoin());
     return true;
 }
--- a/js/src/jit/x86/MacroAssembler-x86.cpp
+++ b/js/src/jit/x86/MacroAssembler-x86.cpp
@@ -251,16 +251,25 @@ MacroAssemblerX86::callWithABI(void *fun
 {
     uint32_t stackAdjust;
     callWithABIPre(&stackAdjust);
     call(ImmPtr(fun));
     callWithABIPost(stackAdjust, result);
 }
 
 void
+MacroAssemblerX86::callWithABI(AsmJSImmPtr fun, Result result)
+{
+    uint32_t stackAdjust;
+    callWithABIPre(&stackAdjust);
+    call(fun);
+    callWithABIPost(stackAdjust, result);
+}
+
+void
 MacroAssemblerX86::callWithABI(const Address &fun, Result result)
 {
     uint32_t stackAdjust;
     callWithABIPre(&stackAdjust);
     call(Operand(fun));
     callWithABIPost(stackAdjust, result);
 }
 
--- a/js/src/jit/x86/MacroAssembler-x86.h
+++ b/js/src/jit/x86/MacroAssembler-x86.h
@@ -553,16 +553,22 @@ class MacroAssemblerX86 : public MacroAs
         cmpl(Operand(lhs), rhs);
         j(cond, label);
     }
     void branch32(Condition cond, const AbsoluteAddress &lhs, Register rhs, Label *label) {
         cmpl(Operand(lhs), rhs);
         j(cond, label);
     }
 
+    // Specialization for AsmJSAbsoluteAddress.
+    void branchPtr(Condition cond, AsmJSAbsoluteAddress lhs, Register ptr, Label *label) {
+        cmpl(lhs, ptr);
+        j(cond, label);
+    }
+
     template <typename T, typename S>
     void branchPtr(Condition cond, T lhs, S ptr, Label *label) {
         cmpl(Operand(lhs), ptr);
         j(cond, label);
     }
 
     void branchPrivatePtr(Condition cond, const Address &lhs, ImmPtr ptr, Label *label) {
         branchPtr(cond, lhs, ptr, label);
@@ -619,16 +625,19 @@ class MacroAssemblerX86 : public MacroAs
     }
 
     void movePtr(ImmWord imm, Register dest) {
         movl(Imm32(imm.value), dest);
     }
     void movePtr(ImmPtr imm, Register dest) {
         movl(imm, dest);
     }
+    void movePtr(AsmJSImmPtr imm, Register dest) {
+        mov(imm, dest);
+    }
     void movePtr(ImmGCPtr imm, Register dest) {
         movl(imm, dest);
     }
     void loadPtr(const Address &address, Register dest) {
         movl(Operand(address), dest);
     }
     void loadPtr(const Operand &src, Register dest) {
         movl(src, dest);
@@ -1023,16 +1032,17 @@ class MacroAssemblerX86 : public MacroAs
 
   private:
     void callWithABIPre(uint32_t *stackAdjust);
     void callWithABIPost(uint32_t stackAdjust, Result result);
 
   public:
     // Emits a call to a C/C++ function, resolving all argument moves.
     void callWithABI(void *fun, Result result = GENERAL);
+    void callWithABI(AsmJSImmPtr fun, Result result = GENERAL);
     void callWithABI(const Address &fun, Result result = GENERAL);
 
     // Used from within an Exit frame to handle a pending exception.
     void handleFailureWithHandler(void *handler);
     void handleFailureWithHandlerTail();
 
     void makeFrameDescriptor(Register frameSizeReg, FrameType type) {
         shll(Imm32(FRAMESIZE_SHIFT), frameSizeReg);
--- a/js/src/jscntxt.h
+++ b/js/src/jscntxt.h
@@ -263,16 +263,18 @@ struct ThreadSafeContext : ContextFriend
 
     // Accessors for immutable runtime data.
     JSAtomState &names() { return runtime_->atomState; }
     StaticStrings &staticStrings() { return runtime_->staticStrings; }
     PropertyName *emptyString() { return runtime_->emptyString; }
     FreeOp *defaultFreeOp() { return runtime_->defaultFreeOp(); }
     bool useHelperThreads() { return runtime_->useHelperThreads(); }
     size_t helperThreadCount() { return runtime_->helperThreadCount(); }
+    void *runtimeAddressForJit() { return runtime_; }
+    void *stackLimitAddress(StackKind kind) { return &runtime_->mainThread.nativeStackLimit[kind]; }
 
     // GCs cannot happen while non-main threads are running.
     uint64_t gcNumber() { return runtime_->gcNumber; }
     size_t gcSystemPageSize() { return runtime_->gcSystemPageSize; }
     bool isHeapBusy() { return runtime_->isHeapBusy(); }
     bool signalHandlersInstalled() const { return runtime_->signalHandlersInstalled(); }
     bool jitSupportsFloatingPoint() const { return runtime_->jitSupportsFloatingPoint; }
 
--- a/js/src/vm/Runtime.h
+++ b/js/src/vm/Runtime.h
@@ -532,18 +532,18 @@ class PerThreadData : public PerThreadDa
 
     /* See AsmJSActivation comment. Protected by rt->operationCallbackLock. */
     js::AsmJSActivation *asmJSActivationStack_;
 
   public:
     js::Activation *const *addressOfActivation() const {
         return &activation_;
     }
-    js::AsmJSActivation *const *addressOfAsmJSActivationStackReadOnly() const {
-        return &asmJSActivationStack_;
+    static unsigned offsetOfAsmJSActivationStackReadOnly() {
+        return offsetof(PerThreadData, asmJSActivationStack_);
     }
 
     js::AsmJSActivation *asmJSActivationStackFromAnyThread() const {
         return asmJSActivationStack_;
     }
     js::AsmJSActivation *asmJSActivationStackFromOwnerThread() const {
         return asmJSActivationStack_;
     }
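
The Runtime.h change replaces an address-returning accessor with an offset-returning one, presumably so that asm.js code, which can only embed the symbolic AsmJSImm_Runtime pointer, can derive the field's address as base plus offset at link or run time. A small self-contained sketch of that base-plus-offsetof derivation, using a hypothetical ToyPerThreadData in place of the real struct:

    // Toy demonstration of deriving a field address from a base pointer plus
    // offsetof (hypothetical ToyPerThreadData; not the real PerThreadData layout).
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct ToyPerThreadData {
        uintptr_t someOtherField;
        void *asmJSActivationStack;
    };

    static size_t OffsetOfActivationStack() {
        return offsetof(ToyPerThreadData, asmJSActivationStack);
    }

    int main() {
        ToyPerThreadData ptd = { 0, nullptr };
        // A JIT would embed only the base pointer (symbolically) and add the
        // offset in the load instruction; here the same arithmetic is done in C++.
        uint8_t *base = reinterpret_cast<uint8_t *>(&ptd);
        void **derived = reinterpret_cast<void **>(base + OffsetOfActivationStack());
        assert(derived == &ptd.asmJSActivationStack);
        return 0;
    }
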