Bug 992267: Ensure stack alignment requirements for asm.js code; r=bbouvier
author Luke Wagner <luke@mozilla.com>
Fri, 29 Aug 2014 15:34:20 +0200
changeset 224260 a6ebf2ffec5378590582645a0d18bd09c9199030
parent 224259 b45a94bb2c63e93065bb8ae92d67a1b67dbc1b53
child 224261 4773d0ec1ee8fac07c7f498377d5bbcaf84b0ad1
push id 3979
push user raliiev@mozilla.com
push date Mon, 13 Oct 2014 16:35:44 +0000
treeherder mozilla-beta@30f2cc610691
reviewers bbouvier
bugs 992267
milestone 34.0a1
js/src/asmjs/AsmJSFrameIterator.cpp
js/src/asmjs/AsmJSFrameIterator.h
js/src/asmjs/AsmJSValidate.cpp
js/src/irregexp/NativeRegExpMacroAssembler.cpp
js/src/jit/CodeGenerator.cpp
js/src/jit/IonFrames.cpp
js/src/jit/IonMacroAssembler.h
js/src/jit/LIR.h
js/src/jit/arm/Assembler-arm.h
js/src/jit/arm/MacroAssembler-arm.cpp
js/src/jit/arm/Simulator-arm.cpp
js/src/jit/mips/Assembler-mips.h
js/src/jit/mips/MacroAssembler-mips.cpp
js/src/jit/mips/Simulator-mips.cpp
js/src/jit/none/Architecture-none.h
js/src/jit/none/MacroAssembler-none.h
js/src/jit/shared/Assembler-shared.h
js/src/jit/shared/CodeGenerator-shared.cpp
js/src/jit/shared/CodeGenerator-shared.h
js/src/jit/x64/Assembler-x64.h
js/src/jit/x64/MacroAssembler-x64.cpp
js/src/jit/x64/Trampoline-x64.cpp
js/src/jit/x86/Assembler-x86.h
js/src/jit/x86/MacroAssembler-x86.cpp
js/src/jit/x86/Trampoline-x86.cpp
js/src/vm/Stack.cpp
js/src/vm/Stack.h
--- a/js/src/asmjs/AsmJSFrameIterator.cpp
+++ b/js/src/asmjs/AsmJSFrameIterator.cpp
@@ -351,21 +351,21 @@ js::GenerateAsmJSStackOverflowExit(Macro
     // value again. Do not update AsmJSFrame::callerFP as it is not necessary in
     // the non-profiling case (there is no return path from this point) and, in
     // the profiling case, it is already correct.
     Register activation = ABIArgGenerator::NonArgReturnReg0;
     masm.loadAsmJSActivation(activation);
     masm.storePtr(StackPointer, Address(activation, AsmJSActivation::offsetOfFP()));
 
     // Prepare the stack for calling C++.
-    if (unsigned stackDec = StackDecrementForCall(sizeof(AsmJSFrame), ShadowStackSpace))
-        masm.subPtr(Imm32(stackDec), StackPointer);
+    if (uint32_t d = StackDecrementForCall(ABIStackAlignment, sizeof(AsmJSFrame), ShadowStackSpace))
+        masm.subPtr(Imm32(d), StackPointer);
 
     // No need to restore the stack; the throw stub pops everything.
-    masm.assertStackAlignment();
+    masm.assertStackAlignment(ABIStackAlignment);
     masm.call(AsmJSImmPtr(AsmJSImm_ReportOverRecursed));
     masm.jump(throwLabel);
 }
 
 void
 js::GenerateAsmJSExitPrologue(MacroAssembler &masm, unsigned framePushed, AsmJSExit::Reason reason,
                               Label *begin)
 {
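
To make the alignment arithmetic above concrete, here is a minimal standalone
sketch of the explicit-alignment StackDecrementForCall; the helper bodies
mirror the tree's ComputeByteAlignment, and the sizes in main() are
hypothetical.

    #include <cassert>
    #include <cstdint>

    // Padding needed to round 'bytes' up to a multiple of 'alignment'.
    static uint32_t ComputeByteAlignment(uint32_t bytes, uint32_t alignment) {
        return (alignment - (bytes % alignment)) % alignment;
    }

    static uint32_t StackDecrementForCall(uint32_t alignment, uint32_t bytesAlreadyPushed,
                                          uint32_t bytesToPush) {
        return bytesToPush + ComputeByteAlignment(bytesAlreadyPushed + bytesToPush, alignment);
    }

    int main() {
        // With a 16-byte ABI alignment, 8 frame bytes already pushed and no
        // shadow-stack bytes to push, the decrement is 8, which brings sp
        // back to a 16-byte boundary before the C++ call.
        assert(StackDecrementForCall(16, 8, 0) == 8);
        return 0;
    }
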
--- a/js/src/asmjs/AsmJSFrameIterator.h
+++ b/js/src/asmjs/AsmJSFrameIterator.h
@@ -165,12 +165,11 @@ GenerateAsmJSStackOverflowExit(jit::Macr
 
 void
 GenerateAsmJSExitPrologue(jit::MacroAssembler &masm, unsigned framePushed, AsmJSExit::Reason reason,
                           jit::Label *begin);
 void
 GenerateAsmJSExitEpilogue(jit::MacroAssembler &masm, unsigned framePushed, AsmJSExit::Reason reason,
                           jit::Label *profilingReturn);
 
-
 } // namespace js
 
 #endif // asmjs_AsmJSFrameIterator_h
--- a/js/src/asmjs/AsmJSValidate.cpp
+++ b/js/src/asmjs/AsmJSValidate.cpp
@@ -2278,17 +2278,17 @@ class FunctionCompiler
 
     void finishCallArgs(Call *call)
     {
         if (inDeadCode())
             return;
         uint32_t parentStackBytes = call->abi_.stackBytesConsumedSoFar();
         uint32_t newStackBytes;
         if (call->childClobbers_) {
-            call->spIncrement_ = AlignBytes(call->maxChildStackBytes_, StackAlignment);
+            call->spIncrement_ = AlignBytes(call->maxChildStackBytes_, AsmJSStackAlignment);
             for (unsigned i = 0; i < call->stackArgs_.length(); i++)
                 call->stackArgs_[i]->incrementOffset(call->spIncrement_);
             newStackBytes = Max(call->prevMaxStackBytes_,
                                 call->spIncrement_ + parentStackBytes);
         } else {
             call->spIncrement_ = 0;
             newStackBytes = Max(call->prevMaxStackBytes_,
                                 Max(call->maxChildStackBytes_, parentStackBytes));
@@ -5931,43 +5931,44 @@ CheckModuleReturn(ModuleCompiler &m)
     // (since cx->tempLifoAlloc is marked/released after each function
     // statement) and thus all the identifiers in the return statement will be
     // mistaken as free variables and added to lexdeps. Clear these now.
     m.parser().pc->lexdeps->clear();
     return true;
 }
 
 static void
-AssertStackAlignment(MacroAssembler &masm)
-{
-    JS_ASSERT((sizeof(AsmJSFrame) + masm.framePushed()) % StackAlignment == 0);
-    masm.assertStackAlignment();
+AssertStackAlignment(MacroAssembler &masm, uint32_t alignment)
+{
+    JS_ASSERT((sizeof(AsmJSFrame) + masm.framePushed()) % alignment == 0);
+    masm.assertStackAlignment(alignment);
 }
 
 static unsigned
-StackDecrementForCall(MacroAssembler &masm, unsigned bytesToPush)
-{
-    return StackDecrementForCall(sizeof(AsmJSFrame) + masm.framePushed(), bytesToPush);
+StackDecrementForCall(MacroAssembler &masm, uint32_t alignment, unsigned bytesToPush)
+{
+    return StackDecrementForCall(alignment, sizeof(AsmJSFrame) + masm.framePushed(), bytesToPush);
 }
 
 template <class VectorT>
 static unsigned
 StackArgBytes(const VectorT &argTypes)
 {
     ABIArgIter<VectorT> iter(argTypes);
     while (!iter.done())
         iter++;
     return iter.stackBytesConsumedSoFar();
 }
 
 template <class VectorT>
 static unsigned
-StackDecrementForCall(MacroAssembler &masm, const VectorT &argTypes, unsigned extraBytes = 0)
-{
-    return StackDecrementForCall(masm, StackArgBytes(argTypes) + extraBytes);
+StackDecrementForCall(MacroAssembler &masm, uint32_t alignment, const VectorT &argTypes,
+                      unsigned extraBytes = 0)
+{
+    return StackDecrementForCall(masm, alignment, StackArgBytes(argTypes) + extraBytes);
 }
 
 #if defined(JS_CODEGEN_ARM)
 // The ARM system ABI also includes d15 in the non-volatile float registers.
 // Also exclude lr (a.k.a. r14) as we preserve it manually.
 static const RegisterSet NonVolatileRegs =
     RegisterSet(GeneralRegisterSet(Registers::NonVolatileMask &
                                    ~(uint32_t(1) << Registers::lr)),
@@ -5983,37 +5984,39 @@ static const RegisterSet NonVolatileRegs
 // Look at MacroAssembler::PushRegsInMask(RegisterSet set)
 static const unsigned FramePushedAfterSave = NonVolatileRegs.gprs().size() * sizeof(intptr_t) +
                                              NonVolatileRegs.fpus().getPushSizeInBytes() +
                                              sizeof(double);
 #else
 static const unsigned FramePushedAfterSave = NonVolatileRegs.gprs().size() * sizeof(intptr_t) +
                                              NonVolatileRegs.fpus().getPushSizeInBytes();
 #endif
+static const unsigned FramePushedForEntrySP = FramePushedAfterSave + sizeof(void*);
 
 static bool
 GenerateEntry(ModuleCompiler &m, unsigned exportIndex)
 {
     MacroAssembler &masm = m.masm();
 
     Label begin;
     masm.align(CodeAlignment);
     masm.bind(&begin);
 
+    // Save the return address if it wasn't already saved by the call insn.
 #if defined(JS_CODEGEN_ARM)
     masm.push(lr);
 #elif defined(JS_CODEGEN_MIPS)
     masm.push(ra);
+#elif defined(JS_CODEGEN_X86)
+    static const unsigned EntryFrameSize = sizeof(void*);
 #endif
-    masm.subPtr(Imm32(AsmJSFrameBytesAfterReturnAddress), StackPointer);
+
+    // Save all caller non-volatile registers before we clobber them here and in
+    // the asm.js callee (which does not preserve non-volatile registers).
     masm.setFramePushed(0);
-
-    // In constrast to the system ABI, the Ion convention is that all registers
-    // are clobbered by calls. Thus, we must save the caller's non-volatile
-    // registers.
     masm.PushRegsInMask(NonVolatileRegs);
     JS_ASSERT(masm.framePushed() == FramePushedAfterSave);
 
     // ARM and MIPS have a globally-pinned GlobalReg (x64 uses RIP-relative
     // addressing, x86 uses immediates in effective addresses). For the
     // AsmJSGlobalRegBias addition, see Assembler-(mips,arm).h.
 #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
     masm.movePtr(IntArgReg1, GlobalReg);
@@ -6021,39 +6024,46 @@ GenerateEntry(ModuleCompiler &m, unsigne
 #endif
 
     // ARM, MIPS and x64 have a globally-pinned HeapReg (x86 uses immediates in
     // effective addresses).
 #if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
     masm.loadPtr(Address(IntArgReg1, AsmJSModule::heapGlobalDataOffset()), HeapReg);
 #endif
 
-    // Remember the stack pointer in the current AsmJSActivation. This will be
-    // used by error exit paths to set the stack pointer back to what it was
-    // right after the (C++) caller's non-volatile registers were saved so that
-    // they can be restored.
-    Register activation = ABIArgGenerator::NonArgReturnReg0;
-    masm.loadAsmJSActivation(activation);
-    masm.storePtr(StackPointer, Address(activation, AsmJSActivation::offsetOfErrorRejoinSP()));
-
-    // Get 'argv' into a non-arg register and save it on the stack.
+    // Put the 'argv' argument into a non-argument/return register so that we
+    // can use 'argv' while we fill in the arguments for the asm.js callee.
+    // Also, save 'argv' on the stack so that we can recover it after the call.
+    // Use a second non-argument/return register as temporary scratch. 
     Register argv = ABIArgGenerator::NonArgReturnReg0;
     Register scratch = ABIArgGenerator::NonArgReturnReg1;
 #if defined(JS_CODEGEN_X86)
-    masm.loadPtr(Address(StackPointer, sizeof(AsmJSFrame) + masm.framePushed()), argv);
+    masm.loadPtr(Address(StackPointer, EntryFrameSize + masm.framePushed()), argv);
 #else
     masm.movePtr(IntArgReg0, argv);
 #endif
     masm.Push(argv);
 
+    // Save the stack pointer to the saved non-volatile registers. We will use
+    // this on two paths: normal return and exceptional return. Since
+    // loadAsmJSActivation uses GlobalReg, we must do this after loading
+    // GlobalReg.
+    JS_ASSERT(masm.framePushed() == FramePushedForEntrySP);
+    masm.loadAsmJSActivation(scratch);
+    masm.storePtr(StackPointer, Address(scratch, AsmJSActivation::offsetOfEntrySP()));
+
+    // Dynamically align the stack since ABIStackAlignment is not necessarily
+    // AsmJSStackAlignment. We'll use entrySP to recover the original stack
+    // pointer on return.
+    masm.andPtr(Imm32(~(AsmJSStackAlignment - 1)), StackPointer);
+
     // Bump the stack for the call.
     PropertyName *funcName = m.module().exportedFunction(exportIndex).name();
     const ModuleCompiler::Func &func = *m.lookupFunction(funcName);
-    unsigned stackDec = StackDecrementForCall(masm, func.sig().args());
-    masm.reserveStack(stackDec);
+    masm.reserveStack(AlignBytes(StackArgBytes(func.sig().args()), AsmJSStackAlignment));
 
     // Copy parameters out of argv and into the registers/stack-slots specified by
     // the system ABI.
     for (ABIArgTypeIter iter(func.sig().args()); !iter.done(); iter++) {
         unsigned argOffset = iter.index() * sizeof(uint64_t);
         Address src(argv, argOffset);
         switch (iter->kind()) {
           case ABIArg::GPR:
@@ -6079,22 +6089,25 @@ GenerateEntry(ModuleCompiler &m, unsigne
                 masm.loadFloat32(src, ScratchFloat32Reg);
                 masm.storeFloat32(ScratchFloat32Reg, Address(StackPointer, iter->offsetFromArgBase()));
             }
             break;
         }
     }
 
     // Call into the real function.
-    AssertStackAlignment(masm);
+    masm.assertStackAlignment(AsmJSStackAlignment);
     masm.call(CallSiteDesc(CallSiteDesc::Relative), &func.entry());
 
-    // Pop the stack and recover the original 'argv' argument passed to the
-    // trampoline (which was pushed on the stack).
-    masm.freeStack(stackDec);
+    // Recover the stack pointer value before dynamic alignment.
+    masm.loadAsmJSActivation(scratch);
+    masm.loadPtr(Address(scratch, AsmJSActivation::offsetOfEntrySP()), StackPointer);
+    masm.setFramePushed(FramePushedForEntrySP);
+
+    // Recover the 'argv' pointer which was saved before aligning the stack.
     masm.Pop(argv);
 
     // Store the return value in argv[0]
     switch (func.sig().retType().which()) {
       case RetType::Void:
         break;
       case RetType::Signed:
         masm.storeValue(JSVAL_TYPE_INT32, ReturnReg, Address(argv, 0));
@@ -6108,17 +6121,16 @@ GenerateEntry(ModuleCompiler &m, unsigne
         break;
     }
 
     // Restore clobbered non-volatile registers of the caller.
     masm.PopRegsInMask(NonVolatileRegs);
     JS_ASSERT(masm.framePushed() == 0);
 
     masm.move32(Imm32(true), ReturnReg);
-    masm.addPtr(Imm32(AsmJSFrameBytesAfterReturnAddress), StackPointer);
     masm.ret();
 
     return m.finishGeneratingEntry(exportIndex, &begin) && !masm.oom();
 }
 
 static void
 FillArgumentArray(ModuleCompiler &m, const VarTypeVector &argTypes,
                   unsigned offsetToArgs, unsigned offsetToCallerStackArgs,
@@ -6172,17 +6184,17 @@ GenerateFFIInterpExit(ModuleCompiler &m,
     invokeArgTypes.infallibleAppend(typeArray, ArrayLength(typeArray));
 
     // At the point of the call, the stack layout shall be (sp grows to the left):
     //   | stack args | padding | Value argv[] | padding | retaddr | caller stack args |
     // The padding between stack args and argv ensures that argv is aligned. The
     // padding between argv and retaddr ensures that sp is aligned.
     unsigned offsetToArgv = AlignBytes(StackArgBytes(invokeArgTypes), sizeof(double));
     unsigned argvBytes = Max<size_t>(1, exit.sig().args().length()) * sizeof(Value);
-    unsigned framePushed = StackDecrementForCall(masm, offsetToArgv + argvBytes);
+    unsigned framePushed = StackDecrementForCall(masm, ABIStackAlignment, offsetToArgv + argvBytes);
 
     Label begin;
     GenerateAsmJSExitPrologue(masm, framePushed, AsmJSExit::SlowFFI, &begin);
 
     // Fill the argument array.
     unsigned offsetToCallerStackArgs = sizeof(AsmJSFrame) + masm.framePushed();
     Register scratch = ABIArgGenerator::NonArgReturnReg0;
     FillArgumentArray(m, exit.sig().args(), offsetToArgv, offsetToCallerStackArgs, scratch);
@@ -6212,17 +6224,17 @@ GenerateFFIInterpExit(ModuleCompiler &m,
     } else {
         masm.computeEffectiveAddress(argv, scratch);
         masm.storePtr(scratch, Address(StackPointer, i->offsetFromArgBase()));
     }
     i++;
     JS_ASSERT(i.done());
 
     // Make the call, test whether it succeeded, and extract the return value.
-    AssertStackAlignment(masm);
+    AssertStackAlignment(masm, ABIStackAlignment);
     switch (exit.sig().retType().which()) {
       case RetType::Void:
         masm.call(AsmJSImmPtr(AsmJSImm_InvokeFromAsmJS_Ignore));
         masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
         break;
       case RetType::Signed:
         masm.call(AsmJSImmPtr(AsmJSImm_InvokeFromAsmJS_ToInt32));
         masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
@@ -6274,26 +6286,26 @@ GenerateFFIIonExit(ModuleCompiler &m, co
     // space required for both calls and take the maximum. In both cases,
     // include space for savedRegBytes, since these go below the Ion/coerce.
 
     // Ion calls use the following stack layout (sp grows to the left):
     //   | return address | descriptor | callee | argc | this | arg1 | arg2 | ...
     unsigned offsetToIonArgs = MaybeRetAddr;
     unsigned ionArgBytes = 3 * sizeof(size_t) + (1 + exit.sig().args().length()) * sizeof(Value);
     unsigned totalIonBytes = offsetToIonArgs + ionArgBytes + savedRegBytes;
-    unsigned ionFrameSize = StackDecrementForCall(masm, totalIonBytes);
+    unsigned ionFrameSize = StackDecrementForCall(masm, AsmJSStackAlignment, totalIonBytes);
 
     // Coercion calls use the following stack layout (sp grows to the left):
     //   | stack args | padding | Value argv[1] | ...
     // The padding between args and argv ensures that argv is aligned.
     MIRTypeVector coerceArgTypes(m.cx());
     coerceArgTypes.infallibleAppend(MIRType_Pointer); // argv
     unsigned offsetToCoerceArgv = AlignBytes(StackArgBytes(coerceArgTypes), sizeof(double));
     unsigned totalCoerceBytes = offsetToCoerceArgv + sizeof(Value) + savedRegBytes;
-    unsigned coerceFrameSize = StackDecrementForCall(masm, totalCoerceBytes);
+    unsigned coerceFrameSize = StackDecrementForCall(masm, AsmJSStackAlignment, totalCoerceBytes);
 
     unsigned framePushed = Max(ionFrameSize, coerceFrameSize);
 
     Label begin;
     GenerateAsmJSExitPrologue(masm, framePushed, AsmJSExit::IonFFI, &begin);
 
     // 1. Descriptor
     size_t argOffset = offsetToIonArgs;
@@ -6384,19 +6396,19 @@ GenerateFFIIonExit(ModuleCompiler &m, co
         masm.loadPtr(Address(reg0, offsetOfJitTop), reg2);
         masm.storePtr(reg2, Address(reg1, JitActivation::offsetOfPrevJitTop()));
         masm.loadPtr(Address(reg0, offsetOfJitJSContext), reg2);
         masm.storePtr(reg2, Address(reg1, JitActivation::offsetOfPrevJitJSContext()));
         masm.storePtr(reg3, Address(reg0, offsetOfJitJSContext));
     }
 
     // 2. Call
-    AssertStackAlignment(masm);
+    AssertStackAlignment(masm, AsmJSStackAlignment);
     masm.callIonFromAsmJS(callee);
-    AssertStackAlignment(masm);
+    AssertStackAlignment(masm, AsmJSStackAlignment);
 
     {
         // Disable Activation.
         //
         // This sequence needs three registers, and must preserve the JSReturnReg_Data and
         // JSReturnReg_Type, so there are five live registers.
         JS_ASSERT(JSReturnReg_Data == AsmJSIonExitRegReturnData);
         JS_ASSERT(JSReturnReg_Type == AsmJSIonExitRegReturnType);
@@ -6469,17 +6481,17 @@ GenerateFFIIonExit(ModuleCompiler &m, co
         } else {
             masm.computeEffectiveAddress(argv, scratch);
             masm.storePtr(scratch, Address(StackPointer, i->offsetFromArgBase()));
         }
         i++;
         JS_ASSERT(i.done());
 
         // Call coercion function
-        AssertStackAlignment(masm);
+        AssertStackAlignment(masm, ABIStackAlignment);
         switch (exit.sig().retType().which()) {
           case RetType::Signed:
             masm.call(AsmJSImmPtr(AsmJSImm_CoerceInPlace_ToInt32));
             masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
             masm.unboxInt32(Address(StackPointer, offsetToCoerceArgv), ReturnReg);
             break;
           case RetType::Double:
             masm.call(AsmJSImmPtr(AsmJSImm_CoerceInPlace_ToNumber));
@@ -6564,17 +6576,17 @@ GenerateBuiltinThunk(ModuleCompiler &m, 
       case AsmJSExit::Builtin_CeilF:
       case AsmJSExit::Builtin_FloorF:
         argTypes.infallibleAppend(MIRType_Float32);
         break;
       case AsmJSExit::Builtin_Limit:
         MOZ_CRASH("Bad builtin");
     }
 
-    uint32_t framePushed = StackDecrementForCall(masm, argTypes);
+    uint32_t framePushed = StackDecrementForCall(masm, ABIStackAlignment, argTypes);
 
     Label begin;
     GenerateAsmJSExitPrologue(masm, framePushed, AsmJSExit::Builtin(builtin), &begin);
 
     for (ABIArgMIRTypeIter i(argTypes); !i.done(); i++) {
         if (i->kind() != ABIArg::Stack)
             continue;
 #if !defined(JS_CODEGEN_ARM)
@@ -6589,17 +6601,17 @@ GenerateBuiltinThunk(ModuleCompiler &m, 
             masm.loadDouble(srcAddr, ScratchDoubleReg);
             masm.storeDouble(ScratchDoubleReg, dstAddr);
         }
 #else
         MOZ_CRASH("Architecture should have enough registers for all builtin calls");
 #endif
     }
 
-    AssertStackAlignment(masm);
+    AssertStackAlignment(masm, ABIStackAlignment);
     masm.call(BuiltinToImmKind(builtin));
 
     Label profilingReturn;
     GenerateAsmJSExitEpilogue(masm, framePushed, AsmJSExit::Builtin(builtin), &profilingReturn);
     return m.finishGeneratingBuiltinThunk(builtin, &begin, &profilingReturn) && !masm.oom();
 }
 
 static bool
@@ -6644,21 +6656,21 @@ GenerateAsyncInterruptExit(ModuleCompile
     // Store resumePC into the reserved space.
     masm.loadAsmJSActivation(scratch);
     masm.loadPtr(Address(scratch, AsmJSActivation::offsetOfResumePC()), scratch);
     masm.storePtr(scratch, Address(StackPointer, masm.framePushed() + sizeof(void*)));
 
     // We know that StackPointer is word-aligned, but not necessarily
     // stack-aligned, so we need to align it dynamically.
     masm.mov(StackPointer, ABIArgGenerator::NonVolatileReg);
-    masm.andPtr(Imm32(~(StackAlignment - 1)), StackPointer);
+    masm.andPtr(Imm32(~(ABIStackAlignment - 1)), StackPointer);
     if (ShadowStackSpace)
         masm.subPtr(Imm32(ShadowStackSpace), StackPointer);
 
-    masm.assertStackAlignment();
+    masm.assertStackAlignment(ABIStackAlignment);
     masm.call(AsmJSImmPtr(AsmJSImm_HandleExecutionInterrupt));
 
     masm.branchIfFalseBool(ReturnReg, throwLabel);
 
     // Restore the StackPointer to its position before the call.
     masm.mov(ABIArgGenerator::NonVolatileReg, StackPointer);
 
     // Restore the machine state to before the interrupt.
@@ -6671,27 +6683,27 @@ GenerateAsyncInterruptExit(ModuleCompile
     // set to zero so we can use masm.framePushed() below.
     masm.setFramePushed(0);
     // Save all registers except sp. After this, the stack is aligned.
     masm.PushRegsInMask(AllRegsExceptSP);
 
     // Save the stack pointer in a non-volatile register.
     masm.movePtr(StackPointer, s0);
     // Align the stack.
-    masm.ma_and(StackPointer, StackPointer, Imm32(~(StackAlignment - 1)));
+    masm.ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
 
     // Store resumePC into the reserved space.
     masm.loadAsmJSActivation(IntArgReg0);
     masm.loadPtr(Address(IntArgReg0, AsmJSActivation::offsetOfResumePC()), IntArgReg1);
     masm.storePtr(IntArgReg1, Address(s0, masm.framePushed()));
 
     // The MIPS ABI requires reserving stack space for registers $a0 to $a3.
     masm.subPtr(Imm32(4 * sizeof(intptr_t)), StackPointer);
 
-    masm.assertStackAlignment();
+    masm.assertStackAlignment(ABIStackAlignment);
     masm.call(AsmJSImm_HandleExecutionInterrupt);
 
     masm.addPtr(Imm32(4 * sizeof(intptr_t)), StackPointer);
 
     masm.branchIfFalseBool(ReturnReg, throwLabel);
 
     // This will restore stack to the address before the call.
     masm.movePtr(s0, StackPointer);
@@ -6718,17 +6730,17 @@ GenerateAsyncInterruptExit(ModuleCompile
 
     // Store resumePC into the return PC stack slot.
     masm.loadAsmJSActivation(IntArgReg0);
     masm.loadPtr(Address(IntArgReg0, AsmJSActivation::offsetOfResumePC()), IntArgReg1);
     masm.storePtr(IntArgReg1, Address(r6, 14 * sizeof(uint32_t*)));
 
     masm.PushRegsInMask(RegisterSet(GeneralRegisterSet(0), FloatRegisterSet(FloatRegisters::AllDoubleMask)));   // save all FP registers
 
-    masm.assertStackAlignment();
+    masm.assertStackAlignment(ABIStackAlignment);
     masm.call(AsmJSImm_HandleExecutionInterrupt);
 
     masm.branchIfFalseBool(ReturnReg, throwLabel);
 
     // Restore the machine state to before the interrupt. This will set the pc!
     masm.PopRegsInMask(RegisterSet(GeneralRegisterSet(0), FloatRegisterSet(FloatRegisters::AllDoubleMask)));   // restore all FP registers
     masm.mov(r6,sp);
     masm.as_vmsr(r5);
@@ -6762,21 +6774,21 @@ GenerateAsyncInterruptExit(ModuleCompile
 }
 
 static bool
 GenerateSyncInterruptExit(ModuleCompiler &m, Label *throwLabel)
 {
     MacroAssembler &masm = m.masm();
     masm.setFramePushed(0);
 
-    unsigned framePushed = StackDecrementForCall(masm, ShadowStackSpace);
+    unsigned framePushed = StackDecrementForCall(masm, ABIStackAlignment, ShadowStackSpace);
 
     GenerateAsmJSExitPrologue(masm, framePushed, AsmJSExit::Interrupt, &m.syncInterruptLabel());
 
-    AssertStackAlignment(masm);
+    AssertStackAlignment(masm, ABIStackAlignment);
     masm.call(AsmJSImmPtr(AsmJSImm_HandleExecutionInterrupt));
     masm.branchIfFalseBool(ReturnReg, throwLabel);
 
     Label profilingReturn;
     GenerateAsmJSExitEpilogue(masm, framePushed, AsmJSExit::Interrupt, &profilingReturn);
     return m.finishGeneratingInterrupt(&m.syncInterruptLabel(), &profilingReturn) && !masm.oom();
 }
 
@@ -6790,27 +6802,27 @@ GenerateThrowStub(ModuleCompiler &m, Lab
 {
     MacroAssembler &masm = m.masm();
     masm.align(CodeAlignment);
     masm.bind(throwLabel);
 
     // We are about to pop all frames in this AsmJSActivation. Set fp to null to
     // maintain the invariant that fp is either null or pointing to a valid
     // frame.
-    Register activation = ABIArgGenerator::NonArgReturnReg0;
-    masm.loadAsmJSActivation(activation);
-    masm.storePtr(ImmWord(0), Address(activation, AsmJSActivation::offsetOfFP()));
-
-    masm.setFramePushed(FramePushedAfterSave);
-    masm.loadPtr(Address(activation, AsmJSActivation::offsetOfErrorRejoinSP()), StackPointer);
+    Register scratch = ABIArgGenerator::NonArgReturnReg0;
+    masm.loadAsmJSActivation(scratch);
+    masm.storePtr(ImmWord(0), Address(scratch, AsmJSActivation::offsetOfFP()));
+
+    masm.setFramePushed(FramePushedForEntrySP);
+    masm.loadPtr(Address(scratch, AsmJSActivation::offsetOfEntrySP()), StackPointer);
+    masm.Pop(scratch);
     masm.PopRegsInMask(NonVolatileRegs);
     JS_ASSERT(masm.framePushed() == 0);
 
     masm.mov(ImmWord(0), ReturnReg);
-    masm.addPtr(Imm32(AsmJSFrameBytesAfterReturnAddress), StackPointer);
     masm.ret();
 
     return m.finishGeneratingInlineStub(throwLabel) && !masm.oom();
 }
 
 static bool
 GenerateStubs(ModuleCompiler &m)
 {
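
The GenerateEntry changes above introduce dynamic alignment: since the C++
caller only guarantees ABIStackAlignment, the prologue masks sp down to an
AsmJSStackAlignment boundary and later restores it from the saved entrySP.
Below is a small sketch of the masking, valid only for power-of-two
alignments; the addresses are hypothetical.

    #include <cassert>
    #include <cstdint>

    static uintptr_t AlignStackDown(uintptr_t sp, uintptr_t alignment) {
        assert((alignment & (alignment - 1)) == 0); // power of two required
        return sp & ~(alignment - 1);
    }

    int main() {
        uintptr_t entrySP = 0x7fff0008; // hypothetical incoming stack pointer
        uintptr_t sp = AlignStackDown(entrySP, 16);
        assert(sp == 0x7fff0000 && sp % 16 == 0);
        // Both the normal return path and the throw stub restore sp from
        // AsmJSActivation::entrySP rather than undoing the mask.
        return 0;
    }
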
--- a/js/src/irregexp/NativeRegExpMacroAssembler.cpp
+++ b/js/src/irregexp/NativeRegExpMacroAssembler.cpp
@@ -139,17 +139,17 @@ NativeRegExpMacroAssembler::GenerateCode
 
 #ifndef JS_CODEGEN_X86
     // The InputOutputData* is stored as an argument, save it on the stack
     // above the frame.
     masm.Push(IntArgReg0);
 #endif
 
     size_t frameSize = sizeof(FrameData) + num_registers_ * sizeof(void *);
-    frameSize = JS_ROUNDUP(frameSize + masm.framePushed(), StackAlignment) - masm.framePushed();
+    frameSize = JS_ROUNDUP(frameSize + masm.framePushed(), ABIStackAlignment) - masm.framePushed();
 
     // Actually emit code to start a new stack frame.
     masm.reserveStack(frameSize);
     masm.checkStackAlignment();
 
     // Check if we have space on the stack.
     Label stack_ok;
     void *stack_limit = &runtime->mainThread.jitStackLimit;
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -8817,22 +8817,25 @@ CodeGenerator::visitAsmJSCall(LAsmJSCall
             }
         }
     }
 #endif
 
     if (mir->spIncrement())
         masm.freeStack(mir->spIncrement());
 
-    JS_ASSERT((sizeof(AsmJSFrame) + masm.framePushed()) % StackAlignment == 0);
+    JS_ASSERT((sizeof(AsmJSFrame) + masm.framePushed()) % AsmJSStackAlignment == 0);
 
 #ifdef DEBUG
+    static_assert(AsmJSStackAlignment >= ABIStackAlignment,
+                  "The asm.js stack alignment should subsume the ABI-required alignment");
+    static_assert(AsmJSStackAlignment % ABIStackAlignment == 0,
+                  "The asm.js stack alignment should subsume the ABI-required alignment");
     Label ok;
-    JS_ASSERT(IsPowerOfTwo(StackAlignment));
-    masm.branchTestPtr(Assembler::Zero, StackPointer, Imm32(StackAlignment - 1), &ok);
+    masm.branchTestPtr(Assembler::Zero, StackPointer, Imm32(AsmJSStackAlignment - 1), &ok);
     masm.breakpoint();
     masm.bind(&ok);
 #endif
 
     MAsmJSCall::Callee callee = mir->callee();
     switch (callee.which()) {
       case MAsmJSCall::Callee::Internal:
         masm.call(mir->desc(), callee.internal());
@@ -9061,17 +9064,17 @@ CodeGenerator::visitAsmJSInterruptCheck(
 {
     Register scratch = ToRegister(lir->scratch());
     masm.movePtr(AsmJSImmPtr(AsmJSImm_RuntimeInterrupt), scratch);
     masm.load8ZeroExtend(Address(scratch, 0), scratch);
     Label rejoin;
     masm.branch32(Assembler::Equal, scratch, Imm32(0), &rejoin);
     {
         uint32_t stackFixup = ComputeByteAlignment(masm.framePushed() + sizeof(AsmJSFrame),
-                                                   StackAlignment);
+                                                   ABIStackAlignment);
         masm.reserveStack(stackFixup);
         masm.call(lir->funcDesc(), lir->interruptExit());
         masm.freeStack(stackFixup);
     }
     masm.bind(&rejoin);
     return true;
 }
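
The two static_asserts above encode the property that makes a single
alignment discipline sufficient: if both alignments are powers of two and
AsmJSStackAlignment is a multiple of ABIStackAlignment, then an
AsmJSStackAlignment-aligned sp is automatically ABI-aligned. A sketch with
hypothetical values:

    #include <cassert>
    #include <cstdint>

    int main() {
        const uint32_t abi = 8, asmjs = 16; // hypothetical alignments
        assert(asmjs >= abi && asmjs % abi == 0);
        for (uintptr_t sp = 0; sp < 4096; sp += asmjs)
            assert(sp % abi == 0); // asm.js-aligned implies ABI-aligned
        return 0;
    }
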
 
--- a/js/src/jit/IonFrames.cpp
+++ b/js/src/jit/IonFrames.cpp
@@ -1070,17 +1070,17 @@ JitActivationIterator::jitStackRange(uin
     end = reinterpret_cast<uintptr_t *>(frames.prevFp());
 }
 
 #ifdef JS_CODEGEN_MIPS
 uint8_t *
 alignDoubleSpillWithOffset(uint8_t *pointer, int32_t offset)
 {
     uint32_t address = reinterpret_cast<uint32_t>(pointer);
-    address = (address - offset) & ~(StackAlignment - 1);
+    address = (address - offset) & ~(ABIStackAlignment - 1);
     return reinterpret_cast<uint8_t *>(address);
 }
 
 static void
 MarkJitExitFrameCopiedArguments(JSTracer *trc, const VMFunction *f, IonExitFooterFrame *footer)
 {
     uint8_t *doubleArgs = reinterpret_cast<uint8_t *>(footer);
     doubleArgs = alignDoubleSpillWithOffset(doubleArgs, sizeof(intptr_t));
--- a/js/src/jit/IonMacroAssembler.h
+++ b/js/src/jit/IonMacroAssembler.h
@@ -1421,21 +1421,21 @@ class MacroAssembler : public MacroAssem
     }
 
     void icRestoreLive(RegisterSet &liveRegs, AfterICSaveLive &aic) {
         restoreFrameAlignmentForICArguments(aic);
         JS_ASSERT(framePushed() == aic.initialStack);
         PopRegsInMask(liveRegs);
     }
 
-    void assertStackAlignment() {
+    void assertStackAlignment(uint32_t alignment) {
 #ifdef DEBUG
         Label ok;
-        JS_ASSERT(IsPowerOfTwo(StackAlignment));
-        branchTestPtr(Assembler::Zero, StackPointer, Imm32(StackAlignment - 1), &ok);
+        JS_ASSERT(IsPowerOfTwo(alignment));
+        branchTestPtr(Assembler::Zero, StackPointer, Imm32(alignment - 1), &ok);
         breakpoint();
         bind(&ok);
 #endif
     }
 };
 
 static inline Assembler::DoubleCondition
 JSOpToDoubleCondition(JSOp op)
@@ -1503,18 +1503,18 @@ JSOpToCondition(JSOp op, bool isSigned)
             return Assembler::AboveOrEqual;
           default:
             MOZ_CRASH("Unrecognized comparison operation");
         }
     }
 }
 
 static inline size_t
-StackDecrementForCall(size_t bytesAlreadyPushed, size_t bytesToPush)
+StackDecrementForCall(uint32_t alignment, size_t bytesAlreadyPushed, size_t bytesToPush)
 {
     return bytesToPush +
-           ComputeByteAlignment(bytesAlreadyPushed + bytesToPush, StackAlignment);
+           ComputeByteAlignment(bytesAlreadyPushed + bytesToPush, alignment);
 }
 
 } // namespace jit
 } // namespace js
 
 #endif /* jit_IonMacroAssembler_h */
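
The parameterized assertStackAlignment above emits a low-bits test. For a
power-of-two alignment, (sp & (alignment - 1)) == 0 holds exactly when sp is
a multiple of the alignment, as this minimal sketch demonstrates:

    #include <cassert>
    #include <cstdint>

    static bool IsStackAligned(uintptr_t sp, uint32_t alignment) {
        assert((alignment & (alignment - 1)) == 0); // mirrors the JS_ASSERT above
        return (sp & (alignment - 1)) == 0;
    }

    int main() {
        assert(IsStackAligned(0x1000, 16));
        assert(!IsStackAligned(0x1008, 16)); // 8-aligned but not 16-aligned
        assert(IsStackAligned(0x1008, 8));
        return 0;
    }
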
--- a/js/src/jit/LIR.h
+++ b/js/src/jit/LIR.h
@@ -1581,20 +1581,20 @@ class LIRGraph
     }
     uint32_t localSlotCount() const {
         return localSlotCount_;
     }
     // Return the localSlotCount() value rounded up so that it satisfies the
     // platform stack alignment requirement, and so that it's a multiple of
     // the number of slots per Value.
     uint32_t paddedLocalSlotCount() const {
-        // Round to StackAlignment, but also round to at least sizeof(Value) in
-        // case that's greater, because StackOffsetOfPassedArg rounds argument
-        // slots to 8-byte boundaries.
-        size_t Alignment = Max(size_t(StackAlignment), sizeof(Value));
+        // Round to ABIStackAlignment, but also round to at least sizeof(Value)
+        // in case that's greater, because StackOffsetOfPassedArg rounds
+        // argument slots to 8-byte boundaries.
+        size_t Alignment = Max(size_t(ABIStackAlignment), sizeof(Value));
         return AlignBytes(localSlotCount(), Alignment);
     }
     size_t paddedLocalSlotsSize() const {
         return paddedLocalSlotCount();
     }
     void setArgumentSlotCount(uint32_t argumentSlotCount) {
         argumentSlotCount_ = argumentSlotCount;
     }
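
A worked sketch of the paddedLocalSlotCount() rounding above, with
hypothetical inputs; AlignBytes mirrors the tree's power-of-two helper.

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    static uint32_t AlignBytes(uint32_t bytes, uint32_t alignment) {
        return (bytes + alignment - 1) & ~(alignment - 1); // power-of-two only
    }

    int main() {
        const uint32_t abiStackAlignment = 4; // e.g. x86 without GCC
        const uint32_t sizeofValue = 8;       // JS::Value
        uint32_t alignment = std::max(abiStackAlignment, sizeofValue);
        // 13 local slots round up to 16, keeping StackOffsetOfPassedArg's
        // 8-byte argument slots aligned.
        assert(AlignBytes(13, alignment) == 16);
        return 0;
    }
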
--- a/js/src/jit/arm/Assembler-arm.h
+++ b/js/src/jit/arm/Assembler-arm.h
@@ -140,27 +140,28 @@ static MOZ_CONSTEXPR_VAR FloatRegister d
 static MOZ_CONSTEXPR_VAR FloatRegister d14 = {FloatRegisters::d14, VFPRegister::Double};
 static MOZ_CONSTEXPR_VAR FloatRegister d15 = {FloatRegisters::d15, VFPRegister::Double};
 
 
 // For maximal awesomeness, 8 should be sufficient. ldrd/strd (dual-register
 // load/store) operate in a single cycle when the address they are dealing with
 // is 8-byte aligned. Also, the ARM ABI wants the stack to be 8-byte aligned at
 // function boundaries. I'm trying to make sure this is always true.
-static const uint32_t StackAlignment = 8;
+static const uint32_t ABIStackAlignment = 8;
 static const uint32_t CodeAlignment = 8;
-static const bool StackKeptAligned = true;
 
 // This boolean indicates whether we support SIMD instructions flavoured for
 // this architecture or not. Rather than a method in the LIRGenerator, it is
 // here such that it is accessible from the entire codebase. Once full support
 // for SIMD is reached on all tier-1 platforms, this constant can be deleted.
 static const bool SupportsSimd = false;
 static const uint32_t SimdStackAlignment = 8;
 
+static const uint32_t AsmJSStackAlignment = SimdStackAlignment;
+
 static const Scale ScalePointer = TimesFour;
 
 class Instruction;
 class InstBranchImm;
 uint32_t RM(Register r);
 uint32_t RS(Register r);
 uint32_t RD(Register r);
 uint32_t RT(Register r);
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -3773,17 +3773,17 @@ void
 MacroAssemblerARMCompat::setupUnalignedABICall(uint32_t args, Register scratch)
 {
     setupABICall(args);
     dynamicAlignment_ = true;
 
     ma_mov(sp, scratch);
 
     // Force sp to be aligned.
-    ma_and(Imm32(~(StackAlignment - 1)), sp, sp);
+    ma_and(Imm32(~(ABIStackAlignment - 1)), sp, sp);
     ma_push(scratch);
 }
 
 #if defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_ARM_SIMULATOR)
 void
 MacroAssemblerARMCompat::passHardFpABIArg(const MoveOperand &from, MoveOp::Type type)
 {
     MoveOperand to;
@@ -3932,17 +3932,17 @@ void
 MacroAssemblerARMCompat::passABIArg(FloatRegister freg, MoveOp::Type type)
 {
     passABIArg(MoveOperand(freg), type);
 }
 
 void MacroAssemblerARMCompat::checkStackAlignment()
 {
 #ifdef DEBUG
-    ma_tst(sp, Imm32(StackAlignment - 1));
+    ma_tst(sp, Imm32(ABIStackAlignment - 1));
     breakpoint(NonZero);
 #endif
 }
 
 void
 MacroAssemblerARMCompat::callWithABIPre(uint32_t *stackAdjust, bool callFromAsmJS)
 {
     JS_ASSERT(inCall_);
@@ -3951,21 +3951,21 @@ MacroAssemblerARMCompat::callWithABIPre(
 #if defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_ARM_SIMULATOR)
     if (UseHardFpABI())
         *stackAdjust += 2*((usedFloatSlots_ > NumFloatArgRegs) ? usedFloatSlots_ - NumFloatArgRegs : 0) * sizeof(intptr_t);
 #endif
     uint32_t alignmentAtPrologue = callFromAsmJS ? sizeof(AsmJSFrame) : 0;
 
     if (!dynamicAlignment_) {
         *stackAdjust += ComputeByteAlignment(framePushed_ + *stackAdjust + alignmentAtPrologue,
-                                             StackAlignment);
+                                             ABIStackAlignment);
     } else {
         // sizeof(intptr_t) accounts for the saved stack pointer pushed by
         // setupUnalignedABICall.
-        *stackAdjust += ComputeByteAlignment(*stackAdjust + sizeof(intptr_t), StackAlignment);
+        *stackAdjust += ComputeByteAlignment(*stackAdjust + sizeof(intptr_t), ABIStackAlignment);
     }
 
     reserveStack(*stackAdjust);
 
     // Position all arguments.
     {
         enoughMemory_ = enoughMemory_ && moveResolver_.resolve();
         if (!enoughMemory_)
--- a/js/src/jit/arm/Simulator-arm.cpp
+++ b/js/src/jit/arm/Simulator-arm.cpp
@@ -2112,17 +2112,17 @@ Simulator::softwareInterrupt(SimInstruct
         int32_t arg3 = get_register(r3);
         int32_t *stack_pointer = reinterpret_cast<int32_t*>(get_register(sp));
         int32_t arg4 = stack_pointer[0];
         int32_t arg5 = stack_pointer[1];
 
         int32_t saved_lr = get_register(lr);
         intptr_t external = reinterpret_cast<intptr_t>(redirection->nativeFunction());
 
-        bool stack_aligned = (get_register(sp) & (StackAlignment - 1)) == 0;
+        bool stack_aligned = (get_register(sp) & (ABIStackAlignment - 1)) == 0;
         if (!stack_aligned) {
             fprintf(stderr, "Runtime call with unaligned stack!\n");
             MOZ_CRASH();
         }
 
         if (single_stepping_)
             single_step_callback_(single_step_callback_arg_, this, nullptr);
 
@@ -4253,17 +4253,17 @@ Simulator::call(uint8_t* entry, int argu
         set_register(r3, va_arg(parameters, int32_t));
 
     // Remaining arguments passed on stack.
     int original_stack = get_register(sp);
     int entry_stack = original_stack;
     if (argument_count >= 4)
         entry_stack -= (argument_count - 4) * sizeof(int32_t);
 
-    entry_stack &= ~StackAlignment;
+    entry_stack &= ~ABIStackAlignment;
 
     // Store remaining arguments on stack, from low to high memory.
     intptr_t *stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
     for (int i = 4; i < argument_count; i++)
         stack_argument[i - 4] = va_arg(parameters, int32_t);
     va_end(parameters);
     set_register(sp, entry_stack);
 
--- a/js/src/jit/mips/Assembler-mips.h
+++ b/js/src/jit/mips/Assembler-mips.h
@@ -153,29 +153,30 @@ static MOZ_CONSTEXPR_VAR FloatRegister f
 static MOZ_CONSTEXPR_VAR FloatRegister f22 = { FloatRegisters::f22, FloatRegister::Double };
 static MOZ_CONSTEXPR_VAR FloatRegister f24 = { FloatRegisters::f24, FloatRegister::Double };
 static MOZ_CONSTEXPR_VAR FloatRegister f26 = { FloatRegisters::f26, FloatRegister::Double };
 static MOZ_CONSTEXPR_VAR FloatRegister f28 = { FloatRegisters::f28, FloatRegister::Double };
 static MOZ_CONSTEXPR_VAR FloatRegister f30 = { FloatRegisters::f30, FloatRegister::Double };
 
 // MIPS CPUs can only load multibyte data that is "naturally"
 // four-byte-aligned; the sp register should be eight-byte-aligned.
-static const uint32_t StackAlignment = 8;
+static const uint32_t ABIStackAlignment = 8;
 static const uint32_t CodeAlignment = 4;
-static const bool StackKeptAligned = true;
 
 // This boolean indicates whether we support SIMD instructions flavoured for
 // this architecture or not. Rather than a method in the LIRGenerator, it is
 // here such that it is accessible from the entire codebase. Once full support
 // for SIMD is reached on all tier-1 platforms, this constant can be deleted.
 static const bool SupportsSimd = false;
 // TODO this is just a filler to prevent a build failure. The MIPS SIMD
 // alignment requirements still need to be explored.
 static const uint32_t SimdStackAlignment = 8;
 
+static const uint32_t AsmJSStackAlignment = SimdStackAlignment;
+
 static const Scale ScalePointer = TimesFour;
 
 // MIPS instruction types
 //                +---------------------------------------------------------------+
 //                |    6      |    5    |    5    |    5    |    5    |    6      |
 //                +---------------------------------------------------------------+
 // Register type  |  Opcode   |    Rs   |    Rt   |    Rd   |    Sa   | Function  |
 //                +---------------------------------------------------------------+
@@ -233,17 +234,16 @@ static const uint32_t Imm16Mask = ((1 <<
 static const uint32_t Imm26Mask = ((1 << Imm26Bits) - 1) << Imm26Shift;
 static const uint32_t Imm28Mask = ((1 << Imm28Bits) - 1) << Imm28Shift;
 static const uint32_t RSMask = ((1 << RSBits) - 1) << RSShift;
 static const uint32_t RTMask = ((1 << RTBits) - 1) << RTShift;
 static const uint32_t RDMask = ((1 << RDBits) - 1) << RDShift;
 static const uint32_t SAMask = ((1 << SABits) - 1) << SAShift;
 static const uint32_t FunctionMask = ((1 << FunctionBits) - 1) << FunctionShift;
 static const uint32_t RegMask = Registers::Total - 1;
-static const uint32_t StackAlignmentMask = StackAlignment - 1;
 
 static const uint32_t MAX_BREAK_CODE = 1024 - 1;
 
 class Instruction;
 class InstReg;
 class InstImm;
 class InstJump;
 class BranchInstBlock;
--- a/js/src/jit/mips/MacroAssembler-mips.cpp
+++ b/js/src/jit/mips/MacroAssembler-mips.cpp
@@ -1569,17 +1569,17 @@ MacroAssembler::PushRegsInMask(RegisterS
         diffG -= sizeof(intptr_t);
         storePtr(*iter, Address(StackPointer, diffG));
     }
     MOZ_ASSERT(diffG == 0);
 
     // Double values have to be aligned. We reserve extra space so that we can
     // start writing from the first aligned location.
     // We reserve a whole extra double so that the buffer has even size.
-    ma_and(SecondScratchReg, sp, Imm32(~(StackAlignment - 1)));
+    ma_and(SecondScratchReg, sp, Imm32(~(ABIStackAlignment - 1)));
     reserveStack(diffF + sizeof(double));
 
     for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); iter++) {
         if ((*iter).code() % 2 == 0)
             as_sd(*iter, SecondScratchReg, -diffF);
         diffF -= sizeof(double);
     }
     MOZ_ASSERT(diffF == 0);
@@ -1591,17 +1591,17 @@ MacroAssembler::PopRegsInMaskIgnore(Regi
     JS_ASSERT(!SupportsSimd && simdSet.size() == 0);
     int32_t diffG = set.gprs().size() * sizeof(intptr_t);
     int32_t diffF = set.fpus().getPushSizeInBytes();
     const int32_t reservedG = diffG;
     const int32_t reservedF = diffF;
 
     // Read the buffer from the first aligned location.
     ma_addu(SecondScratchReg, sp, Imm32(reservedF + sizeof(double)));
-    ma_and(SecondScratchReg, SecondScratchReg, Imm32(~(StackAlignment - 1)));
+    ma_and(SecondScratchReg, SecondScratchReg, Imm32(~(ABIStackAlignment - 1)));
 
     for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); iter++) {
         if (!ignore.has(*iter) && ((*iter).code() % 2 == 0))
             // Use assembly l.d because we have aligned the stack.
             as_ld(*iter, SecondScratchReg, -diffF);
         diffF -= sizeof(double);
     }
     freeStack(reservedF + sizeof(double));
@@ -3153,17 +3153,17 @@ MacroAssemblerMIPSCompat::setupUnaligned
 {
     setupABICall(args);
     dynamicAlignment_ = true;
 
     ma_move(scratch, StackPointer);
 
     // Force sp to be aligned
     ma_subu(StackPointer, StackPointer, Imm32(sizeof(uint32_t)));
-    ma_and(StackPointer, StackPointer, Imm32(~(StackAlignment - 1)));
+    ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
     as_sw(scratch, StackPointer, 0);
 }
 
 void
 MacroAssemblerMIPSCompat::passABIArg(const MoveOperand &from, MoveOp::Type type)
 {
     ++passedArgs_;
     if (!enoughMemory_)
@@ -3254,48 +3254,48 @@ MacroAssemblerMIPSCompat::passABIArg(Flo
     passABIArg(MoveOperand(freg), type);
 }
 
 void
 MacroAssemblerMIPSCompat::checkStackAlignment()
 {
 #ifdef DEBUG
     Label aligned;
-    as_andi(ScratchRegister, sp, StackAlignment - 1);
+    as_andi(ScratchRegister, sp, ABIStackAlignment - 1);
     ma_b(ScratchRegister, zero, &aligned, Equal, ShortJump);
     as_break(MAX_BREAK_CODE);
     bind(&aligned);
 #endif
 }
 
 void
 MacroAssemblerMIPSCompat::alignStackPointer()
 {
     movePtr(StackPointer, SecondScratchReg);
     subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
-    andPtr(Imm32(~(StackAlignment - 1)), StackPointer);
+    andPtr(Imm32(~(ABIStackAlignment - 1)), StackPointer);
     storePtr(SecondScratchReg, Address(StackPointer, 0));
 }
 
 void
 MacroAssemblerMIPSCompat::restoreStackPointer()
 {
     loadPtr(Address(StackPointer, 0), StackPointer);
 }
 
 void
 MacroAssembler::alignFrameForICArguments(AfterICSaveLive &aic)
 {
-    if (framePushed() % StackAlignment != 0) {
-        aic.alignmentPadding = StackAlignment - (framePushed() % StackAlignment);
+    if (framePushed() % ABIStackAlignment != 0) {
+        aic.alignmentPadding = ABIStackAlignment - (framePushed() % ABIStackAlignment);
         reserveStack(aic.alignmentPadding);
     } else {
         aic.alignmentPadding = 0;
     }
-    MOZ_ASSERT(framePushed() % StackAlignment == 0);
+    MOZ_ASSERT(framePushed() % ABIStackAlignment == 0);
     checkStackAlignment();
 }
 
 void
 MacroAssembler::restoreFrameAlignmentForICArguments(AfterICSaveLive &aic)
 {
     if (aic.alignmentPadding != 0)
         freeStack(aic.alignmentPadding);
@@ -3311,20 +3311,20 @@ MacroAssemblerMIPSCompat::callWithABIPre
 
     *stackAdjust += usedArgSlots_ > NumIntArgRegs ?
                     usedArgSlots_ * sizeof(intptr_t) :
                     NumIntArgRegs * sizeof(intptr_t);
 
     uint32_t alignmentAtPrologue = callFromAsmJS ? sizeof(AsmJSFrame) : 0;
 
     if (dynamicAlignment_) {
-        *stackAdjust += ComputeByteAlignment(*stackAdjust, StackAlignment);
+        *stackAdjust += ComputeByteAlignment(*stackAdjust, ABIStackAlignment);
     } else {
         *stackAdjust += ComputeByteAlignment(framePushed_ + alignmentAtPrologue + *stackAdjust,
-                                             StackAlignment);
+                                             ABIStackAlignment);
     }
 
     reserveStack(*stackAdjust);
 
     // Save $ra because the call is going to clobber it. Restore it in
     // callWithABIPost. NOTE: This is needed for calls from BaselineIC.
     // Maybe we can do this differently.
     ma_sw(ra, Address(StackPointer, *stackAdjust - sizeof(intptr_t)));
@@ -3439,17 +3439,17 @@ MacroAssemblerMIPSCompat::callWithABI(co
     callWithABIPost(stackAdjust, result);
 
 }
 
 void
 MacroAssemblerMIPSCompat::handleFailureWithHandler(void *handler)
 {
     // Reserve space for exception information.
-    int size = (sizeof(ResumeFromException) + StackAlignment) & ~(StackAlignment - 1);
+    int size = (sizeof(ResumeFromException) + ABIStackAlignment) & ~(ABIStackAlignment - 1);
     ma_subu(StackPointer, StackPointer, Imm32(size));
     ma_move(a0, StackPointer); // Use a0 since it is a first function argument
 
     // Ask for an exception handler.
     setupUnalignedABICall(1, a1);
     passABIArg(a0);
     callWithABI(handler);
 
--- a/js/src/jit/mips/Simulator-mips.cpp
+++ b/js/src/jit/mips/Simulator-mips.cpp
@@ -1866,17 +1866,17 @@ Simulator::softwareInterrupt(SimInstruct
         int32_t arg5 = stack_pointer[5];
 
         // This is dodgy but it works because the C entry stubs are never moved.
         // See comment in codegen-arm.cc and bug 1242173.
         int32_t saved_ra = getRegister(ra);
 
         intptr_t external = reinterpret_cast<intptr_t>(redirection->nativeFunction());
 
-        bool stack_aligned = (getRegister(sp) & (StackAlignment - 1)) == 0;
+        bool stack_aligned = (getRegister(sp) & (ABIStackAlignment - 1)) == 0;
         if (!stack_aligned) {
             fprintf(stderr, "Runtime call with unaligned stack!\n");
             MOZ_CRASH();
         }
 
         switch (redirection->type()) {
           case Args_General0: {
             Prototype_General0 target = reinterpret_cast<Prototype_General0>(external);
@@ -3400,17 +3400,17 @@ Simulator::call(uint8_t *entry, int argu
     int original_stack = getRegister(sp);
     // Compute position of stack on entry to generated code.
     int entry_stack = original_stack;
     if (argument_count > kCArgSlotCount)
         entry_stack = entry_stack - argument_count * sizeof(int32_t);
     else
         entry_stack = entry_stack - kCArgsSlotsSize;
 
-    entry_stack &= ~StackAlignment;
+    entry_stack &= ~ABIStackAlignment;
 
     intptr_t *stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
 
     // Setup the arguments.
     for (int i = 0; i < argument_count; i++) {
         js::jit::Register argReg;
         if (GetIntArgReg(i, &argReg))
             setRegister(argReg.code(), va_arg(parameters, int32_t));
--- a/js/src/jit/none/Architecture-none.h
+++ b/js/src/jit/none/Architecture-none.h
@@ -11,16 +11,17 @@
 // platforms, so include it here to avoid inadvertent build bustage.
 #include "jit/IonSpewer.h"
 
 namespace js {
 namespace jit {
 
 static const bool SupportsSimd = false;
 static const uint32_t SimdStackAlignment = 0;
+static const uint32_t AsmJSStackAlignment = 0;
 
 class Registers
 {
   public:
     typedef uint8_t Code;
     typedef uint8_t SetType;
 
     static uint32_t SetSize(SetType) { MOZ_CRASH(); }
--- a/js/src/jit/none/MacroAssembler-none.h
+++ b/js/src/jit/none/MacroAssembler-none.h
@@ -63,19 +63,18 @@ static MOZ_CONSTEXPR_VAR Register JSRetu
 #if defined(JS_NUNBOX32)
 static MOZ_CONSTEXPR_VAR ValueOperand JSReturnOperand(InvalidReg, InvalidReg);
 #elif defined(JS_PUNBOX64)
 static MOZ_CONSTEXPR_VAR ValueOperand JSReturnOperand(InvalidReg);
 #else
 #error "Bad architecture"
 #endif
 
-static const uint32_t StackAlignment = 8;
+static const uint32_t ABIStackAlignment = 4;
 static const uint32_t CodeAlignment = 4;
-static const bool StackKeptAligned = false;
 
 static const Scale ScalePointer = TimesOne;
 
 class Assembler : public AssemblerShared
 {
   public:
     enum Condition {
         Equal,
--- a/js/src/jit/shared/Assembler-shared.h
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -635,19 +635,19 @@ class CallSite : public CallSiteDesc
 // address on all archs (whether the call instruction pushes the
 // return address (x86/x64) or the prologue does (ARM/MIPS)).
     uint32_t stackDepth() const { return stackDepth_; }
 };
 
 typedef Vector<CallSite, 0, SystemAllocPolicy> CallSiteVector;
 
 // As an invariant across architectures, within asm.js code:
-//   $sp % StackAlignment = (sizeof(AsmJSFrame) + masm.framePushed) % StackAlignment
+//   $sp % AsmJSStackAlignment = (sizeof(AsmJSFrame) + masm.framePushed) % AsmJSStackAlignment
 // Thus, AsmJSFrame represents the bytes pushed after the call (which occurred
-// with a StackAlignment-aligned StackPointer) that are not included in
+// with an AsmJSStackAlignment-aligned StackPointer) that are not included in
 // masm.framePushed.
 struct AsmJSFrame
 {
     // The caller's saved frame pointer. In non-profiling mode, internal
     // asm.js-to-asm.js calls don't update fp and thus don't save the caller's
     // frame pointer; the space is reserved, however, so that profiling mode can
     // reuse the same function body without recompiling.
     uint8_t *callerFP;
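
A hypothetical instance of the invariant above, assuming AsmJSStackAlignment
is 16 and sizeof(AsmJSFrame) is 8: the call occurs with sp % 16 == 0, pushing
the 8 frame bytes leaves sp % 16 == 8, and masm.framePushed is still 0, so
sp % 16 == (8 + 0) % 16. Every subsequent push moves sp and framePushed in
lockstep, so the equality holds at each point AssertStackAlignment checks.
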
--- a/js/src/jit/shared/CodeGenerator-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -64,76 +64,44 @@ CodeGeneratorShared::CodeGeneratorShared
     checkOsiPointRegisters(js_JitOptions.checkOsiPointRegisters),
 #endif
     frameDepth_(graph->paddedLocalSlotsSize() + graph->argumentsSize()),
     frameInitialAdjustment_(0)
 {
     if (!gen->compilingAsmJS())
         masm.setInstrumentation(&sps_);
 
-    // Since asm.js uses the system ABI which does not necessarily use a
-    // regular array where all slots are sizeof(Value), it maintains the max
-    // argument stack depth separately.
     if (gen->compilingAsmJS()) {
+        // Since asm.js uses the system ABI which does not necessarily use a
+        // regular array where all slots are sizeof(Value), it maintains the max
+        // argument stack depth separately.
         JS_ASSERT(graph->argumentSlotCount() == 0);
         frameDepth_ += gen->maxAsmJSStackArgBytes();
 
+        // If the function uses any SIMD, we may need to insert padding so that
+        // local slots are aligned for SIMD.
+        if (gen->usesSimd()) {
+            frameInitialAdjustment_ = ComputeByteAlignment(sizeof(AsmJSFrame), AsmJSStackAlignment);
+            frameDepth_ += frameInitialAdjustment_;
+        }
+
         // An MAsmJSCall does not align the stack pointer at call sites but instead
-        // relies on the a priori stack adjustment (in the prologue) on platforms
-        // (like x64) which require the stack to be aligned.
-        if (StackKeptAligned || gen->performsCall() || gen->usesSimd()) {
-            unsigned alignmentAtCall = sizeof(AsmJSFrame) + frameDepth_;
-            unsigned firstFixup = 0;
-            if (unsigned rem = alignmentAtCall % StackAlignment)
-                frameDepth_ += (firstFixup = StackAlignment - rem);
-
-            if (gen->usesSimd())
-                setupSimdAlignment(firstFixup);
-        }
+        // relies on the a priori stack adjustment. This must be the last
+        // adjustment of frameDepth_.
+        if (gen->performsCall())
+            frameDepth_ += ComputeByteAlignment(sizeof(AsmJSFrame) + frameDepth_, AsmJSStackAlignment);
 
         // FrameSizeClass is only used for bailing, which cannot happen in
         // asm.js code.
         frameClass_ = FrameSizeClass::None();
     } else {
         frameClass_ = FrameSizeClass::FromDepth(frameDepth_);
     }
 }
 
-void
-CodeGeneratorShared::setupSimdAlignment(unsigned fixup)
-{
-    JS_STATIC_ASSERT(SimdStackAlignment % StackAlignment == 0);
-    //  At this point, we have:
-    //      (frameDepth_ + sizeof(AsmJSFrame)) % StackAlignment == 0
-    //  which means we can add as many SimdStackAlignment as needed.
-
-    //  The next constraint is to have all stack slots
-    //  aligned for SIMD. That's done by having the first stack slot
-    //  aligned. We need an offset such that:
-    //      (frameDepth_ - offset) % SimdStackAlignment == 0
-    frameInitialAdjustment_ = frameDepth_ % SimdStackAlignment;
-
-    //  We need to ensure that the first stack slot is actually
-    //  located in this frame and not beforehand, when taking this
-    //  offset into account, i.e.:
-    //      frameDepth_ - initial adjustment >= frameDepth_ - fixup
-    //  <=>                            fixup >= initial adjustment
-    //
-    //  For instance, on x86 with gcc, if the initial frameDepth
-    //  % 16 is 8, then the fixup is 0, although the initial
-    //  adjustment is 8. The first stack slot would be located at
-    //  frameDepth - 8 in this case, which is obviously before
-    //  frameDepth.
-    //
-    //  If that's not the case, we add SimdStackAlignment to the
-    //  fixup, which will keep on satisfying other constraints.
-    if (frameInitialAdjustment_ > int32_t(fixup))
-        frameDepth_ += SimdStackAlignment;
-}
-
 bool
 CodeGeneratorShared::generateOutOfLineCode()
 {
     JSScript *topScript = sps_.getPushed();
     for (size_t i = 0; i < outOfLineCode_.length(); i++) {
         // Add native => bytecode mapping entries for OOL sites.
         // Not enabled on asm.js yet since asm doesn't contain bytecode mappings.
         if (!gen->compilingAsmJS()) {
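
A worked sketch of the simplified frameDepth_ computation above, using
hypothetical sizes; ComputeByteAlignment mirrors the tree's helper.

    #include <cassert>
    #include <cstdint>

    static uint32_t ComputeByteAlignment(uint32_t bytes, uint32_t alignment) {
        return (alignment - (bytes % alignment)) % alignment;
    }

    int main() {
        const uint32_t AsmJSStackAlignment = 16;
        const uint32_t sizeofAsmJSFrame = 8; // hypothetical 32-bit frame
        uint32_t frameDepth = 40;            // locals + max asm.js arg bytes

        // SIMD padding: align the first local slot for SIMD accesses.
        uint32_t frameInitialAdjustment =
            ComputeByteAlignment(sizeofAsmJSFrame, AsmJSStackAlignment); // 8
        frameDepth += frameInitialAdjustment;                            // 48

        // Call-site alignment, applied last as the comment above requires.
        frameDepth += ComputeByteAlignment(sizeofAsmJSFrame + frameDepth,
                                           AsmJSStackAlignment);         // +8
        assert((sizeofAsmJSFrame + frameDepth) % AsmJSStackAlignment == 0);
        assert(frameDepth == 56);
        return 0;
    }
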
--- a/js/src/jit/shared/CodeGenerator-shared.h
+++ b/js/src/jit/shared/CodeGenerator-shared.h
@@ -491,18 +491,16 @@ class CodeGeneratorShared : public LInst
 // This function is not used for MIPS. MIPS has branchToBlock.
 #ifndef JS_CODEGEN_MIPS
     void jumpToBlock(MBasicBlock *mir, Assembler::Condition cond);
 #endif
 
   private:
     void generateInvalidateEpilogue();
 
-    void setupSimdAlignment(unsigned fixup);
-
   public:
     CodeGeneratorShared(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm);
 
   public:
     template <class ArgSeq, class StoreOutputTo>
     bool visitOutOfLineCallVM(OutOfLineCallVM<ArgSeq, StoreOutputTo> *ool);
 
     bool visitOutOfLineTruncateSlow(OutOfLineTruncateSlow *ool);
--- a/js/src/jit/x64/Assembler-x64.h
+++ b/js/src/jit/x64/Assembler-x64.h
@@ -179,29 +179,28 @@ class ABIArgGenerator
     static const Register NonArg_VolatileReg;
     static const Register NonReturn_VolatileReg0;
 };
 
 static MOZ_CONSTEXPR_VAR Register OsrFrameReg = IntArgReg3;
 
 static MOZ_CONSTEXPR_VAR Register PreBarrierReg = rdx;
 
-// GCC stack is aligned on 16 bytes, but we don't maintain the invariant in
-// jitted code.
-static const uint32_t StackAlignment = 16;
-static const bool StackKeptAligned = false;
+static const uint32_t ABIStackAlignment = 16;
 static const uint32_t CodeAlignment = 8;
 
 // This boolean indicates whether we support SIMD instructions flavoured for
 // this architecture or not. Rather than a method in the LIRGenerator, it is
 // here such that it is accessible from the entire codebase. Once full support
 // for SIMD is reached on all tier-1 platforms, this constant can be deleted.
 static const bool SupportsSimd = true;
 static const uint32_t SimdStackAlignment = 16;
 
+static const uint32_t AsmJSStackAlignment = SimdStackAlignment;
+
 static const Scale ScalePointer = TimesEight;
 
 } // namespace jit
 } // namespace js
 
 #include "jit/shared/Assembler-x86-shared.h"
 
 namespace js {
--- a/js/src/jit/x64/MacroAssembler-x64.cpp
+++ b/js/src/jit/x64/MacroAssembler-x64.cpp
@@ -195,17 +195,17 @@ MacroAssemblerX64::setupAlignedABICall(u
 
 void
 MacroAssemblerX64::setupUnalignedABICall(uint32_t args, Register scratch)
 {
     setupABICall(args);
     dynamicAlignment_ = true;
 
     movq(rsp, scratch);
-    andq(Imm32(~(StackAlignment - 1)), rsp);
+    andq(Imm32(~(ABIStackAlignment - 1)), rsp);
     push(scratch);
 }
 
 void
 MacroAssemblerX64::passABIArg(const MoveOperand &from, MoveOp::Type type)
 {
     MoveOperand to;
     switch (type) {
@@ -265,21 +265,21 @@ void
 MacroAssemblerX64::callWithABIPre(uint32_t *stackAdjust)
 {
     JS_ASSERT(inCall_);
     JS_ASSERT(args_ == passedIntArgs_ + passedFloatArgs_);
 
     if (dynamicAlignment_) {
         *stackAdjust = stackForCall_
                      + ComputeByteAlignment(stackForCall_ + sizeof(intptr_t),
-                                            StackAlignment);
+                                            ABIStackAlignment);
     } else {
         *stackAdjust = stackForCall_
                      + ComputeByteAlignment(stackForCall_ + framePushed_,
-                                            StackAlignment);
+                                            ABIStackAlignment);
     }
 
     reserveStack(*stackAdjust);
 
     // Position all arguments.
     {
         enoughMemory_ &= moveResolver_.resolve();
         if (!enoughMemory_)
@@ -288,17 +288,17 @@ MacroAssemblerX64::callWithABIPre(uint32
         MoveEmitter emitter(*this);
         emitter.emit(moveResolver_);
         emitter.finish();
     }
 
 #ifdef DEBUG
     {
         Label good;
-        testq(rsp, Imm32(StackAlignment - 1));
+        testq(rsp, Imm32(ABIStackAlignment - 1));
         j(Equal, &good);
         breakpoint();
         bind(&good);
     }
 #endif
 }
 
 void
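Both hunks above lean on the same power-of-two arithmetic: setupUnalignedABICall rounds rsp down with a mask, and callWithABIPre pads the outgoing-argument area so the call site lands on an ABIStackAlignment boundary. A standalone sketch of that arithmetic (ComputeByteAlignment here is a plausible reimplementation, assumed to match the helper the real code calls):

    #include <cassert>
    #include <cstdint>

    static const uint32_t ABIStackAlignment = 16;

    // Padding needed so (bytes + padding) is a multiple of alignment.
    static uint32_t ComputeByteAlignment(uint32_t bytes, uint32_t alignment)
    {
        return (alignment - (bytes % alignment)) % alignment;
    }

    // Rounding down, as andq(Imm32(~(ABIStackAlignment - 1)), rsp) does.
    static uintptr_t AlignDown(uintptr_t sp, uint32_t alignment)
    {
        return sp & ~uintptr_t(alignment - 1);
    }

    int main()
    {
        assert(ComputeByteAlignment(24, ABIStackAlignment) == 8);
        assert(AlignDown(0x7fff1238, ABIStackAlignment) == uintptr_t(0x7fff1230));
        return 0;
    }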
--- a/js/src/jit/x64/Trampoline-x64.cpp
+++ b/js/src/jit/x64/Trampoline-x64.cpp
@@ -546,17 +546,16 @@ JitRuntime::generateBailoutHandler(JSCon
 #endif
 
     return code;
 }
 
 JitCode *
 JitRuntime::generateVMWrapper(JSContext *cx, const VMFunction &f)
 {
-    JS_ASSERT(!StackKeptAligned);
     JS_ASSERT(functionWrappers_);
     JS_ASSERT(functionWrappers_->initialized());
     VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f);
     if (p)
         return p->value();
 
     // Generate separate code for the wrapper.
     MacroAssembler masm;
--- a/js/src/jit/x86/Assembler-x86.h
+++ b/js/src/jit/x86/Assembler-x86.h
@@ -103,33 +103,34 @@ static MOZ_CONSTEXPR_VAR Register AsmJSI
 
 // Registers used in the GenerateFFIIonExit Disable Activation block.
 static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegReturnData = edx;
 static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegReturnType = ecx;
 static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD0 = edi;
 static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD1 = eax;
 static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD2 = esi;
 
-// GCC stack is aligned on 16 bytes, but we don't maintain the invariant in
-// jitted code.
+// The GCC stack is aligned to 16 bytes. Ion does not maintain this
+// invariant for internal calls; asm.js code does.
 #if defined(__GNUC__)
-static const uint32_t StackAlignment = 16;
+static const uint32_t ABIStackAlignment = 16;
 #else
-static const uint32_t StackAlignment = 4;
+static const uint32_t ABIStackAlignment = 4;
 #endif
-static const bool StackKeptAligned = false;
 static const uint32_t CodeAlignment = 8;
 
 // This boolean indicates whether we support SIMD instructions flavoured for
 // this architecture or not. Rather than a method in the LIRGenerator, it is
 // here such that it is accessible from the entire codebase. Once full support
 // for SIMD is reached on all tier-1 platforms, this constant can be deleted.
 static const bool SupportsSimd = true;
 static const uint32_t SimdStackAlignment = 16;
 
+static const uint32_t AsmJSStackAlignment = SimdStackAlignment;
+
 struct ImmTag : public Imm32
 {
     ImmTag(JSValueTag mask)
       : Imm32(int32_t(mask))
     { }
 };
 
 struct ImmType : public ImmTag
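On x86 the two constants genuinely diverge: ABIStackAlignment drops to 4 without GCC while AsmJSStackAlignment stays at 16, which is why call sites such as the StackDecrementForCall call in the AsmJSFrameIterator hunk now pass the required alignment explicitly. A plausible sketch of such a helper, with the signature assumed from that call site:

    #include <cassert>
    #include <cstdint>

    // Bytes to subtract from an already-aligned stack pointer so that,
    // after pushing bytesToPush on top of framePushed bytes, the stack
    // still ends on an `alignment` boundary.
    static uint32_t StackDecrementForCall(uint32_t alignment,
                                          uint32_t framePushed,
                                          uint32_t bytesToPush)
    {
        uint32_t total = framePushed + bytesToPush;
        uint32_t rem = total % alignment;
        return bytesToPush + (rem ? alignment - rem : 0);
    }

    int main()
    {
        assert(StackDecrementForCall(16, 8, 0) == 8);   // pad out to 16
        assert(StackDecrementForCall(16, 16, 0) == 0);  // already aligned
        return 0;
    }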
--- a/js/src/jit/x86/MacroAssembler-x86.cpp
+++ b/js/src/jit/x86/MacroAssembler-x86.cpp
@@ -222,17 +222,17 @@ MacroAssemblerX86::setupAlignedABICall(u
 
 void
 MacroAssemblerX86::setupUnalignedABICall(uint32_t args, Register scratch)
 {
     setupABICall(args);
     dynamicAlignment_ = true;
 
     movl(esp, scratch);
-    andl(Imm32(~(StackAlignment - 1)), esp);
+    andl(Imm32(~(ABIStackAlignment - 1)), esp);
     push(scratch);
 }
 
 void
 MacroAssemblerX86::passABIArg(const MoveOperand &from, MoveOp::Type type)
 {
     ++passedArgs_;
     MoveOperand to = MoveOperand(StackPointer, stackForCall_);
@@ -262,21 +262,21 @@ void
 MacroAssemblerX86::callWithABIPre(uint32_t *stackAdjust)
 {
     JS_ASSERT(inCall_);
     JS_ASSERT(args_ == passedArgs_);
 
     if (dynamicAlignment_) {
         *stackAdjust = stackForCall_
                      + ComputeByteAlignment(stackForCall_ + sizeof(intptr_t),
-                                            StackAlignment);
+                                            ABIStackAlignment);
     } else {
         *stackAdjust = stackForCall_
                      + ComputeByteAlignment(stackForCall_ + framePushed_,
-                                            StackAlignment);
+                                            ABIStackAlignment);
     }
 
     reserveStack(*stackAdjust);
 
     // Position all arguments.
     {
         enoughMemory_ &= moveResolver_.resolve();
         if (!enoughMemory_)
@@ -286,17 +286,17 @@ MacroAssemblerX86::callWithABIPre(uint32
         emitter.emit(moveResolver_);
         emitter.finish();
     }
 
 #ifdef DEBUG
     {
         // Check call alignment.
         Label good;
-        testl(esp, Imm32(StackAlignment - 1));
+        testl(esp, Imm32(ABIStackAlignment - 1));
         j(Equal, &good);
         breakpoint();
         bind(&good);
     }
 #endif
 }
 
 void
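The DEBUG blocks in both MacroAssemblers emit the same check: test the low bits of the stack pointer against the alignment mask and trap if any are set. The underlying predicate, for reference (a hedged C++ analogue, not the generated code):

    #include <cassert>
    #include <cstdint>

    // Matches `testl esp, Imm32(ABIStackAlignment - 1); jz good; int3`:
    // a pointer is aligned iff its low bits under the mask are zero.
    static bool IsAligned(uintptr_t p, uint32_t alignment)
    {
        return (p & (alignment - 1)) == 0;
    }

    int main()
    {
        assert(IsAligned(0x1000, 16));
        assert(!IsAligned(0x1004, 16));
        return 0;
    }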
--- a/js/src/jit/x86/Trampoline-x86.cpp
+++ b/js/src/jit/x86/Trampoline-x86.cpp
@@ -585,17 +585,16 @@ JitRuntime::generateBailoutHandler(JSCon
 #endif
 
     return code;
 }
 
 JitCode *
 JitRuntime::generateVMWrapper(JSContext *cx, const VMFunction &f)
 {
-    JS_ASSERT(!StackKeptAligned);
     JS_ASSERT(functionWrappers_);
     JS_ASSERT(functionWrappers_->initialized());
     VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f);
     if (p)
         return p->value();
 
     // Generate separate code for the wrapper.
     MacroAssembler masm;
--- a/js/src/vm/Stack.cpp
+++ b/js/src/vm/Stack.cpp
@@ -1545,17 +1545,17 @@ jit::JitActivation::markRematerializedFr
         return;
     for (RematerializedFrameTable::Enum e(*rematerializedFrames_); !e.empty(); e.popFront())
         RematerializedFrame::MarkInVector(trc, e.front().value());
 }
 
 AsmJSActivation::AsmJSActivation(JSContext *cx, AsmJSModule &module)
   : Activation(cx, AsmJS),
     module_(module),
-    errorRejoinSP_(nullptr),
+    entrySP_(nullptr),
     profiler_(nullptr),
     resumePC_(nullptr),
     fp_(nullptr),
     exitReason_(AsmJSExit::None)
 {
     if (cx->runtime()->spsProfiler.enabled()) {
         // Use a profiler string that matches jsMatch regex in
         // browser/devtools/profiler/cleopatra/js/parserWorker.js.
@@ -1568,17 +1568,17 @@ AsmJSActivation::AsmJSActivation(JSConte
     prevAsmJSForModule_ = module.activation();
     module.activation() = this;
 
     prevAsmJS_ = cx->mainThread().asmJSActivationStack_;
 
     JSRuntime::AutoLockForInterrupt lock(cx->runtime());
     cx->mainThread().asmJSActivationStack_ = this;
 
-    (void) errorRejoinSP_;  // squelch GCC warning
+    (void) entrySP_;  // squelch GCC warning
 }
 
 AsmJSActivation::~AsmJSActivation()
 {
     if (profiler_)
         profiler_->exitAsmJS();
 
     JS_ASSERT(fp_ == nullptr);
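Beyond the errorRejoinSP_ to entrySP_ rename, the constructor shows the activation-stack push protocol: save the previous top pointers, then publish this activation under the interrupt lock so signal handlers observe a consistent list. A reduced model of that protocol (illustrative, with the locking elided):

    // Each activation saves the previous stack top and publishes itself:
    struct ActivationNode {
        ActivationNode *prev;
    };

    struct ActivationStack {
        ActivationNode *top = nullptr;
        void push(ActivationNode *n) {
            n->prev = top;  // prevAsmJS_ = cx->mainThread().asmJSActivationStack_
            top = n;        // asmJSActivationStack_ = this (under the lock)
        }
    };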
--- a/js/src/vm/Stack.h
+++ b/js/src/vm/Stack.h
@@ -1477,17 +1477,17 @@ class InterpreterFrameIterator
 // JitActivation interleaved with Ion/Baseline jit code. This would allow
 // efficient calls back and forth but requires that we can walk the stack for
 // all kinds of jit code.
 class AsmJSActivation : public Activation
 {
     AsmJSModule &module_;
     AsmJSActivation *prevAsmJS_;
     AsmJSActivation *prevAsmJSForModule_;
-    void *errorRejoinSP_;
+    void *entrySP_;
     SPSProfiler *profiler_;
     void *resumePC_;
     uint8_t *fp_;
     AsmJSExit::Reason exitReason_;
 
   public:
     AsmJSActivation(JSContext *cx, AsmJSModule &module);
     ~AsmJSActivation();
@@ -1507,17 +1507,17 @@ class AsmJSActivation : public Activatio
     // Returns the reason why asm.js code called out of asm.js code.
     AsmJSExit::Reason exitReason() const { return exitReason_; }
 
     // Read by JIT code:
     static unsigned offsetOfContext() { return offsetof(AsmJSActivation, cx_); }
     static unsigned offsetOfResumePC() { return offsetof(AsmJSActivation, resumePC_); }
 
     // Written by JIT code:
-    static unsigned offsetOfErrorRejoinSP() { return offsetof(AsmJSActivation, errorRejoinSP_); }
+    static unsigned offsetOfEntrySP() { return offsetof(AsmJSActivation, entrySP_); }
     static unsigned offsetOfFP() { return offsetof(AsmJSActivation, fp_); }
     static unsigned offsetOfExitReason() { return offsetof(AsmJSActivation, exitReason_); }
 
     // Set from SIGSEGV handler:
     void setResumePC(void *pc) { resumePC_ = pc; }
 };
 
 // A FrameIter walks over the runtime's stack of JS script activations,
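The offsetOf* accessors exist so that generated machine code can address C++ fields directly; offsetOfErrorRejoinSP follows its field's rename to offsetOfEntrySP. A minimal model of the pattern (types reduced for illustration; the masm call in the comment follows the style of the AsmJSFrameIterator hunk, not this exact file):

    #include <cstddef>

    struct ModelActivation {
        void *entrySP_;

        // Exposed as a static method so the masm can form operands like
        //   Address(activationReg, AsmJSActivation::offsetOfEntrySP())
        // when emitting stores from JIT code.
        static unsigned offsetOfEntrySP() {
            return offsetof(ModelActivation, entrySP_);
        }
    };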