Bug 1494618 - Simplify Baseline stack overflow checks. r=tcampbell
author: Jan de Mooij <jdemooij@mozilla.com>
Sat, 29 Sep 2018 17:03:15 +0000
changeset 494661 021a82ef604b877cf2acb831652c6f3a39648564
parent 494660 7e9ab0e7b608ddb4d84f36194509f498c66e3449
child 494663 06173200ede0eff3a9a9136968d99d9f7e29d1d1
push id: 9984
push user: ffxbld-merge
push date: Mon, 15 Oct 2018 21:07:35 +0000
reviewers: tcampbell
bugs: 1494618
milestone: 64.0a1
Bug 1494618 - Simplify Baseline stack overflow checks. r=tcampbell

The main change here is that we now inline the "early stack check" directly in JIT code instead of doing a VM call for it. That lets us remove a lot of the complexity needed to handle that special case elsewhere.

Differential Revision: https://phabricator.services.mozilla.com/D7066
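For intuition, the control flow the new prologue emits corresponds roughly to the C++ model below. This is a minimal sketch, not SpiderMonkey API: FrameModel, prologueStackChecks, and the parameter names are invented for illustration, and the real logic is emitted as JIT code by emitPrologue() and emitStackCheck() in the BaselineCompiler.cpp hunks that follow.

    #include <cstddef>
    #include <cstdint>

    // Illustrative stand-in for BaselineFrame's flags word.
    struct FrameModel {
        static constexpr uint32_t OVER_RECURSED = 1;
        uint32_t flags = 0;
    };

    // Models the early (infallible) and late (fallible) stack checks.
    // Returns false where the jitcode would call into the VM to throw.
    bool prologueStackChecks(uintptr_t sp, size_t nslots,
                             uintptr_t limitNoInterrupt, FrameModel& frame)
    {
        // Early check: would pushing nslots local slots overflow the stack?
        // (The stack grows down; Value slots are 8 bytes each.)
        uintptr_t spAfterLocals = sp - nslots * sizeof(uint64_t);
        if (spAfterLocals < limitNoInterrupt) {
            frame.flags |= FrameModel::OVER_RECURSED; // record it, don't throw
            // ...jitcode skips pushing the locals...
        }
        // ...locals pushed (unless flagged), env chain initialized...

        // Late check: exception handling is now possible, so a set flag
        // makes the VM call report the over-recursion and throw.
        return !(frame.flags & FrameModel::OVER_RECURSED);
    }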
js/src/jit/BaselineCompiler.cpp
js/src/jit/BaselineCompiler.h
js/src/jit/BaselineDebugModeOSR.cpp
js/src/jit/BaselineIC.h
js/src/jit/BaselineJIT.cpp
js/src/jit/BaselineJIT.h
js/src/jit/VMFunctions.cpp
js/src/jit/VMFunctions.h
js/src/jit/arm64/MacroAssembler-arm64-inl.h
--- a/js/src/jit/BaselineCompiler.cpp
+++ b/js/src/jit/BaselineCompiler.cpp
@@ -432,34 +432,45 @@ BaselineCompiler::emitPrologue()
         masm.storePtr(R1.scratchReg(), frame.addressOfEnvironmentChain());
     }
 
     // Functions with a large number of locals require two stack checks.
     // The VMCall for a fallible stack check can only occur after the
     // env chain has been initialized, as that is required for proper
     // exception handling if the VMCall returns false.  The env chain
     // initialization can only happen after the UndefinedValues for the
-    // local slots have been pushed.
-    // However by that time, the stack might have grown too much.
-    // In these cases, we emit an extra, early, infallible check
-    // before pushing the locals.  The early check sets a flag on the
-    // frame if the stack check fails (but otherwise doesn't throw an
-    // exception).  If the flag is set, then the jitcode skips past
-    // the pushing of the locals, and directly to env chain initialization
-    // followed by the actual stack check, which will throw the correct
-    // exception.
+    // local slots have been pushed. However by that time, the stack might
+    // have grown too much.
+    //
+    // In these cases, we emit an extra, early, infallible check before pushing
+    // the locals. The early check just sets a flag on the frame if the stack
+    // check fails. If the flag is set, the jitcode skips the pushing of the
+    // locals and goes directly to env chain initialization, followed by the
+    // actual stack check, which will throw the correct exception.
     Label earlyStackCheckFailed;
     if (needsEarlyStackCheck()) {
-        if (!emitStackCheck(/* earlyCheck = */ true)) {
-            return false;
+        // Subtract the size of script->nslots() from the stack pointer.
+        uint32_t slotsSize = script->nslots() * sizeof(Value);
+        Register scratch = R1.scratchReg();
+        masm.moveStackPtrTo(scratch);
+        masm.subPtr(Imm32(slotsSize), scratch);
+
+        // Set the OVER_RECURSED flag on the frame if the computed stack pointer
+        // overflows the stack limit. We have to use the actual (*NoInterrupt)
+        // stack limit here because we don't want to set the flag and throw an
+        // overrecursion exception later in the interrupt case.
+        Label stackCheckOk;
+        masm.branchPtr(Assembler::BelowOrEqual,
+                       AbsoluteAddress(cx->addressOfJitStackLimitNoInterrupt()), scratch,
+                       &stackCheckOk);
+        {
+            masm.or32(Imm32(BaselineFrame::OVER_RECURSED), frame.addressOfFlags());
+            masm.jump(&earlyStackCheckFailed);
         }
-        masm.branchTest32(Assembler::NonZero,
-                          frame.addressOfFlags(),
-                          Imm32(BaselineFrame::OVER_RECURSED),
-                          &earlyStackCheckFailed);
+        masm.bind(&stackCheckOk);
     }
 
     emitInitializeLocals();
 
     if (needsEarlyStackCheck()) {
         masm.bind(&earlyStackCheckFailed);
     }
 
@@ -630,23 +641,16 @@ BaselineCompiler::callVM(const VMFunctio
     uint32_t frameVals = frame.nlocals() + frame.stackDepth();
     uint32_t frameBaseSize = BaselineFrame::FramePointerOffset + BaselineFrame::Size();
     uint32_t frameFullSize = frameBaseSize + (frameVals * sizeof(Value));
     if (phase == POST_INITIALIZE) {
         masm.store32(Imm32(frameFullSize), frameSizeAddress);
         uint32_t descriptor = MakeFrameDescriptor(frameFullSize + argSize, FrameType::BaselineJS,
                                                   ExitFrameLayout::Size());
         masm.push(Imm32(descriptor));
-
-    } else if (phase == PRE_INITIALIZE) {
-        masm.store32(Imm32(frameBaseSize), frameSizeAddress);
-        uint32_t descriptor = MakeFrameDescriptor(frameBaseSize + argSize, FrameType::BaselineJS,
-                                                  ExitFrameLayout::Size());
-        masm.push(Imm32(descriptor));
-
     } else {
         MOZ_ASSERT(phase == CHECK_OVER_RECURSED);
         Label afterWrite;
         Label writePostInitialize;
 
         // If OVER_RECURSED is set, then frame locals haven't been pushed yet.
         masm.branchTest32(Assembler::Zero,
                           frame.addressOfFlags(),
@@ -682,79 +686,61 @@ BaselineCompiler::callVM(const VMFunctio
     }
 #endif
 
     // Add a fake ICEntry (without stubs), so that the return offset to
     // pc mapping works.
     return appendICEntry(ICEntry::Kind_CallVM, callOffset);
 }
 
-typedef bool (*CheckOverRecursedWithExtraFn)(JSContext*, BaselineFrame*, uint32_t, uint32_t);
-static const VMFunction CheckOverRecursedWithExtraInfo =
-    FunctionInfo<CheckOverRecursedWithExtraFn>(CheckOverRecursedWithExtra,
-                                               "CheckOverRecursedWithExtra");
-
-bool
-BaselineCompiler::emitStackCheck(bool earlyCheck)
-{
-    Label skipCall;
-    uint32_t slotsSize = script->nslots() * sizeof(Value);
-    uint32_t tolerance = earlyCheck ? slotsSize : 0;
-
-    masm.moveStackPtrTo(R1.scratchReg());
-
-    // If this is the early stack check, locals haven't been pushed yet.  Adjust the
-    // stack pointer to account for the locals that would be pushed before performing
-    // the guard around the vmcall to the stack check.
-    if (earlyCheck) {
-        masm.subPtr(Imm32(tolerance), R1.scratchReg());
-    }
-
+typedef bool (*CheckOverRecursedBaselineFn)(JSContext*, BaselineFrame*);
+static const VMFunction CheckOverRecursedBaselineInfo =
+    FunctionInfo<CheckOverRecursedBaselineFn>(CheckOverRecursedBaseline,
+                                              "CheckOverRecursedBaseline");
+
+bool
+BaselineCompiler::emitStackCheck()
+{
     // If this is the late stack check for a frame which contains an early stack check,
     // then the early stack check might have failed and skipped past the pushing of locals
     // on the stack.
     //
     // If this is a possibility, then the OVER_RECURSED flag should be checked, and the
-    // VMCall to CheckOverRecursed done unconditionally if it's set.
+    // VMCall to CheckOverRecursedBaseline done unconditionally if it's set.
     Label forceCall;
-    if (!earlyCheck && needsEarlyStackCheck()) {
+    if (needsEarlyStackCheck()) {
         masm.branchTest32(Assembler::NonZero,
                           frame.addressOfFlags(),
                           Imm32(BaselineFrame::OVER_RECURSED),
                           &forceCall);
     }
 
-    masm.branchPtr(Assembler::BelowOrEqual,
-                   AbsoluteAddress(cx->addressOfJitStackLimit()), R1.scratchReg(),
-                   &skipCall);
-
-    if (!earlyCheck && needsEarlyStackCheck()) {
+    Label skipCall;
+    masm.branchStackPtrRhs(Assembler::BelowOrEqual,
+                           AbsoluteAddress(cx->addressOfJitStackLimit()),
+                           &skipCall);
+
+    if (needsEarlyStackCheck()) {
         masm.bind(&forceCall);
     }
 
     prepareVMCall();
-    pushArg(Imm32(earlyCheck));
-    pushArg(Imm32(tolerance));
     masm.loadBaselineFramePtr(BaselineFrameReg, R1.scratchReg());
     pushArg(R1.scratchReg());
 
     CallVMPhase phase = POST_INITIALIZE;
-    if (earlyCheck) {
-        phase = PRE_INITIALIZE;
-    } else if (needsEarlyStackCheck()) {
+    if (needsEarlyStackCheck()) {
         phase = CHECK_OVER_RECURSED;
     }
 
-    if (!callVMNonOp(CheckOverRecursedWithExtraInfo, phase)) {
+    if (!callVMNonOp(CheckOverRecursedBaselineInfo, phase)) {
         return false;
     }
 
-    icEntries_.back().setFakeKind(earlyCheck
-                                  ? ICEntry::Kind_EarlyStackCheck
-                                  : ICEntry::Kind_StackCheck);
+    icEntries_.back().setFakeKind(ICEntry::Kind_StackCheck);
 
     masm.bind(&skipCall);
     return true;
 }
 
 void
 BaselineCompiler::emitIsDebuggeeCheck()
 {
--- a/js/src/jit/BaselineCompiler.h
+++ b/js/src/jit/BaselineCompiler.h
@@ -412,17 +412,16 @@ class BaselineCompiler final
     template <typename T>
     void pushArg(const T& t) {
         masm.Push(t);
     }
     void prepareVMCall();
 
     enum CallVMPhase {
         POST_INITIALIZE,
-        PRE_INITIALIZE,
         CHECK_OVER_RECURSED
     };
     bool callVM(const VMFunction& fun, CallVMPhase phase=POST_INITIALIZE);
 
     bool callVMNonOp(const VMFunction& fun, CallVMPhase phase=POST_INITIALIZE) {
         if (!callVM(fun, phase)) {
             return false;
         }
@@ -446,17 +445,17 @@ class BaselineCompiler final
     MOZ_MUST_USE bool emitIC(ICStub* stub, ICEntry::Kind kind);
     MOZ_MUST_USE bool emitOpIC(ICStub* stub) {
         return emitIC(stub, ICEntry::Kind_Op);
     }
     MOZ_MUST_USE bool emitNonOpIC(ICStub* stub) {
         return emitIC(stub, ICEntry::Kind_NonOp);
     }
 
-    MOZ_MUST_USE bool emitStackCheck(bool earlyCheck=false);
+    MOZ_MUST_USE bool emitStackCheck();
     MOZ_MUST_USE bool emitInterruptCheck();
     MOZ_MUST_USE bool emitWarmUpCounterIncrement(bool allowOsr=true);
     MOZ_MUST_USE bool emitArgumentTypeChecks();
     void emitIsDebuggeeCheck();
     MOZ_MUST_USE bool emitDebugPrologue();
     MOZ_MUST_USE bool emitDebugTrap();
     MOZ_MUST_USE bool emitTraceLoggerEnter();
     MOZ_MUST_USE bool emitTraceLoggerExit();
--- a/js/src/jit/BaselineDebugModeOSR.cpp
+++ b/js/src/jit/BaselineDebugModeOSR.cpp
@@ -95,17 +95,16 @@ struct DebugModeOSREntry
         // recompInfo.
         js_delete(recompInfo);
     }
 
     bool needsRecompileInfo() const {
         return frameKind == ICEntry::Kind_CallVM ||
                frameKind == ICEntry::Kind_WarmupCounter ||
                frameKind == ICEntry::Kind_StackCheck ||
-               frameKind == ICEntry::Kind_EarlyStackCheck ||
                frameKind == ICEntry::Kind_DebugTrap ||
                frameKind == ICEntry::Kind_DebugPrologue ||
                frameKind == ICEntry::Kind_DebugAfterYield ||
                frameKind == ICEntry::Kind_DebugEpilogue;
     }
 
     bool recompiled() const {
         return oldBaselineScript != script->baselineScript();
@@ -306,18 +305,16 @@ ICEntryKindToString(ICEntry::Kind kind)
       case ICEntry::Kind_NonOp:
         return "non-op IC";
       case ICEntry::Kind_CallVM:
         return "callVM";
       case ICEntry::Kind_WarmupCounter:
         return "warmup counter";
       case ICEntry::Kind_StackCheck:
         return "stack check";
-      case ICEntry::Kind_EarlyStackCheck:
-        return "early stack check";
       case ICEntry::Kind_DebugTrap:
         return "debug trap";
       case ICEntry::Kind_DebugPrologue:
         return "debug prologue";
       case ICEntry::Kind_DebugAfterYield:
         return "debug after yield";
       case ICEntry::Kind_DebugEpilogue:
         return "debug epilogue";
@@ -477,17 +474,16 @@ PatchBaselineFramesForDebugMode(JSContex
             // the previous OSR debug info stashed on the frame.
             BaselineDebugModeOSRInfo* info = frame.baselineFrame()->getDebugModeOSRInfo();
             if (info) {
                 MOZ_ASSERT(info->pc == pc);
                 MOZ_ASSERT(info->frameKind == kind);
                 MOZ_ASSERT(kind == ICEntry::Kind_CallVM ||
                            kind == ICEntry::Kind_WarmupCounter ||
                            kind == ICEntry::Kind_StackCheck ||
-                           kind == ICEntry::Kind_EarlyStackCheck ||
                            kind == ICEntry::Kind_DebugTrap ||
                            kind == ICEntry::Kind_DebugPrologue ||
                            kind == ICEntry::Kind_DebugAfterYield ||
                            kind == ICEntry::Kind_DebugEpilogue);
 
                 // We will have allocated a new recompile info, so delete the
                 // existing one.
                 frame.baselineFrame()->deleteDebugModeOSRInfo();
@@ -522,25 +518,23 @@ PatchBaselineFramesForDebugMode(JSContex
                // handled specially only because the warmup counter VM call is
                // part of the prologue, and not tied to an opcode.
                 ICEntry& warmupCountEntry = bl->warmupCountICEntry();
                 recompInfo->resumeAddr = bl->returnAddressForIC(warmupCountEntry);
                 popFrameReg = false;
                 break;
               }
 
-              case ICEntry::Kind_StackCheck:
-              case ICEntry::Kind_EarlyStackCheck: {
+              case ICEntry::Kind_StackCheck: {
                 // Case I above.
                 //
                 // Patching mechanism is identical to a CallVM. This is
                // handled specially only because the stack check VM call is
                // part of the prologue, and not tied to an opcode.
-                bool earlyCheck = kind == ICEntry::Kind_EarlyStackCheck;
-                ICEntry& stackCheckEntry = bl->stackCheckICEntry(earlyCheck);
+                ICEntry& stackCheckEntry = bl->stackCheckICEntry();
                 recompInfo->resumeAddr = bl->returnAddressForIC(stackCheckEntry);
                 popFrameReg = false;
                 break;
               }
 
               case ICEntry::Kind_DebugTrap:
                 // Case C above.
                 //
@@ -1007,18 +1001,17 @@ static inline bool
 IsReturningFromCallVM(BaselineDebugModeOSRInfo* info)
 {
     // Keep this in sync with EmitBranchIsReturningFromCallVM.
     //
     // The stack check entries are returns from a callVM, but have a special
     // kind because they do not exist in a 1-1 relationship with a pc offset.
     return info->frameKind == ICEntry::Kind_CallVM ||
            info->frameKind == ICEntry::Kind_WarmupCounter ||
-           info->frameKind == ICEntry::Kind_StackCheck ||
-           info->frameKind == ICEntry::Kind_EarlyStackCheck;
+           info->frameKind == ICEntry::Kind_StackCheck;
 }
 
 static void
 EmitBranchICEntryKind(MacroAssembler& masm, Register entry, ICEntry::Kind kind, Label* label)
 {
     masm.branch32(MacroAssembler::Equal,
                   Address(entry, offsetof(BaselineDebugModeOSRInfo, frameKind)),
                   Imm32(kind), label);
@@ -1026,17 +1019,16 @@ EmitBranchICEntryKind(MacroAssembler& ma
 
 static void
 EmitBranchIsReturningFromCallVM(MacroAssembler& masm, Register entry, Label* label)
 {
     // Keep this in sync with IsReturningFromCallVM.
     EmitBranchICEntryKind(masm, entry, ICEntry::Kind_CallVM, label);
     EmitBranchICEntryKind(masm, entry, ICEntry::Kind_WarmupCounter, label);
     EmitBranchICEntryKind(masm, entry, ICEntry::Kind_StackCheck, label);
-    EmitBranchICEntryKind(masm, entry, ICEntry::Kind_EarlyStackCheck, label);
 }
 
 static void
 SyncBaselineDebugModeOSRInfo(BaselineFrame* frame, Value* vp, bool rv)
 {
     AutoUnsafeCallWithABI unsafe;
     BaselineDebugModeOSRInfo* info = frame->debugModeOSRInfo();
     MOZ_ASSERT(info);
--- a/js/src/jit/BaselineIC.h
+++ b/js/src/jit/BaselineIC.h
@@ -248,19 +248,16 @@ class ICEntry
         // A fake IC entry for returning from a callVM to after the
         // warmup counter.
         Kind_WarmupCounter,
 
         // A fake IC entry for returning from a callVM to the interrupt
         // handler via the over-recursion check on function entry.
         Kind_StackCheck,
 
-        // As above, but for the early check. See emitStackCheck.
-        Kind_EarlyStackCheck,
-
         // A fake IC entry for returning from DebugTrapHandler.
         Kind_DebugTrap,
 
         // A fake IC entry for returning from a callVM to
         // Debug{Prologue,AfterYield,Epilogue}.
         Kind_DebugPrologue,
         Kind_DebugAfterYield,
         Kind_DebugEpilogue,
--- a/js/src/jit/BaselineJIT.cpp
+++ b/js/src/jit/BaselineJIT.cpp
@@ -737,25 +737,24 @@ BaselineScript::callVMEntryFromPCOffset(
         if (icEntry(i).kind() == ICEntry::Kind_CallVM) {
             return icEntry(i);
         }
     }
     MOZ_CRASH("Invalid PC offset for callVM entry.");
 }
 
 ICEntry&
-BaselineScript::stackCheckICEntry(bool earlyCheck)
+BaselineScript::stackCheckICEntry()
 {
     // The stack check will always be at offset 0, so just do a linear search
     // from the beginning. This is only needed for debug mode OSR, when
     // patching a frame that has invoked a Debugger hook via the interrupt
     // handler via the stack check, which is part of the prologue.
-    ICEntry::Kind kind = earlyCheck ? ICEntry::Kind_EarlyStackCheck : ICEntry::Kind_StackCheck;
     for (size_t i = 0; i < numICEntries() && icEntry(i).pcOffset() == 0; i++) {
-        if (icEntry(i).kind() == kind) {
+        if (icEntry(i).kind() == ICEntry::Kind_StackCheck) {
             return icEntry(i);
         }
     }
     MOZ_CRASH("No stack check ICEntry found.");
 }
 
 ICEntry&
 BaselineScript::warmupCountICEntry()
--- a/js/src/jit/BaselineJIT.h
+++ b/js/src/jit/BaselineJIT.h
@@ -395,17 +395,17 @@ struct BaselineScript
     ICEntry* maybeICEntryFromPCOffset(uint32_t pcOffset,
                                               ICEntry* prevLookedUpEntry);
 
     ICEntry& icEntry(size_t index);
     ICEntry& icEntryFromReturnOffset(CodeOffset returnOffset);
     ICEntry& icEntryFromPCOffset(uint32_t pcOffset);
     ICEntry& icEntryFromPCOffset(uint32_t pcOffset, ICEntry* prevLookedUpEntry);
     ICEntry& callVMEntryFromPCOffset(uint32_t pcOffset);
-    ICEntry& stackCheckICEntry(bool earlyCheck);
+    ICEntry& stackCheckICEntry();
     ICEntry& warmupCountICEntry();
     ICEntry& icEntryFromReturnAddress(uint8_t* returnAddr);
     uint8_t* returnAddressForIC(const ICEntry& ent);
 
     size_t numICEntries() const {
         return icEntries_;
     }
 
--- a/js/src/jit/VMFunctions.cpp
+++ b/js/src/jit/VMFunctions.cpp
@@ -146,100 +146,56 @@ InvokeFromInterpreterStub(JSContext* cx,
         return false;
     }
 
     // Overwrite |this| with the return value.
     argv[0] = rval;
     return true;
 }
 
-#ifdef JS_SIMULATOR
-static bool
-CheckSimulatorRecursionLimitWithExtra(JSContext* cx, uint32_t extra)
-{
-    if (cx->simulator()->overRecursedWithExtra(extra)) {
-        ReportOverRecursed(cx);
-        return false;
-    }
-    return true;
-}
-#endif
-
 bool
 CheckOverRecursed(JSContext* cx)
 {
     // We just failed the jitStackLimit check. There are two possible reasons:
-    //  - jitStackLimit was the real stack limit and we're over-recursed
-    //  - jitStackLimit was set to UINTPTR_MAX by JSRuntime::requestInterrupt
-    //    and we need to call JSRuntime::handleInterrupt.
+    //  1) jitStackLimit was the real stack limit and we're over-recursed
+    //  2) jitStackLimit was set to UINTPTR_MAX by JSContext::requestInterrupt
+    //     and we need to call JSContext::handleInterrupt.
+
+    // This handles 1).
 #ifdef JS_SIMULATOR
-    if (!CheckSimulatorRecursionLimitWithExtra(cx, 0)) {
+    if (cx->simulator()->overRecursedWithExtra(0)) {
+        ReportOverRecursed(cx);
         return false;
     }
 #else
     if (!CheckRecursionLimit(cx)) {
         return false;
     }
 #endif
+
+    // This handles 2).
     gc::MaybeVerifyBarriers(cx);
     return cx->handleInterrupt();
 }
 
-// This function can get called in two contexts.  In the usual context, it's
-// called with earlyCheck=false, after the env chain has been initialized on
-// a baseline frame.  In this case, it's ok to throw an exception, so a failed
-// stack check returns false, and a successful stack check promps a check for
-// an interrupt from the runtime, which may also cause a false return.
-//
-// In the second case, it's called with earlyCheck=true, prior to frame
-// initialization.  An exception cannot be thrown in this instance, so instead
-// an error flag is set on the frame and true returned.
+// This function gets called when the overrecursion check fails for a Baseline
+// frame. This is just like CheckOverRecursed, with an extra check to handle
+// early stack check failures.
 bool
-CheckOverRecursedWithExtra(JSContext* cx, BaselineFrame* frame,
-                           uint32_t extra, uint32_t earlyCheck)
+CheckOverRecursedBaseline(JSContext* cx, BaselineFrame* frame)
 {
-    MOZ_ASSERT_IF(earlyCheck, !frame->overRecursed());
-
-    // See |CheckOverRecursed| above.  This is a variant of that function which
-    // accepts an argument holding the extra stack space needed for the Baseline
-    // frame that's about to be pushed.
-    uint8_t spDummy;
-    uint8_t* checkSp = (&spDummy) - extra;
-    if (earlyCheck) {
-#ifdef JS_SIMULATOR
-        (void)checkSp;
-        if (!CheckSimulatorRecursionLimitWithExtra(cx, extra)) {
-            frame->setOverRecursed();
-        }
-#else
-        if (!CheckRecursionLimitWithStackPointer(cx, checkSp)) {
-            frame->setOverRecursed();
-        }
-#endif
-        return true;
-    }
-
     // The OVERRECURSED flag may have already been set on the frame by an
-    // early over-recursed check.  If so, throw immediately.
+    // early over-recursed check (before pushing the locals).  If so, throw
+    // immediately.
     if (frame->overRecursed()) {
+        ReportOverRecursed(cx);
         return false;
     }
 
-#ifdef JS_SIMULATOR
-    if (!CheckSimulatorRecursionLimitWithExtra(cx, extra)) {
-        return false;
-    }
-#else
-    if (!CheckRecursionLimitWithStackPointer(cx, checkSp)) {
-        return false;
-    }
-#endif
-
-    gc::MaybeVerifyBarriers(cx);
-    return cx->handleInterrupt();
+    return CheckOverRecursed(cx);
 }
 
 JSObject*
 BindVar(JSContext* cx, HandleObject envChain)
 {
     JSObject* obj = envChain;
     while (!obj->isQualifiedVarObj()) {
         obj = obj->enclosingEnvironment();
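A note on reason 2) in CheckOverRecursed above: JSContext::requestInterrupt poisons jitStackLimit with UINTPTR_MAX, so the next emitted stack check fails unconditionally and JIT code reaches handleInterrupt() through this VM call. A toy model of that mechanism, with illustrative names and values only:

    #include <atomic>
    #include <cstdint>

    // Toy model; not SpiderMonkey's actual fields or signatures.
    std::atomic<uintptr_t> jitStackLimit{0x10000}; // some real low-water mark

    void requestInterrupt() {
        // Poison the limit so every emitted "sp >= limit" check fails.
        jitStackLimit.store(UINTPTR_MAX, std::memory_order_relaxed);
    }

    // Mirrors the emitted branch: the check passes while the stack
    // pointer is still above the limit (the stack grows down).
    bool stackCheckPasses(uintptr_t sp) {
        return sp >= jitStackLimit.load(std::memory_order_relaxed);
    }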
--- a/js/src/jit/VMFunctions.h
+++ b/js/src/jit/VMFunctions.h
@@ -679,18 +679,17 @@ InvokeFunction(JSContext* cx, HandleObje
 MOZ_MUST_USE bool
 InvokeFunctionShuffleNewTarget(JSContext* cx, HandleObject obj, uint32_t numActualArgs,
                                uint32_t numFormalArgs, Value* argv, MutableHandleValue rval);
 
 class InterpreterStubExitFrameLayout;
 bool InvokeFromInterpreterStub(JSContext* cx, InterpreterStubExitFrameLayout* frame);
 
 bool CheckOverRecursed(JSContext* cx);
-bool CheckOverRecursedWithExtra(JSContext* cx, BaselineFrame* frame,
-                                uint32_t extra, uint32_t earlyCheck);
+bool CheckOverRecursedBaseline(JSContext* cx, BaselineFrame* frame);
 
 JSObject* BindVar(JSContext* cx, HandleObject scopeChain);
 MOZ_MUST_USE bool
 DefVar(JSContext* cx, HandlePropertyName dn, unsigned attrs, HandleObject scopeChain);
 MOZ_MUST_USE bool
 DefLexical(JSContext* cx, HandlePropertyName dn, unsigned attrs, HandleObject scopeChain);
 MOZ_MUST_USE bool
 DefGlobalLexical(JSContext* cx, HandlePropertyName dn, unsigned attrs);
--- a/js/src/jit/arm64/MacroAssembler-arm64-inl.h
+++ b/js/src/jit/arm64/MacroAssembler-arm64-inl.h
@@ -2133,17 +2133,17 @@ MacroAssemblerCompat::branchStackPtrRhs(
     B(label, Assembler::InvertCondition(cond));
 }
 
 void
 MacroAssemblerCompat::branchStackPtrRhs(Condition cond, AbsoluteAddress lhs, Label* label)
 {
     vixl::UseScratchRegisterScope temps(this);
     const ARMRegister scratch = temps.AcquireX();
-    movePtr(ImmPtr(lhs.addr), scratch.asUnsized());
+    loadPtr(lhs, scratch.asUnsized());
     // Cmp disallows SP as the rhs, so flip the operands and invert the
     // condition.
     Cmp(GetStackPointer64(), scratch);
     B(label, Assembler::InvertCondition(cond));
 }
 
 // If source is a double, load into dest.
 // If source is int32, convert to double and store in dest.
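Aside on the arm64 hunk above: the movePtr-to-loadPtr change corrects the AbsoluteAddress overload of branchStackPtrRhs, which the new emitStackCheck relies on. Previously the helper materialized the address of the jit stack limit and compared SP against that address; now it loads the limit's value. A before/after sketch with explanatory comments added (same code as the hunk):

    // Before: scratch = &jitStackLimit, so the Cmp below compared SP
    // against the address of the limit rather than the limit itself.
    movePtr(ImmPtr(lhs.addr), scratch.asUnsized());

    // After: scratch = jitStackLimit, the limit value loaded from memory,
    // which is what the stack check needs to compare SP against.
    loadPtr(lhs, scratch.asUnsized());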