Bug 1116143 - Patch bare callVMs correctly in debug mode OSR. (r=jandem)
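
Returns from a bare callVM, such as the one implementing the delprop in
the new test, are now patched to resume at the return address of the
callVM itself rather than at the start of the next op, and R0/R1 are
left untouched in that case since the code we return into restores them
if needed. callVMs that do not belong to an op (e.g., in the prologue)
are emitted via callVMNonOp and tagged Kind_NonOpCallVM, so
callVMEntryFromPCOffset can unambiguously find an op's callVM entry.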
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/debug/execution-observability-03.js
@@ -0,0 +1,17 @@
+// Tests that bare callVMs (in the delprop below) are patched correctly.
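+// The deleteProperty trap attaches a Debugger with an onEnterFrame hook to
+// this global, toggling debug mode while the delprop callVM is on the stack.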
+
+var o = {};
+var global = this;
+var p = new Proxy(o, {
+ "deleteProperty": function (target, key) {
+ var g = newGlobal();
+ g.parent = global;
+ g.eval("var dbg = new Debugger(parent); dbg.onEnterFrame = function(frame) {};");
+ return true;
+ }
+});
+function test() {
+ for (var i=0; i<100; i++) {}
+ assertEq(delete p.foo, true);
+}
+test();
--- a/js/src/jit/BaselineCompiler.cpp
+++ b/js/src/jit/BaselineCompiler.cpp
@@ -546,17 +546,17 @@ BaselineCompiler::emitStackCheck(bool ea
pushArg(R1.scratchReg());
CallVMPhase phase = POST_INITIALIZE;
if (earlyCheck)
phase = PRE_INITIALIZE;
else if (needsEarlyStackCheck())
phase = CHECK_OVER_RECURSED;
- if (!callVM(CheckOverRecursedWithExtraInfo, phase))
+ if (!callVMNonOp(CheckOverRecursedWithExtraInfo, phase))
return false;
masm.bind(&skipCall);
return true;
}
typedef bool (*DebugPrologueFn)(JSContext *, BaselineFrame *, jsbytecode *, bool *);
static const VMFunction DebugPrologueInfo = FunctionInfo<DebugPrologueFn>(jit::DebugPrologue);
@@ -621,31 +621,31 @@ BaselineCompiler::initScopeChain()
if (fun->isHeavyweight()) {
// Call into the VM to create a new call object.
prepareVMCall();
masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
pushArg(R0.scratchReg());
- if (!callVM(HeavyweightFunPrologueInfo, phase))
+ if (!callVMNonOp(HeavyweightFunPrologueInfo, phase))
return false;
}
} else {
// ScopeChain pointer in BaselineFrame has already been initialized
// in prologue.
if (script->isForEval() && script->strict()) {
// Strict eval needs its own call object.
prepareVMCall();
masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
pushArg(R0.scratchReg());
- if (!callVM(StrictEvalPrologueInfo, phase))
+ if (!callVMNonOp(StrictEvalPrologueInfo, phase))
return false;
}
}
return true;
}
typedef bool (*InterruptCheckFn)(JSContext *);
--- a/js/src/jit/BaselineDebugModeOSR.cpp
+++ b/js/src/jit/BaselineDebugModeOSR.cpp
@@ -345,18 +345,17 @@ PatchBaselineFramesForDebugMode(JSContex
// Recompile Patching Overview
//
// When toggling debug mode with live baseline scripts on the stack, we
// could have entered the VM via the following ways from the baseline
// script.
//
// Off to On:
// A. From a "can call" stub.
- // B. From a VM call (interrupt handler, debugger statement handler,
- // throw).
+ // B. From a VM call.
// H. From inside HandleExceptionBaseline.
//
// On to Off:
// - All the ways above.
// C. From the debug trap handler.
// D. From the debug prologue.
// E. From the debug epilogue.
//
@@ -468,35 +467,37 @@ PatchBaselineFramesForDebugMode(JSContex
}
// The RecompileInfo must already be allocated so that this
// function may be infallible.
BaselineDebugModeOSRInfo *recompInfo = entry.takeRecompInfo();
bool popFrameReg;
switch (kind) {
- case ICEntry::Kind_CallVM:
+ case ICEntry::Kind_CallVM: {
// Case B above.
//
- // Patching returns from an interrupt handler or the debugger
- // statement handler is similar in that we can resume at the
- // next op.
+ // Patching returns from a VM call. After fixing up the
+ // continuation for unsynced values (the frame register is
+ // popped by the callVM trampoline), we resume at the
+ // return-from-callVM address. The assumption here is that all
+ // callVMs which can trigger debug mode OSR are the *only*
+ // callVMs generated for their respective pc locations in the
+ // baseline JIT code.
//
- // Throws are treated differently, as patching a throw means
- // we are recompiling on-stack scripts from inside an
- // onExceptionUnwind invocation. The resume address must be
- // settled on the throwing pc and not its successor, so that
- // Debugger.Frame may report the correct offset. Note we never
- // actually resume execution there, and it is set for the sake
- // of frame iterators.
- if (!iter.baselineFrame()->isDebuggerHandlingException())
- pc += GetBytecodeLength(pc);
- recompInfo->resumeAddr = bl->nativeCodeForPC(script, pc, &recompInfo->slotInfo);
- popFrameReg = true;
+ // Get the slot info for the next pc, but ignore the code
+ // address. We get the code address from the
+ // return-from-callVM entry instead.
+ (void) bl->maybeNativeCodeForPC(script, pc + GetBytecodeLength(pc),
+ &recompInfo->slotInfo);
+ ICEntry &callVMEntry = bl->callVMEntryFromPCOffset(pcOffset);
+ recompInfo->resumeAddr = bl->returnAddressForIC(callVMEntry);
+ popFrameReg = false;
break;
+ }
case ICEntry::Kind_DebugTrap:
// Case C above.
//
// Debug traps are emitted before each op, so we resume at the
// same op. Calling debug trap handlers is done via a toggled
// call to a thunk (DebugTrapHandler) that takes care of tearing
// down its own stub frame so we don't need to worry about
@@ -901,22 +902,19 @@ HasForcedReturn(BaselineDebugModeOSRInfo
ICEntry::Kind kind = info->frameKind;
// The debug epilogue always checks its resumption value, so we don't need
// to check rv.
if (kind == ICEntry::Kind_DebugEpilogue)
return true;
// |rv| is the value in ReturnReg. If true, in the case of the prologue,
- // debug trap, and debugger statement handler, it means a forced return.
- if (kind == ICEntry::Kind_DebugPrologue ||
- (kind == ICEntry::Kind_CallVM && JSOp(*info->pc) == JSOP_DEBUGGER))
- {
+ // it means a forced return.
+ if (kind == ICEntry::Kind_DebugPrologue)
return rv;
- }
// N.B. The debug trap handler handles its own forced return, so no
// need to deal with it here.
return false;
}
static void
SyncBaselineDebugModeOSRInfo(BaselineFrame *frame, Value *vp, bool rv)
@@ -929,23 +927,29 @@ SyncBaselineDebugModeOSRInfo(BaselineFra
// Load the frame's rval and overwrite the resume address to go to the
// epilogue.
MOZ_ASSERT(R0 == JSReturnOperand);
info->valueR0 = frame->returnValue();
info->resumeAddr = frame->script()->baselineScript()->epilogueEntryAddr();
return;
}
- // Read stack values and make sure R0 and R1 have the right values.
- unsigned numUnsynced = info->slotInfo.numUnsynced();
- MOZ_ASSERT(numUnsynced <= 2);
- if (numUnsynced > 0)
- info->popValueInto(info->slotInfo.topSlotLocation(), vp);
- if (numUnsynced > 1)
- info->popValueInto(info->slotInfo.nextSlotLocation(), vp);
+ // Read stack values and make sure R0 and R1 have the right values if we
+ // aren't returning from a callVM.
+ //
+ // In the case of returning from a callVM, we don't need to restore R0 and
+ // R1 ourselves since we'll return into code that does it if needed.
+ if (info->frameKind != ICEntry::Kind_CallVM) {
+ unsigned numUnsynced = info->slotInfo.numUnsynced();
+ MOZ_ASSERT(numUnsynced <= 2);
+ if (numUnsynced > 0)
+ info->popValueInto(info->slotInfo.topSlotLocation(), vp);
+ if (numUnsynced > 1)
+ info->popValueInto(info->slotInfo.nextSlotLocation(), vp);
+ }
// Scale stackAdjust.
info->stackAdjust *= sizeof(Value);
}
static void
FinishBaselineDebugModeOSR(BaselineFrame *frame)
{
@@ -980,16 +984,63 @@ JitRuntime::getBaselineDebugModeOSRHandl
{
if (!getBaselineDebugModeOSRHandler(cx))
return nullptr;
return popFrameReg
? baselineDebugModeOSRHandler_->raw()
: baselineDebugModeOSRHandlerNoFrameRegPopAddr_;
}
+static void
+EmitBaselineDebugModeOSRHandlerTail(MacroAssembler &masm, Register temp, bool returnFromCallVM)
+{
+ // Save real return address on the stack temporarily.
+ //
+ // If we're returning from a callVM, we don't need to worry about R0 and
+ // R1 but do need to propagate the original ReturnReg value. Otherwise we
+ // need to worry about R0 and R1 but can clobber ReturnReg. Indeed, on
+ // x86, R1 contains ReturnReg.
+ if (returnFromCallVM) {
+ masm.push(ReturnReg);
+ } else {
+ masm.pushValue(Address(temp, offsetof(BaselineDebugModeOSRInfo, valueR0)));
+ masm.pushValue(Address(temp, offsetof(BaselineDebugModeOSRInfo, valueR1)));
+ }
+ masm.push(BaselineFrameReg);
+ masm.push(Address(temp, offsetof(BaselineDebugModeOSRInfo, resumeAddr)));
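+
+ // The stack now holds, from top: resumeAddr, BaselineFrameReg, then the
+ // saved ReturnReg (callVM case) or the saved R1 and R0 values; the pops
+ // below restore them in that order.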
+
+ // Call a stub to free the allocated info.
+ masm.setupUnalignedABICall(1, temp);
+ masm.loadBaselineFramePtr(BaselineFrameReg, temp);
+ masm.passABIArg(temp);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, FinishBaselineDebugModeOSR));
+
+ // Restore saved values.
+ GeneralRegisterSet jumpRegs(GeneralRegisterSet::All());
+ if (returnFromCallVM) {
+ jumpRegs.take(ReturnReg);
+ } else {
+ jumpRegs.take(R0);
+ jumpRegs.take(R1);
+ }
+ jumpRegs.take(BaselineFrameReg);
+ Register target = jumpRegs.takeAny();
+
+ masm.pop(target);
+ masm.pop(BaselineFrameReg);
+ if (returnFromCallVM) {
+ masm.pop(ReturnReg);
+ } else {
+ masm.popValue(R1);
+ masm.popValue(R0);
+ }
+
+ masm.jump(target);
+}
+
JitCode *
JitRuntime::generateBaselineDebugModeOSRHandler(JSContext *cx, uint32_t *noFrameRegPopOffsetOut)
{
MacroAssembler masm(cx);
GeneralRegisterSet regs(GeneralRegisterSet::All());
regs.take(BaselineFrameReg);
regs.take(ReturnReg);
@@ -1000,58 +1051,49 @@ JitRuntime::generateBaselineDebugModeOSR
masm.pop(BaselineFrameReg);
// Not all patched baseline frames are returning from a situation where
// the frame reg is already fixed up.
CodeOffsetLabel noFrameRegPopOffset(masm.currentOffset());
// Record the stack pointer for syncing.
masm.movePtr(StackPointer, syncedStackStart);
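+ // Preserve ReturnReg across the ABI call below; the callVM-return tail
+ // must propagate it as the VM call's return value.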
+ masm.push(ReturnReg);
masm.push(BaselineFrameReg);
// Call a stub to fully initialize the info.
masm.setupUnalignedABICall(3, temp);
masm.loadBaselineFramePtr(BaselineFrameReg, temp);
masm.passABIArg(temp);
masm.passABIArg(syncedStackStart);
masm.passABIArg(ReturnReg);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, SyncBaselineDebugModeOSRInfo));
// Discard stack values depending on how many were unsynced, as we always
- // have a fully synced stack in the recompile handler. See assert in
- // DebugModeOSREntry constructor.
+ // have a fully synced stack in the recompile handler. We arrive here via
+ // a callVM, and prepareVMCall in BaselineCompiler always fully syncs the
+ // stack.
masm.pop(BaselineFrameReg);
+ masm.pop(ReturnReg);
masm.loadPtr(Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfScratchValue()), temp);
masm.addPtr(Address(temp, offsetof(BaselineDebugModeOSRInfo, stackAdjust)), StackPointer);
- // Save real return address on the stack temporarily.
- masm.pushValue(Address(temp, offsetof(BaselineDebugModeOSRInfo, valueR0)));
- masm.pushValue(Address(temp, offsetof(BaselineDebugModeOSRInfo, valueR1)));
- masm.push(BaselineFrameReg);
- masm.push(Address(temp, offsetof(BaselineDebugModeOSRInfo, resumeAddr)));
-
- // Call a stub to free the allocated info.
- masm.setupUnalignedABICall(1, temp);
- masm.loadBaselineFramePtr(BaselineFrameReg, temp);
- masm.passABIArg(temp);
- masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, FinishBaselineDebugModeOSR));
+ // Emit two tails: one for returning from a callVM and one for all other
+ // cases, as the state that needs restoring differs between the two.
+ Label returnFromCallVM, end;
+ masm.branch32(MacroAssembler::Equal,
+ Address(temp, offsetof(BaselineDebugModeOSRInfo, frameKind)),
+ Imm32(ICEntry::Kind_CallVM),
+ &returnFromCallVM);
- // Restore saved values.
- GeneralRegisterSet jumpRegs(GeneralRegisterSet::All());
- jumpRegs.take(R0);
- jumpRegs.take(R1);
- jumpRegs.take(BaselineFrameReg);
- Register target = jumpRegs.takeAny();
-
- masm.pop(target);
- masm.pop(BaselineFrameReg);
- masm.popValue(R1);
- masm.popValue(R0);
-
- masm.jump(target);
+ EmitBaselineDebugModeOSRHandlerTail(masm, temp, /* returnFromCallVM = */ false);
+ masm.jump(&end);
+ masm.bind(&returnFromCallVM);
+ EmitBaselineDebugModeOSRHandlerTail(masm, temp, /* returnFromCallVM = */ true);
+ masm.bind(&end);
Linker linker(masm);
AutoFlushICache afc("BaselineDebugModeOSRHandler");
JitCode *code = linker.newCode<NoGC>(cx, OTHER_CODE);
if (!code)
return nullptr;
noFrameRegPopOffset.fixup(&masm);
--- a/js/src/jit/BaselineIC.h
+++ b/js/src/jit/BaselineIC.h
@@ -217,19 +217,23 @@ class ICEntry
public:
enum Kind {
// A for-op IC entry.
Kind_Op = 0,
// A non-op IC entry.
Kind_NonOp,
- // A fake IC entry for returning from a callVM.
+ // A fake IC entry for returning from a callVM for an op.
Kind_CallVM,
+ // A fake IC entry for returning from a callVM not for an op (e.g., in
+ // the prologue).
+ Kind_NonOpCallVM,
+
// A fake IC entry for returning from DebugTrapHandler.
Kind_DebugTrap,
// A fake IC entry for returning from a callVM to
// Debug{Prologue,Epilogue}.
Kind_DebugPrologue,
Kind_DebugEpilogue,
@@ -294,16 +298,20 @@ class ICEntry
void setForDebugPrologue() {
MOZ_ASSERT(kind() == Kind_CallVM);
setKind(Kind_DebugPrologue);
}
void setForDebugEpilogue() {
MOZ_ASSERT(kind() == Kind_CallVM);
setKind(Kind_DebugEpilogue);
}
+ void setForNonOpCallVM() {
+ MOZ_ASSERT(kind() == Kind_CallVM);
+ setKind(Kind_NonOpCallVM);
+ }
bool hasStub() const {
return firstStub_ != nullptr;
}
ICStub *firstStub() const {
MOZ_ASSERT(hasStub());
return firstStub_;
}
--- a/js/src/jit/BaselineJIT.cpp
+++ b/js/src/jit/BaselineJIT.cpp
@@ -558,34 +558,41 @@ BaselineScript::icEntryFromReturnOffset(
}
uint8_t *
BaselineScript::returnAddressForIC(const ICEntry &ent)
{
return method()->raw() + ent.returnOffset().offset();
}
-ICEntry &
-BaselineScript::icEntryFromPCOffset(uint32_t pcOffset)
+static inline size_t
+ComputeBinarySearchMid(BaselineScript *baseline, uint32_t pcOffset)
{
- // Multiple IC entries can have the same PC offset, but this method only looks for
- // those which have isForOp() set.
size_t bottom = 0;
- size_t top = numICEntries();
+ size_t top = baseline->numICEntries();
size_t mid = bottom + (top - bottom) / 2;
while (mid < top) {
- ICEntry &midEntry = icEntry(mid);
+ ICEntry &midEntry = baseline->icEntry(mid);
if (midEntry.pcOffset() < pcOffset)
bottom = mid + 1;
else if (midEntry.pcOffset() > pcOffset)
top = mid;
else
break;
mid = bottom + (top - bottom) / 2;
}
+ return mid;
+}
+
+ICEntry &
+BaselineScript::icEntryFromPCOffset(uint32_t pcOffset)
+{
+ // Multiple IC entries can have the same PC offset, but this method only looks for
+ // those which have isForOp() set.
+ size_t mid = ComputeBinarySearchMid(this, pcOffset);
// Found an IC entry with a matching PC offset. Search backward, and then
// forward from this IC entry, looking for one with the same PC offset which
// has isForOp() set.
for (size_t i = mid; i < numICEntries() && icEntry(i).pcOffset() == pcOffset; i--) {
if (icEntry(i).isForOp())
return icEntry(i);
}
@@ -614,16 +621,34 @@ BaselineScript::icEntryFromPCOffset(uint
}
MOZ_ASSERT(curEntry->pcOffset() == pcOffset && curEntry->isForOp());
return *curEntry;
}
return icEntryFromPCOffset(pcOffset);
}
+ICEntry &
+BaselineScript::callVMEntryFromPCOffset(uint32_t pcOffset)
+{
+ // Like icEntryFromPCOffset, but only looks for the fake Kind_CallVM
+ // ICEntries inserted for ops' VM calls.
+ size_t mid = ComputeBinarySearchMid(this, pcOffset);
+
+ for (size_t i = mid; i < numICEntries() && icEntry(i).pcOffset() == pcOffset; i--) {
+ if (icEntry(i).kind() == ICEntry::Kind_CallVM)
+ return icEntry(i);
+ }
+ for (size_t i = mid+1; i < numICEntries() && icEntry(i).pcOffset() == pcOffset; i++) {
+ if (icEntry(i).kind() == ICEntry::Kind_CallVM)
+ return icEntry(i);
+ }
+ MOZ_CRASH("Invalid PC offset for callVM entry.");
+}
+
ICEntry *
BaselineScript::maybeICEntryFromReturnAddress(uint8_t *returnAddr)
{
MOZ_ASSERT(returnAddr > method_->raw());
MOZ_ASSERT(returnAddr < method_->raw() + method_->instructionsSize());
CodeOffsetLabel offset(returnAddr - method_->raw());
return maybeICEntryFromReturnOffset(offset);
}
--- a/js/src/jit/BaselineJIT.h
+++ b/js/src/jit/BaselineJIT.h
@@ -341,16 +341,17 @@ struct BaselineScript
return method()->raw() <= addr && addr <= method()->raw() + method()->instructionsSize();
}
ICEntry &icEntry(size_t index);
ICEntry *maybeICEntryFromReturnOffset(CodeOffsetLabel returnOffset);
ICEntry &icEntryFromReturnOffset(CodeOffsetLabel returnOffset);
ICEntry &icEntryFromPCOffset(uint32_t pcOffset);
ICEntry &icEntryFromPCOffset(uint32_t pcOffset, ICEntry *prevLookedUpEntry);
+ ICEntry &callVMEntryFromPCOffset(uint32_t pcOffset);
ICEntry *maybeICEntryFromReturnAddress(uint8_t *returnAddr);
ICEntry &icEntryFromReturnAddress(uint8_t *returnAddr);
uint8_t *returnAddressForIC(const ICEntry &ent);
size_t numICEntries() const {
return icEntries_;
}
--- a/js/src/jit/shared/BaselineCompiler-shared.h
+++ b/js/src/jit/shared/BaselineCompiler-shared.h
@@ -135,16 +135,23 @@ class BaselineCompilerShared
enum CallVMPhase {
POST_INITIALIZE,
PRE_INITIALIZE,
CHECK_OVER_RECURSED
};
bool callVM(const VMFunction &fun, CallVMPhase phase=POST_INITIALIZE);
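+
+ // Like callVM, but tags the fake ICEntry added for this call as
+ // Kind_NonOpCallVM, so debug mode OSR patching never mistakes it for an
+ // op's callVM entry.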
+ bool callVMNonOp(const VMFunction &fun, CallVMPhase phase=POST_INITIALIZE) {
+ if (!callVM(fun, phase))
+ return false;
+ icEntries_.back().setForNonOpCallVM();
+ return true;
+ }
+
public:
BytecodeAnalysis &analysis() {
return analysis_;
}
void setCompileDebugInstrumentation() {
compileDebugInstrumentation_ = true;
}