--- a/js/src/assembler/assembler/X86Assembler.h
+++ b/js/src/assembler/assembler/X86Assembler.h
@@ -378,17 +378,19 @@ public:
{
js::JaegerSpew(js::JSpew_Insns,
IPFX "pop %s\n", MAYBE_PAD, nameIReg(reg));
m_formatter.oneByteOp(OP_POP_EAX, reg);
}
void push_i32(int imm)
{
- FIXME_INSN_PRINTING;
+ js::JaegerSpew(js::JSpew_Insns,
+ IPFX "pushl %s$0x%x\n", MAYBE_PAD,
+ PRETTY_PRINT_OFFSET(imm));
m_formatter.oneByteOp(OP_PUSH_Iz);
m_formatter.immediate32(imm);
}
void push_m(int offset, RegisterID base)
{
js::JaegerSpew(js::JSpew_Insns,
IPFX "push %s0x%x(%s)\n", MAYBE_PAD,
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/jaeger/bug563000/trap-from-add-inline.js
@@ -0,0 +1,12 @@
+setDebug(true);
+x = "notset";
+function main() {
+  /* Trap the JSOP_STOP in main, set from inside a's valueOf. */
+ a = { valueOf: function () { trap(main, 38, "success()"); } };
+ a + "";
+ x = "failure";
+}
+function success() { x = "success"; }
+
+main();
+assertEq(x, "success");
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/jaeger/bug563000/trap-from-add-ool.js
@@ -0,0 +1,14 @@
+setDebug(true);
+x = "notset";
+function main() {
+  /* Trap the JSOP_STOP in main, set from inside a's valueOf. */
+ a = { valueOf: function () { trap(main, 59, "success()"); } };
+ b = "";
+ eval();
+ a + b;
+ x = "failure";
+}
+function success() { x = "success"; }
+
+main();
+assertEq(x, "success");
--- a/js/src/methodjit/BaseAssembler.h
+++ b/js/src/methodjit/BaseAssembler.h
@@ -168,24 +168,39 @@ class Assembler : public ValueAssembler
#elif defined(JS_CPU_ARM)
static const RegisterID ClobberInCall = JSC::ARMRegisters::r2;
#endif
/* :TODO: OOM */
Label startLabel;
Vector<CallPatch, 64, SystemAllocPolicy> callPatches;
+ // List and count of registers that will be saved and restored across a call.
+ uint32 saveCount;
+ RegisterID savedRegs[TotalRegisters];
+
+ // Calling convention used by the currently in-progress call.
+ Registers::CallConvention callConvention;
+
+ // Amount of stack space reserved for the currently in-progress call. This
+ // includes alignment and parameters.
+ uint32 stackAdjust;
+
+ // Debug flag to make sure calls do not nest.
+#ifdef DEBUG
+ bool callIsAligned;
+#endif
+
public:
-#if (defined(JS_NO_FASTCALL) && defined(JS_CPU_X86)) || defined(_WIN64)
- // If there is no fast call, we need to add esp by 8 after the call.
- // This callLabel is to record the Label exactly after the call.
- Label callLabel;
+ Assembler()
+ : callPatches(SystemAllocPolicy()),
+ saveCount(0)
+#ifdef DEBUG
+ , callIsAligned(false)
#endif
- Assembler()
- : callPatches(SystemAllocPolicy())
{
startLabel = label();
}
/* Total number of floating-point registers. */
static const uint32 TotalFPRegisters = FPRegisters::TotalFPRegisters;
/* Register pair storing returned type/data for calls. */
@@ -243,20 +258,18 @@ static const JSC::MacroAssembler::Regist
} else {
m_assembler.movd_rr(lo, fpReg);
m_assembler.movd_rr(hi, FPRegisters::Temp0);
m_assembler.unpcklps_rr(FPRegisters::Temp0, fpReg);
}
}
#endif
- /*
- * Prepares for a stub call.
- */
- void * getCallTarget(void *fun) {
+    // Returns the (possibly veneered) target for a stub call.
+ void *getCallTarget(void *fun) {
#ifdef JS_CPU_ARM
/*
* Insert a veneer for ARM to allow it to catch exceptions. There is no
* reliable way to determine the location of the return address on the
* stack, so it cannot be hijacked.
*
* :TODO: It wouldn't surprise me if GCC always pushes LR first. In that
* case, this looks like the x86-style call, and we can hijack the stack
@@ -278,114 +291,247 @@ static const JSC::MacroAssembler::Regist
* location on the stack can hijack C++'s return mechanism by overwriting
* that address, so a veneer is not required.
*/
void *pfun = fun;
#endif
return pfun;
}
+ // Save all registers in the given mask.
+ void saveRegs(uint32 volatileMask) {
+ // Only one use per call.
+ JS_ASSERT(saveCount == 0);
+        // Must save registers before storing arguments or setting up the call.
+ JS_ASSERT(!callIsAligned);
-#define STUB_CALL_TYPE(type) \
- Call stubCall(type stub, jsbytecode *pc, uint32 fd) { \
- return stubCall(JS_FUNC_TO_DATA_PTR(void *, stub), \
- pc, fd); \
+ Registers set(volatileMask);
+ while (!set.empty()) {
+ JS_ASSERT(saveCount < TotalRegisters);
+
+ RegisterID reg = set.takeAnyReg();
+ savedRegs[saveCount++] = reg;
+ push(reg);
+ }
+ }
+
+ static const uint32 StackAlignment = 16;
+
+ static inline uint32 alignForCall(uint32 stackBytes) {
+#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
+        // StackAlignment is a power of two, so the compiler lowers % to a
+        // bitwise AND. (16 - x % 16) gives the padding needed to reach the
+        // next 16-byte boundary; the outer % 16 maps the "already aligned"
+        // case (a result of 16) back to 0.
+ return (StackAlignment - (stackBytes % StackAlignment)) % StackAlignment;
+#else
+ return 0;
+#endif
+ }
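+
+    // Worked example of the arithmetic above (illustrative values), with
+    // StackAlignment == 16:
+    //   alignForCall(0)  == (16 - 0) % 16 == 0    // already aligned
+    //   alignForCall(8)  == (16 - 8) % 16 == 8    // pad 8 up to 16
+    //   alignForCall(24) == (16 - 8) % 16 == 8    // pad 24 up to 32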
+
+ // Some platforms require stack manipulation before making stub calls.
+ // When using THROW/V, the return address is replaced, meaning the
+    // stack de-adjustment will not have occurred. JaegerThrowpoline accounts
+ // for this. For stub calls, which are always invoked as if they use
+ // two parameters, the stack adjustment is constant.
+ //
+ // When using callWithABI() manually, for example via an IC, it might
+ // be necessary to jump directly to JaegerThrowpoline. In this case,
+ // the constant is provided here in order to appropriately adjust the
+ // stack.
+#ifdef _WIN64
+ static const uint32 ReturnStackAdjustment = 32;
+#elif defined(JS_CPU_X86) && defined(JS_NO_FASTCALL)
+ static const uint32 ReturnStackAdjustment = 16;
+#else
+ static const uint32 ReturnStackAdjustment = 0;
+#endif
+
+ void throwInJIT() {
+ if (ReturnStackAdjustment)
+ subPtr(Imm32(ReturnStackAdjustment), stackPointerRegister);
+ move(ImmPtr(JS_FUNC_TO_DATA_PTR(void *, JaegerThrowpoline)), Registers::ReturnReg);
+ jump(Registers::ReturnReg);
+ }
+
+ // Windows x64 requires extra space in between calls.
+#ifdef _WIN64
+ static const uint32 ShadowStackSpace = 32;
+#else
+ static const uint32 ShadowStackSpace = 0;
+#endif
+
+    // Prepare the stack for a call sequence. This must be called AFTER all
+    // volatile regs have been saved, and BEFORE storeArg() is used. The stack
+    // is assumed to be 16-byte aligned, modulo any pushes that occurred via
+    // saveRegs().
+ void setupABICall(Registers::CallConvention convention, uint32 generalArgs) {
+ JS_ASSERT(!callIsAligned);
+
+ uint32 numArgRegs = Registers::numArgRegs(convention);
+ uint32 pushCount = (generalArgs > numArgRegs)
+ ? generalArgs - numArgRegs
+ : 0;
+
+ // Adjust the stack for alignment and parameters all at once.
+ stackAdjust = (pushCount + saveCount) * sizeof(void *);
+ stackAdjust += alignForCall(stackAdjust);
+
+#ifdef _WIN64
+ // Windows x64 ABI requires 32 bytes of "shadow space" for the callee
+ // to spill its parameters.
+ stackAdjust += ShadowStackSpace;
+#endif
+
+ if (stackAdjust)
+ subPtr(Imm32(stackAdjust), stackPointerRegister);
+
+ callConvention = convention;
+#ifdef DEBUG
+ callIsAligned = true;
+#endif
+ }
+
+    // This is an internal function, only for use inside a setupABICall(),
+    // callWithABI() sequence, and only for arguments known to NOT fit in
+    // registers (enforced by the assertion below).
+ Address addressOfArg(uint32 i) {
+ uint32 numArgRegs = Registers::numArgRegs(callConvention);
+ JS_ASSERT(i >= numArgRegs);
+
+        // Note that the shadow space is reserved for the callee to spill its
+        // parameters into, so the caller must skip past it when writing
+        // stack arguments.
+ int32 spOffset = ((i - numArgRegs) * sizeof(void *)) + ShadowStackSpace;
+ return Address(stackPointerRegister, spOffset);
+ }
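+
+    // Worked example (hypothetical convention with two register args and
+    // ShadowStackSpace == 32): argument 2 is the first stack argument and
+    // lives at addressOfArg(2) == Address(sp, (2 - 2) * sizeof(void *) + 32),
+    // i.e. immediately above the shadow space.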
+
+    // Store argument |i| for the current call, either into its argument
+    // register or into its reserved stack slot.
+ void storeArg(uint32 i, RegisterID reg) {
+ JS_ASSERT(callIsAligned);
+ RegisterID to;
+ if (Registers::regForArg(callConvention, i, &to)) {
+ if (reg != to)
+ move(reg, to);
+ } else {
+ storePtr(reg, addressOfArg(i));
+ }
+ }
+
+ void storeArg(uint32 i, Imm32 imm) {
+ JS_ASSERT(callIsAligned);
+ RegisterID to;
+ if (Registers::regForArg(callConvention, i, &to))
+ move(imm, to);
+ else
+ store32(imm, addressOfArg(i));
+ }
+
+    // High-level call helper, given a function pointer. setupABICall() must
+    // have been called beforehand (fixing the calling convention), and each
+    // numbered argument must have been stored with storeArg().
+ //
+ // After callWithABI(), the call state is reset, so a new call may begin.
+ Call callWithABI(void *fun) {
+ JS_ASSERT(callIsAligned);
+
+ Call cl = call();
+ callPatches.append(CallPatch(cl, fun));
+
+ if (stackAdjust)
+ addPtr(Imm32(stackAdjust), stackPointerRegister);
+
+#ifdef DEBUG
+ callIsAligned = false;
+#endif
+ return cl;
+ }
+
+ // Restore registers after a call.
+ void restoreRegs() {
+ // Note that saveCount will safely decrement back to 0.
+ while (saveCount)
+ pop(savedRegs[--saveCount]);
+ }
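+
+    // Taken together, a typical call sequence is sketched below (the stub
+    // target |someStub| and the argument values are illustrative):
+    //
+    //   masm.saveRegs(Registers::TempRegs);        // spill volatile regs
+    //   masm.setupABICall(Registers::FastCall, 2); // align + reserve stack
+    //   masm.storeArg(0, Registers::ArgReg0);      // register or stack slot
+    //   masm.storeArg(1, Imm32(0));
+    //   Call c = masm.callWithABI(someStub);       // call; pops stackAdjust
+    //   masm.restoreRegs();                        // pop saved regs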
+
+    // Wrap AbstractMacroAssembler::getLinkerCallReturnOffset, which is protected.
+ unsigned callReturnOffset(Call call) {
+ return getLinkerCallReturnOffset(call);
+ }
+
+#define STUB_CALL_TYPE(type) \
+ Call callWithVMFrame(type stub, jsbytecode *pc, uint32 fd) { \
+ return fallibleVMCall(JS_FUNC_TO_DATA_PTR(void *, stub), pc, fd); \
}
STUB_CALL_TYPE(JSObjStub);
STUB_CALL_TYPE(VoidPtrStubUInt32);
STUB_CALL_TYPE(VoidStubUInt32);
STUB_CALL_TYPE(VoidStub);
#undef STUB_CALL_TYPE
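+    // For instance, STUB_CALL_TYPE(VoidStub) expands to:
+    //   Call callWithVMFrame(VoidStub stub, jsbytecode *pc, uint32 fd) {
+    //       return fallibleVMCall(JS_FUNC_TO_DATA_PTR(void *, stub), pc, fd);
+    //   }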
- Call stubCallImpl(void *ptr, jsbytecode *pc, int32 frameDepth) {
- JS_STATIC_ASSERT(ClobberInCall != Registers::ArgReg1);
-
- void *pfun = getCallTarget(ptr);
-
- /* PC -> regs->pc :( */
- storePtr(ImmPtr(pc),
- FrameAddress(offsetof(VMFrame, regs) + offsetof(JSFrameRegs, pc)));
-
- /* Store sp */
- fixScriptStack(frameDepth);
-
- /* VMFrame -> ArgReg0 */
- setupVMFrame();
-
- return wrapCall(pfun);
- }
-
- Call stubCall(void *ptr, jsbytecode *pc, uint32 frameDepth) {
- JS_ASSERT(frameDepth <= INT32_MAX);
- return stubCallImpl(ptr, pc, (int32)frameDepth);
- }
-
- Call stubCallWithDynamicDepth(void *ptr, jsbytecode *pc) {
- return stubCallImpl(ptr, pc, -1);
- }
-
- Call wrapCall(void *pfun) {
-#if defined(JS_NO_FASTCALL) && defined(JS_CPU_X86)
- push(Registers::ArgReg1);
- push(Registers::ArgReg0);
-#elif defined(_WIN64)
- subPtr(JSC::MacroAssembler::Imm32(32),
- JSC::MacroAssembler::stackPointerRegister);
-#endif
- Call cl = call(pfun);
-#if defined(JS_NO_FASTCALL) && defined(JS_CPU_X86)
- callLabel = label();
- addPtr(JSC::MacroAssembler::Imm32(8),
- JSC::MacroAssembler::stackPointerRegister);
-#elif defined(_WIN64)
- callLabel = label();
- addPtr(JSC::MacroAssembler::Imm32(32),
- JSC::MacroAssembler::stackPointerRegister);
-#endif
- return cl;
- }
-
- void fixScriptStack(int32 frameDepth) {
- /*
- * sp = fp->slots() + frameDepth
- * regs->sp = sp
- *
- * |frameDepth < 0| implies ic::SplatApplyArgs has been called which
- * means regs.sp has already been set in the VMFrame.
- */
+ void setupInfallibleVMFrame(int32 frameDepth) {
+        // |frameDepth < 0| implies ic::SplatApplyArgs has been called, which
+        // means regs.sp has already been set in the VMFrame.
if (frameDepth >= 0) {
+ // sp = fp->slots() + frameDepth
+ // regs->sp = sp
addPtr(Imm32(sizeof(JSStackFrame) + frameDepth * sizeof(jsval)),
JSFrameReg,
ClobberInCall);
storePtr(ClobberInCall, FrameAddress(offsetof(VMFrame, regs.sp)));
}
- /* regs->fp = fp */
- storePtr(JSFrameReg, FrameAddress(offsetof(VMFrame, regs.fp)));
- }
-
- void setupVMFrame() {
+        // The JIT has already loaded ArgReg1, and the code above is
+        // guaranteed not to clobber it. Move ArgReg0 into place now;
+        // setupFallibleVMFrame will not clobber it either.
move(MacroAssembler::stackPointerRegister, Registers::ArgReg0);
}
- Call call() {
- return JSC::MacroAssembler::call();
+ void setupFallibleVMFrame(jsbytecode *pc, int32 frameDepth) {
+ setupInfallibleVMFrame(frameDepth);
+
+ /* regs->fp = fp */
+ storePtr(JSFrameReg, FrameAddress(offsetof(VMFrame, regs.fp)));
+
+ /* PC -> regs->pc :( */
+ storePtr(ImmPtr(pc),
+ FrameAddress(offsetof(VMFrame, regs) + offsetof(JSFrameRegs, pc)));
+ }
+
+    // An infallible VM call is a stub call (taking a VMFrame & and one
+    // optional parameter) that does not need |pc| and |fp| updated, since
+    // the call is guaranteed not to fail. However, |sp| is always coherent.
+ Call infallibleVMCall(void *ptr, int32 frameDepth) {
+ setupInfallibleVMFrame(frameDepth);
+ return wrapVMCall(ptr);
}
- Call call(void *fun) {
- Call cl = JSC::MacroAssembler::call();
- // XXX
- callPatches.append(CallPatch(cl, fun));
- return cl;
+ // A fallible VM call is a stub call (taking a VMFrame & and one optional
+ // parameter) that needs the entire VMFrame to be coherent, meaning that
+ // |pc| and |fp| are guaranteed to be up-to-date.
+ Call fallibleVMCall(void *ptr, jsbytecode *pc, int32 frameDepth) {
+ setupFallibleVMFrame(pc, frameDepth);
+ return wrapVMCall(ptr);
}
- Call call(RegisterID reg) {
- return MacroAssembler::call(reg);
+ Call wrapVMCall(void *ptr) {
+ JS_ASSERT(!saveCount);
+ JS_ASSERT(!callIsAligned);
+
+ // Every stub call has at most two arguments.
+ setupABICall(Registers::FastCall, 2);
+
+        // On x86 with JS_NO_FASTCALL there are no argument registers, so
+        // these two stores write into the stack space reserved by
+        // setupABICall(), which callWithABI() releases after the call.
+        // Otherwise, the arguments simply land in the convention's
+        // argument registers.
+ storeArg(0, Registers::ArgReg0);
+ storeArg(1, Registers::ArgReg1);
+
+ return callWithABI(getCallTarget(ptr));
}
void finalize(JSC::LinkBuffer &linker) {
for (size_t i = 0; i < callPatches.length(); i++) {
CallPatch &patch = callPatches[i];
linker.link(patch.call, JSC::FunctionPtr(patch.fun));
}
}
--- a/js/src/methodjit/Compiler.cpp
+++ b/js/src/methodjit/Compiler.cpp
@@ -61,33 +61,16 @@
#include "jsautooplen.h"
using namespace js;
using namespace js::mjit;
#if defined(JS_POLYIC) || defined(JS_MONOIC)
using namespace js::mjit::ic;
#endif
-/* This macro should be used after stub calls (which automatically set callLabel). */
-#define ADD_CALLSITE(stub) \
- if (debugMode) addCallSite(__LINE__, (stub))
-
-/* For custom calls/jumps, this macro sets callLabel before adding the callsite. */
-#if (defined(JS_NO_FASTCALL) && defined(JS_CPU_X86)) || defined(_WIN64)
-# define ADD_NON_STUB_CALLSITE(stub) \
- if (stub) \
- stubcc.masm.callLabel = stubcc.masm.label() \
- else \
- masm.callLabel = masm.label(); \
- ADD_CALLSITE(stub)
-#else
-# define ADD_NON_STUB_CALLSITE(stub) \
- ADD_CALLSITE(stub)
-#endif
-
#define RETURN_IF_OOM(retval) \
JS_BEGIN_MACRO \
if (oomInVector || masm.oom() || stubcc.masm.oom()) { \
js_ReportOutOfMemory(cx); \
return retval; \
} \
JS_END_MACRO
@@ -104,34 +87,35 @@ mjit::Compiler::Compiler(JSContext *cx,
fp(fp),
script(fp->script()),
scopeChain(&fp->scopeChain()),
globalObj(scopeChain->getGlobal()),
fun(fp->isFunctionFrame() && !fp->isEvalFrame()
? fp->fun()
: NULL),
isConstructing(fp->isConstructing()),
- analysis(NULL), jumpMap(NULL), frame(cx, script, masm),
+ analysis(NULL), jumpMap(NULL), savedTraps(NULL),
+ frame(cx, script, masm),
branchPatches(CompilerAllocPolicy(cx, *thisFromCtor())),
#if defined JS_MONOIC
mics(CompilerAllocPolicy(cx, *thisFromCtor())),
callICs(CompilerAllocPolicy(cx, *thisFromCtor())),
equalityICs(CompilerAllocPolicy(cx, *thisFromCtor())),
traceICs(CompilerAllocPolicy(cx, *thisFromCtor())),
#endif
#if defined JS_POLYIC
pics(CompilerAllocPolicy(cx, *thisFromCtor())),
getElemICs(CompilerAllocPolicy(cx, *thisFromCtor())),
setElemICs(CompilerAllocPolicy(cx, *thisFromCtor())),
#endif
callPatches(CompilerAllocPolicy(cx, *thisFromCtor())),
callSites(CompilerAllocPolicy(cx, *thisFromCtor())),
doubleList(CompilerAllocPolicy(cx, *thisFromCtor())),
stubcc(cx, *thisFromCtor(), frame, script),
- debugMode(cx->compartment->debugMode),
+ debugMode_(cx->compartment->debugMode),
#if defined JS_TRACER
addTraceHints(cx->traceJitEnabled),
#endif
oomInVector(false),
applyTricks(NoApplyTricks)
{
}
@@ -206,17 +190,17 @@ mjit::Compiler::performCompilation(JITSc
Profiler prof;
prof.start();
#endif
/* Initialize PC early so stub calls in the prologue can be fallible. */
PC = script->code;
#ifdef JS_METHODJIT
- script->debugMode = debugMode;
+ script->debugMode = debugMode();
#endif
for (uint32 i = 0; i < script->nClosedVars; i++)
frame.setClosedVar(script->getClosedVar(i));
CHECK_STATUS(generatePrologue());
CHECK_STATUS(generateMethod());
CHECK_STATUS(generateEpilogue());
@@ -233,16 +217,17 @@ mjit::Compiler::performCompilation(JITSc
return Compile_Okay;
}
#undef CHECK_STATUS
mjit::Compiler::~Compiler()
{
cx->free(jumpMap);
+ cx->free(savedTraps);
}
CompileStatus JS_NEVER_INLINE
mjit::TryCompile(JSContext *cx, JSStackFrame *fp)
{
JS_ASSERT(cx->fp() == fp);
#if JS_HAS_SHARP_VARS
@@ -254,16 +239,32 @@ mjit::TryCompile(JSContext *cx, JSStackF
if (fp->isConstructing() && !fp->script()->nslots)
fp->script()->nslots++;
Compiler cc(cx, fp);
return cc.compile();
}
+bool
+mjit::Compiler::loadOldTraps(const Vector<CallSite> &sites)
+{
+ savedTraps = (bool *)cx->calloc(sizeof(bool) * script->length);
+ if (!savedTraps)
+ return false;
+
+ for (size_t i = 0; i < sites.length(); i++) {
+ const CallSite &site = sites[i];
+ if (site.isTrap())
+ savedTraps[site.pcOffset] = true;
+ }
+
+ return true;
+}
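+
+// savedTraps[] marks bytecode offsets that carried a trap callsite in the
+// previously compiled code. generateMethod() consults it so that recompiled
+// code still exposes a matching return address at those offsets (trap
+// callsites carry CallSite::MAGIC_TRAP_ID; see generateMethod() below).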
+
CompileStatus
mjit::Compiler::generatePrologue()
{
invokeLabel = masm.label();
/*
* If there is no function, then this can only be called via JaegerShot(),
* which expects an existing frame to be initialized like the interpreter.
@@ -295,17 +296,17 @@ mjit::Compiler::generatePrologue()
stubcc.crossJump(argMatch, fastPath);
if (JSParamReg_Argc != Registers::ArgReg1)
stubcc.masm.move(JSParamReg_Argc, Registers::ArgReg1);
/* Slow path - call the arity check function. Returns new fp. */
stubcc.masm.storePtr(ImmPtr(fun), Address(JSFrameReg, JSStackFrame::offsetOfExec()));
stubcc.masm.storePtr(JSFrameReg, FrameAddress(offsetof(VMFrame, regs.fp)));
- stubcc.call(stubs::FixupArity);
+ OOL_STUBCALL(stubs::FixupArity);
stubcc.masm.move(Registers::ReturnReg, JSFrameReg);
stubcc.crossJump(stubcc.masm.jump(), fastPath);
}
/*
* Guard that there is enough stack space. Note we include the size of
* a second frame, to ensure we can create a frame from call sites.
*/
@@ -313,17 +314,17 @@ mjit::Compiler::generatePrologue()
JSFrameReg,
Registers::ReturnReg);
Jump stackCheck = masm.branchPtr(Assembler::AboveOrEqual, Registers::ReturnReg,
FrameAddress(offsetof(VMFrame, stackLimit)));
/* If the stack check fails... */
{
stubcc.linkExitDirect(stackCheck, stubcc.masm.label());
- stubcc.call(stubs::HitStackQuota);
+ OOL_STUBCALL(stubs::HitStackQuota);
stubcc.crossJump(stubcc.masm.jump(), masm.label());
}
/*
* Set locals to undefined, as in initCallFrameLatePrologue.
* Skip locals which aren't closed and are known to be defined before used,
* :FIXME: bug 604541: write undefined if we might be using the tracer, so it works.
*/
@@ -332,17 +333,17 @@ mjit::Compiler::generatePrologue()
Address local(JSFrameReg, sizeof(JSStackFrame) + i * sizeof(Value));
masm.storeValue(UndefinedValue(), local);
}
}
/* Create the call object. */
if (fun->isHeavyweight()) {
prepareStubCall(Uses(0));
- stubCall(stubs::GetCallObject);
+ INLINE_STUBCALL(stubs::GetCallObject);
}
j.linkTo(masm.label(), &masm);
if (analysis->usesScopeChain() && !fun->isHeavyweight()) {
/*
* Load the scope chain into the frame if necessary. The scope chain
* is always set for global and eval frames, and will have been set by
@@ -356,18 +357,18 @@ mjit::Compiler::generatePrologue()
masm.storePtr(t0, Address(JSFrameReg, JSStackFrame::offsetOfScopeChain()));
hasScope.linkTo(masm.label(), &masm);
}
}
if (isConstructing)
constructThis();
- if (debugMode || Probes::callTrackingActive(cx))
- stubCall(stubs::EnterScript);
+ if (debugMode() || Probes::callTrackingActive(cx))
+ INLINE_STUBCALL(stubs::EnterScript);
return Compile_Okay;
}
CompileStatus
mjit::Compiler::generateEpilogue()
{
return Compile_Okay;
@@ -747,28 +748,27 @@ mjit::Compiler::finishThisUp(JITScript *
stubcc.masm.finalize(stubCode);
JSC::ExecutableAllocator::makeExecutable(result, masm.size() + stubcc.size());
JSC::ExecutableAllocator::cacheFlush(result, masm.size() + stubcc.size());
/* Build the table of call sites. */
jit->nCallSites = callSites.length();
if (callSites.length()) {
- CallSite *callSiteList = (CallSite *)cursor;
+ jit->callSites = (CallSite *)cursor;
cursor += sizeof(CallSite) * callSites.length();
for (size_t i = 0; i < callSites.length(); i++) {
- if (callSites[i].stub)
- callSiteList[i].codeOffset = masm.size() + stubcc.masm.distanceOf(callSites[i].location);
- else
- callSiteList[i].codeOffset = masm.distanceOf(callSites[i].location);
- callSiteList[i].pcOffset = callSites[i].pc - script->code;
- callSiteList[i].id = callSites[i].id;
+ CallSite &to = jit->callSites[i];
+ InternalCallSite &from = callSites[i];
+ uint32 codeOffset = from.ool
+ ? masm.size() + from.returnOffset
+ : from.returnOffset;
+ to.initialize(codeOffset, from.pc - script->code, from.id);
}
- jit->callSites = callSiteList;
} else {
jit->callSites = NULL;
}
JS_ASSERT(size_t(cursor - (uint8*)jit) == totalBytes);
jit->nmap = nmap;
*jitp = jit;
@@ -831,36 +831,55 @@ mjit::Compiler::generateMethod()
jumpMap[uint32(PC - script->code)] = masm.label();
SPEW_OPCODE();
JS_ASSERT(frame.stackDepth() == opinfo->stackDepth);
if (trap) {
prepareStubCall(Uses(0));
masm.move(ImmPtr(PC), Registers::ArgReg1);
- stubCall(stubs::Trap);
+ Call cl = emitStubCall(JS_FUNC_TO_DATA_PTR(void *, stubs::Trap));
+ InternalCallSite site(masm.callReturnOffset(cl), PC,
+ CallSite::MAGIC_TRAP_ID, true, false);
+ addCallSite(site);
+ } else if (savedTraps && savedTraps[PC - script->code]) {
+ // Normally when we patch return addresses, we have generated the
+ // same exact code at that site. For example, patching a stub call's
+ // return address will resume at the same stub call.
+ //
+ // In the case we're handling here, we could potentially be
+ // recompiling to remove a trap, and therefore we won't generate
+ // a call to the trap. However, we could be re-entering from that
+ // trap. The callsite will be missing, and fixing the stack will
+ // fail! Worse, we can't just put a label here, because on some
+ // platforms the stack needs to be adjusted when returning from
+ // the old trap call.
+ //
+ // To deal with this, we add a small bit of code in the OOL path
+ // that will adjust the stack and jump back into the script.
+ // Note that this uses MAGIC_TRAP_ID, which is necessary for
+ // repatching to detect the callsite as identical to the return
+ // address.
+ //
+ // Unfortunately, this means that if a bytecode is ever trapped,
+ // we will always generate a CallSite (either Trapped or not) for
+ // every debug recompilation of the script thereafter. The reason
+ // is that MAGIC_TRAP_ID callsites always propagate to the next
+ // recompilation. That's okay, and not worth fixing - it's a small
+ // amount of memory.
+ uint32 offset = stubcc.masm.distanceOf(stubcc.masm.label());
+ if (Assembler::ReturnStackAdjustment) {
+ stubcc.masm.addPtr(Imm32(Assembler::ReturnStackAdjustment),
+ Assembler::stackPointerRegister);
+ }
+ stubcc.crossJump(stubcc.masm.jump(), masm.label());
+
+ InternalCallSite site(offset, PC, CallSite::MAGIC_TRAP_ID, false, true);
+ addCallSite(site);
}
-#if defined(JS_NO_FASTCALL) && defined(JS_CPU_X86)
- // In case of no fast call, when we change the return address,
- // we need to make sure add esp by 8. For normal call, we need
- // to make sure the esp is not changed.
- else {
- masm.subPtr(Imm32(8), Registers::StackPointer);
- masm.callLabel = masm.label();
- masm.addPtr(Imm32(8), Registers::StackPointer);
- }
-#elif defined(_WIN64)
- // In case of Win64 ABI, stub caller make 32-bytes spcae on stack
- else {
- masm.subPtr(Imm32(32), Registers::StackPointer);
- masm.callLabel = masm.label();
- masm.addPtr(Imm32(32), Registers::StackPointer);
- }
-#endif
- ADD_CALLSITE(false);
/**********************
* BEGIN COMPILER OPS *
**********************/
switch (op) {
BEGIN_CASE(JSOP_NOP)
END_CASE(JSOP_NOP)
@@ -1115,37 +1134,37 @@ mjit::Compiler::generateMethod()
BEGIN_CASE(JSOP_DELNAME)
{
uint32 index = fullAtomIndex(PC);
JSAtom *atom = script->getAtom(index);
prepareStubCall(Uses(0));
masm.move(ImmPtr(atom), Registers::ArgReg1);
- stubCall(stubs::DelName);
+ INLINE_STUBCALL(stubs::DelName);
frame.pushSynced();
}
END_CASE(JSOP_DELNAME)
BEGIN_CASE(JSOP_DELPROP)
{
uint32 index = fullAtomIndex(PC);
JSAtom *atom = script->getAtom(index);
prepareStubCall(Uses(1));
masm.move(ImmPtr(atom), Registers::ArgReg1);
- stubCall(STRICT_VARIANT(stubs::DelProp));
+ INLINE_STUBCALL(STRICT_VARIANT(stubs::DelProp));
frame.pop();
frame.pushSynced();
}
END_CASE(JSOP_DELPROP)
BEGIN_CASE(JSOP_DELELEM)
prepareStubCall(Uses(2));
- stubCall(STRICT_VARIANT(stubs::DelElem));
+ INLINE_STUBCALL(STRICT_VARIANT(stubs::DelElem));
frame.popn(2);
frame.pushSynced();
END_CASE(JSOP_DELELEM)
BEGIN_CASE(JSOP_TYPEOF)
BEGIN_CASE(JSOP_TYPEOFEXPR)
jsop_typeof();
END_CASE(JSOP_TYPEOF)
@@ -1277,17 +1296,17 @@ mjit::Compiler::generateMethod()
BEGIN_CASE(JSOP_SETELEM)
if (!jsop_setelem())
return Compile_Error;
END_CASE(JSOP_SETELEM);
BEGIN_CASE(JSOP_CALLNAME)
prepareStubCall(Uses(0));
masm.move(Imm32(fullAtomIndex(PC)), Registers::ArgReg1);
- stubCall(stubs::CallName);
+ INLINE_STUBCALL(stubs::CallName);
frame.pushSynced();
frame.pushSynced();
END_CASE(JSOP_CALLNAME)
BEGIN_CASE(JSOP_EVAL)
{
JaegerSpew(JSpew_Insns, " --- EVAL --- \n");
emitEval(GET_ARGC(PC));
@@ -1355,30 +1374,30 @@ mjit::Compiler::generateMethod()
return Compile_Error;
END_CASE(JSOP_AND)
BEGIN_CASE(JSOP_TABLESWITCH)
frame.syncAndForgetEverything();
masm.move(ImmPtr(PC), Registers::ArgReg1);
/* prepareStubCall() is not needed due to syncAndForgetEverything() */
- stubCall(stubs::TableSwitch);
+ INLINE_STUBCALL(stubs::TableSwitch);
frame.pop();
masm.jump(Registers::ReturnReg);
PC += js_GetVariableBytecodeLength(PC);
break;
END_CASE(JSOP_TABLESWITCH)
BEGIN_CASE(JSOP_LOOKUPSWITCH)
frame.syncAndForgetEverything();
masm.move(ImmPtr(PC), Registers::ArgReg1);
/* prepareStubCall() is not needed due to syncAndForgetEverything() */
- stubCall(stubs::LookupSwitch);
+ INLINE_STUBCALL(stubs::LookupSwitch);
frame.pop();
masm.jump(Registers::ReturnReg);
PC += js_GetVariableBytecodeLength(PC);
break;
END_CASE(JSOP_LOOKUPSWITCH)
BEGIN_CASE(JSOP_STRICTEQ)
@@ -1388,34 +1407,34 @@ mjit::Compiler::generateMethod()
BEGIN_CASE(JSOP_STRICTNE)
jsop_stricteq(op);
END_CASE(JSOP_STRICTNE)
BEGIN_CASE(JSOP_ITER)
# if defined JS_CPU_X64
prepareStubCall(Uses(1));
masm.move(Imm32(PC[1]), Registers::ArgReg1);
- stubCall(stubs::Iter);
+ INLINE_STUBCALL(stubs::Iter);
frame.pop();
frame.pushSynced();
#else
iter(PC[1]);
#endif
END_CASE(JSOP_ITER)
BEGIN_CASE(JSOP_MOREITER)
/* This MUST be fused with IFNE or IFNEX. */
iterMore();
break;
END_CASE(JSOP_MOREITER)
BEGIN_CASE(JSOP_ENDITER)
# if defined JS_CPU_X64
prepareStubCall(Uses(1));
- stubCall(stubs::EndIter);
+ INLINE_STUBCALL(stubs::EndIter);
frame.pop();
#else
iterEnd();
#endif
END_CASE(JSOP_ENDITER)
BEGIN_CASE(JSOP_POP)
frame.pop();
@@ -1480,43 +1499,43 @@ mjit::Compiler::generateMethod()
jsint i = GET_UINT16(PC);
uint32 count = GET_UINT16(PC + UINT16_LEN);
JS_ASSERT(i == JSProto_Array || i == JSProto_Object);
prepareStubCall(Uses(0));
masm.move(Imm32(count), Registers::ArgReg1);
if (i == JSProto_Array)
- stubCall(stubs::NewInitArray);
+ INLINE_STUBCALL(stubs::NewInitArray);
else
- stubCall(stubs::NewInitObject);
+ INLINE_STUBCALL(stubs::NewInitObject);
frame.takeReg(Registers::ReturnReg);
frame.pushTypedPayload(JSVAL_TYPE_OBJECT, Registers::ReturnReg);
}
END_CASE(JSOP_NEWINIT)
BEGIN_CASE(JSOP_ENDINIT)
END_CASE(JSOP_ENDINIT)
BEGIN_CASE(JSOP_INITPROP)
{
JSAtom *atom = script->getAtom(fullAtomIndex(PC));
prepareStubCall(Uses(2));
masm.move(ImmPtr(atom), Registers::ArgReg1);
- stubCall(stubs::InitProp);
+ INLINE_STUBCALL(stubs::InitProp);
frame.pop();
}
END_CASE(JSOP_INITPROP)
BEGIN_CASE(JSOP_INITELEM)
{
JSOp next = JSOp(PC[JSOP_INITELEM_LENGTH]);
prepareStubCall(Uses(3));
masm.move(Imm32(next == JSOP_ENDINIT ? 1 : 0), Registers::ArgReg1);
- stubCall(stubs::InitElem);
+ INLINE_STUBCALL(stubs::InitElem);
frame.popn(2);
}
END_CASE(JSOP_INITELEM)
BEGIN_CASE(JSOP_INCARG)
BEGIN_CASE(JSOP_DECARG)
BEGIN_CASE(JSOP_ARGINC)
BEGIN_CASE(JSOP_ARGDEC)
@@ -1598,23 +1617,23 @@ mjit::Compiler::generateMethod()
BEGIN_CASE(JSOP_SETNAME)
BEGIN_CASE(JSOP_SETMETHOD)
if (!jsop_setprop(script->getAtom(fullAtomIndex(PC)), true))
return Compile_Error;
END_CASE(JSOP_SETNAME)
BEGIN_CASE(JSOP_THROW)
prepareStubCall(Uses(1));
- stubCall(stubs::Throw);
+ INLINE_STUBCALL(stubs::Throw);
frame.pop();
END_CASE(JSOP_THROW)
BEGIN_CASE(JSOP_IN)
prepareStubCall(Uses(2));
- stubCall(stubs::In);
+ INLINE_STUBCALL(stubs::In);
frame.popn(2);
frame.takeReg(Registers::ReturnReg);
frame.pushTypedPayload(JSVAL_TYPE_BOOLEAN, Registers::ReturnReg);
END_CASE(JSOP_IN)
BEGIN_CASE(JSOP_INSTANCEOF)
if (!jsop_instanceof())
return Compile_Error;
@@ -1678,38 +1697,38 @@ mjit::Compiler::generateMethod()
if (fun) {
JSLocalKind localKind = fun->lookupLocal(cx, inner->atom, NULL);
if (localKind != JSLOCAL_NONE)
frame.syncAndForgetEverything();
}
prepareStubCall(Uses(0));
masm.move(ImmPtr(inner), Registers::ArgReg1);
- stubCall(STRICT_VARIANT(stubs::DefFun));
+ INLINE_STUBCALL(STRICT_VARIANT(stubs::DefFun));
}
END_CASE(JSOP_DEFFUN)
BEGIN_CASE(JSOP_DEFVAR)
{
uint32 index = fullAtomIndex(PC);
JSAtom *atom = script->getAtom(index);
prepareStubCall(Uses(0));
masm.move(ImmPtr(atom), Registers::ArgReg1);
- stubCall(stubs::DefVar);
+ INLINE_STUBCALL(stubs::DefVar);
}
END_CASE(JSOP_DEFVAR)
BEGIN_CASE(JSOP_DEFLOCALFUN_FC)
{
uint32 slot = GET_SLOTNO(PC);
JSFunction *fun = script->getFunction(fullAtomIndex(&PC[SLOTNO_LEN]));
prepareStubCall(Uses(frame.frameDepth()));
masm.move(ImmPtr(fun), Registers::ArgReg1);
- stubCall(stubs::DefLocalFun_FC);
+ INLINE_STUBCALL(stubs::DefLocalFun_FC);
frame.takeReg(Registers::ReturnReg);
frame.pushTypedPayload(JSVAL_TYPE_OBJECT, Registers::ReturnReg);
frame.storeLocal(slot, true);
frame.pop();
}
END_CASE(JSOP_DEFLOCALFUN_FC)
BEGIN_CASE(JSOP_LAMBDA)
@@ -1735,21 +1754,21 @@ mjit::Compiler::generateMethod()
stub = stubs::LambdaJoinableForNull;
}
}
prepareStubCall(Uses(uses));
masm.move(ImmPtr(fun), Registers::ArgReg1);
if (stub == stubs::Lambda) {
- stubCall(stub);
+ INLINE_STUBCALL(stub);
} else {
jsbytecode *savedPC = PC;
PC = pc2;
- stubCall(stub);
+ INLINE_STUBCALL(stub);
PC = savedPC;
}
frame.takeReg(Registers::ReturnReg);
frame.pushTypedPayload(JSVAL_TYPE_OBJECT, Registers::ReturnReg);
}
END_CASE(JSOP_LAMBDA)
@@ -1774,33 +1793,33 @@ mjit::Compiler::generateMethod()
if (op == JSOP_CALLFCSLOT)
frame.push(UndefinedValue());
}
END_CASE(JSOP_CALLFCSLOT)
BEGIN_CASE(JSOP_ARGSUB)
prepareStubCall(Uses(0));
masm.move(Imm32(GET_ARGNO(PC)), Registers::ArgReg1);
- stubCall(stubs::ArgSub);
+ INLINE_STUBCALL(stubs::ArgSub);
frame.pushSynced();
END_CASE(JSOP_ARGSUB)
BEGIN_CASE(JSOP_ARGCNT)
prepareStubCall(Uses(0));
- stubCall(stubs::ArgCnt);
+ INLINE_STUBCALL(stubs::ArgCnt);
frame.pushSynced();
END_CASE(JSOP_ARGCNT)
BEGIN_CASE(JSOP_DEFLOCALFUN)
{
uint32 slot = GET_SLOTNO(PC);
JSFunction *fun = script->getFunction(fullAtomIndex(&PC[SLOTNO_LEN]));
prepareStubCall(Uses(0));
masm.move(ImmPtr(fun), Registers::ArgReg1);
- stubCall(stubs::DefLocalFun);
+ INLINE_STUBCALL(stubs::DefLocalFun);
frame.takeReg(Registers::ReturnReg);
frame.pushTypedPayload(JSVAL_TYPE_OBJECT, Registers::ReturnReg);
frame.storeLocal(slot, true);
frame.pop();
}
END_CASE(JSOP_DEFLOCALFUN)
BEGIN_CASE(JSOP_RETRVAL)
@@ -1818,17 +1837,17 @@ mjit::Compiler::generateMethod()
jsop_setgname(fullAtomIndex(PC));
END_CASE(JSOP_SETGNAME)
BEGIN_CASE(JSOP_REGEXP)
{
JSObject *regex = script->getRegExp(fullAtomIndex(PC));
prepareStubCall(Uses(0));
masm.move(ImmPtr(regex), Registers::ArgReg1);
- stubCall(stubs::RegExp);
+ INLINE_STUBCALL(stubs::RegExp);
frame.takeReg(Registers::ReturnReg);
frame.pushTypedPayload(JSVAL_TYPE_OBJECT, Registers::ReturnReg);
}
END_CASE(JSOP_REGEXP)
BEGIN_CASE(JSOP_CALLPROP)
if (!jsop_callprop(script->getAtom(fullAtomIndex(PC))))
return Compile_Error;
@@ -1838,17 +1857,17 @@ mjit::Compiler::generateMethod()
BEGIN_CASE(JSOP_CALLUPVAR)
{
uint32 index = GET_UINT16(PC);
JSUpvarArray *uva = script->upvars();
JS_ASSERT(index < uva->length);
prepareStubCall(Uses(0));
masm.move(Imm32(uva->vector[index].asInteger()), Registers::ArgReg1);
- stubCall(stubs::GetUpvar);
+ INLINE_STUBCALL(stubs::GetUpvar);
frame.pushSynced();
if (op == JSOP_CALLUPVAR)
frame.push(UndefinedValue());
}
END_CASE(JSOP_CALLUPVAR)
BEGIN_CASE(JSOP_UINT24)
frame.push(Value(Int32Value((int32_t) GET_UINT24(PC))));
@@ -1890,58 +1909,58 @@ mjit::Compiler::generateMethod()
frame.push(Value(Int32Value(GET_INT32(PC))));
END_CASE(JSOP_INT32)
BEGIN_CASE(JSOP_NEWARRAY)
{
uint32 len = GET_UINT16(PC);
prepareStubCall(Uses(len));
masm.move(Imm32(len), Registers::ArgReg1);
- stubCall(stubs::NewArray);
+ INLINE_STUBCALL(stubs::NewArray);
frame.popn(len);
frame.takeReg(Registers::ReturnReg);
frame.pushTypedPayload(JSVAL_TYPE_OBJECT, Registers::ReturnReg);
}
END_CASE(JSOP_NEWARRAY)
BEGIN_CASE(JSOP_HOLE)
frame.push(MagicValue(JS_ARRAY_HOLE));
END_CASE(JSOP_HOLE)
BEGIN_CASE(JSOP_LAMBDA_FC)
{
JSFunction *fun = script->getFunction(fullAtomIndex(PC));
prepareStubCall(Uses(frame.frameDepth()));
masm.move(ImmPtr(fun), Registers::ArgReg1);
- stubCall(stubs::FlatLambda);
+ INLINE_STUBCALL(stubs::FlatLambda);
frame.takeReg(Registers::ReturnReg);
frame.pushTypedPayload(JSVAL_TYPE_OBJECT, Registers::ReturnReg);
}
END_CASE(JSOP_LAMBDA_FC)
BEGIN_CASE(JSOP_TRACE)
BEGIN_CASE(JSOP_NOTRACE)
{
if (analysis->jumpTarget(PC))
interruptCheckHelper();
}
END_CASE(JSOP_TRACE)
BEGIN_CASE(JSOP_DEBUGGER)
prepareStubCall(Uses(0));
masm.move(ImmPtr(PC), Registers::ArgReg1);
- stubCall(stubs::Debugger);
+ INLINE_STUBCALL(stubs::Debugger);
END_CASE(JSOP_DEBUGGER)
BEGIN_CASE(JSOP_INITMETHOD)
{
JSAtom *atom = script->getAtom(fullAtomIndex(PC));
prepareStubCall(Uses(2));
masm.move(ImmPtr(atom), Registers::ArgReg1);
- stubCall(stubs::InitMethod);
+ INLINE_STUBCALL(stubs::InitMethod);
frame.pop();
}
END_CASE(JSOP_INITMETHOD)
BEGIN_CASE(JSOP_UNBRAND)
jsop_unbrand();
END_CASE(JSOP_UNBRAND)
@@ -2032,22 +2051,21 @@ mjit::Compiler::findCallSite(const CallS
{
JS_ASSERT(callSite.pcOffset < script->length);
JITScript *jit = script->getJIT(fp->isConstructing());
uint8* ilPath = (uint8 *)jit->code.m_code.executableAddress();
uint8* oolPath = ilPath + masm.size();
for (uint32 i = 0; i < callSites.length(); i++) {
- if (callSites[i].pc == script->code + callSite.pcOffset &&
- callSites[i].id == callSite.id) {
- if (callSites[i].stub) {
- return oolPath + stubcc.masm.distanceOf(callSites[i].location);
- }
- return ilPath + masm.distanceOf(callSites[i].location);
+ InternalCallSite &cs = callSites[i];
+ if (cs.pc == script->code + callSite.pcOffset && cs.id == callSite.id) {
+ if (cs.ool)
+ return oolPath + cs.returnOffset;
+ return ilPath + cs.returnOffset;
}
}
/* We have no idea where to patch up to. */
JS_NOT_REACHED("Call site vanished.");
return NULL;
}
@@ -2194,44 +2212,44 @@ mjit::Compiler::emitReturnValue(Assemble
void
mjit::Compiler::emitReturn(FrameEntry *fe)
{
JS_ASSERT_IF(!fun, JSOp(*PC) == JSOP_STOP);
/* Only the top of the stack can be returned. */
JS_ASSERT_IF(fe, fe == frame.peek(-1));
- if (debugMode || Probes::callTrackingActive(cx)) {
+ if (debugMode() || Probes::callTrackingActive(cx)) {
prepareStubCall(Uses(0));
- stubCall(stubs::LeaveScript);
+ INLINE_STUBCALL(stubs::LeaveScript);
}
/*
* If there's a function object, deal with the fact that it can escape.
* Note that after we've placed the call object, all tracked state can
* be thrown away. This will happen anyway because the next live opcode
* (if any) must have an incoming edge.
*
* However, it's an optimization to throw it away early - the tracker
* won't be spilled on further exits or join points.
*/
if (fun) {
if (fun->isHeavyweight()) {
/* There will always be a call object. */
prepareStubCall(Uses(fe ? 1 : 0));
- stubCall(stubs::PutActivationObjects);
+ INLINE_STUBCALL(stubs::PutActivationObjects);
} else {
/* if (hasCallObj() || hasArgsObj()) stubs::PutActivationObjects() */
Jump putObjs = masm.branchTest32(Assembler::NonZero,
Address(JSFrameReg, JSStackFrame::offsetOfFlags()),
Imm32(JSFRAME_HAS_CALL_OBJ | JSFRAME_HAS_ARGS_OBJ));
stubcc.linkExit(putObjs, Uses(frame.frameDepth()));
stubcc.leave();
- stubcc.call(stubs::PutActivationObjects);
+ OOL_STUBCALL(stubs::PutActivationObjects);
emitReturnValue(&stubcc.masm, fe);
emitFinalReturn(stubcc.masm);
}
}
emitReturnValue(&masm, fe);
emitFinalReturn(masm);
@@ -2242,20 +2260,20 @@ void
mjit::Compiler::prepareStubCall(Uses uses)
{
JaegerSpew(JSpew_Insns, " ---- STUB CALL, SYNCING FRAME ---- \n");
frame.syncAndKill(Registers(Registers::TempRegs), uses);
JaegerSpew(JSpew_Insns, " ---- FRAME SYNCING DONE ---- \n");
}
JSC::MacroAssembler::Call
-mjit::Compiler::stubCall(void *ptr)
+mjit::Compiler::emitStubCall(void *ptr)
{
JaegerSpew(JSpew_Insns, " ---- CALLING STUB ---- \n");
- Call cl = masm.stubCall(ptr, PC, frame.stackDepth() + script->nfixed);
+ Call cl = masm.fallibleVMCall(ptr, PC, frame.stackDepth() + script->nfixed);
JaegerSpew(JSpew_Insns, " ---- END STUB CALL ---- \n");
return cl;
}
void
mjit::Compiler::interruptCheckHelper()
{
RegisterID reg = frame.allocReg();
@@ -2292,53 +2310,57 @@ mjit::Compiler::interruptCheckHelper()
stubcc.masm.loadPtr(FrameAddress(offsetof(VMFrame, cx)), reg);
stubcc.masm.loadPtr(Address(reg, offsetof(JSContext, thread)), reg);
Address flag(reg, offsetof(JSThread, data.interruptFlags));
Jump noInterrupt = stubcc.masm.branchTest32(Assembler::Zero, flag);
#endif
frame.sync(stubcc.masm, Uses(0));
stubcc.masm.move(ImmPtr(PC), Registers::ArgReg1);
- stubcc.call(stubs::Interrupt);
- ADD_CALLSITE(true);
+ OOL_STUBCALL(stubs::Interrupt);
stubcc.rejoin(Changes(0));
#ifdef JS_THREADSAFE
stubcc.linkRejoin(noInterrupt);
#endif
frame.freeReg(reg);
}
void
+mjit::Compiler::addReturnSite(Label joinPoint, uint32 id)
+{
+ InternalCallSite site(masm.distanceOf(joinPoint), PC, id, false, false);
+ addCallSite(site);
+}
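+
+// Return sites are recorded with __LINE__ as their id, giving each emitting
+// call site in the compiler a stable identifier; findCallSite() matches on
+// the (pcOffset, id) pair when mapping a frame's return address into newly
+// compiled code.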
+
+void
mjit::Compiler::emitUncachedCall(uint32 argc, bool callingNew)
{
CallPatchInfo callPatch;
RegisterID r0 = Registers::ReturnReg;
VoidPtrStubUInt32 stub = callingNew ? stubs::UncachedNew : stubs::UncachedCall;
frame.syncAndKill(Registers(Registers::AvailRegs), Uses(argc + 2));
prepareStubCall(Uses(argc + 2));
masm.move(Imm32(argc), Registers::ArgReg1);
- stubCall(stub);
- ADD_CALLSITE(false);
+ INLINE_STUBCALL(stub);
Jump notCompiled = masm.branchTestPtr(Assembler::Zero, r0, r0);
masm.loadPtr(FrameAddress(offsetof(VMFrame, regs.fp)), JSFrameReg);
callPatch.hasFastNcode = true;
callPatch.fastNcodePatch =
masm.storePtrWithPatch(ImmPtr(NULL),
Address(JSFrameReg, JSStackFrame::offsetOfncode()));
masm.jump(r0);
- ADD_NON_STUB_CALLSITE(false);
-
callPatch.joinPoint = masm.label();
+ addReturnSite(callPatch.joinPoint, __LINE__);
masm.loadPtr(Address(JSFrameReg, JSStackFrame::offsetOfPrev()), JSFrameReg);
frame.popn(argc + 2);
frame.takeReg(JSReturnReg_Type);
frame.takeReg(JSReturnReg_Data);
frame.pushRegs(JSReturnReg_Type, JSReturnReg_Data);
stubcc.linkExitDirect(notCompiled, stubcc.masm.label());
@@ -2388,39 +2410,38 @@ mjit::Compiler::checkCallApplySpeculatio
{
if (isObj.isSet())
stubcc.linkExitDirect(isObj.getJump(), stubcc.masm.label());
stubcc.linkExitDirect(isFun, stubcc.masm.label());
stubcc.linkExitDirect(isNative, stubcc.masm.label());
int32 frameDepthAdjust;
if (applyTricks == LazyArgsObj) {
- stubcc.call(stubs::Arguments);
+ OOL_STUBCALL(stubs::Arguments);
frameDepthAdjust = +1;
} else {
frameDepthAdjust = 0;
}
stubcc.masm.move(Imm32(callImmArgc), Registers::ArgReg1);
JaegerSpew(JSpew_Insns, " ---- BEGIN SLOW CALL CODE ---- \n");
- stubcc.masm.stubCall(JS_FUNC_TO_DATA_PTR(void *, stubs::UncachedCall),
- PC, frame.frameDepth() + frameDepthAdjust);
+ OOL_STUBCALL_SLOTS(JS_FUNC_TO_DATA_PTR(void *, stubs::UncachedCall),
+ frame.frameDepth() + frameDepthAdjust);
JaegerSpew(JSpew_Insns, " ---- END SLOW CALL CODE ---- \n");
- ADD_CALLSITE(true);
RegisterID r0 = Registers::ReturnReg;
Jump notCompiled = stubcc.masm.branchTestPtr(Assembler::Zero, r0, r0);
stubcc.masm.loadPtr(FrameAddress(offsetof(VMFrame, regs.fp)), JSFrameReg);
Address ncodeAddr(JSFrameReg, JSStackFrame::offsetOfncode());
uncachedCallPatch->hasSlowNcode = true;
uncachedCallPatch->slowNcodePatch = stubcc.masm.storePtrWithPatch(ImmPtr(NULL), ncodeAddr);
stubcc.masm.jump(r0);
- ADD_NON_STUB_CALLSITE(true);
+ addReturnSite(masm.label(), __LINE__);
notCompiled.linkTo(stubcc.masm.label(), &stubcc.masm);
/*
* inlineCallHelper will link uncachedCallSlowRejoin to the join point
* at the end of the ic. At that join point, the return value of the
* call is assumed to be in registers, so load them before jumping.
*/
@@ -2445,17 +2466,17 @@ mjit::Compiler::checkCallApplySpeculatio
/* This predicate must be called before the current op mutates the FrameState. */
bool
mjit::Compiler::canUseApplyTricks()
{
JS_ASSERT(*PC == JSOP_ARGUMENTS);
jsbytecode *nextpc = PC + JSOP_ARGUMENTS_LENGTH;
return *nextpc == JSOP_FUNAPPLY &&
IsLowerableFunCallOrApply(nextpc) &&
- !debugMode;
+ !debugMode();
}
/* See MonoIC.cpp, CallCompiler for more information on call ICs. */
void
mjit::Compiler::inlineCallHelper(uint32 callImmArgc, bool callingNew)
{
/* Check for interrupts on function call */
interruptCheckHelper();
@@ -2484,17 +2505,17 @@ mjit::Compiler::inlineCallHelper(uint32
*/
bool lowerFunCallOrApply = IsLowerableFunCallOrApply(PC);
/*
* Currently, constant values are not functions, so don't even try to
* optimize. This lets us assume that callee/this have regs below.
*/
#ifdef JS_MONOIC
- if (debugMode ||
+ if (debugMode() ||
origCallee->isConstant() || origCallee->isNotType(JSVAL_TYPE_OBJECT) ||
(lowerFunCallOrApply &&
(origThis->isConstant() || origThis->isNotType(JSVAL_TYPE_OBJECT)))) {
#endif
if (applyTricks == LazyArgsObj) {
/* frame.pop() above reset us to pre-JSOP_ARGUMENTS state */
jsop_arguments();
frame.pushSynced();
@@ -2615,17 +2636,17 @@ mjit::Compiler::inlineCallHelper(uint32
tempRegs.putReg(tmp);
/*
* N.B. After this call, the frame will have a dynamic frame size.
* Check after the function is known not to be a native so that the
* catch-all/native path has a static depth.
*/
if (callIC.frameSize.isDynamic())
- stubcc.call(ic::SplatApplyArgs);
+ OOL_STUBCALL(ic::SplatApplyArgs);
/*
* No-op jump that gets patched by ic::New/Call to the stub generated
* by generateFullCallStub.
*/
Jump toPatch = stubcc.masm.jump();
toPatch.linkTo(stubcc.masm.label(), &stubcc.masm);
callIC.oolJump = toPatch;
@@ -2633,19 +2654,19 @@ mjit::Compiler::inlineCallHelper(uint32
/*
* At this point the function is definitely scripted, so we try to
* compile it and patch either funGuard/funJump or oolJump. This code
* is only executed once.
*/
callIC.addrLabel1 = stubcc.masm.moveWithPatch(ImmPtr(NULL), Registers::ArgReg1);
void *icFunPtr = JS_FUNC_TO_DATA_PTR(void *, callingNew ? ic::New : ic::Call);
if (callIC.frameSize.isStatic())
- callIC.oolCall = stubcc.masm.stubCall(icFunPtr, PC, frame.frameDepth());
+ callIC.oolCall = OOL_STUBCALL_SLOTS(icFunPtr, frame.frameDepth());
else
- callIC.oolCall = stubcc.masm.stubCallWithDynamicDepth(icFunPtr, PC);
+ callIC.oolCall = OOL_STUBCALL_SLOTS(icFunPtr, -1);
callIC.funObjReg = icCalleeData;
callIC.funPtrReg = funPtrReg;
/*
* The IC call either returns NULL, meaning call completed, or a
* function pointer to jump to. Caveat: Must restore JSFrameReg
* because a new frame has been pushed.
@@ -2671,17 +2692,17 @@ mjit::Compiler::inlineCallHelper(uint32
* path through js::Invoke.
*/
if (notObjectJump.isSet())
stubcc.linkExitDirect(notObjectJump.get(), stubcc.masm.label());
notFunction.linkTo(stubcc.masm.label(), &stubcc.masm);
isNative.linkTo(stubcc.masm.label(), &stubcc.masm);
callIC.addrLabel2 = stubcc.masm.moveWithPatch(ImmPtr(NULL), Registers::ArgReg1);
- stubcc.call(callingNew ? ic::NativeNew : ic::NativeCall);
+ OOL_STUBCALL(callingNew ? ic::NativeNew : ic::NativeCall);
rejoin2 = stubcc.masm.jump();
}
/*
* If the call site goes to a closure over the same function, it will
* generate an out-of-line stub that joins back here.
*/
@@ -2692,16 +2713,17 @@ mjit::Compiler::inlineCallHelper(uint32
flags |= JSFRAME_CONSTRUCTING;
InlineFrameAssembler inlFrame(masm, callIC, flags);
callPatch.hasFastNcode = true;
callPatch.fastNcodePatch = inlFrame.assemble(NULL);
callIC.hotJump = masm.jump();
callIC.joinPoint = callPatch.joinPoint = masm.label();
+ addReturnSite(callPatch.joinPoint, __LINE__);
if (lowerFunCallOrApply)
uncachedCallPatch.joinPoint = callIC.joinPoint;
masm.loadPtr(Address(JSFrameReg, JSStackFrame::offsetOfPrev()), JSFrameReg);
frame.popn(speculatedArgc + 2);
frame.takeReg(JSReturnReg_Type);
frame.takeReg(JSReturnReg_Data);
frame.pushRegs(JSReturnReg_Type, JSReturnReg_Data);
@@ -2733,28 +2755,18 @@ mjit::Compiler::inlineCallHelper(uint32
}
/*
* This function must be called immediately after any instruction which could
* cause a new JSStackFrame to be pushed and could lead to a new debug trap
* being set. This includes any API callbacks and any scripted or native call.
*/
void
-mjit::Compiler::addCallSite(uint32 id, bool stub)
+mjit::Compiler::addCallSite(const InternalCallSite &site)
{
- InternalCallSite site;
- site.stub = stub;
-#if (defined(JS_NO_FASTCALL) && defined(JS_CPU_X86)) || defined(_WIN64)
- site.location = stub ? stubcc.masm.callLabel : masm.callLabel;
-#else
- site.location = stub ? stubcc.masm.label() : masm.label();
-#endif
-
- site.pc = PC;
- site.id = id;
callSites.append(site);
}
void
mjit::Compiler::restoreFrameRegs(Assembler &masm)
{
masm.loadPtr(FrameAddress(offsetof(VMFrame, regs.fp)), JSFrameReg);
}
@@ -2819,17 +2831,17 @@ mjit::Compiler::compareTwoValues(JSConte
JS_NOT_REACHED("NYI");
return false;
}
bool
mjit::Compiler::emitStubCmpOp(BoolStub stub, jsbytecode *target, JSOp fused)
{
prepareStubCall(Uses(2));
- stubCall(stub);
+ INLINE_STUBCALL(stub);
frame.pop();
frame.pop();
if (!target) {
frame.takeReg(Registers::ReturnReg);
frame.pushTypedPayload(JSVAL_TYPE_BOOLEAN, Registers::ReturnReg);
return true;
}
@@ -2845,43 +2857,43 @@ mjit::Compiler::emitStubCmpOp(BoolStub s
}
void
mjit::Compiler::jsop_setprop_slow(JSAtom *atom, bool usePropCache)
{
prepareStubCall(Uses(2));
masm.move(ImmPtr(atom), Registers::ArgReg1);
if (usePropCache)
- stubCall(STRICT_VARIANT(stubs::SetName));
+ INLINE_STUBCALL(STRICT_VARIANT(stubs::SetName));
else
- stubCall(STRICT_VARIANT(stubs::SetPropNoCache));
+ INLINE_STUBCALL(STRICT_VARIANT(stubs::SetPropNoCache));
JS_STATIC_ASSERT(JSOP_SETNAME_LENGTH == JSOP_SETPROP_LENGTH);
frame.shimmy(1);
}
void
mjit::Compiler::jsop_getprop_slow(JSAtom *atom, bool usePropCache)
{
prepareStubCall(Uses(1));
if (usePropCache) {
- stubCall(stubs::GetProp);
+ INLINE_STUBCALL(stubs::GetProp);
} else {
masm.move(ImmPtr(atom), Registers::ArgReg1);
- stubCall(stubs::GetPropNoCache);
+ INLINE_STUBCALL(stubs::GetPropNoCache);
}
frame.pop();
frame.pushSynced();
}
bool
mjit::Compiler::jsop_callprop_slow(JSAtom *atom)
{
prepareStubCall(Uses(1));
masm.move(ImmPtr(atom), Registers::ArgReg1);
- stubCall(stubs::CallProp);
+ INLINE_STUBCALL(stubs::CallProp);
frame.pop();
frame.pushSynced();
frame.pushSynced();
return true;
}
bool
mjit::Compiler::jsop_length()
@@ -2904,17 +2916,17 @@ mjit::Compiler::jsop_length()
}
return true;
}
#if defined JS_POLYIC
return jsop_getprop(cx->runtime->atomState.lengthAtom);
#else
prepareStubCall(Uses(1));
- stubCall(stubs::Length);
+ INLINE_STUBCALL(stubs::Length);
frame.pop();
frame.pushSynced();
return true;
#endif
}
#ifdef JS_MONOIC
void
@@ -2997,17 +3009,17 @@ mjit::Compiler::jsop_getprop(JSAtom *ato
Imm32(int32(JSObjectMap::INVALID_SHAPE)),
inlineShapeLabel);
DBGLABEL(dbgInlineShapeJump);
pic.slowPathStart = stubcc.linkExit(j, Uses(1));
stubcc.leave();
passICAddress(&pic);
- pic.slowPathCall = stubcc.call(ic::GetProp);
+ pic.slowPathCall = OOL_STUBCALL(ic::GetProp);
/* Load dslots. */
#if defined JS_NUNBOX32
DBGLABEL(dbgDslotsLoad);
#elif defined JS_PUNBOX64
Label dslotsLoadLabel = masm.label();
#endif
masm.loadPtr(Address(objReg, offsetof(JSObject, slots)), objReg);
@@ -3121,17 +3133,17 @@ mjit::Compiler::jsop_callprop_generic(JS
inlineShapeLabel);
DBGLABEL(dbgInlineShapeJump);
pic.slowPathStart = stubcc.linkExit(j, Uses(1));
/* Slow path. */
stubcc.leave();
passICAddress(&pic);
- pic.slowPathCall = stubcc.call(ic::CallProp);
+ pic.slowPathCall = OOL_STUBCALL(ic::CallProp);
/* Adjust the frame. None of this will generate code. */
frame.pop();
frame.pushRegs(shapeReg, objReg);
frame.pushSynced();
/* Load dslots. */
#if defined JS_NUNBOX32
@@ -3268,17 +3280,17 @@ mjit::Compiler::jsop_callprop_obj(JSAtom
Imm32(int32(JSObjectMap::INVALID_SHAPE)),
inlineShapeLabel);
DBGLABEL(dbgInlineShapeJump);
pic.slowPathStart = stubcc.linkExit(j, Uses(1));
stubcc.leave();
passICAddress(&pic);
- pic.slowPathCall = stubcc.call(ic::CallProp);
+ pic.slowPathCall = OOL_STUBCALL(ic::CallProp);
/* Load dslots. */
#if defined JS_NUNBOX32
DBGLABEL(dbgDslotsLoad);
#elif defined JS_PUNBOX64
Label dslotsLoadLabel = masm.label();
#endif
masm.loadPtr(Address(objReg, offsetof(JSObject, slots)), objReg);
@@ -3390,19 +3402,19 @@ mjit::Compiler::jsop_setprop(JSAtom *ato
pic.fastPathStart = masm.label();
Jump j = masm.testObject(Assembler::NotEqual, reg);
pic.typeCheck = stubcc.linkExit(j, Uses(2));
stubcc.leave();
stubcc.masm.move(ImmPtr(atom), Registers::ArgReg1);
if (usePropCache)
- stubcc.call(STRICT_VARIANT(stubs::SetName));
+ OOL_STUBCALL(STRICT_VARIANT(stubs::SetName));
else
- stubcc.call(STRICT_VARIANT(stubs::SetPropNoCache));
+ OOL_STUBCALL(STRICT_VARIANT(stubs::SetPropNoCache));
typeCheck = stubcc.masm.jump();
pic.hasTypeCheck = true;
} else {
pic.fastPathStart = masm.label();
pic.hasTypeCheck = false;
pic.typeReg = Registers::ReturnReg;
}
@@ -3430,17 +3442,17 @@ mjit::Compiler::jsop_setprop(JSAtom *ato
DBGLABEL(dbgInlineShapeJump);
/* Slow path. */
{
pic.slowPathStart = stubcc.linkExit(j, Uses(2));
stubcc.leave();
passICAddress(&pic);
- pic.slowPathCall = stubcc.call(ic::SetProp);
+ pic.slowPathCall = OOL_STUBCALL(ic::SetProp);
}
/* Load dslots. */
#if defined JS_NUNBOX32
DBGLABEL(dbgDslots);
#elif defined JS_PUNBOX64
Label dslotsLoadLabel = masm.label();
#endif
@@ -3512,17 +3524,17 @@ mjit::Compiler::jsop_name(JSAtom *atom)
pic.shapeGuard = masm.label();
Jump j = masm.jump();
DBGLABEL(dbgJumpOffset);
{
pic.slowPathStart = stubcc.linkExit(j, Uses(0));
stubcc.leave();
passICAddress(&pic);
- pic.slowPathCall = stubcc.call(ic::Name);
+ pic.slowPathCall = OOL_STUBCALL(ic::Name);
}
pic.fastPathRejoin = masm.label();
frame.pushRegs(pic.shapeReg, pic.objReg);
JS_ASSERT(masm.differenceBetween(pic.fastPathStart, dbgJumpOffset) == SCOPENAME_JUMP_OFFSET);
stubcc.rejoin(Changes(1));
@@ -3554,17 +3566,17 @@ mjit::Compiler::jsop_xname(JSAtom *atom)
pic.shapeGuard = masm.label();
Jump j = masm.jump();
DBGLABEL(dbgJumpOffset);
{
pic.slowPathStart = stubcc.linkExit(j, Uses(1));
stubcc.leave();
passICAddress(&pic);
- pic.slowPathCall = stubcc.call(ic::XName);
+ pic.slowPathCall = OOL_STUBCALL(ic::XName);
}
pic.fastPathRejoin = masm.label();
frame.pop();
frame.pushRegs(pic.shapeReg, pic.objReg);
JS_ASSERT(masm.differenceBetween(pic.fastPathStart, dbgJumpOffset) == SCOPENAME_JUMP_OFFSET);
@@ -3603,17 +3615,17 @@ mjit::Compiler::jsop_bindname(uint32 ind
masm.loadPayload(parent, Registers::ValueReg);
Jump j = masm.branchPtr(Assembler::NotEqual, Registers::ValueReg, ImmPtr(0));
Label inlineJumpOffset = masm.label();
#endif
{
pic.slowPathStart = stubcc.linkExit(j, Uses(0));
stubcc.leave();
passICAddress(&pic);
- pic.slowPathCall = stubcc.call(ic::BindName);
+ pic.slowPathCall = OOL_STUBCALL(ic::BindName);
}
pic.fastPathRejoin = masm.label();
frame.pushTypedPayload(JSVAL_TYPE_OBJECT, pic.objReg);
frame.freeReg(pic.shapeReg);
#if defined JS_NUNBOX32
JS_ASSERT(masm.differenceBetween(pic.shapeGuard, inlineJumpOffset) == BINDNAME_INLINE_JUMP_OFFSET);
@@ -3628,17 +3640,17 @@ mjit::Compiler::jsop_bindname(uint32 ind
}
#else /* JS_POLYIC */
void
mjit::Compiler::jsop_name(JSAtom *atom)
{
prepareStubCall(Uses(0));
- stubCall(stubs::Name);
+ INLINE_STUBCALL(stubs::Name);
frame.pushSynced();
}
bool
mjit::Compiler::jsop_xname(JSAtom *atom)
{
return jsop_getprop(atom);
}
@@ -3672,20 +3684,20 @@ mjit::Compiler::jsop_bindname(uint32 ind
Address address(reg, offsetof(JSObject, parent));
Jump j = masm.branchPtr(Assembler::NotEqual, masm.payloadOf(address), ImmPtr(0));
stubcc.linkExit(j, Uses(0));
stubcc.leave();
if (usePropCache) {
- stubcc.call(stubs::BindName);
+ OOL_STUBCALL(stubs::BindName);
} else {
stubcc.masm.move(ImmPtr(script->getAtom(index)), Registers::ArgReg1);
- stubcc.call(stubs::BindNameNoCache);
+ OOL_STUBCALL(stubs::BindNameNoCache);
}
frame.pushTypedPayload(JSVAL_TYPE_OBJECT, reg);
stubcc.rejoin(Changes(1));
}
#endif
@@ -3714,17 +3726,17 @@ mjit::Compiler::jsop_this()
* In strict mode code, we don't wrap 'this'.
* In direct-call eval code, we wrapped 'this' before entering the eval.
* In global code, 'this' is always an object.
*/
if (fun && !script->strictModeCode) {
Jump notObj = frame.testObject(Assembler::NotEqual, frame.peek(-1));
stubcc.linkExit(notObj, Uses(1));
stubcc.leave();
- stubcc.call(stubs::This);
+ OOL_STUBCALL(stubs::This);
stubcc.rejoin(Changes(1));
}
}
void
mjit::Compiler::jsop_gnameinc(JSOp op, VoidStubAtom stub, uint32 index)
{
#if defined JS_MONOIC
@@ -3800,17 +3812,17 @@ mjit::Compiler::jsop_gnameinc(JSOp op, V
}
if (pop)
PC += JSOP_POP_LENGTH;
#else
JSAtom *atom = script->getAtom(index);
prepareStubCall(Uses(0));
masm.move(ImmPtr(atom), Registers::ArgReg1);
- stubCall(stub);
+ INLINE_STUBCALL(stub);
frame.pushSynced();
#endif
PC += JSOP_GNAMEINC_LENGTH;
}
bool
mjit::Compiler::jsop_nameinc(JSOp op, VoidStubAtom stub, uint32 index)
@@ -3890,17 +3902,17 @@ mjit::Compiler::jsop_nameinc(JSOp op, Vo
// N
}
if (pop)
PC += JSOP_POP_LENGTH;
#else
prepareStubCall(Uses(0));
masm.move(ImmPtr(atom), Registers::ArgReg1);
- stubCall(stub);
+ INLINE_STUBCALL(stub);
frame.pushSynced();
#endif
PC += JSOP_NAMEINC_LENGTH;
return true;
}
bool
@@ -3977,17 +3989,17 @@ mjit::Compiler::jsop_propinc(JSOp op, Vo
}
if (pop)
PC += JSOP_POP_LENGTH;
} else
#endif
{
prepareStubCall(Uses(1));
masm.move(ImmPtr(atom), Registers::ArgReg1);
- stubCall(stub);
+ INLINE_STUBCALL(stub);
frame.pop();
frame.pushSynced();
}
PC += JSOP_PROPINC_LENGTH;
return true;
}
@@ -3998,17 +4010,17 @@ mjit::Compiler::iter(uintN flags)
/*
* Stub the call if this is not a simple 'for in' loop or if the iterated
* value is known to not be an object.
*/
if ((flags != JSITER_ENUMERATE) || fe->isNotType(JSVAL_TYPE_OBJECT)) {
prepareStubCall(Uses(1));
masm.move(Imm32(flags), Registers::ArgReg1);
- stubCall(stubs::Iter);
+ INLINE_STUBCALL(stubs::Iter);
frame.pop();
frame.pushSynced();
return;
}
if (!fe->isTypeKnown()) {
Jump notObject = frame.testObject(Assembler::NotEqual, fe);
stubcc.linkExit(notObject, Uses(1));
@@ -4089,17 +4101,17 @@ mjit::Compiler::iter(uintN flags)
masm.storePtr(ioreg, Address(T1, offsetof(JSContext, enumerators)));
frame.freeReg(nireg);
frame.freeReg(T1);
frame.freeReg(T2);
stubcc.leave();
stubcc.masm.move(Imm32(flags), Registers::ArgReg1);
- stubcc.call(stubs::Iter);
+ OOL_STUBCALL(stubs::Iter);
/* Push the iterator object. */
frame.pop();
frame.pushTypedPayload(JSVAL_TYPE_OBJECT, ioreg);
stubcc.rejoin(Changes(1));
}
@@ -4149,17 +4161,17 @@ mjit::Compiler::iterNext()
masm.addPtr(Imm32(sizeof(jsid)), T2, T4);
masm.storePtr(T4, Address(T1, offsetof(NativeIterator, props_cursor)));
frame.freeReg(T4);
frame.freeReg(T1);
frame.freeReg(T2);
stubcc.leave();
- stubcc.call(stubs::IterNext);
+ OOL_STUBCALL(stubs::IterNext);
frame.pushUntypedPayload(JSVAL_TYPE_STRING, T3);
/* Join with the stub call. */
stubcc.rejoin(Changes(1));
}
bool
@@ -4190,17 +4202,17 @@ mjit::Compiler::iterMore()
JSOp next = JSOp(*target);
JS_ASSERT(next == JSOP_IFNE || next == JSOP_IFNEX);
target += (next == JSOP_IFNE)
? GET_JUMP_OFFSET(target)
: GET_JUMPX_OFFSET(target);
stubcc.leave();
- stubcc.call(stubs::IterMore);
+ OOL_STUBCALL(stubs::IterMore);
Jump j = stubcc.masm.branchTest32(Assembler::NonZero, Registers::ReturnReg,
Registers::ReturnReg);
PC += JSOP_MOREITER_LENGTH;
PC += js_CodeSpec[next].length;
stubcc.rejoin(Changes(1));
@@ -4247,51 +4259,51 @@ mjit::Compiler::iterEnd()
masm.loadPtr(FrameAddress(offsetof(VMFrame, cx)), T2);
masm.loadPtr(Address(T1, offsetof(NativeIterator, next)), T1);
masm.storePtr(T1, Address(T2, offsetof(JSContext, enumerators)));
frame.freeReg(T1);
frame.freeReg(T2);
stubcc.leave();
- stubcc.call(stubs::EndIter);
+ OOL_STUBCALL(stubs::EndIter);
frame.pop();
stubcc.rejoin(Changes(1));
}
void
mjit::Compiler::jsop_eleminc(JSOp op, VoidStub stub)
{
prepareStubCall(Uses(2));
- stubCall(stub);
+ INLINE_STUBCALL(stub);
frame.popn(2);
frame.pushSynced();
}
void
mjit::Compiler::jsop_getgname_slow(uint32 index)
{
prepareStubCall(Uses(0));
- stubCall(stubs::GetGlobalName);
+ INLINE_STUBCALL(stubs::GetGlobalName);
frame.pushSynced();
}
void
mjit::Compiler::jsop_bindgname()
{
if (script->compileAndGo && globalObj) {
frame.push(ObjectValue(*globalObj));
return;
}
/* :TODO: this is slower than it needs to be. */
prepareStubCall(Uses(0));
- stubCall(stubs::BindGlobalName);
+ INLINE_STUBCALL(stubs::BindGlobalName);
frame.takeReg(Registers::ReturnReg);
frame.pushTypedPayload(JSVAL_TYPE_OBJECT, Registers::ReturnReg);
}
void
mjit::Compiler::jsop_getgname(uint32 index)
{
#if defined JS_MONOIC
@@ -4326,17 +4338,17 @@ mjit::Compiler::jsop_getgname(uint32 ind
Imm32(int32(JSObjectMap::INVALID_SHAPE)), mic.shape);
frame.freeReg(reg);
}
stubcc.linkExit(shapeGuard, Uses(0));
stubcc.leave();
passMICAddress(mic);
mic.stubEntry = stubcc.masm.label();
- mic.call = stubcc.call(ic::GetGlobalName);
+ mic.call = OOL_STUBCALL(ic::GetGlobalName);
/* Garbage value. */
uint32 slot = 1 << 24;
masm.loadPtr(Address(objReg, offsetof(JSObject, slots)), objReg);
Address address(objReg, slot);
/*
@@ -4382,17 +4394,17 @@ mjit::Compiler::jsop_getgname(uint32 ind
}
void
mjit::Compiler::jsop_setgname_slow(uint32 index)
{
JSAtom *atom = script->getAtom(index);
prepareStubCall(Uses(2));
masm.move(ImmPtr(atom), Registers::ArgReg1);
- stubCall(STRICT_VARIANT(stubs::SetGlobalName));
+ INLINE_STUBCALL(STRICT_VARIANT(stubs::SetGlobalName));
frame.popn(2);
frame.pushSynced();
}
void
mjit::Compiler::jsop_setgname(uint32 index)
{
#if defined JS_MONOIC
@@ -4425,17 +4437,17 @@ mjit::Compiler::jsop_setgname(uint32 ind
mic.shape);
frame.freeReg(reg);
}
stubcc.linkExit(shapeGuard, Uses(2));
stubcc.leave();
passMICAddress(mic);
mic.stubEntry = stubcc.masm.label();
- mic.call = stubcc.call(ic::SetGlobalName);
+ mic.call = OOL_STUBCALL(ic::SetGlobalName);
/* Garbage value. */
uint32 slot = 1 << 24;
/* Get both type and reg into registers. */
FrameEntry *fe = frame.peek(-1);
Value v;
@@ -4514,47 +4526,47 @@ mjit::Compiler::jsop_setgname(uint32 ind
jsop_setgname_slow(index);
#endif
}
void
mjit::Compiler::jsop_setelem_slow()
{
prepareStubCall(Uses(3));
- stubCall(STRICT_VARIANT(stubs::SetElem));
+ INLINE_STUBCALL(STRICT_VARIANT(stubs::SetElem));
frame.popn(3);
frame.pushSynced();
}
void
mjit::Compiler::jsop_getelem_slow()
{
prepareStubCall(Uses(2));
- stubCall(stubs::GetElem);
+ INLINE_STUBCALL(stubs::GetElem);
frame.popn(2);
frame.pushSynced();
}
void
mjit::Compiler::jsop_unbrand()
{
prepareStubCall(Uses(1));
- stubCall(stubs::Unbrand);
+ INLINE_STUBCALL(stubs::Unbrand);
}
bool
mjit::Compiler::jsop_instanceof()
{
FrameEntry *lhs = frame.peek(-2);
FrameEntry *rhs = frame.peek(-1);
// The fast path applies only when both operands are objects.
if (rhs->isNotType(JSVAL_TYPE_OBJECT) || lhs->isNotType(JSVAL_TYPE_OBJECT)) {
prepareStubCall(Uses(2));
- stubCall(stubs::InstanceOf);
+ INLINE_STUBCALL(stubs::InstanceOf);
frame.popn(2);
frame.takeReg(Registers::ReturnReg);
frame.pushTypedPayload(JSVAL_TYPE_BOOLEAN, Registers::ReturnReg);
return true;
}
MaybeJump firstSlow;
if (!rhs->isTypeKnown()) {
@@ -4567,17 +4579,17 @@ mjit::Compiler::jsop_instanceof()
/* Test for bound functions. */
RegisterID obj = frame.tempRegForData(rhs);
Jump isBound = masm.branchTest32(Assembler::NonZero, Address(obj, offsetof(JSObject, flags)),
Imm32(JSObject::BOUND_FUNCTION));
{
stubcc.linkExit(isBound, Uses(2));
stubcc.leave();
- stubcc.call(stubs::InstanceOf);
+ OOL_STUBCALL(stubs::InstanceOf);
firstSlow = stubcc.masm.jump();
}
/* This is sadly necessary because the error case needs the object. */
frame.dup();
if (!jsop_getprop(cx->runtime->atomState.classPrototypeAtom, false))
@@ -4613,17 +4625,17 @@ mjit::Compiler::jsop_instanceof()
isFalse2.linkTo(masm.label(), &masm);
masm.move(Imm32(0), temp);
isTrue.linkTo(masm.label(), &masm);
frame.freeReg(proto);
frame.freeReg(obj);
stubcc.leave();
- stubcc.call(stubs::FastInstanceOf);
+ OOL_STUBCALL(stubs::FastInstanceOf);
frame.popn(3);
frame.pushTypedPayload(JSVAL_TYPE_BOOLEAN, temp);
if (firstSlow.isSet())
firstSlow.getJump().linkTo(stubcc.masm.label(), &stubcc.masm);
stubcc.rejoin(Changes(1));
return true;
@@ -4633,26 +4645,26 @@ void
mjit::Compiler::emitEval(uint32 argc)
{
/* Check for interrupts on function call */
interruptCheckHelper();
frame.syncAndKill(Registers(Registers::AvailRegs), Uses(argc + 2));
prepareStubCall(Uses(argc + 2));
masm.move(Imm32(argc), Registers::ArgReg1);
- stubCall(stubs::Eval);
+ INLINE_STUBCALL(stubs::Eval);
frame.popn(argc + 2);
frame.pushSynced();
}
void
mjit::Compiler::jsop_arguments()
{
prepareStubCall(Uses(0));
- stubCall(stubs::Arguments);
+ INLINE_STUBCALL(stubs::Arguments);
}
/*
* Note: This function emits tracer hooks into the OOL path. This means if
* it is used in the middle of an in-progress slow path, the stream will be
* hopelessly corrupted. Take care to only call this before linkExits() and
* after rejoin()s.
*/
@@ -4710,17 +4722,17 @@ mjit::Compiler::jumpAndTrace(Jump j, jsb
traceICs[index] = ic;
# endif
/* Save and restore compiler-tracked PC, so cx->regs is right in InvokeTracer. */
{
jsbytecode* pc = PC;
PC = target;
- stubcc.call(stubs::InvokeTracer);
+ OOL_STUBCALL(stubs::InvokeTracer);
PC = pc;
}
Jump no = stubcc.masm.branchTestPtr(Assembler::Zero, Registers::ReturnReg,
Registers::ReturnReg);
restoreFrameRegs(stubcc.masm);
stubcc.masm.jump(Registers::ReturnReg);
@@ -4742,17 +4754,17 @@ mjit::Compiler::enterBlock(JSObject *obj
restoreFrameRegs(masm);
uint32 oldFrameDepth = frame.frameDepth();
/* For now, don't bother doing anything for this opcode. */
frame.syncAndForgetEverything();
masm.move(ImmPtr(obj), Registers::ArgReg1);
uint32 n = js_GetEnterBlockStackDefs(cx, script, PC);
- stubCall(stubs::EnterBlock);
+ INLINE_STUBCALL(stubs::EnterBlock);
frame.enterBlock(n);
uintN base = JSSLOT_FREE(&js_BlockClass);
uintN count = OBJ_BLOCK_COUNT(cx, obj);
uintN limit = base + count;
for (uintN slot = base, i = 0; slot < limit; slot++, i++) {
const Value &v = obj->getSlotRef(slot);
if (v.isBoolean() && v.toBoolean())
@@ -4766,17 +4778,17 @@ mjit::Compiler::leaveBlock()
/*
* Note: After bug 535912, we can pass the block obj directly, inline
* PutBlockObject, and do away with the muckiness in PutBlockObject.
*/
uint32 n = js_GetVariableStackUses(JSOP_LEAVEBLOCK, PC);
JSObject *obj = script->getObject(fullAtomIndex(PC + UINT16_LEN));
prepareStubCall(Uses(n));
masm.move(ImmPtr(obj), Registers::ArgReg1);
- stubCall(stubs::LeaveBlock);
+ INLINE_STUBCALL(stubs::LeaveBlock);
frame.leaveBlock(n);
}
// Creates the new object expected for constructors, and places it in |thisv|.
// It is broken down into the following operations:
// CALLEE
// GETPROP "prototype"
// IFPRIMTOP:
@@ -4809,23 +4821,23 @@ mjit::Compiler::constructThis()
stubcc.crossJump(stubcc.masm.jump(), masm.label());
// Done with the protoFe.
frame.pop();
prepareStubCall(Uses(0));
if (protoReg != Registers::ArgReg1)
masm.move(protoReg, Registers::ArgReg1);
- stubCall(stubs::CreateThis);
+ INLINE_STUBCALL(stubs::CreateThis);
frame.freeReg(protoReg);
return true;
}
void
mjit::Compiler::jsop_callelem_slow()
{
prepareStubCall(Uses(2));
- stubCall(stubs::CallElem);
+ INLINE_STUBCALL(stubs::CallElem);
frame.popn(2);
frame.pushSynced();
frame.pushSynced();
}
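
Note on the two macros used throughout the hunks above (defined in the Compiler.h section that follows): INLINE_STUBCALL expands to a do/while statement, while OOL_STUBCALL expands to an expression yielding the Call, which is why sites like `mic.call = OOL_STUBCALL(ic::GetGlobalName)` can capture the result. A minimal standalone sketch of that statement-vs-expression split, using toy stand-ins (Call, emitStubCallModel, recordCallSite are illustrative, not the real API):

#include <cstdio>

// Toy stand-ins for the real Call handle and emitter.
struct Call { int returnOffset; };

static Call emitStubCallModel(void (*fn)()) {
    fn();
    return Call{5};
}

static void recordCallSite(const Call &c, int line) {
    std::printf("call site: offset=%d, line=%d\n", c.returnOffset, line);
}

// Statement-shaped macro: usable only as a statement, cannot be assigned.
#define INLINE_STUBCALL_MODEL(fn)                \
    do {                                         \
        Call cl = emitStubCallModel(fn);         \
        recordCallSite(cl, __LINE__);            \
    } while (0)

// Expression-shaped macro: evaluates to the Call, so callers can keep it.
#define OOL_STUBCALL_MODEL(fn) emitStubCallModel(fn)

static void someStub() {}

int main() {
    INLINE_STUBCALL_MODEL(someStub);        // statement only; fire and forget
    Call c = OOL_STUBCALL_MODEL(someStub);  // expression; ICs keep the handle
    recordCallSite(c, __LINE__);
    return 0;
}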
--- a/js/src/methodjit/Compiler.h
+++ b/js/src/methodjit/Compiler.h
@@ -50,16 +50,18 @@
#include "MonoIC.h"
#include "PolyIC.h"
namespace js {
namespace mjit {
class Compiler : public BaseCompiler
{
+ friend class StubCompiler;
+
struct BranchPatch {
BranchPatch(const Jump &j, jsbytecode *pc)
: jump(j), pc(pc)
{ }
Jump jump;
jsbytecode *pc;
};
@@ -239,36 +241,43 @@ class Compiler : public BaseCompiler
struct Defs {
Defs(uint32 ndefs)
: ndefs(ndefs)
{ }
uint32 ndefs;
};
struct InternalCallSite {
- bool stub;
- Label location;
+ uint32 returnOffset;
jsbytecode *pc;
uint32 id;
+ bool call;
+ bool ool;
+
+ InternalCallSite(uint32 returnOffset, jsbytecode *pc, uint32 id,
+ bool call, bool ool)
+ : returnOffset(returnOffset), pc(pc), id(id), call(call), ool(ool)
+ { }
};
struct DoublePatch {
double d;
DataLabelPtr label;
bool ool;
};
JSStackFrame *fp;
JSScript *script;
JSObject *scopeChain;
JSObject *globalObj;
JSFunction *fun;
bool isConstructing;
analyze::Script *analysis;
Label *jumpMap;
+ bool *savedTraps;
jsbytecode *PC;
Assembler masm;
FrameState frame;
js::Vector<BranchPatch, 64, CompilerAllocPolicy> branchPatches;
#if defined JS_MONOIC
js::Vector<MICGenInfo, 64, CompilerAllocPolicy> mics;
js::Vector<CallGenInfo, 64, CompilerAllocPolicy> callICs;
js::Vector<EqualityGenInfo, 64, CompilerAllocPolicy> equalityICs;
@@ -280,17 +289,17 @@ class Compiler : public BaseCompiler
js::Vector<SetElementICInfo, 16, CompilerAllocPolicy> setElemICs;
#endif
js::Vector<CallPatchInfo, 64, CompilerAllocPolicy> callPatches;
js::Vector<InternalCallSite, 64, CompilerAllocPolicy> callSites;
js::Vector<DoublePatch, 16, CompilerAllocPolicy> doubleList;
StubCompiler stubcc;
Label invokeLabel;
Label arityLabel;
- bool debugMode;
+ bool debugMode_;
bool addTraceHints;
bool oomInVector; // True if we have OOM'd appending to a vector.
enum { NoApplyTricks, LazyArgsObj } applyTricks;
Compiler *thisFromCtor() { return this; }
friend class CompilerAllocPolicy;
public:
@@ -303,29 +312,33 @@ class Compiler : public BaseCompiler
CompileStatus compile();
jsbytecode *getPC() { return PC; }
Label getLabel() { return masm.label(); }
bool knownJump(jsbytecode *pc);
Label labelOf(jsbytecode *target);
void *findCallSite(const CallSite &callSite);
+ void addCallSite(const InternalCallSite &callSite);
+ void addReturnSite(Label joinPoint, uint32 id);
+ bool loadOldTraps(const Vector<CallSite> &sites);
+
+ bool debugMode() { return debugMode_; }
private:
CompileStatus performCompilation(JITScript **jitp);
CompileStatus generatePrologue();
CompileStatus generateMethod();
CompileStatus generateEpilogue();
CompileStatus finishThisUp(JITScript **jitp);
/* Non-emitting helpers. */
uint32 fullAtomIndex(jsbytecode *pc);
bool jumpInScript(Jump j, jsbytecode *pc);
bool compareTwoValues(JSContext *cx, JSOp op, const Value &lhs, const Value &rhs);
- void addCallSite(uint32 id, bool stub);
bool canUseApplyTricks();
/* Emitting helpers. */
void restoreFrameRegs(Assembler &masm);
bool emitStubCmpOp(BoolStub stub, jsbytecode *target, JSOp fused);
void iter(uintN flags);
void iterNext();
bool iterMore();
@@ -442,42 +455,40 @@ class Compiler : public BaseCompiler
bool jsop_setelem();
bool jsop_getelem(bool isCall);
bool isCacheableBaseAndIndex(FrameEntry *obj, FrameEntry *id);
void jsop_stricteq(JSOp op);
bool jsop_equality(JSOp op, BoolStub stub, jsbytecode *target, JSOp fused);
bool jsop_equality_int_string(JSOp op, BoolStub stub, jsbytecode *target, JSOp fused);
void jsop_pos();
-#define STUB_CALL_TYPE(type) \
- Call stubCall(type stub) { \
- return stubCall(JS_FUNC_TO_DATA_PTR(void *, stub)); \
- }
+
+ void prepareStubCall(Uses uses);
+ Call emitStubCall(void *ptr);
+};
- STUB_CALL_TYPE(JSObjStub);
- STUB_CALL_TYPE(VoidStubUInt32);
- STUB_CALL_TYPE(VoidStub);
- STUB_CALL_TYPE(VoidPtrStubUInt32);
- STUB_CALL_TYPE(VoidPtrStub);
- STUB_CALL_TYPE(BoolStub);
- STUB_CALL_TYPE(JSObjStubUInt32);
- STUB_CALL_TYPE(JSObjStubFun);
- STUB_CALL_TYPE(JSObjStubJSObj);
- STUB_CALL_TYPE(VoidStubAtom);
- STUB_CALL_TYPE(JSStrStub);
- STUB_CALL_TYPE(JSStrStubUInt32);
- STUB_CALL_TYPE(VoidStubJSObj);
- STUB_CALL_TYPE(VoidPtrStubPC);
- STUB_CALL_TYPE(VoidVpStub);
- STUB_CALL_TYPE(VoidStubPC);
- STUB_CALL_TYPE(BoolStubUInt32);
- STUB_CALL_TYPE(VoidStubFun);
+// Given a stub call, emits the call into the inline assembly path. If
+// debug mode is on, adds the appropriate instrumentation for recompilation.
+#define INLINE_STUBCALL(stub) \
+ do { \
+ Call cl = emitStubCall(JS_FUNC_TO_DATA_PTR(void *, (stub))); \
+ if (debugMode()) { \
+ InternalCallSite site(masm.callReturnOffset(cl), PC, __LINE__, \
+ true, false); \
+ addCallSite(site); \
+ } \
+ } while (0)
-#undef STUB_CALL_TYPE
- void prepareStubCall(Uses uses);
- Call stubCall(void *ptr);
-};
+// Given a stub call, emits the call into the out-of-line assembly path. If
+// debug mode is on, adds the appropriate instrumentation for recompilation.
+// Unlike the INLINE_STUBCALL variant, this returns the Call offset.
+#define OOL_STUBCALL(stub) \
+ stubcc.emitStubCall(JS_FUNC_TO_DATA_PTR(void *, (stub)), __LINE__)
+
+// Same as OOL_STUBCALL, but specifies a slot depth.
+#define OOL_STUBCALL_SLOTS(stub, slots) \
+ stubcc.emitStubCall(JS_FUNC_TO_DATA_PTR(void *, (stub)), (slots), __LINE__)
} /* namespace mjit */
} /* namespace js */
#endif
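
The INLINE_STUBCALL definition above records an InternalCallSite keyed by __LINE__ only when debugMode() is set; the recompiler later matches sites by these IDs. A self-contained model of that bookkeeping, assuming nothing beyond the shapes visible in this hunk (CompilerModel and MODEL_STUBCALL are invented names):

#include <cstdint>
#include <cstdio>
#include <vector>

// Illustrative model of InternalCallSite: a return offset into the emitted
// code, the bytecode PC, and an ID (the emitting source line).
struct InternalCallSite {
    uint32_t returnOffset;
    const void *pc;
    uint32_t id;
    bool call, ool;
};

struct CompilerModel {
    bool debugMode_ = true;
    const void *PC = nullptr;
    uint32_t nextOffset = 0;
    std::vector<InternalCallSite> callSites;

    bool debugMode() const { return debugMode_; }
    uint32_t emitCall() { return nextOffset += 5; } // pretend a call is 5 bytes
    void addCallSite(const InternalCallSite &s) { callSites.push_back(s); }
};

// Mirrors the shape of INLINE_STUBCALL: emit, then record only in debug mode.
#define MODEL_STUBCALL(cc)                                                   \
    do {                                                                     \
        uint32_t off = (cc).emitCall();                                      \
        if ((cc).debugMode())                                                \
            (cc).addCallSite(InternalCallSite{off, (cc).PC, __LINE__,        \
                                              true, false});                 \
    } while (0)

int main() {
    CompilerModel cc;
    MODEL_STUBCALL(cc);
    MODEL_STUBCALL(cc);
    for (const InternalCallSite &s : cc.callSites)
        std::printf("offset=%u id=%u\n", s.returnOffset, s.id);
    return 0;
}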
--- a/js/src/methodjit/FastArithmetic.cpp
+++ b/js/src/methodjit/FastArithmetic.cpp
@@ -219,17 +219,17 @@ mjit::Compiler::jsop_binary(JSOp op, Voi
|| op == JSOP_MUL
#endif /* JS_CPU_ARM */
) {
bool isStringResult = (op == JSOP_ADD) &&
(lhs->isType(JSVAL_TYPE_STRING) ||
rhs->isType(JSVAL_TYPE_STRING));
prepareStubCall(Uses(2));
- stubCall(stub);
+ INLINE_STUBCALL(stub);
frame.popn(2);
if (isStringResult)
frame.pushSyncedType(JSVAL_TYPE_STRING);
else
frame.pushSynced();
return;
}
@@ -344,17 +344,17 @@ mjit::Compiler::jsop_binary_double(Frame
masm.storeDouble(fpLeft, frame.addressOf(lhs));
if (done.isSet())
done.getJump().linkTo(masm.label(), &masm);
if (lhsNotNumber.isSet() || rhsNotNumber.isSet()) {
stubcc.leave();
- stubcc.call(stub);
+ OOL_STUBCALL(stub);
}
frame.popn(2);
frame.pushNumber(MaybeRegisterID());
if (lhsNotNumber.isSet() || rhsNotNumber.isSet())
stubcc.rejoin(Changes(1));
}
@@ -451,17 +451,17 @@ mjit::Compiler::jsop_binary_full_simple(
/* Slow paths funnel here. */
if (notNumber.isSet())
notNumber.get().linkTo(stubcc.masm.label(), &stubcc.masm);
overflowDone.get().linkTo(stubcc.masm.label(), &stubcc.masm);
/* Slow call - use frame.sync to avoid erroneous jump repatching in stubcc. */
frame.sync(stubcc.masm, Uses(2));
stubcc.leave();
- stubcc.call(stub);
+ OOL_STUBCALL(stub);
/* Finish up stack operations. */
frame.popn(2);
frame.pushNumber(regs.result, true);
/* Merge back OOL double paths. */
if (doublePathDone.isSet())
stubcc.linkRejoin(doublePathDone.get());
@@ -698,17 +698,17 @@ mjit::Compiler::jsop_binary_full(FrameEn
rhsNotNumber.get().linkTo(stubcc.masm.label(), &stubcc.masm);
}
if (rhsNotNumber2.isSet())
rhsNotNumber2.get().linkTo(stubcc.masm.label(), &stubcc.masm);
/* Slow call - use frame.sync to avoid erroneous jump repatching in stubcc. */
frame.sync(stubcc.masm, Uses(2));
stubcc.leave();
- stubcc.call(stub);
+ OOL_STUBCALL(stub);
/* Finish up stack operations. */
frame.popn(2);
frame.pushNumber(regs.result, true);
/* Merge back OOL double paths. */
if (doublePathDone.isSet())
stubcc.linkRejoin(doublePathDone.get());
@@ -723,17 +723,17 @@ static const uint64 DoubleNegMask = 0x80
void
mjit::Compiler::jsop_neg()
{
FrameEntry *fe = frame.peek(-1);
if (fe->isTypeKnown() && fe->getKnownType() > JSVAL_UPPER_INCL_TYPE_OF_NUMBER_SET) {
prepareStubCall(Uses(1));
- stubCall(stubs::Neg);
+ INLINE_STUBCALL(stubs::Neg);
frame.pop();
frame.pushSynced();
return;
}
JS_ASSERT(!fe->isConstant());
/* Load type information into register. */
@@ -790,17 +790,17 @@ mjit::Compiler::jsop_neg()
jmpIntRejoin.setJump(stubcc.masm.jump());
}
frame.freeReg(reg);
if (feTypeReg.isSet())
frame.unpinReg(feTypeReg.reg());
stubcc.leave();
- stubcc.call(stubs::Neg);
+ OOL_STUBCALL(stubs::Neg);
frame.pop();
frame.pushSynced();
/* Link jumps. */
if (jmpNotDbl.isSet())
stubcc.linkExitDirect(jmpNotDbl.getJump(), lblIntPath);
@@ -822,17 +822,17 @@ mjit::Compiler::jsop_mod()
#if defined(JS_CPU_X86)
FrameEntry *lhs = frame.peek(-2);
FrameEntry *rhs = frame.peek(-1);
if ((lhs->isTypeKnown() && lhs->getKnownType() != JSVAL_TYPE_INT32) ||
(rhs->isTypeKnown() && rhs->getKnownType() != JSVAL_TYPE_INT32))
#endif
{
prepareStubCall(Uses(2));
- stubCall(stubs::Mod);
+ INLINE_STUBCALL(stubs::Mod);
frame.popn(2);
frame.pushSynced();
return;
}
#if defined(JS_CPU_X86)
if (!lhs->isTypeKnown()) {
Jump j = frame.testInt32(Assembler::NotEqual, lhs);
@@ -925,17 +925,17 @@ mjit::Compiler::jsop_mod()
/* Better - integer. */
masm.storeTypeTag(ImmType(JSVAL_TYPE_INT32), frame.addressOf(lhs));
if (done.isSet())
done.getJump().linkTo(masm.label(), &masm);
if (slowPath) {
stubcc.leave();
- stubcc.call(stubs::Mod);
+ OOL_STUBCALL(stubs::Mod);
}
frame.popn(2);
frame.pushNumber(X86Registers::edx);
if (slowPath)
stubcc.rejoin(Changes(1));
#endif
@@ -1021,23 +1021,23 @@ mjit::Compiler::jsop_equality_int_string
ic.stub = stub;
bool useIC = !addTraceHints || target >= PC;
/* Call the IC stub, which may generate a fast path. */
if (useIC) {
/* Adjust for the two values just pushed. */
ic.addrLabel = stubcc.masm.moveWithPatch(ImmPtr(NULL), Registers::ArgReg1);
- ic.stubCall = stubcc.call(ic::Equality, frame.stackDepth() + script->nfixed + 2);
+ ic.stubCall = OOL_STUBCALL_SLOTS(ic::Equality, frame.stackDepth() + script->nfixed + 2);
needStub = false;
}
#endif
if (needStub)
- stubcc.call(stub, frame.stackDepth() + script->nfixed + 2);
+ OOL_STUBCALL_SLOTS(stub, frame.stackDepth() + script->nfixed + 2);
/*
* The stub call has no need to rejoin, since state is synced.
* Instead, we can just test the return value.
*/
Assembler::Condition ncond = (fused == JSOP_IFEQ)
? Assembler::Zero
: Assembler::NonZero;
@@ -1113,17 +1113,17 @@ mjit::Compiler::jsop_equality_int_string
}
if (!rhsInt) {
Jump rhsFail = frame.testInt32(Assembler::NotEqual, rhs);
stubcc.linkExit(rhsFail, Uses(2));
}
}
stubcc.leave();
- stubcc.call(stub);
+ OOL_STUBCALL(stub);
RegisterID reg = frame.ownRegForData(lhs);
/* x86/64's SET instruction can only take single-byte regs. */
RegisterID resultReg = reg;
if (!(Registers::maskReg(reg) & Registers::SingleByteRegs))
resultReg = frame.allocReg(Registers::SingleByteRegs);
@@ -1270,17 +1270,17 @@ mjit::Compiler::jsop_relational_double(J
Assembler::DoubleCondition dblCond = DoubleCondForOp(op, fused);
if (target) {
if (lhsNotNumber.isSet())
stubcc.linkExitForBranch(lhsNotNumber.get());
if (rhsNotNumber.isSet())
stubcc.linkExitForBranch(rhsNotNumber.get());
stubcc.leave();
- stubcc.call(stub);
+ OOL_STUBCALL(stub);
frame.popn(2);
frame.syncAndForgetEverything();
Jump j = masm.branchDouble(dblCond, fpLeft, fpRight);
/*
* The stub call has no need to rejoin since the state is synced.
@@ -1302,17 +1302,17 @@ mjit::Compiler::jsop_relational_double(J
if (!jumpAndTrace(j, target, &sj))
return false;
} else {
if (lhsNotNumber.isSet())
stubcc.linkExit(lhsNotNumber.get(), Uses(2));
if (rhsNotNumber.isSet())
stubcc.linkExit(rhsNotNumber.get(), Uses(2));
stubcc.leave();
- stubcc.call(stub);
+ OOL_STUBCALL(stub);
frame.popn(2);
RegisterID reg = frame.allocReg();
Jump j = masm.branchDouble(dblCond, fpLeft, fpRight);
masm.move(Imm32(0), reg);
Jump skip = masm.jump();
j.linkTo(masm.label(), &masm);
@@ -1433,17 +1433,17 @@ mjit::Compiler::jsop_relational_full(JSO
/*
* For fusions, spill the tracker state. xmm* remain intact. Note
* that frame.sync() must be used directly, to avoid syncExit()'s
* jumping logic.
*/
frame.sync(stubcc.masm, Uses(frame.frameDepth()));
stubcc.leave();
- stubcc.call(stub);
+ OOL_STUBCALL(stub);
}
/* Forget the world, preserving data. */
frame.pinReg(cmpReg);
if (reg.isSet())
frame.pinReg(reg.reg());
frame.popn(2);
@@ -1538,17 +1538,17 @@ mjit::Compiler::jsop_relational_full(JSO
rhsNotNumber.get().linkTo(stubcc.masm.label(), &stubcc.masm);
}
if (rhsNotNumber2.isSet())
rhsNotNumber2.get().linkTo(stubcc.masm.label(), &stubcc.masm);
/* Emit the slow path - note full frame syncage. */
frame.sync(stubcc.masm, Uses(2));
stubcc.leave();
- stubcc.call(stub);
+ OOL_STUBCALL(stub);
}
/* Get an integer comparison condition. */
Assembler::Condition i32Cond;
switch (cmpOp) {
case JSOP_GT:
i32Cond = Assembler::GreaterThan;
break;
--- a/js/src/methodjit/FastOps.cpp
+++ b/js/src/methodjit/FastOps.cpp
@@ -131,17 +131,17 @@ mjit::Compiler::jsop_rsh_unknown_const(F
frame.loadDouble(lhs, FPRegisters::First, stubcc.masm);
Jump lhsTruncateGuard = stubcc.masm.branchTruncateDoubleToInt32(FPRegisters::First, lhsData);
stubcc.crossJump(stubcc.masm.jump(), masm.label());
lhsDoubleGuard.linkTo(stubcc.masm.label(), &stubcc.masm);
lhsTruncateGuard.linkTo(stubcc.masm.label(), &stubcc.masm);
frame.sync(stubcc.masm, Uses(2));
- stubcc.call(stubs::Rsh);
+ OOL_STUBCALL(stubs::Rsh);
if (shiftAmount)
masm.rshift32(Imm32(shiftAmount), lhsData);
frame.popn(2);
frame.pushTypedPayload(JSVAL_TYPE_INT32, lhsData);
stubcc.rejoin(Changes(1));
@@ -154,17 +154,17 @@ mjit::Compiler::jsop_rsh_const_unknown(F
RegisterID rhsType = frame.tempRegForType(rhs);
frame.pinReg(rhsType);
RegisterID result = frame.allocReg();
frame.unpinReg(rhsType);
Jump rhsIntGuard = masm.testInt32(Assembler::NotEqual, rhsType);
stubcc.linkExit(rhsIntGuard, Uses(2));
stubcc.leave();
- stubcc.call(stubs::Rsh);
+ OOL_STUBCALL(stubs::Rsh);
masm.move(Imm32(lhs->getValue().toInt32()), result);
masm.rshift32(rhsData, result);
frame.freeReg(rhsData);
frame.popn(2);
frame.pushTypedPayload(JSVAL_TYPE_INT32, result);
stubcc.rejoin(Changes(1));
}
@@ -176,17 +176,17 @@ mjit::Compiler::jsop_rsh_int_unknown(Fra
RegisterID rhsType = frame.tempRegForType(rhs);
frame.pinReg(rhsType);
RegisterID lhsData = frame.copyDataIntoReg(lhs);
frame.unpinReg(rhsType);
Jump rhsIntGuard = masm.testInt32(Assembler::NotEqual, rhsType);
stubcc.linkExit(rhsIntGuard, Uses(2));
stubcc.leave();
- stubcc.call(stubs::Rsh);
+ OOL_STUBCALL(stubs::Rsh);
masm.rshift32(rhsData, lhsData);
frame.freeReg(rhsData);
frame.popn(2);
frame.pushTypedPayload(JSVAL_TYPE_INT32, lhsData);
stubcc.rejoin(Changes(1));
}
@@ -231,17 +231,17 @@ mjit::Compiler::jsop_rsh_unknown_any(Fra
stubcc.crossJump(stubcc.masm.jump(), masm.label());
lhsDoubleGuard.linkTo(stubcc.masm.label(), &stubcc.masm);
lhsTruncateGuard.linkTo(stubcc.masm.label(), &stubcc.masm);
if (rhsIntGuard.isSet())
stubcc.linkExitDirect(rhsIntGuard.getJump(), stubcc.masm.label());
frame.sync(stubcc.masm, Uses(2));
- stubcc.call(stubs::Rsh);
+ OOL_STUBCALL(stubs::Rsh);
masm.rshift32(rhsData, lhsData);
frame.freeReg(rhsData);
frame.popn(2);
frame.pushTypedPayload(JSVAL_TYPE_INT32, lhsData);
stubcc.rejoin(Changes(1));
@@ -253,17 +253,17 @@ mjit::Compiler::jsop_rsh()
FrameEntry *rhs = frame.peek(-1);
FrameEntry *lhs = frame.peek(-2);
if (tryBinaryConstantFold(cx, frame, JSOP_RSH, lhs, rhs))
return;
if (lhs->isNotType(JSVAL_TYPE_INT32) || rhs->isNotType(JSVAL_TYPE_INT32)) {
prepareStubCall(Uses(2));
- stubCall(stubs::Rsh);
+ INLINE_STUBCALL(stubs::Rsh);
frame.popn(2);
frame.pushSyncedType(JSVAL_TYPE_INT32);
return;
}
JS_ASSERT(!(lhs->isConstant() && rhs->isConstant()));
if (lhs->isConstant()) {
if (rhs->isType(JSVAL_TYPE_INT32))
@@ -288,34 +288,34 @@ mjit::Compiler::jsop_rsh()
void
mjit::Compiler::jsop_bitnot()
{
FrameEntry *top = frame.peek(-1);
/* We only want to handle integers here. */
if (top->isTypeKnown() && top->getKnownType() != JSVAL_TYPE_INT32) {
prepareStubCall(Uses(1));
- stubCall(stubs::BitNot);
+ INLINE_STUBCALL(stubs::BitNot);
frame.pop();
frame.pushSyncedType(JSVAL_TYPE_INT32);
return;
}
/* Test the type. */
bool stubNeeded = false;
if (!top->isTypeKnown()) {
Jump intFail = frame.testInt32(Assembler::NotEqual, top);
stubcc.linkExit(intFail, Uses(1));
frame.learnType(top, JSVAL_TYPE_INT32);
stubNeeded = true;
}
if (stubNeeded) {
stubcc.leave();
- stubcc.call(stubs::BitNot);
+ OOL_STUBCALL(stubs::BitNot);
}
RegisterID reg = frame.ownRegForData(top);
masm.not32(reg);
frame.pop();
frame.pushTypedPayload(JSVAL_TYPE_INT32, reg);
if (stubNeeded)
@@ -370,31 +370,31 @@ mjit::Compiler::jsop_bitop(JSOp op)
Jump notDouble = masm.testDouble(Assembler::NotEqual, typeReg);
stubcc.linkExit(notDouble, Uses(2));
}
frame.loadDouble(lhs, FPRegisters::First, masm);
Jump truncateGuard = masm.branchTruncateDoubleToInt32(FPRegisters::First, reg);
stubcc.linkExit(truncateGuard, Uses(2));
stubcc.leave();
- stubcc.call(stub);
+ OOL_STUBCALL(stub);
if (isInt.isSet())
isInt.get().linkTo(masm.label(), &masm);
frame.popn(2);
frame.pushTypedPayload(JSVAL_TYPE_INT32, reg);
stubcc.rejoin(Changes(1));
return;
}
/* We only want to handle integers here. */
if (rhs->isNotType(JSVAL_TYPE_INT32) || lhs->isNotType(JSVAL_TYPE_INT32) ||
(op == JSOP_URSH && rhs->isConstant() && rhs->getValue().toInt32() % 32 == 0)) {
prepareStubCall(Uses(2));
- stubCall(stub);
+ INLINE_STUBCALL(stub);
frame.popn(2);
if (op == JSOP_URSH)
frame.pushSynced();
else
frame.pushSyncedType(JSVAL_TYPE_INT32);
return;
}
@@ -498,17 +498,17 @@ mjit::Compiler::jsop_bitop(JSOp op)
if (shift) {
if (op == JSOP_LSH)
masm.lshift32(Imm32(shift), reg);
else
masm.urshift32(Imm32(shift), reg);
}
if (stubNeeded) {
stubcc.leave();
- stubcc.call(stub);
+ OOL_STUBCALL(stub);
}
frame.popn(2);
/* x >>> 0 may result in a double, handled above. */
JS_ASSERT_IF(op == JSOP_URSH, shift >= 1);
frame.pushTypedPayload(JSVAL_TYPE_INT32, reg);
if (stubNeeded)
@@ -555,17 +555,17 @@ mjit::Compiler::jsop_bitop(JSOp op)
default:
JS_NOT_REACHED("NYI");
return;
}
if (stubNeeded) {
stubcc.leave();
- stubcc.call(stub);
+ OOL_STUBCALL(stub);
}
frame.pop();
frame.pop();
if (op == JSOP_URSH)
frame.pushNumber(reg, true);
else
@@ -805,17 +805,17 @@ mjit::Compiler::jsop_not()
frame.pop();
frame.push(BooleanValue(false));
break;
}
default:
{
prepareStubCall(Uses(1));
- stubCall(stubs::ValueToBoolean);
+ INLINE_STUBCALL(stubs::ValueToBoolean);
RegisterID reg = Registers::ReturnReg;
frame.takeReg(reg);
masm.xor32(Imm32(1), reg);
frame.pop();
frame.pushTypedPayload(JSVAL_TYPE_BOOLEAN, reg);
break;
@@ -862,17 +862,17 @@ mjit::Compiler::jsop_not()
stubcc.crossJump(jmpInt32Exit, lblRejoin);
jmpNotObject.linkTo(syncTarget, &stubcc.masm);
stubcc.crossJump(jmpObjectExit, lblRejoin);
/* Leave. */
stubcc.leave();
- stubcc.call(stubs::Not);
+ OOL_STUBCALL(stubs::Not);
frame.pop();
frame.pushTypedPayload(JSVAL_TYPE_BOOLEAN, data);
stubcc.rejoin(Changes(1));
}
void
@@ -908,17 +908,17 @@ mjit::Compiler::jsop_typeof()
if (atom) {
frame.pop();
frame.push(StringValue(ATOM_TO_STRING(atom)));
return;
}
}
prepareStubCall(Uses(1));
- stubCall(stubs::TypeOf);
+ INLINE_STUBCALL(stubs::TypeOf);
frame.pop();
frame.takeReg(Registers::ReturnReg);
frame.pushTypedPayload(JSVAL_TYPE_STRING, Registers::ReturnReg);
}
bool
mjit::Compiler::booleanJumpScript(JSOp op, jsbytecode *target)
{
@@ -966,25 +966,18 @@ mjit::Compiler::booleanJumpScript(JSOp o
/* OOL path: Conversion to boolean. */
MaybeJump jmpCvtExecScript;
MaybeJump jmpCvtRejoin;
Label lblCvtPath = stubcc.masm.label();
if (!fe->isTypeKnown() ||
!(fe->isType(JSVAL_TYPE_BOOLEAN) || fe->isType(JSVAL_TYPE_INT32))) {
- stubcc.masm.fixScriptStack(frame.frameDepth());
- stubcc.masm.setupVMFrame();
-#if defined(JS_NO_FASTCALL) && defined(JS_CPU_X86)
- stubcc.masm.push(Registers::ArgReg0);
-#endif
- stubcc.masm.call(JS_FUNC_TO_DATA_PTR(void *, stubs::ValueToBoolean));
-#if defined(JS_NO_FASTCALL) && defined(JS_CPU_X86)
- stubcc.masm.pop();
-#endif
+ stubcc.masm.infallibleVMCall(JS_FUNC_TO_DATA_PTR(void *, stubs::ValueToBoolean),
+ frame.frameDepth());
jmpCvtExecScript.setJump(stubcc.masm.branchTest32(cond, Registers::ReturnReg,
Registers::ReturnReg));
jmpCvtRejoin.setJump(stubcc.masm.jump());
}
/* Rejoin tag. */
Label lblAfterScript = masm.label();
@@ -1127,19 +1120,19 @@ mjit::Compiler::jsop_localinc(JSOp op, u
ovf = masm.branchSub32(Assembler::Overflow, Imm32(1), reg);
stubcc.linkExit(ovf, Uses(1));
/* Note, stub call will push the original value again no matter what. */
stubcc.leave();
stubcc.masm.move(Imm32(slot), Registers::ArgReg1);
if (op == JSOP_LOCALINC || op == JSOP_INCLOCAL)
- stubcc.call(stubs::IncLocal);
+ OOL_STUBCALL(stubs::IncLocal);
else
- stubcc.call(stubs::DecLocal);
+ OOL_STUBCALL(stubs::DecLocal);
frame.pop();
frame.pushTypedPayload(JSVAL_TYPE_INT32, reg);
frame.storeLocal(slot, popped, false);
if (popped)
frame.pop();
else
@@ -1324,19 +1317,19 @@ mjit::Compiler::jsop_setelem()
ic.holeGuard = masm.guardNotHole(slot);
masm.storeValue(ic.vr, slot);
}
stubcc.linkExitDirect(ic.holeGuard, ic.slowPathStart);
stubcc.leave();
#ifdef JS_POLYIC
passICAddress(&ic);
- ic.slowPathCall = stubcc.call(STRICT_VARIANT(ic::SetElement));
+ ic.slowPathCall = OOL_STUBCALL(STRICT_VARIANT(ic::SetElement));
#else
- stubcc.call(STRICT_VARIANT(stubs::SetElem));
+ OOL_STUBCALL(STRICT_VARIANT(stubs::SetElem));
#endif
ic.fastPathRejoin = masm.label();
frame.freeReg(ic.objReg);
frame.shimmy(2);
stubcc.rejoin(Changes(2));
@@ -1483,24 +1476,24 @@ mjit::Compiler::jsop_getelem(bool isCall
}
stubcc.leave();
if (objTypeGuard.isSet())
objTypeGuard.get().linkTo(stubcc.masm.label(), &stubcc.masm);
#ifdef JS_POLYIC
passICAddress(&ic);
if (isCall)
- ic.slowPathCall = stubcc.call(ic::CallElement);
+ ic.slowPathCall = OOL_STUBCALL(ic::CallElement);
else
- ic.slowPathCall = stubcc.call(ic::GetElement);
+ ic.slowPathCall = OOL_STUBCALL(ic::GetElement);
#else
if (isCall)
- ic.slowPathCall = stubcc.call(stubs::CallElem);
+ ic.slowPathCall = OOL_STUBCALL(stubs::CallElem);
else
- ic.slowPathCall = stubcc.call(stubs::GetElem);
+ ic.slowPathCall = OOL_STUBCALL(stubs::GetElem);
#endif
ic.fastPathRejoin = masm.label();
frame.popn(2);
frame.pushRegs(ic.typeReg, ic.objReg);
if (isCall)
frame.pushSynced();
@@ -1664,19 +1657,19 @@ mjit::Compiler::jsop_stricteq(JSOp op)
}
/* Is it impossible that both Values are ints? */
if ((lhs->isTypeKnown() && lhs->isNotType(JSVAL_TYPE_INT32)) ||
(rhs->isTypeKnown() && rhs->isNotType(JSVAL_TYPE_INT32))) {
prepareStubCall(Uses(2));
if (op == JSOP_STRICTEQ)
- stubCall(stubs::StrictEq);
+ INLINE_STUBCALL(stubs::StrictEq);
else
- stubCall(stubs::StrictNe);
+ INLINE_STUBCALL(stubs::StrictNe);
frame.popn(2);
frame.pushSyncedType(JSVAL_TYPE_BOOLEAN);
return;
}
#ifndef JS_CPU_ARM
/* Try an integer fast-path. */
@@ -1718,63 +1711,63 @@ mjit::Compiler::jsop_stricteq(JSOp op)
masm.set32(cond, testReg, otherReg, resultReg);
}
frame.unpinReg(testReg);
if (needStub) {
stubcc.leave();
if (op == JSOP_STRICTEQ)
- stubcc.call(stubs::StrictEq);
+ OOL_STUBCALL(stubs::StrictEq);
else
- stubcc.call(stubs::StrictNe);
+ OOL_STUBCALL(stubs::StrictNe);
}
frame.popn(2);
frame.pushTypedPayload(JSVAL_TYPE_BOOLEAN, resultReg);
if (needStub)
stubcc.rejoin(Changes(1));
#else
/* TODO: Port set32() logic to ARM. */
prepareStubCall(Uses(2));
if (op == JSOP_STRICTEQ)
- stubCall(stubs::StrictEq);
+ INLINE_STUBCALL(stubs::StrictEq);
else
- stubCall(stubs::StrictNe);
+ INLINE_STUBCALL(stubs::StrictNe);
frame.popn(2);
frame.pushSyncedType(JSVAL_TYPE_BOOLEAN);
return;
#endif
}
void
mjit::Compiler::jsop_pos()
{
FrameEntry *top = frame.peek(-1);
if (top->isTypeKnown()) {
if (top->getKnownType() <= JSVAL_TYPE_INT32)
return;
prepareStubCall(Uses(1));
- stubCall(stubs::Pos);
+ INLINE_STUBCALL(stubs::Pos);
frame.pop();
frame.pushSynced();
return;
}
frame.giveOwnRegs(top);
Jump j;
if (frame.shouldAvoidTypeRemat(top))
j = masm.testNumber(Assembler::NotEqual, frame.addressOf(top));
else
j = masm.testNumber(Assembler::NotEqual, frame.tempRegForType(top));
stubcc.linkExit(j, Uses(1));
stubcc.leave();
- stubcc.call(stubs::Pos);
+ OOL_STUBCALL(stubs::Pos);
stubcc.rejoin(Changes(1));
}
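
The booleanJumpScript hunk above collapses an open-coded call sequence (fixScriptStack, setupVMFrame, and #ifdef'd push/pop for x86 without fastcall) into a single infallibleVMCall. The win is that platform-conditional glue lives in one helper rather than at every call site. A standalone sketch of the refactoring pattern, with hypothetical names:

#include <cstdio>

// Hypothetical pre/post call bookkeeping that used to be open-coded at each
// call site behind #ifdefs.
static void fixStack(int depth)  { std::printf("fix stack, depth=%d\n", depth); }
static void setupFrame()         { std::printf("setup VM frame\n"); }
static void platformPrologue()   { /* e.g. push a register on some ABIs */ }
static void platformEpilogue()   { /* matching pop */ }

// One helper owns the whole sequence; callers no longer repeat it.
static void infallibleVMCallModel(void (*fn)(), int frameDepth) {
    fixStack(frameDepth);
    setupFrame();
    platformPrologue();
    fn();
    platformEpilogue();
}

static void valueToBoolean() { std::printf("stub body\n"); }

int main() {
    infallibleVMCallModel(valueToBoolean, 3);
    return 0;
}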
--- a/js/src/methodjit/MachineRegs.h
+++ b/js/src/methodjit/MachineRegs.h
@@ -43,16 +43,20 @@
#include "jsbit.h"
#include "assembler/assembler/MacroAssembler.h"
namespace js {
namespace mjit {
struct Registers {
+ enum CallConvention {
+ NormalCall,
+ FastCall
+ };
typedef JSC::MacroAssembler::RegisterID RegisterID;
// Homed and scratch registers for working with Values on x64.
#if defined(JS_CPU_X64)
static const RegisterID TypeMaskReg = JSC::X86Registers::r13;
static const RegisterID PayloadMaskReg = JSC::X86Registers::r14;
static const RegisterID ValueReg = JSC::X86Registers::r10;
@@ -167,16 +171,80 @@ struct Registers {
static const uint32 AvailRegs = SavedRegs | TempRegs;
static bool isSaved(RegisterID reg) {
uint32 mask = maskReg(reg);
JS_ASSERT(mask & AvailRegs);
return bool(mask & SavedRegs);
}
+ static inline uint32 numArgRegs(CallConvention convention) {
+#if defined(JS_CPU_X86)
+# if defined(JS_NO_FASTCALL)
+ return 0;
+# else
+ return (convention == FastCall) ? 2 : 0;
+# endif
+#elif defined(JS_CPU_X64)
+# ifdef _WIN64
+ return 4;
+# else
+ return 6;
+# endif
+#elif defined(JS_CPU_ARM)
+ return 4;
+#endif
+ }
+
+ static inline bool regForArg(CallConvention conv, uint32 i, RegisterID *reg) {
+#if defined(JS_CPU_X86)
+ static const RegisterID regs[] = {
+ JSC::X86Registers::ecx,
+ JSC::X86Registers::edx
+ };
+
+# if defined(JS_NO_FASTCALL)
+ return false;
+# else
+ if (conv == NormalCall)
+ return false;
+# endif
+#elif defined(JS_CPU_X64)
+# ifdef _WIN64
+ static const RegisterID regs[] = {
+ JSC::X86Registers::ecx,
+ JSC::X86Registers::edx,
+ JSC::X86Registers::r8,
+ JSC::X86Registers::r9
+ };
+# else
+ static const RegisterID regs[] = {
+ JSC::X86Registers::edi,
+ JSC::X86Registers::esi,
+ JSC::X86Registers::edx,
+ JSC::X86Registers::ecx,
+ JSC::X86Registers::r8,
+ JSC::X86Registers::r9
+ };
+# endif
+#elif defined(JS_CPU_ARM)
+ static const RegisterID regs[] = {
+ JSC::ARMRegisters::r0,
+ JSC::ARMRegisters::r1,
+ JSC::ARMRegisters::r2,
+ JSC::ARMRegisters::r3
+ };
+#endif
+ JS_ASSERT(numArgRegs(conv) == JS_ARRAY_LENGTH(regs));
+ if (i >= JS_ARRAY_LENGTH(regs))
+ return false;
+ *reg = regs[i];
+ return true;
+ }
+
Registers()
: freeMask(AvailRegs)
{ }
Registers(uint32 freeMask)
: freeMask(freeMask)
{ }
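
numArgRegs and regForArg above centralize the per-convention argument registers in static tables guarded by a bounds check (note the >= in the fix: index JS_ARRAY_LENGTH(regs) is already out of range). A portable standalone model of the same table lookup, with a toy register numbering:

#include <cstddef>
#include <cstdio>

enum CallConvention { NormalCall, FastCall };
using RegisterID = int; // toy register numbering, not the real enum

// Model of regForArg: map argument index i to a register for the given
// convention, or report that the argument must go on the stack.
static bool regForArgModel(CallConvention conv, size_t i, RegisterID *reg) {
    static const RegisterID fastRegs[] = {1, 2}; // e.g. ecx, edx under fastcall
    if (conv != FastCall)
        return false;                            // all args on the stack
    if (i >= sizeof(fastRegs) / sizeof(fastRegs[0]))
        return false;                            // >= : index N is out of range
    *reg = fastRegs[i];
    return true;
}

int main() {
    for (size_t i = 0; i < 3; i++) {
        RegisterID r;
        if (regForArgModel(FastCall, i, &r))
            std::printf("arg %zu -> reg %d\n", i, r);
        else
            std::printf("arg %zu -> stack\n", i);
    }
    return 0;
}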
--- a/js/src/methodjit/MethodJIT.h
+++ b/js/src/methodjit/MethodJIT.h
@@ -375,16 +375,32 @@ CanMethodJIT(JSContext *cx, JSScript *sc
return Compile_Okay;
}
struct CallSite
{
uint32 codeOffset;
uint32 pcOffset;
uint32 id;
+
+ // Normally, a callsite's ID is the __LINE__ in the program that added the
+ // callsite. Since traps can be removed, we make sure their sites carry
+ // over across compilations, and identify them with a single, canonical
+ // ID. Hopefully a SpiderMonkey file won't have two billion source lines.
+ static const uint32 MAGIC_TRAP_ID = 0xFEDCBABC;
+
+ void initialize(uint32 codeOffset, uint32 pcOffset, uint32 id) {
+ this->codeOffset = codeOffset;
+ this->pcOffset = pcOffset;
+ this->id = id;
+ }
+
+ bool isTrap() const {
+ return id == MAGIC_TRAP_ID;
+ }
};
/* Re-enables a tracepoint in the method JIT. */
void
EnableTraceHint(JSScript *script, jsbytecode *pc, uint16_t index);
uintN
GetCallTargetCount(JSScript *script, jsbytecode *pc);
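
CallSite reserves MAGIC_TRAP_ID so trap sites remain identifiable independent of the __LINE__-derived IDs, which is what lets Recompiler::saveTraps (Retcon.cpp hunk below) collect them before the old JIT code is released. A self-contained model of the ID scheme and the filter:

#include <cstdint>
#include <cstdio>
#include <vector>

struct CallSite {
    uint32_t codeOffset;
    uint32_t pcOffset;
    uint32_t id;

    // One canonical ID marks trap sites; everything else is a __LINE__ tag.
    static constexpr uint32_t MAGIC_TRAP_ID = 0xFEDCBABC;
    bool isTrap() const { return id == MAGIC_TRAP_ID; }
};

// Model of saveTraps: keep only trap sites from the old compilation.
static bool saveTrapsModel(const std::vector<CallSite> &all,
                           std::vector<CallSite> *out) {
    for (const CallSite &site : all) {
        if (site.isTrap())
            out->push_back(site);
    }
    return true;
}

int main() {
    std::vector<CallSite> sites = {
        {0x10, 3, 123},                      // ordinary stub call site
        {0x40, 9, CallSite::MAGIC_TRAP_ID},  // a trap
    };
    std::vector<CallSite> traps;
    saveTrapsModel(sites, &traps);
    std::printf("saved %zu trap(s) at pcOffset %u\n", traps.size(),
                traps.empty() ? 0u : traps[0].pcOffset);
    return 0;
}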
--- a/js/src/methodjit/MonoIC.cpp
+++ b/js/src/methodjit/MonoIC.cpp
@@ -510,20 +510,20 @@ class CallCompiler : public BaseCompiler
masm.loadPtr(Address(t0, offset), t0);
Jump hasCode = masm.branchPtr(Assembler::Above, t0, ImmPtr(JS_UNJITTABLE_SCRIPT));
/* Try and compile. On success we get back the nmap pointer. */
masm.storePtr(JSFrameReg, FrameAddress(offsetof(VMFrame, regs.fp)));
void *compilePtr = JS_FUNC_TO_DATA_PTR(void *, stubs::CompileFunction);
if (ic.frameSize.isStatic()) {
masm.move(Imm32(ic.frameSize.staticArgc()), Registers::ArgReg1);
- masm.stubCall(compilePtr, script->code, ic.frameSize.staticFrameDepth());
+ masm.fallibleVMCall(compilePtr, script->code, ic.frameSize.staticFrameDepth());
} else {
masm.load32(FrameAddress(offsetof(VMFrame, u.call.dynamicArgc)), Registers::ArgReg1);
- masm.stubCallWithDynamicDepth(compilePtr, script->code);
+ masm.fallibleVMCall(compilePtr, script->code, -1);
}
masm.loadPtr(FrameAddress(offsetof(VMFrame, regs.fp)), JSFrameReg);
Jump notCompiled = masm.branchTestPtr(Assembler::Zero, Registers::ReturnReg,
Registers::ReturnReg);
masm.jump(Registers::ReturnReg);
@@ -665,18 +665,18 @@ class CallCompiler : public BaseCompiler
/* Generate fast-path for calling this native. */
Assembler masm;
/* Guard on the function object identity, for now. */
Jump funGuard = masm.branchPtr(Assembler::NotEqual, ic.funObjReg, ImmPtr(obj));
/* N.B. After this call, the frame will have a dynamic frame size. */
if (ic.frameSize.isDynamic()) {
- masm.stubCall(JS_FUNC_TO_DATA_PTR(void *, ic::SplatApplyArgs),
- f.regs.pc, initialFrameDepth);
+ masm.fallibleVMCall(JS_FUNC_TO_DATA_PTR(void *, ic::SplatApplyArgs),
+ f.regs.pc, initialFrameDepth);
}
Registers tempRegs;
#ifndef JS_CPU_X86
tempRegs.takeReg(Registers::ArgReg0);
tempRegs.takeReg(Registers::ArgReg1);
tempRegs.takeReg(Registers::ArgReg2);
#endif
@@ -691,17 +691,17 @@ class CallCompiler : public BaseCompiler
uint32 spOffset = sizeof(JSStackFrame) + initialFrameDepth * sizeof(Value);
masm.addPtr(Imm32(spOffset), JSFrameReg, t0);
masm.storePtr(t0, FrameAddress(offsetof(VMFrame, regs.sp)));
}
/* Store fp. */
masm.storePtr(JSFrameReg, FrameAddress(offsetof(VMFrame, regs.fp)));
- /* Grab cx early on to avoid stack mucking on x86. */
+ /* Grab cx. */
#ifdef JS_CPU_X86
RegisterID cxReg = tempRegs.takeAnyReg();
#else
RegisterID cxReg = Registers::ArgReg0;
#endif
masm.loadPtr(FrameAddress(offsetof(VMFrame, cx)), cxReg);
/* Compute vp. */
@@ -732,85 +732,41 @@ class CallCompiler : public BaseCompiler
/* Mark vp[1] as magic for |new|. */
if (callingNew) {
Value v;
v.setMagicWithObjectOrNullPayload(NULL);
masm.storeValue(v, Address(vpReg, sizeof(Value)));
}
-#ifdef JS_CPU_X86
- /* x86's stack should be 16-byte aligned. */
- masm.subPtr(Imm32(16), Assembler::stackPointerRegister);
- masm.storePtr(vpReg, Address(Assembler::stackPointerRegister, 8));
-#endif
-
- /* Push argc. */
-#ifdef JS_CPU_X86
- if (ic.frameSize.isStatic())
- masm.store32(Imm32(ic.frameSize.staticArgc()), Address(Assembler::stackPointerRegister, 4));
- else
- masm.store32(argcReg.reg(), Address(Assembler::stackPointerRegister, 4));
-#else
+ masm.setupABICall(Registers::NormalCall, 3);
+ masm.storeArg(2, vpReg);
if (ic.frameSize.isStatic())
- masm.move(Imm32(ic.frameSize.staticArgc()), Registers::ArgReg1);
- else if (argcReg.reg() != Registers::ArgReg1)
- masm.move(argcReg.reg(), Registers::ArgReg1);
-#endif
-
- /* Push cx. */
-#ifdef JS_CPU_X86
- masm.storePtr(cxReg, Address(Assembler::stackPointerRegister, 0));
-#endif
-
-#ifdef _WIN64
- /* x64 needs to pad the stack */
- masm.subPtr(Imm32(32), Assembler::stackPointerRegister);
-#endif
- /* Make the call. */
- Assembler::Call call = masm.call();
-
-#ifdef JS_CPU_X86
- masm.addPtr(Imm32(16), Assembler::stackPointerRegister);
-#endif
-#if defined(JS_NO_FASTCALL) && defined(JS_CPU_X86)
- // Usually JaegerThrowpoline got called from return address.
- // So in JaegerThrowpoline without fastcall, esp was added by 8.
- // If we just want to jump there, we need to sub esp by 8 first.
- masm.subPtr(Imm32(8), Assembler::stackPointerRegister);
-#endif
+ masm.storeArg(1, Imm32(ic.frameSize.staticArgc()));
+ else
+ masm.storeArg(1, argcReg.reg());
+ masm.storeArg(0, cxReg);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, fun->u.n.native));
Jump hasException = masm.branchTest32(Assembler::Zero, Registers::ReturnReg,
Registers::ReturnReg);
-#if defined(JS_NO_FASTCALL) && defined(JS_CPU_X86)
- // Usually JaegerThrowpoline got called from return address.
- // So in JaegerThrowpoline without fastcall, esp was added by 8.
- // If we just want to jump there, we need to sub esp by 8 first.
- masm.addPtr(Imm32(8), Assembler::stackPointerRegister);
-#elif defined(_WIN64)
- /* JaegerThrowpoline expcets that stack is added by 32 for padding */
- masm.addPtr(Imm32(32), Assembler::stackPointerRegister);
-#endif
-
Jump done = masm.jump();
/* Move JaegerThrowpoline into register for very far jump on x64. */
hasException.linkTo(masm.label(), &masm);
- masm.move(ImmPtr(JS_FUNC_TO_DATA_PTR(void *, JaegerThrowpoline)), Registers::ReturnReg);
- masm.jump(Registers::ReturnReg);
+ masm.throwInJIT();
JSC::ExecutablePool *ep = poolForSize(masm.size(), CallICInfo::Pool_NativeStub);
if (!ep)
THROWV(true);
JSC::LinkBuffer buffer(&masm, ep);
buffer.link(done, ic.slowPathStart.labelAtOffset(ic.slowJoinOffset));
- buffer.link(call, JSC::FunctionPtr(JS_FUNC_TO_DATA_PTR(void *, fun->u.n.native)));
buffer.link(funGuard, ic.slowPathStart);
masm.finalize(buffer);
JSC::CodeLocationLabel cs = buffer.finalizeCodeAddendum();
JaegerSpew(JSpew_PICs, "generated native CALL stub %p (%d bytes)\n",
cs.executableAddress(), masm.size());
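
The native-call stub above drops hand-maintained #ifdef blocks (manual esp adjustment, per-platform argument stores, Win64 shadow space) in favor of setupABICall/storeArg/callWithABI: declare the convention and arity, place each argument, then emit the call and cleanup in one place. A toy builder showing the same division of labor (names and behavior are illustrative only):

#include <cstdio>
#include <string>
#include <vector>

enum CallConvention { NormalCall, FastCall };

// Toy ABI builder: callers say what they want passed; the builder decides
// register vs. stack and owns alignment/cleanup, so call sites have no #ifdefs.
class ABICallBuilder {
    CallConvention conv_ = NormalCall;
    std::vector<std::string> slots_;
  public:
    void setupABICall(CallConvention conv, unsigned argc) {
        conv_ = conv;
        slots_.assign(argc, "<unset>");
    }
    void storeArg(unsigned i, const std::string &value) {
        slots_.at(i) = value; // register or stack slot chosen here, not by caller
    }
    void callWithABI(const char *fn) {
        std::printf("[%s] align stack; call %s(",
                    conv_ == FastCall ? "fastcall" : "cdecl", fn);
        for (size_t i = 0; i < slots_.size(); i++)
            std::printf("%s%s", i ? ", " : "", slots_[i].c_str());
        std::printf("); restore stack\n");
    }
};

int main() {
    ABICallBuilder masm;
    masm.setupABICall(NormalCall, 3);
    masm.storeArg(2, "vp");
    masm.storeArg(1, "argc");
    masm.storeArg(0, "cx");
    masm.callWithABI("native");
    return 0;
}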
--- a/js/src/methodjit/Retcon.cpp
+++ b/js/src/methodjit/Retcon.cpp
@@ -156,35 +156,61 @@ Recompiler::recompile()
if (!ctorPatches.append(findPatch(script->jitCtor, addr)))
return false;
} else if (script->jitNormal && script->jitNormal->isValidCode(*addr)) {
if (!normalPatches.append(findPatch(script->jitNormal, addr)))
return false;
}
}
+ Vector<CallSite> normalSites(cx);
+ Vector<CallSite> ctorSites(cx);
+
+ if (script->jitNormal && !saveTraps(script->jitNormal, &normalSites))
+ return false;
+ if (script->jitCtor && !saveTraps(script->jitCtor, &ctorSites))
+ return false;
+
ReleaseScriptCode(cx, script);
- if (normalPatches.length() && !recompile(firstNormalFrame, normalPatches))
+ if (normalPatches.length() &&
+ !recompile(firstNormalFrame, normalPatches, normalSites)) {
return false;
+ }
- if (ctorPatches.length() && !recompile(firstCtorFrame, ctorPatches))
+ if (ctorPatches.length() &&
+ !recompile(firstCtorFrame, ctorPatches, ctorSites)) {
return false;
+ }
return true;
}
bool
-Recompiler::recompile(JSStackFrame *fp, Vector<PatchableAddress> &patches)
+Recompiler::saveTraps(JITScript *jit, Vector<CallSite> *sites)
+{
+ for (uint32 i = 0; i < jit->nCallSites; i++) {
+ CallSite &site = jit->callSites[i];
+ if (site.isTrap() && !sites->append(site))
+ return false;
+ }
+ return true;
+}
+
+bool
+Recompiler::recompile(JSStackFrame *fp, Vector<PatchableAddress> &patches,
+ Vector<CallSite> &sites)
{
/* If we get this far, the script is live, and we better be safe to re-jit. */
JS_ASSERT(cx->compartment->debugMode);
JS_ASSERT(fp);
Compiler c(cx, fp);
+ if (!c.loadOldTraps(sites))
+ return false;
if (c.compile() != Compile_Okay)
return false;
/* Perform the earlier scanned patches */
for (uint32 i = 0; i < patches.length(); i++)
applyPatch(c, patches[i]);
return true;
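
Recompiler::recompile now threads the saved trap sites into the fresh Compiler via loadOldTraps before compiling, so traps survive the release-and-rebuild cycle. A hedged sketch of that ordering with toy types (the real code also repatches return addresses via the PatchableAddress vectors):

#include <vector>

struct CallSite { unsigned pcOffset; unsigned id; };

// Sketch of the compiler side of the flow; names mirror the patch but the
// bodies are placeholders.
struct CompilerSketch {
    std::vector<CallSite> oldTraps;
    bool loadOldTraps(const std::vector<CallSite> &sites) {
        oldTraps = sites;   // remembered so code generation can re-arm them
        return true;
    }
    bool compile() { return true; }
};

static bool recompileSketch(const std::vector<CallSite> &savedTrapSites) {
    CompilerSketch c;
    if (!c.loadOldTraps(savedTrapSites))  // 1. carry traps forward
        return false;
    return c.compile();                   // 2. rebuild the JIT code
}

int main() {
    std::vector<CallSite> traps = {{9, 0xFEDCBABC}};
    return recompileSketch(traps) ? 0 : 1;
}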
--- a/js/src/methodjit/Retcon.h
+++ b/js/src/methodjit/Retcon.h
@@ -94,16 +94,18 @@ public:
bool recompile();
private:
JSContext *cx;
JSScript *script;
PatchableAddress findPatch(JITScript *jit, void **location);
void applyPatch(Compiler& c, PatchableAddress& toPatch);
- bool recompile(JSStackFrame *fp, Vector<PatchableAddress> &patches);
+ bool recompile(JSStackFrame *fp, Vector<PatchableAddress> &patches,
+ Vector<CallSite> &sites);
+ bool saveTraps(JITScript *jit, Vector<CallSite> *sites);
};
} /* namespace mjit */
} /* namespace js */
#endif
--- a/js/src/methodjit/StubCompiler.cpp
+++ b/js/src/methodjit/StubCompiler.cpp
@@ -170,27 +170,31 @@ StubCompiler::linkRejoin(Jump j)
crossJump(j, cc.getLabel());
}
typedef JSC::MacroAssembler::RegisterID RegisterID;
typedef JSC::MacroAssembler::ImmPtr ImmPtr;
typedef JSC::MacroAssembler::Imm32 Imm32;
JSC::MacroAssembler::Call
-StubCompiler::stubCall(void *ptr)
+StubCompiler::emitStubCall(void *ptr, uint32 id)
{
- return stubCall(ptr, frame.stackDepth() + script->nfixed);
+ return emitStubCall(ptr, frame.stackDepth() + script->nfixed, id);
}
JSC::MacroAssembler::Call
-StubCompiler::stubCall(void *ptr, uint32 slots)
+StubCompiler::emitStubCall(void *ptr, int32 slots, uint32 id)
{
JaegerSpew(JSpew_Insns, " ---- BEGIN SLOW CALL CODE ---- \n");
- Call cl = masm.stubCall(ptr, cc.getPC(), slots);
+ Call cl = masm.fallibleVMCall(ptr, cc.getPC(), slots);
JaegerSpew(JSpew_Insns, " ---- END SLOW CALL CODE ---- \n");
+ if (cc.debugMode()) {
+ Compiler::InternalCallSite site(masm.callReturnOffset(cl), cc.getPC(), id, true, true);
+ cc.addCallSite(site);
+ }
return cl;
}
void
StubCompiler::fixCrossJumps(uint8 *ncode, size_t offset, size_t total)
{
JSC::LinkBuffer fast(ncode, total);
JSC::LinkBuffer slow(ncode + offset, total - offset);
@@ -234,17 +238,17 @@ StubCompiler::vpInc(JSOp op, uint32 dept
stub = stubs::DecVp;
break;
default:
JS_NOT_REACHED("unknown incdec op");
break;
}
- return stubCall(JS_FUNC_TO_DATA_PTR(void *, stub), slots);
+ return emitStubCall(JS_FUNC_TO_DATA_PTR(void *, stub), slots, __LINE__);
}
void
StubCompiler::crossJump(Jump j, Label L)
{
joins.append(CrossPatch(j, L));
}
--- a/js/src/methodjit/StubCompiler.h
+++ b/js/src/methodjit/StubCompiler.h
@@ -103,48 +103,16 @@ class StubCompiler
}
uint8 *buffer() {
return masm.buffer();
}
Call vpInc(JSOp op, uint32 depth);
-#define STUB_CALL_TYPE(type) \
- Call call(type stub) { \
- return stubCall(JS_FUNC_TO_DATA_PTR(void *, stub)); \
- } \
- Call call(type stub, uint32 slots) { \
- return stubCall(JS_FUNC_TO_DATA_PTR(void *, stub), slots); \
- }
-
- STUB_CALL_TYPE(JSObjStub);
- STUB_CALL_TYPE(VoidStub);
- STUB_CALL_TYPE(VoidStubUInt32);
- STUB_CALL_TYPE(VoidPtrStubUInt32);
- STUB_CALL_TYPE(VoidPtrStub);
- STUB_CALL_TYPE(BoolStub);
- STUB_CALL_TYPE(VoidStubAtom);
- STUB_CALL_TYPE(VoidStubPC);
-#ifdef JS_POLYIC
- STUB_CALL_TYPE(VoidStubPIC);
- STUB_CALL_TYPE(VoidStubGetElemIC);
- STUB_CALL_TYPE(VoidStubSetElemIC);
-#endif
-#ifdef JS_MONOIC
- STUB_CALL_TYPE(VoidStubMIC);
- STUB_CALL_TYPE(VoidPtrStubMIC);
- STUB_CALL_TYPE(VoidStubCallIC);
- STUB_CALL_TYPE(VoidPtrStubCallIC);
- STUB_CALL_TYPE(BoolStubEqualityIC);
- STUB_CALL_TYPE(VoidPtrStubTraceIC);
-#endif
-
-#undef STUB_CALL_TYPE
-
/*
* Force a frame sync and return a label before the syncing code.
* A Jump may bind to the label with leaveExitDirect().
*/
JSC::MacroAssembler::Label syncExit(Uses uses);
/*
* Sync the exit, and state that code will be immediately outputted
@@ -168,18 +136,17 @@ class StubCompiler
void rejoin(Changes changes);
void linkRejoin(Jump j);
/* Finish all native code patching. */
void fixCrossJumps(uint8 *ncode, size_t offset, size_t total);
bool jumpInScript(Jump j, jsbytecode *target);
void crossJump(Jump j, Label l);
- private:
- Call stubCall(void *ptr);
- Call stubCall(void *ptr, uint32 slots);
+ Call emitStubCall(void *ptr, uint32 id);
+ Call emitStubCall(void *ptr, int32 slots, uint32 id);
};
} /* namespace mjit */
} /* namespace js */
#endif /* jsstub_compiler_h__ */
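
The deleted STUB_CALL_TYPE block above stamped out one forwarding overload per stub signature; the patch replaces them with a single void* entry point, with JS_FUNC_TO_DATA_PTR doing the type erasure at each macro call site. A minimal sketch of that trade-off (funcToDataPtr is a stand-in for the real macro):

#include <cstdio>

// Before: one overload per stub type (VoidStub, BoolStub, ...), generated by
// a macro. After: callers erase the type once and a single function emits.
using VoidStub = void (*)();
using BoolStub = bool (*)();

// Stand-in for JS_FUNC_TO_DATA_PTR: erase a function pointer to void*.
// (Casting function pointers to void* is implementation-defined, but it is
// what the real macro relies on, too.)
template <typename Fn>
static void *funcToDataPtr(Fn fn) {
    return reinterpret_cast<void *>(fn);
}

static void emitStubCallModel(void *ptr) {
    std::printf("emit call to %p\n", ptr);
}

static void voidStub() {}
static bool boolStub() { return true; }

int main() {
    emitStubCallModel(funcToDataPtr(voidStub)); // no per-type overload needed
    emitStubCallModel(funcToDataPtr(boolStub));
    return 0;
}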
--- a/js/src/methodjit/TrampolineCompiler.cpp
+++ b/js/src/methodjit/TrampolineCompiler.cpp
@@ -115,17 +115,17 @@ TrampolineCompiler::compileTrampoline(Tr
* - There is no stub buffer.
*/
bool
TrampolineCompiler::generateForceReturn(Assembler &masm)
{
/* if (hasArgsObj() || hasCallObj()) stubs::PutActivationObjects() */
Jump noActObjs = masm.branchTest32(Assembler::Zero, FrameFlagsAddress(),
Imm32(JSFRAME_HAS_CALL_OBJ | JSFRAME_HAS_ARGS_OBJ));
- masm.stubCall(stubs::PutActivationObjects, NULL, 0);
+ masm.fallibleVMCall(JS_FUNC_TO_DATA_PTR(void *, stubs::PutActivationObjects), NULL, 0);
noActObjs.linkTo(masm.label(), &masm);
/* Store any known return value */
masm.loadValueAsComponents(UndefinedValue(), JSReturnReg_Type, JSReturnReg_Data);
Jump rvalClear = masm.branchTest32(Assembler::Zero,
FrameFlagsAddress(), Imm32(JSFRAME_HAS_RVAL));
Address rvalAddress(JSFrameReg, JSStackFrame::offsetOfReturnValue());
masm.loadValueAsComponents(rvalAddress, JSReturnReg_Type, JSReturnReg_Data);