--- a/js/src/jit/BaselineCacheIRCompiler.cpp
+++ b/js/src/jit/BaselineCacheIRCompiler.cpp
@@ -27,40 +27,44 @@ CacheRegisterAllocator::addressOf(MacroA
{
uint32_t offset = stackPushed_ + ICStackValueOffset + slot.slot() * sizeof(JS::Value);
return Address(masm.getStackPointer(), offset);
}
// BaselineCacheIRCompiler compiles CacheIR to BaselineIC native code.
class MOZ_RAII BaselineCacheIRCompiler : public CacheIRCompiler
{
+#ifdef DEBUG
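+ // Note: with the Ion code paths removed in this patch, engine_ is only
+ // read by debug assertions, so it is compiled in DEBUG builds only.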
// Some Baseline IC stubs can be used in IonMonkey through SharedStubs.
// Those stubs have different machine code, so we need to track whether
// we're compiling for Baseline or Ion.
ICStubEngine engine_;
+#endif
uint32_t stubDataOffset_;
bool inStubFrame_;
bool makesGCCalls_;
MOZ_MUST_USE bool callVM(MacroAssembler& masm, const VMFunction& fun);
- MOZ_MUST_USE bool callTypeUpdateIC(AutoStubFrame& stubFrame, Register obj, ValueOperand val,
- Register scratch, LiveGeneralRegisterSet saveRegs);
+ MOZ_MUST_USE bool callTypeUpdateIC(Register obj, ValueOperand val, Register scratch,
+ LiveGeneralRegisterSet saveRegs);
MOZ_MUST_USE bool emitStoreSlotShared(bool isFixed);
MOZ_MUST_USE bool emitAddAndStoreSlotShared(CacheOp op);
public:
friend class AutoStubFrame;
BaselineCacheIRCompiler(JSContext* cx, const CacheIRWriter& writer, ICStubEngine engine,
uint32_t stubDataOffset)
: CacheIRCompiler(cx, writer, Mode::Baseline),
+#ifdef DEBUG
engine_(engine),
+#endif
stubDataOffset_(stubDataOffset),
inStubFrame_(false),
makesGCCalls_(false)
{}
MOZ_MUST_USE bool init(CacheKind kind);
JitCode* compile();
@@ -79,75 +83,62 @@ class MOZ_RAII BaselineCacheIRCompiler :
#define DEFINE_SHARED_OP(op) \
bool BaselineCacheIRCompiler::emit##op() { return CacheIRCompiler::emit##op(); }
CACHE_IR_SHARED_OPS(DEFINE_SHARED_OP)
#undef DEFINE_SHARED_OP
enum class CallCanGC { CanGC, CanNotGC };
-// Instructions that have to perform a callVM require a stub frame. Use
-// AutoStubFrame before allocating any registers, then call its enter() and
-// leave() methods to enter/leave the stub frame.
+// Instructions that have to perform a callVM require a stub frame. Call its
+// enter() and leave() methods to enter/leave the stub frame.
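+// Note: in this patch AutoStubFrame is constructed right before enter(),
+// after the emitter has allocated its operands and called
+// allocator.discardStack(); the constructor no longer reserves ICTailCallReg.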
class MOZ_RAII AutoStubFrame
{
BaselineCacheIRCompiler& compiler;
#ifdef DEBUG
uint32_t framePushedAtEnterStubFrame_;
#endif
- Maybe<AutoScratchRegister> tail;
AutoStubFrame(const AutoStubFrame&) = delete;
void operator=(const AutoStubFrame&) = delete;
public:
explicit AutoStubFrame(BaselineCacheIRCompiler& compiler)
- : compiler(compiler),
+ : compiler(compiler)
#ifdef DEBUG
- framePushedAtEnterStubFrame_(0),
+ , framePushedAtEnterStubFrame_(0)
#endif
- tail()
- {
- // We use ICTailCallReg when entering the stub frame, so ensure it's not
- // used for something else.
- if (compiler.allocator.isAllocatable(ICTailCallReg))
- tail.emplace(compiler.allocator, compiler.masm, ICTailCallReg);
- }
+ { }
void enter(MacroAssembler& masm, Register scratch, CallCanGC canGC = CallCanGC::CanGC) {
MOZ_ASSERT(compiler.allocator.stackPushed() == 0);
- if (compiler.engine_ == ICStubEngine::Baseline) {
- EmitBaselineEnterStubFrame(masm, scratch);
+ MOZ_ASSERT(compiler.engine_ == ICStubEngine::Baseline);
+
+ EmitBaselineEnterStubFrame(masm, scratch);
+
#ifdef DEBUG
- framePushedAtEnterStubFrame_ = masm.framePushed();
+ framePushedAtEnterStubFrame_ = masm.framePushed();
#endif
- } else {
- EmitIonEnterStubFrame(masm, scratch);
- }
MOZ_ASSERT(!compiler.inStubFrame_);
compiler.inStubFrame_ = true;
if (canGC == CallCanGC::CanGC)
compiler.makesGCCalls_ = true;
}
void leave(MacroAssembler& masm, bool calledIntoIon = false) {
MOZ_ASSERT(compiler.inStubFrame_);
compiler.inStubFrame_ = false;
- if (compiler.engine_ == ICStubEngine::Baseline) {
#ifdef DEBUG
- masm.setFramePushed(framePushedAtEnterStubFrame_);
- if (calledIntoIon)
- masm.adjustFrame(sizeof(intptr_t)); // Calls into ion have this extra.
+ masm.setFramePushed(framePushedAtEnterStubFrame_);
+ if (calledIntoIon)
+ masm.adjustFrame(sizeof(intptr_t)); // Calls into ion have this extra.
#endif
- EmitBaselineLeaveStubFrame(masm, calledIntoIon);
- } else {
- EmitIonLeaveStubFrame(masm);
- }
+ EmitBaselineLeaveStubFrame(masm, calledIntoIon);
}
#ifdef DEBUG
~AutoStubFrame() {
MOZ_ASSERT(!compiler.inStubFrame_);
}
#endif
};
@@ -157,20 +148,19 @@ BaselineCacheIRCompiler::callVM(MacroAss
{
MOZ_ASSERT(inStubFrame_);
JitCode* code = cx_->runtime()->jitRuntime()->getVMWrapper(fun);
if (!code)
return false;
MOZ_ASSERT(fun.expectTailCall == NonTailCall);
- if (engine_ == ICStubEngine::Baseline)
- EmitBaselineCallVM(code, masm);
- else
- EmitIonCallVM(code, fun.explicitStackSlots(), masm);
+ MOZ_ASSERT(engine_ == ICStubEngine::Baseline);
+
+ EmitBaselineCallVM(code, masm);
return true;
}
JitCode*
BaselineCacheIRCompiler::compile()
{
#ifndef JS_USE_LINK_REGISTER
// The first value contains the return address,
@@ -372,18 +362,16 @@ BaselineCacheIRCompiler::emitLoadDynamic
return true;
}
bool
BaselineCacheIRCompiler::emitCallScriptedGetterResult()
{
MOZ_ASSERT(engine_ == ICStubEngine::Baseline);
- AutoStubFrame stubFrame(*this);
-
Register obj = allocator.useRegister(masm, reader.objOperandId());
Address getterAddr(stubAddress(reader.stubOffset()));
AutoScratchRegisterExcluding code(allocator, masm, ArgumentsRectifierReg);
AutoScratchRegister callee(allocator, masm);
AutoScratchRegister scratch(allocator, masm);
// First, ensure our getter is non-lazy and has JIT code.
@@ -395,16 +383,17 @@ BaselineCacheIRCompiler::emitCallScripte
masm.loadPtr(getterAddr, callee);
masm.branchIfFunctionHasNoScript(callee, failure->label());
masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), code);
masm.loadBaselineOrIonRaw(code, code, failure->label());
}
allocator.discardStack(masm);
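+ // Note: the stub frame is now created here, after register allocation and
+ // discardStack, instead of at the top of the emitter.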
+ AutoStubFrame stubFrame(*this);
stubFrame.enter(masm, scratch);
// Align the stack such that the JitFrameLayout is aligned on
// JitStackAlignment.
masm.alignJitStackBasedOnNArgs(0);
// Getter is called with 0 arguments, just |obj| as thisv.
// Note that we use Push, not push, so that callJit will align the stack
@@ -439,25 +428,24 @@ BaselineCacheIRCompiler::emitCallScripte
typedef bool (*CallNativeGetterFn)(JSContext*, HandleFunction, HandleObject, MutableHandleValue);
static const VMFunction CallNativeGetterInfo =
FunctionInfo<CallNativeGetterFn>(CallNativeGetter, "CallNativeGetter");
bool
BaselineCacheIRCompiler::emitCallNativeGetterResult()
{
- AutoStubFrame stubFrame(*this);
-
Register obj = allocator.useRegister(masm, reader.objOperandId());
Address getterAddr(stubAddress(reader.stubOffset()));
AutoScratchRegister scratch(allocator, masm);
allocator.discardStack(masm);
+ AutoStubFrame stubFrame(*this);
stubFrame.enter(masm, scratch);
// Load the callee in the scratch register.
masm.loadPtr(getterAddr, scratch);
masm.Push(obj);
masm.Push(scratch);
@@ -470,25 +458,24 @@ BaselineCacheIRCompiler::emitCallNativeG
typedef bool (*ProxyGetPropertyFn)(JSContext*, HandleObject, HandleId, MutableHandleValue);
static const VMFunction ProxyGetPropertyInfo =
FunctionInfo<ProxyGetPropertyFn>(ProxyGetProperty, "ProxyGetProperty");
bool
BaselineCacheIRCompiler::emitCallProxyGetResult()
{
- AutoStubFrame stubFrame(*this);
-
Register obj = allocator.useRegister(masm, reader.objOperandId());
Address idAddr(stubAddress(reader.stubOffset()));
AutoScratchRegister scratch(allocator, masm);
allocator.discardStack(masm);
+ AutoStubFrame stubFrame(*this);
stubFrame.enter(masm, scratch);
// Load the jsid in the scratch register.
masm.loadPtr(idAddr, scratch);
masm.Push(scratch);
masm.Push(obj);
@@ -501,25 +488,24 @@ BaselineCacheIRCompiler::emitCallProxyGe
typedef bool (*ProxyGetPropertyByValueFn)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
static const VMFunction ProxyGetPropertyByValueInfo =
FunctionInfo<ProxyGetPropertyByValueFn>(ProxyGetPropertyByValue, "ProxyGetPropertyByValue");
bool
BaselineCacheIRCompiler::emitCallProxyGetByValueResult()
{
- AutoStubFrame stubFrame(*this);
-
Register obj = allocator.useRegister(masm, reader.objOperandId());
ValueOperand idVal = allocator.useValueRegister(masm, reader.valOperandId());
AutoScratchRegister scratch(allocator, masm);
allocator.discardStack(masm);
+ AutoStubFrame stubFrame(*this);
stubFrame.enter(masm, scratch);
masm.Push(idVal);
masm.Push(obj);
if (!callVM(masm, ProxyGetPropertyByValueInfo))
return false;
@@ -666,18 +652,18 @@ BaselineCacheIRCompiler::emitLoadEnviron
masm.branchTestMagic(Assembler::Equal, slot, failure->label());
// Load the value.
masm.loadValue(slot, output.valueReg());
return true;
}
bool
-BaselineCacheIRCompiler::callTypeUpdateIC(AutoStubFrame& stubFrame, Register obj, ValueOperand val,
- Register scratch, LiveGeneralRegisterSet saveRegs)
+BaselineCacheIRCompiler::callTypeUpdateIC(Register obj, ValueOperand val, Register scratch,
+ LiveGeneralRegisterSet saveRegs)
{
// Ensure the stack is empty for the VM call below.
allocator.discardStack(masm);
// R0 contains the value that needs to be typechecked.
MOZ_ASSERT(val == R0);
MOZ_ASSERT(scratch == R1.scratchReg());
@@ -698,16 +684,17 @@ BaselineCacheIRCompiler::callTypeUpdateI
if (CallClobbersTailReg)
masm.pop(ICTailCallReg);
// The update IC will store 0 or 1 in |scratch|, R1.scratchReg(), reflecting
// if the value in R0 type-checked properly or not.
Label done;
masm.branch32(Assembler::Equal, scratch, Imm32(1), &done);
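+ // Note: the stub frame is now created inside callTypeUpdateIC itself; the
+ // AutoStubFrame& parameter was dropped from the signature.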
+ AutoStubFrame stubFrame(*this);
stubFrame.enter(masm, scratch, CallCanGC::CanNotGC);
masm.PushRegsInMask(saveRegs);
masm.Push(val);
masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));
masm.Push(ICStubReg);
@@ -729,46 +716,45 @@ BaselineCacheIRCompiler::callTypeUpdateI
bool
BaselineCacheIRCompiler::emitStoreSlotShared(bool isFixed)
{
ObjOperandId objId = reader.objOperandId();
Address offsetAddr = stubAddress(reader.stubOffset());
// Allocate the fixed registers first. These need to be fixed for
// callTypeUpdateIC.
- AutoStubFrame stubFrame(*this);
- AutoScratchRegister scratch(allocator, masm, R1.scratchReg());
+ AutoScratchRegister scratch1(allocator, masm, R1.scratchReg());
ValueOperand val = allocator.useFixedValueRegister(masm, reader.valOperandId(), R0);
Register obj = allocator.useRegister(masm, objId);
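+ // Note: a dedicated second scratch register, needed only for the
+ // dynamic-slot case, replaces the previous reuse of ICStubReg.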
+ Maybe<AutoScratchRegister> scratch2;
+ if (!isFixed)
+ scratch2.emplace(allocator, masm);
LiveGeneralRegisterSet saveRegs;
saveRegs.add(obj);
saveRegs.add(val);
- if (!callTypeUpdateIC(stubFrame, obj, val, scratch, saveRegs))
+ if (!callTypeUpdateIC(obj, val, scratch1, saveRegs))
return false;
- masm.load32(offsetAddr, scratch);
+ masm.load32(offsetAddr, scratch1);
if (isFixed) {
- BaseIndex slot(obj, scratch, TimesOne);
+ BaseIndex slot(obj, scratch1, TimesOne);
EmitPreBarrier(masm, slot, MIRType::Value);
masm.storeValue(val, slot);
} else {
- // To avoid running out of registers on x86, use ICStubReg as scratch.
- // We don't need it anymore.
- Register slots = ICStubReg;
- masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), slots);
- BaseIndex slot(slots, scratch, TimesOne);
+ masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch2.ref());
+ BaseIndex slot(scratch2.ref(), scratch1, TimesOne);
EmitPreBarrier(masm, slot, MIRType::Value);
masm.storeValue(val, slot);
}
if (cx_->nursery().exists())
- BaselineEmitPostWriteBarrierSlot(masm, obj, val, scratch, LiveGeneralRegisterSet(), cx_);
+ BaselineEmitPostWriteBarrierSlot(masm, obj, val, scratch1, LiveGeneralRegisterSet(), cx_);
return true;
}
bool
BaselineCacheIRCompiler::emitStoreFixedSlot()
{
return emitStoreSlotShared(true);
}
@@ -782,21 +768,22 @@ BaselineCacheIRCompiler::emitStoreDynami
bool
BaselineCacheIRCompiler::emitAddAndStoreSlotShared(CacheOp op)
{
ObjOperandId objId = reader.objOperandId();
Address offsetAddr = stubAddress(reader.stubOffset());
// Allocate the fixed registers first. These need to be fixed for
// callTypeUpdateIC.
- AutoStubFrame stubFrame(*this);
- AutoScratchRegister scratch(allocator, masm, R1.scratchReg());
+ AutoScratchRegister scratch1(allocator, masm, R1.scratchReg());
ValueOperand val = allocator.useFixedValueRegister(masm, reader.valOperandId(), R0);
Register obj = allocator.useRegister(masm, objId);
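+ // Note: dedicated second scratch register, replacing the previous reuse
+ // of ICStubReg.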
+ AutoScratchRegister scratch2(allocator, masm);
+
bool changeGroup = reader.readBool();
Address newGroupAddr = stubAddress(reader.stubOffset());
Address newShapeAddr = stubAddress(reader.stubOffset());
if (op == CacheOp::AllocateAndStoreDynamicSlot) {
// We have to (re)allocate dynamic slots. Do this first, as it's the
// only fallible operation here. This simplifies the callTypeUpdateIC
// call below: it does not have to worry about saving registers used by
@@ -805,90 +792,83 @@ BaselineCacheIRCompiler::emitAddAndStore
FailurePath* failure;
if (!addFailurePath(&failure))
return false;
AllocatableRegisterSet regs(RegisterSet::Volatile());
LiveRegisterSet save(regs.asLiveSet());
- // Use ICStubReg as second scratch.
- if (!save.has(ICStubReg))
- save.add(ICStubReg);
-
masm.PushRegsInMask(save);
- masm.setupUnalignedABICall(scratch);
- masm.loadJSContext(scratch);
- masm.passABIArg(scratch);
+ masm.setupUnalignedABICall(scratch1);
+ masm.loadJSContext(scratch1);
+ masm.passABIArg(scratch1);
masm.passABIArg(obj);
- masm.load32(numNewSlotsAddr, ICStubReg);
- masm.passABIArg(ICStubReg);
+ masm.load32(numNewSlotsAddr, scratch2);
+ masm.passABIArg(scratch2);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, NativeObject::growSlotsDontReportOOM));
- masm.mov(ReturnReg, scratch);
+ masm.mov(ReturnReg, scratch1);
LiveRegisterSet ignore;
- ignore.add(scratch);
+ ignore.add(scratch1);
masm.PopRegsInMaskIgnore(save, ignore);
- masm.branchIfFalseBool(scratch, failure->label());
+ masm.branchIfFalseBool(scratch1, failure->label());
}
LiveGeneralRegisterSet saveRegs;
saveRegs.add(obj);
saveRegs.add(val);
- if (!callTypeUpdateIC(stubFrame, obj, val, scratch, saveRegs))
+ if (!callTypeUpdateIC(obj, val, scratch1, saveRegs))
return false;
if (changeGroup) {
// Changing object's group from a partially to fully initialized group,
// per the acquired properties analysis. Only change the group if the
// old group still has a newScript. This only applies to PlainObjects.
Label noGroupChange;
- masm.loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
+ masm.loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch1);
masm.branchPtr(Assembler::Equal,
- Address(scratch, ObjectGroup::offsetOfAddendum()),
+ Address(scratch1, ObjectGroup::offsetOfAddendum()),
ImmWord(0),
&noGroupChange);
// Reload the new group from the cache.
- masm.loadPtr(newGroupAddr, scratch);
+ masm.loadPtr(newGroupAddr, scratch1);
Address groupAddr(obj, JSObject::offsetOfGroup());
EmitPreBarrier(masm, groupAddr, MIRType::ObjectGroup);
- masm.storePtr(scratch, groupAddr);
+ masm.storePtr(scratch1, groupAddr);
masm.bind(&noGroupChange);
}
// Update the object's shape.
Address shapeAddr(obj, ShapedObject::offsetOfShape());
- masm.loadPtr(newShapeAddr, scratch);
+ masm.loadPtr(newShapeAddr, scratch1);
EmitPreBarrier(masm, shapeAddr, MIRType::Shape);
- masm.storePtr(scratch, shapeAddr);
+ masm.storePtr(scratch1, shapeAddr);
// Perform the store. No pre-barrier required since this is a new
// initialization.
- masm.load32(offsetAddr, scratch);
+ masm.load32(offsetAddr, scratch1);
if (op == CacheOp::AddAndStoreFixedSlot) {
- BaseIndex slot(obj, scratch, TimesOne);
+ BaseIndex slot(obj, scratch1, TimesOne);
masm.storeValue(val, slot);
} else {
MOZ_ASSERT(op == CacheOp::AddAndStoreDynamicSlot ||
op == CacheOp::AllocateAndStoreDynamicSlot);
- // To avoid running out of registers on x86, use ICStubReg as scratch.
- // We don't need it anymore.
- Register slots = ICStubReg;
- masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), slots);
- BaseIndex slot(slots, scratch, TimesOne);
+ masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch2);
+ BaseIndex slot(scratch2, scratch1, TimesOne);
masm.storeValue(val, slot);
}
if (cx_->nursery().exists())
- BaselineEmitPostWriteBarrierSlot(masm, obj, val, scratch, LiveGeneralRegisterSet(), cx_);
+ BaselineEmitPostWriteBarrierSlot(masm, obj, val, scratch1, LiveGeneralRegisterSet(), cx_);
return true;
}
bool
BaselineCacheIRCompiler::emitAddAndStoreFixedSlot()
{
return emitAddAndStoreSlotShared(CacheOp::AddAndStoreFixedSlot);
}
@@ -909,28 +889,27 @@ bool
BaselineCacheIRCompiler::emitStoreUnboxedProperty()
{
ObjOperandId objId = reader.objOperandId();
JSValueType fieldType = reader.valueType();
Address offsetAddr = stubAddress(reader.stubOffset());
// Allocate the fixed registers first. These need to be fixed for
// callTypeUpdateIC.
- AutoStubFrame stubFrame(*this);
AutoScratchRegister scratch(allocator, masm, R1.scratchReg());
ValueOperand val = allocator.useFixedValueRegister(masm, reader.valOperandId(), R0);
Register obj = allocator.useRegister(masm, objId);
// We only need the type update IC if we are storing an object.
if (fieldType == JSVAL_TYPE_OBJECT) {
LiveGeneralRegisterSet saveRegs;
saveRegs.add(obj);
saveRegs.add(val);
- if (!callTypeUpdateIC(stubFrame, obj, val, scratch, saveRegs))
+ if (!callTypeUpdateIC(obj, val, scratch, saveRegs))
return false;
}
masm.load32(offsetAddr, scratch);
BaseIndex fieldAddr(obj, scratch, TimesOne);
// Note that the storeUnboxedProperty call here is infallible, as the
// IR emitter is responsible for guarding on |val|'s type.
@@ -949,40 +928,36 @@ BaselineCacheIRCompiler::emitStoreTypedO
{
ObjOperandId objId = reader.objOperandId();
Address offsetAddr = stubAddress(reader.stubOffset());
TypedThingLayout layout = reader.typedThingLayout();
ReferenceTypeDescr::Type type = reader.referenceTypeDescrType();
// Allocate the fixed registers first. These need to be fixed for
// callTypeUpdateIC.
- AutoStubFrame stubFrame(*this);
AutoScratchRegister scratch1(allocator, masm, R1.scratchReg());
ValueOperand val = allocator.useFixedValueRegister(masm, reader.valOperandId(), R0);
Register obj = allocator.useRegister(masm, objId);
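+ // Note: second scratch register, replacing the previous reuse of ICStubReg.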
+ AutoScratchRegister scratch2(allocator, masm);
// We don't need a type update IC if the property is always a string.
if (type != ReferenceTypeDescr::TYPE_STRING) {
LiveGeneralRegisterSet saveRegs;
saveRegs.add(obj);
saveRegs.add(val);
- if (!callTypeUpdateIC(stubFrame, obj, val, scratch1, saveRegs))
+ if (!callTypeUpdateIC(obj, val, scratch1, saveRegs))
return false;
}
// Compute the address being written to.
LoadTypedThingData(masm, layout, obj, scratch1);
masm.addPtr(offsetAddr, scratch1);
Address dest(scratch1, 0);
- // To avoid running out of registers on x86, use ICStubReg as second
- // scratch. It won't be used after this.
- Register scratch2 = ICStubReg;
-
switch (type) {
case ReferenceTypeDescr::TYPE_ANY:
EmitPreBarrier(masm, dest, MIRType::Value);
masm.storeValue(val, dest);
break;
case ReferenceTypeDescr::TYPE_OBJECT: {
EmitPreBarrier(masm, dest, MIRType::Object);
@@ -1038,26 +1013,25 @@ BaselineCacheIRCompiler::emitStoreTypedO
typedef bool (*CallNativeSetterFn)(JSContext*, HandleFunction, HandleObject, HandleValue);
static const VMFunction CallNativeSetterInfo =
FunctionInfo<CallNativeSetterFn>(CallNativeSetter, "CallNativeSetter");
bool
BaselineCacheIRCompiler::emitCallNativeSetter()
{
- AutoStubFrame stubFrame(*this);
-
Register obj = allocator.useRegister(masm, reader.objOperandId());
Address setterAddr(stubAddress(reader.stubOffset()));
ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
AutoScratchRegister scratch(allocator, masm);
allocator.discardStack(masm);
+ AutoStubFrame stubFrame(*this);
stubFrame.enter(masm, scratch);
// Load the callee in the scratch register.
masm.loadPtr(setterAddr, scratch);
masm.Push(val);
masm.Push(obj);
masm.Push(scratch);
@@ -1067,106 +1041,102 @@ BaselineCacheIRCompiler::emitCallNativeS
stubFrame.leave(masm);
return true;
}
bool
BaselineCacheIRCompiler::emitCallScriptedSetter()
{
- AutoStubFrame stubFrame(*this);
-
- // We don't have many registers available on x86, so we use a single
- // scratch register.
- AutoScratchRegisterExcluding scratch(allocator, masm, ArgumentsRectifierReg);
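+ // Note: two scratch registers now: scratch1 keeps the callee alive across
+ // the stub-frame setup, scratch2 is used for the jit-code check, the frame
+ // descriptor, and callee->nargs.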
+ AutoScratchRegisterExcluding scratch1(allocator, masm, ArgumentsRectifierReg);
+ AutoScratchRegister scratch2(allocator, masm);
Register obj = allocator.useRegister(masm, reader.objOperandId());
Address setterAddr(stubAddress(reader.stubOffset()));
ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
- // First, ensure our setter is non-lazy and has JIT code.
+ // First, ensure our setter is non-lazy and has JIT code. This also loads
+ // the callee in scratch1.
{
FailurePath* failure;
if (!addFailurePath(&failure))
return false;
- masm.loadPtr(setterAddr, scratch);
- masm.branchIfFunctionHasNoScript(scratch, failure->label());
- masm.loadPtr(Address(scratch, JSFunction::offsetOfNativeOrScript()), scratch);
- masm.loadBaselineOrIonRaw(scratch, scratch, failure->label());
+ masm.loadPtr(setterAddr, scratch1);
+ masm.branchIfFunctionHasNoScript(scratch1, failure->label());
+ masm.loadPtr(Address(scratch1, JSFunction::offsetOfNativeOrScript()), scratch2);
+ masm.loadBaselineOrIonRaw(scratch2, scratch2, failure->label());
}
allocator.discardStack(masm);
- stubFrame.enter(masm, scratch);
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch2);
// Align the stack such that the JitFrameLayout is aligned on
// JitStackAlignment.
masm.alignJitStackBasedOnNArgs(1);
// Setter is called with 1 argument, and |obj| as thisv. Note that we use
// Push, not push, so that callJit will align the stack properly on ARM.
masm.Push(val);
masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));
- // Now that the object register is no longer needed, use it as second
- // scratch.
- Register scratch2 = obj;
EmitBaselineCreateStubFrameDescriptor(masm, scratch2, JitFrameLayout::Size());
masm.Push(Imm32(1)); // ActualArgc
// Push callee.
- masm.loadPtr(setterAddr, scratch);
- masm.Push(scratch);
+ masm.Push(scratch1);
// Push frame descriptor.
masm.Push(scratch2);
- // Load callee->nargs in scratch2 and the JIT code in scratch.
+ // Load callee->nargs in scratch2 and the JIT code in scratch1.
Label noUnderflow;
- masm.load16ZeroExtend(Address(scratch, JSFunction::offsetOfNargs()), scratch2);
- masm.loadPtr(Address(scratch, JSFunction::offsetOfNativeOrScript()), scratch);
- masm.loadBaselineOrIonRaw(scratch, scratch, nullptr);
+ masm.load16ZeroExtend(Address(scratch1, JSFunction::offsetOfNargs()), scratch2);
+ masm.loadPtr(Address(scratch1, JSFunction::offsetOfNativeOrScript()), scratch1);
+ masm.loadBaselineOrIonRaw(scratch1, scratch1, nullptr);
// Handle arguments underflow.
masm.branch32(Assembler::BelowOrEqual, scratch2, Imm32(1), &noUnderflow);
{
// Call the arguments rectifier.
- MOZ_ASSERT(ArgumentsRectifierReg != scratch);
+ MOZ_ASSERT(ArgumentsRectifierReg != scratch1);
JitCode* argumentsRectifier = cx_->runtime()->jitRuntime()->getArgumentsRectifier();
- masm.movePtr(ImmGCPtr(argumentsRectifier), scratch);
- masm.loadPtr(Address(scratch, JitCode::offsetOfCode()), scratch);
+ masm.movePtr(ImmGCPtr(argumentsRectifier), scratch1);
+ masm.loadPtr(Address(scratch1, JitCode::offsetOfCode()), scratch1);
masm.movePtr(ImmWord(1), ArgumentsRectifierReg);
}
masm.bind(&noUnderflow);
- masm.callJit(scratch);
+ masm.callJit(scratch1);
stubFrame.leave(masm, true);
return true;
}
typedef bool (*SetArrayLengthFn)(JSContext*, HandleObject, HandleValue, bool);
static const VMFunction SetArrayLengthInfo =
FunctionInfo<SetArrayLengthFn>(SetArrayLength, "SetArrayLength");
bool
BaselineCacheIRCompiler::emitCallSetArrayLength()
{
- AutoStubFrame stubFrame(*this);
-
Register obj = allocator.useRegister(masm, reader.objOperandId());
bool strict = reader.readBool();
ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
AutoScratchRegister scratch(allocator, masm);
allocator.discardStack(masm);
+ AutoStubFrame stubFrame(*this);
stubFrame.enter(masm, scratch);
masm.Push(Imm32(strict));
masm.Push(val);
masm.Push(obj);
if (!callVM(masm, SetArrayLengthInfo))
return false;