--- a/js/src/jit/mips/Architecture-mips.h
+++ b/js/src/jit/mips/Architecture-mips.h
@@ -146,25 +146,28 @@ class Registers
(1 << Registers::t1) |
(1 << Registers::t2) |
(1 << Registers::t3) |
(1 << Registers::t4) |
(1 << Registers::t5) |
(1 << Registers::t6) |
(1 << Registers::t7);
+ // This mask is used to save registers when entering functions, which is
+ // why $ra is included here even though it is not "Non Volatile".
static const uint32_t NonVolatileMask =
(1 << Registers::s0) |
(1 << Registers::s1) |
(1 << Registers::s2) |
(1 << Registers::s3) |
(1 << Registers::s4) |
(1 << Registers::s5) |
(1 << Registers::s6) |
- (1 << Registers::s7);
+ (1 << Registers::s7) |
+ (1 << Registers::ra);
static const uint32_t WrapperMask =
VolatileMask | // = arguments
(1 << Registers::t0) | // = outReg
(1 << Registers::t1); // = argBase
static const uint32_t NonAllocatableMask =
(1 << Registers::zero) |
--- a/js/src/jit/mips/Assembler-mips.cpp
+++ b/js/src/jit/mips/Assembler-mips.cpp
@@ -25,18 +25,49 @@ ABIArgGenerator::ABIArgGenerator()
: usedArgSlots_(0),
firstArgFloat(false),
current_()
{}
ABIArg
ABIArgGenerator::next(MIRType type)
{
- MOZ_ASSUME_UNREACHABLE("NYI");
- return ABIArg();
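+ // Assign the argument a register or stack offset, loosely following
+ // the O32 ABI: word-sized arguments use a0-a3 and then the stack,
+ // while floating-point arguments use f12/f14 (see the NOTE below).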
+ switch (type) {
+ case MIRType_Int32:
+ case MIRType_Pointer:
+ Register destReg;
+ if (GetIntArgReg(usedArgSlots_, &destReg))
+ current_ = ABIArg(destReg);
+ else
+ current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t));
+ usedArgSlots_++;
+ break;
+ case MIRType_Float32:
+ case MIRType_Double:
+ if (!usedArgSlots_) {
+ current_ = ABIArg(f12);
+ usedArgSlots_ += 2;
+ firstArgFloat = true;
+ } else if (usedArgSlots_ <= 2) {
+ // NOTE: We always use f14 here, which is not compatible with the
+ // system ABI. Some infrastructure changes will be needed if we ever
+ // have to follow the system ABI here.
+ current_ = ABIArg(f14);
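+ // f14 covers argument slots 2 and 3, so all four register slots
+ // are now consumed.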
+ usedArgSlots_ = 4;
+ } else {
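+ // Round usedArgSlots_ up to an even slot so the value is 8-byte
+ // aligned on the stack.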
+ usedArgSlots_ += usedArgSlots_ % 2;
+ current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t));
+ usedArgSlots_ += 2;
+ }
+ break;
+ default:
+ MOZ_ASSUME_UNREACHABLE("Unexpected argument type");
+ }
+ return current_;
}
const Register ABIArgGenerator::NonArgReturnVolatileReg0 = t0;
const Register ABIArgGenerator::NonArgReturnVolatileReg1 = t1;
// Encode a standard register when it is used as rd, as rs, or as an
// extra register (rt). These should never be called with an InvalidReg.
uint32_t
js::jit::RS(Register r)
@@ -1073,16 +1104,22 @@ Assembler::as_abss(FloatRegister fd, Flo
BufferOffset
Assembler::as_absd(FloatRegister fd, FloatRegister fs)
{
return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_abs_fmt).encode());
}
BufferOffset
+Assembler::as_negs(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_neg_fmt).encode());
+}
+
+BufferOffset
Assembler::as_negd(FloatRegister fd, FloatRegister fs)
{
return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_neg_fmt).encode());
}
BufferOffset
Assembler::as_muls(FloatRegister fd, FloatRegister fs, FloatRegister ft)
{
@@ -1210,38 +1247,43 @@ Assembler::bind(InstImm *inst, uint32_t
// If encoded offset is 4, then the jump must be short
if (BOffImm16(inst[0]).decode() == 4) {
MOZ_ASSERT(BOffImm16::isInRange(offset));
inst[0].setBOffImm16(BOffImm16(offset));
inst[1].makeNop();
return;
}
+
+ // Generate the long jump for calls, because the return address has to
+ // be the address after the reserved block.
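+ // Call sites are emitted with a bgezal placeholder, which is what the
+ // check below matches.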
+ if (inst[0].encode() == inst_bgezal.encode()) {
+ addLongJump(BufferOffset(branch));
+ writeLuiOriInstructions(inst, &inst[1], ScratchRegister, target);
+ inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr).encode();
+ // There is 1 nop after this.
+ return;
+ }
+
if (BOffImm16::isInRange(offset)) {
bool conditional = (inst[0].encode() != inst_bgezal.encode() &&
inst[0].encode() != inst_beq.encode());
inst[0].setBOffImm16(BOffImm16(offset));
inst[1].makeNop();
// Skip the trailing nops in conditional branches.
if (conditional) {
inst[2] = InstImm(op_regimm, zero, rt_bgez, BOffImm16(3 * sizeof(void *))).encode();
// There are 2 nops after this
}
return;
}
- if (inst[0].encode() == inst_bgezal.encode()) {
- // Handle long call.
- addLongJump(BufferOffset(branch));
- writeLuiOriInstructions(inst, &inst[1], ScratchRegister, target);
- inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr).encode();
- // There is 1 nop after this.
- } else if (inst[0].encode() == inst_beq.encode()) {
+ if (inst[0].encode() == inst_beq.encode()) {
// Handle long unconditional jump.
addLongJump(BufferOffset(branch));
writeLuiOriInstructions(inst, &inst[1], ScratchRegister, target);
inst[2] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
// There is 1 nop after this.
} else {
// Handle long conditional jump.
inst[0] = invertBranch(inst[0], BOffImm16(5 * sizeof(void *)));
@@ -1520,10 +1562,14 @@ Assembler::ToggleCall(CodeLocationLabel
*i2 = nop;
}
AutoFlushICache::flush(uintptr_t(i2), 4);
}
void Assembler::updateBoundsCheck(uint32_t heapSize, Instruction *inst)
{
- MOZ_ASSUME_UNREACHABLE("NYI");
+ InstImm *i0 = (InstImm *) inst;
+ InstImm *i1 = (InstImm *) i0->next();
+
+ // Patch the lui/ori pair with the new heap size.
+ Assembler::updateLuiOriValue(i0, i1, heapSize);
}
--- a/js/src/jit/mips/Assembler-mips.h
+++ b/js/src/jit/mips/Assembler-mips.h
@@ -109,16 +109,31 @@ static MOZ_CONSTEXPR_VAR Register StackP
static MOZ_CONSTEXPR_VAR Register FramePointer = InvalidReg;
static MOZ_CONSTEXPR_VAR Register ReturnReg = v0;
static MOZ_CONSTEXPR_VAR FloatRegister ReturnFloatReg = { FloatRegisters::f0 };
static MOZ_CONSTEXPR_VAR FloatRegister ScratchFloatReg = { FloatRegisters::f18 };
static MOZ_CONSTEXPR_VAR FloatRegister SecondScratchFloatReg = { FloatRegisters::f16 };
static MOZ_CONSTEXPR_VAR FloatRegister NANReg = { FloatRegisters::f30 };
+// Registers used in the GenerateFFIIonExit Enable Activation block.
+static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegCallee = t0;
+static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE0 = a0;
+static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE1 = a1;
+static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE2 = a2;
+static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE3 = a3;
+
+// Registers used in the GenerateFFIIonExit Disable Activation block.
+// None of these may be the second scratch register (t8).
+static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegReturnData = JSReturnReg_Data;
+static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegReturnType = JSReturnReg_Type;
+static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD0 = a0;
+static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD1 = a1;
+static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD2 = a2;
+
static MOZ_CONSTEXPR_VAR FloatRegister f0 = {FloatRegisters::f0};
static MOZ_CONSTEXPR_VAR FloatRegister f2 = {FloatRegisters::f2};
static MOZ_CONSTEXPR_VAR FloatRegister f4 = {FloatRegisters::f4};
static MOZ_CONSTEXPR_VAR FloatRegister f6 = {FloatRegisters::f6};
static MOZ_CONSTEXPR_VAR FloatRegister f8 = {FloatRegisters::f8};
static MOZ_CONSTEXPR_VAR FloatRegister f10 = {FloatRegisters::f10};
static MOZ_CONSTEXPR_VAR FloatRegister f12 = {FloatRegisters::f12};
static MOZ_CONSTEXPR_VAR FloatRegister f14 = {FloatRegisters::f14};
@@ -920,16 +935,17 @@ class Assembler : public AssemblerShared
// FP arithmetic instructions
BufferOffset as_adds(FloatRegister fd, FloatRegister fs, FloatRegister ft);
BufferOffset as_addd(FloatRegister fd, FloatRegister fs, FloatRegister ft);
BufferOffset as_subs(FloatRegister fd, FloatRegister fs, FloatRegister ft);
BufferOffset as_subd(FloatRegister fd, FloatRegister fs, FloatRegister ft);
BufferOffset as_abss(FloatRegister fd, FloatRegister fs);
BufferOffset as_absd(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_negs(FloatRegister fd, FloatRegister fs);
BufferOffset as_negd(FloatRegister fd, FloatRegister fs);
BufferOffset as_muls(FloatRegister fd, FloatRegister fs, FloatRegister ft);
BufferOffset as_muld(FloatRegister fd, FloatRegister fs, FloatRegister ft);
BufferOffset as_divs(FloatRegister fd, FloatRegister fs, FloatRegister ft);
BufferOffset as_divd(FloatRegister fd, FloatRegister fs, FloatRegister ft);
BufferOffset as_sqrts(FloatRegister fd, FloatRegister fs);
BufferOffset as_sqrtd(FloatRegister fd, FloatRegister fs);
--- a/js/src/jit/mips/CodeGenerator-mips.cpp
+++ b/js/src/jit/mips/CodeGenerator-mips.cpp
@@ -35,26 +35,43 @@ using JS::GenericNaN;
CodeGeneratorMIPS::CodeGeneratorMIPS(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm)
: CodeGeneratorShared(gen, graph, masm)
{
}
bool
CodeGeneratorMIPS::generatePrologue()
{
- if (gen->compilingAsmJS()) {
- masm.Push(ra);
- // Note that this automatically sets MacroAssembler::framePushed().
- masm.reserveStack(frameDepth_);
- } else {
- // Note that this automatically sets MacroAssembler::framePushed().
- masm.reserveStack(frameSize());
- masm.checkStackAlignment();
+ MOZ_ASSERT(!gen->compilingAsmJS());
+ // Note that this automatically sets MacroAssembler::framePushed().
+ masm.reserveStack(frameSize());
+ masm.checkStackAlignment();
+ return true;
+}
+
+bool
+CodeGeneratorMIPS::generateAsmJSPrologue(Label *stackOverflowLabel)
+{
+ JS_ASSERT(gen->compilingAsmJS());
+
+ masm.Push(ra);
+
+ // The asm.js over-recursed handler wants to be able to assume that SP
+ // points to the return address, so perform the check after pushing ra but
+ // before pushing frameDepth.
+ if (!omitOverRecursedCheck()) {
+ masm.branchPtr(Assembler::AboveOrEqual,
+ AsmJSAbsoluteAddress(AsmJSImm_StackLimit),
+ StackPointer,
+ stackOverflowLabel);
}
+ // Note that this automatically sets MacroAssembler::framePushed().
+ masm.reserveStack(frameDepth_);
+ masm.checkStackAlignment();
return true;
}
bool
CodeGeneratorMIPS::generateEpilogue()
{
masm.bind(&returnLabel_);
@@ -1989,17 +2006,17 @@ CodeGeneratorMIPS::visitAsmJSLoadHeap(LA
masm.convertDoubleToFloat32(NANReg, ToFloatRegister(out));
else
masm.moveDouble(NANReg, ToFloatRegister(out));
} else {
masm.move32(Imm32(0), ToRegister(out));
}
masm.bind(&done);
- return gen->noteHeapAccess(AsmJSHeapAccess(bo.getOffset()));
+ return masm.append(AsmJSHeapAccess(bo.getOffset()));
}
bool
CodeGeneratorMIPS::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
{
const MAsmJSStoreHeap *mir = ins->mir();
const LAllocation *value = ins->value();
const LAllocation *ptr = ins->ptr();
@@ -2065,17 +2082,17 @@ CodeGeneratorMIPS::visitAsmJSStoreHeap(L
} else
masm.storeDouble(ToFloatRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne));
} else {
masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
}
masm.bind(&rejoin);
- return gen->noteHeapAccess(AsmJSHeapAccess(bo.getOffset()));
+ return masm.append(AsmJSHeapAccess(bo.getOffset()));
}
bool
CodeGeneratorMIPS::visitAsmJSPassStackArg(LAsmJSPassStackArg *ins)
{
const MAsmJSPassStackArg *mir = ins->mir();
if (ins->arg()->isConstant()) {
masm.storePtr(ImmWord(ToInt32(ins->arg())), Address(StackPointer, mir->spOffset()));
--- a/js/src/jit/mips/CodeGenerator-mips.h
+++ b/js/src/jit/mips/CodeGenerator-mips.h
@@ -106,16 +106,17 @@ class CodeGeneratorMIPS : public CodeGen
return bailoutFrom(&bail, snapshot);
}
bool bailoutFrom(Label *label, LSnapshot *snapshot);
bool bailout(LSnapshot *snapshot);
protected:
bool generatePrologue();
+ bool generateAsmJSPrologue(Label *stackOverflowLabel);
bool generateEpilogue();
bool generateOutOfLineCode();
template <typename T>
void branchToBlock(Register lhs, T rhs, MBasicBlock *mir, Assembler::Condition cond)
{
mir = skipTrivialBlocks(mir);
--- a/js/src/jit/mips/MacroAssembler-mips.cpp
+++ b/js/src/jit/mips/MacroAssembler-mips.cpp
@@ -1769,17 +1769,19 @@ MacroAssemblerMIPSCompat::movePtr(ImmGCP
void
MacroAssemblerMIPSCompat::movePtr(ImmPtr imm, Register dest)
{
movePtr(ImmWord(uintptr_t(imm.value)), dest);
}
void
MacroAssemblerMIPSCompat::movePtr(AsmJSImmPtr imm, Register dest)
{
- MOZ_ASSUME_UNREACHABLE("NYI");
+ enoughMemory_ &= append(AsmJSAbsoluteLink(CodeOffsetLabel(nextOffset().getOffset()),
+ imm.kind()));
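+ // Emit a patchable placeholder; the AsmJSAbsoluteLink recorded above
+ // lets the linker overwrite the -1 with the real address.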
+ ma_liPatchable(dest, Imm32(-1));
}
void
MacroAssemblerMIPSCompat::load8ZeroExtend(const Address &address, Register dest)
{
ma_load(dest, address, SizeByte, ZeroExtend);
}
@@ -2902,16 +2904,24 @@ MacroAssemblerMIPSCompat::storeTypeTag(I
void
MacroAssemblerMIPSCompat::storeTypeTag(ImmTag tag, Register base, Register index, int32_t shift)
{
computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), SecondScratchReg);
ma_li(ScratchRegister, tag);
as_sw(ScratchRegister, SecondScratchReg, TAG_OFFSET);
}
+void
+MacroAssemblerMIPS::ma_callIonNoPush(const Register r)
+{
+ // This is a MIPS hack to push return address during jalr delay slot.
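+ // The caller must already have reserved the slot at sp[0].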
+ as_jalr(r);
+ as_sw(ra, StackPointer, 0);
+}
+
// This macro instruction calls the Ion code and pushes the return address
// to the stack in the case when the stack is aligned.
void
MacroAssemblerMIPS::ma_callIon(const Register r)
{
// This is a MIPS hack to push return address during jalr delay slot.
as_addiu(StackPointer, StackPointer, -2 * sizeof(intptr_t));
as_jalr(r);
@@ -2925,16 +2935,31 @@ MacroAssemblerMIPS::ma_callIonHalfPush(c
{
// This is a MIPS hack to push return address during jalr delay slot.
as_addiu(StackPointer, StackPointer, -sizeof(intptr_t));
as_jalr(r);
as_sw(ra, StackPointer, 0);
}
void
+MacroAssemblerMIPS::ma_callAndStoreRet(const Register r, uint32_t stackArgBytes)
+{
+ // Note: this function stores the return address to sp[16]. The caller
+ // must anticipate this by reserving additional space on the stack.
+ // The ABI does not provide space for a return address, so this function
+ // stores 'ra' above the four ABI argument slots.
+ // This function may only be called if there are four or fewer arguments.
+ JS_ASSERT(stackArgBytes == 4 * sizeof(uintptr_t));
+
+ // This is a MIPS hack to push return address during jalr delay slot.
+ as_jalr(r);
+ as_sw(ra, StackPointer, 4 * sizeof(uintptr_t));
+}
+
+void
MacroAssemblerMIPS::ma_call(ImmPtr dest)
{
ma_liPatchable(CallReg, dest);
as_jalr(CallReg);
as_nop();
}
void
--- a/js/src/jit/mips/MacroAssembler-mips.h
+++ b/js/src/jit/mips/MacroAssembler-mips.h
@@ -296,16 +296,19 @@ class MacroAssemblerMIPS : public Assemb
public:
// calls an Ion function, assuming that the stack is untouched (8 byte aligned)
void ma_callIon(const Register reg);
// calls an Ion function, assuming that sp has already been decremented
void ma_callIonNoPush(const Register reg);
// calls an Ion function, assuming that the stack is currently not 8 byte aligned
void ma_callIonHalfPush(const Register reg);
+ // calls reg, storing the return address into sp[stackArgBytes]
+ void ma_callAndStoreRet(const Register reg, uint32_t stackArgBytes);
+
void ma_call(ImmPtr dest);
void ma_jump(ImmPtr dest);
void ma_cmp_set(Register dst, Register lhs, Register rhs, Condition c);
void ma_cmp_set(Register dst, Register lhs, Imm32 imm, Condition c);
void ma_cmp_set(Register dst, Register lhs, ImmPtr imm, Condition c) {
ma_cmp_set(dst, lhs, Imm32(uint32_t(imm.value)), c);
@@ -391,17 +394,16 @@ class MacroAssemblerMIPSCompat : public
}
void call(const Register reg) {
as_jalr(reg);
as_nop();
}
void call(Label *label) {
- // for now, assume that it'll be nearby?
ma_bal(label);
}
void call(ImmWord imm) {
call(ImmPtr((void*)imm.value));
}
void call(ImmPtr imm) {
BufferOffset bo = m_buffer.nextOffset();
@@ -413,16 +415,48 @@ class MacroAssemblerMIPSCompat : public
call(CallReg);
}
void call(JitCode *c) {
BufferOffset bo = m_buffer.nextOffset();
addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
ma_liPatchable(ScratchRegister, Imm32((uint32_t)c->raw()));
ma_callIonHalfPush(ScratchRegister);
}
+
+ void appendCallSite(const CallSiteDesc &desc) {
+ enoughMemory_ &= append(CallSite(desc, currentOffset(), framePushed_));
+ }
+
+ void call(const CallSiteDesc &desc, const Register reg) {
+ call(reg);
+ appendCallSite(desc);
+ }
+ void call(const CallSiteDesc &desc, Label *label) {
+ call(label);
+ appendCallSite(desc);
+ }
+ void call(const CallSiteDesc &desc, AsmJSImmPtr imm) {
+ call(imm);
+ appendCallSite(desc);
+ }
+ void callExit(AsmJSImmPtr imm, uint32_t stackArgBytes) {
+ movePtr(imm, CallReg);
+ ma_callAndStoreRet(CallReg, stackArgBytes);
+ appendCallSite(CallSiteDesc::Exit());
+ }
+ void callIonFromAsmJS(const Register reg) {
+ ma_callIonNoPush(reg);
+ appendCallSite(CallSiteDesc::Exit());
+
+ // The Ion ABI has the callee pop the return address off the stack.
+ // The asm.js caller assumes that the call leaves sp unchanged, so move
+ // sp back down over the slot that was popped.
+ subPtr(Imm32(sizeof(void*)), StackPointer);
+ }
+
void branch(JitCode *c) {
BufferOffset bo = m_buffer.nextOffset();
addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
ma_liPatchable(ScratchRegister, Imm32((uint32_t)c->raw()));
as_jr(ScratchRegister);
as_nop();
}
void branch(const Register reg) {